diff --git "a/5575.jsonl" "b/5575.jsonl" new file mode 100644--- /dev/null +++ "b/5575.jsonl" @@ -0,0 +1,691 @@ +{"seq_id":"521611164","text":"# THIS SCRIPT DEFINES THE CONSTRAINTS OF MICHAEL SANKUR'S LUPFM MODEL\n#SPBC-HIL files moved 7/15/19\n\n# In[2]:\n\n# from setup_3 import *\nimport datetime\nimport numpy as np\nimport cvxpy as cp\nfrom setup_3 import transformer\n# In[3]:\n\n\n# HELPER FUNCTIONS\n\n\n# In[4]:\n\ndef cvx_setzeros(cvxvar, phasevec, timesteps):\n# Given a cvx variable, set it equal to zero on the phases where phasevec==0 for all timesteps\n conslist = list()\n for ts in range(0, timesteps):\n for idx in range(0, 3):\n if phasevec[idx,0]==0:\n conslist.append(cvxvar[idx,ts]==0)\n return conslist\n\ndef loadRX2Y(Rvec,Xvec):\n# Convert a 3x1 vector of Rs and a 3x1 vector of Xs to a 3x3 diagonal Y matrix for multiplying Vmag^2\n Ymat = np.zeros((3,3), dtype=np.complex_)\n for idx in range(0,3):\n if Rvec[idx,0]!=0 or Xvec[idx,0]!=0:\n Ymat[idx,idx]=1./(Rvec[idx,0]-1j*Xvec[idx,0]) # Remember that we're using the conjugate here\n return Ymat\n\ndef cvx_setrealdemandpu(currentnode,timestep):\n# Create an expression for the real power demand at each load (p.u.)\n demand = 0.\n for iload in currentnode.loads:\n cP = iload.constP\n cZ = iload.constZ\n cI = iload.constI\n \n Sbase = currentnode.kVAbase\n Vbase = currentnode.kVbase_phg\n Zbase = currentnode.Zbase\n \n currPpu = iload.Psched[:, timestep:timestep+1]/Sbase\n currRpu = iload.Rsched[:, timestep:timestep+1]/Zbase\n currXpu = iload.Xsched[:, timestep:timestep+1]/Zbase\n \n demand = demand + currPpu*(cP + cI)\n demand = demand + np.real(cZ*loadRX2Y(currRpu,currXpu))*currentnode.Vmagsq_linopt[:, timestep:timestep+1]\n return demand\n\ndef cvx_setreactivedemandpu(currentnode,timestep):\n# Create an expression for the reactive power demand at each load (p.u.)\n demand = 0.\n for iload in currentnode.loads:\n cP = iload.constP\n cZ = iload.constZ\n cI = iload.constI\n \n Sbase = currentnode.kVAbase\n Vbase = currentnode.kVbase_phg\n Zbase = currentnode.Zbase\n \n currQpu = iload.Qsched[:, timestep:timestep+1]/Sbase\n currRpu = iload.Rsched[:, timestep:timestep+1]/Zbase\n currXpu = iload.Xsched[:, timestep:timestep+1]/Zbase\n \n demand = demand + currQpu*(cP + cI)\n demand = demand + np.imag(cZ*loadRX2Y(currRpu,currXpu))*currentnode.Vmagsq_linopt[:, timestep:timestep+1]\n \n for icap in currentnode.cap:\n Sbase = currentnode.kVAbase\n Vbase = currentnode.kVbase_phg\n Zbase = currentnode.Zbase\n \n # Cap Qvec is read in with a negative sign, so this works out right\n # synchronous capacitors (condensors) are accounted for as actuators in cons_reactivepowerbalance\n demand = demand + icap.Qvec/Sbase\n return demand\n\n\n\ndef magang2complex(magvec,angvec):\n# Create a complex number from a column of magnitudes and a column of angles\n assert(len(magvec) == len(angvec)),\"Vector length problems\"\n outvec = np.zeros((len(magvec), 1), dtype=np.complex_)\n for idx in range(0,len(magvec)):\n outvec[idx,0] = magvec[idx,0]*np.cos(np.deg2rad(angvec[idx,0]))+1j*magvec[idx,0]*np.sin(np.deg2rad(angvec[idx,0]))\n return outvec\n\ndef cvx_buildgamma(currentline,timestep):\n# A helper function necessary for the LUPFM (Having Michael's dissertation open as a reference will be useful in understanding these constraints)\n# Note that this is referenced to receiving end\n if isinstance(currentline,transformer):\n Vtomag = currentline.w1_node.Vmag_NLpu[:, timestep:timestep+1]\n Vtoang = currentline.w1_node.Vang_NL[:, timestep:timestep+1]\n 
else:\n Vtomag = currentline.to_node.Vmag_NLpu[:, timestep:timestep+1]\n Vtoang = currentline.to_node.Vang_NL[:, timestep:timestep+1]\n Vto = magang2complex(Vtomag,Vtoang)\n \n gammaout = np.zeros((len(Vto), len(Vto)), dtype=np.complex_)\n \n for idx1 in range(0,len(Vto)):\n if currentline.phasevec[idx1,0] != 0:\n for idx2 in range(0,len(Vto)):\n if currentline.phasevec[idx2,0] != 0:\n gammaout[idx1,idx2] = Vto[idx1]/Vto[idx2]\n \n \n return gammaout\n\ndef cvx_buildM(currentline,timestep):\n# A helper function necessary for the LUPFM\n gamma = cvx_buildgamma(currentline,timestep)\n return np.real(np.multiply(gamma,np.conj(currentline.Zpu)))\n\ndef cvx_buildN(currentline,timestep):\n# A helper function necessary for the LUPFM\n gamma = cvx_buildgamma(currentline,timestep)\n return np.imag(np.multiply(gamma,np.conj(currentline.Zpu)))\n\n\n# In[5]:\n\n\n# CONSTRAINT DEFINITION FUNCTIONS\n\n\n# In[6]:\n\n\ndef cons_slack(feeder):\n# This will set slack bus magnitude to 1 and angles to 0, 4pi/3, 2pi/3.\n# A more sophisticated later version might set the voltages at multiple buses based on Vsrc objects.\n #[HIL] - refphasor - set slack = to refphasor\n#JP kyle sets slack to NL values instead of ref phasor. What should I do?\n conslist = list()\n for key, inode in feeder.busdict.items():\n if inode.type == 'SLACK' or inode.type == 'Slack' or inode.type == 'slack':\n for idx in range(feeder.timesteps):\n conslist.append(inode.Vmagsq_linopt[:,idx:idx+1] == feeder.refphasor[:,0:1])\n conslist.append(inode.Vang_linopt[:,idx:idx+1] == feeder.refphasor[:,1:2]) \n\n # conslist.append(inode.Vmagsq_linopt[:,idx:idx+1] == np.ones([3,1]))\n # conslist.append(inode.Vang_linopt[:,idx:idx+1] == np.array([[0],[4*np.pi/3],[2*np.pi/3]]))\n\n return conslist\n\n\n# In[7]:\n\n\ndef cons_missingnode(feeder):\n# This will set any nonexistent phase voltages at a node to zero, along with the powers on lines attached to those phases\n conslist = list()\n for key, inode in feeder.busdict.items():\n conslist = conslist + cvx_setzeros(inode.Vmagsq_linopt, inode.phasevec, feeder.timesteps)\n conslist = conslist + cvx_setzeros(inode.Vang_linopt, inode.phasevec, feeder.timesteps)\n for iline in inode.edges_in:\n conslist = conslist + cvx_setzeros(iline.P_linopt, inode.phasevec, feeder.timesteps)\n conslist = conslist + cvx_setzeros(iline.Q_linopt, inode.phasevec, feeder.timesteps)\n for iline in inode.edges_out:\n conslist = conslist + cvx_setzeros(iline.P_linopt, inode.phasevec, feeder.timesteps)\n conslist = conslist + cvx_setzeros(iline.Q_linopt, inode.phasevec, feeder.timesteps)\n return conslist\n\n\n# In[8]:\n\n\ndef cons_missingline(feeder):\n# This will set the powers on any nonexistent lines to 0 (covers any missed by cons_missingnode)\n conslist = list()\n for key, iline in feeder.linedict.items():\n assert (iline.from_phases == iline.to_phases), \"Phases across lines don't match\"\n conslist = conslist + cvx_setzeros(iline.P_linopt,iline.phasevec, feeder.timesteps)\n conslist = conslist + cvx_setzeros(iline.Q_linopt,iline.phasevec, feeder.timesteps)\n return conslist\n\n\n# In[9]:\n\n\ndef cons_Vineq(feeder):\n# Sets upper and lower bounds on nodal voltages. 
The allowable band may need to be reduced to get good nonlinear solutions!\n conslist = list()\n Vmax, Vmin = 1.1, .9\n \n for key, inode in feeder.busdict.items():\n for ts in range(0,feeder.timesteps):\n V = inode.Vmagsq_linopt[:,ts:ts+1]\n for idx in range(0,3):\n if inode.phasevec[idx,0] != 0:\n conslist.append(V[idx,0] <= Vmax*Vmax)\n conslist.append(V[idx,0] >= Vmin*Vmin)\n return conslist\n\n\n# In[10]: \n\n\ndef cons_realpwrbalance(feeder):\n# An expression for the p.u. real power balance at each node\n conslist = list()\n for key, inode in feeder.busdict.items():\n if inode.type != 'SLACK' and inode.type != 'Slack' and inode.type != 'slack':\n for ts in range(0,feeder.timesteps):\n power_in = np.zeros((3,1), dtype=np.complex_)\n for iedgein in inode.edges_in:\n power_in = power_in + iedgein.P_linopt[:,ts:ts+1]\n\n #JP added losses here and to constraint \n power_out = np.zeros((3,1), dtype=np.complex_)\n losses = np.zeros((3,1), dtype=np.complex_)\n for iedgeout in inode.edges_out:\n power_out = power_out + iedgeout.P_linopt[:,ts:ts+1]\n losses = losses + iedgeout.Lpu_real[:,ts:ts+1]\n\n actuation = np.zeros((3,1), dtype=np.complex_)\n for iact in inode.actuators:\n actuation = (actuation + iact.Pgen[:,ts:ts+1])\n# =============================================================================\n# if int(key) < 50:\n# conslist.append(iact.Pgen >= 0)\n# =============================================================================\n #print(f'{inode.name}: {power_in-power_out} == {cvx_setrealdemandpu(inode,ts)-actuation}')\n # print(\"in\",power_in)\n # print(\"out\",power_out)\n # print(\"loss\",losses)\n conslist.append(power_in-power_out-losses == cvx_setrealdemandpu(inode,ts)-actuation)\n # conslist.append(power_in-power_out == cvx_setrealdemandpu(inode,ts)-actuation)\n return conslist\n\ndef cons_reactivepwrbalance(feeder):\n# An expression for the p.u. reactive power balance at each node\n conslist = list()\n for key, inode in feeder.busdict.items():\n if inode.type != 'SLACK' and inode.type != 'Slack' and inode.type != 'slack':\n for ts in range(0,feeder.timesteps):\n power_in = np.zeros((3,1), dtype=np.complex_) \n for iedgein in inode.edges_in:\n power_in = power_in + iedgein.Q_linopt[:,ts:ts+1]\n\n #JP added losses here and to constraint \n power_out = np.zeros((3,1), dtype=np.complex_)\n losses = np.zeros((3,1), dtype=np.complex_)\n for iedgeout in inode.edges_out:\n power_out = power_out + iedgeout.Q_linopt[:,ts:ts+1]\n losses = losses + iedgeout.Lpu_imag[:,ts:ts+1]\n\n actuation = np.zeros((3,1), dtype=np.complex_)\n for iact in inode.actuators:\n actuation = (actuation + iact.Qgen[:,ts:ts+1])\n for icap in inode.sync_cap:\n actuation = (actuation + icap.Qgen[:,ts:ts+1])\n\n conslist.append(power_in-power_out-losses == cvx_setreactivedemandpu(inode,ts)-actuation)\n # conslist.append(power_in-power_out == cvx_setreactivedemandpu(inode,ts)-actuation)\n return conslist\n\n# In[11a]:\n \n### Calculate weight for PV for real time simulation ### \n\n# calculate solar radiation (adapted from Masters)\n#inputs of lat, lon, maridian [degrees] & fraction of PV [fraction i.e. 
0.5=50%].\ndef solweight_realtime(lat,lon,maridian): #[HIL]\n deg_rad = np.pi/180.\n rad_deg = 180./np.pi\n\n lat = lat*deg_rad\n \n date = datetime.datetime.now()\n month = date.month\n day = date.day\n hour = date.hour\n minute = date.minute\n DOY = datetime.datetime.now().timetuple().tm_yday\n \n dsol = 23.45*np.sin(360/365*(DOY-81)*deg_rad)*deg_rad #solar declination\n Hsol = 15*(12-(hour+(minute+(lon-maridian)*4)/60))*deg_rad #hour angle\n\n alt = np.arcsin(np.cos(lat)*np.cos(dsol)*np.cos(Hsol)+np.sin(lat)*np.sin(dsol)) #solar altitude\n azi = np.arcsin(np.cos(dsol)*np.sin(Hsol)/np.cos(alt)) #solar azimuth\n\n Asol = 1160 + 75*np.sin(360/365*(DOY-275)*deg_rad) #[W/m^2] #extraterrestial flux\n ksol = 0.174 + 0.035*np.sin(360/365*(DOY-100)*deg_rad) #optical depth\n msol = np.sqrt((708*np.sin(alt))**2 + 1417) - 708*np.sin(alt) #air mass ratio\n\n Ib = Asol*np.e**(-ksol*msol) #[W/m^2] #beam radiation\n solweight = Ib/1000\n \n return solweight\n\n#currently tilt is not included, assumed that panel always faces sun directly as to avoid underestimating\n#seems implausible to know the tilt of every array in the network\n \n# In[11b]:\n\n\n# def cons_actuators(feeder,acttoggle):\n# # Imports constraints on the actuators from the appropriate excel file\n# # acttoggle can turn actuators off altogether\n# # This version creates the constraints as a full QCQP\n# conslist = list()\n# for key, inode in feeder.busdict.items():\n# for iact in inode.actuators:\n# for ts in range(0,feeder.timesteps):\n# if acttoggle == True:\n# conslist.append(cp.abs(iact.Pgen[:,ts:ts+1]) <= iact.Psched[:,ts:ts+1]/inode.kVAbase)\n# for idx in range(0,3):\n# conslist.append(cp.square(iact.Pgen[idx,ts])+cp.square(iact.Qgen[idx,ts]) \\\n# <= cp.square(iact.Ssched[idx,ts]/inode.kVAbase))\n# else:\n# for idx in range(0,3):\n# conslist.append(iact.Pgen[idx,ts] == 0)\n# conslist.append(iact.Qgen[idx,ts] == 0)\n# return conslist \n\ndef cons_actuators(feeder,acttoggle):\n # Imports constraints on the actuators from the appropriate excel file\n # acttoggle can turn actuators off altogether\n # This version creates box constraints and can be used if the circular constraints defined above cause problems\n \n\n\n conslist = list()\n \n for key, inode in feeder.busdict.items():\n \n # Creates a feedback for saturated actuators.\n # This is a very basic implementation, should be improved in later versions\n \n #[HIL] - ICDI\n Psatmul = [1,1,1]\n Qsatmul = [1,1,1]\n for phidx, ph in enumerate (['a','b','c']):\n if any(key in x for x in feeder.Psat_nodes) & any(ph in x for x in feeder.Psat_nodes) == True:\n Psatmul[phidx] = 0.8\n if any(key in x for x in feeder.Qsat_nodes) & any(ph in x for x in feeder.Qsat_nodes) == True:\n Qsatmul[phidx] = 0.8\n for iact in inode.actuators:\n #scale PV actuation down based on TOD irradiance (really only uses beam) [HIL]\n if bool(feeder.PVforecast) == False:\n solweight = 0\n PVfrac = 0 \n for pvkey in feeder.PVforecast:\n if str(pvkey) == str(key):\n if feeder.PVforecast[str(key)]['on_off'] == True:\n solweight = solweight_realtime(feeder.PVforecast[str(key)]['lat'],feeder.PVforecast[str(key)]['lon'],\n feeder.PVforecast[str(key)]['maridian'])\n PVfrac = feeder.PVforecast[str(key)]['PVfrac']\n else:\n solweight = 0\n PVfrac = 0\n else:\n solweight = 0\n PVfrac = 0\n \n for ts in range(0,feeder.timesteps):\n if acttoggle == True:\n for idx in range(0,3):\n if iact.type == 'act':\n #adjust max Pgen to be scaled by insolation [HIL]\n Pmax = iact.Psched[idx,ts:ts+1]*(1-PVfrac*(1-solweight))\n 
conslist.append(cp.abs(iact.Pgen[idx,ts:ts+1]) <= (Pmax*Psatmul[idx])/inode.kVAbase) #[HIL] - ICDI\n #conslist.append(cp.abs(iact.Pgen[idx,ts:ts+1]) <= (iact.Psched[idx,ts:ts+1]*Psatmul)/inode.kVAbase) #[HIL] - ICDI\n #[HIL] - edit Ssched - Qgen cons\n conslist.append(cp.abs(iact.Qgen[idx,ts:ts+1]) <= ((iact.Ssched[idx,ts:ts+1]-cp.abs(iact.Pgen[idx,ts:ts+1])*inode.kVAbase)*Qsatmul[idx])/inode.kVAbase) #new\n #conslist.append(cp.abs(iact.Qgen[idx,ts:ts+1]) <= ((iact.Ssched[idx,ts:ts+1]-iact.Psched[idx,ts:ts+1])*Qsatmul)/inode.kVAbase) #old\n if iact.type == 'gen':\n conslist.append(cp.abs(iact.Pgen[idx,ts:ts+1]) <= (iact.Psched[idx,ts:ts+1])/inode.kVAbase)\n conslist.append(cp.abs(iact.Qgen[idx,ts:ts+1]) <= (iact.Qsched[idx,ts:ts+1])/inode.kVAbase)\n conslist.append(iact.Pgen[idx,ts:ts+1] >= 0)\n\n \n else:\n for idx in range(0,3):\n conslist.append(iact.Pgen[idx,ts] == 0)\n conslist.append(iact.Qgen[idx,ts] == 0)\n \n return conslist\n\n\n# In[11c]:\n \ndef cons_sync_caps(feeder):\n conslist = list()\n \n for key, icap in feeder.shuntdict.items():\n if icap.id[0:8] == 'sync_cap':\n for ts in range(0,feeder.timesteps):\n for idx in range(0,3):\n Qmax = np.abs(icap.Qvec[idx])\n conslist.append(cp.abs(icap.Qgen[idx,ts:ts+1]) <= Qmax/icap.node.kVAbase)\n conslist.append(icap.Qgen[idx,ts:ts+1] >= 0)\n \n return conslist\n\n# In[12]:\n\n\ndef cons_Mageq(feeder):\n# Enforces the relationships between voltage magnitudes and power flows specified by the LUPFM\n conslist = list()\n \n # Lines\n for key, iline in feeder.linedict.items():\n for ts in range(0,feeder.timesteps):\n M = cvx_buildM(iline,ts)\n N = cvx_buildN(iline,ts)\n Vsqsend = iline.from_node.Vmagsq_linopt[:,ts:ts+1]\n Vsqrec = iline.to_node.Vmagsq_linopt[:,ts:ts+1]\n P = iline.P_linopt[:,ts:ts+1]\n Q = iline.Q_linopt[:,ts:ts+1]\n # JP added Hvec here\n Hvec = iline.Hvec\n \n for idx in range(0,3):\n if iline.phasevec[idx,0] !=0:\n # conslist.append(Vsqsend[idx,0] == Vsqrec[idx,0] + 2*M[idx,:]*P - 2*N[idx,:]*Q)\n conslist.append(Vsqsend[idx,0] == Vsqrec[idx,0] + 2*M[idx,:]*P - 2*N[idx,:]*Q + Hvec[idx,0])\n \n # Switches\n for key, iline in feeder.switchdict.items():\n for ts in range(0,feeder.timesteps):\n M = cvx_buildM(iline,ts)\n N = cvx_buildN(iline,ts)\n Vsqsend = iline.from_node.Vmagsq_linopt[:,ts:ts+1]\n Vsqrec = iline.to_node.Vmagsq_linopt[:,ts:ts+1]\n P = iline.P_linopt[:,ts:ts+1]\n Q = iline.Q_linopt[:,ts:ts+1]\n # JP added Hvec here\n Hvec = iline.Hvec\n \n for idx in range(0,3):\n if iline.phasevec[idx,0] !=0:\n conslist.append(Vsqsend[idx,0] == Vsqrec[idx,0] + 2*M[idx,:]*P - 2*N[idx,:]*Q)\n # conslist.append(Vsqsend[idx,0] == Vsqrec[idx,0] + 2*M[idx,:]*P - 2*N[idx,:]*Q + Hvec[idx,0])\n \n # Transformers\n for key, iline in feeder.transdict.items():\n for ts in range(0,feeder.timesteps):\n M = cvx_buildM(iline,ts)\n N = cvx_buildN(iline,ts)\n Vsqsend = iline.w0_node.Vmagsq_linopt[:,ts:ts+1]\n Vsqrec = iline.w1_node.Vmagsq_linopt[:,ts:ts+1]\n P = iline.P_linopt[:,ts:ts+1]\n Q = iline.Q_linopt[:,ts:ts+1]\n # JP added Hvec here\n Hvec = iline.Hvec\n \n for idx in range(0,3):\n if iline.phasevec[idx,0] !=0:\n conslist.append(Vsqsend[idx,0] == Vsqrec[idx,0] + 2*M[idx,:]*P - 2*N[idx,:]*Q)\n # conslist.append(Vsqsend[idx,0] == Vsqrec[idx,0] + 2*M[idx,:]*P - 2*N[idx,:]*Q + Hvec[idx,0])\n return conslist\n\n\n# In[13]:\n\n\ndef cons_Angeq(feeder):\n# Enforces the relationships between voltage magnitudes and power flows specified by the LUPFM\n conslist = list()\n \n # Lines\n for key, iline in feeder.linedict.items():\n for ts in 
range(0,feeder.timesteps):\n M = cvx_buildM(iline,ts)\n N = cvx_buildN(iline,ts)\n Vangsend = iline.from_node.Vang_linopt[:,ts:ts+1]\n Vangrec = iline.to_node.Vang_linopt[:,ts:ts+1]\n P = iline.P_linopt[:,ts:ts+1]\n Q = iline.Q_linopt[:,ts:ts+1]\n Vvec = np.multiply(iline.from_node.Vmag_NLpu,iline.to_node.Vmag_NLpu)\n # - JP added Dang and taylor approx to constraint\n Dang = (iline.from_node.Vang_NL - iline.to_node.Vang_NL)*np.pi/180\n\n for idx in range(0,3):\n if iline.phasevec[idx,0] !=0:\n conslist.append(Vvec[idx,0]*(np.sin(Dang[idx,0]) + np.cos(Dang[idx,0])*(Vangsend[idx,0]-Vangrec[idx,0]-Dang[idx,0])) == -N[idx,:]*P - M[idx,:]*Q)\n # conslist.append(Vvec[idx,0]*(Vangsend[idx,0]-Vangrec[idx,0]) == -N[idx,:]*P - M[idx,:]*Q)\n\n # conslist.append(Vvec[idx,0]*(Dang[idx,0] - (Vangsend[idx,0]-Vangrec[idx,0])*Dang[idx,0]*Dang[idx,0]/3/2) == -N[idx,:]*P - M[idx,:]*Q)\n # Switches\n for key, iline in feeder.switchdict.items():\n for ts in range(0,feeder.timesteps):\n M = cvx_buildM(iline,ts)\n N = cvx_buildN(iline,ts)\n Vangsend = iline.from_node.Vang_linopt[:,ts:ts+1]\n Vangrec = iline.to_node.Vang_linopt[:,ts:ts+1]\n P = iline.P_linopt[:,ts:ts+1]\n Q = iline.Q_linopt[:,ts:ts+1]\n Vvec = np.multiply(iline.from_node.Vmag_NLpu,iline.to_node.Vmag_NLpu)\n # - JP added Dang and taylor approx to constraint\n Dang = (iline.from_node.Vang_NL - iline.to_node.Vang_NL)*np.pi/180\n for idx in range(0,3):\n if iline.phasevec[idx,0] !=0:\n conslist.append(Vvec[idx,0]*(np.sin(Dang[idx,0]) + np.cos(Dang[idx,0]) * (Vangsend[idx,0]-Vangrec[idx,0]-Dang[idx,0])) == -N[idx,:]*P - M[idx,:]*Q) \n # conslist.append(Vvec[idx,0]*(Vangsend[idx,0]-Vangrec[idx,0]) == -N[idx,:]*P - M[idx,:]*Q)\n\n # conslist.append(Vvec[idx,0]*(Dang[idx,0] - (Vangsend[idx,0]-Vangrec[idx,0])*Dang[idx,0]*Dang[idx,0]/3/2) == -N[idx,:]*P - M[idx,:]*Q)\n # Transformers\n for key, iline in feeder.transdict.items():\n for ts in range(0,feeder.timesteps):\n M = cvx_buildM(iline,ts)\n N = cvx_buildN(iline,ts)\n Vangsend = iline.w0_node.Vang_linopt[:,ts:ts+1]\n Vangrec = iline.w1_node.Vang_linopt[:,ts:ts+1]\n P = iline.P_linopt[:,ts:ts+1]\n Q = iline.Q_linopt[:,ts:ts+1]\n Vvec = np.multiply(iline.w0_node.Vmag_NLpu,iline.w1_node.Vmag_NLpu)\n # - JP added Dang and taylor approx to constraint\n Dang = (iline.w0_node.Vang_NL - iline.w1_node.Vang_NL)*np.pi/180\n\n for idx in range(0,3):\n if iline.phasevec[idx,0] !=0:\n conslist.append(Vvec[idx,0]*(np.sin(Dang[idx,0]) + np.cos(Dang[idx,0]) * (Vangsend[idx,0]-Vangrec[idx,0]-Dang[idx,0])) == -N[idx,:]*P - M[idx,:]*Q) \n # conslist.append(Vvec[idx,0]*(Vangsend[idx,0]-Vangrec[idx,0]) == -N[idx,:]*P - M[idx,:]*Q)\n\n # conslist.append(Vvec[idx,0]*(Dang[idx,0] - (Vangsend[idx,0]-Vangrec[idx,0])*Dang[idx,0]*Dang[idx,0]/3/2) == -N[idx,:]*P - M[idx,:]*Q)\n # conslist.append(Vvec[idx,0]*np.sin(Dang[idx,0]) == -N[idx,:]*P - M[idx,:]*Q) # Doesn't solve, no Vaang_linopt used\n return conslist\n\n\n# In[14]:\n\ndef cons_linecapacity(feeder):\n# Enforces thermal limits of lines - not sure if this should be done for just thermal or rated power of line including stability margin\n \n conslist = []\n for key, iline in feeder.linedict.items():\n # tturn i into P/Q for purpose of constraints?\n \n # using ampacity\n # conslist.append(iline.ampacity/feeder.subkVAbase >= cp.abs(iline.P_linopt[:,:]))\n \n # using line S rating\n if iline.MVArating_3ph != complex(0):\n kVAratingvec = np.ones((3,feeder.timesteps))*iline.MVArating_3ph/3 * 1000 * 1 #devide by 3 to account for per phase capacity\n 
conslist.append(kVAratingvec/iline.kVAbase >= cp.abs(iline.P_linopt[:,:]) + cp.abs(iline.Q_linopt[:,:]))\n else:\n pass\n return(conslist)\n\n\n\n\ndef cvx_set_constraints(feeder, acttoggle):\n# Sets all constraints\n# Acttoggle toggles actuators on or off. It also removes the inequality bounds on in the case where actuators are disabled.\n \n conslist = cons_slack(feeder)\n conslist = conslist + cons_missingnode(feeder) \n conslist = conslist + cons_missingline(feeder)\n conslist = conslist + cons_realpwrbalance(feeder) \n conslist = conslist + cons_reactivepwrbalance(feeder)\n # conslist = conslist + cons_realpwrbalance_kyle(feeder, slackdict) \n # conslist = conslist + cons_reactivepwrbalance_kyle(feeder, slackdict) \n \n if acttoggle == True:\n conslist = conslist + cons_Vineq(feeder)\n \n conslist = conslist + cons_actuators(feeder, acttoggle)\n conslist = conslist + cons_Mageq(feeder)\n conslist = conslist + cons_Angeq(feeder)\n conslist = conslist + cons_linecapacity(feeder)\n conslist = conslist + cons_sync_caps(feeder)\n return conslist","sub_path":"SPBC/spbc_iter/constraints_3.py","file_name":"constraints_3.py","file_ext":"py","file_size_in_byte":25039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"236097714","text":"from app.models import *\nfrom app.models import db\nfrom app.friends import get_non_friends, are_connected\n\n\ndef discover_friends(user_id_1):\n\tnon_friends = get_non_friends(user_id_1)\n\tinterests_1 = db.session.query(Interest).join(User_Interest).filter(User_Interest.user_id == user_id_1, Interest.id == User_Interest.interest_id).all()\n\n\tusers_interests = []\n\n\tfor user_2 in non_friends:\n\t\tadded = False\n\t\tshared_interests = []\n\t\tinterests_2 = db.session.query(Interest).join(User_Interest).filter(User_Interest.user_id == user_2.id, Interest.id == User_Interest.interest_id).all()\n\t\tif len(interests_1) > 0:\n\t\t\tfor interest_1 in interests_1:\n\t\t\t\tfor interest_2 in interests_2:\n\t\t\t\t\tif interest_1.name == interest_2.name:\n\t\t\t\t\t\tif added is False:\n\t\t\t\t\t\t\tadded = True\n\t\t\t\t\t\tshared_interests.append(interest_1)\n\t\t\tif added is True:\n\t\t\t\tusers_interests.append((user_2, shared_interests))\n\t\telse:\n\t\t\tif len(interests_2) > 0:\n\t\t\t\tusers_interests.append((user_2, interests_2))\n\n\treturn users_interests\n\n\ndef search_interests(interest, user_id):\n\tinterest_1 = db.session.query(Interest).filter(Interest.name == interest).first()\n\tnon_friends = []\n\tif interest_1 is not None:\n\t\tusers = db.session.query(User).join(User_Interest, User_Interest.user_id == User.id).filter(interest_1.id == User_Interest.interest_id).all()\n\t\tfor user in users:\n\t\t\tif user_id != user.id and not are_connected(user_id, user.id):\n\t\t\t\tnon_friends.append(user)\n\treturn non_friends\n\n\ndef get_interests(user_id):\n\tinterests = db.session.query(Interest).join(User_Interest).filter(User_Interest.user_id == user_id, Interest.id == User_Interest.interest_id).all()\n\treturn interests\n","sub_path":"app/discover.py","file_name":"discover.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"144778034","text":"# open.gl/framebuffers\nfrom glab import *\n\nboot_glut(640, 480, __file__)\n\nclass lines:\n tex = Texture2D(640, 480, type=GL_UNSIGNED_BYTE)\n rb = Renderbuffer()\n fb = Framebuffer()\n fb.attach_tex(tex)\n fb.attach_rb(rb)\n vb = 
Vertexbuffer(random.randn(200, 2)/2.0)\n p = Program(\"\"\" attribute vec2 v; void main() { gl_Position=vec4(v.x, v.y, 0., 1.);}\"\"\",\n \"\"\"void main() {gl_FragColor = vec4(0.0, 0.5, 0.5, 1.0);}\"\"\", ['v'], [])\n @classmethod\n def draw(l):\n with l.fb, l.p, l.vb as v:\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glVertexAttribPointer(l.p.loc['v'], 2, GL_FLOAT, False, 0, v)\n glDrawArrays(GL_LINE_STRIP, 0, 200)\n\nclass proj:\n p = Program(\"\"\"\n attribute vec2 v;\n varying vec2 texcoord;\n void main() { \n gl_Position = vec4(v.x, v.y, 0., 1.0); \n texcoord = v*0.6 + 0.5;\n }\"\"\", \"\"\"\n varying vec2 texcoord;\n uniform sampler2D tex;\n void main() { \n const float bsh = 1./900., bsv = 1./900.0;\n vec4 sum = vec4(0.0);\n for (int x=-4; x<=4; x++)\n for (int y=-4; y<=4; y++)\n sum += texture2D(tex, vec2(texcoord.x+x*bsh, texcoord.y+y*bsv))/81.0;\n gl_FragColor = sum;\n } \"\"\", ['v'], [])\n tex = lines.tex\n vb = Vertexbuffer(array([ 1, -1, 1, 1,-1,-1, -1, 1]))\n @classmethod\n def draw(p):\n with p.tex, p.p, p.vb:\n glVertexAttribPointer(p.p.loc['v'], 2, GL_FLOAT, False, 0, p.vb.vbo)\n glDrawArrays(GL_TRIANGLE_STRIP, 0, 6)\n\n\ndef draw():\n lines.draw()\n proj.draw()\n glutSwapBuffers()\n\nloop_glut(draw, draw)\n","sub_path":"attic/to_clean_up/fbuf.py","file_name":"fbuf.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"408120695","text":"def pivotInex(nums):\n com = {}\n c = nums[0]\n com[c] = 0\n for i in range(1,len(nums)):\n c = nums[i] + c\n com[c] = i\n d = nums[len(nums)-1]\n for i in range(len(nums)-1,0,-1):\n if d in com.keys():\n return com[d]+1\n else:\n d = nums[i] + d\n \n\n\n \n\nprint(pivotInex([1, 7, 3, 6, 5, 6]))\n \n","sub_path":"leetcode/findPivotIndex.py","file_name":"findPivotIndex.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"417693524","text":"import pytest\nimport virtool.jobs.fastqc\n\n\n@pytest.mark.parametrize(\"paired\", [True, False])\ndef test_run_fastqc(paired, mocker):\n read_paths = [\n \"/reads/reads_1.fq.gz\"\n ]\n\n if paired:\n read_paths.append(\"/reads/reads_2.fq.gz\")\n\n m_run_subprocess = mocker.stub()\n\n virtool.jobs.fastqc.run_fastqc(\n m_run_subprocess,\n 4,\n read_paths,\n \"/foo/bar/fastqc\"\n )\n\n expected = [\n \"fastqc\",\n \"-f\", \"fastq\",\n \"-o\", \"/foo/bar/fastqc\",\n \"-t\", \"4\",\n \"--extract\",\n \"/reads/reads_1.fq.gz\"\n ]\n\n if paired:\n expected.append(\"/reads/reads_2.fq.gz\")\n\n m_run_subprocess.assert_called_with(expected)\n\n\n@pytest.mark.parametrize(\"split_line,result\", [\n ([\"120-125\", \"NaN\", \"4.0\", \"8\", \"NaN\"], [4, 4, 4, 4]),\n ([\"120-125\", \"NaN\", \"NaN\", \"NaN\", \"NaN\"], [0, 0, 0, 0])\n])\ndef test_handle_base_quality_nan(split_line, result):\n assert virtool.jobs.fastqc.handle_base_quality_nan(split_line) == result\n","sub_path":"tests/jobs/test_fastqc.py","file_name":"test_fastqc.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"653369091","text":"from flask import request\nfrom flask_login import logout_user, current_user\n\nfrom const.path import BUILD_PATH\nfrom const.datasources import DS_PATH\nfrom env import QuerybookSettings\nfrom lib.utils.plugin import import_plugin\n\n\nauth = None\nlogin_config = None\n\n\ndef 
init_app(flask_app):\n load_auth()\n\n global auth\n auth.init_app(flask_app)\n\n @flask_app.before_request\n def check_auth():\n ignore_paths = [\"/ping/\"] + getattr(auth, \"ignore_paths\", [])\n\n if request.path in ignore_paths:\n return\n # API LOGIC and Static File are handled differently\n if request.path.startswith(DS_PATH) or request.path.startswith(BUILD_PATH):\n return\n if not current_user.is_authenticated:\n return auth.login(request)\n\n check_auth # PYLINT :(\n\n\ndef load_auth():\n global auth\n auth = import_plugin(QuerybookSettings.AUTH_BACKEND)\n get_login_config()\n\n\ndef logout():\n logout_user()\n\n\ndef get_login_config():\n from app.datasource import register\n\n global auth\n global login_config\n if login_config is None:\n has_login = hasattr(auth, \"login_user_endpoint\")\n has_signup = hasattr(auth, \"signup_user_endpoint\")\n if has_login:\n register(\"/login/\", methods=[\"POST\"], require_auth=False)(\n auth.login_user_endpoint\n )\n\n if has_signup:\n register(\"/signup/\", methods=[\"POST\"], require_auth=False)(\n auth.signup_user_endpoint\n )\n login_config = {\"has_login\": has_login, \"has_signup\": has_signup}\n return login_config\n","sub_path":"querybook/server/app/auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"358846272","text":"# 1. set random.seed and cudnn performance\nrandom.seed(config.seed)\nnp.random.seed(config.seed)\ntorch.manual_seed(config.seed)\ntorch.cuda.manual_seed_all(config.seed)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpus\ntorch.backends.cudnn.benchmark = True\n\n# 2. evaluate func\n\n\ndef evaluate(val_loader, model, criterion):\n # 2.1 define meters\n losses = AverageMeter()\n top1 = AverageMeter()\n top2 = AverageMeter()\n # 2.2 switch to evaluate mode and confirm model has been transfered to cuda\n model.cuda()\n model.eval()\n with torch.no_grad():\n for i, (input, target) in enumerate(val_loader):\n input = Variable(input).cuda()\n target = Variable(torch.from_numpy(np.array(target)).long()).cuda()\n # 2.2.1 compute output\n output = model(input)\n loss = criterion(output, target)\n # 2.2.2 measure accuracy and record loss\n precision1, precision2 = accuracy(output, target, topk=(1, 2))\n losses.update(loss.item(), input.size(0))\n top1.update(precision1[0], input.size(0))\n top2.update(precision2[0], input.size(0))\n return [losses.avg, top1.avg, top2.avg]\n\n\n# 创建模型\nmodel = StyleNet(num_classes=18)\n# 检查CUDA可用性\nif torch.cuda.is_available():\n model.cuda()\n# 定义优化器设定学习率等参数\noptimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.0001)\n# 定义损失函数(这里是交叉熵)\nloss_fn = nn.CrossEntropyLoss()\n","sub_path":"tempDocs/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326032787","text":"import os\nfrom ftplib import FTP\nfrom pysus.utilities.readdbc import read_dbc\nfrom pysus.online_data import CACHEPATH\nfrom dbfread import DBF\nfrom io import StringIO\nimport pandas as pd\n\nagravos = {\n 'Animais Peçonhentos': 'ANIM',\n 'Botulismo': 'BOTU',\n 'Chagas': 'CHAG',\n 'Colera': 'COLE',\n 'Coqueluche': 'COQU',\n 'Dengue': 'DENG',\n 'Difteria': 'DIFT',\n 'Esquistossomose': 'ESQU',\n 'Febre Amarela': 'FAMA',\n 'Febre Maculosa': 'FMAC',\n 'Febre Tifoide': 'FTIF',\n 'Hanseniase': 'HANS',\n 'Hantavirose': 'HANT',\n 'Hepatites Virais': 'HEPA',\n 'Intoxicação 
Exógena': 'IEXO',\n 'Leishmaniose Visceral': 'LEIV',\n 'Leptospirose': 'LEPT',\n 'Leishmaniose Tegumentar': 'LTAN',\n 'Malaria': 'MALA',\n 'Meningite': 'MENI',\n 'Peste': 'PEST',\n 'Poliomielite': 'PFAN',\n 'Raiva Humana': 'RAIV',\n 'Tétano Acidental': 'TETA',\n 'Tétano Neonatal': 'TETN',\n 'Tuberculose': 'TUBE',\n 'Violência Domestica': 'VIOL'\n}\n\n\ndef list_diseases():\n \"\"\"List available diseases on SINAN\"\"\"\n return list(agravos.keys())\n\ndef get_available_years(state, disease):\n ftp = FTP('ftp.datasus.gov.br')\n ftp.login()\n ftp.cwd(\"/dissemin/publicos/SINAN/DADOS/FINAIS\")\n # res = StringIO()\n res = ftp.nlst(f'{agravos[disease.title()]}{state}*.dbc')\n return res\n\ndef download(state, year, disease, cache=True):\n \"\"\"\n Downloads SINAN data directly from Datasus ftp server\n :param state: two-letter state identifier: MG == Minas Gerais\n :param year: 4 digit integer\n :disease: Diseases\n :return: pandas dataframe\n \"\"\"\n try:\n assert disease.title() in agravos\n except AssertionError:\n print(f'Disease {disease} is not available in SINAN.\\nAvailable diseases: {list_diseases()}')\n year2 = str(year)[-2:].zfill(2)\n state = state.upper()\n if year < 2007:\n raise ValueError(\"SINAN does not contain data before 2007\")\n ftp = FTP('ftp.datasus.gov.br')\n ftp.login()\n ftp.cwd(\"/dissemin/publicos/SINAN/DADOS/FINAIS\")\n dis_code = agravos[disease.title()]\n fname = f'{dis_code}{state}{year2}.DBC'\n\n cachefile = os.path.join(CACHEPATH, 'SINAN_' + fname.split('.')[0] + '_.parquet')\n if os.path.exists(cachefile):\n df = pd.read_parquet(cachefile)\n return df\n\n try:\n ftp.retrbinary('RETR {}'.format(fname), open(fname, 'wb').write)\n except:\n try:\n ftp.retrbinary('RETR {}'.format(fname.upper()), open(fname, 'wb').write)\n except Exception as e:\n raise Exception(\"{}\\nFile {} not available\".format(e, fname))\n\n df = read_dbc(fname, encoding='iso-8859-1')\n if cache:\n df.to_parquet(cachefile)\n os.unlink(fname)\n return df\n","sub_path":"pysus/online_data/SINAN.py","file_name":"SINAN.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"440788594","text":"#!/usr/bin/env python\n\nimport yaml\n\nfrom intmaniac.testset import Testset\nfrom intmaniac import tools\nfrom intmaniac import output\n\nimport sys\nfrom os.path import join\nfrom errno import *\nfrom argparse import ArgumentParser\n\nconfig = None\nlogger = None\nderived_basedir = None\nglobal_overrides = None\n\n\n##############################################################################\n# #\n# default configuration values for test config #\n# #\n##############################################################################\n\n\n##############################################################################\n# #\n# reading of config data #\n# initialization of test set objects #\n# #\n##############################################################################\n\n\ndef _get_test_sets(setupdata):\n \"\"\"Always returns a list of list of Testsets\n :param setupdata the full yaml setup data\n \"\"\"\n testsets = setupdata['testsets']\n global_config = setupdata['global']\n rv = []\n for tsname, tests in sorted(testsets.items()):\n ts = Testset(name=tsname)\n rv.append(ts)\n # remove global settings from test set\n ts.set_global_config(tools.deep_merge(global_config,\n tests.pop(\"_global\", {})))\n for test_name, test_config in sorted(tests.items()):\n # the overrides have precedence above everything\n 
use_test_config = tools.deep_merge(test_config, global_overrides)\n ts.add_from_config(test_name, use_test_config)\n return rv\n\n\ndef _get_setupdata():\n stub = tools.get_full_stub()\n filedata = None\n try:\n with open(config.config_file, \"r\") as ifile:\n filedata = yaml.safe_load(ifile)\n except IOError as e:\n # FileNotFoundError is python3 only. yihah.\n if e.errno == ENOENT:\n tools.fail(\"Could not find configuration file: %s\" % config.config_file)\n else:\n tools.fail(\"Unspecified IO error: %s\" % str(e))\n logger.info(\"Read configuration file %s\" % config.config_file)\n return tools.deep_merge(stub, filedata)\n\n\ndef _prepare_overrides():\n global global_overrides\n global_overrides = tools.get_test_stub()\n # add config file entry\n global_overrides['meta']['_configfile'] = config.config_file\n # add test_basedir entry\n global_overrides['meta']['test_basedir'] = derived_basedir\n # add env settings from command line\n for tmp in config.env:\n try:\n k, v = tmp.split(\"=\", 1)\n global_overrides['environment'][k] = v\n except ValueError:\n tools.fail(\"Invalid environment setting: %s\" % tmp)\n\n\ndef _get_and_init_configuration():\n setupdata = _get_setupdata()\n _prepare_overrides()\n if \"output_format\" in setupdata:\n logger.warning(\"Text output format: %s\" % setupdata['output_format'])\n output.init_output(setupdata['output_format'])\n return setupdata\n\n\n##############################################################################\n# #\n# run test sets logic #\n# #\n##############################################################################\n\n\ndef _run_test_sets(testsets):\n retval = True\n dumps = []\n for testset in testsets:\n testset.run()\n retval = testset.succeeded() and retval\n dumps.append(testset.dump)\n output.output.block_open(\"Test protocol\")\n for dump_function in dumps:\n dump_function()\n output.output.block_done()\n return retval\n\n\n##############################################################################\n# #\n# startup initialization #\n# #\n##############################################################################\n\n\ndef _prepare_environment(arguments):\n global config, logger, derived_basedir\n parser = ArgumentParser()\n parser.add_argument(\"-c\", \"--config-file\",\n help=\"specify configuration file\",\n default=\"./intmaniac.yaml\")\n parser.add_argument(\"-e\", \"--env\",\n help=\"dynamically add a value to the environment\",\n default=[],\n action=\"append\")\n parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase verbosity level, use multiple times\",\n default=0,\n action=\"count\")\n parser.add_argument(\"-t\", \"--temp-output-dir\",\n help=\"test dir location, default: $pwd/intmaniac\")\n config = parser.parse_args(arguments)\n tools.init_logging(config)\n derived_basedir = tools.setup_up_test_directory(config)\n logger = tools.get_logger(__name__,\n filename=join(derived_basedir, \"root.log\"))\n\n\ndef _internal_entrypoint(args):\n _prepare_environment(args)\n configuration = _get_and_init_configuration()\n result = _run_test_sets(_get_test_sets(configuration))\n if not result:\n sys.exit(1)\n\n\ndef console_entrypoint():\n _internal_entrypoint(sys.argv[1:])\n","sub_path":"intmaniac/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"211254596","text":"iterations = 128\n\ndef sweep_color(val):\n r,g,b = (0,0,0)\n if val == iterations: return (r,g,b)\n wavelength = 
(iterations-val)*(750-380)/iterations+380\n attenuation = 0\n\n if wavelength >= 380 and wavelength <=440:\n attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)\n r = 0xff * ((-(wavelength-440)/(440-380)) * attenuation)\n b = 0xff * attenuation\n\n elif wavelength >= 440 and wavelength <= 490:\n g = 0xff * ((wavelength - 440)/(490 - 440))\n b = 0xff\n\n elif wavelength >= 490 and wavelength <= 510:\n g = 0xff\n b = 0xff * (-(wavelength - 510) / (510 - 490))\n\n elif wavelength >= 510 and wavelength <= 580:\n r = 0xff * ((wavelength - 510) / (580 - 510))\n g = 0xff\n\n elif wavelength >= 580 and wavelength <= 645:\n r = 0xff\n g = 0xff * (-(wavelength - 645) / (645 - 580))\n\n elif wavelength >= 645 and wavelength <= 750:\n attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)\n r = 0xff * attenuation\n\n return (int(r),int(g),int(b))\n\nfor i in range(0,127):\n color = sweep_color(i)\n color_str = '{:02x}{:02x}{:02x}'.format(color[0],color[1],color[2])\n print('x\\\"{}\\\" when iteration_s = {:04x} else'.format(color_str,i))\n","sub_path":"lut_vhdl.py","file_name":"lut_vhdl.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"283014417","text":"from django.http import HttpResponse\nfrom django.shortcuts import render,redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom . models import A_class,B_class,G_class\n# Create your views here.\n\ndef add_c(req):\n list = A_class.objects.all()\n list1 = []\n for var in list:\n dic = {}\n dic['name'] = str(var.name)\n dic['id'] = str(var.id)\n list1.append(dic)\n\n\n\n return render(req,'subadd.html',{'list':list1})\n@csrf_exempt\ndef add_x(req):\n name = req.POST['name']\n desc = req.POST['desc']\n ac_id = req.POST.get('list')\n print('***********')\n print(ac_id)\n print('***********')\n b = B_class()\n b.name = name\n b.desc = desc\n b.ac_id = ac_id\n b.save()\n return HttpResponse('添加成功')","sub_path":"django/mysites/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"622614085","text":"# как написать бота\r\n\r\n'''\r\nfdgdf\r\nfhdhdg\r\nhjghj\r\n'''\r\n\r\nimport telebot\r\nfrom telebot import TeleBot\r\n\r\nimport constants\r\nimport descriptions\r\nimport prices\r\n\r\nbot: TeleBot = telebot.TeleBot(constants.token)\r\n\r\n# bot.send_message(5527665, \"Тест\")\r\n\r\n\r\nprint(bot.get_me())\r\n\r\n\r\ndef log(message, answer):\r\n print('\\n ------')\r\n from datetime import datetime\r\n print(datetime.now())\r\n print('Сообщение от {0} {1} (id = {2}) \\n {3}'.format(message.from_user.first_name,\r\n message.from_user.last_name,\r\n str(message.from_user.id),\r\n message.text))\r\n print('Ответ бота \\n', answer)\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef handle_text(message):\r\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\r\n user_markup.row('Дженерики Виагры', 'Дженерики Левитры', 'Дженерики Сиалиса')\r\n bot.send_message(message.from_user.id, 'Добро пожаловать!', reply_markup=user_markup)\r\n\r\n\r\n@bot.message_handler(commands=['help'])\r\ndef handle_text(message):\r\n bot.send_message(message.chat.id, 'Мои возможности весьма специфичны!')\r\n\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef handle_text(message):\r\n answer = 'Я не понимаю 😕'\r\n if message.text == 'Дженерики Виагры':\r\n answer = descriptions.sildenafil\r\n user_markup = 
telebot.types.ReplyKeyboardMarkup(True, True)\r\n user_markup.row('Силденафил 100 мг')\r\n bot.send_message(message.from_user.id, answer, reply_markup=user_markup)\r\n log(message, answer)\r\n elif message.text == 'Силденафил 100 мг':\r\n answer = 'Цены: \\n 1 шт. - {0} \\n 5 шт. - {1} \\n 10 шт. - {2} \\n 20 шт. - {3}'.format(prices.sildenafil100_1,\r\n prices.sildenafil100_5,\r\n prices.sildenafil100_10,\r\n prices.sildenafil100_20)\r\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\r\n user_markup.row('999')\r\n bot.send_message(message.from_user.id, answer, reply_markup=user_markup)\r\n log(message, answer)\r\n else:\r\n bot.send_message(message.from_user.id, answer)\r\n log(message, answer)\r\n\r\n\r\nbot.polling(none_stop=True, interval=0)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"460260803","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__description__ = \" check if PMID or DOI are already listed in Wikidata\"\n__author__ = \"Eva Seidlmayer \"\n__copyright__ = \"2020 by Eva Seidlmayer\"\n__license__ = \"ISC license\"\n__email__ = \"seidlmayer@zbmed.de\"\n__version__ = \"1 \"\n\n\nimport argparse\nimport pandas as pd\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nimport csv\nimport time\nfrom re import search\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__description__)\n parser.add_argument(\"input_file_name\")\n parser.add_argument(\"output_file_name\")\n args = parser.parse_args()\n\n# with open(args.output_file_name, 'w') as csvfile:\n # csv_writer = csv.writer(csvfile)\n # csv_writer.writerow(['orcid','pmid','pmc','doi', 'wosuid', 'eid','dnb', 'article-qnr'])\n\n df = pd.read_csv(args.input_file_name)\n print(len(df))\n\n df_clean = df.drop_duplicates()\n print(len(df_clean))\n print(df_clean)\n df_clean.to_csv(args.output_file_name, index=False)\n\nmain()\n","sub_path":"analysis/dedublicate.py","file_name":"dedublicate.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"68247260","text":"from binary_search_tree import *\n\ndef check(node, visited):\n if (node == None):\n return None\n \n check(node.left, visited) ## adds each node in order in the visited list\n if (len(visited) > 0 and\n node.item < visited[-1]):\n return False\n\n visited.append(node.item)\n \n right = check(node.right, visited)\n if (right == False):\n return False\n \n return node.item\n\n\ndef isBST(node):\n return check(node, []) != False\n\nnew = Node(10, None, None)\nnew.left = Node(5, None, None)\nnew.left.right = Node(25, None, None)\n\nprint(isBST(new))","sub_path":"Trees/isBST.py","file_name":"isBST.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"236768431","text":"#############################################################################################\n#\n# A basic script to simulate Markov chain sample paths from a mixture of transition matrices.\n# Transition matrices and initialisation probabilities sampled as Dirichlet random variables.\n#\n# matthew@refute.me.uk\n#\n#############################################################################################\n\n\nimport scipy as sp\nimport scipy.stats as stats\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport bisect\n\ndef sample(p):\n 
\"\"\"p a (1 x k) array\"\"\"\n P = np.cumsum(p)\n r = stats.uniform.rvs()\n return bisect.bisect_right(P,r)\n\ndef sample_path(I,P,n):\n \"\"\"I a (1 x k) array, P a (k x k) array, n an integer\"\"\"\n X = [sample(I)]\n for i in xrange(n-1):\n X.append(sample(P[X[i-1],:]))\n return np.array(X)\n\ndef rdirichlet(a):\n \"\"\"a a (1 x k) vector of weights, returns a draw from the Dirichlet(a) distribution\"\"\"\n G = stats.gamma.rvs(a) \n return G/sum(G) \n\ndef make_IP(k):\n \"\"\" Return a transition matrix and initial prob vector\"\"\"\n a = np.array([1 for i in xrange(k)])\n return rdirichlet(a), np.array([rdirichlet(a) for i in range(k)])\n\ndef gen_data(d,p,l,n):\n \"\"\" d being the cardinality of the state space, \n p a (1 x c) vector of mixing probabilities,\n l the length of each obs, n the total number of obs.\"\"\"\n k = len(p)\n C = [make_IP(d) for i in xrange(k)]\n K = []\n D = []\n for i in xrange(n):\n c = sample(p)\n I,P = C[c]\n K.append(c)\n D.append(sample_path(I,P,l)) \n return np.array(D),K \n\ndef jitter(v,s=10.):\n e = stats.norm.rvs(size=len(v))/s\n return v + e\n\ndef plot_paths(D,K,s=10.):\n cols = [\"red\",\"blue\"]\n for i in range(len(K)):\n plt.plot(jitter(D[i,:],s=s),color=cols[K[i]])\n plt.show()\n\nif __name__ == \"__main__\":\n D,K = gen_data(10,[.5,.5],5,20) \n plot_paths(D,K,s=5.)\n","sub_path":"mixmarkov.py","file_name":"mixmarkov.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"255872028","text":"import os\nimport sys\nimport contextlib\nimport phonenumbers\n\nfrom asteval import Interpreter\nfrom twilio.rest import Client\nfrom django.core.mail import send_mail\nfrom django.template import Template\nfrom django.template import Context\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email as django_validate_email\n\nfrom config import settings\n\nfrom logging import getLogger\n\nlogger = getLogger('django')\n\n\n@contextlib.contextmanager\ndef limited_recursion(recursion_limit):\n \"\"\"\n Prevent unlimited recursion.\n \"\"\"\n\n old_limit = sys.getrecursionlimit()\n sys.setrecursionlimit(recursion_limit)\n\n try:\n yield\n finally:\n sys.setrecursionlimit(old_limit)\n\n\ndef evaluate(expression, data):\n try:\n formula = Template(expression).render(Context(data))\n\n with limited_recursion(250):\n aeval = Interpreter()\n return aeval(formula)\n\n except Exception as exc:\n logger.exception(exc)\n return None\n\n\ndef send_email(subject, message, to_email, from_email=None, html_message=None):\n\n logger.info('Sending email to: ' + to_email)\n\n if not from_email:\n from_email = settings.DEFAULT_FROM_EMAIL\n\n send_mail(subject, message, from_email, (to_email,), html_message=html_message)\n\n\ndef send_sms(message, number, twilio_sid, twilio_token, twilio_from_number):\n logger.info('Sending sms to: ' + number)\n\n details = dict(\n to=str(number),\n from_=twilio_from_number,\n body=str(message),\n )\n\n if os.environ.get('DEBUG', True) not in ('True', 'true', True,):\n client = Client(twilio_sid, twilio_token)\n client.messages.create(**details)\n else:\n logger.info(details)\n\n\ndef validate_email(email):\n try:\n django_validate_email(email)\n except ValidationError:\n return False\n else:\n return True\n\n\ndef validate_mobile(phone):\n try:\n number = phonenumbers.parse(phone)\n # Return 0, 2 or 1. 
1 represents MOBILE\n number_type = phonenumbers.number_type(number)\n\n if not phonenumbers.is_valid_number(number) or number_type != 1:\n raise ValueError\n except (phonenumbers.NumberParseException, ValueError):\n return False\n else:\n return True\n","sub_path":"src/notifications/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"33501963","text":"#\n# def saudacao(saudacao, nome):\n# print(f'{saudacao} {nome}')\n#\n# saudacao('ola', 'Joao')\n# saudacao('Oi', 'Maria')\n#\n# def soma(n1, n2, n3):\n# print(n1 + n2 + n3)\n#\n# soma(10,0,2)\n# soma(10,33,22)\n# soma(10,550,2)\n\n# def aumento_percentual(valor, percentual):\n# return(valor+(valor * percentual/100))\n#\n# ap= aumento_percentual(334,6)\n# print(ap)\n# ap = aumento_percentual(1320,15)\n# print(ap)\n# ap = aumento_percentual(337,8)\n# print(ap)\n\n\ndef fb(n):\n if n % 3 == 0:\n return f'fizzbuzz, {n} é divisivel por 3 e 5'\n if n % 5 == 0:\n return f'buzz, {n} é divisivel por 3 e 5'\n if n % 3 == 0:\n return f'fizz, {n} é divisivel por 3 e 5'\n\n return n\n\nfrom random import randint\n\nfor i in range(100):\n aleatorio = randint(0,100)\n print(fb(aleatorio))\n\n","sub_path":"ProgramacaoProcedural/ExecFuncoes_P1_P2.py","file_name":"ExecFuncoes_P1_P2.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"127792815","text":"# Sabaody\n# Copyright 2018 Shaik Asifullah and J Kyle Medley\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom sabaody.timecourse.timecourse_sim_validate import TimecourseSimValidate\nfrom sabaody.scripts.benchmarks.biopredyn.launcher import BioPreDynUDP\n\nfrom params import param_list, getBestKnownValues\n\nclass B2ProblemValidator(TimecourseSimValidate):\n ''' Class that performs a timecourse simulation\n and calculates the residuals for b4.'''\n\n def __init__(self, sbml, n):\n self.param_list = param_list\n super().__init__(\n sbml,\n measured_quantities = ['cpep', 'cg6p', 'cpyr', 'cf6p', 'cglcex', 'cg1p', 'cpg', 'cfdp', 'cgap'],\n param_list = param_list,\n reference_param_values = getBestKnownValues(),\n time_start = 0.,\n time_end = 300.,\n n = n,\n )\n\nclass B2Validator_UDP(BioPreDynUDP):\n def __init__(self, lb, ub, sbml_file='b2.xml', n=100):\n super().__init__(lb=lb, ub=ub, sbml_file=sbml_file)\n self.n = n\n\n\n def __getstate__(self):\n s = super().__getstate__()\n s.update({'n': self.n})\n return s\n\n\n def __setstate__(self, state):\n super().__setstate__(state)\n self.n = state['n']\n\n def fitness(self, x):\n if self.evaluator is None:\n from b2problem_validator import B2ProblemValidator\n self.evaluator = B2ProblemValidator(self.sbml_file, self.n)\n return (self.evaluator.evaluate(x),)\n","sub_path":"sabaody/scripts/benchmarks/biopredyn/b2/b2problem_validator.py","file_name":"b2problem_validator.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"402868254","text":"\nimport socket\nimport json\nimport threading\n\nHOST = 'localhost'\nPORT = 8000\nBUFSIZE = 1024\nADDR = (HOST, PORT)\nSRC = 100\nDES = 101\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(ADDR)\n\ndef recv_from():\n\twhile True:\n\t\tfromseerver = client.recv(BUFSIZE)\n\t\tif not fromseerver:\n\t\t\tcontinue\n\t\tprint('from', DES, 
fromseerver)\n\t\nt = threading.Thread(target=recv_from)\nt.start()\t\n\t\n\t\nwhile True:\n\tdata = input('> ')\n\tif data == 'q':\n\t\tbreak\n\tform = {\n\t\t'src': SRC,\n\t\t'des': DES,\n\t\t'data': data\n\t}\n\ts = json.dumps(form, ensure_ascii=False)\n\ts = s.encode('utf-8')\n\tclient.send(s)\n\n\n\n\t\nclient.close()\n\n\"\"\"\n\tfromseerver = client.recv(BUFSIZE)\n\tif not fromseerver:\n\t\tcontinue\n\tprint('from server', fromseerver)\n\"\"\"","sub_path":"note/python/socket/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"159993116","text":"\"\"\"\nHashtag Generator\n\nCreate a function that is a Hashtag Generator by using the following rules:\n\nThe output must start with a hashtag (#).\nEach word in the string must have its first letter capitalized.\nIf the final result, a single string, is longer than 140 characters, the function should return false.\nIf either the input (str) or the result is an empty string, the function should return false.\nExamples\ngenerate_hashtag(\" Hello World \" ) ➞ \"#HelloWorld\"\n\ngenerate_hashtag(\"\") ➞ false, \"Expected an empty string to return false\"\n\ngenerate_hashtag(\"Edabit Is Great\") ➞ \"#EdabitIsGreat\", \"Should\n\"\"\"\n\n\n\n################################################################\n\"\"\"\nSolution 1\n\"\"\"\n\n\ndef generate_hashtag(txt):\n\ttxt = txt.title().replace(\" \", \"\")\n\treturn '#' + txt if 0 < len(txt) < 140 else False\n\n\n\n################################################################\n\"\"\"\nSolution 2\n\"\"\"\n\n\ndef generate_hashtag(txt):\n\tres = '#'\n\tfor i in txt.split():\n\t\tres+=i[0].upper()+i[1:]\n\treturn res if 1= 140:\n\t\treturn False\n\treturn '#' + result\n\n\n\n################################################################\n\"\"\"\nSolution 4\n\"\"\"\n\n\nimport re\ndef generate_hashtag(txt):\n\tif txt:\n\t\tA=re.findall(r'(?i)[a-z]+', txt)\n\t\tif A:\n\t\t\tres=''.join([x.capitalize() for x in A])\n\t\t\treturn '#'+res if len(res)<140 else False\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\n\n\n\n\n","sub_path":"+1500 Python Challenges/Hard/Hashtag Generator.py","file_name":"Hashtag Generator.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"518507315","text":"\n\nfrom xai.brain.wordbase.nouns._bauble import _BAUBLE\n\n#calss header\nclass _BAUBLES(_BAUBLE, ):\n\tdef __init__(self,): \n\t\t_BAUBLE.__init__(self)\n\t\tself.name = \"BAUBLES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bauble\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_baubles.py","file_name":"_baubles.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"630690790","text":"import re\nimport looter as lt\nfrom concurrent import futures\n\ndomain = 'http://www.mm131.com'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'Referer': 'http://www.mm131.com/' # 必须设定referer,否则会被重定向为qq图片\n}\n\ndef crawl(url):\n tree = lt.fetch(url)\n imgs = tree.cssselect('dl.list-left dd')[:-1]\n for img in imgs:\n link = img.cssselect('a')[0].get('href')\n bango = link.split('/')[-1][:-5]\n detail = lt.fetch(link, headers=headers)\n pagination = detail.cssselect('.content-page 
.page-ch')[0].text\n max_page = int(re.findall(r'\\d+', pagination)[0])\n img_urls = [f'http://img1.mm131.me/pic/{bango}/{n}.jpg' for n in range(1, max_page+1)]\n lt.async_save_imgs(img_urls, headers=headers, random_name=True)\n\n\nif __name__ == '__main__':\n tasklist = [*[f'{domain}/xinggan/'] ,*[f'{domain}/xinggan/list_6_{n}.html' for n in range(2, 153)]]\n with futures.ThreadPoolExecutor(50) as executor:\n executor.map(crawl, tasklist)\n","sub_path":"looter/examples/mm131.py","file_name":"mm131.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"380156150","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\nclass RNNHardCell(nn.Module):\n def __init__(self, n_input:int, n_hidden:int, state=None) -> None:\n super(RNNHardCell, self).__init__()\n self.n_input = n_input\n self.n_hidden = n_hidden\n self.in_h = nn.Linear(self.n_input, self.n_hidden, bias=False)\n self.h_h = nn.Linear(self.n_hidden, self.n_hidden, bias=False)\n self.state = state\n self.register_parameter()\n\n def register_parameter(self) -> None:\n stdv = 1.0 / math.sqrt(self.n_hidden)\n for weight in self.parameters():\n nn.init.uniform_(weight, -stdv, stdv)\n \n def forward(self, x, state=None):\n self.state = state\n if self.state is None:\n #self.state = torch.tanh(self.in_h(x))\n #self.state = F.hardtanh(self.in_h(x))\n self.state = F.relu(self.in_h(x))\n\n else:\n #self.state = torch.tanh(self.in_h(x) + self.h_h(self.state))\n #self.state = F.hardtanh(self.in_h(x) + self.h_h(self.state))\n self.state = F.relu(self.in_h(x) + self.h_h(self.state))\n return self.state\n\nclass RNNModel(nn.Module):\n def __init__(self, n_input, n_hidden, n_output, num_layers=1):\n super(RNNModel, self).__init__()\n self.rnn = RNNHardCell(n_input, n_hidden)\n #self.rnn = nn.RNN(n_input, n_hidden, num_layers, nonlinearity='relu')\n self.out = nn.Linear(n_hidden, n_output, bias=False)\n self.num_layers = num_layers\n \n def forward(self, xs, state=None):\n state = None\n h_seq = []\n \n for x in xs:\n x = torch.from_numpy(np.asarray(x)).float()\n x = x.unsqueeze(0)\n for _ in range(self.num_layers):\n state = self.rnn(x, state)\n h_seq.append(state)\n \n h_seq = torch.stack(h_seq)\n ys = self.out(h_seq)\n ys = torch.transpose(ys, 0, 1)\n\n return ys\n\n\nclass LSTMHardCell(nn.Module):\n def __init__(self, n_input:int, n_hidden:int, state=None, cell=None) -> None:\n super(LSTMHardCell, self).__init__()\n self.n_input = n_input\n self.n_hidden = n_hidden\n self.in_f = nn.Linear(self.n_input, self.n_hidden, bias=False)\n self.in_i = nn.Linear(self.n_input, self.n_hidden, bias=False)\n self.in_o = nn.Linear(self.n_input, self.n_hidden, bias=False)\n self.in_u = nn.Linear(self.n_input, self.n_hidden, bias=False)\n self.h_f = nn.Linear(self.n_hidden, self.n_hidden, bias=False)\n self.h_i = nn.Linear(self.n_hidden, self.n_hidden, bias=False)\n self.h_o = nn.Linear(self.n_hidden, self.n_hidden, bias=False)\n self.h_u = nn.Linear(self.n_hidden, self.n_hidden, bias=False)\n self.state = state\n self.cell = cell\n self.register_parameter()\n\n def register_parameter(self) -> None:\n stdv = 1.0 / math.sqrt(self.n_hidden)\n for weight in self.parameters():\n nn.init.uniform_(weight, -stdv, stdv)\n \n def forward(self, x, state=None, cell=None):\n self.state = state\n self.cell = cell\n if self.state is None:\n f = torch.sigmoid(self.in_f(x))\n i = torch.sigmoid(self.in_i(x))\n o = 
torch.sigmoid(self.in_o(x))\n u = F.tanh(self.in_u(x))\n else:\n f = torch.sigmoid(self.in_f(x) + self.h_f(self.state))\n i = torch.sigmoid(self.in_i(x) + self.h_i(self.state))\n o = torch.sigmoid(self.in_o(x) + self.h_o(self.state))\n u = F.tanh(self.in_u(x) + self.h_u(self.state))\n if self.cell is None:\n self.cell = (i * u)\n else:\n self.cell = (f * self.cell) + (i * u)\n \n self.state = o * F.tanh(self.cell)\n\n return self.state, self.cell\n\n\nclass LSTMModel(nn.Module):\n def __init__(self, n_input, n_hidden, n_output):\n super(LSTMModel, self).__init__()\n self.rnn = LSTMHardCell(n_input, n_hidden)\n #self.rnn = nn.LSTM(n_input, n_hidden)\n self.out = nn.Linear(n_hidden, n_output, bias=False)\n \n def forward(self, xs, state=None, cell=None):\n state = None\n cell = None\n h_seq = []\n \n for x in xs:\n x = torch.from_numpy(np.asarray(x)).double()\n x = x.unsqueeze(0)\n state, cell = self.rnn(x, state, cell)\n h_seq.append(state)\n \n h_seq = torch.stack(h_seq)\n ys = self.out(h_seq)\n ys = torch.transpose(ys, 0, 1)\n\n return ys\n\n\nclass LSTM(nn.Module):\n def __init__(self, n_input, n_hidden, n_output, n_layers, batch_size):\n super().__init__()\n self.n_hidden = n_hidden\n self.batch_size = batch_size\n self.rnn = nn.LSTM(input_size=n_input, hidden_size=n_hidden, num_layers=n_layers)\n self.out = nn.Linear(n_hidden, n_output, bias=False)\n self.reset_parameters()\n\n def reset_parameters(self):\n std = 1.0 / math.sqrt(self.n_hidden)\n for w in self.parameters():\n w.data.uniform_(-std, std)\n\n def forward(self, xs, s=None):\n #self.hidden_cell = (torch.zeros(1, self.batch_size, self.n_hidden), torch.zeros(1, self.batch_size, self.n_hidden))\n output, hp = self.rnn(xs, s)\n predictions = self.out(output)\n return predictions","sub_path":"stock_predict/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"150415479","text":"import umap\nimport warnings\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom train_UMAP import fit_classifier_params\nfrom utils.TransformParameterParser import TransformParameterParser\nfrom utils.DataInput import DataInput\nfrom utils.GateInitializerPrimKDE import GateInitializerPrimKDE\nfrom utils.GateInitializerClustering import GateInitializerClustering\nfrom utils.DepthOneModel import DepthOneModel\nfrom utils.DataAndGatesPlotter import DataAndGatesPlotterDepthOne\nfrom utils.DataTransformerFactory import DataTransformerFactory\nfrom train_UMAP import run_train_model\nimport torch\nimport os\nimport time\nfrom copy import deepcopy\n\ndef cross_validate_just_first_gate(path_to_params, n_splits=20):\n params = TransformParameterParser(path_to_params).parse_params()\n print(params)\n check_consistency_of_params(params)\n trackers_per_seed = []\n models_per_seed = []\n for split in range(n_splits):\n params['random_seed'] = split + 1\n tracker, model = single_run_single_gate(params)\n \n trackers_per_seed.append(tracker)\n models_per_seed.append(model)\n with open(os.path.join(params['save_dir'], 'trackers_per_seed.pkl'), 'wb') as f:\n pickle.dump(trackers_per_seed, f)\n with open(os.path.join(params['save_dir'], 'models_per_seed.pkl'), 'wb') as f:\n pickle.dump(models_per_seed, f)\n \n \n \n\ndef single_run_single_gate(params):\n start_time = time.time()\n\n\n #evauntually uncomment this leaving asis in order ot keep the same results as before to compare.\n #set_random_seeds(params)\n\n if not 
os.path.exists(params['save_dir']):\n os.makedirs(params['save_dir'])\n\n with open(os.path.join(params['save_dir'], 'params.pkl'), 'wb') as f:\n pickle.dump(params, f)\n\n data_input = DataInput(params['data_params'])\n data_input.split_data(split_seed=params['random_seed'])\n\n data_transformer = DataTransformerFactory(params['transform_params'], params['random_seed']).manufacture_transformer()\n\n data_input.embed_data_and_fit_transformer(\\\n data_transformer,\n cells_to_subsample=params['transform_params']['cells_to_subsample'],\n num_cells_for_transformer=params['transform_params']['num_cells_for_transformer'] \n ) \n data_input.save_transformer(params['save_dir'])\n data_input.normalize_data()\n unused_cluster_gate_inits = init_plot_and_save_gates(data_input, params)\n #everything below differs from the other main_UMAP\n data_input.convert_all_data_to_tensors()\n init_gate_tree, unused_cluster_gate_inits = get_next_gate_tree(unused_cluster_gate_inits, data_input, params, model=None)\n model = initialize_model(params['model_params'], [init_gate_tree])\n performance_tracker = run_train_model(model, params['train_params'], data_input)\n \n model_save_path = os.path.join(params['save_dir'], 'model.pkl')\n torch.save(model.state_dict(), model_save_path)\n \n trackers_save_path = os.path.join(params['save_dir'], 'last_CV_rounds_tracker.pkl')\n with open(trackers_save_path, 'wb') as f:\n pickle.dump(performance_tracker, f)\n results_plotter = DataAndGatesPlotterDepthOne(model, np.concatenate(data_input.x_tr))\n #fig, axes = plt.subplots(params['gate_init_params']['n_clusters'], figsize=(1 * params['gate_init_params']['n_clusters'], 3 * params['gate_init_params']['n_clusters']))\n results_plotter.plot_data_with_gates(np.array(np.concatenate([data_input.y_tr[i] * torch.ones([data_input.x_tr[i].shape[0], 1]) for i in range(len(data_input.x_tr))])))\n\n plt.savefig(os.path.join(params['save_dir'], 'final_gates.png'))\n\n with open(os.path.join(params['save_dir'], 'configs.pkl'), 'wb') as f:\n pickle.dump(params, f)\n\n print('Complete main loop took %.4f seconds' %(time.time() - start_time))\n return performance_tracker, model\n\n\ndef set_random_seeds(params):\n torch.manual_seed(params['random_seed'])\n np.random.seed(params['random_seed'])\n\ndef check_consistency_of_params(params):\n if params['train_params']['descent_type'] == 'joint_descent':\n if not params['train_params']['learning_rate_gates'] == params['train_params']['learning_rate_classifier']:\n raise ValueError('For joint descent learning rate gates and learning rate classifier must be equal')\n if params['train_params']['conv_thresh']:\n if params['train_params']['n_epoch']:\n warnings.warn('n_epoch parameter is not used when a conv_thresh is set. 
Training will continue until the change in loss is less than conv_thresh regardless of the number of epochs.')\n\ndef initialize_model(model_params, init_gate_tree):\n model = DepthOneModel(init_gate_tree, model_params)\n return model\n\ndef init_plot_and_save_gates(data_input, params):\n gate_initializer = GateInitializerClustering(data_input.x_tr, params['gate_init_cluster_params'])\n gate_initializer.initialize_gates() \n gate_initializer.construct_init_gate_tree()\n gate_initializer.plot_init_gate_tree_with_data()\n plt.savefig(os.path.join(params['save_dir'], 'init_gates.png'))\n plt.clf()\n return gate_initializer.init_gate_tree\n\ndef get_next_gate_tree(unused_gate_trees, data_input, params, model=None):\n if model:\n losses = []\n for gate_tree in unused_gate_trees:\n dummy_model_state = deepcopy(model.state_dict())\n dummy_model = DepthOneModel(model.get_gate_tree(), params['model_params'])\n dummy_model.load_state_dict(dummy_model_state)\n\n dummy_model.add_node(gate_tree)\n performance_tracker = run_train_model(dummy_model, params['train_params'], data_input)\n losses.append(dummy_model(data_input.x_tr, data_input.y_tr)['log_loss'].cpu().detach().numpy())\n best_gate_idx = np.argmin(np.array(losses))\n else:\n losses = []\n for gate_tree in unused_gate_trees:\n model = DepthOneModel([gate_tree], params['model_params'])\n performance_tracker = run_train_model(model, params['train_params'], data_input)\n losses.append(model(data_input.x_tr, data_input.y_tr)['log_loss'].cpu().detach().numpy())\n best_gate_idx = np.argmin(np.array(losses))\n best_gate = unused_gate_trees[best_gate_idx]\n del unused_gate_trees[best_gate_idx]\n return best_gate, unused_gate_trees\n \n\n\nif __name__ == '__main__':\n # to run:\n # 1. Double check yaml file name is correct\n # 2. 
Double check the number of splits is as desired\n path_to_params = '../configs/umap_CV_cur_model.yaml'\n cross_validate_just_first_gate(path_to_params, 30)\n\n","sub_path":"src/old_mains/CV_repeated_init_kmeans.py","file_name":"CV_repeated_init_kmeans.py","file_ext":"py","file_size_in_byte":6599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"301178695","text":" \n# SetValue\n# 2021 Yong-Jun Shin\n\n# Ctrl-Shift-P --> Terminal: Create New Integrated Terminal\n# .venv\\scripts\\activate --> activate the virtual environment in the current folder\n# gremlinpython==3.5.0 --> requirments.txt \n# gremelinpython supports python 3.4 or higher\n# python -m pip install -r requirements.txt\n\nimport logging\nfrom gremlin_python.driver import client, serializer\nimport azure.functions as func\nimport json \n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function processed a request.')\n req_body = req.get_json()\n input_id = req_body.get('id') # vertex id\n # key = req_body.get('key') # vertex property key\n value = req_body.get('value') # vertex perperty value\n\n dbclient = client.Client('wss://peridymegraph.gremlin.cosmos.azure.com:443/','g', \n message_serializer=serializer.GraphSONSerializersV2d0(),\n username=\"/dbs/db/colls/Graph1\", \n password=\"\")\n\n if type(value) == str:\n query = f\"g.V('{input_id}').property('value', '{value}')\"\n elif type(value) == bool:\n if value == True:\n value = 'true'\n elif value == False:\n value = 'false'\n query = f\"g.V('{input_id}').property('value', {value})\"\n else: query = f\"g.V('{input_id}').property('value', {value})\"\n \n callback = dbclient.submitAsync(query)\n callback_result = callback.result().all().result()\n response_json = json.dumps(callback_result)\n logging.info(response_json)\n dbclient.close()\n return func.HttpResponse(body = response_json)\n\n\n","sub_path":"SetValue/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"629360868","text":"# For data loading\nfrom torchtext import data, datasets\nimport numpy as np\nimport torch\n\nseed = 42\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\n\nimport spacy \nspacy_de = spacy.load('fr_core_news_sm')\nspacy_en = spacy.load('en_core_web_sm')\n\ndef tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(text)]\n\n\ndef tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]\n\nUNK_TOKEN = \"\"\nPAD_TOKEN = \"\"\nSOS_TOKEN = \"\"\nEOS_TOKEN = \"\"\nLOWER = True\n\nclass Corpus:\n def __init__(self, lang1, lang2):\n \n self.SRC = data.Field(tokenize=tokenize_de, batch_first=True, lower=LOWER, include_lengths=True,\n unk_token=UNK_TOKEN, pad_token=PAD_TOKEN, init_token=None, eos_token=EOS_TOKEN)\n\n self.TRG = data.Field(tokenize=tokenize_en, batch_first=True, lower=LOWER, include_lengths=True,\n unk_token=UNK_TOKEN, pad_token=PAD_TOKEN, init_token=None, eos_token=EOS_TOKEN)\n\n\n MAX_LEN = 25 # Note: we filter out a lot of sentences for speed\n self.train_data, self.valid_data, self.test_data = datasets.IWSLT.splits(\n exts=('.{}'.format(lang1), '.{}'.format(lang2)), fields=(self.SRC, self.TRG),\n filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)\n\n\n MIN_FREQ = 5 # Note: we limite the vocabulary to frequent words for speed\n 
self.SRC.build_vocab(self.train_data.src, min_freq=MIN_FREQ)\n self.TRG.build_vocab(self.train_data.trg, min_freq=MIN_FREQ)\n\n self.pad_index = self.TRG.vocab.stoi[PAD_TOKEN]\n\n\ndef print_data_info(train_data, valid_data, test_data, src_field, trg_field):\n \"\"\" This prints some useful stuff about our data sets. \"\"\"\n\n print(\"Data set sizes (number of sentence pairs):\")\n print('train', len(train_data))\n print('valid', len(valid_data))\n print('test', len(test_data), \"\\n\")\n\n print(\"First training example:\")\n print(\"src:\", \" \".join(vars(train_data[0])['src']))\n print(\"trg:\", \" \".join(vars(train_data[0])['trg']), \"\\n\")\n\n print(\"Most common words (src):\")\n print(\"\\n\".join([\"%10s %10d\" % x for x in src_field.vocab.freqs.most_common(10)]), \"\\n\")\n print(\"Most common words (trg):\")\n print(\"\\n\".join([\"%10s %10d\" % x for x in trg_field.vocab.freqs.most_common(10)]), \"\\n\")\n\n print(\"First 10 words (src):\")\n print(\"\\n\".join(\n '%02d %s' % (i, t) for i, t in enumerate(src_field.vocab.itos[:10])), \"\\n\")\n print(\"First 10 words (trg):\")\n print(\"\\n\".join(\n '%02d %s' % (i, t) for i, t in enumerate(trg_field.vocab.itos[:10])), \"\\n\")\n\n print(\"Number of German words (types):\", len(src_field.vocab))\n print(\"Number of English words (types):\", len(trg_field.vocab), \"\\n\")\n \n\nif __name__ == \"__main__\":\n corpus = Corpus('de', 'en')\n print_data_info(corpus.train_data, corpus.valid_data, corpus.test_data, corpus.SRC, corpus.TRG)","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"375059905","text":"import pySurfaceHierarchy\nfrom read_data import DataReader\nimport os\nmesh_dir_path = \"D:/data/MPI-FAUST_training/normalized/cleaned_10k\"\nmesh_name = \"tr_scan_000_0\"\noutput_path = \"D:/data/MPI-FAUST_training/normalized/cleaned_dataset/\"\ntop_level_size = 1000\nlevel_size_factor = 1.0\nstart_level = 0\nlevel_num = 3\nsym_N = 4\ngrid_length = [0.01, 0.015, 0.025]\ngrid_size = 5\nif not os.path.exists(output_path):\n os.mkdir(output_path)\n\next = \".obj\"\nfor filename in os.listdir(mesh_dir_path):\n if ext in filename:\n mesh_name = '.'.join(filename.split('.')[:-1])\n pySurfaceHierarchy.hierarchy(mesh_dir_path, mesh_name + ext, output_path, top_level_size, level_size_factor, level_num, sym_N)\n pySurfaceHierarchy.conv_para(output_path, mesh_name, start_level, level_num, grid_length, grid_size)\n\n\n","sub_path":"PFCNN/Generate_Dataset/preprocess_data_faust_regression.py","file_name":"preprocess_data_faust_regression.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"623893111","text":"from PySide2.QtWidgets import QLabel, QWidget, QPushButton, QApplication, QHBoxLayout\n\nclass Window(QWidget):\n\n def __init__(self):\n QWidget.__init__(self)\n\n self.layout = QHBoxLayout()\n\n self.label = QLabel(\"Ceci est un QLabel\")\n self.button = QPushButton(\"Ceci est un QPushButton\")\n\n self.layout.addWidget(self.label)\n self.layout.addWidget(self.button)\n\n self.setLayout(self.layout)\n\nif __name__ == \"__main__\":\n app = QApplication([])\n win = Window()\n win.show()\n app.exec_()\n","sub_path":"Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"642443750","text":"\"\"\"Test serialized BDT models against independent data.\n\nUsage:\n\ne.g.\n\npython parity_tests/jet_bdt_test.py \\\n --datapath /home/tombs/Downloads/truth_ktdurham200/liv_3j_4j_1/ \\\n --outpath out_test_2 \\\n --ntest 1_000_000 \\\n --with_flav --with_hel\n\n\"\"\"\nimport argparse\nimport os\n\nfrom .jet_bdt_lib import bdt_test, fit_load\nfrom .jet_lib import load_invariant_momenta, result_dump, stitch_parts\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--datapath\", type=str, required=True)\n parser.add_argument(\"--outpath\", type=str, required=True)\n parser.add_argument(\"--ntest\", type=int, default=None)\n parser.add_argument(\"--private\", action=\"store_true\")\n parser.add_argument(\"--with_flav\", action=\"store_true\")\n parser.add_argument(\"--with_hel\", action=\"store_true\")\n args = parser.parse_args()\n\n jet_bdt_test(\n args.datapath,\n args.outpath,\n args.ntest,\n args.with_flav,\n args.with_hel,\n private=args.private,\n )\n\n\ndef jet_bdt_test(\n datapath, outpath, ntest, with_flav, with_hel, *, private=False\n):\n assert ntest is None or ntest > 0\n\n model, meta = fit_load(outpath)\n\n if private:\n test = \"private_test\"\n else:\n test = \"test\"\n\n with_truth = with_flav or with_hel\n\n test_parts = load_invariant_momenta(\n os.path.join(datapath, test, \"*.h5\"),\n nmax=ntest,\n with_truth=with_truth,\n )\n test_real = stitch_parts(test_parts, with_flav, with_hel)\n\n result = bdt_test(\n model,\n meta,\n test_real,\n tag={\n \"datapath\": datapath,\n \"with_flav\": with_flav,\n \"with_hel\": with_hel,\n },\n )\n result_dump(result, outpath, private=private)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"parity_tests/jet_bdt_test.py","file_name":"jet_bdt_test.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"69095501","text":"import csv\n\ntotal = 0.0\n\nwith open('data/portfolio.csv', 'r') as file_content:\n rows = csv.reader(file_content)\n headers = next(rows) # skip the heaer row\n for row in rows:\n #line = line.strip() # strip whitespace\n #parts = line.split(',')\n #parts[0] = parts[0].strip('\"')\n #parts[1] = parts[1].strip('\"')\n row[2] = int(row[2])\n row[3] = float(row[3])\n #print(parts)\n total += row[2]*row[3]\n\nprint('Total cost:',total )","sub_path":"lesson_03/port.py","file_name":"port.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"646735409","text":"import unittest\nimport logging\nimport creaturecast_maya.nodes.depend_node as dep\n\n\nclass NodeTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(NodeTest, self).__init__(*args, **kwargs)\n self.log = logging.getLogger(self.__class__.__name__)\n\n def runTest(self):\n self.test_depend_node()\n self.log.debug('runTest finished')\n\n def test_maya(self):\n node_a = dep.DependNode(node_type='multiplyDivide')\n node_b = dep.DependNode(node_type='multiplyDivide')\n node_b.set_parent(node_a)\n node_a.create()\n\n\n self.log.debug('test_depend_node finished')\n\n","sub_path":"tests/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"325506937","text":"import shutil\nfrom whoosh.fields import Schema, TEXT, KEYWORD, ID, STORED\nfrom whoosh.analysis import 
StemmingAnalyzer\nimport os\nfrom functools import reduce\nimport helpers\nimport json\nfrom whoosh import index\n\nschema = Schema(post_id=ID(stored=True),\n title=TEXT(stored=True),\n tokens=KEYWORD(stored=True, commas=True, scorable=True))\n\nif __name__ == \"__main__\":\n if os.path.exists(\"indexdir\"):\n shutil.rmtree(\"indexdir\")\n\n os.mkdir(\"indexdir\")\n\n ix = index.create_in(\"indexdir\", schema)\n writer = ix.writer()\n\n with open('../dataset.json') as dataset_file:\n for (post_id, post) in json.load(dataset_file).items():\n terms = set(post['BodyTokens'])\n terms = terms.union(set(helpers.preprocess_text(post['Title'])))\n\n child_body_terms = reduce(lambda child_one, child_two: child_one.union(child_two), map(lambda child: set(child['BodyTokens']), post['Children'][1:]))\n terms = terms.union(child_body_terms)\n\n child_title_terms = reduce(lambda child_one, child_two: child_one.union(child_two), map(lambda child: set(helpers.preprocess_text(child['Title'])), post['Children']))\n terms = terms.union(child_title_terms)\n\n child_tags = reduce(lambda child_one, child_two: child_one.union(child_two), map(lambda child: set(child['Tags']), post['Children']))\n terms = terms.union(child_tags)\n terms = terms.union(set(post['Tags']))\n\n writer.add_document(post_id=post_id, title=post['Title'], tokens=','.join(terms))\n print(post['Title'])\n writer.commit()\n\n print(ix.doc_count())","sub_path":"whoosh/index_posts.py","file_name":"index_posts.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"52305807","text":"import random\nimport numpy as np\nimport pylab as pl\nimport matplotlib\nfrom matplotlib import collections as mc\n\n# Question 1\ndef slhc(ge_distances):\n\n deletion_order = []\n mat = ge_distances\n while not len(mat) == 1:\n n = len(mat)\n for q in range(n):\n mat[q,q] = np.nan\n\n min_val = np.nanmin(mat)\n i,j = 0,0\n not_found = True\n while not_found:\n if mat[i][j] == min_val:\n not_found = False\n if not_found:\n j += 1\n if j > n-1:\n i += 1\n j = 0\n\n #print(np.array(mat))\n\n deletion_order.append(min_val)\n reduced_distances = np.delete(np.delete(mat, [i,j], 0), [i,j], 1)\n\n new_row = np.zeros(n)\n for k in range(len(new_row)):\n new_row[k] = min(mat[i][k], mat[j][k])\n new_row = np.delete(np.delete(new_row, i), j-1)\n\n mat = np.vstack((reduced_distances,new_row))\n new_col = np.concatenate((new_row, [[np.nan]]), None).reshape((len(new_row)+1, 1))\n mat = np.concatenate((mat, new_col), 1)\n #print(mat)\n\n anchors = []\n for x in range(len(deletion_order)+1):\n anchors.append(float(1-(x/len(deletion_order))))\n #print(anchors)\n\n bars = []\n for k in range(len(deletion_order)):\n bars.append((0,deletion_order[k]))\n bars.append((anchors[k], anchors[k]))\n bars.append('black')\n #bars.append((0,100000000))\n #bars.append((0,0))\n #bars.append('black')\n #print(bars)\n\n matplotlib.pyplot.plot(*bars)\n matplotlib.pyplot.axis('off')\n\n# Question 2\ndef generate_points(numpoints, numclusters, spread):\n\n centres = [\n (random.randint(0,100000001),random.randint(0, 100000001))\n for i in range(numclusters)\n ]\n\n points = []\n for i in range(numclusters):\n points.append([])\n\n p = 0\n for (x,y) in centres:\n for j in range(numclusters):\n if len(points[p]) >= int(numpoints/numclusters):\n p += 1\n break\n points[p].append((random.normalvariate(x,spread),\n random.normalvariate(y,spread)))\n\n return points\n\ndef plot_points(points):\n xs, ys = [], []\n 
for cluster in points:\n for p in cluster:\n xs.append(p[0])\n ys.append(p[1])\n pl.plot(xs, ys, \"b.\")\n\n\n# Question 3\ndef euclidean_distance(x,y):\n p = np.asarray(x).flatten()\n q = np.asarray(y).flatten()\n return np.sqrt(np.sum(np.power((p-q),2)))\n\ndef taxicab_distance(x,y):\n return np.minimum(abs(x[0]-y[0]), abs(x[1]-y[1]))\n\ndef euclidean_matrix(n,entries):\n entries = sum(entries, [])\n mat = np.zeros((n,n))\n #print(entries)\n for i in range(n):\n for j in range(n):\n mat[i][j] = euclidean_distance(entries[i],entries[j])\n\n return mat\n\ndef taxicab_matrix(n,entries):\n entries = sum(entries, [])\n mat = np.zeros((n,n))\n\n for i in range(n):\n for j in range(n):\n mat[i][j] = taxicab_distance(entries[i],entries[j])\n\n return mat\n\n# Question 4\n\n#a = generate_points(100, 20, 0.25)\n#plot_points(generate_points(3000,100,0.25))\n#slhc(euclidean_matrix(100,a))\n#matplotlib.pyplot.show()\n#slhc(taxicab_matrix(100, a))\n#plot_points(generate_points(3000,100,0.25))\nmatplotlib.pyplot.show()","sub_path":"homework3/MA500_Third_Homework_Kelvin_Killeen.py","file_name":"MA500_Third_Homework_Kelvin_Killeen.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"23176751","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nImage Segmentation \nImage: Picture of Toshi \n\"\"\"\n\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom skimage import img_as_float, segmentation, filters, color \n\n#%%\n\n# load and plot image \nimage = plt.imread('exploratory/image_seg/toshi.jpg') \nimage = color.rgb2gray(img_as_float(image)) \n\nplt.figure(figsize=(7, 7)) \nplt.imshow(image, cmap='gray') \nplt.show() \n\n#%%\n\nthresholds = filters.threshold_multiotsu(image, classes=3) \nregions = np.digitize(image, bins=thresholds) \n\nplt.figure(figsize=(7, 7)) \nplt.imshow(regions, cmap='gray') \nplt.show() \n\n\n\n","sub_path":"exploratory/image_seg/segment_toshi.py","file_name":"segment_toshi.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"406247660","text":"# vim: ts=4:sw=4:expandtabs\n\n__author__ = 'zach.mott@gmail.com'\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .SnacksDBBase import SnacksDBBase\n\n\nclass Ballot(SnacksDBBase):\n \"\"\"\n Model that represents the votes a user has placed for particular snacks.\n users is allowed to vote for three different snacks per calendar month.\n \"\"\"\n user = models.ForeignKey(\n 'auth.User', on_delete=models.CASCADE, # If a user is deleted, delete their votes, too.\n related_name='votes', help_text=_('User who placed the vote.')\n )\n snack_id = models.PositiveIntegerField(\n verbose_name=_('Snack ID'),\n help_text=_('ID of the snack being voted for.')\n )\n\n def __str__(self):\n tmpl = \"{self.user.username} => {self.snack_id} on {self.created:%Y-%m-%d %H:%M:%S}\"\n return tmpl.format(self=self)\n","sub_path":"snacksdb/models/Ballot.py","file_name":"Ballot.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"154058726","text":"#! 
python3\n# PP5_Spreadsheet2TextFiles.py\n# Write a program that performs the tasks of the previous program in reverse\n# order: The program should open a spreadsheet and write the cells of column\n# A into one text file, the cells of column B into another text file, and\n# so on.\nimport sys, openpyxl, random\n\nxlsxfile = sys.argv[1]\nwb = openpyxl.load_workbook(xlsxfile)\nsheet = wb.get_active_sheet()\nfor col in range(1, sheet.max_column+1):\n txtfile = open(f'2text{random.randint(0, 1000)}.txt','w',encoding='utf-8')\n for row in range(1, sheet.max_row+1):\n value = sheet.cell(row=row, column=col).value\n if value is not None:\n txtfile.write(value)\n txtfile.close()\n","sub_path":"Chapter12/PP5_Spreadsheet2TextFiles.py","file_name":"PP5_Spreadsheet2TextFiles.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"528480220","text":"# Specify structure for custom cmake functions\nadditional_commands = {\n \"opencensus_lib\": {\n \"flags\": [\n \"PUBLIC\",\n ],\n \"kwargs\": {\n \"HEADERS\": \"*\",\n \"DEPENDS\": \"*\",\n \"SOURCES\": \"*\"\n }\n }\n}\n\n# If comment markup is enabled, don't reflow the first comment block in\n# eachlistfile. Use this to preserve formatting of your\n# copyright/licensestatements.\nfirst_comment_is_literal = True\n","sub_path":".cmake-format.py","file_name":".cmake-format.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"323772051","text":"import os\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom Project.configure.parsing import Parse\nfrom Project.configure.parser_browser_names import ParseNames\nfrom Project.configure.parser_for_paths import ParsePaths\n\n\nclass BrowserFactory:\n\n @staticmethod\n def create_browser():\n\n #browser_name = Parse().get_json_browser_name()\n browser_name = os.getenv(\"browser_name\")\n browser_names_google = ParseNames().parsed_browser_names_google()\n browser_names_mozilla = ParseNames().parsed_browser_names_mozilla()\n language = Parse().get_json_language()\n path_to_download = ParsePaths().parsed_path_download_directory()\n\n if browser_name in browser_names_mozilla:\n options = webdriver.FirefoxOptions()\n options.set_preference(\"intl.accept_languages\", language)\n options.set_preference(\"browser.download.folderList\", 2)\n options.set_preference(\"browser.download.manager.showWhenStarting\", False)\n options.set_preference(\"browser.download.dir\", path_to_download)\n options.set_preference(\"browser.helperApps.neverAsk.saveToDisk\",\n \"application/msword, application/csv, application/ris, text/csv, image/png, \"\n \"application/pdf, text/html, text/plain, application/zip, application/x-zip, \"\n \"application/x-zip-compressed, application/download, application/octet-stream\")\n\n BrowserFactory.driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=options)\n BrowserFactory.driver.maximize_window()\n elif browser_name in browser_names_google:\n options = webdriver.ChromeOptions()\n preferences = {\"download.default_directory\": path_to_download,\n \"directory_upgrade\": True,\n \"safebrowsing.enabled\": True,\n \"intl.accept_languages\": language}\n options.add_argument(\"--start-maximized\")\n options.add_experimental_option(\"prefs\", preferences)\n BrowserFactory.driver = 
webdriver.Chrome(ChromeDriverManager().install(), options=options)\n\n return BrowserFactory.driver\n","sub_path":"Project/utility/driver_factory.py","file_name":"driver_factory.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"574673618","text":"import cv2\nimport numpy as np\nimport imutils\nfrom scipy.spatial import distance as dist\n\ndef detect_people(frame, net, ln, personIdx=0):\n (H,W) = frame.shape[:2] # get dimensions of the frame\n results = []\n \n # construct a blob from input frame and perform yolo object detector, that give us boundary boxes and associated proprbability\n blob = cv2.dnn.blobFromImage(frame, 1/255, (416,416), (0,0,0), swapRB=True, crop=False)\n net.setInput(blob)\n layerOutputs = net.forward(ln)\n \n boxes = []\n confidences = []\n centroids = []\n \n for output in layerOutputs: #abstract all info from layeroutputs\n for detection in output: # abstract the each info from outputs\n scores = detection[5:] # abstract the classid and its confidence\n classId = np.argmax(scores)\n confidence = scores[classId]\n\n if classId == personIdx and confidence > 0.5: # check for person object and mimmum confidence\n box = detection[0:4] * np.array([W, H, W, H]) # scale bounding box cordinates back relative to the size of image\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n # get top left cordinates of the frame(img/object)\n x = int(centerX - (width/2))\n y = int(centerY - (height/2))\n\n boxes.append([x, y, int(width), int(height)])\n centroids.append((centerX, centerY))\n confidences.append(float(confidence))\n\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3) # 0.5 - min conficence 0.3 - NMS threshold\n \n if len(idxs) > 0:\n for i in idxs.flatten():\n # extract the bounding box cordinates\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n \n \n # update the result with person detection probability\n r = (confidences[i], (x, y, x+w, y+h), centroids[i])\n results.append(r)\n \n return results\n\nnet = cv2.dnn.readNet('yolov3.weights','yolov3.cfg')\n\nclasses = [] #LABELS\nwith open ('coco.names','r') as f:\n classes = f.read().splitlines()\n \n# print(classes)\n\n# determine only the output layer that we need from yolo \nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\nimg = cv2.imread('1.jpg')\n\nheight, width, _ = img.shape\n\nframe = imutils.resize(img, width=700)\nresults = detect_people(frame, net, ln, personIdx = classes.index(\"person\"))\n\nviolate = set()\n\n\n\nif len(results) >=2:\n# extract all centroids from result and calculate euclidean distance between them\n centroids = np.array([r[2] for r in results])\n D = dist.cdist(centroids, centroids, metric=\"euclidean\")\n \n \n # loop over the upper triangular of the distance matrix\n for i in range(0, D.shape[0]):\n for j in range(i+1, D.shape[1]):\n if D[i,j] < 75: # 75 - min distance\n violate.add(i)\n violate.add(j)\n \n \nfor(i, (prob, bbox, centroid)) in enumerate(results):\n (startX, startY, endX, endY) = bbox\n (cX, cY) = centroid\n color = (0, 255, 0)\n \n if i in violate:\n color = (0 , 0, 255)\n \n cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n cv2.circle(frame, (cX, cY), 5, color, 1)\n \ntext = \"Social Distancing Violations: {}\".format(len(violate))\ncv2.putText(frame, text, (10, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 
3)\n\n\ncv2.imshow(\"img\",frame)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"code/social_distancing_img.py","file_name":"social_distancing_img.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"287085085","text":"# coding=utf-8\n'''\n    Created by lyy on 2019-04-19\n'''\nimport json\n\nfrom app.model.res import Res\nfrom app.utils.common_utils import get_date_now, get_ip_info\n\n__author__ = 'lyy'\n\nfrom flask import Blueprint, request, jsonify\n\n# define a blueprint\nuser = Blueprint('user', __name__)\n\nfrom app.api.v1.user import feedback, logd, login\n\n\n@user.route('/ip')\ndef get_ip():\n    ip = request.remote_addr\n    try:\n        _ip = request.headers[\"X-Real-IP\"]\n        if _ip is not None:\n            ip = _ip\n    except Exception as e:\n        print(e)\n\n    status = 200\n\n    info = {\n        'ip': ip,\n        'created_time': get_date_now()\n    }\n    msg = 'IP fetched successfully'\n\n    res_json = Res(status, msg, info)\n    return jsonify(res_json.__dict__)\n\n\n@user.route('/ip/info', methods=['POST'])\ndef get_ip_info_from_api():\n    try:\n        ip = request.form['ip']\n        ip_info = get_ip_info(ip)\n        status = 200\n\n        info = {\n            'ip': ip,\n            'result': ip_info,\n            'created_time': get_date_now()\n        }\n        msg = 'IP fetched successfully'\n    except Exception:\n        status = 500\n        info = {}\n        msg = 'IP fetch failed'\n\n    res_json = Res(status, msg, info)\n    return jsonify(res_json.__dict__)\n","sub_path":"app/api/v1/user/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"645123069","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom . 
import backend as bkd\n\n\nclass Real(object):\n def __init__(self, precision):\n self.precision = None\n self.reals = None\n if precision == 32:\n self.set_float32()\n elif precision == 64:\n self.set_float64()\n\n def __call__(self, package):\n return self.reals[package]\n\n def set_float32(self):\n self.precision = 32\n self.reals = {np: np.float32, bkd.lib: bkd.float32}\n\n def set_float64(self):\n self.precision = 64\n self.reals = {np: np.float64, bkd.lib: bkd.float64}\n","sub_path":"deepxde/real.py","file_name":"real.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"164855043","text":"\nimport mysql.connector\nfrom mysql.connector import Error\nimport pandas as pd\n\nimport plotly.express as px # (version 4.7.0)\nimport plotly.graph_objects as go\n\nfrom jupyter_dash import JupyterDash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nimport mysql.connector\nfrom mysql.connector import Error\n\n\ndef create_db_connection(host_name, user_name, user_password, db_name):\n connection = None\n try:\n connection = mysql.connector.connect(\n host=host_name,\n user=user_name,\n passwd=user_password,\n database=db_name,\n port=3307\n )\n print(\"MySQL Database connection successful\")\n except Error as err:\n print(f\"Error: '{err}'\")\n\n return connection\n\ndef read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except Error as err:\n print(f\"Error: '{err}'\")\n\ndef get_estrato(estrato,promedio):\n if estrato=='Estrato 1':\n promedio[0]+=1\n elif estrato=='Estrato 2':\n promedio[1]+=1\n elif estrato=='Estrato 3':\n promedio[2]+=1\n else:\n promedio[3]+=1\n \n return promedio\n\ndef get_range(dfA):\n promedioA=[0,0,0,0]\n promedioB=[0,0,0,0]\n promedioC=[0,0,0,0]\n promedioD=[0,0,0,0]\n\n for i in range(len(dfA)):\n puntaje=dfA.loc[i,'puntaje']\n if puntaje>=100 and puntaje<=200:\n estrato=dfA.loc[i,'estrato']\n promedioA= get_estrato(estrato,promedioA)\n elif puntaje>=201 and puntaje<=300:\n estrato= dfA.loc[i,'estrato']\n promedioB= get_estrato(estrato,promedioB)\n elif puntaje>=301 and puntaje<=400:\n estrato=dfA.loc[i,'estrato']\n promedioC= get_estrato(estrato,promedioC)\n elif puntaje>=401 and puntaje<=500:\n estrato=dfA.loc[i,'estrato']\n promedioD= get_estrato(estrato,promedioD)\n \n \n return promedioA,promedioB,promedioC,promedioD\n\ndef get_pastel(rango):\n dicc={\n 'estrato':estrato,\n 'total':tot[rango]\n }\n \n df_pastel = pd.DataFrame(dicc, columns = ['estrato', 'total'])\n return df_pastel\n\n\nconnection = create_db_connection(\"testdb_mysql\", \"user\", 'password','ICFES')\n\n\n\nres=read_query(connection, \"select * from Datos\")\ndf= pd.DataFrame(res,columns=['id','departamento','estrato','internet','puntaje'])\n\n\ndf_median=df.groupby(['departamento','estrato'])[['puntaje']].mean()\ndf_median.reset_index(inplace=True)\n\n\ndf_internet=df.groupby(['internet','estrato'])[['puntaje']].mean()\ndf_internet.reset_index(inplace=True)\ndf_internet=df_internet.drop(df_internet.index[[6,13]])\n\n\nconnection.close()\n\n\ntot = get_range(df)\nestrato=['Estrato 1','Estrato 2','Estrato 3','Estrato 4']\n\n\n\n\nimport plotly.graph_objects as go\n\napp = JupyterDash(__name__)\n\n# ------------------------------------------------------------------------------\n# App layout\napp.layout = html.Div([\n\n 
html.H1(\"Web Application Dashboards with Dash\", style={'text-align': 'center'}),\n\n dcc.Dropdown(id=\"slct_range\",\n options=[\n {\"label\": \"100-200\", \"value\": 0},\n {\"label\": \"201-300\", \"value\": 1},\n {\"label\": \"301-400\", \"value\": 2},\n {\"label\": \"401-500\", \"value\": 3}],\n multi=False,\n value=0,\n style={'width': \"40%\"}\n ),\n dcc.Graph(id='pie-chart', figure={}),\n dcc.Dropdown(id=\"slct_estrato\",\n options=[\n {\"label\": \"Estrato 1\", \"value\": 'Estrato 1'},\n {\"label\": \"Estrato 2\", \"value\": 'Estrato 2'},\n {\"label\": \"Estrato 3\", \"value\": 'Estrato 3'},\n {\"label\": \"Estrato 4\", \"value\": 'Estrato 4'},\n {\"label\": \"Estrato 5\", \"value\": 'Estrato 5'}],\n multi=False,\n value='Estrato 1',\n style={'width': \"40%\"}\n ),\n dcc.Graph(id='scatter-plot', figure={}),\n dcc.Graph(id='horizontal-plot', figure={})\n \n\n])\n\n# ------------------------------------------------------------------------------\n# Connect the Plotly graphs with Dash Components\n\n@app.callback(\n Output(\"pie-chart\", \"figure\"), \n [Input(\"slct_range\", \"value\")])\ndef generate_chart(option_value):\n fig = px.pie(get_pastel(option_value), values='total',names='estrato')\n return fig\n \n \n@app.callback(\n Output(\"scatter-plot\", \"figure\"), \n [Input(\"slct_estrato\", \"value\")])\ndef update_bar_chart(slct_estrato): \n df_aux=df_median.loc[df_median['estrato'] == slct_estrato]\n fig = px.scatter(df_aux, x='departamento', y='puntaje', color='estrato')\n return fig\n\n@app.callback(\n Output(\"horizontal-plot\", \"figure\"), \n [Input(\"slct_estrato\", \"value\")])\ndef update_bar_chart(slct_estrato): \n# fig = px.bar(df_internet, x=\"puntaje\", y=\"internet\", orientation='h',height=100)\n fig = px.line(df_internet, x='estrato', y='puntaje', color='internet')\n return fig\n\n\n# ------------------------------------------------------------------------------\nif __name__ == '__main__':\n app.run_server(host='localhost', port=8080, debug=True)\n ","sub_path":"proyecto.py","file_name":"proyecto.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"268933169","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Class for parsing description.xml file\n\nimport xml.dom.minidom\n\nclass DescriptionParser: \n def __init__(self):\n self.description_file=None\n self.dom = None\n \n def setXMLFileName(self, filename):\n self.description_file=filename\n try:\n df = open(filename, \"rw\")\n except IOError:\n df = open(filename, \"w\")\n df.write(\"\")\n df.close()\n self.dom = xml.dom.minidom.parse(self.description_file) \n\n def getXMLFileName(self):\n return self.description_file\n\n\n def saveToXML(self):\n self.dom.normalize()\n data=self.dom.toxml(\"utf-8\")\n df = open(self.description_file, \"w\")\n df.write(data)\n df.close() \n self.dom = xml.dom.minidom.parse(self.description_file) \n\n \n def setInfo(self, param_path, data):\n p = self.findParamByPath(param_path)\n if p==None:\n p = self.createParam(param_path)\n try:\n infotext = p.getElementsByTagName(\"info\")[0]\n infotext.removeChild(infotext.childNodes[0])\n p.removeChild(infotext)\n except IndexError:\n # if tag doesn't exist, create it\n infotext = self.dom.createElement(\"info\")\n if len(data)!=0:\n infotext.appendChild(self.dom.createTextNode(data))\n p.appendChild(infotext)\n\n\n def findParamByPath(self, param_path):\n for p in self.dom.getElementsByTagName(\"param\"):\n if p.getAttribute(\"path\") 
== param_path:\n return p\n return None\n \n def getInfo(self, param_path):\n p = self.findParamByPath(param_path)\n if p==None:\n return None\n try:\n infotext = p.getElementsByTagName(\"info\")[0]\n \n except IndexError:\n # If tag doesn't exist, return None\n return None\n\n rc = \"\"\n for i in infotext.childNodes:\n if i.nodeType == i.TEXT_NODE:\n rc = rc + i.data\n return rc\n\n def isLocked(self, param_path):\n # return values:\n # True - locked XML attribute set to 1\n # False - locked XML attribute set to 0 (default!)\n # None - can't find this parameter in description XML file\n p = self.findParamByPath(param_path)\n if p==None:\n return False\n\n locked = p.getAttribute(\"locked\")\n if locked==\"\":\n return 0\n if int(locked)>0:\n return 1\n else:\n return 0\n\n def setLocked(self, param_path, locked=0):\n p = self.findParamByPath(param_path)\n if p==None:\n return None\n\n if locked==True:\n locked=1\n else:\n locked=0\n p.setAttribute(\"locked\", str(locked))\n return p\n\n def getSubParams(self, param_path):\n p = self.findParamByPath(param_path)\n if p==None:\n return None\n try:\n elements = p.getElementsByTagName(\"subparams\")[0]\n except IndexError:\n # If tag doesn't exist, return none\n return None\n \n subparamslist=[]\n for e in elements.childNodes:\n if e.nodeType!=e.TEXT_NODE and e.tagName==\"subparam\":\n subparamslist.append(e)\n return subparamslist\n \n def getSubParamsAttr(self, param_path):\n subparamslist = self.getSubParams(param_path)\n if subparamslist==None or subparamslist==[]:\n return None\n subparamstorage=[]\n for s in subparamslist:\n subparamstorage.append({'index':s.getAttribute('index'),\n 'name':s.getAttribute('name'),\n 'type':s.getAttribute('type'),\n 'min':s.getAttribute('min'),\n 'max':s.getAttribute('max'),\n 'maxlen':s.getAttribute('maxlen')})\n # sort subparameters by index\n for i in range(len(subparamstorage)-1):\n for j in range(i+1,len(subparamstorage)):\n if(int(subparamstorage[i]['index'])>int(subparamstorage[j]['index'])):\n tmp=subparamstorage[i]\n subparamstorage[i]=subparamstorage[j]\n subparamstorage[j]=tmp\n return subparamstorage\n \n\n def getSubParamInfo(self, param_path, subparam_index):\n p = self.findParamByPath(param_path)\n if p==None:\n return None\n subparam = self.findSubparamByIndex(p, subparam_index)\n if subparam==None:\n return None\n try:\n infotext = subparam.getElementsByTagName(\"subinfo\")[0]\n \n if infotext.childNodes[0].nodeType == infotext.childNodes[0].TEXT_NODE:\n return infotext.childNodes[0].data \n \n except IndexError:\n # if tag doesn't exist or index is wrong then return None\n return None\n\n def setSubParamInfo(self, param_path, subparam_index, data):\n p = self.findParamByPath(param_path)\n if p==None:\n return None\n subparam = self.findSubparamByIndex(p, subparam_index)\n if subparam==None:\n return None\n try:\n infotext = subparam.getElementsByTagName(\"subinfo\")[0]\n subparam.removeChild(infotext)\n \n except IndexError:\n pass\n\n infotext = self.dom.createElement(\"subinfo\") \n infotext.appendChild(self.dom.createTextNode(data))\n subparam.appendChild(infotext)\n\n \n def getSubParamNumberItems(self, param_path, subparam_index):\n p = self.findParamByPath(param_path)\n if p==None:\n return None\n subparam = self.findSubparamByIndex(p, subparam_index)\n if subparam==None:\n return None\n itemstorage=[]\n try:\n items=subparam.getElementsByTagName(\"items\")[0]\n except IndexError:\n # if tag doesn't exist or index is wrong then return None\n return None\n for i in 
items.childNodes:\n if i.nodeType!=i.TEXT_NODE and i.tagName==\"item\":\n itemstorage.append({'value':i.getAttribute('value'),\n 'description':i.getAttribute('description')})\n return itemstorage\n\n def getItemDescription(self, param_path, subparam_index, item_value):\n itemstorage = self.getSubParamNumberItems(param_path, subparam_index)\n description = None\n if itemstorage!=None or itemstorage!=[]:\n for i in itemstorage:\n if i['value']==item_value:\n description = i['description']\n break\n return description\n\n def setSubParamAttr(self,param_path, subparam_index, name=\"\", sub_type='string',\n sub_len=0, sub_min=0, sub_max=0):\n p = self.findParamByPath(param_path)\n if p==None:\n p = self.createParam(param_path)\n subparamslist = self.getSubParams(param_path)\n if subparamslist==None:\n s = p.appendChild(self.dom.createElement(\"subparams\"))\n subparamslist=[]\n \n if subparamslist==[]:\n ss = self.dom.createElement(\"subparam\")\n subparam = s.appendChild(ss)\n subparam.setAttribute(\"index\", str(subparam_index))\n subparam.setAttribute(\"name\", name)\n subparam.setAttribute(\"type\", sub_type)\n subparam.setAttribute(\"maxlen\", str(sub_len))\n subparam.setAttribute(\"min\", str(sub_min))\n subparam.setAttribute(\"max\", str(sub_max))\n return subparam\n\n subparam = self.findSubparamByIndex(p, subparam_index)\n subparams = p.getElementsByTagName(\"subparams\")[0]\n if subparam!=None:\n subparams.removeChild(subparam)\n\n subparam = subparams.appendChild(self.dom.createElement(\"subparam\"))\n \n subparam.setAttribute(\"index\", str(subparam_index))\n subparam.setAttribute(\"name\", name)\n subparam.setAttribute(\"type\", sub_type)\n subparam.setAttribute(\"maxlen\", str(sub_len))\n subparam.setAttribute(\"min\", str(sub_min))\n subparam.setAttribute(\"max\", str(sub_max)) \n \n return subparam\n\n def setSubParamNumberItems(self, param_path, subparam_index,\n name, itemstorage):\n subparam = self.setSubParamAttr(param_path, subparam_index, name,\n sub_type='number_items')\n items = self.findItemsInSubparam(subparam)\n if items!=None:\n subparam.removeChild(items)\n\n items = subparam.appendChild(self.dom.createElement(\"items\"))\n \n for i in itemstorage:\n cur_item = items.appendChild(self.dom.createElement(\"item\"))\n cur_item.setAttribute(\"value\", str(i['value']))\n cur_item.setAttribute(\"description\", str(i['description'])) \n\n def findItemsInSubparam(self, subparam):\n if subparam==None:\n return None\n try:\n items = subparam.getElementsByTagName(\"items\")[0]\n except IndexError: \n return None\n return items\n\n \n def findSubparamByIndex(self, p, subparam_index):\n try:\n elements = p.getElementsByTagName(\"subparams\")[0]\n except IndexError:\n # if tag doesn't exist then return None \n return None\n for e in elements.childNodes:\n if e.nodeType!=e.TEXT_NODE and e.tagName==\"subparam\":\n if e.getAttribute('index')==str(subparam_index):\n return e\n return None\n \n def createParam(self,param_path):\n p = self.findParamByPath(param_path)\n if p==None:\n # if parameter not found in XML-file then create it\n root_element = self.dom.getElementsByTagName(\"params\")[0]\n p = self.dom.createElement(\"param\")\n p.setAttribute(\"path\", param_path)\n return root_element.appendChild(p)\n","sub_path":"descriptionparser.py","file_name":"descriptionparser.py","file_ext":"py","file_size_in_byte":10366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"228772031","text":"import tensorflow as tf\nfrom 
tensorflow.python.framework.errors_impl import PermissionDeniedError\n\nfrom src.file_utils import get_dir, build_checkpoint_file_name\nfrom src.game import BatchGameState, BatchGame\nfrom src.play_state import PlayState, opponent\nimport os\n\nfrom src import nn\n\ntf.enable_eager_execution()\n\n\nclass Policy:\n def __init__(self, player: PlayState=PlayState.X, lr=1e-3, root_dir=None, descriptor=\"C4\"):\n self._player = player\n self._container = tf.contrib.eager.EagerVariableStore()\n self._optimizer = tf.train.AdamOptimizer(lr)\n num_policy_layers = 5\n num_policy_filters = 64\n num_reward_layers = 2\n num_reward_filters = 64\n with self._container.as_default():\n # Policy network params\n self._p_conv_params = []\n for i in range(num_policy_layers):\n num_in_filters = 3 if i == 0 else num_policy_filters\n w = tf.get_variable(\n \"p_conv_w_%s_%i\" % (self.player, i),\n shape=[3, 3, num_in_filters, num_policy_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05)\n )\n b = tf.get_variable(\n \"p_conv_b_%s_%i\" % (self.player, i),\n shape=[num_policy_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(0.)\n )\n s = tf.get_variable(\n \"p_skip_%s_%i\" % (self.player, i),\n shape=[1, 1, 3, num_policy_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05)\n )\n self._p_conv_params.append((w, b, s))\n self._p_conv_1_w = tf.get_variable(\n \"p_conv_1_w_params_%s\" % self.player,\n shape=[3, num_policy_filters, 1], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05)\n )\n self._p_conv_1_b = tf.get_variable(\n \"p_conv_1_b_params_%s\" % self.player,\n shape=[1], dtype=tf.float32,\n initializer=tf.constant_initializer(0.)\n )\n\n # reward network params\n self._r_conv_params = []\n for i in range(num_reward_layers):\n num_in_filters = 3 if i == 0 else num_reward_filters\n w = tf.get_variable(\n \"r_conv_w_%s_%i\" % (self.player, i),\n shape=[3, 3, num_in_filters, num_reward_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05)\n )\n b = tf.get_variable(\n \"r_conv_b_%s_%i\" % (self.player, i),\n shape=[num_reward_filters], dtype=tf.float32,\n initializer=tf.constant_initializer(0.)\n )\n self._r_conv_params.append((w, b))\n self._r_dense_mu_w = tf.get_variable(\n \"r_dense_mu_w_%s\" % self.player,\n shape=[num_reward_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0.)\n )\n self._r_dense_mu_b = tf.get_variable(\n \"r_dense_mu_b_%s\" % self.player,\n shape=[1], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0.)\n )\n self._r_dense_sig_w = tf.get_variable(\n \"r_log_sig_w_%s\" % self.player,\n shape=[num_reward_filters], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05)\n )\n self._r_dense_sig_b = tf.get_variable(\n \"r_log_sig_b_%s\" % self.player,\n shape=[1], dtype=tf.float32,\n initializer=tf.random_normal_initializer(0, 0.05)\n )\n self._global_step = tf.train.get_or_create_global_step()\n self._saver = tf.train.Saver(\n var_list=self._container.trainable_variables()+[self._global_step],\n save_relative_paths=True,\n sharded=False\n )\n if root_dir is None:\n root_dir = get_dir(\"Checkpoints\")\n print(\"Saving params in new dir:\", root_dir)\n self._root_dir = root_dir\n self._summary_writer = tf.contrib.summary.create_file_writer(\"{}/logs\".format(self._root_dir), flush_millis = 10000)\n self._summary_writer.set_as_default()\n self._descriptor=descriptor\n\n def reward_logits(self, gs: BatchGameState):\n assert gs.turn == 
self._player, \"Can't play on this turn\"\n gs_array = gs.as_array()\n if self.player == PlayState.O:\n gs_array = tf.gather(gs_array, [0, 2, 1], axis=-1)\n x = tf.constant(gs_array, dtype=tf.float32)\n for w,b in self._r_conv_params:\n x = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding=\"VALID\") + b\n x = tf.nn.relu(x)\n y = tf.reduce_max(x, axis=[1,2])\n mu = tf.einsum(\"nk,k->n\", y, self._r_dense_mu_w) \\\n + self._r_dense_mu_b\n log_sig = tf.einsum(\"nk,k->n\", y, self._r_dense_sig_w) \\\n + self._r_dense_sig_b\n return mu, log_sig\n\n def ln_pi(self, gs: BatchGameState):\n logits = self.logits(gs)\n return logits - tf.reduce_logsumexp(logits, axis=1)[:,tf.newaxis]\n\n def logits(self, gs: BatchGameState):\n assert gs.turn == self._player, \"Can't play on this turn\"\n gs_array = gs.as_array()\n if self.player == PlayState.O:\n gs_array = tf.gather(gs_array, [0, 2, 1], axis=-1)\n x = tf.constant(gs_array, dtype=tf.float32)\n y = x\n for w, b, s in self._p_conv_params:\n y = tf.nn.conv2d(y, w, strides=[1, 1, 1, 1], padding=\"SAME\") + b\n y = tf.nn.relu(y)\n # Skip connections:\n y += tf.nn.conv2d(x, s, strides=[1, 1, 1, 1], padding=\"SAME\")\n y = tf.reduce_max(y, axis=1)\n logits = tf.nn.conv1d(y, self._p_conv_1_w, stride=1, padding=\"SAME\") \\\n + self._p_conv_1_b\n return tf.reshape(logits, [gs.batch_size, -1])\n\n def learn_from_games(self, games: BatchGame, alpha=1., verbose=True):\n self._global_step.assign_add(1)\n last_state = games.cur_state\n rewards = tf.zeros(last_state.batch_size,dtype=tf.float32)\n num_p_rewards = []\n num_m_rewards = []\n r_loss = []\n p_loss = []\n for i, state in enumerate(games.reversed_states):\n if state.turn == self.player and i > 0:\n # Update the reward function\n with tf.GradientTape() as gr_tape, self._container.as_default():\n expected_reward_mu, expected_reward_log_sig = self.reward_logits(state)\n loss = tf.reduce_mean(nn.gaussian_neg_log_likelihood(\n mu=expected_reward_mu,\n log_sig=expected_reward_log_sig,\n x=rewards\n ))\n r_loss.append(float(loss))\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar(\"%s reward loss\" % self.player.name, loss)\n rw_gradients = gr_tape.gradient(\n loss,\n self._container.trainable_variables()\n )\n self._optimizer.apply_gradients(\n zip(rw_gradients, self._container.trainable_variables())\n )\n\n # Update the policy function\n with tf.GradientTape() as gr_tape, self._container.as_default():\n expected_reward = expected_reward_mu \\\n + tf.random.normal(shape=expected_reward_mu.shape) \\\n * tf.exp(expected_reward_log_sig)\n td = tf.stop_gradient(rewards - expected_reward)\n un_weighted_ln_pi = tf.reduce_mean(-td[:,tf.newaxis]*self.ln_pi(state))\n rw_ln_pi = un_weighted_ln_pi*(alpha**i)\n p_loss.append(float(un_weighted_ln_pi))\n\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar(\"%s policy loss\" % self.player.name, un_weighted_ln_pi)\n gradients = gr_tape.gradient(\n rw_ln_pi,\n self._container.trainable_variables()\n )\n self._optimizer.apply_gradients(\n zip(gradients, self._container.trainable_variables())\n )\n winners = state.winners()\n rewards_list = [\n 1. if winner == self.player else\n -1. 
if winner == self.opponent else\n                float(old_reward * alpha) if winner is None else 0.\n                for old_reward, winner in zip(rewards, winners)\n            ]\n            rewards = tf.constant(rewards_list, dtype=tf.float32)\n            num_p_rewards.append(sum(1 for w in winners if w == self.player))\n            num_m_rewards.append(sum(1 for w in winners if w == self.opponent))\n        total_p_rewards = sum(num_p_rewards)\n        total_m_rewards = sum(num_m_rewards)\n        with tf.contrib.summary.always_record_summaries():\n            tf.contrib.summary.scalar(\"%s avg_policy_loss\" % self.player.name,\n                                      sum(p_loss) / (i + 1))\n            tf.contrib.summary.scalar(\"%s avg_reward_loss\" % self.player.name,\n                                      sum(r_loss) / (i + 1))\n            tf.contrib.summary.scalar(\"%s positive rewards\" % self.player.name,\n                                      total_p_rewards)\n            tf.contrib.summary.scalar(\"%s reward cnt\" % self.player.name,\n                                      total_p_rewards+total_m_rewards)\n        if verbose and (total_p_rewards > 0 or total_m_rewards > 0):\n            print(\"Noticed %d: (+%d, -%d) rewards for %s\"\n                  % (total_p_rewards + total_m_rewards, total_p_rewards,\n                     total_m_rewards, self.player))\n            print(\"Reward Loss: %s, Policy Loss: %s, Player: %s\"\n                  % (sum(r_loss)/(i+1), sum(p_loss)/(i+1), self.player))\n\n    @property\n    def player(self):\n        return self._player\n\n    @property\n    def opponent(self):\n        return opponent(self._player)\n\n    def save(self, descriptor=None):\n        if descriptor is None:\n            descriptor = self._descriptor\n        path = build_checkpoint_file_name(self._root_dir, descriptor)\n        fp = self._saver.save(None, path, global_step=self._global_step)\n        print(\"model saved at %s\" % fp)\n\n    @classmethod\n    def load(cls, root_dir, descriptor=\"C4\", player=PlayState.X):\n        ckpt_dir = os.path.join(root_dir, descriptor)\n        root_dir = os.path.dirname(ckpt_dir)\n        print(\"ckpt:\", ckpt_dir)\n        print(\"root:\", root_dir)\n        p = Policy(player, root_dir=root_dir, descriptor=descriptor)\n        p._saver.restore(None, tf.train.latest_checkpoint(ckpt_dir))\n        return p\n\n\nclass AI:\n    def __init__(self, pi: Policy):\n        self._pi = pi\n\n    def next_moves(self, gs: BatchGameState):\n        impossible_actions = tf.logical_not(gs.next_actions())\n        logits = self._pi.logits(gs)\n        return nn.sample_masked_multinomial(logits, impossible_actions, axis=1)\n\n    @property\n    def player(self):\n        return self._pi.player\n\n    def learn_from_games(self, game: BatchGame, alpha=0.5, verbose=True):\n        self._pi.learn_from_games(game, alpha, verbose=verbose)\n\n    def save(self, fp=None):\n        self._pi.save(fp)\n\n    @classmethod\n    def load(cls, rp, dp, player=PlayState.X):\n        try:\n            pi = Policy.load(rp, dp, player)\n        except (FileNotFoundError, ValueError, PermissionDeniedError) as exc:\n            print(\"Unable to load policy:\", exc)\n            pi = Policy(player=player, descriptor=dp)\n        return AI(pi)","sub_path":"src/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":11960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"278301343","text":"# Problem 1: binary to decimal conversion\r\n\r\n\r\nx = input(\"Enter a binary number: \") # 1010\r\n\r\ny = x[::-1]\r\na = 0\r\nawnser = 0\r\n\r\nfor z in y:\r\n    awnser += int(z)*2**a\r\n    a += 1\r\n\r\nprint(awnser) # 10\r\n\r\n\r\n\r\n","sub_path":"ICT2_Quiz/Mid_term_Quiz_1.py","file_name":"Mid_term_Quiz_1.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"434740143","text":"from mongoengine import *\nfrom urllib.error import HTTPError\nfrom api import model_api, settings\nfrom api.errors import *\nfrom api.base_classes import *\nfrom datetime import date, 
timedelta\nfrom django.utils.functional import cached_property\n\n\n\nclass Episodes():\n\n\tdef __init__(self, season_id_pk):\n\t\tepisodes = Episode.objects(season_id_pk = season_id_pk)\n\t\tself.__episodes = []\n\t\tfor episode in _episodes:\n\t\t\tself.__episodes.append(episode)\n\t\tself.__shellSort()\n\n\tdef __iter__(self):\n\t\treturn self.__episodes\n\n\tdef __getitem__(self,index):\n\t\treturn self.__episodes[index - 1]\n\n\tdef __shellSort(self):\n\t\tinc = len(self.__episodes) // 2\n\t\twhile inc:\n\t\t\tfor i in range(0, len(self.__episodes)):\n\t\t\t\tj = i\n\t\t\t\ttemp = self.__episodes[i]\n\t\t\t\twhile j >= inc and self.__episodes[j - inc].number > temp.number:\n\t\t\t\t\tself.__episodes[j] = self.__episodes[j - inc]\n\t\t\t\t\tj -= inc\n\t\t\tself.__episodes[j] = temp\n\t\tinc = inc // 2 if inc // 2 else (0 if inc == 1 else 1)\n\n\n# Create your models here.\n\nclass Movie(BaseDocumment):\n\n\t\"\"\"docstring for Movie\"\"\"\n\tdef __init__(self, id):\n\t\ttry:\n\t\t\tself = Movie.objects.get(id=id)\n\t\t\tnow = datetime.now()\n\t\t\tif (now - self.__date_update).days > settings.UPDATE_FREQUENCY:\n\t\t\t\tmovie_from_api = model_api.Movie(id, images =False)\n\t\t\t\tself.__update_attr(movie_from_api)\n\t\t\t\tself.save()\n\n\t\texcept DoesNotExist:\n\t\t\ttry:\n\t\t\t\tmovie_from_api = model_api.Movie(id, images =True)\n\t\t\t\tself.__update_attr(movie_from_api)\n\t\t\t\tself.__update_images(movie_from_api)\n\t\t\t\tself.save()\n\t\t\texcept HTTPError:\n\t\t\t\traise NotFoundError(value=\"movie\")\n\n\t\texcept MultipleObjectsReturned:\n\t\t\tMovie.objects(id=id).delete()\n\t\t\tmovie_from_api = model_api.Movie(id, images =True)\n\t\t\tself.__update_attr(movie_from_api)\n\t\t\tself.__update_images(movie_from_api)\n\t\t\tself.save()\n# filme deletado\n#\t\texcept HTTPError:\n#\t\t\trating NotFoundError(value=\"movie\")\n\n\tdef __update_attr(self, movie_from_api):\n\t\tself.title = movie_from_api['title']\n\t\tself.year = movie_from_api['year']\n\t\tself.overview = movie_from_api['overview']\n\t\tself.rating = movie_from_api['rating']\n\t\tself.votes = movie_from_api['votes']\n\t\tself.__date_update = date.today()\n\t\t\n\t\tself.ids = ID()\n\t\tself.ids.trakt = movie_from_api['ids']['trakt']\n\t\tself.ids.imdb = movie_from_api['ids']['imdb']\n\t\tself.ids.slug = movie_from_api['ids']['slug']\n\n\tdef __update_images(self, movie_from_api):\n\t\tself.images = {}\n\t\tself.images['fanart'] = movie_from_api['fanart']\n\t\tself.images['poster'] = movie_from_api['poster']\n\n\n\n\n\nclass Episode(BaseDocumentIntern):\n\tseason_id_pk = ObjectIdField(required=True)\n\n\t\"\"\"docstring for Episode\n\t\tshow_id não é ObjectIdField\"\"\"\n\tdef __init__(self, show_id, season, number):\n\t\tseason = Season(show_id = show_id, season = season)\n\t\ttry:\n\t\t\tepisode = Episode.objects.get(season_id_pk = season.pk, number = number)\n\t\t\tnow = datetime.now()\n\t\t\tif (now - self.__date_update).days > settings.UPDATE_FREQUENCY:\n\t\t\t\tepisode_from_api = model_api.Episode(show_id = show_id, season_number = season, episode_number = number)\n\t\t\t\tself.__update_attr(episode_from_api)\n\t\t\t\tself.save()\n\n\t\texcept DoesNotExist:\n\t\t\ttry:\n\t\t\t\tepisode_from_api = model_api.Episode(show_id = show_id, season_number = season, episode_number = number, images = True)\n\t\t\t\tself.season_id_pk = season.pk\n\t\t\t\tself.__update_attr(episode_from_api)\n\t\t\t\tself.__update_image(episode_from_api)\n\t\t\t\tself.save()\n\t\t\texcept HTTPError:\n\t\t\t\traise 
NotFoundError(value=\"episode\")\n\n\t\texcept MultipleObjectsReturned:\n\t\t\tEpisode.objects(season_id_pk = season.pk, number = number).delete()\n\t\t\tself.season_id_pk = season.pk\n\t\t\tself.__update_attr(episode_from_api)\n\t\t\tself.__update_image(episode_from_api)\n\t\t\tself.save()\t\t\t\n# episodio deletado\n#\t\texcept HTTPError:\n#\t\t\traise NotFindException(value=\"episode\")\n\t\t\t\n\t@cached_property\n\tdef season(self):\n\t\tif not hasattr(self, '_season'):\n\t\t\tself._season = Season.objects.get(pk = self.season_id_pk)\n\t\treturn self._season\n\n\t@cached_property\n\tdef show(self):\n\t\tif not hasattr(self, '_show'):\n\t\t\tself._show = Show.objects.get(pk = self.season.show_id_pk)\n\t\treturn self._show\n\n\tdef __update_attr(self, episode_from_api):\n\t\tself.number = episode_from_api['number']\n\t\tself.overview = episode_from_api['overview']\n\t\tself.rating = episode_from_api['rating']\n\t\tself.votes = episode_from_api['votes']\n\t\tself.__date_update = date.today()\n\t\t\n\t\tself.ids = ID()\n\t\tself.ids.trakt = episode_from_api['ids']['trakt']\n\t\tself.ids.imdb = episode_from_api['ids']['imdb']\n\n\tdef __update_image(self, episode_from_api):\n\t\tself.images = episode_from_api['image']\n\nclass Season(BaseDocumentIntern):\n\tshow_id_pk = ObjectIdField(required=True)\n\tepisode_count = IntField(required=True)\n\taired_episodes = IntField(required=True)\n\n\t\"\"\"docstring for Season\n\t\tshow_id não pe ObjectIdField\"\"\"\n\tdef __init__(self, show_id, season):\n\t\tshow = Show(show_id)\n\t\ttry:\n\t\t\tself = Season.objects.get(show_id_pk = show.pk, number = season)\n\t\t\tnow = datetime.now()\n\t\t\tif (now - self.__date_update).days > settings.UPDATE_FREQUENCY:\n\t\t\t\tseason_from_api = model_api.Season(show_id = show_id, season_number = season)\n\t\t\t\tself.__update_attr(season_from_api)\n\t\t\t\tself.save()\n\t\texcept DoesNotExist:\n\t\t\ttry:\n\t\t\t\tseason_from_api = model_api.Season(show_id = show_id, season_number = season, images = True)\n\t\t\t\tself.__update_attr(season_from_api)\n\t\t\t\tself.__update_image(season_from_api)\n\t\t\t\tself.show_id_pk = show.pk\n\t\t\t\tself.save()\n\t\t\texcept HTTPError:\n\t\t\t\traise NotFoundError(value=\"season\")\n\n\t@cached_property\n\tdef episodes(self):\n\t\tif not hasattr(self, '_episodes'):\n\t\t\tself._episodes = Episodes(season_id_pk = self.pk)\n\t\treturn self._episodes\n\n\t@cached_property\n\tdef show(self):\n\t\tif not hasattr(self, '_show'):\n\t\t\tself._show = Show.objects.get(pk = self.show_id_pk)\n\t\treturn self._show\n\n\tdef __update_attr(self, season_from_api):\n\t\tself.number = season_from_api['number']\n\t\tself.overview = season_from_api['overview']\n\t\tself.rating = season_from_api['rating']\n\t\tself.votes = season_from_api['votes']\n\t\tself.episode_count = season_from_api['episode_count']\n\t\tself.aired_episodes = season_from_api['aired_episodes']\n\t\tself.__date_update = date.today()\n\n\t\tself.ids = ID()\n\t\tself.ids.trakt = season_from_api['ids']['trakt']\n\t\tself.ids.imdb = season_from_api['ids']['imdb']\n\n\tdef __update_episodes(self, season_from_api):\n\t\tfor i in range(1, self.episode_count + 1):\n\t\t\tself.episodes[i] = Episode(show_id, season, i)\n\n\tdef __update_image(self, season_from_api):\n\t\tself.image = season_from_api['image']\n\t\t\n\nclass Seasons():\n\n\t\"\"\"docstring for Seasons\n\t\tshow_id não é ObjectIdField\"\"\"\n\tdef __init__(self, show_id):\n\t\tshow = Show(show_id)\n\t\ttry:\n\t\t\tseasons = Season.objects(show_id_pk = 
show.pk)\n\t\t\t#self.show_id é ObjectIdField\n\t\t\tself.show_id_pk = seasons[0].show_id_pk\n\t\t\tself.seasons = []\n\t\t\tfor season in seasons:\n\t\t\t\tself.seasons.append(season)\n\t\t\tself.__shellSort()\n\t\t\tself.number_season = len(self.seasons)\n\t\t\n\t\texcept DoesNotExist:\n\t\t\tself.number_season = len(api_seasons)\n\t\t\tSeasons.update(show_id = show_id)\n\t\t\t#perigoso, potencial loop\n\t\t\tself = Seasons(show_id = show_id)\n\n\tdef __getitem__(self,index):\n\t\treturn self.seasons[index]\n\n\tdef __iter__(self):\n\t\treturn self.seasons\n\n\t@staticmethod\n\tdef update(show_id):\n\t\tapi_seasons = model_api.Seasons(show_id = show_id, images = False, extended = False)\n\t\tfor season in api_seasons:\n\t\t\tSeason(show_id = show_id, season = season['number']) \n\n\t@cached_property\n\tdef show(self):\n\t\tif not hasattr(self, '_show'):\n\t\t\tself._show = Show.objects.get(pk = self.show_id_pk)\n\t\treturn self._show\n\n\tdef __shellSort(self):\n\t\tinc = len(self.seasons) // 2\n\t\twhile inc:\n\t\t\tfor i in range(0, len(self.seasons)):\n\t\t\t\tj = i\n\t\t\t\ttemp = self.seasons[i]\n\t\t\t\twhile j >= inc and self.seasons[j - inc].number > temp.number:\n\t\t\t\t\tself.seasons[j] = self.seasons[j - inc]\n\t\t\t\t\tj -= inc\n\t\t\tself.seasons[j] = temp\n\t\tinc = inc // 2 if inc // 2 else (0 if inc == 1 else 1)\n\nclass Show(BaseDocumment):\n\n\t\"\"\"docstring for Show\"\"\"\n\tdef __init__(self, id):\n\t\ttry:\n\t\t\tself = Show.objects.get(id=id)\n\t\t\tdelta_time = (datetime.now() - self.__date_update).days\n\t\t\tif delta_time > settings.UPDATE_FREQUENCY:\n\t\t\t\tshow_from_api = model_api.Movie(id, images =False)\n\t\t\t\tself.__update_attr(show_from_api)\n\t\t\t\tself.save()\n\t\t\tif delta_time > ( settings.UPDATE_FREQUENCY // 2 ):\n\t\t\t\tSeasons.update(show_id = id)\n\n\t\texcept DoesNotExist:\n\t\t\ttry:\n\t\t\t\tshow_from_api = model_api.Show(id, images =True)\n\t\t\t\tself.__update_attr(show_from_api)\n\t\t\t\tself.__update_images(show_from_api)\n\t\t\t\tself.save()\n\t\t\texcept HTTPError:\n\t\t\t\traise NotFoundError(value=\"show\")\n\n\t\texcept MultipleObjectsReturned:\n\t\t\tShow.objects(id = id).delete()\n\t\t\tshow_from_api = model_api.Show(id, images =True)\n\t\t\tself.__update_attr(show_from_api)\n\t\t\tself.__update_images(show_from_api)\n\t\t\tself.save()\n\n\t@cached_property\n\tdef seasons(self):\n\t\tif not hasattr(self, '_seasons'):\n\t\t\tself._seasons = Seasons(show_id = self.ids.slug)\n\t\treturn self._seasons\n\n\tdef __update_attr(self, show_from_api):\n\t\tself.title = show_from_api['title']\n\t\tself.year = show_from_api['year']\n\t\tself.overview = show_from_api['overview']\n\t\tself.rating = show_from_api['rating']\n\t\tself.votes = show_from_api['votes']\n\t\tself.__date_update = date.today()\n\t\t\n\t\tself.ids = ID()\n\t\tself.ids.trakt = show_from_api['ids']['trakt']\n\t\tself.ids.imdb = show_from_api['ids']['imdb']\n\t\tself.ids.slug = show_from_api['ids']['slug']\n\n\tdef __update_images(self, show_from_api):\n\t\tself.images = {}\n\t\tself.images['fanart'] = show_from_api['fanart']\n\t\tself.images['poster'] = show_from_api['poster']\n\n","sub_path":"api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"518827600","text":"#!/usr/bin/python3.6\n\n\n#\nimport sys\nimport argparse\nsys.path.append('..')\nsys.path.append('../..')\n\nfrom common.Utils import *\nfrom common.MongoDb import *\nfrom 
CvParseImpl import *\nfrom model.CvParseResult import *\n\n# This program is intended to \n# -- read specified email box\n# -- download the attachment\n# -- parse the CV\n# -- generate report with skill matrix\n#\n\n\n#----------------------------------------\n# Parse command line args\n#----------------------------------------\ndef parseArgs(progArgs):\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-a','--action', help='parse|parsepersist', required=True)\n\tparser.add_argument('-d','--dir', help='dir', required=True)\n\t#parser.add_argument('-s','--skills', help='skills', required=False)\n\n\treturn parser.parse_args()\n\n#----------------------------------------\n# doAction\n#----------------------------------------\ndef\tdoAction(cmdArgs):\n\n\ttry:\n\n\t\tcvExtns = ['txt']\n\t\tcvfileList = Utils.getFileList(cmdArgs.dir, cvExtns)\n\n\t\tfor filepath in cvfileList:\n\n\t\t\tlogger.info('cmdArgs:' + str(cmdArgs))\n\t\t\tif (cmdArgs.action == 'parse'):\n\t\t\t\tdoParse(filepath)\n\n\t\t\tif (cmdArgs.action == 'parsepersist'):\n\t\t\t\tdoParsePersist(filepath)\n\texcept Exception as e:\t\n\t\tlogger.exception(e)\n\n\n#----------------------------------------\n# Specific action\n#----------------------------------------\ndef doParse(filepath):\n\tprResult = None\n\tparser = CvParseImpl(filepath)\n\tprResult = parser.parse()\n\tlogger.info('--------------------------------')\n\tlogger.info('parse-result-json:' + str(prResult.getSecDictAsJson()))\n\treturn prResult\n\n#----------------------------------------\n# Specific action\n#----------------------------------------\ndef doParsePersist(filepath):\n\tconnString = 'mongodb://localhost:27017/'\n\tmdb = MongoDb(connString, \"vishal\", \"vishal\", \"resim\")\n\tmdb.authenticate()\n\n\tpr = doParse(filepath)\n\n\tmdb.insertOne(\"resim\", \"cv\", pr.getSecDict())\n\n#----------------------------------------\n# Main\n#----------------------------------------\nfh1 = logging.StreamHandler()\nfh1.setLevel(logging.INFO)\n\nfh2 = logging.FileHandler('./debug.log')\nfh2.setLevel(logging.DEBUG)\n\nlogger = logging.getLogger('cvreader')\nlogger.addHandler(fh1)\nlogger.addHandler(fh2)\n\n\n\ncmdArgs = parseArgs(sys.argv);\n\ndoAction(cmdArgs)\n","sub_path":"python/cvreader/CvMain.py","file_name":"CvMain.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"435672659","text":"from copy import deepcopy\n\ndef dfs(cur, graph):\n last = cur[-1]\n check = False\n for g in graph:\n if g[0] == last:\n dfs(cur + \" \" + g[1], graph)\n check = True\n if check == False:\n print(cur)\n\ndef main():\n skills = input().split()\n\n N = int(input())\n graph = []\n for _ in range(N):\n graph.append(input().split())\n\n first = deepcopy(skills)\n for g in graph:\n idx = first.index(g[1])\n del first[idx]\n\n for s in first:\n dfs(s, graph)\n\n\n\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n main()","sub_path":"swmaestro/1st/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"125952588","text":"#!/usr/bin/env python\n\"\"\"\nfla.gr controller for view a list of current invite requests\n\nFor more information, see: https://github.com/JoshAshby/\n\nhttp://xkcd.com/353/\n\nJosh Ashby\n2013\nhttp://joshashby.com\njoshuaashby@joshashby.com\n\"\"\"\nfrom seshat.route import autoRoute\nfrom utils.baseHTMLObject import baseHTMLObject\n\nfrom 
views.admin.requests.adminViewRequestsTmpl import adminViewRequestsTmpl\n\nimport models.couch.request.requestModel as rm\nimport models.couch.template.templateModel as tm\nimport models.redis.setting.settingModel as sm\nimport models.couch.baseCouchCollection as bcc\n\n\n@autoRoute()\nclass adminRequestsIndex(baseHTMLObject):\n _title = \"admin requests\"\n __level__ = 50\n __login__ = True\n def GET(self):\n \"\"\"\n \"\"\"\n if self.env[\"cfg\"].enableRequests:\n page = self.env[\"members\"][\"p\"] \\\n if self.env[\"members\"].has_key(\"p\") else 1\n view = adminViewRequestsTmpl(searchList=[self.tmplSearchList])\n\n view.scripts = [\"handlebars_1.0.min\",\n \"jquery.json-2.4.min\",\n \"sidebarTabs.flagr\",\n \"adminModal.flagr\",\n \"bulkCheck.flagr\",\n \"editForm.flagr\",\n \"adminViewRequests.flagr\"]\n\n requests = bcc.baseCouchCollection(rm.requestORM)\n requests.paginate(page, 25)\n requests.fetch()\n requests.format()\n\n view.requests = requests\n\n try:\n currentTmpl = sm.getSetting(\"enableRequests\", \"tmplid\")\n except:\n currentTmpl = \"\"\n\n tmpl = bcc.baseCouchCollection(tm.templateORM)\n tmpl.fetch()\n tmpl.filterBy(\"type\", \"email\")\n for tmp in tmpl:\n if tmp.id == currentTmpl:\n tmp.current = True\n\n view.tmpls = tmpl\n\n return view\n else:\n self._404()\n","sub_path":"flagr_core/controllers/admin/requests/adminViewRequestsController.py","file_name":"adminViewRequestsController.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"372146209","text":"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('',views.signup,name='signup'),\n path('login/',views.login_user,name='login'),\n path('success/',views.success,name='success'),\n path('logout/',views.logout_user,name='logout')\n]","sub_path":"Task2_2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"635894244","text":"import discord\nfrom discord.ext import commands, tasks\nfrom core.classes import Cog_Extension, JsonApi\nimport core.functions as func\n\n\nclass Task(Cog_Extension):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.quiz_auto.start()\n\n @tasks.loop(minutes=30)\n async def quiz_auto(self):\n await self.bot.wait_until_ready()\n\n status = JsonApi().get_json('DynamicSettingJson')\n\n if func.now_time_info('hour') != 23 or status['buffer_flush'] == 1:\n if func.now_time_info('hour') == 1:\n\n status = JsonApi().get_json('DynamicSettingJson')\n status['buffer_flush'] = 0\n JsonApi().put_json('DynamicSettingJson', status)\n\n return\n\n await func.buffer_pack(func.getChannel(self.bot, 'sqcs_report'))\n await func.buffer_pack(func.getChannel(self.bot, 'working_report'))\n\n status['buffer_flush'] = 1\n\n JsonApi().put_json('DynamicSettingJson', status)\n\n\ndef setup(bot):\n bot.add_cog(Task(bot))\n","sub_path":"cogs/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"282654395","text":"# -*- encoding: utf-8 -*-\n# Copyright (c) 2016 b<>com\n#\n# Authors: Vincent FRANCOISE \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nimport oslo_messaging as om\n\n\nclass NotificationFilter(om.NotificationFilter):\n \"\"\"Notification Endpoint base class\n\n This class is responsible for handling incoming notifications. Depending\n on the priority level of the incoming, you may need to implement one or\n more of the following methods:\n\n .. code: py\n def audit(self, ctxt, publisher_id, event_type, payload, metadata):\n do_something(payload)\n\n def info(self, ctxt, publisher_id, event_type, payload, metadata):\n do_something(payload)\n\n def warn(self, ctxt, publisher_id, event_type, payload, metadata):\n do_something(payload)\n\n def error(self, ctxt, publisher_id, event_type, payload, metadata):\n do_something(payload)\n\n def critical(self, ctxt, publisher_id, event_type, payload, metadata):\n do_something(payload)\n \"\"\"\n\n def _build_regex_dict(self, regex_list):\n if regex_list is None:\n return {}\n\n regex_mapping = {}\n for key, value in regex_list.items():\n if isinstance(value, dict):\n regex_mapping[key] = self._build_regex_dict(value)\n else:\n if callable(value):\n regex_mapping[key] = value\n elif value is not None:\n regex_mapping[key] = re.compile(value)\n else:\n regex_mapping[key] = None\n\n return regex_mapping\n\n def _check_for_mismatch(self, data, regex):\n if isinstance(regex, dict):\n mismatch_results = [\n k not in data or not self._check_for_mismatch(data[k], v)\n for k, v in regex.items()\n ]\n if not mismatch_results:\n return False\n\n return all(mismatch_results)\n elif callable(regex):\n # The filter is a callable that should return True\n # if there is a mismatch\n return regex(data)\n elif regex is not None and data is None:\n return True\n elif (regex is not None and\n isinstance(data, str) and\n not regex.match(data)):\n return True\n\n return False\n","sub_path":"watcher/decision_engine/model/notification/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"417566035","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Brian/Work/github_projects/marvin_pypi/python/marvin/extern/wtforms-alchemy/wtforms_alchemy/validators.py\n# Compiled at: 2018-01-12 14:08:15\n# Size of source mod 2**32: 3313 bytes\nfrom collections import Iterable, Mapping\nimport six\nfrom sqlalchemy import Column\nfrom sqlalchemy.orm.attributes import InstrumentedAttribute\nfrom wtforms import ValidationError\n\nclass Unique(object):\n __doc__ = \"Checks field values unicity against specified table fields.\\n\\n :param column:\\n InstrumentedAttribute object, eg. User.name, or\\n Column object, eg. user.c.name, or\\n a field name, eg. 'name' or\\n a tuple of InstrumentedAttributes, eg. (User.name, User.email) or\\n a dictionary mapping field names to InstrumentedAttributes, eg.\\n {\\n 'name': User.name,\\n 'email': User.email\\n }\\n :param get_session:\\n A function that returns a SQAlchemy Session. 
This parameter is not\\n needed if the given model supports Flask-SQLAlchemy styled query\\n parameter.\\n :param message:\\n The error message.\\n \"\n field_flags = ('unique', )\n\n def __init__(self, column, get_session=None, message=None):\n self.column = column\n self.message = message\n self.get_session = get_session\n\n @property\n def query(self):\n self._check_for_session(self.model)\n if self.get_session:\n return self.get_session().query(self.model)\n if hasattr(self.model, 'query'):\n return getattr(self.model, 'query')\n raise Exception('Validator requires either get_session or Flask-SQLAlchemy styled query parameter')\n\n def _check_for_session(self, model):\n if not hasattr(model, 'query'):\n if not self.get_session:\n raise Exception('Could not obtain SQLAlchemy session.')\n\n def _syntaxes_as_tuples(self, form, field, column):\n \"\"\"Converts a set of different syntaxes into a tuple of tuples\"\"\"\n if isinstance(column, six.string_types):\n return (\n (\n column, getattr(form.Meta.model, column)),)\n else:\n if isinstance(column, Mapping):\n return tuple((x[0], self._syntaxes_as_tuples(form, field, x[1])[0][1]) for x in column.items())\n if isinstance(column, Iterable):\n return tuple(self._syntaxes_as_tuples(form, field, x)[0] for x in column)\n if isinstance(column, (Column, InstrumentedAttribute)):\n return (\n (\n column.key, column),)\n raise TypeError('Invalid syntax for column')\n\n def __call__(self, form, field):\n columns = self._syntaxes_as_tuples(form, field, self.column)\n self.model = columns[0][1].class_\n query = self.query\n for field_name, column in columns:\n query = query.filter(column == form[field_name].data)\n\n obj = query.first()\n if not hasattr(form, '_obj'):\n raise Exception(\"Couldn't access Form._obj attribute. 
Either make your form inherit WTForms-Alchemy ModelForm or WTForms-Components ModelForm or make this attribute available in your form.\")\n else:\n if obj:\n if not form._obj == obj:\n if self.message is None:\n self.message = field.gettext('Already exists.')\n raise ValidationError(self.message)","sub_path":"pycfiles/sdss_marvin-2.3.6-py2.py3-none-any/validators.cpython-36.py","file_name":"validators.cpython-36.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"585799459","text":"import argparse\nfrom contextlib import redirect_stdout\nimport io\nimport json\nimport logging\nimport os\n\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom tensorflow.python.lib.io import file_io\nimport tensorflow_data_validation as tfdv\n\n\n_logger = logging.getLogger()\n\n\ndef _generate_stats(known_args, pipeline_args):\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n stats_output_path = os.path.join(known_args.output_dir, 'stats.tfrecord')\n stats = tfdv.generate_statistics_from_tfrecord(\n known_args.data_location,\n output_path=stats_output_path,\n pipeline_options=pipeline_options\n )\n file_io.write_string_to_file('/tmp/stats_output_path.txt', stats_output_path)\n return stats\n\n\ndef _write_stats_visualization(output_dir, stats):\n stats_viz_output_path = os.path.join(output_dir, 'stats_viz.html')\n stats_viz_rendered_html = tfdv.utils.display_util.get_statistics_html(stats)\n file_io.write_string_to_file(stats_viz_output_path, stats_viz_rendered_html)\n file_io.write_string_to_file('/tmp/stats_viz_output_path.txt', stats_viz_output_path)\n return stats_viz_output_path\n\n\ndef _infer_schema(output_dir, stats):\n inferred_schema_output_path = os.path.join(output_dir, 'inferred_schema.pb2')\n inferred_schema = tfdv.infer_schema(stats)\n file_io.write_string_to_file(inferred_schema_output_path, inferred_schema.SerializeToString())\n file_io.write_string_to_file(\n '/tmp/inferred_schema_output_path.txt', inferred_schema_output_path\n )\n return inferred_schema\n\n\ndef _render_inferred_schema_summary_markdown(inferred_schema):\n display_schema_out = io.StringIO()\n with redirect_stdout(display_schema_out):\n tfdv.display_schema(inferred_schema)\n return f'''# Inferred schema summary\n```\n{display_schema_out.getvalue()}\n```'''\n\n\ndef _write_mlpipeline_ui_metadata(stats_viz_output_path, inferred_schema):\n metadata_as_json = json.dumps({\n 'outputs': [\n {\n 'type': 'web-app',\n 'storage': 'gcs',\n 'source': stats_viz_output_path,\n },\n {\n 'storage': 'inline',\n 'source': _render_inferred_schema_summary_markdown(inferred_schema),\n 'type': 'markdown',\n },\n ]\n })\n file_io.write_string_to_file('/tmp/mlpipeline-ui-metadata.json', metadata_as_json)\n\n\ndef _main(argv=None):\n _logger.setLevel(logging.INFO)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_location', required=True)\n parser.add_argument('--output_dir', required=True)\n known_args, pipeline_args = parser.parse_known_args(argv)\n\n stats = _generate_stats(known_args, pipeline_args)\n stats_viz_output_path = _write_stats_visualization(known_args.output_dir, stats)\n inferred_schema = _infer_schema(known_args.output_dir, stats)\n _write_mlpipeline_ui_metadata(stats_viz_output_path, inferred_schema)\n\n\nif __name__ == '__main__':\n 
_main()","sub_path":"mlpipeline_utils/mlpipeline_utils/scripts/tfrecord_stats_gen.py","file_name":"tfrecord_stats_gen.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"122785383","text":"from pysmoothstreams import Server, Quality, Protocol, Service, Feed\n\n\nclass Playlist:\n def __init__(self, auth_sign, guide):\n self.auth_sign = auth_sign\n self.guide = guide\n\n self.channels = guide.channels\n\n def generate_m3u_playlist(\n self, server, auth_sign, quality=Quality.HD, protocol=Protocol.HLS\n ):\n playlist = \"#EXTM3U\\n\"\n for channel in self.channels:\n clean_channel_name = channel[\"name\"].strip()\n\n playlist += '#EXTINF: tvg-id=\"{channel_id}\" tvg-name=\"{clean_channel_name}\" tvg-logo=\"{channel_icon}\" tvg-chno=\"{channel_number}\", {clean_channel_name}\\n'.format(\n channel_id=channel[\"id\"],\n clean_channel_name=clean_channel_name,\n channel_icon=channel[\"icon\"],\n channel_number=channel[\"number\"],\n )\n playlist += \"{url}\\n\".format(\n url=self.guide.build_stream_url(\n server, channel[\"number\"], auth_sign, quality, protocol\n )\n )\n return playlist\n","sub_path":"pysmoothstreams/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"568820709","text":"import csv\r\nimport random\r\nfrom docxtpl import DocxTemplate\r\ndoc = DocxTemplate(\"example.docx\")\r\n\r\nSPR_RUB = \"\"\r\nSPR_KOP = \"\"\r\n\r\nnum2words = {1: 'ОДИН', 2: 'ДВА', 3: 'ТРИ', 4: 'ЧЕТЫРЕ', 5: 'ПЯТЬ', \\\r\n 6: 'ШЕСТЬ', 7: 'СЕМЬ', 8: 'ВОСЕМЬ', 9: 'ДЕВЯТЬ', 10: 'ДЕСЯТЬ', \\\r\n 11: 'ОДИННАДЦАТЬ', 12: 'ДВЕНАДЦАТЬ', 13: 'ТРИНАДЦАТЬ', 14: 'ЧЕТЫРНАДЦАТЬ', \\\r\n 15: 'ПЯТНАДЦАТЬ', 16: 'ШЕСТНАДЦАТЬ', 17: 'СЕМНАДЦАТЬ', 18: 'ВОСЕМНАДЦАТЬ',\r\n 19: 'ДЕВЯТНАДЦАТЬ', 20:'ДВАДЦАТЬ', 30:'ТРИДЦАТЬ', 40:'СОРОК', 50:'ПЯТДЕСЯТ',\r\n 60: 'ШЕСТЬДЕСЯТ', 70:'СЕМЬДЕСЯТ', 80:'ВОСЕМЬДЕСЯТ', 90:'ДЕВЯНОСТО', 100:'СТО',\r\n 200: 'ДВЕСТИ', 300: 'ТРИСТА', 400: 'ЧЕТЫРЕСТА', 500: 'ПЯТЬСОТ', 600: 'ШЕСТЬСОТ',\r\n 700: 'СЕМЬСОТ', 800: 'ВОСЕМЬСОТ', 900: 'ДЕВЯТЬСОТ'}\r\n\r\nnum2words_k = {1: 'ОДНА', 2: 'ДВЕ', 3: 'ТРИ', 4: 'ЧЕТЫРЕ', 5: 'ПЯТЬ', \\\r\n 6: 'ШЕСТЬ', 7: 'СЕМЬ', 8: 'ВОСЕМЬ', 9: 'ДЕВЯТЬ', 10: 'ДЕСЯТЬ', \\\r\n 11: 'ОДИННАДЦАТЬ', 12: 'ДВЕНАДЦАТЬ', 13: 'ТРИНАДЦАТЬ', 14: 'ЧЕТЫРНАДЦАТЬ', \\\r\n 15: 'ПЯТНАДЦАТЬ', 16: 'ШЕСТНАДЦАТЬ', 17: 'СЕМНАДЦАТЬ', 18: 'ВОСЕМНАДЦАТЬ',\r\n 19: 'ДЕВЯТНАДЦАТЬ', 20:'ДВАДЦАТЬ', 30:'ТРИДЦАТЬ', 40:'СОРОК', 50:'ПЯТДЕСЯТ',\r\n 60: 'ШЕСТЬДЕСЯТ', 70:'СЕМЬДЕСЯТ', 80:'ВОСЕМЬДЕСЯТ', 90:'ДЕВЯНОСТО', 100:'СТО',\r\n 200: 'ДВЕСТИ', 300: 'ТРИСТА', 400: 'ЧЕТЫРЕСТА', 500: 'ПЯТЬСОТ', 600: 'ШЕСТЬСОТ',\r\n 700: 'СЕМЬСОТ', 800: 'ВОСЕМЬСОТ', 900: 'ДЕВЯТЬСОТ'}\r\n\r\nBANK = \"АО \\\"Банкрот Банк\\\" Г. 
НАБЕРЕЖНЫЕ ЧЕЛТЫ\"\r\nCOMP = \"ООО \\\"СВЯЗЬ FINGER-STICK\\\"\"\r\n\r\nBIK = random.randint(100000000, 999999999)\r\nprint(\"generated BIK: \", BIK)\r\n\r\nINN = random.randint(1000000000, 9999999999)\r\nprint(\"generated INN: \", INN)\r\n\r\nKPP = random.randint(100000000, 999999999)\r\nprint(\"generated KPP: \", KPP)\r\n\r\nACC_1 = '3010' + str(random.randint(10000, 99999)) + '0000000' + str(random.randint(1000, 9999))\r\nprint(\"generated ACC_1: \", ACC_1)\r\n\r\nACC_2 = '4070' + str(random.randint(10000, 99999)) + '0000000' + str(random.randint(1000, 9999))\r\nprint(\"generated ACC_2: \", ACC_2)\r\n\r\nACC_3 = random.randint(1, 99999)\r\nprint(\"generated ACC_3: \", ACC_3)\r\n\r\nDAY = random.randint(1, 30)\r\nprint(\"generated DAY: \", DAY)\r\n\r\nMONTH = random.randint(1, 12)\r\nprint(\"generated MONTH: \", MONTH)\r\n\r\nYEAR = random.randint(0, 20)\r\nif YEAR < 10:\r\n YEAR = '0' + str(YEAR)\r\nprint(\"generated YEAR: \", YEAR)\r\n\r\nINDEX = random.randint(100000, 999999)\r\nINDEXE = random.randint(100000, 999999)\r\n\r\nINNE = random.randint(1000000000, 9999999999)\r\nprint(\"generated INNE: \", INNE)\r\n\r\nKPPE = random.randint(100000000, 999999999)\r\nprint(\"generated KPPE: \", KPPE)\r\n\r\nEXECUTOR = \"ООО \\\"СВЯЗЬ FINGER-STICK\\\", ИНН \" + str(INN) + \", КПП \" + str(KPP) + \", \" + str(INDEX) + \", Сладководск г, Неплохая ул, дом 5\"\r\nEMPLOYER = \"ООО \\\"ОТДЫХ\\\", ИНН \" + str(INNE) + \", КПП \" + str(KPPE) + \", \" + str(INDEXE) + \", Омск г, Такая Себе ул, дом 6\"\r\nFOUNDING = str(random.randint(10000000, 99999999)) + \" от \" + str(random.randint(1, 30)) + \".\" + str(random.randint(1, 12)) + \".\" + str(random.randint(1970, 2020))\r\n\r\nJOB_1 = \"ИСХОДЯЩИЕ ЗВОНКИ\"\r\nJOB_2 = \"ВХОДЯЩИЕ ЗВОНКИ\"\r\nJOB_3 = \"СМС\"\r\nJOB_4 = \"ИНТЕРНЕТ\"\r\n\r\n#_lab1_#\r\n\r\ndata_file_1 = \"D:\\DEV\\mobile\\lab3\\data\\data1.csv\"\r\n\r\nsms_sum = 0 \r\nout_calls_sum = 0\r\ninc_calls_sum = 0\r\n\r\nnumber = '915642913'\r\nsms_tariff = 1\r\n#first 5 for free\r\nout_calls_tariff = 1\r\ninc_calls_tariff = 1\r\n\r\n# initializing the titles and rows\r\nfields = [] \r\nrows = [] \r\n \r\n# reading csv file \r\nwith open(data_file_1, 'r') as csvfile: \r\n # creating a csv reader object \r\n csvreader = csv.reader(csvfile) \r\n \r\n # extracting field names through first row \r\n fields = next(csvreader)\r\n \r\n # extracting each data row one by one \r\n for row in csvreader: \r\n rows.append(row) \r\n \r\n # get total number of rows \r\n #print(\"Total no. 
of lines in a CDR file: %d\"%(csvreader.line_num)) \r\n \r\n # calculating SMS tariffication\r\n for row in rows[:csvreader.line_num]: \r\n if number in row[1]:\r\n sms_sum += float(row[4])\r\n sms_price = sms_sum*sms_tariff-5\r\n if sms_price < 0:\r\n sms_price = 0\r\n #print(sms_price, \"rubles for SMS\")\r\n\r\n # calculating outgoing calls tariffication\r\n for row in rows[:10]: \r\n if number in row[1]:\r\n out_calls_sum += float(row[3])\r\n out_calls_price = out_calls_sum*out_calls_tariff\r\n if out_calls_price < 0:\r\n out_calls_price = 0\r\n #print(out_calls_price, \"rubles for outgoing calls\")\r\n\r\n # calculating incoming calls tariffication\r\n for row in rows[:10]: \r\n if number in row[2]:\r\n inc_calls_sum += float(row[3])\r\n inc_calls_price = inc_calls_sum*inc_calls_tariff\r\n if inc_calls_price < 0:\r\n inc_calls_price = 0\r\n #print(inc_calls_price, \"rubles for incoming calls\")\r\n\r\n # total price\r\n total_price = sms_price+out_calls_price+inc_calls_price\r\n #print(\"total price is:\", total_price)\r\n\r\n#_lab2_#\r\n \r\ntotal_traffic = 0\r\ntotal_occurences = 0\r\ni = 0\r\ndata_file_2 = \"D:\\DEV\\mobile\\lab3\\data\\data2.csv\"\r\n#file = input(\"Enter path to CDR file: \")\r\n\r\n#number = input(\"Enter IP address: \")\r\nip_number = \"192.168.250.59\"\r\n#first 1000#b for free\r\ntariff = 1\r\n\r\n# initializing the titles and rows\r\nfields_2 = [] \r\nrows_2 = [] \r\nrowlist = []\r\ntraflist = []\r\n# reading csv file \r\nwith open(data_file_2, 'r') as csvfile_2: \r\n # creating a csv reader object \r\n csvreader_2 = csv.reader(csvfile_2) \r\n \r\n # extracting field names through first row \r\n fields_2 = next(csvreader_2)\r\n \r\n # extracting each data row one by one \r\n for row in csvreader_2: \r\n rows_2.append(row) \r\n \r\n # get total number of rows \r\n #print(\"Total no. 
of lines in a CDR file: %d\"%(csvreader.line_num)) \r\n \r\n #\r\n # calculating incoming calls tariffication\r\n for row in rows_2[1:17450]:\r\n if row[3] == ip_number:\r\n total_traffic = total_traffic + int(row[12])\r\n total_occurences = total_occurences + 1\r\n #print(int(row[12]))\r\n traflist += {(row[1], row[12])}\r\n if row[4] == ip_number:\r\n total_traffic = total_traffic + int(row[12])\r\n total_occurences = total_occurences + 1\r\n traflist += {(row[1], row[12])}\r\n c = \"\"\r\n # total price\r\n if total_traffic < 1000:\r\n c = \"байт\"\r\n total_cost = (total_traffic*1000-1000)*1/1000000000\r\n if total_cost < 0:\r\n total_cost = 0\r\n if 1000000 > total_traffic > 1000:\r\n total_traffic = total_traffic/1000\r\n c = \"КБ\"\r\n total_cost = (total_traffic*1000-1000)*1/1000000\r\n if total_cost < 0:\r\n total_cost = 0\r\n if 1000000000 > total_traffic > 1000000:\r\n total_traffic = total_traffic/1000000\r\n c = \"МБ\"\r\n total_cost = (total_traffic-1)*1\r\n if total_cost < 0:\r\n total_cost = 0\r\n #print(\"total traffic:\", total_traffic, c)\r\n #print(\"total occurences:\", total_occurences)\r\n #print(\"total cost: \", round(total_cost, 2), \"rubles\")\r\n \r\ndef cringe(N):\r\n try:\r\n print(num2words[N])\r\n except KeyError:\r\n try:\r\n sotkas = num2words[N-N%100]\r\n tens = num2words[N%100-N%10]\r\n odins = num2words[N%10]\r\n if 9 < (N%100) < 20:\r\n odins = ''\r\n tens = num2words[N%100]\r\n #print(sotkas + \" \" + tens + \" \" + odins)\r\n return(sotkas + \" \" + tens + \" \" + odins)\r\n except KeyError:\r\n print('Number out of range')\r\n \r\ndef kringe(N):\r\n try:\r\n print(num2words_k[N])\r\n except KeyError:\r\n try:\r\n tens = num2words_k[N%100-N%10]\r\n odins = num2words_k[N%10]\r\n if 9 < (N%100) < 20:\r\n odins = ''\r\n tens = num2words_k[N%100]\r\n #print(tens + \" \" + odins)\r\n return(tens + \" \" + odins)\r\n except KeyError:\r\n print('Number out of range')\r\n\r\nTOTAL = (out_calls_price+inc_calls_price+sms_price+round(total_cost, 2))\r\n\r\npogs = round(TOTAL%1, 2)*100\r\nif 4 < pogs%10 < 10:\r\n SPR_KOP = \"КОПЕЕК\"\r\nelif pogs%10 == 1:\r\n SPR_KOP = \"КОПЕЙКА\"\r\nelif pogs%10 == 0:\r\n SPR_KOP = \"КОПЕЕК\"\r\nelif 1 < pogs%10 < 5:\r\n SPR_KOP = \"КОПЕЙКИ\"\r\nif pogs-pogs%10 == 10:\r\n SPR_KOP = \"КОПЕЕК\"\r\n#\r\n\r\nrpogs = int(TOTAL)%10\r\nif 4 < rpogs%10 < 10:\r\n SPR_RUB = \"РУБЛЕЙ\"\r\nelif rpogs%10 == 1:\r\n SPR_RUB = \"РУБЛЬ\"\r\nelif rpogs%10 == 0:\r\n SPR_RUB = \"РУБЛЕЙ\"\r\nelif 1 < rpogs%10 < 5:\r\n SPR_RUB = \"РУБЛЯ\"\r\nif int(TOTAL)%100-rpogs == 10:\r\n SPR_RUB = \"РУБЛЕЙ\"\r\n\r\nTOTAL_K = round(TOTAL%1, 2)\r\nTOTAL_K = TOTAL_K*100\r\nTOTAL_K = kringe(int(TOTAL_K))\r\n\r\ncontext = { 'BANK' : BANK, 'BIK' : BIK, 'COMP' : COMP, 'INN' : INN, 'KPP' : KPP, 'ACC_1' : ACC_1, 'ACC_2' : ACC_2,\r\n 'ACC_3' : ACC_3, 'DAY' : DAY, 'MONTH' : MONTH, 'YEAR' : YEAR, 'EXECUTOR' : EXECUTOR, 'EMPLOYER' : EMPLOYER,\r\n 'FOUNDING' : FOUNDING, 'JOB_1' : JOB_1, 'JOB_2' : JOB_2, 'JOB_3' : JOB_3, 'JOB_4' : JOB_4, 'AM_1' : out_calls_sum,\r\n 'C_1' : \"мин\", 'P_1' : out_calls_tariff, 'S_1' : out_calls_price, 'AM_2' : inc_calls_sum, 'C_2' : \"мин\",\r\n 'P_2' : inc_calls_tariff, 'S_2' : inc_calls_price, 'AM_3' : sms_sum,'C_3' : \"шт\", 'P_3' : sms_tariff,\r\n 'S_3' : sms_price, 'AM_4' : round(total_traffic, 2), 'C_4' : c, 'P_4' : '1', 'S_4' : round(total_cost, 2),\r\n 'TOTAL' : TOTAL, 'NDS' : round(TOTAL/5, 2), 'TOTAL_TEXT_RUB' : cringe(int(TOTAL)), 'TOTAL_TEXT_KOP' : TOTAL_K,\r\n 'SPR_KOP' : SPR_KOP, 'SPR_RUB' : SPR_RUB, 'BOSS' : \"Важный Х.З.\", 
'BUHGALTER' : \"Маркелова Е.А.\"}\r\ndoc.render(context)\r\ndoc.save(\"example_final.docx\")\r\n\r\nprint(\"Starting conversion.... \")\r\n\r\nfrom docx2pdf import convert\r\n#### Edit this to edit output file location\r\nconvert(\"D:\\DEV\\mobile\\lab3\\example_final.docx\")\r\n\r\nprint(\"pdf ready!\")\r\n","sub_path":"lab3/forma.py","file_name":"forma.py","file_ext":"py","file_size_in_byte":10654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"117081236","text":"import tkinter as tk\n\n\nclass Test():\n def __init__(self):\n self.root = tk.Tk()\n self.root.geometry(\"250x100\")\n self.buttonA = tk.Button(self.root,\n text=\"Color\",\n bg=\"blue\",\n fg=\"red\")\n\n self.buttonB = tk.Button(self.root,\n text=\"Click to change color\",\n command=self.changeColor)\n self.buttonA.pack(side=tk.LEFT)\n self.buttonB.pack(side=tk.RIGHT)\n self.root.mainloop()\n\n def changeColor(self):\n self.buttonA.configure(bg=\"yellow\")\n\n\napp = Test()\n","sub_path":"button_color.py","file_name":"button_color.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"207311995","text":"import json\nimport csv\nimport re\nimport traceback\n\n'''\nProcessing free agent signings from a summary csv and generating a league file\n'''\n\ndef main():\n ''' \n ------------------------------------------\n CHANGE FILENAME HERE\n '''\n league_file = 'currentExport.json'\n fa_file = 'faSummary.csv'\n '''\n ------------------------------------------\n '''\n\n # Open json export file\n with open(league_file, 'r', encoding='utf-8-sig') as read_file:\n export = json.load(read_file)\n \n \n rows = list() # will store csv rows in a 2d list\n\n # Open csv signings file\n with open(fa_file, 'r') as read_file:\n reader = csv.reader(read_file, delimiter = ',', quotechar = '\"')\n \n for row in reader:\n rows.append(row)\n\n keys = rows[0] # for converting signings to list of dictionaries\n signings = list()\n\n for row in rows[1:]:\n x = { key : row[keys.index(key)] for key in keys }\n if x['Wave Raw Output'] != \"\": # anyone who didn't sign has empty string for this field\n signings.append(x) # store the players who did sign\n \n year = int(export['meta']['phaseText'][:export['meta']['phaseText'].index(' ')])\n \n for signing in signings: # convert summary column Yrs/$M into salary amount, contract years & any options\n \n # Anything between '/' and 'M' is our annual salary\n signing['amount'] = int(float(signing['Yrs/$M'][signing['Yrs/$M'].index(\"/\") + 1:signing['Yrs/$M'].index(\"M\")]) * 1000)\n \n # + means there is a player/team option\n if '+' in signing['Yrs/$M']:\n signing['exp'] = int(signing['Yrs/$M'][:signing['Yrs/$M'].index(\"+\")]) + year\n else:\n signing['exp'] = int(signing['Yrs/$M'][:signing['Yrs/$M'].index(\"/\")]) + year\n \n # Any options/NTCs are stored in the player 'loc' field\n signing['locModifier'] = ''\n multi_option = re.search('\\d((PO)|(TO))', signing['Yrs/$M']) # A number before PO/TO means it is for multiple years\n if multi_option:\n option_start = signing['exp'] + 1 # options start the year after expiry\n option_end = int(multi_option.group(0)[0]) + signing['exp'] # get number of option years and add it to expiry year\n option_type = multi_option.group(0)[1:] # get option type (PO/TO) from matched pattern\n # add option info to our modifier\n signing['locModifier'] += ' - ' + str(option_start)[2:] + '-' + str(option_end)[2:] + ' ' + option_type\n 
\n elif 'PO' in signing['Yrs/$M']: # as above but for single year options\n option_start = signing['exp'] + 1\n option_type = 'PO'\n \n signing['locModifier'] += ' - ' + str(option_start)[2:] + ' ' + option_type\n \n elif 'TO' in signing['Yrs/$M']: # as above but for single year options\n option_start = signing['exp'] + 1\n option_type = 'TO'\n \n signing['locModifier'] += ' - ' + str(option_start)[2:] + ' ' + option_type\n \n if 'NTC' in signing['Yrs/$M']:\n signing['locModifier'] += ' NTC'\n if 'NLC' in signing['Yrs/$M']:\n signing['locModifier'] += ' NLC'\n\n # Make a team name:tid dictionary to use in output file\n teams = { team['region'] + ' ' + team['name'] : team['tid'] for team in export['teams'] }\n\n for player in export['players']: # update each player's contract info in the export\n \n for signing in signings:\n \n if player['firstName'] + ' ' + player['lastName'] == signing['Player name']: # check if the player is in our list of signings\n player['tid'] = teams[signing['Team']]\n player['contract']['amount'] = signing['amount']\n player['contract']['exp'] = signing['exp']\n if '-' in player['born']['loc']: # loc has a '-' iff a player had options stored from a previous contract\n player['born']['loc'] = player['born']['loc'][:player['born']['loc'].index('-')] # remove previous options\n player['born']['loc'] += signing['locModifier'] # add new options info\n \n # write new league file\n with open('ibfExport.json', 'w') as outfile:\n json.dump(export, outfile)\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n traceback.print_exc()\n finally:\n input('Press any key to close')\n","sub_path":"tools/fa.py","file_name":"fa.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"63819173","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nImplementation of model from \"End-to-end video background subtraction with 3d\nconvolutional neural networks\" by Sakkos et al.\n\nCreated on Mon Feb 24, 2018\n@author: Juan Terven\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass BackSubModel3d_2(nn.Module):\n def __init__(self):\n super(BackSubModel3d_2, self).__init__()\n\n self.crp1 = nn.Sequential(\n nn.Conv3d(3, 64, (3, 3, 3), stride=1, padding=(1, 1, 1)),\n nn.ReLU(),\n nn.MaxPool3d(kernel_size=(1, 2, 2))\n )\n\n self.crp2 = nn.Sequential(\n nn.Conv3d(64, 128, (3, 3, 3), stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool3d(kernel_size=(1, 2, 2))\n )\n \n self.crp3 = nn.Sequential(\n nn.Conv3d(128, 256, (16, 3, 3), stride=1, padding=(0, 1, 1)),\n nn.ReLU(),\n nn.MaxPool3d(kernel_size=(1, 2, 2))\n )\n\n self.crp4 = nn.Sequential(\n nn.Conv2d(256, 512, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(512, 512, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(512, 512, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n\n self.cr = nn.Sequential(\n nn.Conv2d(512, 512, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(512, 512, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(512, 512, 3, stride=1, padding=1),\n nn.ReLU(),\n )\n\n# self.us1 = nn.ConvTranspose2d(256, 16, kernel_size=4,\n# stride=2, padding=1)\n self.us2 = nn.ConvTranspose2d(256, 16, kernel_size=8,\n stride=8, padding=0)\n self.us3 = nn.ConvTranspose2d(512, 16, kernel_size=16,\n stride=16, padding=0)\n self.us4 = nn.ConvTranspose2d(512, 16, kernel_size=16,\n stride=16, padding=0)\n\n self.fc = nn.Conv2d(48, 2, kernel_size=1)\n self.softmax = nn.LogSoftmax(1)\n\n def forward(self, x):\n \"\"\" Forward pass\"\"\"\n x1 = x[:, :, 0:4, :, :]\n x2 = x[:, :, 2:6, :, :]\n x3 = x[:, :, 4:8, :, :]\n x4 = x[:, :, 6:10, :, :]\n\n# print('x1:', x1.shape)\n\n crp1_1 = self.crp1(x1)\n# print('crp1_1:', crp1_1.shape)\n crp1_2 = self.crp1(x2)\n# print('crp1_2:', crp1_2.shape)\n crp1_3 = self.crp1(x3)\n# print('crp1_3:', crp1_3.shape)\n crp1_4 = self.crp1(x4)\n# print('crp1_4:', crp1_4.shape)\n\n # concatenate crp1_1 and crp1_2 in crp_12 and pass it to crp2\n crp_12 = torch.cat((crp1_1, crp1_2), dim=2)\n# print('crp_12:', crp_12.shape)\n crp2_1 = self.crp2(crp_12)\n# print('crp2_1:', crp2_1.shape)\n\n # concatenate crp1_3 and crp1_4 in crp_34 and pass it to crp2\n crp_34 = torch.cat((crp1_3, crp1_4), dim=2)\n# print('crp_34:', crp_34.shape)\n crp2_2 = self.crp2(crp_34)\n# print('crp2_2:', crp2_2.shape)\n\n # concatenate crp2_1 and crp2_2 in crp2 and input it to crp3\n crp2 = torch.cat((crp2_1, crp2_2), dim=2)\n# print('crp2:', crp2.shape)\n crp3 = self.crp3(crp2)\n #crp3 = crp3.view(1, 256, 30, 40)\n crp3 = torch.squeeze(crp3, dim=2)\n# print('crp3:', crp3.shape)\n\n crp4 = self.crp4(crp3)\n# print('crp4:', crp4.shape)\n cr = self.cr(crp4)\n# print('cr:', cr.shape)\n\n # Upsamplings\n# us1 = self.us1(crp2)\n# print('us1:', us1.shape)\n us2 = self.us2(crp3)\n# print('us2:', us2.shape)\n us3 = self.us3(crp4)\n# print('us3:', us3.shape)\n us4 = self.us4(cr)\n# print('us4:', us4.shape)\n\n # concatenate us2, us3, us4 into us\n us = torch.cat((us2, us3, us4), dim=1)\n# print('us:', us.shape)\n out = self.fc(us)\n\n# print('fc:', out.shape)\n out = self.softmax(out)\n# print('out:', out.shape)\n\n return out\n","sub_path":"model3d_2.py","file_name":"model3d_2.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"280843395","text":"import os\nfrom PIL import ImageColor, Image, ImageDraw, ImageFont\n\nos.chdir('/Users/ilja/Dropbox/atbs')\n\nprint(\"-------------------- COLORS --------------------\")\nred = ImageColor.getcolor('red', 'RGBA')\nprint(red)\nchoc = ImageColor.getcolor('chocolate', 'RGBA')\nprint(choc)\n\n# pillow generally expects a tuple with 4 coordinates (x_min, y_min, x_max, y_max)\n# NOTE that min is including, but max is excluding the actual pixel!\n\n\nprint(\"-------------------- BASICS --------------------\")\ncat = Image.open('automate_online-materials/zophie.png')\nprint(cat)\nprint(cat.size)\nprint(cat.filename)\nprint(cat.format)\nprint(cat.format_description)\n\n# save with a different image extension\ncat.save('zophie2.jpg')\n\nprint(\"-------------------- NEW --------------------\")\nim = Image.new('RGBA', (100, 200), 'purple')\nim.save('purple.png')\n\n\nprint(\"-------------------- CROP --------------------\")\ncropped_cat = cat.crop((355, 345, 565, 560))\ncropped_cat.save('cropped_cat.jpg')\n\n\nprint(\"-------------------- C/P --------------------\")\n# copy to create a new image\ncat2 = cat.copy()\n\n# paste one on top of the other\n# NOTE paste modifies an image in place\nprint(cropped_cat.size)\ncat2.paste(cropped_cat, (0, 0))\ncat2.paste(cropped_cat, (400, 500))\ncat2.save('weird_cat.jpg')\n\n# lets fill the entire image with cat's photos\nw, h = cat.size\nw2, h2 = cropped_cat.size\ncat3 = cat.copy()\nfor left in range(0, w, w2):\n for top in range(0, h, h2):\n cat3.paste(cropped_cat, (left, top))\ncat3.save('a_lot_of_cats.jpg')\n\n\nprint(\"-------------------- RESIZE, ROTATE, FLIP --------------------\")\nresized_cat = 
cat.resize((int(w/20), int(h/10)))\nresized_cat.save('resized_cat.png')\n\nrot_cat = cat.rotate(90).save('rot_cat.png')\n\nrot_cat2 = cat.rotate(6).save('rot_cat2.png')\n\n# this will expand the size of the image so that the entire rotated image can be filled in, without cropping parts of it\nrot_cat3 = cat.rotate(6, expand=True).save('rot_cat3.png')\n\nflip_cat = cat.transpose(Image.FLIP_LEFT_RIGHT).save('flipped_cat.png')\n\n\nprint(\"-------------------- CHANGE PIXELS --------------------\")\nim = Image.new('RGBA', (100, 100))\n\n# get\np = im.getpixel((0, 0))\nprint(p)\n\n# put\nfor x in range(50):\n for y in range(75):\n im.putpixel((x, y), (210, 210, 210))\nfor x in range(25):\n for y in range(25):\n im.putpixel((x, y), ImageColor.getcolor('yellow', 'RGBA'))\nim.save('colorz.png')\n\n\nprint(\"-------------------- DRAWING ON IMAGES --------------------\")\nim = Image.new('RGBA', (200, 200), 'white')\ndraw = ImageDraw.Draw(im) # receive the draw object\ndraw.line([(0, 0), (199, 10), (199, 199), (0, 0)], fill='black')\ndraw.rectangle([20, 30, 60, 90], fill='blue')\nfor i in range(100, 200, 10):\n draw.line([i, 0, 200, i-100], fill='green') # note we can pass in tuples or without\n draw.line([(i-1, 1), (199, i-99)], fill='yellow') # note we can pass in tuples or without\n\n# add text - standard typeface & size\ndraw.text((20, 150), 'hello world', fill='purple')\n\n# custom typeface & size\nfontsFolder = '/System/Library/Fonts/Supplemental'\narialFont = ImageFont.truetype(os.path.join(fontsFolder, 'arial.ttf'), 32)\ndraw.text((100, 150), 'Howdy', fill='gray', font=arialFont)\n\nim.save('drawing.png')\n","sub_path":"19_images.py","file_name":"19_images.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"118348397","text":"from store.utils import cookieCart\nfrom django.shortcuts import render\nfrom store.models import *\nfrom django.http import JsonResponse\nimport json\nimport datetime\nfrom django.db.models import Q\n\nfrom . 
utils import cookieCart, cartData, guestOrder\n\n# Create your views here.\n\ndef store(request):\n\n\tcart = cartData(request)\n\tcartItems = cart[\"cartItems\"]\n\t\t\n\tproducts = Product.objects.all()\n\n\t## Search Functionality\n\tquery= request.GET.get('q')\n\tif query is not None:\n\t\tlookups= Q(name__icontains=query) | Q(id__icontains=query)\n\t\tproducts= products.filter(lookups).distinct()\n\n\tcontext = {\"products\":products,\n\t\t\t\t\"cartItems\":cartItems\n\t\t\t}\n\treturn render(request, \"store.html\", context)\n\ndef cart(request):\n\n\tcart = cartData(request)\n\titems = cart[\"items\"]\n\torder = cart[\"order\"]\n\tcartItems = cart[\"cartItems\"]\n\t\t\n\tcontext = {\n\t\t'items':items,\n\t\t'order':order,\n\t\t\"cartItems\":cartItems\n\t}\n\treturn render(request, \"cart.html\", context)\n\ndef checkout(request):\n\t\n\tcart = cartData(request)\n\titems = cart[\"items\"]\n\torder = cart[\"order\"]\n\tcartItems = cart[\"cartItems\"]\n\n\tcontext = {\n\t\t'items':items,\n\t\t'order':order,\n\t\t\"cartItems\":cartItems\n\t}\n\treturn render(request, \"checkout.html\", context)\n\ndef updateItem(request):\n\tdata = json.loads(request.body)\n\tproductId = data['productId']\n\taction = data['action']\n\tprint('product ID ', productId)\n\tprint('Action', action)\n\tcustomer = request.user.customer\n\tproduct = Product.objects.get(id=productId)\n\torder, created = Order.objects.get_or_create(customer=customer, complete =False)\n\n\torderItem, created = OrderItem.objects.get_or_create(order=order, product= product)\n\n\tif action == \"add\":\n\t\torderItem.quantity += 1\n\telif action== \"remove\":\n\t\torderItem.quantity -= 1\n\n\torderItem.save()\n\n\tif orderItem.quantity <= 0:\n\t\torderItem.delete()\n\n\treturn JsonResponse('Item has been added', safe=False)\n\ndef processOrder(request):\n\ttransaction_id = datetime.datetime.now().timestamp()\n\tdata = json.loads(request.body)\n\n\tif request.user.is_authenticated:\n\t\tcustomer = request.user.customer\n\t\torder, created = Order.objects.get_or_create(customer=customer, complete =False)\n\n\telse:\n\t\tcustomer, order = guestOrder(request, data)\n\n\ttotal = float(data['form']['total'])\n\torder.transaction_id = transaction_id\n\t\n\tif total == float(order.get_cart_total):\n\t\torder.complete = True\n\torder.save()\n\n\tif order.shipping == True:\n\t\tShippingAdress.objects.create(\n\t\t\tcustomer = customer,\n\t\t\torder = order,\n\t\t\taddress = data['shipping']['address'],\n\t\t\tcity = data['shipping']['city'],\n\t\t\tstate = data['shipping']['state'],\n\t\t\tzipcode = data['shipping']['zipcode'],\n\t\t)\n\n\treturn JsonResponse('Payment successfull!', safe=False)\n","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"184033056","text":"import sys\n\nimport cv2\nimport numpy as np\n\n\ndef main():\n if len(sys.argv) < 7:\n print(f'Error: Expect more arguments.\\n'\n f'Usage: python {__file__} -s source.jpg -t target.jpg -o output.jpg\\n'\n f'if output filename is not provided, \\'output.jpg\\' is default.')\n exit()\n outfilename = ''\n for i in range(len(sys.argv)):\n if sys.argv[i] == '-s':\n sourcefilename = sys.argv[i + 1]\n if sys.argv[i] == '-t':\n targetfilename = sys.argv[i + 1]\n if sys.argv[i] == '-o':\n outfilename = sys.argv[i + 1]\n if outfilename == '':\n outfilename = 'output.jpg'\n sourcefile = cv2.imread(sourcefilename)\n targetfile = 
cv2.imread(targetfilename)\n outputfile = color_transfer(sourcefile, targetfile)\n cv2.imwrite(outfilename, outputfile)\n\n\ndef color_transfer(source, target, sideinfodeci='sideinfodeci.txt'):\n '''\n source, target: both are np.ndarray\n '''\n source_b, source_g, source_r = cv2.split(source)\n (source_mean_r, source_std_r, source_mean_g, source_std_g, source_mean_b, source_std_b) = _get_img_properties( source)\n (target_mean_r, target_std_r, target_mean_g, target_std_g, target_mean_b, target_std_b) = _get_img_properties( target)\n with open(sideinfodeci, 'w') as f:\n f.write('%.4f\\n%.4f\\n%.4f\\n%.4f\\n%.4f\\n%.4f\\n'\n '%.4f\\n%.4f\\n%.4f\\n%.4f\\n%.4f\\n%.4f'\n % (source_mean_r, source_mean_g, source_mean_b, source_std_r, source_std_g, source_std_b,\n target_mean_r, target_mean_g, target_mean_b, target_std_r, target_std_g, target_std_b))\n # work in float so negative intermediates survive, then clip back to 8-bit\n out_r = source_r.astype(dtype=np.float64)\n out_g = source_g.astype(dtype=np.float64)\n out_b = source_b.astype(dtype=np.float64)\n out_r = (target_std_r / source_std_r) * (out_r - source_mean_r) + target_mean_r\n out_g = (target_std_g / source_std_g) * (out_g - source_mean_g) + target_mean_g\n out_b = (target_std_b / source_std_b) * (out_b - source_mean_b) + target_mean_b\n out_r = np.clip(out_r, 0, 255).astype(np.uint8)\n out_g = np.clip(out_g, 0, 255).astype(np.uint8)\n out_b = np.clip(out_b, 0, 255).astype(np.uint8)\n out = cv2.merge((out_b, out_g, out_r))\n return out\n\n\ndef _get_img_properties(img):\n '''\n helper function for color_transfer\n '''\n b, g, r = cv2.split(img)\n mean_r = r.mean()\n std_r = r.std()\n mean_g = g.mean()\n std_g = g.std()\n mean_b = b.mean()\n std_b = b.std()\n return mean_r, std_r, mean_g, std_g, mean_b, std_b\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw4/4107056006-04-color-transfer-info.py","file_name":"4107056006-04-color-transfer-info.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"500448160","text":"# -*- coding: utf-8 -*-\n# Copyright © 2011-2013 Binet Réseau\n# See the LICENCE file for more informations\n\nimport os\nimport os.path\nimport tempfile\nfrom time import sleep\nimport shutil\nimport signal\nimport sys\n\nfrom .common import unittest, get_local_conf\nimport kaoz.hooks\nfrom threading import Lock\n\nclass HooksTestCase(unittest.TestCase):\n\n def setUp(self):\n self.config = get_local_conf()\n\n def tearDown(self):\n pass\n\n def test_config(self):\n \"\"\"Test default configuration\"\"\"\n self.assertTrue(self.config is not None)\n self.assertTrue(self.config.get('irc', 'hook_directory') is not None)\n self.assertEqual(self.config.get('irc', 'hook_directory') , '')\n\n def test_load_hook(self):\n \"\"\"Test loading modules in directory, recursively\"\"\"\n class MockPublisher(object):\n def __init__(self):\n self.calls=0\n self.valid=False\n self.lock = Lock();\n def send(self, channel, message):\n with self.lock:\n self.calls += 1\n self.valid = (channel == '#hook1') and \\\n (message == 'hook1 pubmsg(_, None, None)' or \\\n message == 'hook1 privmsg(_, None, None)')\n # setUp\n tmp_dir_name = tempfile.mkdtemp()\n self.assertEqual(self.config.get('irc', 'hook_directory'), '')\n self.config.set('irc', 'hook_directory', tmp_dir_name)\n os.mkdir(os.path.join(tmp_dir_name, 'more'))\n hook1 = os.path.join(tmp_dir_name, 'hook1.py')\n hook2 = os.path.join(tmp_dir_name, 'hook2.pyc')\n hook3 = os.path.join(tmp_dir_name, 'hook3.sh')\n hook4 = os.path.join(tmp_dir_name, 'hook4.py')\n hook5 = os.path.join(tmp_dir_name, 'hook5.py')\n hook6 = os.path.join(tmp_dir_name, 'more', 'hook6.py')\n with open(hook1, 'w') as f:\n f.write('''\ndef 
pubmsg(publisher, connection, event):\n publisher.send('#hook1', 'hook1 pubmsg(_, %s, %s)' % (str(connection), str(event)))\n''')\n with open(hook2, 'w') as f:\n f.write('''\n#Not really a pyc\ndef pubmsg(publisher, connection, event):\n publisher.send('oops!')\n''')\n with open(hook3, 'w') as f:\n f.write('''\n#!/bin/sh\necho 'Fail!'\n''')\n with open(hook4, 'w') as f:\n f.write('''\ndef pubmsg(publisher, connection, event):\n publisher.send('#hook4', str(1/0))\n''')\n with open(hook5, 'w') as f:\n f.write('''\ndef pubmsg(publisher, connection, event):\n publisher.send('#hook5', 'syntax...\n''')\n with open(hook6, 'w') as f:\n f.write('''\ndef pubmsg(publisher, connection, event):\n publisher.send('#hook6', 'oops!')\n''')\n\n self.assertTrue('hook1' not in sys.modules)\n self.assertTrue('hook4' not in sys.modules)\n hooks = kaoz.hooks.Hooks(self.config)\n hooks.load_hook_modules()\n self.assertTrue('hook1' in sys.modules)\n self.assertTrue('hook2' not in sys.modules)\n self.assertTrue('hook3' not in sys.modules)\n self.assertTrue('hook4' in sys.modules)\n self.assertTrue('hook4' in hooks.modules)\n self.assertTrue('hook5' not in sys.modules)\n self.assertTrue('hook6' not in sys.modules)\n publisher = MockPublisher()\n pmret = hooks.pubmsg(publisher, None, None)\n self.assertTrue(pmret)\n pmret = hooks.privmsg(publisher, None, None)\n self.assertFalse(pmret)\n sleep(0.1) # Test is racy\n with publisher.lock:\n self.assertEqual(publisher.calls, 1)\n self.assertTrue(publisher.valid)\n\n # Add privmsg to one hook and reload\n with open(hook1, 'a') as f:\n f.write('''\ndef privmsg(publisher, connection, event):\n publisher.send('#hook1', 'hook1 privmsg(_, %s, %s)' % (str(connection), str(event)))\n''')\n os.remove(hook4)\n os.kill(os.getpid(), signal.SIGUSR1)\n\n self.assertTrue('hook1' in sys.modules)\n self.assertTrue('hook2' not in sys.modules)\n self.assertTrue('hook3' not in sys.modules)\n # TODO; forcefully unloading a module is not strictly needed\n #self.assertTrue('hook4' not in sys.modules)\n self.assertTrue('hook4' not in hooks.modules)\n self.assertTrue('hook5' not in sys.modules)\n self.assertTrue('hook6' not in sys.modules)\n publisher = MockPublisher()\n pmret = hooks.pubmsg(publisher, None, None)\n self.assertTrue(pmret)\n pmret = hooks.privmsg(publisher, None, None)\n self.assertTrue(pmret)\n sleep(0.1) # Test is racy\n with publisher.lock:\n self.assertEqual(publisher.calls, 2)\n self.assertTrue(publisher.valid)\n\n # tearDown\n shutil.rmtree(tmp_dir_name)\n del sys.modules['hook1']\n del hook1\n # TODO; see above\n del sys.modules['hook4']\n del hook4\n\n","sub_path":"kaoz/tests/test_hooks.py","file_name":"test_hooks.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"228598814","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: AC4Fun\n@license: huifangshuyuan.com\n@contact: ximuzmzj@gmail.com\n@file: 986. Interval List Intersections.py\n@time: 2022-01-10 07:36\n@desc: doc\nYou are given two lists of closed intervals, firstList and secondList, where firstList[i] = [starti, endi] and secondList[j] = [startj, endj]. Each list of intervals is pairwise disjoint and in sorted order.\n\nReturn the intersection of these two interval lists.\n\nA closed interval [a, b] (with a <= b) denotes the set of real numbers x with a <= x <= b.\n\nThe intersection of two closed intervals is a set of real numbers that are either empty or represented as a closed interval. 
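Concretely, [a, b] and [c, d] intersect in [max(a, c), min(b, d)], which is non-empty exactly when max(a, c) <= min(b, d); that is the check the solution below performs. 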
For example, the intersection of [1, 3] and [2, 4] is [2, 3].\n\n\n\nExample 1:\n\n\nInput: firstList = [[0,2],[5,10],[13,23],[24,25]], secondList = [[1,5],[8,12],[15,24],[25,26]]\nOutput: [[1,2],[5,5],[8,10],[15,23],[24,24],[25,25]]\nExample 2:\n\nInput: firstList = [[1,3],[5,9]], secondList = []\nOutput: []\n\n\nConstraints:\n\n0 <= firstList.length, secondList.length <= 1000\nfirstList.length + secondList.length >= 1\n0 <= start_i < end_i <= 10^9\nend_i < start_(i+1)\n0 <= start_j < end_j <= 10^9\nend_j < start_(j+1)\n\"\"\"\n\nfrom typing import List\n\n\n# Find the intersections of the two interval lists; the intervals within each list are pairwise disjoint and sorted\nclass Solution:\n def intervalIntersection(self, firstList: List[List[int]], secondList: List[List[int]]) -> List[List[int]]:\n first = 0\n second = 0\n result = []\n while first < len(firstList) and second < len(secondList):\n low = max(firstList[first][0], secondList[second][0])\n high = min(firstList[first][1], secondList[second][1])\n if low <= high:\n result.append([low, high])\n if firstList[first][1] <= secondList[second][1]:\n first += 1\n else:\n second += 1\n return result\n\n\n","sub_path":"leetcode/986. Interval List Intersections.py","file_name":"986. Interval List Intersections.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"284404578","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\n\nclass Ocean:\n\n def __init__(self, init_state):\n raise NotImplementedError\n\n def __str__(self):\n raise NotImplementedError\n\n def gen_next_quantum(self):\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n n_quantums = int(sys.stdin.readline())\n n_rows, n_clms = [int(i) for i in sys.stdin.readline().split()]\n init_state = []\n for i in range(n_rows):\n line = [int(i) for i in sys.stdin.readline().split()]\n init_state.append(line)\n\n ocean = Ocean(init_state=init_state)\n for _ in range(n_quantums):\n ocean = ocean.gen_next_quantum()\n print(ocean)\n","sub_path":"python/labs/lab02/ocean/ocean.py","file_name":"ocean.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"623603546","text":"# -*- coding: utf-8 -*-\n\nfrom gluon.sqlhtml import FormWidget\nfrom gluon.sqlhtml import UploadWidget\n\n# ----------------------------------------------------------------------------------------------------------------------\n# dal represent/format helpers\n# ----------------------------------------------------------------------------------------------------------------------\n# https://mkaz.tech/python-string-format.html\n# https://pyformat.info/#number\n\n\ndef dal_represent_number(v, r):\n return \"{:,}\".format(v) if v is not None else v\n\n\ndef dal_represent_percent(v, r):\n return \"{:.2%}\".format(v / 100) if v is not None else v\n\n\n# https://regex101.com/\n# http://stackoverflow.com/questions/16699007/regular-expression-to-match-standard-10-digit-phone-number\ndal_regex_phone_num = '^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. 
]*(\\d{4})(?: *x(\\d+))?\\s*$'\ndal_regex_na_phone_num = '^(?:(?:\\+?1\\s*(?:[.-]\\s*)?)?(?:\\(\\s*([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\\s*\\)|([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9]))\\s*(?:[.-]\\s*)?)?([2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\\s*(?:[.-]\\s*)?([0-9]{4})(?:\\s*(?:#|x\\.?|ext\\.?|extension)\\s*(\\d+))?$'\ndal_regex_ssn = '^\\d{3}-\\d{2}-\\d{4}$'\n\ndal_list_genders = [('F', 'Female'), ('M', 'Male')]\ndal_list_booleans = [('Y', 'Yes'), ('N', 'No')]\n\ndal_list_states = [\n ('AL', 'Alabama, USA'),\n ('AK', 'Alaska, USA'),\n ('AB', 'Alberta, CAN'),\n ('AS', 'American Samoa, USA'),\n ('AZ', 'Arizona, USA'),\n ('AR', 'Arkansas, USA'),\n ('BC', 'British Columbia, CAN'),\n ('CA', 'California, USA'),\n ('CO', 'Colorado, USA'),\n ('CT', 'Connecticut, USA'),\n ('DE', 'Delaware, USA'),\n ('FL', 'Florida, USA'),\n ('GA', 'Georgia, USA'),\n ('GU', 'Guam, USA'),\n ('HI', 'Hawaii, USA'),\n ('ID', 'Idaho, USA'),\n ('IL', 'Illinois, USA'),\n ('IN', 'Indiana, USA'),\n ('IA', 'Iowa, USA'),\n ('KS', 'Kansas, USA'),\n ('KY', 'Kentucky, USA'),\n ('LA', 'Louisiana, USA'),\n ('ME', 'Maine, USA'),\n ('MB', 'Manitoba, CAN'),\n ('MD', 'Maryland, USA'),\n ('MA', 'Massachusetts, USA'),\n ('MI', 'Michigan, USA'),\n ('MN', 'Minnesota, USA'),\n ('MS', 'Mississippi, USA'),\n ('MO', 'Missouri, USA'),\n ('MT', 'Montana, USA'),\n ('NE', 'Nebraska, USA'),\n ('NV', 'Nevada, USA'),\n ('NB', 'New Brunswick, CAN'),\n ('NH', 'New Hampshire, USA'),\n ('NJ', 'New Jersey, USA'),\n ('NM', 'New Mexico, USA'),\n ('NY', 'New York, USA'),\n ('NL', 'Newfoundland and Labrador, CAN'),\n ('NC', 'North Carolina, USA'),\n ('ND', 'North Dakota, USA'),\n ('MP', 'Northern Mariana Islands, USA'),\n ('NT', 'Northwest Territories, CAN'),\n ('NS', 'Nova Scotia, CAN'),\n ('NU', 'Nunavut, CAN'),\n ('OH', 'Ohio, USA'),\n ('OK', 'Oklahoma, USA'),\n ('ON', 'Ontario, CAN'),\n ('OR', 'Oregon, USA'),\n ('PA', 'Pennsylvania, USA'),\n ('PE', 'Prince Edward Island, CAN'),\n ('PR', 'Puerto Rico, USA'),\n ('QC', 'Quebec, CAN'),\n ('RI', 'Rhode Island, USA'),\n ('SK', 'Saskatchewan, CAN'),\n ('SC', 'South Carolina, USA'),\n ('SD', 'South Dakota, USA'),\n ('TN', 'Tennessee, USA'),\n ('TX', 'Texas, USA'),\n ('VI', 'U.S. 
Virgin Islands, USA'),\n ('UT', 'Utah, USA'),\n ('VT', 'Vermont, USA'),\n ('VA', 'Virginia, USA'),\n ('WA', 'Washington, USA'),\n ('DC', 'Washington DC, USA'),\n ('WV', 'West Virginia, USA'),\n ('WI', 'Wisconsin, USA'),\n ('WY', 'Wyoming, USA'),\n ('YT', 'Yukon Territory, CAN'),\n]\n\n\nclass Titleize(object):\n '''Field(..., requires=Titleize())'''\n\n def __call__(self, value):\n # return (value.title(), None)\n articles = ('in', 'the', 'a', 'an', 'of', 'is')\n return (' '.join([w if w in articles else w.title() if w.islower() else w for w in value.split()]), None)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# custom forms\n# https://groups.google.com/d/msg/web2py/1yCGgKANssE/MvOL4mUqRQ4J\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef widget(type='string', placeholder=''):\n '''Allow Field('name', widget=widget('string', 'my placeholder text'))'''\n # https://groups.google.com/d/msg/web2py/CTsUjEFUcR4/Vy-wIekEBAAJ\n # could also do https://groups.google.com/d/msg/web2py/VSr2oLNnozg/5AlMTNzdGgkJ\n return lambda field, value: SQLFORM.widgets[type].widget(field, value, _placeholder=placeholder)\n\n\ndef datepicker_widget(placeholder='', **settings):\n '''Datepicker plugin widget, also see in starter.js'''\n\n def widget(field, value, **attributes):\n\n default = {'_value': value}\n\n attributes = FormWidget._attributes(field, default, **attributes)\n attributes['_class'] = 'form-control date'\n\n data_attributes = {}\n data_attributes['date-format'] = 'dd.mm.yyyy'\n data_attributes['date-week-start'] = 1\n data_attributes['date-calendar-weeks'] = True\n for item in settings.items():\n data_attributes['date-'+item[0].replace('_', '-')] = item[1]\n\n return INPUT(\n data=data_attributes,\n _placeholder=placeholder,\n **attributes\n )\n\n return widget\n\n\ndef clockpicker_widget(placeholder='', **settings):\n '''Clockpicker plugin widget, also see in starter.js'''\n\n def widget(field, value, **attributes):\n\n default = {'_value': value}\n attributes = FormWidget._attributes(field, default, **attributes)\n attributes['_class'] = 'form-control time'\n\n data_attributes = {}\n for item in settings.items():\n data_attributes[item[0].replace('_', '-')] = item[1]\n\n return INPUT(\n data=data_attributes,\n _placeholder=placeholder,\n **attributes\n )\n\n return widget\n\n\ndef upload_image_widget(**settings):\n \"\"\"Custom upload image widget with bootstrap style button and responsive image thumbnail\n \"\"\"\n\n response.files.insert(1, URL('static', 'plugins/croppie/croppie.css'))\n response.files.insert(2, URL('static', 'plugins/croppie/croppie.min.js'))\n response.files.insert(3, URL('static', 'plugins/croppie/upload_image_widget.js'))\n\n def widget(field, value, download_url=None, **attributes):\n \"\"\"Generates an INPUT file tag.\n\n Optionally provides an A link to the file, including a checkbox so\n the file can be deleted.\n\n All is wrapped in a DIV.\n\n see also: `FormWidget.widget`\n\n Args:\n field: the field\n value: the field value\n download_url: url for the file download (default = None)\n \"\"\"\n\n modal = XML(\"\"\"\n
        <!-- Croppie image-edit modal; a minimal Bootstrap 3 skeleton (the original tags were stripped, so ids and classes here are a reconstruction) -->\n
        <div id=\"croppie-modal\" class=\"modal fade\" tabindex=\"-1\" role=\"dialog\">\n
          <div class=\"modal-dialog\" role=\"document\">\n
            <div class=\"modal-content\">\n
              <div class=\"modal-header\">\n
                <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">&times;</span></button>\n
                <h4 class=\"modal-title\">Edit image</h4>\n
              </div>\n
              <div class=\"modal-body\">\n
                <div id=\"croppie-container\"></div>\n
              </div>\n
              <div class=\"modal-footer\">\n
                <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n
                <button type=\"button\" class=\"btn btn-primary\" id=\"croppie-crop\">Crop image</button>\n
              </div>\n
            </div>\n
          </div>\n
        </div>\n
\n \"\"\")\n\n # Input button styling\n # https://stackoverflow.com/questions/11235206/twitter-bootstrap-form-file-element-upload-button\n default = dict(\n _type='file',\n _style='display: none',\n _onchange=\"$('#upload-file-info').html(this.files[0].name);\"\n \"initCroppie(this);\"\n )\n attributes = UploadWidget._attributes(field, default, **attributes)\n\n inp = DIV(\n LABEL('Choose image...', INPUT(**attributes), _class='btn btn-default', _for=attributes['_id'], _role='button'),\n SPAN(_class='label label-default', _id='upload-file-info', _style='margin-left: 5px;'),\n )\n\n if download_url and value:\n if not UploadWidget.is_image(value):\n raise Exception('Only images supported: set validator for the field \"requires = IS_IMAGE()\"')\n\n if callable(download_url):\n url = download_url(value)\n else:\n url = f'{download_url}/{value}'\n delete_button, image_label, br = '', '', ''\n image = IMG(_src=url, _height='40px', _class='img-responsive')\n\n requires = attributes[\"requires\"]\n\n # Add delete button if image upload field may be empty\n if requires == [] or isinstance(requires, IS_EMPTY_OR):\n br = BR()\n # Delete image button changes #image-label html content\n delete_button = LABEL(INPUT(_type='checkbox',\n _style='display: none',\n _name=field.name + UploadWidget.ID_DELETE_SUFFIX,\n _id=field.name + UploadWidget.ID_DELETE_SUFFIX),\n I(_class='fa fa-times'),\n _class='btn btn-danger btn-sm pull-right',\n _role='checkbox',\n _title='Delete',\n _autocomplete='off',\n _style='border: 0;',\n _onmouseup=\"var img=$('#image-label'); \"\n \"if (img.html()==='') \"\n \"{img.html('Marked to delete');} \"\n \"else {img.html('');}\",\n data={'toggle': 'input'})\n image_label = LABEL(_id='image-label', _class='label label-danger')\n\n inp = DIV(\n inp,\n BR(),\n DIV(\n DIV(\n delete_button,\n # Download image button\n A(I(_class='glyphicon glyphicon-cloud-download'),\n _href=url,\n _class='btn btn-primary btn-sm pull-right',\n _role='button',\n _title='Download',\n _style='margin-right: 5px; border: 0;',\n ),\n br,\n image_label,\n _class='box-header box-tools pull-right',\n ),\n DIV(image, _class='box-body'),\n _class='box box-solid',\n ),\n )\n\n return DIV(modal, inp)\n\n return widget\n\n\ndef upload_image_represent(**settings):\n \"\"\"Custom image represent function with responsive image thumbnail\n \"\"\"\n\n def represent(field, value, download_url=None):\n \"\"\"How to represent the file:\n\n - with download url and if it is an image: \n - otherwise with download url: file\n - otherwise: file\n\n Args:\n field: the field\n value: the field value\n download_url: url for the file download (default = None)\n \"\"\"\n\n inp = current.T(UploadWidget.GENERIC_DESCRIPTION)\n\n if download_url and value:\n if callable(download_url):\n url = download_url(value)\n else:\n url = download_url + '/' + value\n if UploadWidget.is_image(value):\n inp = IMG(_src=url, _height='40px', _class='img-responsive img-thumbnail')\n inp = A(inp, _href=url)\n\n return inp\n\n return represent\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Sidebar menu\n# ----------------------------------------------------------------------------------------------------------------------\ndef sidebar_menu_item(label, url=None, icon='link'):\n '''\n
renders an AdminLTE sidebar entry; with a url it yields a plain link item, otherwise a treeview header, e.g.:\n
      • About (link item, flagged 'active' when url == URL())
  • \n Admin \n '''\n\n if url:\n active = 'active' if url == URL() else None\n return LI(\n A(\n (I(' ', _class='fa fa-%s' % icon), SPAN(T(label))),\n _href=url\n ),\n _class=active\n )\n else:\n return A(\n (\n I(' ', _class='fa fa-%s' % icon),\n SPAN(T(label)),\n SPAN(\n I(\n ' ', \n _class='fa fa-angle-left pull-right'\n ),\n _class='pull-right-container'\n )\n ),\n _href=\"#\"\n )\n\n\n# this is the main application menu add/remove items as required\n# original response menu in layout.html\n#\n#\n# def menu_item(label, controller, action, icon='link', args=[], user_signature=False, submenu=[]):\n# link = URL(controller, action, args=args, user_signature=user_signature)\n# menu_item = ((I(' ', _class='fa fa-%s' % icon), T(label)), link == URL(), link, submenu)\n# return menu_item\n#\n#\n# response.menu = [\n# menu_item('Home', 'default', 'index', icon='home'),\n# menu_item('People', 'person', 'list', icon='home'),\n# menu_item('Dogs', 'dog', 'list', icon='home'),\n# menu_item('Dog Owners', 'dog_owner', 'list', icon='home'),\n# ]\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# User helpers\n# ----------------------------------------------------------------------------------------------------------------------\ndef is_user_member(*roles):\n # @auth.requires(lambda: is_user_member('arg', 'list', 'of', 'roles')\n # if is_user_member('arg', 'list', 'of', 'roles'):\n\n # @auth.requires(lambda: any([auth.has_membership(r) for r in ['list', 'of', 'roles'])) # db lookups!?\n # if auth.user and any(auth.has_membership(r) for r in ['customer_service', 'admin']): # performs potentially 4 database queries\n # if auth.has_membership('customer_service'): # performs two database\n # restrict menu options based on membership\n # https://groups.google.com/d/msg/web2py/bz-mKIFqP1w/eEma0XOyCAAJ\n # https://groups.google.com/forum/#!searchin/web2py/response.menu$20auth.user_id$20auth.has_membership/web2py/E8Krnt9cxB8/xSpuPy8d6M4J\n # https://groups.google.com/forum/#!searchin/web2py/response.menu$20auth.user_id$20auth.has_membership/web2py/GvDAXRIpKA0/sEcPeB8a40oJ\n # https://groups.google.com/forum/#!topic/web2py/8AHYqV_EKy0\n\n user_auth_groups = [x.lower() for x in auth.user_groups.values()]\n required_auth_groups = [x.lower() for x in roles]\n\n if auth.user and any(role in required_auth_groups for role in user_auth_groups):\n return True\n else:\n return False\n\n\ndef user_visibility(*groups):\n \"\"\"in views, in class attribute: {{=user_visibility('list', 'of', 'authorized', 'user_groups')}}\"\"\"\n return 'hidden' if not is_user_member(*groups) else 'visible'\n\n\ndef user_photo(user):\n \"\"\"\n Return user photo or default avatar based on user gender\n :param user: db.auth_user.row\n :return: path to user photo file\n \"\"\"\n\n if user.photo:\n return URL('default', 'download', args=user.photo)\n elif user.sex:\n return URL('static', f'img/avatar_{user.sex.lower()}_1.png')\n else:\n return URL('static', 'img/boxed_bg.png')\n","sub_path":"models/db0_helpers.py","file_name":"db0_helpers.py","file_ext":"py","file_size_in_byte":15934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"574500618","text":"import board\r\nimport digitalio\r\nimport adafruit_character_lcd.character_lcd as character_lcd\r\nimport RPi.GPIO as GPIO\r\nimport GPIO_EX\r\nfrom time import sleep\r\n\r\nlcd_rs = digitalio.DigitalInOut(board.D22)\r\nlcd_en = 
digitalio.DigitalInOut(board.D24)\r\nlcd_d7 = digitalio.DigitalInOut(board.D21)\r\nlcd_d6 = digitalio.DigitalInOut(board.D26)\r\nlcd_d5 = digitalio.DigitalInOut(board.D20)\r\nlcd_d4 = digitalio.DigitalInOut(board.D19)\r\n\r\nlcd_columns = 16\r\nlcd_rows = 2\r\n\r\nlcd = character_lcd.Character_LCD_Mono(lcd_rs,lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows)\r\n\r\nROW0_PIN = 0\r\nROW1_PIN = 1\r\nROW2_PIN = 2\r\nROW3_PIN = 3\r\nCOL0_PIN = 4\r\nCOL1_PIN = 5\r\nCOL2_PIN = 6\r\n\r\nCOL_NUM = 3\r\nROW_NUM = 4\r\n\r\ng_preData = 0\r\n\r\ncolTable = [COL0_PIN, COL1_PIN, COL2_PIN]\r\nrowTable = [ROW0_PIN, ROW1_PIN, ROW2_PIN, ROW3_PIN]\r\n\r\nli = []\r\n\r\npassword = [1,2,3,4]\r\n\r\ndef initTextlcd():\r\n lcd.clear()\r\n lcd.home()\r\n lcd.cursor_position(0,0)\r\n sleep(1.0)\r\n\r\ndef displayText(text='',col=0,row=0):\r\n lcd.cursor_position(col,row)\r\n lcd.message = text\r\n\r\ndef clearTextlcd():\r\n lcd.clear()\r\n lcd.message = 'clear LCD\\nGoodbye!'\r\n sleep(2.0)\r\n lcd.clear()\r\n\r\ndef initKeypad():\r\n for i in range(0, COL_NUM):\r\n GPIO_EX.setup(colTable[i], GPIO_EX.IN)\r\n for i in range(0, ROW_NUM):\r\n GPIO_EX.setup(rowTable[i], GPIO_EX.OUT)\r\n\r\ndef selectRow(rowNum):\r\n for i in range(0, ROW_NUM):\r\n if rowNum == (i + 1):\r\n GPIO_EX.output(rowTable[i], GPIO_EX.HIGH)\r\n sleep(0.001)\r\n else :\r\n GPIO_EX.output(rowTable[i], GPIO_EX.LOW)\r\n sleep(0.001)\r\n return rowNum\r\n\r\ndef readCol():\r\n Keypadstate = -1\r\n for i in range(0, COL_NUM):\r\n inputKey = GPIO_EX.input(colTable[i])\r\n if inputKey:\r\n Keypadstate = Keypadstate + (i + 2)\r\n sleep(0.5)\r\n return Keypadstate\r\n\r\ndef readKeypad():\r\n global g_preData\r\n global li\r\n global password\r\n global res\r\n keyData = -1 \r\n\r\n runningStep = selectRow(1)\r\n row1Data = readCol()\r\n selectRow(0) \r\n sleep(0.001)\r\n if (row1Data != -1):\r\n keyData = row1Data\r\n\r\n if runningStep == 1:\r\n if keyData == -1:\r\n runningStep = selectRow(2)\r\n row2Data = readCol()\r\n selectRow(0)\r\n sleep(0.001)\r\n if (row2Data != -1):\r\n keyData = row2Data + 3\r\n\r\n if runningStep == 2:\r\n if keyData == -1:\r\n runningStep = selectRow(3) \r\n row3Data = readCol() \r\n selectRow(0)\r\n sleep(0.001)\r\n if (row3Data != -1):\r\n keyData = row3Data + 6\r\n \r\n if runningStep == 3:\r\n if keyData == -1:\r\n runningStep = selectRow(4) \r\n row4Data = readCol() \r\n selectRow(0)\r\n sleep(0.001)\r\n if(row4Data ==1):\r\n keyData = \"*\"\r\n elif(row4Data ==2):\r\n keyData = 0\r\n elif(row4Data ==3):\r\n keyData = \"#\"\r\n \r\n sleep(0.1)\r\n\r\n if keyData == -1:\r\n return -1\r\n\r\n if g_preData == keyData:\r\n g_preData = -1\r\n return -1\r\n g_preData = keyData\r\n\r\n print(\"\\r\\nKeypad Data : %s\" % keyData)\r\n\r\n return keyData\r\n\r\ndef main():\r\n GPIO.setwarnings(False)\r\n GPIO.setmode(GPIO.BCM)\r\n initTextlcd()\r\n print(\"start textlcd program ...\")\r\n initKeypad()\r\n print(\"setup keypad pin\")\r\n\r\n try:\r\n while(1):\r\n\r\n keyData = readKeypad()\r\n if (keyData != -1) and (len(li) < 4):\r\n li.append(keyData)\r\n elif len(li) == 4:\r\n li.clear()\r\n if keyData == \"*\":\r\n li.clear()\r\n \r\n if (li == password):\r\n res = \"\\nCORRECT\"\r\n elif ((li != password) and (len(li) == 4)):\r\n res = \"\\nFAIL\"\r\n else:\r\n res = \"\\n\"\r\n\r\n line = str(li) + res\r\n lcd.clear()\r\n displayText(line,0,0)\r\n sleep(1)\r\n \r\n except KeyboardInterrupt:\r\n clearTextlcd()\r\n GPIO.cleanup()\r\n\r\nif __name__ == '__main__':\r\n 
main()","sub_path":"IoT/raspi/Exercise5_1.py","file_name":"Exercise5_1.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"205813204","text":"# grid search sarima hyperparameters for monthly mean temp dataset\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom sklearn.metrics import mean_squared_error\nfrom multiprocessing import cpu_count\nfrom warnings import catch_warnings, filterwarnings\nfrom joblib import Parallel, delayed\nfrom pandas import read_csv \nfrom math import sqrt, isinf\n\n# ===== training model to search best parameter (grid search) ===== \n# one-step sarima forecast\ndef sarima_forecast(data_train, config, n_test):\n\t# lengthData = len(data_train.values)\n\torder, sorder, trend = config\n\t# define model\n\tmodel = SARIMAX(data_train, order=order, seasonal_order=sorder, trend=trend, enforce_stationarity=False, enforce_invertibility=False)\n\t# fit model\n\tmodel_fit = model.fit(disp=False)\n\t# make one step forecast\n\tpredictions = model_fit.predict(len(data_train)-n_test, len(data_train)-1)\n\treturn predictions\n\n# root mean squared error or rmse\ndef measure_rmse(actual, predicted):\n\treturn sqrt(mean_squared_error(actual, predicted))\n\n# split a univariate dataset into train/test sets\ndef train_test_split(data, n_test):\n\treturn data[:-n_test], data[-n_test:]\n\n# walk-forward validation for univariate data\ndef walk_forward_validation(data, n_test, cfg):\n\t# seed history with training dataset\n\tdata_train = [x for x in data]\n\ttest = data[-n_test:]\n\t# fit model and make forecast for history\n\tpredictions = sarima_forecast(data_train, cfg, n_test)\n\t# estimate prediction error\n\terror = measure_rmse(test, predictions)\n\treturn error\n\n# score a model, return None on failure\ndef score_model(data, n_test, cfg, debug=False):\n\tresult = None\n\t# convert config to a key\n\tkey = str(cfg)\n\t# show all warnings and fail on exception if debugging\n\tif debug:\n\t\tresult = walk_forward_validation(data, n_test, cfg)\n\telse:\n\t\t# one failure during model validation suggests an unstable config\n\t\ttry:\n\t\t\t# never show warnings when grid searching, too noisy\n\t\t\twith catch_warnings():\n\t\t\t\tfilterwarnings(\"ignore\")\n\t\t\t\tresult = walk_forward_validation(data, n_test, cfg)\n\t\texcept:\n\t\t\terror = None\n\t# check for an interesting result\n\t# if result is not None:\n\t# \tprint(' > Model[%s] %.3f' % (key, result))\n\treturn (key, result)\n\n# grid search configs\ndef grid_search(data, cfg_list, n_test, parallel=True):\n\tscores = None\n\tif parallel:\n\t\t# execute configs in parallel\n\t\texecutor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')\n\t\ttasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)\n\t\tscores = executor(tasks)\n\telse:\n\t\tscores = [score_model(data, n_test, cfg) for cfg in cfg_list]\n\t# remove empty results\n\tscores = [r for r in scores if r[1] != None]\n\t# sort configs by error, asc\n\tscores.sort(key=lambda tup: tup[1])\n\treturn scores\n\n# create a set of sarima configs to try\ndef sarima_configs(seasonal=[0]):\n\tmodels = list()\n\t# define config lists\n\tp_params = [0, 1, 2]\n\td_params = [0, 1]\n\tq_params = [0, 1, 2]\n\tt_params = ['n', 'c', 't', 'ct']\n\tP_params = [0, 1, 2]\n\tD_params = [0, 1]\n\tQ_params = [0, 1, 2]\n\tm_params = seasonal\n\t# create config instances\n\tfor p in p_params:\n\t\tfor d in d_params:\n\t\t\tfor q in q_params:\n\t\t\t\tfor t in 
t_params:\n\t\t\t\t\tfor P in P_params:\n\t\t\t\t\t\tfor D in D_params:\n\t\t\t\t\t\t\tfor Q in Q_params:\n\t\t\t\t\t\t\t\tfor m in m_params:\n\t\t\t\t\t\t\t\t\tcfg = [(p,d,q), (P,D,Q,m), t]\n\t\t\t\t\t\t\t\t\tmodels.append(cfg)\n\treturn models\n\ndef main(series, n_test):\n\t# load dataset\n\tdata = series.values\n\t# model configs\n\tcfg_list = sarima_configs(seasonal=[0, n_test])\n\t# grid search\n\tscores = grid_search(data, cfg_list, n_test)\n\tprint(len(cfg_list))\n\t# take the single best (lowest-RMSE) config\n\tscore = scores[:1][0]\n\tcfg, error = score[0], score[1]\n\n\tif isinf(error):\n\t\terror = 0\n\n\t# the config key is a string, so the orders are parsed back out by fixed character positions\n\torder = cfg[1:10]\n\tp, d, q = order[1], order[4], order[7] \n\n\tsorder = cfg[12:25]\n\tP, D, Q, N = sorder[1], sorder[4], sorder[7], sorder[10]\n\tt = cfg[27]\n\n\treturn int(p), int(d), int(q), int(P), int(D), int(Q), int(N), str(t), error","sub_path":"App/GridSearch/sarimax.py","file_name":"sarimax.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"132552775","text":"def ciclazza(possibilita, indici=None, base=None):\n '''given a list possibilita of lists of elements to choose from, return\n all the possible choices, cycling over the indices in the tuple indici.\n for the elements that are not cycled it uses the values in base. Without base it uses\n the first one, while passing possibilita itself as base leaves them unchanged.\n e.g. ciclazza([[1, 2], [3, 4]]) returns [[1, 3], [1, 4], [2, 3], [2, 4]]'''\n \n combs = []\n if indici is None: indici = range(len(possibilita))\n indici = tuple(indici)\n if base is None:\n b = [[0]*(len(possibilita))]\n else:\n b = [base]\n for i in indici:\n for bb in b: #print \"deriving new values from bb \", bb\n for x in possibilita[i]:\n bbb = bb[:]\n bbb[i] = x #print \"appending \", bbb, \" to combs: \", combs\n combs.append(bbb)\n b = combs[:]\n combs = []\n return b","sub_path":"pyGeneralRoutines/ciclazzza.py","file_name":"ciclazzza.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"59829517","text":"\"\"\"\n    Inmanta clearwater plugins\n\n    :copyright: 2017 Inmanta NV\n    :contact: code@inmanta.com\n\"\"\"\n\nfrom inmanta.plugins import plugin, Context\nfrom inmanta.execute.proxy import UnknownException\nfrom inmanta import config\n\n\ndef get_vnf_instances(ctx: Context, vnf_name):\n env = config.Config.get(\"config\", \"environment\", None)\n\n def get_instances():\n return ctx.get_client().list_params(env, {\"module\": \"state\"})\n\n result = ctx.run_sync(get_instances)\n vnfs = []\n params = {}\n if result.code != 200:\n return None\n else:\n for p in result.result[\"parameters\"]:\n if p[\"name\"].startswith(\"fsm_\" + vnf_name):\n vnfs.append(int(p[\"name\"].split(\"_\")[-1]))\n params[p[\"name\"]] = p\n\n return vnfs, params\n\n\ndef set_state(ctx, fsm_name, value, metadata):\n env = config.Config.get(\"config\", \"environment\", None)\n\n def set_state():\n return ctx.get_client().set_param(tid=env, id=fsm_name, value=value, source=\"plugin\", metadata=metadata, recompile=True)\n\n result = ctx.run_sync(set_state)\n\n\nNOT_SET_STATES = [\"decommission\", \"remove\"]\n\n\n@plugin\ndef instances(ctx: Context, vnf: \"clearwater::openstack::ClearwaterVNF\") -> \"number[]\":\n \"\"\"\n Return a list of instances\n \"\"\"\n fsm_instances, params = get_vnf_instances(ctx, vnf.name)\n x = 0\n try:\n x = vnf.instances\n except UnknownException:\n pass\n\n instances = max(min(x, vnf.max_instances), vnf.min_instances)\n instance_list = []\n for i in range(1, instances + 1):\n if i in 
fsm_instances:\n fsm_instances.remove(i)\n instance_list.append(i)\n\n for i in fsm_instances:\n name = \"fsm_%s_%d\" % (vnf.name, i)\n # transfer its state machine\n if params[name][\"value\"] not in NOT_SET_STATES:\n set_state(ctx, name, \"decommission\", params[name][\"metadata\"])\n\n instance_list.append(i)\n\n return instance_list\n\n\n@plugin\ndef get_param(ctx: Context, name: \"string\") -> \"string\":\n \"\"\"\n Get a parameter from the SO\n \"\"\"\n env = config.Config.get(\"config\", \"environment\", None)\n\n def get():\n return ctx.get_client().get_param(tid=env, id=name)\n\n result = ctx.run_sync(get)\n\n if result.code == 200:\n return result.result[\"parameter\"][\"value\"]\n return None\n\n\n@plugin\ndef set_param(ctx: Context, name: \"string\", value: \"string\", recompile: \"bool\"=False) -> \"string\":\n \"\"\"\n Set a parameter on the SO\n \"\"\"\n env = config.Config.get(\"config\", \"environment\", None)\n\n def setp():\n return ctx.get_client().set_param(tid=env, id=name, value=value, source=\"plugin\", metadata={\"module\": \"clearwater\"},\n recompile=recompile)\n\n result = ctx.run_sync(setp)\n\n if result.code != 200:\n raise Exception(result.result)\n\n\n@plugin\ndef select_repo(ctx: Context, service: \"clearwater::ClearwaterService\", versions: \"dict\") -> \"string\":\n \"\"\"\n Select the current repo to use for this component\n \"\"\"\n if service.upgrade_version not in versions:\n raise Exception(\"Version %s for vnf %s does not exist.\" % (service.upgrade_version, service.instance_name))\n return versions[service.upgrade_version]\n\n\ndef get_service(services, vnf_name, vnf_instance):\n for svc in services:\n if svc.vnf_name == vnf_name and svc.vnf_instance == vnf_instance:\n return svc\n return None\n\n\n@plugin\ndef next_upgrade(ctx: Context, service: \"clearwater::ClearWater\", upgrade_order: \"string[]\",\n current: \"clearwater::ClearwaterService\"=None) -> \"clearwater::ClearwaterService\":\n upgrade_order = list(upgrade_order) # unwrap\n if current is None:\n return get_service(service.services, upgrade_order[0], 1)\n\n # check next index first\n svc = get_service(service.services, current.vnf_name, current.vnf_instance + 1)\n if svc is not None:\n return svc\n\n pos = upgrade_order.index(current.vnf_name)\n if pos + 1 == len(upgrade_order):\n # we reached the end\n return None\n\n return get_service(service.services, upgrade_order[pos + 1], 1)\n\n\n@plugin\ndef select_service_version(ctx: Context, cw: \"clearwater::ClearWater\") -> \"string\":\n \"\"\"\n Select the current version for clearwater\n \"\"\"\n param_name = cw.name + \"_version\"\n current_version = get_param(ctx, param_name)\n if current_version is None:\n set_param(ctx, param_name, cw.upgrade_version)\n return cw.upgrade_version\n\n return current_version\n\n\n@plugin\ndef get_current_version(ctx: Context, component: \"clearwater::ClearwaterService\") -> \"string\":\n \"\"\"\n Get the current version of this component from the orchestrator. When the version is not yet defined, use the current\n version of the service.\n \"\"\"\n param_name = component.name + \"_current_version\"\n current_version = get_param(ctx, param_name)\n if current_version is None:\n set_param(ctx, param_name, component.clearwater.version)\n return component.clearwater.version\n\n return current_version\n\n\n@plugin\ndef get_upgrade_version(ctx: Context, component: \"clearwater::ClearwaterService\") -> \"string\":\n \"\"\"\n Get the upgrade version of this component from the orchestrator. 
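It is stored in a parameter named \"<component.name>_upgrade_version\". 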
When the version is not yet defined, use the current\n version of the service.\n \"\"\"\n param_name = component.name + \"_upgrade_version\"\n current_version = get_param(ctx, param_name)\n if current_version is None:\n set_param(ctx, param_name, component.clearwater.version)\n return component.clearwater.version\n\n return current_version\n\n\n@plugin\ndef set_upgrade(ctx: Context, component: \"any\", service: \"clearwater::ClearWater\"):\n \"\"\"\n Set the given component to the upgrade version of the service.\n \"\"\"\n if component is not None:\n param_name = component.name + \"_upgrade_version\"\n set_param(ctx, param_name, component.clearwater.upgrade_version, True)\n else:\n param_name = service.name + \"_version\"\n set_param(ctx, param_name, service.upgrade_version, True)\n\n\n@plugin\ndef finish_upgrade(ctx: Context, component: \"clearwater::ClearwaterService\"):\n \"\"\"\n Finish the upgrade by setting the current version to the upgrade version.\n \"\"\"\n param_name = component.name + \"_current_version\"\n set_param(ctx, param_name, component.upgrade_version)","sub_path":"plugins/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"541465272","text":"import cv2\r\nimport numpy as np\r\nimport time\r\nimport sklearn.svm as SVC\r\nimport pickle\r\n\r\npath_wuchong = \"/mnt/myshare/linuxshare/wuchong/circle_\"\r\npath_yuantu = \"/mnt/myshare/linuxshare/yuantu/live_\"\r\npath_tiaoshi = \"/mnt/myshare/linuxshare/tiaoshi/live_\"\r\npath_youchong = \"/mnt/myshare/linuxshare/youchong/circle_\"\r\npath_circle = \"/mnt/myshare/linuxshare/circle/circle_\"\r\npath_fengmi = \"/mnt/myshare/linuxshare/fengmi/circle_\"\r\n\r\ny = 174\r\n#y = 129\r\nx = 270\r\nr = 65\r\nnum = 0\r\n\r\ncap = cv2.VideoCapture()\r\ncap.open(\"/dev/video0\")\r\n\r\ninput(\"press the enter button\")\r\n\r\nwhile 1: # get a frame\r\n\tif num>0:\r\n\t\tt1 = time.time()# start timing\r\n\tfor i in range(5):\r\n\t\tret, frame = cap.read() # show a frame\r\n\tcv2.imwrite(path_yuantu+str(num)+'.jpg', frame)\r\n\t\r\n\tdst = frame[(y-r):(y+r),(x-r):(x+r)]# crop the circle region\r\n\t#cv2.imshow(\"1\",dst)\r\n\t\r\n\t# detect the circle, then re-crop around it\r\n\tgray = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)\r\n\tshape = np.shape(gray)\r\n\t#for i in range(shape[0]):\r\n\t#\tfor j in range(shape[1]):\r\n\t#\t\tif gray[i][j] > 60:\r\n\t#\t\t\tgray[i][j] == 255\r\n\t#\t\t\tpass\r\n\t#\t\telse:\r\n\t#\t\t\tgray[i][j] == 0\r\n\r\n\tcircles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,60,param1=100,param2=32,minRadius=30,maxRadius=50)\r\n\t\r\n\tif circles is None:\r\n\t\tr=45\r\n\t\tdst = frame[(y-r):(y+r),(x-r):(x+r)]# crop the circle region\r\n\t\tr=65\r\n\telse:\r\n\t\tfor circle in circles[0,:]:\r\n\t\t\tprint(circle)\r\n\t\t\tcir_x = int(circle[0])\r\n\t\t\tcir_y = int(circle[1])\r\n\t\t\tcir_r = int(circle[2])\r\n\t\tdst = frame[(cir_y+y-r-cir_r):(cir_y+y-r+cir_r),(cir_x+x-r-cir_r):(cir_x+x-r+cir_r)]\r\n\t\t#dst = frame[(y-r):(y+r),(x-r):(x+r)]# crop the circle region\r\n\tprint('circle ok')\r\n\t#cv2.imshow(\"2\",dst)\r\n\tcv2.imwrite(path_circle+str(num)+'.jpg',dst)\r\n\t\r\n\t# run the frame through the model (note: the vocabulary and SVM are reloaded on every frame)\r\n\tif num==0:\r\n\t\tt1 = time.time()# start timing\r\n\twith open('zuixinfl', 'rb') as fr:\r\n\t\tvocabulary = pickle.load(fr)\r\n\textract = cv2.xfeatures2d.SIFT_create()\r\n\tflann_params = dict(algorithm = 1, trees = 5)\r\n\tflann = cv2.FlannBasedMatcher(flann_params, {})\r\n\textract_bow = cv2.BOWImgDescriptorExtractor(extract, flann)\r\n\textract_bow.setVocabulary(vocabulary)\r\n\t\r\n\tf = extract_bow.compute(dst, 
extract.detect(dst))\r\n\tx1 = np.array(f)\r\n\t\r\n\twith open('svm2.pickle', 'rb') as fr:\r\n\t\tnew_svm = pickle.load(fr)\r\n\t\tprediction = new_svm.predict(x1)# predict once and reuse the result\r\n\t\tprint(prediction)\r\n\t\t\r\n\t\t# sort the result into a separate folder per predicted class\r\n\t\tif prediction[0]==1:\r\n\t\t\tcv2.imwrite(path_youchong+str(num)+'.jpg', dst)\r\n\t\telif prediction[0]==2:\r\n\t\t\tcv2.imwrite(path_fengmi+str(num)+'.jpg', dst)\r\n\t\telse:\r\n\t\t\tcv2.imwrite(path_wuchong+str(num)+'.jpg', dst)\r\n\t\tframe = cv2.rectangle(frame,(x-r,y-r),(x+r,y+r),(0,0,255),5)\r\n\t\tcv2.imwrite(path_tiaoshi+str(num)+'.jpg', frame)\r\n\tprint('pic_live',num)\r\n\tt2 = time.time()\r\n\tprint('time_pic',t2-t1)# time taken to process this frame\r\n\ttotal_time = 2-(t2-t1)\r\n\ttotal_time = round(total_time,3)\r\n\tif total_time<2:\r\n\t\ttime.sleep(total_time)\r\n\telse:\r\n\t\ttime.sleep(2)\r\n\tnum+=1\r\n\tif num>3000:\r\n\t\tbreak\r\n\tt3 = time.time()\r\n\tprint('time_total',t3-t1)\r\ncap.release()","sub_path":"python_codes/second_demo_circle_mode_change/my_jieyuan.py","file_name":"my_jieyuan.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"324784515","text":"#%%#\nimport numpy as np\nimport pandas as pd\nimport os\nimport datetime\nimport matplotlib.pyplot as plt\nimport timeit\nfrom dateutil.relativedelta import relativedelta\nfrom distancemethod import *\nfrom helpers import *\nfrom cointmethod import *\nfrom config import *\n\nformation = (datetime.date(2018, 1, 1), datetime.date(2018, 11, 30))\ntrading = (formation[1], formation[1]+relativedelta(months=+1))\n\n#%%#\n#load all the time series retrieved\n\nfiles = os.listdir(data_folder)\n#we exclude CLOAKBTC because there's a data-level mixed-types mistake that breaks prefilter and it would get deleted anyway\n#it also breaks at ETHBTC (I manually deleted the first wrong part in Excel)\n#note: the trailing space plus [:-1] keeps the final backslash in the path literal\npaths = ['C:\\Bach\\concatenated_price_data\\ '[:-1] + x for x in files if x not in ['BTCUSDT.csv', 'ETHUSDT.csv', 'CLOAKBTC.csv']]\nnames = [file.partition('.')[0] for file in files]\ndf = pd.read_csv(paths[0])\n#%%#\n#rerunning is computationally intensive\n#x=prefilter(paths, cutoff=0.7)\n#np.save('prefiltered', x)\nx = np.load('prefiltered.npy')\n#%%#\n#y=preprocess(x[:,0], first_n=15)\n#y.to_pickle('preprocessed.pkl')\ny = pd.read_pickle('preprocessed.pkl')\n#%%#\n\n#%%\n(0.75263108*y.loc['IOTABTC','Price']-y.loc['TRXBTC', 'Price']).mean()\ndf1 = y.loc['TRXBTC']\ndf2 = y.loc['IOTABTC']\ndf3 = df1-0.75*df2\n#%%\n#COINTEGRATION TESTING\n#Instead of y, we should only use the formation period to find the integrated series, right?\n#During testing, I will load precomputed since it takes a bit of time\ncoint_head = pick_range(y, formation[0], formation[1])\nk = cointegration(find_integrated(coint_head))\n#The easiest way to get this saved for testing\n#k=[[('IOTABTC', 'TRXBTC'), np.array([-5.57313182, 0.75263108])], [('IOTABTC', 'ETHBTC'), np.array([1.94803311, 0.52900665])], [('XLMBTC', 'ADABTC'), np.array([6.4033369 , 1.65243466])], [('XLMBTC', 'BCCBTC'), np.array([5.47509147, 0.74036753])], [('ICXBTC', 'XRPBTC'), np.array([-5.71046939, 0.45890932])], [('ADABTC', 'ETHBTC'), np.array([1.58203978, 0.40205806])], [('BCCBTC', 'TRXBTC'), np.array([-9.83511717, 1.07832306])], [('BCCBTC', 'ETHBTC'), np.array([-1.01401577, 0.77402422])], [('XRPBTC', 'ETHBTC'), np.array([2.38969272, 0.53416221])]]\n\n#%%\ncoint_spreads = coint_spread(y, [item[0] for item in k], timeframe=formation, betas = [item[1] for item in 
k])\ncoint_spreads.sort_index(inplace=True)\n\n#%%\ncoint_signal = signals(coint_spreads, timeframe = trading, lag = 1)\n\n#%%\ncoint_signal = signals_numeric(coint_signal)\nweights_from_signals(coint_signal, cost=0.008)\n#%%\n#look at LTCxNEO on 12/29 for confirmation\npropagate_weights(coint_signal, formation)\n\n#%%\ncalculate_profit(coint_signal, cost=0.008)\n#%%\n\n#%%\n\n#%%\n#DISTANCE TESTING\n#we take timeframe corresponding to Formation period when finding the lowest SSDs\nhead = pick_range(y, formation[0], formation[1])\ndistances = distance(head)\nspreads=distance_spread(y,distances[2], formation)\n# this is some technical detail needed later?\nspreads.sort_index(inplace=True) \n#%%\ndist_signal=signals(spreads, timeframe=trading)\nweights_from_signals(dist_signal)\n\n#%%\n#look at IOTAxTRX at 7.12\npropagate_weights(dist_signal, formation)\n#%%\n\n#%%\n((adj.groupby(level=0)['1Price'].shift(0)-adj.groupby(level=0)['1Price'].shift(1))*adj.groupby(level=0)['1Price'].apply(np.sign)).cumsum()\n#%%\n#TESTS\n# is the formation period properly normalized? all normXXXX shoudl be about zero\nprint(pick_range(spreads, *formation).mean(level=0))\nprint(spreads.mean(level=0))\n\n# does the formation period contain only FORMATION signals?\nprint(spreads.loc[pd.IndexSlice[:, formation[0]:formation[1]],'Signals'].value_counts())\n#does trading period cointain no FORMATION?\nprint(spreads.loc[pd.IndexSlice[:, trading[0]:trading[1]],'Signals'].value_counts())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"236137726","text":"import sys \nimport random\nsys.path.append(\"/home/wanghuo/mygit/django/text5/booktest\")\nimport models\ndef main():\n new=models.mymodel()\n text=['胡凯莉',\"五五开\",\"太原马超\",\"大司马\",\"杨超越\",\"神超\"]\n for i in range(25):\n index=random.randint(0,5)\n name=text[index]+str(i)\n s=new.create(name,str(i*30))\n s.save()\n\nif __name__==\"__main__\":\n main()","sub_path":"text5/booktest/shujuku.py","file_name":"shujuku.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"82310043","text":"\"\"\"\nScript for analysing processed firefly data and comparing the models used\nto literature values. 
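Optional CLI arguments: argv[1] ('true'/'false') toggles displaying the plots on screen, and argv[2] toggles comparing absolute differences. 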
\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nfrom astropy.io import fits\nimport math\n\nplt.rcParams.update({'font.size': 16})\n\ndef lighten_color(color, amount=0.5):\n \"\"\"\n Lightens the given color by multiplying (1-luminosity) by the given amount.\n Input can be matplotlib color string, hex string, or RGB tuple.\n\n Examples:\n >> lighten_color('g', 0.3)\n >> lighten_color('#F034A3', 0.6)\n >> lighten_color((.3,.55,.1), 0.5)\n \"\"\"\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])\n\nif __name__ == \"__main__\":\n\n\t#Get optinal argument to display plots.\n\t#If no argument given, default is True\n\ttry:\n\t\tdisplay_plot = sys.argv[1].lower() == 'true'\n\texcept:\n\t\tdisplay_plot = False\n\n\ttry:\n\t\tabsolute_value = sys.argv[2].lower() == 'true'\n\texcept:\n\t\tabsolute_value = False\n\n\t#light_weight = 0\n\t#mass_weight = 1\n\tfirefly_values_to_use = 1\n\n\t#Setup arrays to store calculated values\n\tmodel_array = []\n\tlit_used_array = []\n\tparameter_array = []\n\tsample_size_array = []\n\tmean_array = [[], []]\n\tmedium_array = [[], []]\n\tmed_error_array = [[], []]\n\tsigma_array = [[], []]\n\n\t#Paths (relative to firefly directory) to read in the data processed by firefly\n\n\tpaths = [\n\t\t\"output/dissertation/MASTAR_TH_VMPL7_KR\",\n\t\t\"output/dissertation/MASTAR_TH_VMPL9_KR\",\n\t\t\"output/dissertation/MASTAR_TH_VMPL11_KR\",\n\t\t\"output/dissertation/MASTAR_E_VMPL7_KR\",\n\t\t\"output/dissertation/CONROY_E_KR/downgraded\",\n\t]\n\n\t#Loop through the model data\n\tfor path in paths:\n\n\t\tif path.split('/')[-1] == \"downgraded\" and not \"CONROY\" in path.split('/')[-2]:\n\t\t\tdowngraded_to_conroy = \"- downgraded\"\n\t\telse:\n\t\t\tdowngraded_to_conroy = \"\"\n\t \n\t \t#Get every file in the path of the folder and store in a list\n\t\tfiles = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\n\t\t#List of the literature data to read\n\t\tlit_files = [\n\t\t\t\"UsherGC.txt\", \n\t\t\t\"DeAngeli_GB.txt\", \n\t\t\t\"DeAngeli_HST.txt\"\n\t\t]\n\n\t\t#Setup figure\n\t\tfig = plt.figure(figsize=(20,10))\n\n\t\t#Initialise index for plotting graph\n\t\tindex = 1\n\n\t\tif \"MPL7\" in path:\n\t\t\tversion = \"(MPL7)\"\n\t\telif \"VMPL9\" in path:\n\t\t\tversion = \"(VMPL9)\"\n\t\telif \"VMPL11\" in path:\n\t\t\tversion = \"(VMPL11)\"\n\n\t\t#Loop through literature data and plot/calculate stats\n\t\tfor lit_file in lit_files:\n\n\t\t\t#Read in the literature data\n\t\t\tlit_table = pd.read_table(os.path.join(os.getcwd(), \"output\", \"dissertation\",\"literature_values\", lit_file), \n\t\t\t\t\t\t\t\t\t delim_whitespace= True)\n\n\t\t\t#Initialise data of model and lit to be stored\n\t\t\tlit_age_array = []\n\t\t\tlit_age_up_array = []\n\t\t\tlit_age_low_array = []\n\n\t\t\tlit_metal_array = []\n\t\t\tlit_metal_up_array = []\n\t\t\tlit_metal_low_array = []\n\n\t\t\t#[0] is parameter_lightW\n\t\t\t#[1] is parameter_massW\n\t\t\tmodel_age_array = [[], []]\n\t\t\tmodel_age_up_array = [[], []]\n\t\t\tmodel_age_low_array = [[], []]\n\t\t\t\n\t\t\tmodel_metal_array = [[], []]\n\t\t\tmodel_metal_up_array = [[], []]\n\t\t\tmodel_metal_low_array = [[], []]\n\n\t\t\t#Loop through files of processed firefly output \n\t\t\tfor file in files:\n\n\t\t\t\tpath_, file_ = os.path.split(file)\n\n\t\t\t\t#Get just the 
name of the object observed\n\t\t\t\tobject_name = file_[6:file_.find(\"_\")]\n\t\t\t\t\n\t\t\t\t#Search through literature table to see if it contains the spectra\n\t\t\t\tlit_values = lit_table.loc[lit_table['ID'] == object_name]\n\n\t\t\t\t#If the literature data doesn't contain the object, move to next file\n\t\t\t\tif lit_values.shape[0] == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t#Check which file is being used and extract the data\n\t\t\t\tif lit_file == \"UsherGC.txt\":\n\n\t\t\t\t\tlit_age = float(math.log10(lit_values['Age']))\n\t\t\t\t\tlit_age_error = 0\n\n\t\t\t\t\tlit_metal = float(lit_values['[Fe/H]'])\n\t\t\t\t\tlit_metal_error = 0\n\t\t\t\t\n\t\t\t\t#DeAngeli_GB and HST have 2 values for metal and age. Have taken the average?\n\t\t\t\telif lit_file == \"DeAngeli_GB.txt\":\n\t\t\t\t\t\n\t\t\t\t\tlit_age1 = float(lit_values['Age1'])\n\t\t\t\t\tlit_age2 = float(lit_values['Age2'])\n\t\t\t\t\tlit_age_error = abs(lit_age1 - lit_age2)\n\t\t\t\t\t\n\t\t\t\t\tlit_metal1 = float(lit_values['[Fe/H]zw'])\n\t\t\t\t\tlit_metal2 = float(lit_values['[Fe/H]cg'])\n\t\t\t\t\tlit_metal_error = abs(lit_metal1 - lit_metal2)\n\n\t\t\t\t\tlit_age = (lit_age1 + lit_age2)/2\n\t\t\t\t\tlit_metal = (lit_metal1 + lit_metal2) / 2\n\t\t\t\t\t\n\t\t\t\telif lit_file == \"DeAngeli_HST.txt\":\n\t\t\t\t\t\n\t\t\t\t\tlit_age1 = float(lit_values['Age1'])\n\t\t\t\t\tlit_age2 = float(lit_values['Age2'])\n\t\t\t\t\tlit_age_error = abs(lit_age1 - lit_age2)\n\n\t\t\t\t\tlit_metal1 = float(lit_values['[Fe/H]zw'])\n\t\t\t\t\tlit_metal2 = float(lit_values['[Fe/H]cg'])\n\t\t\t\t\tlit_metal_error = abs(lit_metal1 - lit_metal2)\n\n\t\t\t\t\tlit_age = (lit_age1 + lit_age2)/2\n\t\t\t\t\tlit_metal = (lit_metal1 + lit_metal2) / 2\n\n\t\t\t\t#Extract the model data\n\t\t\t\thdul = fits.open(file)\n\n\t\t\t\tmodel_age_lightW = float(hdul[1].header['age_lightW'])\n\t\t\t\tmodel_age_lightW_up = float(hdul[1].header['age_lightW_up_1sig'])\n\t\t\t\tmodel_age_lightW_low = float(hdul[1].header['age_lightW_low_1sig'])\n\n\t\t\t\tmodel_metal_lightW = float(hdul[1].header['metallicity_lightW'])\n\t\t\t\tmodel_metal_lightW_up = float(hdul[1].header['metallicity_lightW_up_1sig'])\n\t\t\t\tmodel_metal_lightW_low = float(hdul[1].header['metallicity_lightW_low_1sig'])\n\n\t\t\t\tmodel_age_massW = float(hdul[1].header['age_massW'])\n\t\t\t\tmodel_age_massW_up = float(hdul[1].header['age_massW_up_1sig'])\n\t\t\t\tmodel_age_massW_low = float(hdul[1].header['age_massW_low_1sig'])\n\n\t\t\t\tmodel_metal_massW = float(hdul[1].header['metallicity_massW'])\n\t\t\t\tmodel_metal_massW_up = float(hdul[1].header['metallicity_massW_up_1sig'])\n\t\t\t\tmodel_metal_massW_low = float(hdul[1].header['metallicity_massW_low_1sig'])\n\n\t\t\t\tmodel = hdul[1].header[\"MODEL\"] + version + downgraded_to_conroy\n\t\t\t\t\n\t\t\t\thdul.close()\n\n\t\t\t\t#Check if the lit data is nan. 
If it isn't, save the lit and model data to array.\n\t\t\t\tif not np.isnan(lit_metal) and not np.isnan(lit_age):\n\n\t\t\t\t\tlit_age_array.append(lit_age)\n\t\t\t\t\tlit_age_up_array.append(lit_age_error)\n\t\t\t\t\tlit_age_low_array.append(lit_age_error)\n\n\t\t\t\t\tlit_metal_array.append(lit_metal)\n\t\t\t\t\tlit_metal_up_array.append(lit_metal_error)\n\t\t\t\t\tlit_metal_low_array.append(lit_metal_error)\n\n\t\t\t\t\tmodel_age_array[0].append(model_age_lightW)\n\t\t\t\t\tmodel_age_up_array[0].append(model_age_lightW_up)\n\t\t\t\t\tmodel_age_low_array[0].append(model_age_lightW_low)\n\t\t\t\t\t\n\t\t\t\t\tmodel_metal_array[0].append(model_metal_lightW)\n\t\t\t\t\tmodel_metal_up_array[0].append(model_metal_lightW_up)\n\t\t\t\t\tmodel_metal_low_array[0].append(model_metal_lightW_low)\n\n\t\t\t\t\tmodel_age_array[1].append(model_age_massW)\n\t\t\t\t\tmodel_age_up_array[1].append(model_age_massW_up)\n\t\t\t\t\tmodel_age_low_array[1].append(model_age_massW_low)\n\t\t\t\t\t\n\t\t\t\t\tmodel_metal_array[1].append(model_metal_massW)\n\t\t\t\t\tmodel_metal_up_array[1].append(model_metal_massW_up)\n\t\t\t\t\tmodel_metal_low_array[1].append(model_metal_massW_low)\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Missing:\",object_name)\n\n\t\t\tsample_size = len(model_age_array[0])\n\n\t\t\t#Check the type of model used and assign a colour\n\t\t\tif \"CONROY_E\" in path.upper():\n\t\t\t\tcolor = \"red\"\n\t\t\t\tmodel = \"Conroy\"\n\n\t\t\telif \"MASTAR_TH\" in path.upper():\n\t\t\t\tcolor = \"royalblue\"\n\t\t\t\tmodel = \"Th-MaStar\"\n\n\t\t\t\tif \"VMPL7\" in path.upper():\n\t\t\t\t\tmodel = model + \"(MPL7)\"\n\n\t\t\t\telif \"VMPL9\" in path.upper():\n\t\t\t\t\tmodel = model + \"(MPL9)\"\n\t\t\t\t\tcolor = \"navy\"\n\n\t\t\t\telif \"VMPL11\" in path.upper():\n\t\t\t\t\tmodel = model + \"(MPL11)\"\n\t\t\t\t\tcolor = \"blueviolet\"\n\n\t\t\telif \"MASTAR_E\" in path.upper():\n\t\t\t\tcolor = \"lime\"\n\t\t\t\tmodel = \"E-MaStar\"\t\n\n\t\t\t\tif \"VMPL7\" in path.upper():\n\t\t\t\t\tmodel = model + \"(MPL7)\"\n\n\t\t\t\telif \"VMPL9\" in path.upper():\n\t\t\t\t\tmodel = model + \"(MPL9)\"\n\n\t\t\t\tif \"VMPL11\" in path.upper():\n\t\t\t\t\tmodel = model + \"(MPL11)\"\n\n\t\t\t#Convert lists to numpy arrays\n\t\t\tmodel_age_array = np.array(model_age_array)\n\t\t\tlit_age_array = np.array(lit_age_array)\n\t\t\tmodel_metal_array = np.array(model_metal_array)\n\t\t\tlit_metal_array = np.array(lit_metal_array)\n\n\t\t\t#Setup plotting options\n\t\t\tcolumns = 4\n\t\t\trows = len(lit_files)\n\t\t\tcapsize = 3\n\t\t\tmin_bin = -3\n\t\t\tmax_bin = 3\n\t\t\tbin_width = 0.25\n\n\t\t\t#bins = np.arange(min_bin, max_bin + bin_width, bin_width)\n\t\t\tbins = np.linspace(start = min_bin, stop= max_bin, num=26)\n\t\t\t#Name figure\n\t\t\tfig.suptitle(\"Model \" + model + \" compared with literature values.\", fontweight='bold')\n\t\t\t\n\t\t\t#Plot histogram data\n\n\t\t\t#Age\n\t\t\tax1 = fig.add_subplot(rows, columns, index)\n\n\t\t\tage_difference = np.array(model_age_array[firefly_values_to_use]) - np.array(lit_age_array)\n\t\t\tif absolute_value:\n\t\t\t\tage_difference = np.absolute(age_difference)\n\n\t\t\tage_difference_lightW = np.array(model_age_array[0]) - np.array(lit_age_array)\n\t\t\tage_difference_massW = np.array(model_age_array[1]) - np.array(lit_age_array)\n\n\t\t\tax1.hist(age_difference_lightW, bins = bins, color = color, alpha = 0.5, label = \"LW - \" + model)\n\t\t\tax1.hist(age_difference_lightW, bins = bins, color = color, alpha = 0.75, histtype=u'step', linewidth = 
3)\n\t\t\tax1.hist(age_difference_massW, bins = bins, color = \"black\", alpha = 0.5, label = \"MW - \" + model)\n\t\t\tax1.hist(age_difference_massW, bins = bins, color = \"black\", alpha = 0.75, histtype=u'step', linewidth = 3)\n\t\t\txabs_max = abs(max(ax1.get_xlim(), key=abs))\n\n\t\t\tax1.set_xlim(xmin=min_bin, xmax=max_bin)\n\t\t\tax1.set_ylabel(\"Frequency\")\n\t\t\tax1.vlines(0, linestyle = \"dashed\", ymin=0, ymax=100)\n\n\t\t\tif index == 1:\n\t\t\t\tax1.legend(framealpha = 0.5)\n\n\t\t\tif \"Usher\" in lit_file:\n\t\t\t\tax1.set_ylim(0, 40)\n\t\t\telif \"DeAngeli_GB\" in lit_file:\n\t\t\t\tax1.set_ylim(0, 15)\n\t\t\telif \"DeAngeli_HST\" in lit_file:\n\t\t\t\tax1.set_ylim(0, 20)\n\n\t\t\tif index > rows*columns -columns: \n\t\t\t\tax1.set_xlabel(\"Age (log Gyr) Model - Literature\")\n\t\t\telse:\n\t\t\t\tax1.set_xticklabels([])\n\t\t\t\tax1.tick_params(axis = \"x\", direction = \"in\")\n\n\t\t\tax1.annotate(lit_file[:-4],# + \"\\n(Sample size = \" + str(sample_size) + \")\", \n\t\t\t\t\t\t xy=(0, 0.5), \n\t\t\t\t\t\t xytext=(-ax1.yaxis.labelpad - 75, 0), \n\t\t\t\t\t\t xycoords=ax1.yaxis.label, \n\t\t\t\t\t\t textcoords='offset points',\n\t\t\t\t\t\t size='large', \n\t\t\t\t\t\t ha='center', \n\t\t\t\t\t\t va='center',\n\t\t\t\t\t\t fontweight='bold')\n\t\t\tax1.grid()\n\n\t\t\t#Plot scatter plot data\n\t\t\t#Age\n\t\t\tindex = index +1\n\t\t\tax3 = fig.add_subplot(rows, columns, index)\n\t\t\tax3.plot([0, 1], [0, 1], 'g--', transform=ax3.transAxes, color = \"black\")\n\n\t\t\tax3.errorbar(lit_age_array, \n\t\t\t\t\t\t model_age_array[0], \n\t\t\t\t\t\t yerr = (np.array(model_age_array[0]) - np.array(model_age_low_array[0]), np.array(model_age_up_array[0]) - np.array(model_age_array[0])),\n\t\t\t\t\t\t xerr = (lit_age_low_array, lit_age_up_array),\n\t\t\t\t\t\t color = color, \n\t\t\t\t\t\t fmt='o', \n\t\t\t\t\t\t ecolor=color, \n\t\t\t\t\t\t alpha = 0.75, \n\t\t\t\t\t\t markerfacecolor='none', \n\t\t\t\t\t\t capsize=capsize,\n\t\t\t\t\t\t label = 'LW - ' + model,\n\t\t\t\t\t\t linewidth = 1)\n\n\t\t\ttry:\n\t\t\t\tax3.errorbar(lit_age_array, \n\t\t\t\t\t\t\t model_age_array[1], \n\t\t\t\t\t\t\t yerr = (np.array(model_age_array[1]) - np.array(model_age_low_array[1]), np.array(model_age_up_array[1]) - np.array(model_age_array[1])),\n\t\t\t\t\t\t\t xerr = (lit_age_low_array, lit_age_up_array),\n\t\t\t\t\t\t\t color = lighten_color(color, 1.2), \n\t\t\t\t\t\t\t fmt='o', \n\t\t\t\t\t\t\t ecolor=lighten_color(color, 1.2), \n\t\t\t\t\t\t\t alpha = 0.75, \n\t\t\t\t\t\t\t markerfacecolor='none', \n\t\t\t\t\t\t\t capsize=capsize,\n\t\t\t\t\t\t\t marker = 'v',\n\t\t\t\t\t\t\t label = 'MW - ' + model,\n\t\t\t\t\t\t\t linewidth = 1)\n\t\t\texcept:\n\t\t\t\tax3.errorbar(lit_age_array, \n\t\t\t\t\t\t model_age_array[1], \n\t\t\t\t\t\t yerr = (np.array(model_age_array[1]) - np.array(model_age_low_array[1]), np.array(model_age_up_array[1]) - np.array(model_age_array[1])),\n\t\t\t\t\t\t xerr = (lit_age_low_array, lit_age_up_array),\n\t\t\t\t\t\t color = \"black\", \n\t\t\t\t\t\t fmt='o', \n\t\t\t\t\t\t ecolor= \"black\", \n\t\t\t\t\t\t alpha = 0.75, \n\t\t\t\t\t\t markerfacecolor='none', \n\t\t\t\t\t\t capsize=capsize,\n\t\t\t\t\t\t marker = 'v',\n\t\t\t\t\t\t label = 'MW - ' + model,\n\t\t\t\t\t\t linewidth = 1)\n\t\t\t\n\t\t\tax3.set_ylabel(\"Log Age (Gyr) - Model\")\n\t\t\tax3.set_xlim(-2, 1.3)\n\t\t\tax3.set_ylim(-2, 1.3)\n\n\t\t\tif index > rows*columns -columns: \n\t\t\t\tax3.set_xlabel(\"Log Age (Gyr) - 
Literature\")\n\t\t\telse:\n\t\t\t\tax3.set_xticklabels([])\n\t\t\t\tax3.tick_params(axis = \"x\", direction = \"in\")\n\n\t\t\tif index == 2:\n\t\t\t\tax3.legend(framealpha= 0.5)\n\n\t\t\tax3.grid()\n\n\t\t\t#Metal\n\t\t\tindex = index +1\n\n\t\t\tax2 = fig.add_subplot(rows, columns, index)\n\n\t\t\tmetal_difference = np.array(model_metal_array[firefly_values_to_use]) - np.array(lit_metal_array)\n\n\t\t\tif absolute_value:\n\t\t\t\tmetal_difference = np.absolute(metal_difference)\n\n\t\t\tmetal_difference_lightW = np.array(model_metal_array[0]) - np.array(lit_metal_array)\n\t\t\tmetal_difference_massW = np.array(model_metal_array[1]) - np.array(lit_metal_array)\n\n\t\t\tax2.hist(metal_difference_lightW, bins = bins, color = color, alpha = 0.5, label = \"LW - \" + model)\n\t\t\tax2.hist(metal_difference_lightW, bins = bins, color = color, alpha = 0.75, histtype=u'step', linewidth = 3)\n\t\t\tax2.hist(metal_difference_massW, bins = bins, color = \"black\", alpha = 0.5,label = \"MW - \" + model)\n\t\t\tax2.hist(metal_difference_massW, bins = bins, color = \"black\", alpha = 0.75, histtype=u'step', linewidth = 3)\n\t\t\txabs_max = abs(max(ax2.get_xlim(), key=abs))\n\t\t\tax2.set_xlim(xmin=-3, xmax=3)\n\t\t\tax2.vlines(0, linestyle = \"dashed\", ymin=0, ymax=100)\n\n\t\t\tif index == 3:\n\t\t\t\tax2.legend(framealpha = 0.5)\n\n\t\t\tax2.set_ylabel(\"Frequency\")\n\t\t\tif index > rows*columns -columns: \n\t\t\t\tax2.set_xlabel(\"[Z/H] Model - Literature\")\n\t\t\telse:\n\t\t\t\tax2.set_xticklabels([])\n\t\t\t\tax2.tick_params(axis = \"x\", direction = \"in\")\n\n\t\t\tif \"Usher\" in lit_file:\n\t\t\t\tax2.set_ylim(0, 30)\n\t\t\telif \"DeAngeli_GB\" in lit_file:\n\t\t\t\tax2.set_ylim(0, 12)\n\t\t\telif \"DeAngeli_HST\" in lit_file:\n\t\t\t\tax2.set_ylim(0, 18)\n\n\t\t\tax2.grid()\n\n\t\t\t#Metal\n\t\t\tindex = index +1\n\t\t\tax4 = fig.add_subplot(rows, columns, index)\n\t\t\t#ax4.scatter(lit_metal_array, model_metal_array, color = color)\n\t\t\tax4.plot([0, 1], [0, 1], 'g--', transform=ax4.transAxes, color = \"black\")\n\n\t\t\tax4.errorbar(lit_metal_array, \n\t\t\t\t\t\t model_metal_array[0], \n\t\t\t\t\t\t yerr = (np.array(model_metal_array[0]) - np.array(model_metal_low_array[0]), np.array(model_metal_up_array[0]) - np.array(model_metal_array[0])),\n\t\t\t\t\t\t xerr = (lit_metal_low_array, lit_metal_up_array),\n\t\t\t\t\t\t color = color, \n\t\t\t\t\t\t fmt='o', \n\t\t\t\t\t\t ecolor=color, \n\t\t\t\t\t\t alpha = 0.75, \n\t\t\t\t\t\t markerfacecolor='none', \n\t\t\t\t\t\t capsize=capsize,\n\t\t\t\t\t\t label = 'LW - ' + model)\n\n\t\t\ttry:\n\t\t\t\tax4.errorbar(lit_metal_array, \n\t\t\t\t\t\t\t model_metal_array[1], \n\t\t\t\t\t\t\t yerr = (np.array(model_metal_array[1]) - np.array(model_metal_low_array[1]), np.array(model_metal_up_array[1]) - np.array(model_metal_array[1])),\n\t\t\t\t\t\t\t xerr = (lit_metal_low_array, lit_metal_up_array),\n\t\t\t\t\t\t\t color = lighten_color(color, 1.3), \n\t\t\t\t\t\t\t fmt='o', \n\t\t\t\t\t\t\t ecolor= lighten_color(color, 1.3), \n\t\t\t\t\t\t\t alpha = 0.75, \n\t\t\t\t\t\t\t markerfacecolor='none', \n\t\t\t\t\t\t\t capsize=capsize,\n\t\t\t\t\t\t\t marker = 'v',\n\t\t\t\t\t\t\t label = 'MW - ' + model)\n\t\t\texcept:\n\t\t\t\tax4.errorbar(lit_metal_array, \n\t\t\t\t\t\t\t model_metal_array[1], \n\t\t\t\t\t\t\t yerr = (np.array(model_metal_array[1]) - np.array(model_metal_low_array[1]), np.array(model_metal_up_array[1]) - np.array(model_metal_array[1])),\n\t\t\t\t\t\t\t xerr = (lit_metal_low_array, lit_metal_up_array),\n\t\t\t\t\t\t\t color = 
\"black\", \n\t\t\t\t\t\t\t fmt='o', \n\t\t\t\t\t\t\t ecolor= \"black\", \n\t\t\t\t\t\t\t alpha = 0.75, \n\t\t\t\t\t\t\t markerfacecolor='none', \n\t\t\t\t\t\t\t capsize=capsize,\n\t\t\t\t\t\t\t marker = 'v',\n\t\t\t\t\t\t\t label = 'MW - ' + model)\n\t\t\t\n\t\t\tax4.set_ylabel(\"[Z/H] - Model\")\n\t\t\tax4.set_xlim(-2.5, 0.5)\n\t\t\tax4.set_ylim(-2.5, 0.5)\n\n\t\t\tif index == 4:\n\t\t\t\tax4.legend(framealpha = 0.5)\n\n\n\t\t\tif index > rows*columns -columns: \n\t\t\t\tax4.set_xlabel(\"[Z/H] - Literature\")\n\t\t\telse:\n\t\t\t\tax4.set_xticklabels([])\n\t\t\t\tax4.tick_params(axis = \"x\", direction = \"in\")\n\t\t\tax4.grid()\n\n\t\t\tindex = index +1\n\t\t\t\n\t\t\t#Calculate stats\n\t\t\t#######################################################################\n\n\t\t\t#Light weight\n\n\t\t\tmean_age = np.mean(age_difference_lightW)\n\t\t\tmean_metal = np.mean(metal_difference_lightW)\n\n\t\t\tmedium_age = np.median(age_difference_lightW)\n\t\t\tmedium_metal = np.median(metal_difference_lightW)\n\n\t\t\t#Medium error\n\t\t\tage_error_firefly = np.maximum(model_age_up_array[0], model_age_low_array[0])\n\t\t\tage_error_literature = np.maximum(lit_age_up_array, lit_age_low_array) \n\n\t\t\tmetal_error_firefly = np.maximum(model_metal_up_array[0], model_metal_low_array[0])\n\t\t\tmetal_error_literature = np.maximum(lit_metal_up_array, lit_metal_low_array) \n\n\t\t\tmed_error_age = np.sqrt((np.median(age_error_firefly/model_age_array[0]))**2 + (np.median(age_error_literature/lit_age_array))**2) * mean_age\n\t\t\tmed_error_metal = np.sqrt((np.median(metal_error_firefly/model_metal_array[0]))**2 + (np.median(metal_error_literature/lit_metal_array))**2) * mean_metal\n\n\t\t\tstd_age = np.std(age_difference_lightW)\n\t\t\tstd_metal = np.std(metal_difference_lightW)\n\n\t\t\t#Save data to arrays\n\t\t\tsample_size_array.append(sample_size)\n\t\t\tsample_size_array.append(sample_size)\n\n\t\t\tmodel_array.append(model)\n\t\t\tmodel_array.append(model)\n\n\t\t\tlit_used_array.append(lit_file[:-4])\n\t\t\tlit_used_array.append(lit_file[:-4])\n\n\t\t\tparameter_array.append(\"Age (log Gyr)\")\n\t\t\tparameter_array.append(\"[Z/H]\")\n\n\t\t\tn_decimals= 2\n\n\t\t\tmean_array[0].append(round(mean_age, n_decimals))\n\t\t\tmean_array[0].append(round(mean_metal, n_decimals))\t\t\t\n\n\t\t\tmedium_array[0].append(round(medium_age, n_decimals))\n\t\t\tmedium_array[0].append(round(medium_metal, n_decimals))\n\n\t\t\tmed_error_array[0].append(abs(round(med_error_age, n_decimals)))\n\t\t\tmed_error_array[0].append(abs(round(med_error_metal, n_decimals)))\n\n\t\t\tsigma_array[0].append(round(std_age, n_decimals))\n\t\t\tsigma_array[0].append(round(std_metal, n_decimals))\n\n\t\t\t#########################################################################################\n\t\t\t#Mass weight\n\n\t\t\tmean_age = np.mean(age_difference_massW)\n\t\t\tmean_metal = np.mean(metal_difference_massW)\n\n\t\t\tmedium_age = np.median(age_difference_massW)\n\t\t\tmedium_metal = np.median(metal_difference_massW)\n\n\t\t\tage_error_firefly = np.maximum(model_age_up_array[1], model_age_low_array[1])\n\t\t\tage_error_literature = np.maximum(lit_age_up_array, lit_age_low_array) \n\n\t\t\tmetal_error_firefly = np.maximum(model_metal_up_array[1], model_metal_low_array[1])\n\t\t\tmetal_error_literature = np.maximum(lit_metal_up_array, lit_metal_low_array) \n\n\t\t\tmed_error_age = np.sqrt((np.median(age_error_firefly/model_age_array[1]))**2 + (np.median(age_error_literature/lit_age_array))**2) * mean_age\n\t\t\tmed_error_metal = 
np.sqrt((np.median(metal_error_firefly/model_metal_array[1]))**2 + (np.median(metal_error_literature/lit_metal_array))**2) * mean_metal\n\n\t\t\tstd_age = np.std(age_difference_massW)\n\t\t\tstd_metal = np.std(metal_difference_massW)\n\n\t\t\tmean_array[1].append(round(mean_age, n_decimals))\n\t\t\tmean_array[1].append(round(mean_metal, n_decimals))\t\t\t\n\n\t\t\tmedium_array[1].append(round(medium_age, n_decimals))\n\t\t\tmedium_array[1].append(round(medium_metal, n_decimals))\n\n\t\t\tmed_error_array[1].append(abs(round(med_error_age, n_decimals)))\n\t\t\tmed_error_array[1].append(abs(round(med_error_metal, n_decimals)))\n\n\t\t\tsigma_array[1].append(round(std_age, n_decimals))\n\t\t\tsigma_array[1].append(round(std_metal, n_decimals))\n\n\t\tfig.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n\t\t#Save plots\n\n\t\tfig.savefig(\"output/dissertation/data/comparison_to_lit/\" + (\"absolute/\" if absolute_value else \"\") + model + \"_comparison_to_lit\" + (\"_absolute\" if absolute_value else \"\") + \".png\")\n\n\t\tif display_plot:\n\t\t\tplt.show()\n\n\t#Create dataframe and save the stats in a table\n\tdata_LW = {'Model': model_array, 'Literature data': lit_used_array, 'Parameter':parameter_array, 'Sample size': sample_size_array, 'Median difference': medium_array[0], 'Medium error': med_error_array[0],'Standard deviation difference': sigma_array[0]}\n\tdf_LW = pd.DataFrame(data=data_LW)\n\tdf_LW = df_LW.sort_values([\"Literature data\", \"Parameter\"], ascending = (True, True))\n\tdf_LW.to_csv(\"output/dissertation/data/comparison_to_lit/\" + (\"absolute/\" if absolute_value else \"\") + \"table_lightW\" + (\"_absolute\" if absolute_value else \"\") +\".csv\", index = False, header=True)\n\tprint(df_LW, \"\\n\")\n\n\tdata_MW = {'Model': model_array, 'Literature data': lit_used_array, 'Parameter':parameter_array, 'Sample size': sample_size_array, 'Median difference': medium_array[1], 'Medium error': med_error_array[1],'Standard deviation difference': sigma_array[1]}\n\tdf_MW = pd.DataFrame(data=data_MW)\n\tdf_MW = df_MW.sort_values([\"Literature data\", \"Parameter\"], ascending = (True, True))\n\tprint(df_MW, \"\\n\")\n\tdf_MW.to_csv(\"output/dissertation/data/comparison_to_lit/\" + (\"absolute/\" if absolute_value else \"\") + \"table_massW\" + (\"_absolute\" if absolute_value else \"\") +\".csv\", index = False, header=True)\n\n\t\"\"\"\n\tff_values = [df_LW, df_MW]\n\tparameters = ['[Z/H]', 'Age (log Gyr)']\n\n\tfor i, val in enumerate(ff_values):\n\n\t\tif i == 0:\n\t\t\tprint (\"Light weight values\")\n\t\t\tval_type = \"LW\"\n\t\telse:\n\t\t\tprint(\"Mass weight values\")\n\t\t\tval_type = \"MW\"\n\n\t\tfor lit in lit_files:\n\n\t\t\tfor param in parameters:\n\n\t\t\t\ttemp_data = val.loc[(val['Literature data'] == lit[:-4]) & (val['Parameter'] == param)]\n\n\t\t\t\tprint(temp_data, \"\\n\")\n\n\t\t\t\tif param == 'Age (log Gyr)':\n\t\t\t\t\tparam = \"age\"\n\t\t\t\telif param == \"[Z/H]\":\n\t\t\t\t\tparam = \"metal\"\n\n\t\t\t\ttemp_data.to_csv(\"output/dissertation/data/comparison_to_lit/table_sep/\" + val_type + \"_\" + lit[:-4] + \"_\" + param + \".csv\", index = False, header=True)\n\n\t\"\"\"\n\n\n\n\n\t\n\t\"\"\"\n\tprint(df.loc[(df['Literature data'] == 'UsherGC') & (df['Parameter'] == '[Z/H]')], \"\\n\")\n\tprint(df.loc[(df['Literature data'] == 'UsherGC') & (df['Parameter'] == 'Age (log Gyr)')], \"\\n\")\n\n\tprint(df.loc[(df['Literature data'] == 'DeAngeli_HST') & (df['Parameter'] == '[Z/H]')], \"\\n\")\n\tprint(df.loc[(df['Literature data'] == 'DeAngeli_HST') & 
(df['Parameter'] == 'Age (log Gyr)')], \"\\n\")\n\n\tprint(df.loc[(df['Literature data'] == 'DeAngeli_GB') & (df['Parameter'] == '[Z/H]')], \"\\n\")\n\tprint(df.loc[(df['Literature data'] == 'DeAngeli_GB') & (df['Parameter'] == 'Age (log Gyr)')], \"\\n\")\n\t\"\"\"\n\t#print(df)\n\n","sub_path":"output/dissertation/compare_model_to_lit.py","file_name":"compare_model_to_lit.py","file_ext":"py","file_size_in_byte":21997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"310654429","text":"'''\nnginx fab tools\nnotice: the nginx installation is missing \n'''\n\nfrom fab_tools import info\nfrom fab_tools import Config\nfrom fabric import task\n\n_conf_repo_path = Config['git_repo_dist_path']+'/'\\\n +Config['nginx_conf_file_repo_path']\n_conf_srv_path = Config['nginx_conf_file_srv_path']\n\n@task\ndef start(c):\n info('Copy from '+_conf_repo_path+' to '+_conf_srv_path)\n c.sudo('cp '+_conf_repo_path+' '+_conf_srv_path, echo=True)\n info('Start nginx service')\n c.sudo('nginx', echo=True)\n\n\n@task\ndef reload(c):\n info('Copy from '+_conf_repo_path+' to '+_conf_srv_path)\n c.sudo('cp '+_conf_repo_path+' '+_conf_srv_path, echo=True)\n info('reload nginx service')\n c.sudo('nginx -s reload', echo=True)\n","sub_path":"fab_tools/nginx_tools.py","file_name":"nginx_tools.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"459512320","text":"# GUI based Connect-4 with an AI\n\nimport numpy as np\nimport pygame\nimport sys\nimport math\n\nROW_COUNT = 6\nCOLUMN_COUNT = 7\nSQUARESIZE = 100\nRADIUS = int(SQUARESIZE/2 - 10)\nwidth = COLUMN_COUNT * SQUARESIZE\nheight = (ROW_COUNT + 1) * SQUARESIZE\nCOLOUR_R = (17, 108, 135)\nCOLOUR_C = (5, 26, 48)\nCOLOUR_P1 = (114, 223, 191)\nCOLOUR_P2 = (255, 40, 105)\nCOLOUR_DB = (19, 67, 95)\n\ndef create_board(): \n board = np.zeros((ROW_COUNT, COLUMN_COUNT))\n return board\n\ndef drop_piece(board, row, col, piece):\n board[row][col] = piece\n\ndef is_valid_loc(board, col):\n return board[ROW_COUNT-1][col] == 0\n\ndef get_next_open_row(board, col):\n for r in range(ROW_COUNT):\n if board[r][col] == 0:\n return r\n\ndef show_board(board):\n print(np.flip(board, 0), \"\\n 0 1 2 3 4 5 6 \\n\")\n\ndef winning_move(board, piece):\n # Horizontal wins\n for c in range(COLUMN_COUNT-3):\n for r in range(ROW_COUNT):\n if board[r][c]==piece and board[r][c+1]==piece and board[r][c+2]==piece and board[r][c+3]==piece:\n return True\n # Verical wins\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT-3):\n if board[r][c]==piece and board[r+1][c]==piece and board[r+2][c]==piece and board[r+3][c]==piece:\n return True\n # Positve slope diagonal wins\n for c in range(COLUMN_COUNT-3):\n for r in range(ROW_COUNT-3):\n if board[r][c]==piece and board[r+1][c+1]==piece and board[r+2][c+2]==piece and board[r+3][c+3]==piece:\n return True\n # Negative slope diagonal wins\n for c in range(COLUMN_COUNT-3):\n for r in range(3, ROW_COUNT):\n if board[r][c]==piece and board[r-1][c+1]==piece and board[r-2][c+2]==piece and board[r-3][c+3]==piece:\n return True\n\ndef draw_board(board):\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT):\n pygame.draw.rect(screen, COLOUR_R, (c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))\n pygame.draw.circle(screen, COLOUR_C, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)\n \n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT):\n if 
board[r][c] == 1:\n pygame.draw.circle(screen, COLOUR_P1, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\n elif board[r][c] == 2:\n pygame.draw.circle(screen, COLOUR_P2, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\n pygame.display.update()\n\nboard = create_board()\ngameover = False\nturn = 0\n\npygame.init()\nsize = (width, height)\nscreen = pygame.display.set_mode(size)\ndraw_board(board)\npygame.display.update()\nmyfont = pygame.font.SysFont(\"calibri\", 75)\n\nwhile not gameover:\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit(0)\n\n if event.type == pygame.MOUSEMOTION:\n pygame.draw.rect(screen, COLOUR_DB, (0, 0, width, SQUARESIZE))\n posx = event.pos[0]\n if turn == 0:\n pygame.draw.circle(screen, COLOUR_P1, (posx, int(SQUARESIZE/2)), RADIUS-5)\n else:\n pygame.draw.circle(screen, COLOUR_P2, (posx, int(SQUARESIZE/2)), RADIUS-5)\n pygame.display.update()\n \n if event.type == pygame.MOUSEBUTTONDOWN:\n if turn == 0:\n posx = event.pos[0]\n col = int(math.floor(posx/SQUARESIZE))\n piece = 1\n\n else:\n posx = event.pos[0]\n col = int(math.floor(posx/SQUARESIZE))\n piece = 2\n\n if is_valid_loc(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col , piece)\n\n if winning_move(board, piece):\n label = myfont.render(f\"PLAYER {piece} WINS!!\", 1, COLOUR_C)\n screen.blit(label, (40, 10))\n gameover = True\n\n draw_board(board)\n turn = (turn+1) % 2\n\n if gameover:\n pygame.time.wait(3000)\n","sub_path":"Connect4/Connect4_v3.py","file_name":"Connect4_v3.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"223945612","text":"from base64 import b64encode\nfrom collections import namedtuple\nfrom io import BytesIO\nfrom urllib.parse import quote\n\nfrom aiohttp import ClientResponseError\nfrom PIL import Image, UnidentifiedImageError\nfrom redbot.core.i18n import Translator\n\ntry:\n from redbot import json # support of Draper's branch\nexcept ImportError:\n import json\n\n_ = Translator(\"ReverseImageSearch\", __file__)\n\nBASE_URL = \"https://trace.moe\"\nBASE_API_URL = f\"{BASE_URL}/api\"\n\n\nclass TraceMoeDoc:\n def __init__(self, data: dict):\n self.time_start = data.get(\"from\")\n self.time_end = data.get(\"to\")\n self.time = data.get(\"at\")\n self.episode = data.get(\"episode\")\n self.similarity = data.get(\"similarity\")\n self.anilist_id = data.get(\"anilist_id\")\n self.mal_id = data.get(\"mal_id\")\n self.is_adult = data.get(\"is_adult\")\n self.title = data.get(\"title\")\n self.title_native = data.get(\"title_native\")\n self.title_chinese = data.get(\"title_chinese\")\n self.title_english = data.get(\"title_english\")\n self.title_romaji = data.get(\"title_romaji\")\n self.synonyms = data.get(\"synonyms\")\n self.synonyms_chinese = data.get(\"synonyms_chinese\")\n self.filename = data.get(\"filename\")\n tokenthumb = data.get(\"tokenthumb\")\n self.thumbnail = (\n f\"{BASE_URL}/thumbnail.php?anilist_id={self.anilist_id}\"\n f\"&file={quote(self.filename)}&t={self.time}&token={tokenthumb}\"\n )\n self.preview = (\n f\"https://trace.moe/preview.php?anilist_id={self.anilist_id}&file=\"\n f\"{quote(self.filename)}&t={self.time}&token={tokenthumb}\"\n )\n self.preview_scene = (\n f\"https://media.trace.moe/video/{self.anilist_id}/{quote(self.filename)}\"\n f\"?t={self.time}&token={tokenthumb}\"\n )\n\n @property\n def time_str(self):\n hours, minutes = 
divmod(self.time, 3600)\n        minutes, seconds = divmod(minutes, 60)\n        return \"{:02}:{:02}:{:02}\".format(int(hours), int(minutes), int(seconds))\n\n\nclass TraceMoe:\n    def __init__(self, data: dict):\n        self.searched_for = data.get(\"RawDocsCount\")\n        self.rawsearchtime = data.get(\"RawDocsSearchTime\")\n        self.comparetime = data.get(\"ReRankSearchTime\")\n        self.cached = data.get(\"CacheHit\")\n        self.times_searched = data.get(\"trial\")\n        self.limit_remain = data.get(\"limit\")\n        self.limit_reset = data.get(\"limit_ttl\")\n        self.quota_remain = data.get(\"quota\")\n        self.quota_reset = data.get(\"quota_ttl\")\n        self.docs = [TraceMoeDoc(doc) for doc in data.get(\"docs\")]\n\n    @classmethod\n    async def from_image(cls, ctx, image_url):\n        apikeys = await ctx.bot.get_shared_api_tokens(\"reverseimagesearch\")\n        apikey = apikeys.get(\"tracemoe\", \"\")\n        async with ctx.typing():\n            try:\n                async with ctx.cog.session.get(image_url, raise_for_status=True) as resp:\n                    image = BytesIO(await resp.read())\n                image_file = BytesIO()\n                with Image.open(image) as pil_image:\n                    with pil_image.convert(\"RGB\") as converted:\n                        converted.thumbnail((2048, 2048))\n                        converted.save(image_file, \"JPEG\")\n                image.close()\n            except UnidentifiedImageError:\n                raise ValueError(_(\"Unable to convert image.\"))\n            except ClientResponseError as e:\n                raise ValueError(_(\"Unable to get image: {}\").format(e.message))\n            try:\n                async with ctx.cog.session.post(\n                    f\"{BASE_API_URL}/search\",\n                    params={\"token\": apikey},\n                    json={\"image\": b64encode(image_file.getvalue()).decode()},\n                    raise_for_status=True,\n                ) as data:\n                    image_file.close()\n                    return cls(await data.json(loads=json.loads))\n            except ClientResponseError as e:\n                raise ValueError(\n                    _(\n                        \"Unable to search for provided image, trace.moe returned {status} ({message})\"\n                    ).format(status=e.status, message=e.message)\n                )\n\n    @classmethod\n    async def me(cls, ctx):\n        async with ctx.cog.session.get(f\"{BASE_API_URL}/me\") as data:\n            data = await data.json(loads=json.loads)\n        me_tuple = namedtuple(\n            \"me\",\n            \"user_id, email, limit, limit_ttl, quota, \"\n            \"quota_ttl, user_limit, user_limit_ttl, \"\n            \"user_quota, user_quota_ttl\",\n        )\n        return me_tuple(\n            data.get(\"user_id\"),\n            data.get(\"email\"),\n            data.get(\"limit\"),\n            data.get(\"limit_ttl\"),\n            data.get(\"quota\"),\n            data.get(\"quota_ttl\"),\n            data.get(\"user_limit\"),\n            data.get(\"user_limit_ttl\"),\n            data.get(\"user_quota\"),\n            data.get(\"user_quota_ttl\"),\n        )\n","sub_path":"reverseimagesearch/tracemoe.py","file_name":"tracemoe.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"352937055","text":"#!/usr/bin/env python\n\nimport sys\n\n# check the argument count before indexing sys.argv and exit on bad usage;\n# the original performed this check after the indexing and never exited\nif (len(sys.argv) != 3):\n    print(\"wrong number of arguments\")\n    sys.exit(1)\n\nnum = int(sys.argv[1])\nword = str(sys.argv[2])\ncount = 0\n\nwhile (count < num):\n    print(word)\n    count += 1\n\n","sub_path":"2041-labs/lab06/echon.py","file_name":"echon.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"153967388","text":"from director import *\nfrom pathTree import *\n\ndef main():\n    director = Director()\n    tostop = 0\n    isfirst=1\n    #adjMat = {}  (left commented out: adjMat is presumably the shared dict brought in by the star import from director; rebinding it locally would shadow that shared state)\n    adjMat[\"byName\"] = {}\n    adjMat[\"byObj\"] = {}\n    adjMat['byIp'] = {}\n    PCListByIp = {}\n    while(not tostop):\n        if(isfirst):\n            print(\"Choose to add one of the following components:\")\n            isfirst = 0\n        else:\n            print(\"Make a choice:\")\n        choice1=int(raw_input(\"1. 
Add a new component \n2. Setting Up of network is done \n\"))\n        if(choice1 == 1):\n            print(\"Which component do you want to add:\")\n            choice2=int(raw_input(\"1. PC \n2. Hub \n3. Router\n\"))\n            if(choice2 == 1):\n                print(\"Enter the following details:\")\n                name = inputName(adjMat)\n                ipaddress = inputIp('IP address', adjMat, IPList)\n                subnetmask = None\n                gateway = inputIpForRoutingTable(\"Enter Gateway: \", adjMat, IPList)\n                dnsserver = None\n                director.setBuilder(PCBuilder())\n                pc = director.createComponent(name = name, ipaddress = ipaddress, subnetmask = subnetmask, gateway = gateway, dnsserver = dnsserver)\n                print(\"The current list of objects that exists is as follows:\")\n                print(list(adjMat[\"byName\"].keys()))\n                PCListByIp[ipaddress] = pc\n                print(\"Enter the name of object that this given PC connects to. To not enter a link, enter 0\")\n                print(pc)\n                linkChoice = checkName('',adjMat)\n                director.createLink(linkChoice)\n                \n                pc.specification()\n\n            elif(choice2 == 2):\n                print(\"Enter the following details:\")\n                name = inputName(adjMat) \n                director.setBuilder(HubBuilder())\n                hub = director.createComponent(name = name)\n                print(\"The current list of objects that exists is as follows:\")\n                print(list(adjMat[\"byName\"].keys()))\n                print(\"Enter the names of objects that this given Hub connects to. To stop entering links, enter 0\")\n                linkChoice = checkName('', adjMat) #raw_input()\n                director.createLink(linkChoice)\n                \n                hub.specification()\n            elif(choice2 == 3):\n                print(\"Enter the following details:\")\n                name = inputName(adjMat) #raw_input(\"Enter Name: \")\n                mac = raw_input(\"Enter mac address: \")\n                print(\"Enter the details pertaining to the ethernet ports. Maximum number of ports in a router is 4\")\n                count= 1\n                tostop2 = 0\n                fastethernet =[]\n                temp = {}\n                while(count <= 4 and not tostop2):\n                    choice3 = raw_input(\"Details for port\"+str(count)+\"\\nPress any key to continue, or press 0 to skip setup of this port: \")\n                    if(choice3 != '0'):\n                        temp = {}\n                        temp[\"mac\"] = mac\n                        temp[\"ipaddress\"] = inputIp('ip address for this port',adjMat, IPList) #raw_input(\"Enter ip address: \")\n                        temp[\"subnetmask\"] = None #inputIp('subnet mask')\n                        fastethernet.append(temp)\n                    count += 1 # moved out of the if-block: entering 0 must still advance to the next port, otherwise this loop never terminates\n                print(\"Enter the details pertaining to the Routing Table Entries\")\n                tostop3 = 0\n                routes =[]\n                while(not tostop3):\n                    choice4= raw_input(\"Press any key to add to routing table. Enter 0 to stop adding routing table entries:\")\n                    if(choice4 != '0'):\n                        temp = {}\n                        temp[\"network\"] = inputIpForRoutingTable('network address',adjMat, IPList)\n                        temp[\"mask\"] = None \n                        temp[\"nexthop\"] = inputIpExisting('next hop IP address',adjMat, IPList)\n                        zeroFrom = len(temp['network']) #should be 15, for format xxx.xxx.xxx.xxx\n                        while zeroFrom > 0:\n                            if temp['network'][zeroFrom-1]=='0' or temp['network'][zeroFrom-1]=='.':\n                                zeroFrom -= 1\n                            else:\n                                break\n                        temp['zeroFrom'] = zeroFrom\n                        routes.append(temp)\n                    else:\n                        tostop3 = 1\n                director.setBuilder(RouterBuilder())\n                router = director.createComponent(name = name, fastethernet = fastethernet, routes = routes)\n                print(\"The current list of objects that exists is as follows:\")\n                print(list(adjMat[\"byName\"].keys()))\n                print(\"Press any key to add a router link. 
To stop entering links, enter 0\")\n linkChoice = raw_input()\n director.createLink(linkChoice)\n router.specification()\n\n else:\n continue\n elif(choice1 == 2):\n tostop = 1\n print(adjMat)\n else:\n continue\n\n print(PCListByIp)\n\n\n def printTree(treeRoot):\n children = treeRoot.getChildren()\n print('('+str(treeRoot.getCurrNode().getName())+','+str(treeRoot)+' : '+str(children))\n for i in children:\n printTree(i)\n\n\n def pingTraverse(ping, rootTree):\n curr = rootTree.getCurrNode()\n ping['visitedObjs'].append(curr)\n print(curr)\n childList = curr.send(ping, adjMat, PCListByIp)\n print(childList)\n for i in childList:\n if i in ping['visitedObjs']:\n continue\n child = pathNode(i, curr)\n rootTree.addChild(child)\n print(str(rootTree.getCurrNode().getName()))\n print(' : ')\n print(str(i.getName()))\n raw_input('')\n if i.receive(ping) == True:\n return True\n currResult = pingTraverse(ping, child)\n if currResult == True:\n return True\n\n while True:\n print('\\n\\n\\n\\nPing trial:')\n src = inputIpExisting('source IP address', adjMat, IPList) \n dst = inputIpExisting('destination IP address', adjMat, IPList) \n ping = {'sourceIP':src,'destinationIP':dst,'visitedObjs':[]}\n if src not in adjMat['byIp']:\n print('Failed ping')\n continue\n if src == dst:\n print('Successful ping')\n continue\n currentObjs = [adjMat['byIp'][src]]\n pingTraverseTree = pathNode(PCListByIp[src],None)\n resultTree = pingTraverse(ping, pingTraverseTree)\n if resultTree == True:\n print('Successful ping')\n else:\n print('Ping failed')\n #printTree(pingTraverseTree)\n \n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"112182901","text":"import re, os, sys, argparse\nfrom distutils.util import strtobool\nfrom collections import namedtuple\nfrom scripts.single_mode import copy_single_mode, delete_single_mode\nfrom scripts.all_mode import copy_all_mode, delete_all_mode\nfrom scripts.daemon_mode import daemon_mode\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(cur_dir, \"VS-Utils\"))\nfrom prints import errmsg, debugmsg, init_logging\n\ndef ask_user(question):\n answer = input(question)\n if not answer:\n answer = 'Y' if 'Y' in re.split(r'[\\[\\]|]', question)[1:3] else 'N'\n return strtobool(answer)\n\ndef main():\n\n ## Get arguments\n choices = ['copy-single', 'copy-all', 'delete-single', 'delete-all']\n parser = argparse.ArgumentParser(usage=\"sudo -u postgres python main.py -m -p \")\n parser.add_argument('--daemon', help=\"Whether to run daemon mode\", action=\"store_true\")\n parser.add_argument(\"--playlist\", help=\"Name of a playlist\", metavar='')\n parser.add_argument(\"--user\", help=\"Username\", metavar='')\n parser.add_argument(\"--loglvl\", help=\"Logging level\", metavar='', default=10, type=int, nargs='?')\n parser.add_argument(\"--mode\", help=\"Copy/Delete a playlist to/of a single or all user(s): {%(choices)s}\",\n default='copy-single', nargs='?', choices=choices, metavar=\"\")\n args = parser.parse_args()\n args.script_dir = cur_dir\n args.scope = \"postgres\"\n\n ## Initialize logging\n cfg = namedtuple('cfg', [\"log_level\"])\n cfg = cfg(args.loglvl)\n init_logging(args, cfg)\n\n ## Daemon execution\n if args.daemon:\n return daemon_mode(args)\n\n ## Copy single mode\n if (args.mode == 'copy-single' and args.user != None and args.playlist != None):\n 
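# example invocation for this branch (hypothetical user/playlist names, flags taken from the argparse setup above):\n        #   sudo -u postgres python main.py --mode copy-single --user alice --playlist road_trip\n        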
question = \"Are you sure to COPY the playlist '{playlist}' from \" \\\n \"admin to user '{user}'? [Y|n]: \".format(playlist=args.playlist, user=args.user)\n if (ask_user(question)):\n copy_single_mode(args)\n else: exit(-1)\n\n ## Delete single mode\n elif (args.mode == 'delete-single' and args.playlist != None and args.user != None):\n question = \"Are you sure to DELETE the playlist '{playlist}' from \" \\\n \"user '{user}'? [y|N]: \".format(playlist=args.playlist, user=args.user)\n if (ask_user(question)):\n delete_single_mode(args)\n else: exit(-1)\n\n ## Copy all mode\n elif (args.mode == 'copy-all' and args.playlist != None):\n if (args.user != None):\n debugmsg(\"Your parameter for \\\"--user\\\" will be ignored, it is not necessary here\")\n question = \"Are you sure to COPY the playlist '{playlist}' from admin to ALL users? \" \\\n \"[Y|n]: \".format(playlist=args.playlist)\n if (ask_user(question)):\n copy_all_mode(args)\n else: exit(-1)\n\n ## Delete all mode\n elif (args.mode == 'delete-all' and args.playlist != None):\n if (args.user != None):\n debugmsg(\"Your argument for the \\\"--user\\\" will be ignored, it is not necessary here\")\n question = \"Are you sure to DELETE the playlist '{playlist}' from ALL users EXCEPT admin? \" \\\n \"[y|N]: \".format(playlist=args.playlist)\n if (ask_user(question)):\n delete_all_mode(args)\n else:\n exit(-1)\n\n else:\n errmsg(\"Invalid arguments, try the help section (-h)\")\n exit(-1)\n\n debugmsg(\"Finish ...\", args.mode)\n\nif __name__== \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"171775974","text":"from ib_insync import *\n# util.startLoop() # uncomment this line when in a notebook\n\nib = IB()\nib.connect('127.0.0.1', 7497, clientId=1)\n\ncontract = Forex('EURUSD')\nbars = ib.reqHistoricalData(\n contract, endDateTime='', durationStr='30 D',\n barSizeSetting='1 hour', whatToShow='MIDPOINT', useRTH=True)\n\n# convert to pandas dataframe:\ndf = util.df(bars)\nprint(df)","sub_path":"IBKR_py/ibkrBot.py","file_name":"ibkrBot.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"274877575","text":"from flask_restplus import Resource, fields\n\nfrom . 
import api, book_v1_ns\nfrom .token_required import token_required\nfrom app.services import book_service, BookNotFoundException\n\n\nbook = api.model('Book', {\n 'bookId': fields.String(required=True, description='The book identifier'),\n 'title': fields.String(required=True, description='The book title'),\n 'author': fields.String(required=True, description='The book author'),\n 'genre': fields.String(required=True, description='The book genre'),\n 'read': fields.Boolean(required=False, description='The book read flag')\n})\nbookList = api.model('BookList', {\n 'books': fields.Nested(book, description='Array of book')\n})\ngenreList = api.model('GenreList', {\n 'genres': fields.List(fields.String(description=\"A book genre\"))\n})\n\n\nparser = api.parser()\nparser.add_argument('title', type=str, required=True, help='Title for the book', location='json')\nparser.add_argument('author', type=str, required=True, help='Author for the book', location='json')\nparser.add_argument('genre', type=str, required=True, help='Genre for the book', location='json')\nparser.add_argument('read', type=bool, required=False, help='Read flag for the book', location='json')\n\n\n@book_v1_ns.route('/')\nclass BookList(Resource):\n\n @api.doc(description='Get a list of books')\n @api.marshal_list_with(bookList)\n @token_required\n def get(self, current_user):\n book_list = book_service.get_books()\n return {'books': book_list}\n\n @api.doc(parser=parser)\n @api.marshal_with(book, code=201)\n @token_required\n def post(self, current_user):\n args = parser.parse_args()\n added_book = book_service.add_book(\n title=args['title'],\n author=args['author'],\n genre=args['genre'],\n read=args['read']\n )\n return added_book, 201\n\n\n@book_v1_ns.route('/')\n@api.param('bookId', 'The book identifier')\n@api.response(404, 'Book not found')\nclass Book(Resource):\n\n @api.doc('get_book')\n @api.marshal_with(book)\n @token_required\n def get(self, current_user, bookId):\n try:\n return book_service.find_book(bookId)\n except BookNotFoundException:\n api.abort(404, message=\"Book {} doesn't exist\".format(bookId))\n\n @api.doc(responses={204: 'Book deleted'})\n @token_required\n def delete(self, current_user, bookId):\n try:\n removed_book = book_service.delete_book(bookId)\n return removed_book, 204\n except BookNotFoundException:\n api.abort(404, message=\"Book {} doesn't exist\".format(bookId))\n\n @api.doc(parser=parser)\n @api.marshal_with(book)\n @token_required\n def put(self, current_user, bookId):\n try:\n args = parser.parse_args()\n updated_book = book_service.update_book(book_id=bookId, new_book=args)\n return updated_book, 201\n except BookNotFoundException:\n api.abort(404, message=\"Book {} doesn't exist\".format(bookId))\n\n\n@book_v1_ns.route('/genres')\nclass GenreList(Resource):\n\n @api.doc(description='Get a list of unique genres within the books')\n @api.marshal_list_with(genreList)\n @token_required\n def get(self, current_user):\n genre_list = book_service.get_genres()\n return {'genres': genre_list}\n\n","sub_path":"server/app/api/book_v1_api.py","file_name":"book_v1_api.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"388498267","text":"# This is a module to demonstrate how a model could be implemented in SedEdu\n# The module is written and executed in Python\n\n\n# import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.widgets as widget\n\n\n# parameters\nD = 50 # 
diffusivity\nU = 0\ndt = 1\ndx = 50\n\n\n# set up the x and z arrays for the hillslope\nx = np.arange(start=0, stop=1000, step=dx)\nz = np.zeros(x.shape)\nz[0:np.int(x.size/2)] = 100\nz_init = z\ndzdt = 0\n\n\n# limits for the sliders\nD_min = 0\nD_max = 500\nU_max = 1\nU_min = 0\nC_max = 1\nC_min = 0\n\n\n# setup the figure\nplt.rcParams['toolbar'] = 'None' # turn off the matplotlib toolbar in the figure\nplt.rcParams['figure.figsize'] = 5, 7 # size of the figure in inches\n\nfig, ax = plt.subplots() # gives us a figure object and axes object to manipulate and plot things into\nfig.subplots_adjust(left=0.2, bottom=0.4, top=0.95, right=0.9) # where do we want the limits of the axes object\n\nfig.canvas.set_window_title('Hillslope model') # title of the figure window\nax.set_xlabel(\"x-distance\") # the axis xlabel\nax.set_ylabel(\"elevation\") # the axis ylabel\nax.set_ylim(0, 200) # the axis y limits\nax.set_xlim(x.min(), x.max())\n\n\n# add plot elements\ndef xz_to_fill(x, z):\n \"\"\"\n this simple function provides a convenient way to calculate the polygon\n vertices for the hillslope from the x and z vectors.\n \"\"\"\n x_fill = np.hstack([x, np.flipud(x)])\n z_fill = np.hstack([z, -np.ones(z.shape)])\n return x_fill, z_fill\n\nthesky, = ax.fill(np.array([-1, -1, x.max(), x.max()]),\n np.array([-1, 250, 250, -1]), facecolor='aliceblue', edgecolor='none')\n# theline, = plt.plot(x, z, lw=1.5, color='green')\nx_fill, z_fill = xz_to_fill(x, z)\nthehill, = ax.fill(x_fill, z_fill, facecolor='forestgreen', edgecolor='k')\n\nthetext = ax.text(0.05, 0.05, '$dz/dt_{x=0}$' + '= {:.2f}'.format(dzdt), transform=ax.transAxes)\n\n# add slider\nwidget_color = 'lightgoldenrodyellow'\n\nslide_D_ax = plt.axes([0.2, 0.25, 0.4, 0.05], facecolor=widget_color)\nslide_D = widget.Slider(slide_D_ax, 'diffusivity', D_min, D_max, \n valinit=D, valstep=1, \n valfmt='%g', transform=ax.transAxes)\n\nslide_U_ax = plt.axes([0.2, 0.15, 0.4, 0.05], facecolor=widget_color)\nslide_U = widget.Slider(slide_U_ax, 'uplift at\\n crest', U_min, U_max, \n valinit=U, valstep=0.001, \n valfmt='%g', transform=ax.transAxes)\n\nslide_C_ax = plt.axes([0.2, 0.05, 0.4, 0.05], facecolor=widget_color)\nslide_C = widget.Slider(slide_C_ax, 'downcut at\\n valley', C_min, C_max, \n valinit=U, valstep=0.001, \n valfmt='%g', transform=ax.transAxes)\n\nbtn_hill_reset_ax = plt.axes([0.7, 0.2, 0.25, 0.04])\nbtn_hill_reset = widget.Button(btn_hill_reset_ax, 'Reset hillslope', \n color=widget_color, hovercolor='0.975')\n\nbtn_slide_reset_ax = plt.axes([0.7, 0.1, 0.25, 0.04])\nbtn_slide_reset = widget.Button(btn_slide_reset_ax, 'Reset sliders', \n color=widget_color, hovercolor='0.975')\n\n# reset functions\ndef reset_hillslope(event):\n z[:] = z_init[:]\n\ndef reset_sliders(event):\n slide_D.reset()\n slide_U.reset()\n slide_C.reset()\n fig.canvas.draw_idle() \n\nbtn_hill_reset.on_clicked(reset_hillslope)\nbtn_slide_reset.on_clicked(reset_sliders)\n\n# show the results\nplt.ion()\n\n# preallocate vectors for consistency in size\nsedflux_in = np.empty(x.shape, dtype=float)\nsedflux_out = np.empty(x.shape, dtype=float)\n\nwhile plt.fignum_exists(1):\n\n # read values from the slider\n D = slide_D.val\n\n # calculate slope and sediment flux\n rise = z[:-1] - z[1:]\n run = x[1:] - x[:-1]\n slope = rise / run\n q = slope * D # q is some dimensionless sediment flux, based just on slope and diffusivity \n sedflux_out[0:-1] = q * dt\n\n # compute the sed flux into each cell\n sedflux_in[0] = 0\n sedflux_in[1:] = sedflux_out[:-1]\n\n # apply some boundary 
condition to define flux out of downstream cell\n sedflux_out[-1] = sedflux_out[-1] # zero-gradient boundary\n\n # compute the change in elevation per node\n dz = (sedflux_in - sedflux_out) / dx\n\n # apply boundary condition updates\n dz[0] = dz[0] + slide_U.val\n dz[-1] = dz[-1] + -slide_C.val\n if z[-1] + dz[-1] < 0:\n dz[-1] = 0\n\n # update elevation\n z = z + dz\n dzdt = dz[0] / dt\n\n # update the plot\n # theline.set_ydata(z)\n x_fill, z_fill = xz_to_fill(x, z)\n thehill.set_xy(np.row_stack([x_fill, z_fill]).transpose())\n thetext.set_text('$dz/dt_{x=0}$' + '= {:.2f}'.format(dzdt))\n\n plt.pause(0.001)\n","sub_path":"hillslope/CSDMS_hillslope_module_part4.py","file_name":"CSDMS_hillslope_module_part4.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"81978235","text":"import unittest\nfrom unittest.mock import patch\nfrom mycroftapi import MycroftAPI\n\n\nclass MockWS(object):\n def __init__(self):\n pass\n\n def send(self, message):\n self.message = message\n\n\nclass TestSet(unittest.TestCase):\n @patch('mycroftapi.create_connection')\n def test_reset_display(self, mock_create_conn):\n # Create simple replacement websocket object and return it\n # when creating sockets\n mock_ws = MockWS()\n mock_create_conn.return_value = mock_ws\n # Test that init calls create_connection with correct param\n m = MycroftAPI('127.0.0.1')\n mock_create_conn.assert_called_with(\n \"ws://\" + '127.0.0.1' + \":8181/core\")\n # Check that message bus message looks like what we expect\n # Expected data to websocket\n mycroft_type = '\"enclosure.reset\"'\n message = '{\"type\": ' + mycroft_type + '}'\n m.reset_display()\n self.assertEqual(message, mock_ws.message)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/unittests/display/test_display.py","file_name":"test_display.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"483803557","text":"#!/usr/bin/env python3\n\n# dennis(a)yurichev, 2017\n\nimport my_utils, SAT_lib\n\ndef div_test():\n s=SAT_lib.SAT_lib(False)\n\n BITS=32\n divident=s.alloc_BV(BITS)\n divisor=s.alloc_BV(BITS)\n \n s.fix_BV(divident, SAT_lib.n_to_BV(1234567890, BITS))\n s.fix_BV(divisor, SAT_lib.n_to_BV(123, BITS))\n\n quotient, remainder=s.divider(divident, divisor)\n\n assert s.solve()==True\n\n assert SAT_lib.BV_to_number(s.get_BV_from_solution(quotient))==10037137\n assert SAT_lib.BV_to_number(s.get_BV_from_solution(remainder))==39\n\ndef SumIsNot1_test():\n s=SAT_lib.SAT_lib(False)\n\n _vars=s.alloc_BV(4)\n s.SumIsNot1(_vars)\n\n assert s.count_solutions()==12\n\ndef AND_list_test():\n s=SAT_lib.SAT_lib(False)\n\n _vars=s.alloc_BV(4)\n s.fix(s.AND_list(_vars),False)\n\n assert (s.count_solutions()==15)\n\ndiv_test()\nSumIsNot1_test()\nAND_list_test()\n\n","sub_path":"libs/SAT_lib_tests.py","file_name":"SAT_lib_tests.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"224303203","text":"import jdatetime\nfrom django.db.models import F\nfrom django.urls import reverse_lazy\nfrom django_filters.views import FilterView\nfrom django.views.generic import RedirectView, DetailView, TemplateView\nfrom jdatetime import j_days_in_month\n\nfrom customer.helpers.credit import get_total_credit, STRATEGIES, get_total_credit_letter\nfrom customer.models import Customer\nfrom 
request.models import Xpref\nfrom request.filters.filters import ReportFilter\n\n\nclass Dash(FilterView):\n model = Xpref\n template_name = 'requests/gentelella/report/report.html'\n context_object_name = 'proformas'\n filterset_class = ReportFilter\n\n def get(self, request, *args, **kwargs):\n self.request.session['by_month'] = kwargs.get('month', None)\n return super(Dash, self).get(request, *args, **kwargs)\n\n def get_sales_by_user(self):\n data = Xpref.group_by(qs=self.object_list, group_by_title='req_id__owner__last_name')\n data = data.annotate(\n user=F('req_id__owner'),\n )\n tk = Xpref.get_total_kw(data)\n mnt = Xpref.get_total_amount(data)\n return {\n 'sales': data.annotate(\n percent=100 * F('amount') / mnt,\n kw_percent=100 * F('kw') / tk,\n ),\n 'tkw': tk,\n 'mnt': mnt\n }\n\n def get_sales_by_type(self):\n data = Xpref.group_by(qs=self.object_list, group_by_title='prefspec__reqspec_eq__type__title')\n data = data.annotate(\n type=F('prefspec__reqspec_eq__type'),\n )\n tk = Xpref.get_total_kw(data)\n mnt = Xpref.get_total_amount(data)\n return {\n 'sales': data.annotate(\n percent=100 * F('amount') / mnt,\n kw_percent=100 * F('kw') / tk,\n ),\n 'tkw': tk,\n 'mnt': mnt\n }\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super(Dash, self).get_context_data(object_list=object_list, **kwargs)\n context['sales_by_user'] = self.get_sales_by_user()\n context['sales_by_type'] = self.get_sales_by_type()\n return context\n\n def get_filterset_kwargs(self, filterset_class):\n kwargs = super(Dash, self).get_filterset_kwargs(filterset_class)\n if self.by_month(kwargs):\n return self.set_kwargs_by_month(kwargs)\n if self.filter_is_empty(kwargs):\n return self.set_default(kwargs)\n\n return self.apply_filters(kwargs)\n\n def by_month(self, kwargs):\n return self.request.session.get('by_month', None) and not kwargs.get('data', None)\n\n def set_kwargs_by_month(self, kwargs):\n today = jdatetime.date.today()\n day = today.day\n month = today.month - self.request.session.get('by_month')\n year = today.year\n\n while month <= 0:\n month += 12\n year -= 1\n\n if day < 1:\n day = 1\n\n elif day > j_days_in_month[month - 1]:\n day = j_days_in_month[month - 1]\n\n if month == 12 and day == 30 and jdatetime.date(year=year, month=1, day=1).isleap():\n # for leap years it's ok to have 30 days in Esfand\n pass\n elif month == 12 and day > 30 and not jdatetime.date(year=year, month=1, day=1).isleap():\n day = 30\n\n date_start = jdatetime.date(year=year, month=month, day=day)\n kwargs['data'] = {\n 'perm_date_after': str(date_start),\n 'perm_date_before': str(today),\n }\n return kwargs\n\n @staticmethod\n def filter_is_empty(kwargs):\n return not kwargs.get('data')\n\n def apply_filters(self, kwargs):\n self.request.session['report-filters'] = self.request.GET\n kwargs.update({\n 'data': self.request.session['report-filters']\n })\n return kwargs\n\n def set_default(self, kwargs):\n kwargs_temp = kwargs.copy()\n today = jdatetime.date.today()\n month_start = jdatetime.date(year=today.year, month=today.month, day=1)\n kwargs_temp['data'] = {\n 'perm_date_after': str(month_start),\n }\n self.request.session['report-filters'] = {\n 'data': {\n 'perm_date_after': str(month_start),\n }\n }\n return kwargs_temp\n\n\nclass NMonth(RedirectView):\n url = reverse_lazy('report:dash')\n\n\nclass Rep(TemplateView):\n template_name = 
'requests/gentelella/report/rep.html'\n\n","sub_path":"app/request/views/report/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"218977034","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nimport math\n\nSTATE_COUNT_THRESHOLD = 2\nLOOKAHEAD_WPS = 200 # Number of waypoints ahead our vehicle where to search for traffic lights\n\nclass TLDetector(object):\n    def __init__(self):\n        rospy.init_node('tl_detector') #, log_level=rospy.WARN)\n\n        self.pose = None\n        self.waypoints = None\n        self.camera_image = None\n        self.lights = []\n        self.last_wp = None\n\n        self.visibility = rospy.get_param('~waypoint_lookahead_nb', LOOKAHEAD_WPS)\n\n        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n        '''\n        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n        helps you acquire an accurate ground truth data source for the traffic light\n        classifier by sending the current color state of all traffic lights in the\n        simulator. When testing on the vehicle, the color state will not be available. You'll need to\n        rely on the position of the light and the camera image to predict it.\n        '''\n        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n        config_string = rospy.get_param(\"/traffic_light_config\")\n        self.config = yaml.safe_load(config_string) # safe_load: yaml.load without an explicit Loader is deprecated and unsafe\n\n        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n        self.bridge = CvBridge()\n        self.light_classifier = TLClassifier()\n        self.listener = tf.TransformListener()\n\n        self.state = TrafficLight.UNKNOWN\n        self.last_state = TrafficLight.UNKNOWN\n        self.last_wp = -1\n        self.state_count = 0\n\n        #keep track of the last known position of our vehicle\n        self.last_pos = -1\n\n        rospy.spin()\n\n    def pose_cb(self, msg):\n        self.pose = msg\n\n    def waypoints_cb(self, waypoints):\n        self.waypoints = waypoints\n\n    def traffic_cb(self, msg):\n        self.lights = msg.lights\n\n    def image_cb(self, msg):\n        \"\"\"Identifies red lights in the incoming camera image and publishes the index\n            of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n        Args:\n            msg (Image): image from car-mounted camera\n\n        \"\"\"\n        self.has_image = True\n        self.camera_image = msg\n        light_wp, state = self.process_traffic_lights()\n\n        '''\n        Publish upcoming red lights at camera frequency.\n        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n        of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n rospy.logdebug('publishing WP: %s', light_wp)\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n rospy.logdebug('publishing wp: %s', self.last_wp)\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_waypoint(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint in self.waypoints\n\n \"\"\"\n\n #extract car yaw data from quaternion\n _,_,car_yaw = tf.transformations.euler_from_quaternion(\n [pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w]\n )\n\n #rospy.logdebug('position: (%f,%f)', pose.position.x, pose.position.y)\n\n #loop through waypoints starting from few waypoints before our last known position (to limit computational time)\n #wp_idx = max(0, self.last_pos - 300)\n #if self.last_pos == len(self.waypoints.waypoints) - 1:\n # wp_idx = 0\n i = 0\n wp_idx = 0\n dist = None\n min_dist = None\n\n #while (dist == None or dist < 0) and i < len(self.waypoints.waypoints):\n while i < len(self.waypoints.waypoints):\n wp_x = self.waypoints.waypoints[i].pose.pose.position.x\n wp_y = self.waypoints.waypoints[i].pose.pose.position.y\n\n dx = wp_x - pose.position.x\n dy = wp_y - pose.position.y\n\n #calculate the distance in car coordinates between the car and the waypoint\n car_dx = math.cos(-car_yaw) * dx - math.sin(-car_yaw) * dy\n car_dy = math.sin(-car_yaw) * dx + math.cos(-car_yaw) * dy\n dist = math.sqrt(car_dx**2 + car_dy**2)\n\n #rospy.logdebug('wp %i (%f,%f): dist: %f (dx:%f, dy:%f)', i, wp_x, wp_y, dist, dx, dy)\n if car_dx > 0 and (min_dist == None or dist < min_dist):\n min_dist = dist\n wp_idx = i\n #rospy.logdebug('dist_dx: (%f,%f), yaw: %f, idx: %i, dist_sqrt: %f', car_dx, car_dy, car_yaw, wp_idx, dist)\n i += 1\n\n rospy.logdebug('nearest waypoint: (%f,%f) - idx: %i, dist: %s', wp_x, wp_y, wp_idx, min_dist)\n\n return wp_idx\n\n def get_closest_trafficlight(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n\n Args:\n pose (Pose): position to match a traffic light to\n\n Returns:\n (float, float): position of the closest traffic light.\n\n \"\"\"\n\n #extract car yaw data from quaternion\n _,_,car_yaw = tf.transformations.euler_from_quaternion(\n [pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w]\n )\n\n min_dist = None\n nearest_tl = None\n tl_idx = -1\n i = 0\n\n for light in self.lights:\n tl_x = light.pose.pose.position.x\n tl_y = light.pose.pose.position.y\n\n dx = tl_x - pose.position.x\n dy = tl_y - pose.position.y\n\n #calculate the distance in car coordinates between the car and the traffic light\n car_dx = math.cos(-car_yaw) * dx - math.sin(-car_yaw) * dy\n car_dy = math.sin(-car_yaw) * dx + math.cos(-car_yaw) * dy\n dist = math.sqrt(car_dx**2 + car_dy**2)\n\n #rospy.logdebug('light nr. 
%i: (%f,%f) - dist: %f', i, tl_x, tl_y, dist)\n if (car_dx > 0 and (min_dist == None or min_dist > dist)):\n min_dist = dist\n tl_idx = i\n nearest_tl = (tl_x, tl_y, tl_idx)\n\n i += 1\n\n if (nearest_tl != None):\n rospy.logdebug('nearest light: (%f,%f) - idx: %i, dist: %s', nearest_tl[0], nearest_tl[1], nearest_tl[2], min_dist)\n else:\n rospy.logdebug('nearest light: NONE')\n\n #if (tl_idx > -1 and nearest_tl != None):\n # rospy.logdebug('nearest semaphore: (%f,%f) - idx: %i', nearest_tl[0], nearest_tl[1], nearest_tl[2])\n\n #return nearest traffic light index\n return tl_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n\n #TODO: temporarily I'm returning the color state included in the light data, this will have to be replaced by the classifier\n # rospy.logdebug('light state: %s', light.state)\n # return light.state\n\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\")\n\n #Get classification\n return self.light_classifier.get_classification(cv_image)\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n\n light = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n #find the closest traffic light to the vehicle (if one exists)\n car_idx = -1\n tl_idx = -1\n if(self.pose):\n rospy.logdebug('finding car waypoint:')\n car_idx = self.get_closest_waypoint(self.pose.pose)\n self.last_pos = car_idx\n rospy.logdebug('finding traffic light index:')\n tl_idx = self.get_closest_trafficlight(self.pose.pose)\n\n #rospy.loginfo('vehicle position: (%f,%f) - wp index: %i', self.pose.pose.position.x, self.pose.pose.position.y, car_idx)\n\n #if a traffic light has been found, find its nearest waypoint\n tl_wp_idx = -1\n if tl_idx > -1:\n tl_pos = self.lights[tl_idx].pose.pose\n rospy.logdebug('finding traffic light waypoint:')\n tl_wp_idx = self.get_closest_waypoint(tl_pos)\n light = self.lights[tl_idx]\n\n #if the waypoint for this traffic light is too far from us, ignore it\n if (tl_wp_idx == -1):\n rospy.logdebug('no traffic light has been found ahead')\n elif (tl_wp_idx - car_idx > self.visibility):\n tl_wp_idx = -1\n rospy.logdebug('traffic light too far, will be ignored')\n else:\n rospy.logdebug('traffic light index: %i', tl_wp_idx)\n\n stop_line_wp = None\n #find the waypoint corresponding to the stop line for the closest traffic light (if one exists)\n rospy.logdebug('car wp index: %i', car_idx)\n rospy.logdebug('tl wp index: %i (%i)', tl_wp_idx, tl_idx)\n if (tl_idx > -1 and tl_wp_idx > -1):\n stop_line_pos = stop_line_positions[tl_idx]\n #I remove \"3\" from the stop line X because the value is refered to the center of the vehicle, so we need to stop a bit earlier than that\n stop_line_x = stop_line_pos[0]\n stop_line_y = stop_line_pos[1]\n min_dist = None\n for idx in range(max(0, tl_wp_idx-100), tl_wp_idx):\n tmp_wp = self.waypoints.waypoints[idx]\n dx = stop_line_x - tmp_wp.pose.pose.position.x\n dy = stop_line_y - 
tmp_wp.pose.pose.position.y\n dist = math.sqrt(dx**2 + dy**2)\n if min_dist == None or dist < min_dist:\n min_dist = dist\n stop_line_wp = idx\n\n if stop_line_wp != None:\n rospy.logdebug('stop line index: %i (%f,%f)', stop_line_wp, self.waypoints.waypoints[stop_line_wp].pose.pose.position.x, self.waypoints.waypoints[stop_line_wp].pose.pose.position.y)\n\n '''\n if car_wp > -1:\n rospy.logdebug('car position: %i (%f,%f)', car_wp, self.waypoints.waypoints[car_wp].pose.pose.position.x, self.waypoints.waypoints[car_wp].pose.pose.position.y)\n else:\n rospy.logdebug('car position: not found')\n if tl_wp_idx > -1:\n rospy.logdebug('light position: %i (%f,%f)', tl_wp_idx, self.waypoints.waypoints[tl_wp_idx].pose.pose.position.x, self.waypoints.waypoints[tl_wp_idx].pose.pose.position.y)\n else:\n rospy.logdebug('light position: not found')\n if car_wp > -1 and tl_wp_idx > -1:\n if tl_wp_idx > car_wp:\n rospy.logdebug('distance: %i', tl_wp_idx - car_wp)\n else:\n rospy.logdebug('something went wrong, traffic light waypoint is behind us')\n '''\n\n if light:\n state = self.get_light_state(light)\n return stop_line_wp, state\n #self.waypoints = None\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":12710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"619988863","text":"import argparse\nfrom datetime import datetime\n\nimport pandas as pd\n\nfrom src.train import train\n\nEPOCHS = 1000\nDEVICE = None\nBATCH_SIZE = 128\nSUMMARY_PATH = \"training_summaries/\" + str(datetime.now())\nN_SUMMARY = 100\nN_EVAL = 100\nLEARNING_RATE = 0.001\nWEIGHT_DECAY = 0\n\nif __name__ == \"__main__\":\n print(\"Summary path:\", SUMMARY_PATH)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--epochs\", type=int, default=EPOCHS, help=\"Number of epochs to train\"\n )\n parser.add_argument(\n \"--batch_size\", type=int, default=BATCH_SIZE, help=\"Number of samples per batch\"\n )\n parser.add_argument(\n \"--summary_path\",\n type=str,\n default=SUMMARY_PATH,\n help=\"Path to store tensorboard logs\",\n )\n parser.add_argument(\n \"--n_summary\",\n type=int,\n default=N_SUMMARY,\n help=\"Number steps between each training summary log to tensorboard\",\n )\n parser.add_argument(\n \"--n_eval\",\n type=int,\n default=N_EVAL,\n help=\"Number steps between each evaluation\",\n )\n parser.add_argument(\n \"--layer_sizes\",\n type=str,\n default=\"256,128,2\",\n help=\"Sizes of hidden layers separated by commas (including last layer)\",\n )\n parser.add_argument(\n \"--dropout_prob\",\n type=float,\n default=None,\n help=\"Probability to drop values in dropout layers\",\n )\n parser.add_argument(\n \"--learning_rate\", type=float, default=LEARNING_RATE, help=\"Learning rate\",\n )\n parser.add_argument(\n \"--weight_decay\",\n type=float,\n default=WEIGHT_DECAY,\n help=\"L2 regularization parameter\",\n )\n parser.add_argument(\n \"--device\", type=str, default=DEVICE, help=\"Device to run training on\"\n )\n args = parser.parse_args()\n\n # Load data\n data = pd.read_csv(\"data_new/final_merged.csv\")\n data = data.dropna(subset=[\"image_url\", \"encoded_text\", \"views\"])\n data = data.reset_index(drop=True)\n\n train_df = data.iloc[: (int(len(data) * 0.9))]\n train_df = train_df.sample(frac=1)\n val_df = 
data.iloc[(int(len(data) * 0.9)) :]\n val_df = val_df.sample(frac=1)\n val_df.reset_index(inplace=True)\n train_df.reset_index(inplace=True)\n\n layer_sizes = list(map(lambda s: int(s), args.layer_sizes.split(\",\")))\n\n train(\n train_df,\n val_df,\n epochs=args.epochs,\n batch_size=args.batch_size,\n summary_path=args.summary_path,\n n_summary=args.n_summary,\n n_eval=args.n_eval,\n layer_sizes=layer_sizes,\n dropout_prob=args.dropout_prob,\n learning_rate=args.learning_rate,\n weight_decay=args.weight_decay,\n device=args.device,\n )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63557657","text":"\"\"\"\n\n Student : Shahreen Shahjahan Psyche\n Time : O(N) [One pass]\n Space : O(N)\n\n This code ran successfully for all the test cases in Leetcode\n\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \n # edge case\n if head is None:\n return\n if head.next is None:\n return None\n \n records = []\n res = head\n \n # saving the nodes in an array so that I can solve the problem in one pass\n while(head != None): \n records.append(head)\n head = head.next\n # if I have to remove the first element, then I am just moving my head to next pointer\n if len(records) == n:\n res = res.next\n # if I have to remove the last element then I am just making my second last's next pointer to None\n elif n == 1:\n records[len(records) - 2].next = None\n # finally, for the nth node deletion, I am just connecting the n-1 node to n+1 node\n else:\n records[len(records) - n - 1].next = records[len(records) - n + 1]\n \n del records\n \n return res","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"248113027","text":"# create a function that takes a number,\n# divides ten with it,\n# and prints the result.\n# it should print \"fail\" if the parameter is 0\n\ndef div_ten_by(divisor):\n try:\n print(10 / divisor)\n except ZeroDivisionError:\n print(\"fail\")\n\ndiv_ten_by(3)\ndiv_ten_by(0)","sub_path":"week-03/day_02/divide_by_zero.py","file_name":"divide_by_zero.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"536810536","text":"\"\"\"Building blocks for creating new commands.\"\"\"\n\nimport abc as _abc\nimport argparse as _argparse\nimport sys as _sys\n\nfrom quill.cli import display as _display\n\n\nclass Argument:\n \"\"\"An argument for a command.\"\"\"\n\n def __init__(self, name, **options):\n \"\"\"Create the argument.\n\n Args:\n name (str): The command's name.\n **options: Options to an argparse.ArgumentParser.\n\n \"\"\"\n self.name = name\n self.options = options\n\n def __repr__(self):\n name = repr(self.name)\n options = ', '.join('{}={}'.format(k, repr(v)) for k, v in \n self.options.items())\n return 'Argument({})'.format(', '.join((name, options)))\n\n @classmethod\n def from_existing(cls, argument, name, **added_options):\n \"\"\"Create a new argument using another Argument as a base.\n\n Args:\n name (str): The argument's new name.\n **added_options: Options which will override the previous \n argument's options.\n\n \"\"\"\n options 
= argument.options.copy()\n options.update(added_options)\n return cls(name, **options)\n\n\ndef comma_separated(s):\n \"\"\"An argparse argument type which is a comma-separated string.\n\n Returns:\n set: Set of strings which were separated by commas.\n\n \"\"\"\n try:\n return set(s.split(','))\n except AttributeError:\n raise _argparse.ArgumentTypeError('Argument must be a list of comma-'\n 'separated strings.')\n\n\ndef _format_docstring(docstring, parser):\n \"\"\"Format a docstring with a parser's usage string.\"\"\"\n docstring += '\\n\\n.. code-block:: text\\n\\n'\n usage = parser.format_help().split('\\n')\n usage = '\\n'.join('\\t' + line for line in usage)\n return docstring + usage\n\n\nclass CommandMeta(_abc.ABCMeta):\n \"\"\"Metaclass which creates a new command.\n \n This metaclass ensures that the command has a 'name' attribute and an\n 'arguments' attribute. It creates an argparse.ArgumentParser from the\n previous attributes and stores the result at cls.parser.\n \n \"\"\"\n\n def __new__(cls, clsname, bases, dct):\n required_attributes = ('name',)\n for attribute in required_attributes:\n if attribute not in dct:\n message = \"The command does not define '{}'.\".format(attribute)\n raise TypeError(message)\n\n prog = 'pensieve {}'.format(dct['name'])\n dct['parser'] = parser = _argparse.ArgumentParser(prog=prog)\n if 'arguments' in dct:\n for argument in dct['arguments']:\n if isinstance(argument.name, str):\n parser.add_argument(argument.name, **argument.options)\n else:\n parser.add_argument(*argument.name, **argument.options)\n\n dct['__doc__'] = _format_docstring(dct.get('__doc__', ''), parser)\n\n return super().__new__(cls, clsname, bases, dct)\n\n\nclass Command(metaclass=CommandMeta):\n \"\"\"Abstract class for a basic command.\"\"\"\n\n name = None\n requires_pensieve = False\n arguments = tuple()\n\n def __init__(self, colormap=None):\n \"\"\"Create a callable which runs the command.\n\n Args:\n colormap (pensieve.cli.display.ColorMap): Colormap used to print\n messages to the terminal. 
Defaults to an empty colormap.\n\n \"\"\"\n if colormap is None:\n self.colormap = _display.ColorMap()\n else:\n self.colormap = colormap\n\n @_abc.abstractmethod\n def runner(self, args):\n \"\"\"Called when the command is run.\"\"\"\n pass\n\n def __call__(self, argv=None):\n if argv is None:\n argv = _sys.argv[2:]\n args = self.parser.parse_args(argv)\n return self.runner(args)\n\n\nclass PensieveCommand(Command):\n \"\"\"Abstract class for a command which must be run in a pensieve.\"\"\"\n\n name = None\n requires_pensieve = True\n\n def __init__(self, pensieve, colormap=None):\n Command.__init__(self, colormap=colormap)\n self.pensieve = pensieve\n","sub_path":"quill/cli/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"532034169","text":"def combinePWMScoreFiles(scoreFileNamePrefix, scoreFileNameListFile):\r\n # For each enhancer, put the score for each TF in a 2-D array\r\n scoreFileNameList = open(scoreFileNameListFile)\r\n combinedScoreArray = []\r\n firstFile = True\r\n for scoreFileName in scoreFileNameList:\r\n # Iterate through files with scores and put them all in an array\r\n scoreFile = open(scoreFileNamePrefix + scoreFileName[:len(scoreFileName)-1])\r\n scoreCount = 0\r\n for line in scoreFile:\r\n # Iterate through scores and put each score into the array\r\n if firstFile == True:\r\n # Initialize the combined score array\r\n combinedScoreArray.append([])\r\n lineElements = line.split(\"\\t\")\r\n score = float(lineElements[1])\r\n combinedScoreArray[scoreCount].append(score)\r\n scoreCount = scoreCount + 1\r\n if firstFile == True:\r\n # Finished with the first file\r\n firstFile = False\r\n scoreFile.close()\r\n scoreFileNameList.close()\r\n return combinedScoreArray\r\n\r\ndef writeCombinedScoreArray(combinedScoreArray, combinedScoreFileName):\r\n # Write the 2-D array of scores to a file\r\n combinedScoreFile = open(combinedScoreFileName, 'w+')\r\n for scoreList in combinedScoreArray:\r\n # Iterate through scores for each enhancer\r\n for score in scoreList:\r\n # Iterate through an enhancers scores\r\n combinedScoreFile.write(str(score))\r\n combinedScoreFile.write(\"\\t\")\r\n combinedScoreFile.write(\"\\n\")\r\n combinedScoreFile.close()\r\n\r\nif __name__==\"__main__\":\r\n import sys\r\n scoreFileNamePrefix = sys.argv[1]\r\n scoreFileNameListFile = sys.argv[2]\r\n combinedScoreFileName = sys.argv[3]\r\n \r\n combinedScoreArray = combinePWMScoreFiles(scoreFileNamePrefix, scoreFileNameListFile)\r\n writeCombinedScoreArray(combinedScoreArray, combinedScoreFileName)\r\n","sub_path":"combinePWMScoreFiles.py","file_name":"combinePWMScoreFiles.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"377431669","text":"import random\nimport numpy as np\nfrom scipy import stats\nimport re\nimport unicodedata\nimport string\nimport time\nimport datetime\nimport math\nimport socket\nhostname = socket.gethostname()\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence#, masked_cross_entropy\n\nimport sacrebleu\nimport subprocess\n\nfrom dataprep import prepareData, indexesFromPairs, tensorsFromPairsSorted\nfrom att_model import EncoderRNN, AttnDecoderRNN\nfrom train 
import masked_cross_entropy, train, evaluate, as_minutes, time_since\nfrom dataloader import langDataset\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nbatch_size = 2\nMAX_LENGTH = 70\nattn_model = 'general'\nhidden_size = 500\nn_layers = 2\ndropout = 0.1\n\n# Configure training/optimization\nclip = 50.0\nteacher_forcing_ratio = 1\nlearning_rate = 0.0001\ndecoder_learning_ratio = 5.0\nn_epochs = 5\nepoch = 0\n\nlang_zh, lang_en_zh, train_pairs_zh = prepareData('train.tok.zh','train.tok.en')\n_, _, val_pairs_zh = prepareData('dev.tok.zh','dev.tok.en')\n_, _, test_pairs_zh = prepareData('test.tok.zh','test.tok.en')\n\ntrain_id_zh = indexesFromPairs(lang_zh, lang_en_zh, train_pairs_zh)\nval_id_zh = indexesFromPairs(lang_zh, lang_en_zh, val_pairs_zh)\ntest_id_zh = indexesFromPairs(lang_zh, lang_en_zh, test_pairs_zh)\n\ndef collate_fn_zh(batch):\n \"\"\"\n return (pair_batch, len_batch)\n \"\"\"\n pairs = [sample[0] for sample in batch]\n input_lengths = [sample[1][0] for sample in batch]\n target_lengths = [sample[1][1] for sample in batch]\n max_input_length = max(input_lengths)\n max_target_length = max(target_lengths)\n\n pairstensor = tensorsFromPairsSorted(lang_zh,lang_en_zh,max_input_length,max_target_length,pairs)\n \n return [pairstensor[0], \\\n (torch.from_numpy(np.array(pairstensor[1]))), \\\n pairstensor[2], \\\n (torch.from_numpy(np.array(pairstensor[3])))]\n\ntrain_zh = langDataset(train_id_zh)\nval_zh = langDataset(val_id_zh)\ntest_zh = langDataset(test_id_zh)\n\ntrain_loader_zh = torch.utils.data.DataLoader(dataset=train_zh,\n batch_size=batch_size,\n collate_fn=collate_fn_zh,\n shuffle=True)\nval_loader_zh = torch.utils.data.DataLoader(dataset=val_zh,\n batch_size=batch_size,\n collate_fn=collate_fn_zh,\n shuffle=True)\ntest_loader_zh = torch.utils.data.DataLoader(dataset=test_zh,\n batch_size=batch_size,\n collate_fn=collate_fn_zh,\n shuffle=True)\n\n\n# Initialize models\nencoder = EncoderRNN(lang_zh.n_words, hidden_size, n_layers, dropout=dropout).to(device)\ndecoder = AttnDecoderRNN(attn_model, hidden_size, lang_en_zh.n_words, n_layers, dropout=dropout).to(device)\n\n# Initialize optimizers and criterion\nencoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)\ndecoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)\ncriterion = nn.CrossEntropyLoss().to(device)\n\n# Keep track of time elapsed and running averages\nstart = time.time()\n\ndef test_model_loss(loader, output_lang, encoder, decoder):\n encoder.eval()\n decoder.eval()\n total = 0\n total_loss = 0\n for i, (input_batches, input_lengths, target_batches, target_lengths) in enumerate(loader):\n input_batches, input_lengths, target_batches, target_lengths = input_batches.to(device), input_lengths.to(device), target_batches.to(device), target_lengths.to(device)\n decoded_words, all_decoder_outputs = evaluate(input_batches, input_lengths, output_lang, target_batches, target_lengths, encoder, decoder)\n #print(all_decoder_outputs.shape)\n loss = masked_cross_entropy(\n all_decoder_outputs.contiguous(), \n target_batches.contiguous(), \n target_lengths)\n total_loss += loss.item()\n total += 1\n \n return (total_loss / total)\n\ndef test_model_score(loader, output_lang, encoder, decoder, targetlang):\n encoder.eval()\n decoder.eval()\n total = 0\n total_loss = 0\n predict_file = 'predict_temp'\n predict_lines = open(predict_file, 'w')\n for i, (input_batches, input_lengths, target_batches, target_lengths) in enumerate(loader):\n 
input_batches, input_lengths, target_batches, target_lengths = input_batches.to(device), input_lengths.to(device), target_batches.to(device), target_lengths.to(device)\n        decoded_words, all_decoder_outputs = evaluate(input_batches, input_lengths, output_lang, target_batches, target_lengths, encoder, decoder)\n        loss = masked_cross_entropy(\n            all_decoder_outputs.contiguous(), \n            target_batches.contiguous(),\n            target_lengths)\n        \n        total_loss += loss.item()\n        total += 1\n        predict_lines.write(''.join(decoded_words) + '\\n')\n    predict_lines.close()\n    \n    if targetlang == 'zh':\n        target_file = '../iwslt-zh-en-processed/dev.tok.en'\n    else:\n        target_file = '../iwslt-vi-en-processed/dev.tok.en'\n    result = subprocess.run('cat {} | sacrebleu {}'.format(predict_file,target_file),shell=True,stdout=subprocess.PIPE)\n    score = get_blue_score(str(result))\n    \n    return (total_loss / total), score\n\ndef get_blue_score(result_str):\n    # This helper was called above but never defined in the original file; it is\n    # reconstructed here on the assumption that it pulls the BLEU score out of\n    # sacrebleu's stdout, which begins like: BLEU+case... = 27.4 ...\n    match = re.search(r'=\\s*(\\d+(?:\\.\\d+)?)', result_str)\n    return float(match.group(1)) if match else 0.0\n\ntrain_loss_step = []\nval_loss_step = []\nprint_every = 100\nplot_every = 1000\nn_iters = len(train_loader_zh)\n\nwhile epoch < n_epochs:\n    epoch += 1\n    plot_losses = []\n    print_loss_avg = 0\n    plot_loss_total = 0\n    # Get training data for this cycle\n    for i, (input_batches, input_lengths, target_batches, target_lengths) in enumerate(train_loader_zh):\n        input_batches, input_lengths, target_batches, target_lengths = input_batches.to(device), input_lengths.to(device), target_batches.to(device), target_lengths.to(device)\n        loss = train(\n            input_batches, input_lengths, target_batches, target_lengths,\n            encoder, decoder,\n            encoder_optimizer, decoder_optimizer, criterion\n        )\n        plot_loss_total += loss.item()\n        print_loss_avg += loss.item()\n        \n        if i > 0 and i % print_every == 0:\n            print_loss_avg = plot_loss_total / print_every\n            plot_loss_total = 0\n            print('%s (%d %d%%) %.4f' % (time_since(start, i / n_iters),\n                                         i, i / n_iters * 100, print_loss_avg))\n\n        if i > 0 and i % plot_every == 0:\n            plot_loss_avg = plot_loss_total / plot_every\n            plot_losses.append(plot_loss_avg)\n            plot_loss_total = 0\n            train_loss_step.append(loss.item())  # store plain floats so json.dumps below works\n            val_l = test_model_loss(val_loader_zh, lang_en_zh, encoder, decoder)\n            val_loss_step.append(val_l)\n            print('Epoch: [{}/{}], Step: [{}/{}], Train Loss: {}, Validation Loss: {}'.format(\n                epoch, n_epochs, i+1, len(train_loader_zh), loss.item(), val_l))  # epoch was already incremented at the top of this loop\n    \ntorch.save(encoder.state_dict(), \"att_encoder_zh\")\ntorch.save(decoder.state_dict(), \"att_decoder_zh\")\n \nimport json  # used below; the original file never imported it\n\nout = {\"train_loss_1000_step\": train_loss_step, \n       \"validation_loss_1000_step\": val_loss_step, \n      }\n\nwith open('att_output_zh.txt', 'w') as file:\n    file.write(json.dumps(out))","sub_path":"2.attention_rnn/Att_zh/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
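# The training script above configures clip = 50.0, but the clipping itself is not
# visible in this record; it presumably happens inside train() (imported from
# train.py). A minimal sketch of that conventional step, assuming PyTorch's
# clip_grad_norm_; the helper name and its placement are illustrative only.
import torch

def clipped_update(loss, encoder, decoder, encoder_optimizer, decoder_optimizer, clip=50.0):
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    loss.backward()  # accumulate gradients
    # Rescale all gradients in-place so their global norm is at most `clip`
    torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    encoder_optimizer.step()
    decoder_optimizer.step()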
+{"seq_id":"251786984","text":"import sys \n# # Iterators\n# list2=[1,2,3,4]\n# it=iter(list2)\n\n# # Once the iterator is exhausted, calling next again raises an exception\n# # print(next(it))\n# # print(next(it))\n# # print(next(it))\n# # print(next(it))\n\n# # for x in it:print(x,end=' ')\n\n# while True:\n# \ttry:\n# \t\tprint(next(it))\n# \texcept StopIteration:\n# \t\tsys.exit()\n\n# Create an iterator from a class\n# The __iter__() method returns a special iterator object; that object implements __next__() and marks the end of iteration by raising StopIteration.\n# The __next__() method (next() in Python 2) returns the next item.\n# class MyNumbers:\n# \tdef __iter__(self):\n# \t\tself.a = 1\n# \t\treturn self\n# \tdef __next__(self):\n# \t\tif self.a<=20:\n# \t\t\tx = self.a\n# \t\t\tself.a += 1\n# \t\t\treturn x\n# \t\telse:\n# \t\t\traise StopIteration\n \n# myclass = MyNumbers()\n# myiter = iter(myclass)\n \n# for it in myiter:print(it)\n\n# Generators ------------------------------------------------------\nimport sys\n \ndef fib(maxNum):\n\ta,b,m=0,1,0\n\twhile True:\n\t\tif m>maxNum:\n\t\t\treturn\n\t\tyield a\n\t\ta,b=b,a+b\n\t\tm+=1\nfibTest=fib(10)\n\n# for x in fibTest:\n# \ttry:\n# \t\tprint(x,end=' ')\n# \texcept StopIteration:\n# \t\tsys.exit()\n\n# while True:\n# \ttry:\n# \t\tprint(next(fibTest),end=' ')\n# \texcept StopIteration:\n# \t\tsys.exit()\n\n","sub_path":"python/runoob/09generator.py","file_name":"09generator.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"259221156","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n#\n# ARD - Automatic Reaction Discovery\n#\n# Copyright (c) 2016 Prof. William H. Green (whgreen@mit.edu) and Colin\n# Grambow (cgrambow@mit.edu)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n#\n###############################################################################\n\n\"\"\"\nDiscovers chemical reactions automatically.\n\"\"\"\n\nif __name__ == '__main__':\n    import argparse\n    import os\n\n    from ard.main import ARD, readInput\n\n    # Set up parser for reading the input filename from the command line\n    parser = argparse.ArgumentParser(description='Automatic Reaction Discovery')\n    parser.add_argument('file', type=str, metavar='infile', help='An input file describing the job options')\n    args = parser.parse_args()\n\n    # Read input file\n    input_file = os.path.abspath(args.file)\n    kwargs = readInput(input_file)\n\n    # Set output directory\n    output_dir = os.path.abspath(os.path.dirname(input_file))\n    kwargs['output_dir'] = output_dir\n\n    # Execute job\n    ard = ARD(**kwargs)\n    ard.execute(**kwargs)\n","sub_path":"ard.py","file_name":"ard.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
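# ard.py above derives its paths with os.path.abspath and os.path.dirname; the
# same step written with pathlib, shown only as an equivalent-behavior sketch
# (the function name is ours, not part of ard.py):
from pathlib import Path

def resolve_job_paths(file_arg):
    input_file = Path(file_arg).resolve()  # os.path.abspath(args.file)
    output_dir = input_file.parent         # os.path.dirname(input_file)
    return str(input_file), str(output_dir)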
+{"seq_id":"274625314","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport tempfile\nimport unittest\n\nfrom contextlib import contextmanager\nfrom typing import Iterator\n\nfrom artifacts_dir import ensure_per_repo_artifacts_dir_exists\nfrom btrfs_diff.tests.render_subvols import render_sendstream\nfrom package_image import package_image, Format\nfrom volume_for_repo import get_volume_for_current_repo\n\n\nclass PackageImageTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.subvolumes_dir = os.path.join(\n            get_volume_for_current_repo(\n                1e8, ensure_per_repo_artifacts_dir_exists(sys.argv[0]),\n            ),\n            'targets',\n        )\n        # Works in @mode/opt since the files of interest are baked into the XAR\n        self.my_dir = os.path.dirname(__file__)\n\n    @contextmanager\n    def _package_image(self, json_path: str, format: str) -> Iterator[str]:\n        with tempfile.TemporaryDirectory() as td:\n            out_path = os.path.join(td, 'sendstream')\n            package_image([\n                '--subvolumes-dir', self.subvolumes_dir,\n                '--subvolume-json', json_path,\n                '--format', format,\n                '--output-path', out_path,\n            ])\n            yield out_path\n\n    def _sibling_path(self, rel_path: str):\n        return os.path.join(self.my_dir, rel_path)\n\n    def _assert_sendstream_files_equal(self, path1: str, path2: str):\n        renders = []\n        for path in [path1, path2]:\n            # Callers pass already-joined paths; the original re-wrapped them in\n            # self._sibling_path here, which only worked while the paths stayed\n            # absolute. Open them directly instead.\n            with open(path, 'rb') as infile:\n                renders.append(render_sendstream(infile.read()))\n        self.assertEqual(*renders)\n\n    # This tests `image_package.py` by consuming its output.\n    def test_packaged_sendstream_matches_original(self):\n        self._assert_sendstream_files_equal(\n            self._sibling_path('create_ops-original.sendstream'),\n            self._sibling_path('create_ops.sendstream'),\n        )\n\n    def test_package_image_as_sendstream(self):\n        with self._package_image(\n            self._sibling_path('create_ops.json'), 'sendstream',\n        ) as out_path:\n            self._assert_sendstream_files_equal(\n                self._sibling_path('create_ops-original.sendstream'),\n                out_path,\n            )\n\n    def test_format_name_collision(self):\n        with self.assertRaisesRegex(AssertionError, 'share format_name'):\n\n            class BadFormat(Format, format_name='sendstream'):\n                pass\n","sub_path":"fs_image/tests/test_package_image.py","file_name":"test_package_image.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"221158552","text":"from functools import partial\nimport tensorflow as tf\n\nfrom bayesflow.computational_utilities import mmd_kernel, gaussian_kernel_matrix\n\n\ndef heteroscedastic_loss(network, params, x):\n    \"\"\" Computes the heteroscedastic loss between true and predicted parameters. \n    Legacy, used in the paper\n    Radev, S. T., Mertens, U. K., Voss, A., & Köthe, U. (2020). Towards end‐to‐end likelihood‐free inference with convolutional neural networks.\n\n    Parameters\n    ----------\n    network : tf.keras.Model\n        A neural network with a single output vector (posterior means)\n    params : tf.Tensor of shape (batch_size, n_out_dim)\n        Data-generating parameters, as sampled from prior\n    x : tf.Tensor of shape (batch_size, N, x_dim)\n        Synthetic data sets generated by the parameters\n\n    Returns\n    -------\n    loss : tf.Tensor\n        A single scalar value representing the heteroscedastic loss, shape (,)\n    \"\"\"\n\n    pred_mean, pred_var = network(x)\n    logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n    squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n    loss = tf.reduce_mean(squared_error + logvar)\n    return loss\n\n\ndef kl_latent_space_gaussian(network, *args):\n    \"\"\" Computes the Kullback-Leibler divergence (Maximum Likelihood Loss) between true and approximate\n    posterior using simulated data and parameters. Assumes a Gaussian latent space.\n\n    Parameters\n    ----------\n    network : tf.keras.Model\n        A single model amortizer\n    *args\n        List of arguments as inputs to network (e.g. 
model_indices, params, sim_data)\n\n Returns\n -------\n loss : tf.Tensor\n A single scalar value representing the KL loss, shape (,)\n\n Examples\n --------\n Parameter estimation\n\n >>> kl_latent_space(net, params, sim_data)\n\n Model comparison\n\n >>> kl_latent_space(net, model_indices, sim_data)\n\n Meta\n\n >>> kl_latent_space(net, model_indices, params, sim_data)\n \"\"\"\n\n z, log_det_J = network(*args)\n loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n return loss\n\n\ndef kl_latent_space_student(network, *args):\n \"\"\" Computes the Kullback-Leibler divergence (Maximum Likelihood Loss) between true and approximate\n posterior using simulated data and parameters. Assumes a latent student t-Distribution as a source.\n\n Parameters\n ----------\n network : tf.keras.Model\n A single model amortizer\n *args\n List of arguments as inputs to network (e.g. model_indices, params, sim_data)\n\n Returns\n -------\n loss : tf.Tensor\n A single scalar value representing the KL loss, shape (,)\n \"\"\"\n \n v, z, log_det_J = network(*args)\n d = z.shape[-1]\n loss = 0.\n loss -= d * tf.math.lgamma(0.5*(v + 1))\n loss += d * tf.math.lgamma(0.5*v + 1e-15)\n loss += (0.5*d) * tf.math.log(v + 1e-15)\n loss += 0.5*(v+1) * tf.reduce_sum(tf.math.log1p(z**2 / v), axis=-1)\n loss -= log_det_J\n mean_loss = tf.reduce_mean(loss)\n return mean_loss\n\n\ndef log_loss(network, model_indices, sim_data, kl_weight=0.01):\n \"\"\" Computes the logloss given output probs and true model indices m_true.\n\n Parameters\n ----------\n network : tf.keras.Model\n An evidential network (with real outputs in ``[1, +inf]``)\n model_indices : tf.Tensor of shape (batch_size, n_models)\n True model indices\n sim_data : tf.Tensor of shape (batch_size, n_obs, data_dim) or (batch_size, summary_dim) \n Synthetic data sets generated by the params or summary statistics thereof\n kl_weight : float in [0, 1]\n The weight of the KL regularization term\n\n Returns\n -------\n loss : tf.Tensor\n A single scalar Monte-Carlo approximation of the regularized Bayes risk, shape (,)\n \"\"\"\n\n # Compute evidences\n alpha = network(sim_data)\n\n # Obtain probs\n model_probs = alpha / tf.reduce_sum(alpha, axis=1, keepdims=True)\n\n # Numerical stability\n model_probs = tf.clip_by_value(model_probs, 1e-15, 1 - 1e-15)\n\n # Actual loss + regularization (if given)\n loss = -tf.reduce_mean(tf.reduce_sum(model_indices * tf.math.log(model_probs), axis=1))\n if kl_weight > 0:\n kl = kl_dirichlet(model_indices, alpha)\n loss = loss + kl_weight * kl\n return loss\n\n\ndef kl_dirichlet(model_indices, alpha):\n \"\"\" Computes the KL divergence between a Dirichlet distribution with parameter vector alpha and a uniform Dirichlet.\n\n Parameters\n ----------\n model_indices : tf.Tensor of shape (batch_size, n_models)\n one-hot-encoded true model indices\n alpha : tf.Tensor of shape (batch_size, n_models)\n positive network outputs in ``[1, +inf]``\n\n Returns\n -------\n kl: tf.Tensor\n A single scalar representing :math:`D_{KL}(\\mathrm{Dir}(\\\\alpha) | \\mathrm{Dir}(1,1,\\ldots,1) )`, shape (,)\n \"\"\"\n\n # Extract number of models\n J = int(model_indices.shape[1])\n\n # Set-up ground-truth preserving prior\n alpha = alpha * (1 - model_indices) + model_indices\n beta = tf.ones((1, J), dtype=tf.float32)\n alpha0 = tf.reduce_sum(alpha, axis=1, keepdims=True)\n\n # Computation of KL\n kl = tf.reduce_sum((alpha - beta) * (tf.math.digamma(alpha) - tf.math.digamma(alpha0)), axis=1, keepdims=True) + \\\n tf.math.lgamma(alpha0) 
- tf.reduce_sum(tf.math.lgamma(alpha), axis=1, keepdims=True) + \\\n tf.reduce_sum(tf.math.lgamma(beta), axis=1, keepdims=True) - tf.math.lgamma(\n tf.reduce_sum(beta, axis=1, keepdims=True))\n loss = tf.reduce_mean(kl)\n return loss\n\n\ndef maximum_mean_discrepancy(source_samples, target_samples, mmd_weight=1., minimum=0.):\n \"\"\" This Maximum Mean Discrepancy (MMD) loss is calculated with a number of different Gaussian kernels.\n\n Parameters\n ----------\n source_samples : tf.Tensor of shape (N, num_features)\n target_samples : tf.Tensor of shape (M, num_features)\n mmd_weight : float, default: 1.0\n the weight of the MMD loss.\n minimum : float, default: 0.0\n lower loss bound\n\n Returns\n -------\n loss_value : tf.Tensor\n A scalar Maximum Mean Discrepancy, shape (,)\n \"\"\"\n\n loss_value = mmd_kernel(source_samples, target_samples, kernel=gaussian_kernel_matrix)\n loss_value = mmd_weight * tf.maximum(minimum, loss_value)\n return loss_value\n\n\ndef mmd_kl_gaussian_loss(network, *args, z_dist=tf.random.normal, mmd_weight=1.0):\n \"\"\"KL loss in latent z space, MMD loss in summary space.\"\"\"\n \n # Apply net and unpack \n x_sum, out = network(*args, return_summary=True)\n z, log_det_J = out\n \n # Apply MMD loss to summary network output\n z_samples = z_dist(x_sum.shape) \n mmd_loss = maximum_mean_discrepancy(x_sum, z_samples)\n \n # Apply KL loss for inference net\n kl_loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n \n # Sum and return losses\n return kl_loss + mmd_weight * mmd_loss\n\n\ndef mmd_kl_student_loss(network, *args, z_dist=tf.random.normal, mmd_weight=1.0):\n \"\"\"KL loss in latent z space, MMD loss in summary space.\"\"\"\n \n # Apply net and unpack \n x_sum, out = network(*args, return_summary=True)\n v, z, log_det_J = out\n \n # Apply MMD loss to summary network output\n z_samples = z_dist(x_sum.shape) \n mmd_loss = maximum_mean_discrepancy(x_sum, z_samples)\n \n # Apply KL loss for inference net\n d = z.shape[-1]\n kl_loss = 0.\n kl_loss -= d * tf.math.lgamma(0.5*(v + 1))\n kl_loss += d * tf.math.lgamma(0.5*v + 1e-15)\n kl_loss += (0.5*d) * tf.math.log(v + 1e-15)\n kl_loss += 0.5*(v+1) * tf.reduce_sum(tf.math.log1p(z**2 / v), axis=-1)\n kl_loss -= log_det_J\n kl_loss = tf.reduce_mean(kl_loss)\n \n # Sum and return losses\n return kl_loss + mmd_weight * mmd_loss","sub_path":"Conversion/Conversion (data interpolation, n_obs=3)/bayesflow/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"332367321","text":"#!/usr/bin/env python2\n\nimport eventlet\n\ndef closed_callback():\n #print \"called back\"\n pass\n\ndef forward(source, dest, addr, cb = lambda: None):\n \"\"\"Forwards bytes unidirectionally from source to dest\"\"\"\n while True:\n d = source.recv(32384)\n #print repr(d)\n if d == '':\n cb()\n break\n dest.sendall(d)\n\ndef listen(local, remote):\n listener = eventlet.listen(local)\n while True:\n client, addr = listener.accept()\n server = eventlet.connect(remote)\n # two unidirectional forwarders make a bidirectional one\n eventlet.spawn_n(forward, client, server, addr, closed_callback)\n eventlet.spawn_n(forward, server, client, addr)\n\nif __name__ == '__main__':\n for local, remote in (\n (('', 9001), ('mini5.opera-mini.net', 1080)),\n (('', 9002), ('mini5.opera-mini.net', 80)),\n ):\n eventlet.spawn_n(listen, local, remote)\n while True:\n 
eventlet.sleep(3)\n","sub_path":"operamirror.py","file_name":"operamirror.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"455840919","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# Get cookie1 and the authenticity_token\nr1=requests.get(\n    url='https://github.com/login',\n    headers={\n        'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac 05 X 10_11_4)AppleWebKit/537.36(KHTML,like Gecko)Chrome/52.0.2743.116 Safari/537.36'})\nsoup=BeautifulSoup(r1.text,'html.parser')\nform=soup.find(name='form')\ntoken=form.find(attrs={'name':'authenticity_token'})\nauthenticity_token=token.get('value')\ncookie1=r1.cookies.get_dict()\nr1.close()\n# print(authenticity_token)\n\n# Log in\nfrom_data={\n    'authenticity_token': authenticity_token,\n    \"utf-8\":\"\",\n    \"commit\":\"sign in\",\n    'login': 'yuxiaomi',\n    'password': '52067.lt',\n}\nr2=requests.post('https://github.com/session',data=from_data,cookies=cookie1)\ncookie2=r2.cookies.get_dict()\ncookie1.update(cookie2)# merge the cookies from the second response into the first set\n\nr3=requests.get('https://github.com/settings/emails',cookies=cookie1)\nf=open('r3.html','w',encoding='utf-8')\n# f.write(r3.text)\n# f.close()\n# print(\"write finished\")\n# print(r3.text)\nsoup=BeautifulSoup(r3.text,'html.parser')\ndiv=soup.find(name='li',attrs={'class':'Box-row'})\nemail=div.find(name='h4')\nprint('email:',email.text)\n\n\n\n\n\n# r2=requests.post(\n#     url='https://github.com/session',\n#     headers={\n#         'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac 05 X 10_11_4)AppleWebKit/537.36(KHTML,like Gecko)Chrome/52.0.2743.116 Safari/537.36'\n#     },\n#     data={\n#         'authenticity_token':authenticity_token,\n#         'login': 'yuxiaomi',\n#         'password': '52067.lt',\n#     }\n# )\n# print(r2.text)\n# # print(r2.cookies.get_dict())\n# # go to your own settings page\n# r3=requests.get(\n#     url='https://github.com/settings/emails',\n#     headers={\n#         'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac 05 X 10_11_4)AppleWebKit/537.36(KHTML,like Gecko)Chrome/52.0.2743.116 Safari/537.36'\n#     },\n#     cookies=r2.cookies.get_dict()\n# )\n# print(r3.text)\n# soup=BeautifulSoup(r3.text,'html.parser')\n# div=soup.find(name='div',attrs={'class':'Subhead'})\n# print(div)\n","sub_path":"spyder/spyder/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
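# The GitHub login above merges cookie dicts by hand (cookie1.update(cookie2));
# a requests.Session carries cookies across requests automatically. A sketch of
# the same flow -- the form fields and selectors are copied from the script
# above, not re-verified against the current github.com markup:
import requests
from bs4 import BeautifulSoup

def github_login(login, password):
    session = requests.Session()  # cookie jar persists across calls
    r1 = session.get('https://github.com/login')
    token = BeautifulSoup(r1.text, 'html.parser').find(attrs={'name': 'authenticity_token'})
    session.post('https://github.com/session', data={
        'authenticity_token': token.get('value'),
        'login': login,
        'password': password,
    })
    return session  # later session.get() calls reuse the login cookies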
+{"seq_id":"356907988","text":"import urllib.request\nimport re\n\nhtml = urllib.request.urlopen(\"http://www.quanshuwang.com/book/44/44683\").read() # fetch the page source\nhtml = html.decode(\"gbk\") # decode with this site's encoding\n\n# NOTE: the HTML tags inside the two regex literals below were destroyed when this\n# file was extracted (the markup got rendered); they are reconstructed here from\n# the chapter-list layout the comments describe, so treat them as a best guess.\nreg = r'<li><a href=\"(.*?)\" title=\".*?\">(.*?)</a></li>' # regex matching the site layout: (.*?) matches anything, and the parenthesized groups are the parts we keep\n\nreg = re.compile(reg)\n\nurls = re.findall(reg, html)\n\nfor url in urls:\n    chapter_url = url[0]\n    chapter_title = url[1]\n    chapter_html = urllib.request.urlopen(chapter_url).read() # fetch the chapter's full page source\n    chapter_html = chapter_html.decode(\"gbk\")\n    chapter_reg = r'&nbsp;&nbsp;&nbsp;&nbsp;.*?<br />&nbsp;&nbsp;&nbsp;&nbsp;(.*?)
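# The record above is truncated mid-pattern at the end of this chunk. For
# context only, crawlers in this style usually finish the loop along these
# lines; the completed chapter_reg and the file-writing step are assumptions,
# not recovered text:
import re
import urllib.request

def save_chapter(chapter_url, chapter_title):
    chapter_html = urllib.request.urlopen(chapter_url).read().decode('gbk')
    # Hypothetical completion of the pattern; re.S lets .*? cross line breaks
    chapter_reg = r'&nbsp;&nbsp;&nbsp;&nbsp;.*?<br />&nbsp;&nbsp;&nbsp;&nbsp;(.*?)</script>'
    for content in re.findall(chapter_reg, chapter_html, re.S):
        with open(chapter_title + '.txt', 'w', encoding='utf-8') as f:
            f.write(content)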