> started. ----- ')\nprint()\n\n###### Create function to allow pause (one click) and stop (double click) on figure #####\npause=False\npause_start = 0\ndef onClick(event): \n global pause\n global pause_start\n global anim\n \n pause ^= True\n if pause:\n pause_start = time.time()\n else:\n dT = time.time()-pause_start\n if dT < 0.3: # Break simulation at double click\n print('Animation exited.')\n anim.ani.event_source.stop()\n\n \n##### Anmation Function #####\nclass Animated():\n \"\"\"An animated scatter plot using matplotlib.animations.FuncAnimation.\"\"\"\n def __init__(self, x0, obs=[], N_simuMax = 600, dt=0.01, attractorPos='default', convergenceMargin=0.01, xRange=[-10,10], yRange=[-10,10], zRange=[-10,10], sleepPeriod=0.03, nonlinear=False, RK4_int = False, dynamicalSystem=linearAttractor):\n\n self.dim = x0.shape[0]\n\n #self.simuColors=[]\n \n # Initialize class variables\n self.obs = obs\n self.N_simuMax = N_simuMax\n self.dt = dt\n if attractorPos == 'default':\n self.attractorPos = self.dim*[0.0]\n else:\n self.attractorPos = attractorPos\n \n self.sleepPeriod=sleepPeriod\n\n # last three values are observed for convergence\n self.convergenceMargin = convergenceMargin\n self.lastConvergences = [convergenceMargin for i in range(3)] \n\n # Get current simulation time\n self.old_time = time.time()\n self.pause_time = self.old_time\n \n self.N_points = x0.shape[1]\n\n self.x_pos = np.zeros((self.dim, self.N_simuMax+2, self.N_points))\n \n self.x_pos[:,0,:] = x0\n \n self.xd_ds = np.zeros(( self.dim, self.N_simuMax+1, self.N_points ))\n #self.t = np.linspace(( 0, self.N_simuMax*self.dt, num=self.N_simuMax ))\n self.t = np.linspace(0,self.N_simuMax+1,num=self.N_simuMax+1)*dt\n\n # Simulation parameters\n self.RK4_int = RK4_int\n self.nonlinear = nonlinear\n self.dynamicalSystem = dynamicalSystem\n\n self.converged = False\n \n self.iSim = 0\n\n self.lines = [] # Container to keep line plots\n self.startPoints = [] # Container to keep line plots\n self.endPoints = [] # Container to keep line plots \n self.patches = [] # Container to keep patch plotes\n self.contour = []\n self.centers = []\n self.cent_dyns = []\n\n # Setup the figure and axes.\n if self.dim==2:\n self.fig, self.ax = plt.subplots()\n else:\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection='3d')\n #self.fig.set_size_inches(14, 9)\n self.fig.set_size_inches(12, 8)\n \n self.ax.set_xlim(xRange)\n self.ax.set_ylim(yRange)\n #self.ax.set_xlabel('x1')\n #self.ax.set_ylabel('x2')\n if self.dim==3:\n self.ax.set_zlim(zRange)\n self.ax.set_zlabel('x3')\n #self.ax.view_init(elev=0.3, aim=0.4)\n\n # Set axis etc.\n plt.gca().set_aspect('equal', adjustable='box')\n\n # Set up plot\n #self.setup_plot()\n #self.tt1 = self.ax.text(.5, 1.05, '', transform = self.ax.transAxes, va='center', animated=True, )\n \n # Adjust dynamic center\n # intersection_obs = obs_common_section(self.obs)\n # dynamic_center_3d(self.obs, intersection_obs)\n \n # Then setup FuncAnimation \n self.ani = FuncAnimation(self.fig, self.update, interval=1, frames = self.N_simuMax-2, repeat=False, init_func=self.setup_plot, blit=True, save_count=self.N_simuMax-2)\n\n def setup_plot(self):\n print('setup started')\n # Draw obstacle\n self.obs_polygon = []\n \n # Numerical hull of ellipsoid\n for n in range(len(self.obs)):\n self.obs[n].draw_ellipsoid(numPoints=50) # 50 points resolution\n\n for n in range(len(self.obs)):\n if self.dim==2:\n emptyList = [[0,0] for i in range(50)]\n #self.obs_polygon.append( plt.Polygon(self.obs[n].x_obs, 
animated=True,))\n self.obs_polygon.append( plt.Polygon(emptyList, animated=True,))\n self.obs_polygon[n].set_color(np.array([176,124,124])/255)\n self.obs_polygon[n].set_alpha(0.8)\n patch_o = plt.gca().add_patch(self.obs_polygon[n])\n self.patches.append(patch_o)\n\n if self.obs[n].x_end > 0:\n cont, = plt.plot([],[], 'k--', animated=True)\n else:\n cont, = plt.plot([self.obs[n].x_obs_sf[ii][0] for ii in range(len(self.obs[n].x_obs_sf))],\n [self.obs[n].x_obs_sf[ii][1] for ii in range(len(self.obs[n].x_obs_sf))],\n 'k--', animated=True)\n self.contour.append(cont)\n else: # 3d\n N_resol=50 # TODO save as part of obstacle class internally from assigining....\n self.obs_polygon.append(\n self.ax.plot_surface(\n np.reshape([obs[n].x_obs[i][0] for i in range(len(obs[n].x_obs))],\n (N_resol,-1)),\n np.reshape([obs[n].x_obs[i][1] for i in range(len(obs[n].x_obs))],\n (N_resol,-1)),\n np.reshape([obs[n].x_obs[i][2] for i in range(len(obs[n].x_obs))],\n (N_resol, -1)) ) )\n\n # Center of obstacle\n center, = self.ax.plot([],[],'k.', animated=True) \n self.centers.append(center)\n \n if hasattr(self.obs[n], 'center_dyn'):# automatic adaptation of center\n cent_dyn, = self.ax.plot([],[], 'k+', animated=True, linewidth=18, markeredgewidth=4, markersize=13)\n # ax_ifd.plot(obs[n].center_dyn[0],obs[n].center_dyn[1], 'k+', linewidth=18, markeredgewidth=4, markersize=13)\n self.cent_dyns.append(cent_dyn)\n \n for ii in range(self.N_points):\n line, = plt.plot([], [], '--', lineWidth = 4, animated=True)\n self.lines.append(line)\n point, = plt.plot(self.x_pos[0,0,ii],self.x_pos[1,0,ii], '*k', markersize=10, animated=True)\n if self.dim==3:\n point, = plt.plot(self.x_pos[0,0,ii],self.x_pos[1,0,ii], self.x_pos[2,0,ii], '*k', markersize=10, animated=True)\n self.startPoints.append(point)\n point, = plt.plot([], [], 'bo', markersize=15, animated=True)\n self.endPoints.append(point)\n\n\n if self.dim==2:\n plt.plot(self.attractorPos[0], self.attractorPos[1], 'k*', linewidth=7.0, markeredgewidth=4, markersize=13)\n else:\n plt.plot([self.attractorPos[0]], [self.attractorPos[1]], [self.attractorPos[2]], 'k*', linewidth=7.0)\n\n self.fig.canvas.mpl_connect('button_press_event', onClick) # Button click enabled\n\n #self.tt1 = self.ax.text(.5, 8.2, '', va='center', fontsize=20)\n\n print('setup finished')\n\n #return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints + [self.tt1])\n return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints)\n \n def update(self, iSim):\n if not plt.fignum_exists(self.fig.number):\n anim.ani.event_source.stop()\n \n #if saveFigure:\n if pause: # NO ANIMATION -- PAUSE\n self.old_time=time.time()\n return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints)\n\n if not plt.fignum_exists(self.fig.number):\n anim.ani.event_source.stop()\n \n print('loop count={} - frame ={}-Simulation time ={}'.format(self.iSim, iSim, np.round(self.dt*self.iSim, 3) ))\n\n # intersection_obs = obs_common_section(self.obs)\n #print('center before',obs[0].center_dyn)\n # dynamic_center_3d(self.obs, intersection_obs)\n # print('center after',obs[0].center_dyn)\n \n if self.RK4_int: # Runge kutta integration\n for j in range(self.N_points):\n self.x_pos[:, self.iSim+1,j] = obs_avoidance_rk4(self.dt, self.x_pos[:,self.iSim,j], self.obs, x0=self.attractorPos, obs_avoidance = obs_avoidance_interpolation_moving)\n\n 
#self.x_pos[:, self.iSim+1,j] = obs_avoidance_rk4(self.dt, self.x_pos[:,self.iSim,j], self.obs, x0=self.attractorPos, obs_avoidance = obs_avoidance_modulation)\n \n elif self.nonlinear:\n for j in range(self.N_points):\n self.xd_ds[:,self.iSim,j] = obs_avoidance_nonlinear_radial(self.x_pos[:,self.iSim, j], self.dynamicalSystem, obs, self.attractorPos)\n self.x_pos[:,self.iSim+1,:] = self.x_pos[:,self.iSim, :] + self.xd_ds[:,self.iSim, :]*self.dt\n \n else: # Simple euler integration\n # Calculate DS\n for j in range(self.N_points):\n xd_temp = linearAttractor(self.x_pos[:,self.iSim, j], self.attractorPos)\n \n self.xd_ds[:,self.iSim,j] = obs_avoidance_interpolation_moving(self.x_pos[:,self.iSim, j], xd_temp, self.obs)\n self.x_pos[:,self.iSim+1,:] = self.x_pos[:,self.iSim, :] + self.xd_ds[:,self.iSim, :]*self.dt\n \n self.t[self.iSim+1] = (self.iSim+1)*self.dt\n\n # Update lines\n for j in range(self.N_points):\n self.lines[j].set_xdata(self.x_pos[0,:self.iSim+1,j])\n self.lines[j].set_ydata(self.x_pos[1,:self.iSim+1,j])\n if self.dim==3:\n self.lines[j].set_3d_properties(zs=self.x_pos[2,:self.iSim+1,j])\n\n self.endPoints[j].set_xdata(self.x_pos[0,self.iSim+1,j])\n self.endPoints[j].set_ydata(self.x_pos[1,self.iSim+1,j])\n if self.dim==3:\n self.endPoints[j].set_3d_properties(zs=self.x_pos[2,self.biSim+1,j])\n \n # ========= Check collision ----------\n #collisions = obs_check_collision(self.x_pos[:,self.iSim+1,:], obs)\n #collPoints = np.array()\n\n #print('TODO --- collision observation')\n #collPoints = self.x_pos[:,self.iSim+1,collisions]\n\n # if collPoints.shape[0] > 0:\n # plot(collPoints[0,:], collPoints[1,:], 'rx')\n # print('Collision detected!!!!')\n for o in range(len(self.obs)):# update obstacles if moving\n self.obs[o].update_pos(self.t[self.iSim], self.dt) # Update obstacles\n\n self.centers[o].set_xdata(self.obs[o].x0[0])\n self.centers[o].set_ydata(self.obs[o].x0[1])\n if self.dim==3:\n self.centers[o].set_3d_properties(zs=obs[o].x0[2])\n\n if hasattr(self.obs[o], 'center_dyn'):# automatic adaptation of center\n self.cent_dyns[o].set_xdata(self.obs[o].center_dyn[0])\n self.cent_dyns[o].set_ydata(self.obs[o].center_dyn[1])\n if self.dim==3:\n self.cent_dyns[o].set_3d_properties(zs=self.obs[o].center_dyn[2])\n\n\n if self.obs[o].x_end > self.t[self.iSim] or self.iSim<1: # First round or moving\n if self.dim ==2: # only show safety-contour in 2d, otherwise not easily understandable\n self.contour[o].set_xdata([self.obs[o].x_obs_sf[ii][0] for ii in range(len(self.obs[o].x_obs_sf))])\n self.contour[o].set_ydata([self.obs[o].x_obs_sf[ii][1] for ii in range(len(self.obs[o].x_obs_sf))])\n\n if self.dim==2:\n self.obs_polygon[o].xy = self.obs[o].x_obs\n else:\n self.obs_polygon[o].xyz = self.obs[o].x_obs\n self.iSim += 1 # update simulation counter\n self.check_convergence() # Check convergence \n \n # Pause for constant simulation speed\n self.old_time = self.sleep_const(self.old_time)\n self.pause_time = self.old_time\n\n #self.tt1.set_text('{:2.2f} s'.format(round(self.t[self.iSim+1],2) ) )\n\n #return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints + [self.tt1] )\n return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints)\n\n def check_convergence(self):\n #return\n self.lastConvergences[0] = self.lastConvergences[1]\n self.lastConvergences[1] = self.lastConvergences[2]\n\n self.lastConvergences[2] = np.sum(abs(self.x_pos[:,self.iSim,:] - 
np.tile(self.attractorPos, (self.N_points,1) ).T ))\n\n if (sum(self.lastConvergences) < self.convergenceMargin) or (self.iSim+1>=self.N_simuMax):\n self.ani.event_source.stop()\n \n if (self.iSim>=self.N_simuMax-1):\n print('Maximum number of {} iterations reached without convergence.'.format(self.N_simuMax))\n else:\n print('Convergence with tolerance of {} reached after {} iterations.'.format(sum(self.lastConvergences), self.iSim+1) )\n\n \n def show(self):\n plt.show()\n\n def sleep_const(self, old_time=0):\n next_time = old_time+self.sleepPeriod\n \n now = time.time()\n \n sleep_time = next_time - now # get sleep time\n sleep_time = min(max(sleep_time, 0), self.sleepPeriod) # restrict in sensible range\n\n time.sleep(sleep_time)\n\n return next_time\n\n\nanimationName = -1\nsaveFigure=0\nN = 4\n\ndef samplePointsAtBorder(N, xRange, yRange):\n # Draw points evenly spaced at border\n dx = xRange[1]-xRange[0]\n dy = yRange[1]-yRange[0]\n\n N_x = ceil(dx/(2*(dx+dy))*(N))+2\n N_y = ceil(dx/(2*(dx+dy))*(N))-0\n\n x_init = np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n np.ones(N_x)*yRange[0]) )\n\n x_init = np.hstack((x_init, \n np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n np.ones(N_x)*yRange[1] )) ))\n\n ySpacing=(yRange[1]-yRange[0])/(N_y+1)\n x_init = np.hstack((x_init, \n np.vstack((np.ones(N_y)*xRange[0],\n np.linspace(yRange[0]+ySpacing,yRange[1]-ySpacing, num=N_y) )) ))\n\n x_init = np.hstack((x_init, \n np.vstack((np.ones(N_y)*xRange[1],\n np.linspace(yRange[0]+ySpacing,yRange[1]-ySpacing, num=N_y) )) ))\n\n return x_init\n\n \nsimuCase=7\n\n\nif simuCase==0:\n N = 10\n x_init = np.vstack((np.ones(N)*20,\n np.linspace(-10,10,num=N) ))\n ### Create obstacle \n obs = []\n a = [5, 2] \n p = [1, 1]\n x0 = [10.0, 0]\n th_r = 30/180*pi\n sf = 1.\n\n #xd=[0, 0]\n w = 0\n x_start = 0\n x_end = 2\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n\n a = [3,2]\n p = [1,1]\n x0 = [7,-6]\n th_r = -40/180*pi\n sf = 1.\n\n xd=[0.25, 1]\n w = 0\n x_start = 0\n x_end = 10\n \n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n a = [3,2]\n p = [1,1]\n x0 = [7,-6]\n th_r = -40/180*pi\n sf = 1.\n\n xd=[0., 0]\n w = 0\n x_start = 0\n x_end = 0\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n \n ob2 = Obstacle(\n a= [1,1],\n p= [1,1],\n x0= [10,-8],\n th_r= -40/180*pi,\n sf=1,\n xd=[0, 0],\n x_start=0,\n x_end=0,\n w=0\n )\n #obs.append(ob2)\n\n ob3 = Obstacle(\n a= [1,1],\n p= [1,1],\n x0= [14,-2],\n th_r= -40/180*pi,\n sf=1,\n xd=[0, 0],\n x_start=0,\n x_end=0,\n w=0\n )\n obs.append(ob3)\n\n xRange = [ -1,20]\n yRange = [-10,10]\n zRange = [-10,10]\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n attractorPos = [0,0]\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.05, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos )\n \nelif simuCase==1:\n N = 10\n x_init = np.vstack((np.ones(N)*1,\n np.linspace(-1,1,num=N),\n np.linspace(-1,1,num=N) ))\n ### Create obstacle \n obs = []\n\n x0 = [0.5,0.2,0.0]\n a = [0.4,0.1,0.1]\n #a = [4,4,4]\n p = [10,1,1]\n th_r = [0, 0, 30./180*pi]\n sf = 1.\n\n xd=[0,0,0]\n w = [0,0,0]\n\n x_start = 0\n x_end = 2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n\n ### Create obstacle\n x0 = [0.5,-0.2,0]\n a = [0.4,0.1,0.1]\n p = [10,1,1]\n th_r = [0, 0, -30/180*pi]\n sf = 1\n\n xd=[0,0,0]\n w = 
[0,0,0]\n\n x_start = 0\n x_end = 2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n xRange = [-0.2,1.8]\n yRange = [-1,1]\n zRange = [-1,1]\n\n\nelif simuCase ==2:\n xRange = [-0.7,0.3]\n yRange = [2.3,3.0]\n \n xRange = [-3,3]\n yRange = [-3,3.0]\n\n N = 10\n #x_init = np.vstack((np.linspace(-.19,-0.16,num=N),\n # np.ones(N)*2.65))\n\n x_init = np.vstack((np.linspace(-3,-1,num=N),\n np.ones(N)*0))\n \n xAttractor = np.array([0,0])\n\n obs = []\n \n obs.append(Obstacle(a=[1.1, 1],\n p=[1,1],\n x0=[0.5,1.5],\n th_r=-25*pi/180,\n sf=1.0\n ))\n \n a = [0.2,5]\n p = [1,1]\n x0 = [0.5, 5]\n th_r = -25/180*pi\n sf = 1.0\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.003, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\n \nelif simuCase ==3:\n xRange = [-0.7,0.3]\n yRange = [2.3,3.0]\n \n xRange = [-4,4]\n yRange = [-0.1,6.0]\n\n N = 20\n x_init = np.vstack((np.linspace(-4.5,4.5, num=N),\n np.ones(N)*5.5))\n \n \n xAttractor = np.array([0,0])\n\n obs = []\n obs.append(Obstacle(\n a = [1.1,1.2],\n p = [1,1],\n x0 = [-1, 1.5],\n th_r = -25/180*pi,\n sf = 1.0\n ))\n \n obs.append(Obstacle(\n a = [1.8,0.4],\n p = [1,1],\n x0 = [0, 4],\n th_r = 20/180*pi,\n sf = 1.0,\n ))\n \n obs.append(Obstacle(\n a=[1.2,0.4],\n p=[1,1],\n x0=[3,3],\n th_r=-30/180*pi,\n sf=1.0 \n ))\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\nelif simuCase==4:\n\n # Moving in LAB\n xRange = [0,16]\n yRange = [0,9]\n \n #x_init = np.vstack((np.ones(N)*16,\n # np.linspace(0,9,num=N) ))b\n \n ### Create obstacle \n obs = []\n x0 = [3.5,1]\n a = [2.5,0.8]\n p = [1,1]\n th_r = -10\n sf = 1.3\n\n xd0=[0,0]\n w0 = 0\n\n x01 =x0\n x_start = 0\n x_end = 10\n obs.append(Obstacle(a=a, p=p, x0=x01,th_r=th_r, sf=sf, x_start=x_start, x_end=x_end, timeVariant=True))\n\n def func_w1(t):\n t_interval1 = [0, 2.5, 5, 7, 8, 10]\n w1 = [th_r, -20, -140, -140, -170, -170]\n \n for ii in range(len(t_interval1)-1):\n if t < t_interval1[ii+1]:\n return (w1[ii+1]-w1[ii])/(t_interval1[ii+1]-t_interval1[ii]) * pi/180\n return 0\n\n def func_xd1(t):\n t_interval1x = [0, 2.5, 5, 7, 8, 10]\n xd1 = [[x01[0], 7, 9, 9, 7, 6],\n [x01[1], 4, 5, 5, 4, -2]]\n\n for ii in range(len(t_interval1x)-1):\n if t < t_interval1x[ii+1]:\n dt = (t_interval1x[ii+1]-t_interval1x[ii])\n return [(xd1[0][ii+1]-xd1[0][ii])/dt, (xd1[1][ii+1]-xd1[1][ii])/dt]\n return 0\n\n obs[0].func_w = func_w1\n obs[0].func_xd = func_xd1\n\n x0 = [12,8]\n a = [2,1.2]\n p = [1,1]\n th_r = 0\n sf = 1.3\n\n xd0=[0,0]\n w0 = 0\n\n x_start = 0\n x_end = 10\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, x_start=x_start, x_end=x_end, timeVariant=True))\n\n def func_w2(t):\n t_interval = [0, 2., 6.5, 7, 10]\n w = [th_r, -60, -60, 30, 30]\n \n for ii in range(len(t_interval)-1):\n if t < t_interval[ii+1]:\n return (w[ii+1]-w[ii])/(t_interval[ii+1]-t_interval[ii]) * pi/180\n return 0\n\n def func_xd2(t):\n t_interval = [0, 2.0, 5, 6.5, 9, 10]\n xd = [[x0[0], 13, 13, 12, 14, 15], \n [x0[1], 6, 6, 3, -2, -3 ]]\n\n for ii in range(len(t_interval)-1):\n if t < t_interval[ii+1]:\n dt = (t_interval[ii+1]-t_interval[ii])\n return [(xd[0][ii+1]-xd[0][ii])/dt, (xd[1][ii+1]-xd[1][ii])/dt]\n return 0\n\n obs[1].func_w = func_w2\n obs[1].func_xd = func_xd2\n\n #x_init = np.array([[15.5],[0.2]])\n N = 20\n\n x_init = samplePointsAtBorder(N, xRange, yRange)\n collisions = 
obs_check_collision(x_init, obs)\n x_init = x_init[:,collisions[0]]\n \n attractorPos = [4,8]\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.01, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos )\n \n if False: #save animation\n anim.ani.save('ani/animation_multipleObstacles_conv.mp4', dpi=100, fps=25)\n print('Saving finished.')\n \n\nelif simuCase==5:\n\n \n xRange = [-4,4]\n yRange = [-0.1,6.0]\n\n N = 10\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n print('axample at rorder')\n\n # dx = xRange[1]-xRange[0]\n # dy = yRange[1]-yRange[0]\n\n # N_x = ceil(dx/(2*(dx+dy))*N)\n # N_y = ceil(dx/(2*(dx+dy))*N)\n\n # x_init = np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n # np.ones(N_x)*yRange[0]) )\n\n # x_init = np.hstack((x_init, \n # np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n # np.ones(N_x)*yRange[1] )) ))\n\n # x_init = np.hstack((x_init, \n # np.vstack((np.ones(N_y)*xRange[0],\n # np.linspace(yRange[0],yRange[1], num=N_y) )) ))\n\n # x_init = np.hstack((x_init, \n # np.vstack((np.ones(N_y)*xRange[1],\n # np.linspace(yRange[0],yRange[1], num=N_y) )) ))\n #x_init = np.array( [[-2,-2,-1],\n # [2, 3, 3]])\n xAttractor = np.array([0,0])\n\n obs = []\n obs.append(Obstacle(\n a = [1.1,1.2],\n p = [1,1],\n x0 = [-1, 1.5],\n th_r = -25/180*pi,\n sf = 1\n ))\n \n obs.append(Obstacle(\n a = [1.8,0.4],\n p = [1,1],\n x0 = [0, 4],\n th_r = 20/180*pi,\n sf = 1.0,\n ))\n \n obs.append(Obstacle(\n a=[1.2,0.4],\n p=[1,1],\n x0=[3,3],\n th_r=-30/180*pi,\n sf=1.0 \n ))\n\n N = 10\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\n if True: #save animation\n anim.ani.save('ani/animation_peopleWalking.mp4', dpi=100, fps=25)\n print('Saving finished.')\n\n #dist slow = 0.18\n # anim.ani.save('ani/simue.mpeg', writer=\"ffmpeg\")\n #FFwriter = animation.FFMpegWriter()\n #anim.ani.save('ani/basic_animation.mp4', writer = FFwriter, fps=20)\n\nif simuCase==6:\n xRange = [-0.1,12]\n yRange = [-5,5]\n\n N=5\n #x_init = samplePointsAtBorder(N, xRange, yRange)\n x_init = np.vstack((np.ones((1,N))*8,\n np.linspace(-1,1,num=N),))\n\n xAttractor=[0,0]\n \n obs = []\n a=[0.3, 2.5]\n p=[1,1]\n x0=[2,0]\n th_r=-50/180*pi\n sf=1\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n # Obstacle 2\n a = [0.4,2.5]\n p = [1,1]\n #x0 = [7,2]\n x0 = [6,0]\n th_r = 50/180*pi\n sf = 1\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\nif simuCase==7:\n xAttractor = np.array([0,0])\n centr = [2, 2.5]\n\n obs = []\n N = 12\n R = 5\n th_r0 = 38/180*pi\n rCent=2.4\n for n in range(N):\n obs.append(Obstacle(\n a = [0.4,3],\n p = [1,1],\n x0 = [R*cos(2*pi/N*n), R*sin(2*pi/N*n)],\n th_r = th_r0 + 2*pi/N*n,\n sf = 1.0))\n \n obs[n].center_dyn=[obs[n].x0[0]-rCent*sin(obs[n].th_r),\n obs[n].x0[1]+rCent*cos(obs[n].th_r)]\n\n obs[n].tail_effect = True\n \n xRange = [-10,10]\n yRange = [-8,8]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1000, convergenceMargin=0.3, sleepPeriod=0.001, RK4_int=True)\n\n\n # animationName = 'ani/animation_ring_noConvergence.mp4'\n animationName = 'ani/animation_ring_convergence.mp4'\n\nif simuCase ==8:\n xAttractor = np.array([0,0])\n centr = [2, 2.5]\n\n obs = []\n obs.append(Obstacle(\n a = [2,2],\n p = [1,1],\n x0 = [10,-7],\n th_r = 0,\n sf = 1.0,\n xd = [-5,5],\n x_start=0,\n x_end=10))\n \n xRange = [-1,10]\n yRange = [-5,5]\n N = 
20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.0, N_simuMax=800, convergenceMargin=0.3, sleepPeriod=0.01)\n\n animationName = 'ani/animation_movingCircle.mp4'\n \nif simuCase ==9:\n xAttractor = np.array([0,0])\n centr = [2, 2.5]\n\n obs = []\n obs.append(Obstacle(\n a = [0.4,3],\n p = [1,1],\n x0 = [2,0],\n th_r = 0,\n sf = 1.0,\n w = 3,\n x_start=0,\n x_end=10))\n \n xRange = [-3,7]\n yRange = [-5,5]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.005, N_simuMax=800, convergenceMargin=0.3, sleepPeriod=0.01)\n\n if True: #save animation\n anim.ani.save('ani/animation_rotatingEllipse.mp4', dpi=100, fps=25)\n print('Saving finished.')\n \nif simuCase ==10:\n xAttractor = np.array([0,0])\n centr = [1.5, 3.0]\n ### Three obstacles touching -- no common center, no convergence\n obs = []\n a = [0.6,0.6]\n p = [1,1]\n x0 = [1.5, .7]\n th_r = -60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n a = [1,0.4]\n p = [1,4]\n x0 = [3, -00.8]\n th_r= +60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n a = [1.2,0.2]\n p = [2,2]\n x0 = [2.3,.1]\n th_r = 20/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n N = 20\n\n xRange = [-0.5,5.5]\n yRange = [-2.5,2.5]\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.005, N_simuMax=int(800/3), convergenceMargin=0.3, sleepPeriod=0.01)\n\n if True: #save animation\n anim.ani.save('ani/animation_multipleObstacles_noConv.mp4', dpi=100, fps=25)\n print('Saving finished.')\n \nif simuCase ==11:\n xAttractor = np.array([0,0])\n centr = [2.05, 2.55-dy]\n\n dy =2.5\n \n obs = []\n a = [0.6,0.6]\n p = [1,1]\n x0 = [2., 3.2-dy]\n th_r = -60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n obs[0].center_dyn = centr\n\n a = [1,0.4]\n p = [1,3]\n x0 = [1.5, 1.6-dy]\n th_r = +60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n obs[1].center_dyn = centr\n\n a = [1.2,0.2]\n p = [2,2]\n x0 = [3.3,2.1-dy]\n th_r = -20/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n obs[1].center_dyn = centr\n\n N = 20\n\n xRange = [-0.5,5.5]\n yRange = [-2.5,2.5]\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.005, N_simuMax=600, convergenceMargin=0.3, sleepPeriod=0.01)\n\n if False: #save animation\n anim.ani.save('ani/animation_multipleObstacles_conv.mp4', dpi=100, fps=25)\n print('Saving finished.')\n\n\nif simuCase==12:\n N = 10\n ### Create obstacle \n obs = []\n a = [5, 2] \n p = [1, 1]\n x0 = [10.0, 0]\n th_r = 30/180*pi\n sf = 1.\n\n #xd=[0, 0]\n w = 3\n x_start = 0\n x_end = 2\n\n a = [3,0.8]\n p = [1,1]\n x0 = [3,0]\n th_r = 100/180*pi\n sf = 1.\n\n xd=[0., 0]\n w = 10\n x_start = 0\n x_end = 10\n \n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n \n xRange = [ -1,8]\n yRange = [-4,4]\n x_init = np.vstack((np.ones(N)*xRange[1],\n np.linspace(yRange[0],yRange[1],num=N) ))\n #x_init = np.array([[12],[1]])\n\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n attractorPos = [0,0]\n\n # anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.001, N_simuMax=2080, convergenceMargin=0.3, 
sleepPeriod=0.01,attractorPos=attractorPos, dynamicalSystem=nonlinear_stable_DS, nonlinear=True)\n\n # anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.01, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.001, N_simuMax=2080, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos, dynamicalSystem=linearAttractor, nonlinear=True)\n\n #anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.001, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\nif simuCase == 13:\n # Parallel ellipses; flow going through\n xAttractor = np.array([0,0])\n\n th_r0 = 38/180*pi\n obs = []\n obs.append(Obstacle(\n a = [4,0.4],\n p = [1,1],\n x0 = [0, 2],\n th_r = 30/180*pi,\n sf = 1.0))\n\n n = 0\n rCent = 3\n # obs[n].center_dyn=[obs[n].x0[0], \n # obs[n].x0[1]]\n obs[n].center_dyn=[obs[n].x0[0]-rCent*np.cos(obs[n].th_r),\n obs[n].x0[1]-rCent*np.sin(obs[n].th_r)]\n\n # obs.append(Obstacle(\n # a = [4,0.4],\n # p = [1,1],\n # x0 = [0, 4],\n # th_r = 30*180/pi,\n # sf = 1.0))\n \n # n = 1\n # obs[n].center_dyn=[obs[n].x0[0]-rCent*np.cos(obs[n].th_r),\n # obs[n].x0[1]-rCent*np.sin(obs[n].th_r)]\n \n xRange = [-5,5]\n yRange = [-1,7]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1000, convergenceMargin=0.3, sleepPeriod=0.001, RK4_int=True)\n\n\n animationName = 'ani/avoiding_ellipse.mp4'\n\n\n\n# saveFigure\nif True:\n# if saveFigure:\n if type(animationName)==int:\n anim.ani.save('ani/test.mp4', dpi=100,fps=50)\n else:\n anim.ani.save(animationName, dpi=100,fps=50)\n print('Saving finished.')\n plt.close('all')\nelse:\n anim.show()\n print('Animation')\n\n#if __name__ == '__main__':\n#if True:\n #anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, zRange=zRange, dt=0.005, N_simuMax=200000, convergenceMargin=0.3, sleepPeriod=0.01, )\n #\nprint('\\n\\n---- Script finished ---- \\n\\n')\n","sub_path":"dynamicSimulation.py","file_name":"dynamicSimulation.py","file_ext":"py","file_size_in_byte":32961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
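# --- Editor's sketch: the click-to-pause / double-click-to-stop pattern ---
# dynamicSimulation.py's onClick handler toggles a global `pause` flag and
# treats two clicks within 0.3 s as a double click that stops the animation
# via ani.event_source.stop(). A minimal self-contained version of that
# pattern (names here are illustrative, not from the original script):
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
line, = ax.plot([], [])
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1, 1)

state = {'pause': False, 'last_click': 0.0}

def on_click(event):
    now = time.time()
    if now - state['last_click'] < 0.3:   # double click: stop for good
        ani.event_source.stop()
        print('Animation exited.')
    state['last_click'] = now
    state['pause'] ^= True                # single click: toggle pause

def update(frame):
    if not state['pause']:
        x = np.linspace(0, 2 * np.pi, 200)
        line.set_data(x, np.sin(x + 0.1 * frame))
    return line,

fig.canvas.mpl_connect('button_press_event', on_click)
ani = FuncAnimation(fig, update, interval=30, blit=True)
plt.show()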
+{"seq_id":"350225023","text":"import cv2\nfrom os import path\n\nimport maproom.constants as c\n\ndef markerPath(fname):\n return path.join('markers', fname)\n\nprint('wrote image with dims', c.charucoImgDims, 'maps to size', (c.charucoSqSizeM * c.charucoNSqHoriz, c.charucoSqSizeM * c.charucoNSqVert), 'meters')\nprint('or', (c.charucoSqSizeM * c.charucoNSqHoriz / c.inToM, c.charucoSqSizeM * c.charucoNSqVert / c.inToM), 'inches')\n\ncharucoImg = c.charucoBoard.draw(c.charucoImgDims)\ncv2.imwrite(markerPath('charuco-calibration.png'), charucoImg)\n\npixelSize = c.markerSizeIn * c.imgPPI\n\nrobot01 = cv2.aruco.drawMarker(c.markerDictionary, 23, pixelSize)\ncv2.imwrite(markerPath('robot01.png'), robot01)\n\nrobot02 = cv2.aruco.drawMarker(c.markerDictionary, 24, pixelSize)\ncv2.imwrite(markerPath('robot02.png'), robot02)\n\nrobot03 = cv2.aruco.drawMarker(c.markerDictionary, 25, pixelSize)\ncv2.imwrite(markerPath('robot03.png'), robot03)\n\nrobot04 = cv2.aruco.drawMarker(c.markerDictionary, 26, pixelSize)\ncv2.imwrite(markerPath('robot04.png'), robot04)\n\nrobot05 = cv2.aruco.drawMarker(c.markerDictionary, 27, pixelSize)\ncv2.imwrite(markerPath('robot05.png'), robot05)\n\nrobot06 = cv2.aruco.drawMarker(c.markerDictionary, 28, pixelSize)\ncv2.imwrite(markerPath('robot06.png'), robot06)\n\nrobot07 = cv2.aruco.drawMarker(c.markerDictionary, 29, pixelSize)\ncv2.imwrite(markerPath('robot07.png'), robot07)\n\nrobot08 = cv2.aruco.drawMarker(c.markerDictionary, 30, pixelSize)\ncv2.imwrite(markerPath('robot08.png'), robot08)\n","sub_path":"skycam/generate_markers.py","file_name":"generate_markers.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"54729555","text":"import flask\nfrom flask import request, jsonify\nimport sqlite3\nimport json\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return '''Distant Reading Archive
A prototype API for distant reading of science fiction novels.
'''\n\n#Users\n#Getting all users\n@app.route('/bookmarking/users', methods=['GET'])\ndef api_user_all():\n users_table = ['user_id', 'user_name']\n sql = 'SELECT * FROM Users ORDER BY user_id ASC;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql).fetchall()\n users = [dict(zip(users_table, r)) for r in res]\n return jsonify({'count':len(res), 'users':users}), 200\n\n#Adding one or more new user(s)\n@app.route('/bookmarking', methods=['POST'])\ndef api_user_one():\n users_table = ['user_id', 'user_name']\n try:\n data = request.get_json(silent=True)\n user_ids = data['user_id']\n user_names = data['user_name']\n # 500 error\n except Exception as e:\n return jsonify({\"reasons\":[{\"message\":\"Internal Server Error\"}]}), 500\n sql = 'INSERT INTO Users(user_id, user_name) values(?, ?);'\n try:\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n # if input data is more than one\n if str(type(user_ids)) == \"\":\n res = []\n already_exists = []\n for idx in range(len(user_ids)):\n user_id = user_ids[idx]\n user_name = user_names[idx]\n try:\n cur.execute(sql, (user_id, user_name,))\n conn.commit()\n res.append(dict(zip(users_table, [user_id, user_name])))\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"User already exists\"}]}), 400\n # else input data is one\n else:\n user_id = user_ids\n user_name = user_names\n cur.execute(sql, (user_id, user_name,))\n conn.commit()\n res = dict(zip(users_table, [user_id, user_name]))\n return jsonify(res), 201\n # 400 error\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"User already exists\"}]}), 400\n\n#Deleting a user\n@app.route('/bookmarking/', methods=['DELETE'])\ndef api_user_del(user_id):\n sql = 'DELETE FROM Users WHERE user_id=?;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute('SELECT * FROM Users WHERE user_id=?;', (user_id,)).fetchall()\n if res:\n cur.execute(sql, (user_id,))\n conn.commit()\n return '', 204\n else:\n return jsonify({\"reasons\":[{\"message\":\"User does not exists\"}]}), 404\n\n#Bookmarks\n#Getting all bookmarks\n@app.route('/bookmarking/bookmarks', methods=['GET'])\ndef api_bookmark_all():\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n sql = 'SELECT * FROM Bookmarks ORDER BY user_id ASC;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql).fetchall()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 200\n\n#Getting all bookmarks for a certain user\n@app.route('/bookmarking/bookmarks/', methods=['GET'])\ndef api_bookmark_certain(user_id):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? ORDER BY url ASC;'\n try:\n # sql을 이용할 경우\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql, (user_id,)).fetchall()\n conn.commit()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 200\n # 404 error\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"The user does not exist\"}]}), 404\n\n#Getting target bookmarks for a certain user\n@app.route('/bookmarking/bookmarks//', methods=['GET'])\ndef api_bookmark_target(user_id, url):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? 
AND url=?;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n if not res:\n return jsonify({\"reasons\":[{\"message\":\"The user does not exist\"}]}), 404\n else:\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 200\n\n#Adding one or more bookmark(s) for a user\n@app.route('/bookmarking//bookmarks', methods=['POST'])\ndef api_bookmark_add(user_id):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n data = request.get_json(silent=True)\n # url & user_id is essential / tags & text is not essential\n if not user_id:\n return jsonify({\"reasons\": [{\"message\": \"User does not exist\"}]}), 404\n try:\n urls = data['url']\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"Url does not exist\"}]}), 500\n try:\n tagss = data['tags']\n except Exception as e:\n print(e)\n tagss = [[] for i in range(len(urls))]\n try:\n texts = data['text']\n except Exception as e:\n print(e)\n text = [[] for i in range(len(urls))]\n try:\n sql = 'INSERT INTO Bookmarks(url, tags, text, user_id) VALUES(?, ?, ?, ?);'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n # if input data is more than one\n if str(type(urls)) == \"\":\n res = []\n already_exists = []\n for idx in range(len(urls)):\n url = urls[idx]\n tags = tagss[idx]\n text = texts[idx]\n try:\n cur.execute(sql, (url, tags, text, user_id,))\n conn.commit()\n res.append([url, tags, text, user_id])\n except Exception as e:\n print(e)\n return jsonify({\"reasons\": [{\"message\": \"User and url does already exist\"}]}), 400\n # else input data is one\n else:\n url = urls\n tags = tagss\n text = texts\n try:\n cur.execute(sql, (url, tags, text, user_id,))\n conn.commit()\n except Exception as e:\n print(e)\n return jsonify({\"reasons\": [{\"message\": \"User and url does already exist\"}]}), 400\n # test\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? AND url=?;'\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 201\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"The user or url does not exist\"}]}), 404\n\n#Updating the title/tag(s) for a bookmarks for a target user\n@app.route('/bookmarking//bookmarks/', methods=['PUT'])\ndef api_bookmark_update_delete(user_id, url):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n data = request.get_json(silent=True)\n # url & user_id is essential / tags & text is not essential\n if not user_id or not url:\n return jsonify({\"reasons\":[{\"message\":\"Request is incorrect\"}]}), 500\n try:\n tags = data['tags']\n except Exception as e:\n print(e)\n tags = ''\n try:\n text = data['text']\n except Exception as e:\n print(e)\n text = ''\n try:\n sql = 'UPDATE Bookmarks SET tags=?, text=? WHERE url=? AND user_id=?;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n cur.execute(sql, (tags, text, url, user_id,))\n conn.commit()\n # test\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? 
AND url=?;'\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 201\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"The user or url does not exist\"}]}), 404\n\n#Deleting a bookmark for a target user\n@app.route('/bookmarking//bookmarks/', methods=['DELETE'])\ndef api_bookmark_del(user_id, url):\n try:\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? AND url=?;'\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n if not res:\n return jsonify({\"reasons\": [{\"message\": \"The user or bookmark does not exist\"}]}), 404\n else:\n sql = 'DELETE FROM Bookmarks WHERE user_id=? AND url=?;'\n cur.execute(sql, (user_id, url,))\n conn.commit()\n return '', 204\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"Request in incorrect\"}]}), 500\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return \"404
The resource could not be found.
\", 404\n\n@app.route('/bookmarking', methods=['GET'])\ndef api_filter():\n query_parameters = request.args\n url = query_parameters.get('url')\n tags = query_parameters.get('tags')\n text = query_parameters.get('text')\n user_id = query_parameters.get('user_id')\n\n query = \"SELECT * FROM bookmarks WHERE\"\n to_filter = []\n\n if url:\n query += ' url=? AND'\n to_filter.append(url)\n if tags:\n query += ' tags=? AND'\n to_filter.append(tags)\n if text:\n query += ' text=? AND'\n to_filter.append(text)\n if user_id:\n query += ' user_id=? AND'\n to_filter.append(user_id)\n if not (url or tags or text or user_id):\n return page_not_found(404)\n\n query = query[:-4] + ';'\n \n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n results = cur.execute(query, to_filter).fetchall()\n \n return jsonify(results)\n\napp.run()","sub_path":"os_unknown2/api_final.py","file_name":"api_final.py","file_ext":"py","file_size_in_byte":10213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"529225784","text":"class Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n #Time - O(n) ; n is length of s\n #Space - O(1) #as the number of alphabets are 26\n if len(p) > len(s) : #if length of s is less than p, return empty list.\n return []\n output = []\n hash_p = collections.Counter(p) # counter hashmap of characters of p\n hash_s = collections.Counter(s[:len(p)]) #counter hashmap of characters of s till length of p.\n #From here, we use sliding window technique.\n if hash_p == hash_s:\n output.append(0)\n for i in range(len(p), len(s)):\n #remove the first element and add the next element in s.\n if hash_s[s[i - len(p)]] > 1:\n hash_s[s[i - len(p)]] -= 1\n else:\n del(hash_s[s[i - len(p)]])\n if s[i] in hash_s:\n hash_s[s[i]] += 1\n else:\n hash_s[s[i]] = 1\n if hash_p == hash_s: #compare if both hashmaps are same, and append the start index if True\n output.append(i - len(p) + 1)\n return output","sub_path":"Week2/AllAnagramsInString.py","file_name":"AllAnagramsInString.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"7358195","text":"import time\nfrom lib import zabbix2canopsis\nfrom lib import event2amqp\n\n\nif __name__ == \"__main__\":\n while True:\n ZabixApi = zabbix2canopsis.ZabbixApi(configfile=\"zabbix.cfg\")\n CanoAmqp = event2amqp.EventCanopsis(configfile=\"zabbix.cfg\")\n eventlist = ZabixApi()\n for event in eventlist:\n print(event)\n CanoAmqp(event)\n time.sleep(45)\n print('Check beeing processed !')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"570522286","text":"# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\nimport matplotlib as mpl\n# mpl.use('TkAgg') # バックエンドでエラーが出る人用。Linuxでは多分要らない。\nimport json\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import date2num\nfrom mpl_finance import candlestick_ohlc\nimport pandas as pd\nimport numpy as np\n\n\ndef make_chart(currency_pair, data_path):\n \"\"\"\n parameters: currency_pair(str):通貨ペア\n data_path(path):保存場所\n \"\"\"\n with open(data_path, 'r') as fp:\n data_freq = 10 # 1分のデータをまとめて何分足のチャートにするか。単位は分。\n data = json.load(fp)\n le = len(data)\n idx = pd.date_range(data[0][0], data[le - 1][0], freq='1min')\n data = [data[i][1:] for i in range(len(data))]\n data2 = []\n for i in range(len(data)):\n data2.append(float(data[i][0]))\n data = np.array(data2) \n\n if len(data) != len(idx):\n with open('error_log.json', 'w') as file:\n file.write('error: data missing or overlapped')\n file.close()\n\n df = pd.Series(data, index=idx).resample('{0}min'.format(data_freq)).ohlc()\n df.plot()\n fig = plt.figure()\n ax = plt.subplot()\n\n xdate = date2num([x for x in df.index]) # Timestamp -> datetime\n ohlc = np.vstack((xdate, df.values.T)).T # datetime -> float\n\n candlestick_ohlc(ax, ohlc, width=1/24/60*data_freq, colorup='g', colordown='r', alpha=.4)\n\n ax.grid() # グリッド表示\n plt.ylabel(\"Price\")\n ax.set_xlim(df.index[0], df.index[-1]) # x軸の範囲\n fig.autofmt_xdate() # x軸のオートフォーマット\n plt.xlabel(\"Date\")\n plt.title(currency_pair)\n plt.savefig('chart.png') # チャートはpngとして保存する。\n","sub_path":"make_chart.py","file_name":"make_chart.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"371865278","text":"Height=input(\"Height: \")\r\n\r\n#Function to check, if the input is in range?\r\ndef check_input_range(height):\r\n\tif height<1 or height >8:\r\n\t\treturn False\r\n\telse:\r\n\t\tprint(height)\r\n\t\treturn True\r\n\r\n#Function to check, if the input is int?\r\ndef check_input_type(height):\r\n\ttry:\r\n\t\tHeight=int(height)\r\n\t\treturn True\r\n\texcept ValueError:\r\n\t\treturn False\r\n\t\t\r\n\r\n#Loop Checking\r\nwhile True:\r\n\tif check_input_type(Height):\r\n\t\tbreak\r\n\telse:\r\n\t\tHeight=input(\"Height: \")\r\n\r\n\r\n\r\nwhile True:\r\n\tif check_input_range(int(Height)):\r\n\t\tbreak\r\n\telse:\r\n\t\tHeight=input(\"Height: \")\r\n\r\n\r\n#Building The Pyramid\r\nHeight=int(Height)\r\nfor i in range(Height+1):\r\n\ttemp=Height-i\r\n\tfor j in range(temp):\r\n\t\tprint(\" \", end=\"\") #for printing same line in python\r\n\tfor j in range(i):\r\n\t\tprint(\"#\", end=\"\")\r\n\tprint(\"\\n\")\t\r\n\t\r\n","sub_path":"mario_less.py","file_name":"mario_less.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"361191155","text":"from packs.multiscale.preprocess.dual_primal.create_dual_and_primal_mesh import MultilevelData\n\nimport pdb\nfrom packs.directories import data_loaded\nfrom run_compositional import run_simulation\nimport time\n\n\"\"\" ---------------- LOAD STOP CRITERIA AND MESH DATA ---------------------- \"\"\"\n\nname_current = 'current_compositional_results_'\nname_all = data_loaded['name_save_file'] + '_'\nmesh = 'mesh/' + data_loaded['mesh_name']\n\nif data_loaded['use_vpi']:\n stop_criteria = max(data_loaded['compositional_data']['vpis_para_gravar_vtk'])\nelse: stop_criteria = data_loaded['compositional_data']['maximum_time']\n\nloop_max = 1000\nrun_criteria = 0\nloop = 0\n\"\"\" ----------------------------- RUN CODE --------------------------------- \"\"\"\n\nload = data_loaded['load_data']\nconvert = data_loaded['convert_english_to_SI']\n\nt = time.time()\nsim = run_simulation(name_current, name_all)\nM, data_impress, wells, fprop, load = sim.initialize(load, convert, mesh)\n\nmultilevel_structure = MultilevelData(data_impress, M)\n\nimport pdb; pdb.set_trace()","sub_path":"adm_impec-00/packs/tests/test_compositional_1.py","file_name":"test_compositional_1.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"552071707","text":"cities = eval(open('cities15000-smaller.txt',encoding=\"utf-8\").read())\nimport geohash_copied as gh\n\n\nct = [(cit1, cit2, latitude, longitude, country, cntr, pop, gh.encode(float(latitude), float(longitude))) for cit1, cit2, latitude, longitude, country, cntr, pop in cities[:5000]]\n\n\ndef sizeof_fmt(num, suffix=''):\n for unit in ['','K','M','G','T','P','E','Z']:\n if abs(num) < 1024.0:\n # return \"%3.1f%s%s\" % (num, unit, suffix)\n return \"%3.2f %s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Y', suffix)\n\nct.sort(key=lambda i:i[7])\nfrom pprint import pformat\n\nct_quick = [(latitude, longitude, gh[:4], country, cit1, sizeof_fmt(int(pop))) for cit1, cit2, latitude, longitude, country, cntr, pop, gh in ct]\nf = open('cities-1000-geohash-smallerl.nogit.txt', 'w', encoding=\"utf-8\")\n\nppf = pformat(ct_quick, width=200, indent = 4)\nf.write(ppf)\n\n# for cit1, cit2, latitude, longitude, country, cntr, pop, gh in ct:\n# print(gh[:4], country, cit1, sizeof_fmt(int(pop)))","sub_path":"_projlab/spatialite-geohash/geonames-geohash.py","file_name":"geonames-geohash.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"491735447","text":"\nfrom habitat_baselines.config.default import get_config\nfrom habitat_baselines.common.pepper_env import PepperRLExplorationEnv\nimport cv2\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nrgb_buffer = []\ndepth_buffer = []\nforward_step = 0.25\nturn_step = 0.1\n\ncfg = get_config()\nprint(cfg)\npepper_env = PepperRLExplorationEnv(cfg)\npepper_env.reset()\n\nkey = 0\nc_action = np.random.choice(3, 1, p=[0.8, 0.1, 0.1])[0]\ndefault_rot = np.random.choice(3, 1, p=[0, 0.5, 0.5])[0]\n\nvalues = []\nforward_enabled = True\nuser_forward_enabled = True\nnum_forward = 0\n\nactions = []\nstep = 0\nobservations, reward, done, info = \\\n pepper_env.reset()\nlast_pose = pepper_env.get_position()[0]\n\nx_p = []\ny_p = []\n\nx_o = []\ny_o = []\n\n\nplt.ion()\nplt.show()\n\nwhile key != ord('q'):\n step += 1\n last_action = c_action\n observations, reward, done, info = \\\n pepper_env.step(None, action={\"action\": c_action})\n pose = observations['robot_position']\n rot = observations['robot_rotation']\n sonar = observations['sonar']\n odom = observations['odom']\n\n\n gps_to_goal = observations['gps_with_pointgoal_compass']\n movement = np.linalg.norm(pose - last_pose)\n\n print(\"-\" * 100)\n print(\"Pose:\", pose)\n print(\"Odom:\", odom)\n print(\"Sonar:\", sonar)\n print(\"Movement:\", movement)\n print(\"STEP:\", step)\n\n x_p.append(pose[0])\n y_p.append(pose[1])\n x_o.append(odom[0][0])\n y_o.append(odom[0][1])\n\n plt.clf()\n plt.plot(x_p, y_p)\n plt.plot(x_o, y_o, \"--\")\n plt.pause(0.01)\n\n rgb = observations['rgb']\n depth = observations['depth']\n\n cv2.imshow(\"RGB\", rgb)\n cv2.imshow(\"Depth\", depth)\n key = cv2.waitKey(500)\n\n if sonar < 0.9 or (last_action == 0 and movement < 0.15):\n forward_enabled = False\n print(\"Disabled forward\")\n else:\n forward_enabled = True\n print(\"Enabled forward\")\n\n if forward_enabled and user_forward_enabled:\n c_action = np.random.choice(3, 1, p=[0.8, 0.1, 0.1])[0]\n else:\n print(\"Forward is disabled\")\n c_action = default_rot\n print(\"Running with default rotation\")\n\n if c_action == 0:\n num_forward += 1\n else:\n num_forward = 0\n\n if num_forward == 3:\n print(\"Changing default rotation\")\n default_rot = np.random.choice(3, 1, p=[0, 0.5, 0.5])[0]\n num_forward = 0\n\n if key == ord('w'):\n user_forward_enabled = not user_forward_enabled\n #elif key == ord('a'):\n # c_action = 1\n #elif key == ord('d'):\n # c_action = 2\n\n values.append({\n \"rgb\": rgb,\n \"depth\": depth,\n \"odom_pose\": odom[0],\n \"odom_rot\": odom[1],\n \"position\": pose,\n \"rotation\": rot,\n \"action\": last_action,\n \"gps_to_goal_compass\": gps_to_goal,\n \"sonar\": sonar\n })\n last_pose = pose\n if step == 250:\n break\n\nimport datetime\nnow = datetime.datetime.now()\ndt_string = now.strftime(\"%d.%m.%Y %H:%M:%S\")\npickle.dump(values, open(dt_string + \"pepper_save.p\", \"wb\"))\npepper_env.close()\n\n","sub_path":"aimas/pepper_save.py","file_name":"pepper_save.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"469224531","text":"#!/usr/bin/env python\n\nimport argparse\nfrom array import array\n\nimport elias\n\ndef main():\n parser = argparse.ArgumentParser(description='Compress data')\n parser.add_argument(\n '-o', metavar='FILE', required=True,\n type=argparse.FileType('wb'),\n help='File to save compressed data to'\n )\n parser.add_argument(\n 'input', metavar='INPUT_FILE', type=argparse.FileType('rb'),\n help='File to compress'\n )\n\n args = parser.parse_args()\n\n raw_data = array('B', args.input.read())\n\n filetype = args.o.name.split('.')[-1]\n if filetype == 'eliasd':\n encoded = elias.compress(raw_data, elias.elias_delta_encode)\n elif filetype == 'unary':\n encoded = elias.compress(raw_data, elias.unary_encode)\n else:\n parser.error('Unknown file type: %s' % filetype)\n\n args.o.write(encoded)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ffcrunch/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"259998940","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\npath = os.getcwd()\npath\n\n\n# In[3]:\n\n\ntest = pd.read_csv(path + '/data/processed_date_data/test.csv')\nprint(test.shape) # (178028, 49)\ntest1 = test[(test['smart_5raw'] > 0) | \n (test['smart_187raw'] > 0) | \n (test['smart_188raw'] > 0) | \n (test['smart_197raw'] > 0) | \n (test['smart_198raw'] > 0)] # 筛选raw:5,187,188,197,198任一大于0\nprint(test1.shape) # (33111, 49)\ntg = test1.groupby([\"manufacturer\", \"model\", \"serial_number\"])['dt'].max().reset_index()\nprint(tg.shape) # 2213\ntg.head()\n\n\n# In[6]:\n\n\ntest = pd.read_csv(path + '/data/processed_date_data/test.csv')\nprint(test.shape) # (178028, 49)\ntest1 = test[(test['smart_5_normalized'] < 100) | \n (test['smart_187_normalized'] < 100) | \n (test['smart_188_normalized'] < 100) | \n (test['smart_197_normalized'] < 100) | \n (test['smart_198_normalized'] < 100)] # 筛选raw:5,187,188,197,198任一大于0\nprint(test1.shape) # \ntg = test1.groupby([\"manufacturer\", \"model\", \"serial_number\"])['dt'].max().reset_index()\nprint(tg.shape) # \ntg.head()\n\n\n# In[7]:\n\n\n# 把最终结果里的dt转换成test里最晚出现的时间\ntest = pd.read_csv(path + '/data/processed_date_data/test.csv')\ntd = test[test['model']==1]\ntd = td[['serial_number','dt']].groupby(['serial_number'])['dt'].max().reset_index()\nprint(td.shape)\ntr = tg.merge(td, how='left', on='serial_number')\nprint(tr.shape)\ntr['dt'] = tr['dt_y']\ndel tr['dt_x']\ndel tr['dt_y']\ntr.drop_duplicates(inplace=True)\ntr.reset_index(drop=True, inplace=True)\ntr.to_csv(path + '/result/result_rules_max_test141.csv', index=False, header=None)\nprint(tr.shape)\ntr.head()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"code/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"94137600","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 4 14:20:16 2021\n\n@author: pc\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsRegressor\nimport matplotlib.pyplot as plt\n\n# Initialize data of lists\ndata = {'x1':[0, 0.4, 0.7, 0.5, 0.5, 0.6, 0.3, 0.1, 0.8, 0.8]\n ,'x2':[0.6, 0.4, 0.8, 0.2, 0.8, 0, 0.2, 0.6, 0.8, 0]\n ,'y':[-0.6, -0.6, 0.6, 1.8, 1.2, 1.2, 1.4, 0.6, 1.8, 1.6]}\n\n# using dictionary to convert specific columns\nconvert_dict = {'x1': float,\n 'x2': float,\n 'y': float\n }\n\n# create data\ndf = pd.DataFrame(data)\ndf = df.astype(convert_dict)\nprint(df.dtypes)\n\n#Specify the data\nx = df[['x1', 'x2']]\ny = df['y']\n\n# Find optimal number of neighbours\nresult = pd.DataFrame()\nmax_neighbors = x.shape[0]\n\nfor k in range(max_neighbors):\n kNNSpec = KNeighborsRegressor(n_neighbors = (k+1), metric = 'chebyshev')\n nbrs = kNNSpec.fit(x, y)\n pred_y = nbrs.predict(x)\n error_y = y - pred_y\n sse_y = np.sum(np.absolute(error_y))\n result = result.append([[(k+1), sse_y]], ignore_index = True)\n \nresult = result.rename(columns = {0: 'Number of Neighbors', 1: 'Sum of Squared Error'})\n\nplt.scatter(result['Number of Neighbors'], result['Sum of Squared Error'])\nplt.xlabel('Number of Neighbors')\nplt.ylabel('Sum of Squared Error')\nplt.xticks(np.arange(1,max_neighbors+1,1))\nplt.grid(axis = 'both')\nplt.show()\n\nsuggested_neighbor = result.nsmallest(2, 'Sum of Squared Error').tail(1).reset_index(drop=True).loc[0]['Number of Neighbors']\n\nprint(f'The number of neighbors that yields the smallest criterion is k = {suggested_neighbor}')","sub_path":"Final Exam/My Try/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"373201922","text":"# 入力された文の一定以上の文字数の単語を並び変えるやつ\r\nimport random\r\n\r\ndef shuffle(str):\r\n result = []\r\n for word in str.split():\r\n if len(word) > 4: # 長さが4超であればシャッフル\r\n word = word[:1] + ''.join(random.sample(word[1:-1], len(word) - 2)) + word[-1:]\r\n result.append(word)\r\n\r\n return ' '.join(result)\r\n\r\nstr = input('文章を入力: ')\r\nstr = shuffle(str)\r\n\r\nprint(str)","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"545152959","text":"import numpy as np\nimport astropy.io.fits as pyfits\n#from surveysim.utils import angsep\nfrom surveysim.exposurecalc import airMassCalculator\nfrom surveysim.avoidobject import avoidObject, moonLoc\nfrom surveysim.utils import mjd2lst\nfrom surveysim.observefield import setup_time\nfrom datetime import datetime\nfrom astropy.time import Time\nfrom desitarget.targetmask import obsconditions as obsbits\n\nMAX_AIRMASS = 10.0 #3.0 This new bound effectively does nothing.\nMIN_MOON_SEP = 90.0\nMIN_MOON_SEP_BGS = 5.0\n\ndef nextFieldSelector(obsplan, mjd, conditions, tilesObserved, slew, previous_ra, previous_dec):\n \"\"\"\n Returns the first tile for which the current time falls inside\n its assigned LST window and is far enough from the Moon and\n planets.\n\n Args:\n obsplan: string, FITS file containing the afternoon plan\n mjd: float, current time\n conditions: dictionnary containing the weather info\n tilesObserved: list containing the tileID of all completed tiles\n slew: bool, True if a slew time needs to be taken into account\n previous_ra: float, ra of the previous observed tile (degrees)\n previous_dec: float, dec of the previous observed tile (degrees)\n\n Returns:\n target: dictionnary containing the following keys:\n 'tileID', 'RA', 'DEC', 'Program', 'Ebmv', 'maxLen',\n 'MoonFrac', 'MoonDist', 'MoonAlt', 'DESsn2', 'Status',\n 'Exposure', 'obsSN2', 'obsConds'\n overhead: float (seconds)\n \"\"\"\n\n hdulist = pyfits.open(obsplan)\n tiledata = hdulist[1].data\n moonfrac = hdulist[0].header['MOONFRAC']\n tileID = tiledata['TILEID']\n tmin = tiledata['LSTMIN']\n tmax = tiledata['LSTMAX']\n explen = tiledata['MAXEXPLEN']/240.0\n ra = tiledata['RA']\n dec = tiledata['DEC']\n program = tiledata['PROGRAM']\n obsconds = tiledata['OBSCONDITIONS']\n\n lst = mjd2lst(mjd)\n dt = Time(mjd, format='mjd')\n found = False\n for i in range(len(tileID)):\n dra = np.abs(ra[i]-previous_ra)\n if dra > 180.0:\n dra = 360.0 - dra\n ddec = np.abs(dec[i]-previous_dec)\n overhead = setup_time(slew, dra, ddec)\n t1 = tmin[i] + overhead/240.0\n t2 = tmax[i] - explen[i]\n\n if ( ((t1 <= t2) and (lst > t1 and lst < t2)) or ( (t2 < t1) and ((lst > t1 and t1 <=360.0) or (lst >= 0.0 and lst < t2))) ):\n if (avoidObject(dt.datetime, ra[i], dec[i]) and airMassCalculator(ra[i], dec[i], lst) < MAX_AIRMASS):\n moondist, moonalt, moonaz = moonLoc(dt.datetime, ra[i], dec[i])\n if ( (len(tilesObserved) > 0 and tileID[i] not in tilesObserved['TILEID']) or len(tilesObserved) == 0 ):\n if (( (moonalt < 0.0 and (obsconds[i] & obsbits.mask('DARK')) != 0) ) or\n (moonalt >=0.0 and\n (( (moonfrac < 0.2 or (moonalt*moonfrac < 12.0)) and moondist > MIN_MOON_SEP and (obsconds[i] & obsbits.mask('GRAY')) != 0 ) or\n ( (obsconds[i] & obsbits.mask('BRIGHT')) != 0 and moondist > MIN_MOON_SEP_BGS) ))):\n found = True\n break\n\n if found == True:\n tileID = tiledata['TILEID'][i]\n RA = ra[i]\n DEC = dec[i]\n Ebmv = tiledata['EBV_MED'][i]\n maxLen = tiledata['MAXEXPLEN'][i]\n DESsn2 = 100.0 # Some made-up number -> has to be the same as the reference in exposurecalc.py\n status = tiledata['STATUS'][i]\n exposure = -1.0 # Updated after observation\n obsSN2 = -1.0 # Idem\n target = {'tileID' : tileID, 'RA' : RA, 'DEC' : DEC, 'Program': program[i], 'Ebmv' : Ebmv, 'maxLen': maxLen,\n 'MoonFrac': moonfrac, 'MoonDist': moondist, 'MoonAlt': moonalt, 'DESsn2': DESsn2, 'Status': status,\n 'Exposure': exposure, 'obsSN2': obsSN2, 'obsConds': obsconds[i]}\n else:\n target = None\n return target, 
overhead\n\n","sub_path":"py/surveysim/nextobservation.py","file_name":"nextobservation.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"118677844","text":"#!/usr/bin/env python3\n#\n# Copyright AlertAvert.com (c) 2017. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nfrom copy import deepcopy\nfrom flask import (\n Flask,\n make_response,\n redirect,\n render_template,\n request,\n url_for,\n)\nimport json\nimport os\nfrom werkzeug.utils import secure_filename\n\nfrom elasticsearch_connector import ElasticsearchConnector\n\n\nDOCTYPE = 'plants'\nINDEX_NAME = 'cfgreendesign'\nUPLOAD_FOLDER = '/tmp'\n\n\ndef load_template():\n with open('templates/query.json') as template:\n return json.load(template)\n\n\nTEMPLATE = load_template()\nHEADERS = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n}\nALLOWED_EXTENSIONS = {'xls', 'xlsx'}\n\nMAPPING = [\n {\"key\": \"botanical_name\", \"caption\": \"Botanical\", \"width\": \"10%\"},\n {\"key\": \"common_name\", \"caption\": \"Name\", \"width\": \"10%\"},\n {\"key\": \"description\", \"caption\": \"Description\", \"width\": \"20%\"},\n {\"key\": \"flowering_months\", \"caption\": \"Flowering\", \"width\": \"10%\"},\n {\"key\": \"quantity\", \"caption\": \"Qty\", \"width\": \"5%\"},\n {\"key\": \"plant_size_at_maturity\", \"caption\": \"Plant size at Maturity\", \"width\": \"10%\"},\n {\"key\": \"qualify_for_rebate\", \"caption\": \"Rbt\", \"width\": \"5%\"},\n {\"key\": \"water_needs_according_to_wucols\", \"caption\": \"Water\", \"width\": \"5%\"},\n {\"key\": \"pot_size\", \"caption\": \"Pot\", \"width\": \"5%\"},\n {\"key\": \"native\", \"caption\": \"Native\", \"width\": \"5%\"},\n {\"key\": \"light\", \"caption\": \"Light\", \"width\": \"5%\"},\n {\"key\": \"attract_butterflies\", \"caption\": \"Attract\", \"width\": \"10%\"},\n {\"key\": \"classification\", \"caption\": \"Cat\", \"width\": \"5%\"},\n {\"key\": \"attract_bees\", \"caption\": \"Bees?\", \"width\": \"5%\"}\n]\n\nPLANT_KEYS = [\n {\"key\": \"classification\", \"caption\": \"Classification\"},\n {\"key\": \"description\", \"caption\": \"Description\"},\n {\"key\": \"full_description\", \"caption\": \"Full Description\"},\n {\"key\": \"flowering_months\", \"caption\": \"Flowering Months\"},\n {\"key\": \"quantity\", \"caption\": \"Quantity\"},\n {\"key\": \"plant_size_at_maturity\", \"caption\": \"Plant size at Maturity\"},\n {\"key\": \"water_needs_according_to_wucols\", \"caption\": \"Water needs according to WUCOLS\"},\n {\"key\": \"pot_size\", \"caption\": \"Recommended Pot Size\"},\n {\"key\": \"native\", \"caption\": \"California Native?\"},\n {\"key\": \"light\", \"caption\": \"Light\"},\n {\"key\": \"attract_butterflies\", \"caption\": \"Attracts birds & hummingbirds?\"},\n {\"key\": \"attract_bees\", \"caption\": \"Attracts Bees?\"},\n {\"key\": \"qualify_for_rebate\", \"caption\": \"Qualifies for Rebate?\"},\n]\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef entry():\n filename = request.args.get('filename')\n errmsg = request.args.get('errmsg')\n upload_id = request.args.get('upload_id')\n return render_template('search.html', 
uploaded_file=filename, msg=errmsg, upload_id=upload_id)\n\n\n@app.route('/search')\ndef search():\n    query_args = request.args.get('q')\n    offset = int(request.args.get('offset', 0))\n    size = int(request.args.get('size', 25))\n    query = build_query(query_args, offset, size)\n    connector = app.config['ES_HOST']\n    response = connector.search_for(query)\n    if response.ok:\n        return render_template(\"results.html\",\n                               results=process_results(response.json()),\n                               meta=MAPPING)\n    return make_response(\"Failed: {}\".format(response.reason), 400)\n\n\n@app.route('/plant/<id>')\ndef get_plant(id):\n    connector = app.config['ES_HOST']\n    response = connector.find_one(id)\n    if response.ok:\n        return render_template(\"plant.html\", plant=response.json().get(\"_source\"), meta=PLANT_KEYS)\n    else:\n        return redirect(url_for('entry', errmsg=\"Could not find plant ({} id missing)\".format(id)))\n\n\n@app.route('/import', methods=['POST'])\ndef upload_file():\n    # check if the post request has the file part\n    file = request.files.get('importFile')\n    if not file:\n        app.logger.error(\"Missing import file name: \", request.files)\n        return redirect(url_for('entry', errmsg=\"Missing file\"))\n    app.logger.info(\"Uploading file '%s'\", file.filename)\n    # if user does not select file, browser also\n    # submits an empty part without filename\n    filename = secure_filename(file.filename)\n    if filename == '' or not allowed_file(filename):\n        app.logger.error(\"Not a valid filename: %s\", filename)\n        return redirect(url_for('entry', errmsg=\"You must select a valid Excel file\"))\n\n    local_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n    file.save(local_path)\n    app.logger.info(\"Uploading data to Elasticsearch server\")\n    try:\n        connector = app.config['ES_HOST']\n        stats_id = connector.rebuild_index(local_path)\n        return redirect(url_for('entry', filename=filename, upload_id=stats_id))\n    except Exception as ex:\n        app.logger.error(\"Cannot upload file: {}. Reason: {}\".format(filename, ex))\n        return redirect(url_for('entry', errmsg=\"Error while importing data from '{}' ({})\".format(\n            filename, ex)))\n\n\n@app.route('/uploads/<upload_id>')\ndef show_metadata(upload_id):\n    connector = app.config['ES_HOST']\n    response = connector.find_metadata(upload_id)\n    if response.ok:\n        return render_template(\"metadata.html\", metadata=response.json().get(\"_source\"))\n    else:\n        return redirect(url_for('entry', errmsg=\"Could not find details for upload ({} id \"\n                                                \"missing)\".format(upload_id)))\n\n\n\ndef process_results(results):\n    items = list()\n    if \"hits\" in results:\n        if \"hits\" in results.get(\"hits\"):\n            hits = results[\"hits\"][\"hits\"]\n            for hit in hits:\n                item = hit.get(\"_source\")\n                item['id'] = hit[\"_id\"]\n                items.append(item)\n    return items\n\n\ndef build_query(search_terms, offset=0, size=25):\n    query = deepcopy(TEMPLATE)\n    query[\"query\"][\"multi_match\"][\"query\"] = search_terms\n    query['from'] = offset\n    query['size'] = size\n    return query\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--debug\", action='store_true', default=False)\n parser.add_argument(\"--host\")\n parser.add_argument(\"--port\", type=int, default=8000)\n parser.add_argument(\"--es_host\", required=True)\n parser.add_argument(\"--es_port\", type=int, required=True)\n parser.add_argument(\"--secret\", required=True)\n parser.add_argument(\"--workdir\", default=UPLOAD_FOLDER)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n cfg = parse_args()\n app.config['ES_HOST'] = ElasticsearchConnector(INDEX_NAME, DOCTYPE,\n host=cfg.es_host,\n port=cfg.es_port)\n app.config['UPLOAD_FOLDER'] = cfg.workdir\n app.config['SECRET'] = cfg.secret\n app.secret_key = cfg.secret\n\n app.run(host=cfg.host, port=cfg.port, debug=cfg.debug)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"279575154","text":"from django.contrib.auth.models import Permission,Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom fa_system.models import CustomUser\n\ninvestor=Group.objects.get(name='investors')\nbranch=Group.objects.get(name='branches')\nanalyst=Group.objects.get(name='analysts')\nfinDep=Group.objects.get(name='finDep')\n\n\nct=ContentType.objects.get_for_model(CustomUser)\nPermission.objects.create(\n codename='asInvestor',\n name='as investor',\n content_type=ct,\n)\npInv=Permission.objects.get(name='as investor')\n\nPermission.objects.create(\n codename='asBranch',\n name='as branch',\n content_type=ct,\n)\npBra=Permission.objects.get(name='as branch')\n\nPermission.objects.create(\n codename='asAnalyst',\n name='as analyst',\n content_type=ct,\n)\npAna=Permission.objects.get(name='as analyst')\n\nPermission.objects.create(\n codename='asFinDep',\n name='as finDep',\n content_type=ct,\n)\npFin=Permission.objects.get(name='as finDep')\n\ninvestor.permissions.add(pInv)\nbranch.permissions.add(pBra)\nanalyst.permissions.add(pAna)\nfinDep.permissions.add(pFin)\nprint('Done')\n\n\n","sub_path":"createPermissionsAssignToGroup.py","file_name":"createPermissionsAssignToGroup.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"295549112","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\nfrom apps.restaurants.models import Restaurant\n\nUser = get_user_model()\n\n\nclass Review(models.Model):\n\n NOTATION = (\n (1, \"1 star, could do better\"),\n (2, \"2 stars, experience is ok\"),\n (3, \"3 stars, expect a good lunch\"),\n (4, \"4 stars, great restaurant\"),\n (5, \"5 stars, WOW experience\"),\n )\n\n content = models.TextField(\n verbose_name=\"review content\"\n )\n rating = models.IntegerField(\n verbose_name=\"review rating\",\n choices=NOTATION\n )\n date_created = models.DateTimeField(\n verbose_name=\"created time\",\n auto_now_add=True\n )\n date_modified = models.DateTimeField(\n verbose_name=\"modified\",\n auto_now=True\n )\n idUser = models.ForeignKey(\n to=User,\n related_name=\"fk_Review_to_User\",\n on_delete=models.CASCADE\n )\n idRestaurant = models.ForeignKey(\n to=Restaurant,\n related_name=\"fk_Review_to_Restaurant\",\n on_delete=models.CASCADE\n )\n likes = models.ManyToManyField(\n to=User,\n related_name='review_likes',\n blank=True\n )\n\n class Meta:\n ordering = ['-date_modified']\n\n def __str__(self):\n return f'Review #{self.id}'","sub_path":"backend/apps/reviews/models/models_reviews.py","file_name":"models_reviews.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"556281192","text":"import torch\nimport logging\nimport gzip\nimport os\nimport csv\nfrom torch.utils.data import Dataset\nfrom typing import List\nfrom typing import Union, List\nfrom tqdm import tqdm\nimport logging\n\n\nclass InputExample:\n \"\"\"\n Structure for one input example with texts, the label and a unique id\n \"\"\"\n def __init__(self, guid: str, texts: List[str], label: Union[int, float]):\n \"\"\"\n Creates one InputExample with the given texts, guid and label\n\n str.strip() is called on both texts.\n\n :param guid\n id for the example\n :param texts\n the texts for the example\n :param label\n the label for the example\n \"\"\"\n self.guid = guid\n self.texts = [text.strip() for text in texts]\n self.label = label\n\n\nclass LabeledSTSDataReader(object):\n \"\"\"Semantic Textual Similarity data reader\"\"\"\n def __init__(self, s1_col_idx=0, s2_col_idx=1, score_col_idx=2, delimiter=\"\\t\", dataset_folder=None, \n quoting=csv.QUOTE_NONE, normalize_scores=False, min_score=0, max_score=1):\n self.dataset_folder = dataset_folder\n self.score_col_idx = score_col_idx\n self.s1_col_idx = s1_col_idx\n self.s2_col_idx = s2_col_idx\n self.delimiter = delimiter\n self.quoting = quoting\n self.normalize_scores = normalize_scores\n self.min_score = min_score\n self.max_score = max_score\n\n def get_examples(self, filename, max_examples=0, skip_head=False, predict_mode=False):\n \"\"\"\n filename specified which data split to use (train.csv, dev.csv, test.csv).\n \"\"\"\n if self.dataset_folder is not None:\n filepath = os.path.join(self.dataset_folder, filename)\n else:\n filepath = filename\n with open(filepath, encoding=\"utf-8\") as fIn:\n data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)\n examples = []\n for id, row in enumerate(data):\n if skip_head == True and id == 0:\n continue\n if predict_mode == False:\n score = int(row[self.score_col_idx])\n else:\n score = 0\n\n s1 = row[self.s1_col_idx]\n s2 = row[self.s2_col_idx]\n examples.append(InputExample(guid=filename+str(id), texts=[s1, s2], label=score))\n if id < 10:\n logging.info(\"Example idx:%d\\ntexts:%s\\t%s\\nlabel:%d\"%(id, s1, s2, score)) \n\n if max_examples > 0 and len(examples) >= max_examples:\n break\n\n return examples\n\n\nclass SentencesDataset(Dataset):\n \"\"\"\n Dataset for smart batching, that is each batch is only padded to its longest sequence instead of padding all\n sequences to the max length.\n The SentenceBertEncoder.smart_batching_collate is required for this to work.\n SmartBatchingDataset does *not* work without it.\n \"\"\"\n def __init__(self, examples: List[InputExample], model, show_progress_bar: bool = None):\n \"\"\"\n Create a new SentencesDataset with the tokenized texts and the labels as Tensor\n \"\"\"\n if show_progress_bar is None:\n show_progress_bar = (logging.getLogger().getEffectiveLevel() == logging.INFO or logging.getLogger().getEffectiveLevel() == logging.DEBUG)\n self.show_progress_bar = show_progress_bar\n\n self.convert_input_examples(examples, model)\n\n def convert_input_examples(self, examples: List[InputExample], model):\n \"\"\"\n Converts input examples to a SmartBatchingDataset usable to train the model with\n SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader\n\n smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.\n\n :param examples:\n the input examples for the training\n :param model\n the Sentence BERT model for the conversion\n :return: a 
SmartBatchingDataset usable to train the model with SentenceTransformer.smart_batching_collate as the collate_fn\n for the DataLoader\n \"\"\"\n num_texts = len(examples[0].texts)\n inputs = [[] for _ in range(num_texts)]\n labels = []\n too_long = [0] * num_texts\n label_type = None\n iterator = examples\n max_seq_length = model.get_max_seq_length()\n\n if self.show_progress_bar:\n iterator = tqdm(iterator, desc=\"Convert dataset\")\n\n for ex_index, example in enumerate(iterator):\n if label_type is None:\n if isinstance(example.label, int):\n label_type = torch.long\n elif isinstance(example.label, float):\n label_type = torch.float\n tokenized_texts = [model.tokenize(text) for text in example.texts]\n\n for i, token in enumerate(tokenized_texts):\n if max_seq_length != None and max_seq_length > 0 and len(token) >= max_seq_length:\n too_long[i] += 1\n\n labels.append(example.label)\n for i in range(num_texts):\n inputs[i].append(tokenized_texts[i])\n\n tensor_labels = torch.tensor(labels, dtype=label_type)\n\n logging.info(\"Num sentences: %d\" % (len(examples)))\n for i in range(num_texts):\n logging.info(\"Sentences {} longer than max_seqence_length: {}\".format(i, too_long[i]))\n\n self.tokens = inputs\n self.labels = tensor_labels\n\n def __getitem__(self, item):\n return [self.tokens[i][item] for i in range(len(self.tokens))], self.labels[item]\n\n def __len__(self):\n return len(self.tokens[0])","sub_path":"transformer_encoder/data_util/LabeledSTSDataUtil.py","file_name":"LabeledSTSDataUtil.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
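A hedged usage sketch for the reader/dataset pair above; the folder and file names are assumptions, and `model` stands for any encoder exposing the `tokenize()` and `get_max_seq_length()` methods the code requires:

reader = LabeledSTSDataReader(dataset_folder='data/sts')     # hypothetical path
examples = reader.get_examples('train.csv', skip_head=True)  # hypothetical file
dataset = SentencesDataset(examples, model)                  # `model` assumed defined
print(len(dataset), 'pairs ready for a smart-batching DataLoader')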
+{"seq_id":"409757930","text":"__author__ = 'nsrivas3'\n\nclass Solution:\n # @param {integer} n\n # @return {boolean}\n def isHappy(self, n):\n notHappy = 0\n iterlist = []\n\n def check(n):\n sum1 = 0\n while n!=0:\n sum1 = sum1 + (n%10)**2\n n = int(n/10)\n n = sum1\n # print(sum1)\n return(n)\n\n while (n!=1) and (notHappy!=1):\n n = check(n)\n print(\"n: \"+str(n)+\" NotHappy: \"+str(notHappy)+\" iterlist: \"+str(iterlist))\n for I in iterlist:\n if n==I:\n print(\"n: \"+str(n)+\" NotHappy: \"+str(notHappy)+\" ChkRslt \"+str(n==I))\n notHappy = 1\n break\n else:\n print(\"n: \"+str(n)+\" NotHappy: \"+str(notHappy)+\" ChkRslt \"+str(n==I))\n iterlist.append(n)\n\n if n==1: return(True)\n elif notHappy == 1: return(False)\n\nSol1 = Solution()\nprint(Sol1.isHappy(7))\n\n","sub_path":"Closed Questions/IsHappy_v2.py","file_name":"IsHappy_v2.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"30316493","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\n#python img_data.py ./data/casia_ds_Expert.txt ./data/casia_ds\n\ndef FitToDistr01(amounm_array):\n\tmax_t = max(amounm_array)\n\tmin_t = min(amounm_array)\n\tinit_range = max_t - min_t\n\treturn [(init_val - min_t) / init_range for init_val in amounm_array]\n\n\ndef PersonEyeDistr(same, diff):\n\t#same = [float(format(i, '.3f')) for i in same]\n\tuniq_dist_same = list(set(same))\n\tuniq_dist_same.sort()\n\t\n\t#diff = [float(format(i, '.3f')) for i in diff]\n\tuniq_dist_diff = list(set(diff))\n\tuniq_dist_diff.sort()\n\t\n\tall_mount = len(same) + len(diff)\n\tsame_am = [same.count(i) / all_mount for i in uniq_dist_same]\n\tdiff_am = [diff.count(i) / all_mount for i in uniq_dist_diff]\n\t\n\treturn uniq_dist_same, FitToDistr01(same_am), uniq_dist_diff, FitToDistr01(diff_am)\n\nsame = []\nwith open('same.txt', 'r') as data_file:\n\tfor line in data_file:\n\t\tsame += [int(i) for i in line.split()]\ndiff = []\nwith open('diff.txt', 'r') as data_file:\n\tfor line in data_file:\n\t\tdiff += [int(i) for i in line.split()]\n\nsame_len = len(same)\ndiff_len = len(diff)\n\nsame = FitToDistr01(same)\ndiff = FitToDistr01(diff)\n\nsame.sort()\ndiff.sort()\n\nprint('same_arr_len -> ', len(same), 'avg ->', sum(i for i in same) / len(same))\nprint('diff_arr_len -> ', len(diff), 'avg ->', sum(i for i in diff) / len(diff))\n\n\n\nsame_dist, same_am, diff_dist, diff_am = PersonEyeDistr(same, diff)\n\nplt.figure(figsize=(10,6))\nplt.title(\"Left and Right eyes\")\nplt.xlabel(r\"$\\rho$\", fontsize=20)\nplt.ylabel(\"amount\", fontsize=18)\nplt.plot(same_dist, same_am, 'ro', label=\"same person dist\")\nplt.plot(diff_dist, diff_am, 'bo', label=\"diff person dist\")\nplt.legend(loc=\"best\")\nplt.show()","sub_path":"distr.py","file_name":"distr.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"222754910","text":"import math\r\n\r\nclass PGaussSolver:\r\n def __init__(self, fp, a, b, n):\r\n self.m_fp = fp\r\n self.m_A = a\r\n self.m_B = b\r\n self.m_N = n\r\n self.Result = 0\r\n\r\n def execute(self):\r\n integral = 0\r\n for i in range(1, self.m_N+1):\r\n integral += self.m_fp(self.legendreZeroes(self.m_N, i)) * self.weight(self.m_N, self.legendreZeroes(self.m_N, i))\r\n self.Result = ((self.m_B-self.m_A)/2.0)*integral\r\n\r\n def getResult(self):\r\n return self.Result\r\n\r\n def legendre(self, m_N, x):\r\n if m_N == 0:\r\n return 1\r\n elif m_N == 1:\r\n return x\r\n else:\r\n return ((2.0*m_N-1)/m_N)*x*self.legendre(m_N-1, x)-((1.0*m_N-1)/m_N)*self.legendre(m_N-2, x)\r\n\r\n def dLegendre(self, m_N, x):\r\n d = (1.0*m_N/(x*x-1))*((x*self.legendre(m_N, x))-self.legendre(m_N-1, x))\r\n return d\r\n\r\n def legendreZeroes(self, m_N, i):\r\n xnew = 0\r\n xold = 0\r\n pi = math.pi\r\n xold = math.cos(pi*(i-1/4.0)/(m_N+1/2.0))\r\n xnew = xold - self.legendre(m_N, xold)/self.dLegendre(m_N, xold)\r\n while (1+abs(xnew-xold)>1.0):\r\n xold = xnew\r\n xnew = xold - self.legendre(m_N, xold)/self.dLegendre(m_N, xold)\r\n return xnew\r\n\r\n def weight(self, m_N, x):\r\n w = 2/((1-x**2)*(self.dLegendre(m_N, x)**2))\r\n return w","sub_path":"AP-HW5-9523124/Q6/PGaussSolver.py","file_name":"PGaussSolver.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"199694653","text":"import gym\nfrom gym.wrappers import Monitor\nimport glob\nimport io\nimport base64\nfrom pyvirtualdisplay import Display\n\nfrom gym import wrappers\nfrom IPython import display\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport gym\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf\nfrom tensorflow.keras import layers,models\n\nvirtual_display = Display(visible=0, size=(1400, 900))\nvirtual_display.start()\n\n\ndef query_environment(name):\n env = gym.make(name)\n spec = gym.spec(name)\n print(f\"Action Space: {env.action_space}\")\n print(f\"Observation Space: {env.observation_space}\")\n print(f\"Max Episode Steps: {spec.max_episode_steps}\")\n print(f\"Nondeterministic: {spec.nondeterministic}\")\n print(f\"Reward Range: {env.reward_range}\")\n print(f\"Reward Threshold: {spec.reward_threshold}\")\n\nMEMORYLEN=int(10000)\nBATCHSIZE=64\nEPOCHS=1\n# UPDATE_EVERY = 4\n\n\nclass DQNAgent():\n def __init__(self,actions=2,obs=4):\n self.actions=actions\n self.observations=obs\n self.model=self.load_model()\n\n self.memory=deque(maxlen=MEMORYLEN)\n self.gamma=0.99\n self.patience=0\n self.target_model=self.load_model()\n \n self.copy_weights()\n self.a=0.75\n self.b=0\n \n def play(self,observation,epsilon):\n if (len(self.memory)epsilon:\n# print(\"model\")\n action=self.model_predictions(observation)\n else:\n action=np.random.randint(low=0,high=self.actions)\n return action\n \n def step(self,state, action, reward, next_state, done):\n self.memory.append([state, action, reward, next_state, done])\n if ((len(self.memory)>=BATCHSIZE) & (np.random.random() < 0.25 )):\n self.train_model()\n pass\n \n def load_memory_with_probs(self):\n mem=np.array(list(self.memory))\n y_pred=self.model.predict(np.stack(mem[:,0]))\n \n \n data=np.array(mem)\n \n state, action, reward, next_state, done=np.stack(data[:,0]),np.stack(data[:,1]),np.stack(data[:,2]),np.stack(data[:,3]),np.stack(data[:,4])\n qnext_max=np.max(self.target_model.predict(next_state),axis=1)\n qnext_max=reward+ self.gamma*qnext_max*(1-done)\n qtable_to_update=self.target_model.predict(state)\n for indx,qs in enumerate(qtable_to_update):\n qtable_to_update[indx,action[indx]]=qnext_max[indx]\n# self.model.fit(state,qtable_to_update,epochs=1,verbose=0)\n y_pred=self.model.predict(state)\n errors=[]\n for i in range(y_pred.shape[0]):\n errors.append(np.abs(y_pred[i,action[i]] - qtable_to_update[i,action[i]]))\n \n errors=[(error+0.1)**self.a for error in errors]\n sig_p=sum(errors)\n errors=[error/sig_p for error in errors]\n \n# print(data.shape,len(errors))\n mem=np.hstack([data,np.array(errors).reshape(-1,1)])\n return mem\n \n def train_model(self):\n memory=self.load_memory_with_probs()\n rnd_indices = np.random.choice(len(memory), size=BATCHSIZE,p=memory[:,5].astype('float64'))\n data=np.array(memory)[rnd_indices]\n np.random.shuffle(data)\n \n state, action, reward, next_state, done=np.stack(data[:,0]),np.stack(data[:,1]),np.stack(data[:,2]),np.stack(data[:,3]),np.stack(data[:,4])\n qnext_max=np.max(self.target_model.predict(next_state),axis=1)\n qnext_max=reward+ self.gamma*qnext_max*(1-done)\n qtable_to_update=self.target_model.predict(state)\n for indx,qs in enumerate(qtable_to_update):\n qtable_to_update[indx,action[indx]]=qnext_max[indx]\n# print(data[:5])\n importance=[(1/p)*(1/len(memory))**self.b for p in data[:,5]]\n self.model.fit(state,qtable_to_update,epochs=1,verbose=0,sample_weight=np.array(importance))\n self.patience+=1\n if self.patience==10:\n 
self.copy_weights()\n self.patience=0\n \n pass\n def model_predictions(self,observation):\n pred=self.model.predict(observation.reshape(1,-1))\n pred=np.argmax(pred)\n return pred\n \n def load_model(self):\n num_input = layers.Input(shape=(self.observations, ))\n x = layers.Dense(24,activation=\"relu\")(num_input)\n# x = layers.BatchNormalization()(x)\n# x = layers.Dropout(0.1)(x)\n x = layers.Dense(24, activation=\"relu\")(x)\n# x = layers.Dropout(0.1)(x)\n# x = layers.BatchNormalization()(x)\n y = layers.Dense(self.actions, activation=\"linear\")(x)\n model = models.Model(inputs=num_input, outputs=y)\n model.compile(loss=\"mse\",optimizer=tf.keras.optimizers.Adam(lr=0.01,decay=0.01))\n model.summary()\n return model\n def copy_weights(self):\n self.target_model.set_weights(self.model.get_weights()) \nimport time\nfrom tqdm import tqdm\nstarttime=time.time()\nscores = [] # list containing scores from each episode\nscores_window = deque(maxlen=100) # last 100 scores\nn_episodes=5000\nagent=DQNAgent()\n\nmax_t=500\neps_start=1.0\neps_end=0.15\neps_decay=0.99\n\n\n\neps = eps_start\nenv=gym.make('LunarLander-v2')\neps_history=[]\nfor i_episode in range(1, n_episodes+1):\n state = env.reset()\n score = 0\n for i_ in range(1,max_t+1):\n action = agent.play(state,eps)\n next_state, reward, done, _ = env.step(action)\n if done:\n if (i_>=140):\n agent.step(state, action, reward+5, next_state, done)\n else:\n agent.step(state, action, reward-5, next_state, done)\n else:\n agent.step(state, action, reward, next_state, done)\n \n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps)\n eps_history.append(eps)\n if i_episode % 100 == 0:\n agent.model.save_weights(\"./weightsfolder/Lunar_weights_{}.h5\".format(i_episode))\n if i_episode % 10 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=190.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n \n break\nendtime= time.time() \nprint(endtime-starttime)\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(scores)), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.savefig(\"lunFT.png\")\n# plt.show()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(eps_history)), eps_history)\nplt.ylabel('Epsilone')\nplt.xlabel('Episode #')\nplt.savefig(\"epsilonLunarFT.png\")\n# plt.show() \nagent.model.save_weights(\"./weightsfolder/Lunar_agent_weights.h5\")","sub_path":"lunar_dqm_fixdtarget_prioreplay.py","file_name":"lunar_dqm_fixdtarget_prioreplay.py","file_ext":"py","file_size_in_byte":7065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
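For reference, the quantities built in `load_memory_with_probs` and `train_model` above follow proportional prioritized experience replay; a standalone sketch of the textbook formulas (the record's `0.1` offset plays the role of epsilon, and note the standard importance weight is `(N*p) ** -b`, slightly different from the record's `(1/p)*(1/N)**b`):

import numpy as np

def priorities_and_weights(td_errors, a=0.75, b=0.0, eps=0.1):
    p = (np.abs(td_errors) + eps) ** a  # priority per transition
    p = p / p.sum()                     # sampling probabilities
    w = (len(td_errors) * p) ** (-b)    # importance-sampling correction
    return p, w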
+{"seq_id":"118438869","text":"from django.http import HttpResponse\nimport MySQLdb\nfrom libs import config\nimport json\ndef mysql_connect(query,fetch='all'):\n\tconnection=MySQLdb.connect(host=config.mysql_ip,user=config.mysql_login,passwd=config.mysql_pwd,db=config.mysql_db,port=int(config.mysql_port))\n\tcur=connection.cursor()\n\tcur.execute(query)\n\tresult = [] \n\tcolumns = tuple( [d[0].decode('utf8') for d in cur.description] ) \n\tfor row in cur:\n\t\tresult.append(dict(zip(columns, row)))\n\tif fetch == 'one':\n\t\tif len(result) > 0:\n\t\t\tresult = result[0]\n\treturn result\n\ndef mysql_commit():\n\tconnection.commit()\n\ndef setResponse(result):\n\treturn HttpResponse(json.dumps(result))\n\ndef setExceptionResponse(result):\n\treturn HttpResponse(json.dumps(result),status=500)\n","sub_path":"investclub/libs/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"481763743","text":"import pygame, random\nfrom pygame.locals import *\n\nclass Inimigo(pygame.sprite.Sprite):\n\n def __init__(self, nome, velocidade):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load(nome).convert_alpha()\n self.rect = self.image.get_rect()\n\n self.rect[0] = random.randint(10, 700)\n self.rect[1] = 0\n\n self.velocidade = velocidade\n self.velocidadeInicial = velocidade\n\n def update(self):\n self.rect[1] += self.velocidade\n if (self.rect[1] >= 600):\n self.rect[0] = random.randint(10, 700)\n self.rect[1] = 0\n if self.velocidade != 11:\n self.velocidade += 1\n\n def atualiza(self):\n self.rect[0] = random.randint(10, 700)\n self.rect[1] = 0\n\n self.velocidade = self.velocidadeInicial","sub_path":"scripts/inimigo.py","file_name":"inimigo.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"529936700","text":"import numpy as np\nimport math\n\nfrom quaternion import Quat as Quaternion\nimport dualquaternion\n\n# Transformation test\npt = [7, 0, 0]\ntrans1 = [0, 0, 0]\nrotAxis1 = [0, 1, 0]\nrotAngle1 = math.radians(0)\ntrans2 = [0, 0, 0]\nrotAxis2 = [0, 1, 0]\nrotAngle2 = math.radians(90)\n\n\ndq1 = dualquaternion.DualQuat()\ndq2 = dualquaternion.DualQuat()\ndq1.setTransformation(Quaternion(axis=rotAxis1, angle=rotAngle1), trans1)\ndq2.setTransformation(Quaternion(axis=rotAxis2, angle=rotAngle2), trans2)\ndq3 = dq2 * dq1 # transform by dq1 then dq2\ndq4 = dq1 * 0.5 + dq2 * 0.5 # blending 2 transform\nprint ('dq1: ', dq1)\nprint ('dq2: ', dq2)\nprint ('dq3: ', dq3)\nprint ('dq4: ', dq4)\nprint('dq1*: ', dq1.transform(pt))\nprint('dq2*: ', dq2.transform(pt))\nprint('dq3*: ', dq3.transform(pt))\nprint('dq4*: ', dq4.transform(pt))\n\n\n\n\n# Using matrix\ndef transformByMatrix(pt, trans, rotAxis, rotAngle):\n # Construct quaternion\n quat = Quaternion(axis=rotAxis, angle=rotAngle)\n \n # Construct transformation matrix\n xform = quat.transformation_matrix\n xform[0,3] = trans[0]\n xform[1,3] = trans[1]\n xform[2,3] = trans[2]\n \n # Transform with matrix\n ptArr = np.array(pt + [1])\n ptMat = np.matrix(ptArr).transpose()\n xform = np.matrix(xform)\n result = xform * ptMat\n \n # Result\n result = result.transpose().tolist()\n return result[0][:3]\n\nprint ('transformByMatrix: ', transformByMatrix(pt, trans1, rotAxis1, rotAngle1))\n","sub_path":"dualQuaternion/dqtransformationtest.py","file_name":"dqtransformationtest.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"524846609","text":"#!/usr/bin/env python3\n\nimport sys\nfrom enum import Enum\n\n\n\"\"\"\nGiven a filepath, opens the file and reads the text as tape.\n\nINPUT:\n filename: a valid unix path to a list of comma-separated integers.\n\nRETURN: list of integers\n\"\"\"\ndef read_input(filename):\n\n text = \"\"\n\n with open(filename, \"r\") as fp:\n text = fp.readline()\n\n if text == \"\":\n print(\"No Input\")\n exit(2) \n\n tape = list(map(int,text.split(',')))\n\n return tape\n\n\n\nclass Status:\n FINISHED = 0\n OK = 1\n INPUT_REQUIRED = 2\n OUT_OF_BOUNDS = 3\n BAD_OPCODE = 4\n\n\n\nclass IntComp:\n\n \"\"\"\n Determines the literal values of all parameters for an intcode instruction.\n\n INPUTS:\n tape: list of integers in the form of an intcode program\n head: currently executing opcode of the intcode program\n inst_len: the number of integers in tape that are part of the instruction\n write_param: indicates which parameter is the address of the instruction output. Is the head of the parameter list. (this is gross, but I haven't figured out anything better)\n\n RETURN:\n params: list of integers, containing the literal inputs to the instruction calling this function.\n \"\"\"\n def get_params(self, inst_len, write_param):\n\n mode_digits = str(self.tape[self.head])[-3::-1]\n params = self.tape[self.head+1:self.head+inst_len]\n modes = []\n\n for i in range(inst_len-1):\n if len(mode_digits) > i:\n modes.append(int(mode_digits[i]))\n else:\n modes.append(0)\n\n\n for i in range(len(params)):\n if write_param != i and modes[i] == 0:\n params[i] = self.tape[params[i]]\n\n return params\n\n\n\n def instr_add(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n\n self.tape[params[2]] = params[0] + params[1]\n\n self.head += inst_len\n return Status.OK\n\n\n\n def instr_multiply(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n\n self.tape[params[2]] = params[0] * params[1]\n\n self.head += inst_len\n return Status.OK\n\n\n\n def instr_input(self, inputs):\n\n inst_len = 2\n\n if len(inputs) == 0:\n return Status.INPUT_REQUIRED\n \n num = inputs.pop(0)\n self.tape[self.tape[self.head+1]] = num\n \n self.head += inst_len\n return Status.OK\n\n\n\n def instr_output(self, outputs):\n\n inst_len = 2\n params = self.get_params(inst_len, -1)\n \n outputs.append(params[0])\n \n self.head += inst_len\n return Status.OK\n\n\n\n def instr_jump_if(self, if_true):\n \n inst_len = 3\n params = self.get_params(inst_len, -1)\n ip = self.head\n\n if (params[0] == 0) != if_true:\n ip = params[1]\n else:\n ip = self.head + inst_len\n\n if ip >= 0 and ip < len(self.tape):\n self.head = ip\n return Status.OK\n else:\n return Status.OUT_OF_BOUNDS\n\n\n\n def instr_less_than(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n \n if params[0] < params[1]:\n self.tape[params[2]] = 1\n else:\n self.tape[params[2]] = 0\n\n self.head += inst_len\n return Status.OK\n\n\n\n def instr_equals(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n \n if params[0] == params[1]:\n self.tape[params[2]] = 1\n else:\n self.tape[params[2]] = 0\n\n self.head += inst_len\n return Status.OK\n\n\n\n \"\"\"\n Executes an intcode program\n\n INPUT:\n tape: list of integers that form an intcode program\n human_IO: True if you want IO to interact with user. 
\n        False if you want IO to interact with another program.\n    RETURN:\n    \"\"\"\n    def execute_tape(self, inputs = []):\n\n        outputs = []\n        status = Status.OK\n\n        while self.head >= 0 and self.head < len(self.tape):\n\n            opcode = int(str(self.tape[self.head])[-2:]) #I love python.\n\n            if opcode == 1:\n                status = self.instr_add()\n            elif opcode == 2:\n                status = self.instr_multiply()\n            elif opcode == 3:\n                status = self.instr_input(inputs)\n            elif opcode == 4:\n                status = self.instr_output(outputs)\n            elif opcode == 5:\n                status = self.instr_jump_if(True)\n            elif opcode == 6:\n                status = self.instr_jump_if(False)\n            elif opcode == 7:\n                status = self.instr_less_than()\n            elif opcode == 8:\n                status = self.instr_equals()\n            elif opcode == 99:\n                status = Status.FINISHED\n            else:\n                print(f\"Unrecognized opcode \\\"{opcode}\\\" at index {self.head}.\")\n                exit(2)\n\n            if status != Status.OK:\n                break\n\n        if self.head >= len(self.tape):\n            print(f\"Tape overflow with head {self.head}\")\n            exit(3)\n\n        return (status, outputs)\n\n\n\n    def __init__(self, source):\n        # Accept either a pre-parsed tape (list of ints) or a filename; a\n        # single __init__ keeps one definition from silently shadowing the other.\n        if isinstance(source, list):\n            self.tape = source\n        else:\n            self.tape = read_input(source)\n        self.head = 0\n        #self.Status = STATUS()\n\n\n# end of class\n\n\n\n\"\"\"\n    def main():\n\n    print(\"READING INTCODE PROGRAM\")\n\n    filename = \"\"\n\n    if len(sys.argv) > 1:\n        filename = sys.argv[1]\n\n    if filename == \"\":\n        filename = \"7day-input.txt\"\n\n    comp = IntComp(filename)\n\n    comp.execute_tape()\n\n    print(\"\\nEND OF INTCODE PROGRAM\\n\" + str(tape))\n\n\n\nif __name__ == \"__main__\":\n    IntComp.main()\n\"\"\"\n","sub_path":"7day/intcomp2.py","file_name":"intcomp2.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
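A minimal driver for `IntComp` (a sketch, not from the record; the file name comes from the record's commented-out `main`, and the input value is only an example):

comp = IntComp('7day-input.txt')           # or IntComp([3, 0, 4, 0, 99])
status, outputs = comp.execute_tape([5])   # opcode 3 consumes these inputs
if status == Status.INPUT_REQUIRED:
    # The machine pauses with its head in place; feed more input and resume.
    status, more = comp.execute_tape([1])
    outputs.extend(more)
print(status, outputs)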
+{"seq_id":"267392990","text":"import Pyro4\nimport time\n\nclass Player:\n def __init__(self, uri):\n self.server = Pyro4.Proxy(uri)\n self.name = \"\"\n self.sticks = 3\n self.turn = -1\n self.addName()\n self.requestMyTurn()\n\n def addName(self):\n name = input(\"What is your name?\\n\").strip()\n self.name = name\n logged = self.server.addPlayer(name)\n while logged == 0:\n print(\"user already logged! Try again\")\n name = input(\"What is your name?\\n\").strip()\n self.name = name\n logged = self.server.addPlayer(name)\n print(\"Welcome, \", name)\n\n def requestMyTurn(self):\n self.turn = self.server.requestTurn(self.name)\n\n def waitForMyTurn(self):\n print(\"waiting for your turn\")\n\n while not self.server.isMyTurn(self.turn):\n time.sleep(0.5)\n\n def play(self):\n while self.sticks > 0:\n sticksToSend = self.send_sticks()\n stickResponse = self.server.playerPlay(sticksToSend, self.name)\n\n if stickResponse == 0:\n\n print(\"waiting for players to put their sticks\")\n\n while not self.server.sticksInGame():\n time.sleep(2)\n\n self.waitForMyTurn()\n\n shot = self.send_shot()\n shot = int(shot)\n shotResponse = -1\n\n while shotResponse != 0:\n shotResponse = self.server.playerShots(shot, self.name)\n\n if shotResponse == 0:\n print(\"good luck with this shot\")\n self.server.nextTurn()\n else:\n print(\"repeated shot! Try again\")\n shot = self.send_shot()\n\n print(\"waiting for other players\")\n\n while not self.server.allPlayed():\n time.sleep(2)\n\n winner = self.server.requestWinner()\n\n time.sleep(1)\n\n allShots = self.server.requestShots()\n self.printShots(allShots)\n\n time.sleep(1)\n\n score = self.server.requestScore()\n self.printScore(score)\n\n time.sleep(1)\n\n if winner == self.name:\n self.decrease_sticks()\n winner = \"YOU\"\n else:\n print(\"good Luck next time\")\n\n print(\"winner: \",winner)\n\n\n\n time.sleep(3)\n print(\"Turn endo\")\n\n if self.server.lastPlayer():\n print(\"YOU LOST\")\n self.server.endGame()\n exit()\n\n self.server.newRound()\n\n print(\"Congratulations! 
You are out of the game\")\n self.server.removePlayer(self.name)\n\n def send_sticks(self):\n player_sticks = -1\n if self.server.isFirstTurn():\n minSticks = 1\n else:\n minSticks = 0\n while player_sticks < minSticks or player_sticks > self.sticks:\n print(\"Choose the number of sticks to put in the game (between \", minSticks, \" and \", self.sticks, \")\")\n player_sticks = input().strip()\n while not player_sticks.isdigit():\n print(\"Please choose a number between \", minSticks, \" and \", self.sticks)\n player_sticks = input().strip()\n player_sticks = int(player_sticks)\n return int(player_sticks)\n\n def send_shot(self):\n print(\"Say your shot of the total of sticks in game\")\n player_shot = input().strip()\n\n return player_shot\n\n def decrease_sticks(self):\n print(\"Correct shot!\")\n self.sticks -= 1\n\n def printShots(self, shots):\n print(\"######################################\")\n\n print(\"Shots fired:\")\n\n for key in shots.keys():\n print(key, \": \", shots[key])\n\n print(\"######################################\")\n\n def printScore(self, score):\n print(\"######################################\")\n\n print(\"Score now:\")\n\n for s in score.keys():\n print(s, \" has \", score[s], \" sticks\")\n\n print(\"######################################\")\n\ndef main():\n serverURI = \"PYRONAME:matches.server\"\n player = Player(serverURI)\n\n player.play()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"315593318","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import api_view,authentication_classes,permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import geo_code\nfrom .serializer import geo_codeSerializer\n\n#from rest_framework.authentication import TokenAuthentication \nfrom rest_framework.permissions import IsAuthenticated\n# Create your views here.\n\n#from rest_framework_simplejwt.authentication import JWTAuthentication \n# for JWT authentication_classes and that too when we need to use it on local level\n#i.e for views. py otherwise direct add rest_framework_simplejwt.authentication.JWTAuthentication globally into settings\n\n@api_view(['GET'])\n#@authentication_classes([JWTAuthentication])\n@permission_classes([IsAuthenticated])\ndef get_geo_code(request):\n\tgeo_info=geo_code.objects.all()\n\tserializer=geo_codeSerializer(geo_info,many=True)\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef create_geo_code(request):\n\tserializer = geo_codeSerializer(data=request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\n\n@permission_classes([IsAuthenticated])\ndef update_geo_code(request, pk):\n\tgeo_record = geo_code.objects.get(seo_id=pk)\n\tprint(geo_record)\n\tserializer = geo_codeSerializer(instance=geo_record, data=request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete_geo_code(request, pk):\n\tgeo_record = geo_code.objects.get(seo_id=pk)\n\tgeo_record.delete()\n\n\treturn Response('Item succsesfully delete!')\n","sub_path":"myrestapi/geo_code/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"393880807","text":"#scripting by james.duclos@commercebank.com\n#rlps, commercial card, data intelligence\n#In Colaberation with Mike Hockens\n#This report loads into the datamart around 9pm\n\nfrom collections import OrderedDict\nimport requests\nimport sf_mod.james_force as jf\nfrom shutil import copy\nfrom proxies import proxy_dict2\n\n#assign working folder\nfolder = \"//rlps_kw-84gv942/Scripting/fr_SF/HSF/\"\n\n#Pull New Salesforce Token\nimport sys\nsys.path.insert(0, '//rlps_kw-84gv942/Scripting/fr_SF/pull_sf_token') \nfrom pull_sf_token_class import Token\n\n#Get token\ntoken = Token(instance='cs66').get_token()\n\nprint(\"Setting headers\")\nmy_headers = {\n 'Content-Type': 'application/json',\n 'Authorization': \"Bearer \" + token,\n 'X-PrettyPrint': '1'\n }\n\nprint(\"Getting stuff with webcall\")\ndef pull_report(report_id):\n \n result = requests.post(\"https://cs66.salesforce.com/services/data/v34.0/analytics/reports/{0}\".format(report_id), \n headers=my_headers, proxies=proxy_dict2)\n \n output = result.json(object_pairs_hook=OrderedDict)\n jf.print_sf_json(output, folder + \"output-\" + report_id + \"_sandbox.csv\")\n src = folder + \"output-\" + report_id + \"sandbox.csv\"\n\n #dst = '//cbsh.com/kcdfspool/DR-Commercial/HSF/1. Hockens/Requests/hsf_' + report_id + '.txt'\n #copy(src, dst)\n \n #dst = '//wkpv1gspta01/infomovergs/HSF_SLFC/hsf_' + report_id + '.txt'\n #copy(src, dst)\n\nreport_ids = [\"00O330000049zNy\",\"00O33000004A1xF\"]\n\nfor id in report_ids:\n pull_report(id)","sub_path":"fr_SF/HSF/hsf_cpcs_cr_pull_from_sandbox.py","file_name":"hsf_cpcs_cr_pull_from_sandbox.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"15575692","text":"\"\"\"\r\nRuns the application server and client modules.\r\n Requirements:\r\n - python34\r\n - npm\r\n\"\"\"\r\n\r\nfrom threading import Thread\r\nfrom argparse import RawTextHelpFormatter\r\nimport argparse\r\nimport os\r\n\r\n# The directory of this file should be the root directory of the project.\r\nROOT_DIR = os.path.dirname(os.path.realpath(__file__))\r\n\r\n# Path to virtualenv binaries for setup.\r\nPYTHON = os.path.join('env', 'Scripts', 'python.exe')\r\nPIP = os.path.join('env', 'Scripts', 'pip.exe')\r\n\r\n\r\ndef _set_directory(subdir):\r\n working_dir = os.path.join(ROOT_DIR, subdir)\r\n os.chdir(working_dir)\r\n print('Current directory: %s' % working_dir)\r\n\r\n\r\ndef _run_command(command):\r\n out = os.system(command)\r\n print('%s [%d]' % (command, out))\r\n if out is not 0: # Raise an exception if the command fails.\r\n raise Exception('Command failed: %s' % command)\r\n\r\n\r\ndef _main():\r\n description = 'Setup development environment.\\n\\n' \\\r\n 'If you have already run this script, it may be necessary to \\n' \\\r\n 'skip virtual environment and superuser setup. This can be \\n' \\\r\n 'done with options `--skip-venv` and `--skip-superuser`.'\r\n parser = argparse.ArgumentParser(description=description,\r\n formatter_class=RawTextHelpFormatter)\r\n parser.add_argument('--skip-venv', action='store_true')\r\n parser.add_argument('--skip-pip', action='store_true')\r\n parser.add_argument('--skip-migrate', action='store_true')\r\n parser.add_argument('--skip-superuser', action='store_true')\r\n parser.add_argument('--skip-server', action='store_true')\r\n parser.add_argument('--skip-client', action='store_true')\r\n args = parser.parse_args()\r\n\r\n # Install dependencies.\r\n _run_command('pip install virtualenv')\r\n _run_command('npm install -g bower')\r\n _run_command('npm install -g less')\r\n\r\n try:\r\n # Server setup.\r\n if not args.skip_server:\r\n _set_directory('server')\r\n\r\n # Initialize virtual environment.\r\n if not args.skip_venv:\r\n _run_command('virtualenv env')\r\n else:\r\n print('Skipping virtual environment setup.')\r\n\r\n # Update pip to latest version.\r\n if not args.skip_pip:\r\n _run_command('%s -m pip install --upgrade pip' % PYTHON)\r\n else:\r\n print('Skipping pip update.')\r\n _run_command('%s install -r requirements.txt' % PIP)\r\n\r\n # Django migrations.\r\n if not args.skip_migrate:\r\n _run_command('%s manage.py migrate' % PYTHON)\r\n else:\r\n print('Skipping Django migrations.')\r\n\r\n # Create Django superuser.\r\n if not args.skip_superuser:\r\n _run_command('%s manage.py createsuperuser' % PYTHON)\r\n else:\r\n print('Skipping Django Admin superuser creation.')\r\n else:\r\n print('Skipping server setup.')\r\n\r\n # Client setup.\r\n if not args.skip_client:\r\n _set_directory('client')\r\n _run_command('npm install')\r\n _set_directory(os.path.join('client', 'static'))\r\n _run_command('bower install')\r\n else:\r\n print('Skipping client setup.')\r\n\r\n except Exception as exception:\r\n print('Setup failed due to:\\n\\t%s' % str(exception))\r\n\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"502749405","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom web.items import WebItem\nfrom scrapy.exceptions import NotConfigured\n\nclass webSpider(scrapy.Spider):\n name = \"web\"\n\n def start_requests(self):\n for url in self.settings.get('PATH').keys():\n yield scrapy.Request(url=url,\n cookies = self.settings.get('COOKIES'),\n callback=self.parse,)\n\n def parse(self, response):\n try:\n path = self.settings.get('PATH')[response.url]\n root = path['ROOT']\n title = path['TITLE']\n link = path['LINK']\n except TypeError:\n raise NotConfigured('PATH should be a dict value')\n except:\n raise NotConfigured('Url or xpath is not configured, please check settings')\n\n for sel in response.xpath(root):\n item = WebItem()\n item['title'] = sel.xpath(title).extract()\n item['link'] = sel.xpath(link).extract()\n yield item\n","sub_path":"web/spiders/web_spider.py","file_name":"web_spider.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"607633621","text":" #!/usr/bin/env python\nimport os\nimport re\nimport sys\nimport warnings\ntry:\n from setuptools import setup\nexcept:\n from distutils.core import setup\n\nMAJOR = 0\nMINOR = 1\nMICRO = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nQUALIFIER = ''\n\n# code to extract and write the version copied from pandas, which is available\n# under the BSD license:\nFULLVERSION = VERSION\nwrite_version = True\n\nif not ISRELEASED:\n import subprocess\n FULLVERSION += '.dev'\n\n pipe = None\n for cmd in ['git', 'git.cmd']:\n try:\n pipe = subprocess.Popen(\n [cmd, \"describe\", \"--always\", \"--match\", \"v[0-9]*\"],\n stdout=subprocess.PIPE)\n (so, serr) = pipe.communicate()\n if pipe.returncode == 0:\n break\n except:\n pass\n\n if pipe is None or pipe.returncode != 0:\n # no git, or not in git dir\n if os.path.exists('src/xray/version.py'):\n warnings.warn(\"WARNING: Couldn't get git revision, using existing xray/version.py\")\n write_version = False\n else:\n warnings.warn(\"WARNING: Couldn't get git revision, using generic version string\")\n else:\n # have git, in git dir, but may have used a shallow clone (travis does this)\n rev = so.strip()\n # makes distutils blow up on Python 2.7\n if sys.version_info[0] >= 3:\n rev = rev.decode('ascii')\n\n if not rev.startswith('v') and re.match(\"[a-zA-Z0-9]{7,9}\", rev):\n # partial clone, manually construct version string\n # this is the format before we started using git-describe\n # to get an ordering on dev version strings.\n rev = \"v%s.dev-%s\" % (VERSION, rev)\n\n # Strip leading v from tags format \"vx.y.z\" to get th version string\n FULLVERSION = rev.lstrip('v')\n\nelse:\n FULLVERSION += QUALIFIER\n\n\ndef write_version_py(filename=None):\n cnt = \"\"\"\\\nversion = '%s'\nshort_version = '%s'\n\"\"\"\n if not filename:\n filename = os.path.join(\n os.path.dirname(__file__), 'src', 'xray', 'version.py')\n\n a = open(filename, 'w')\n try:\n a.write(cnt % (FULLVERSION, VERSION))\n finally:\n a.close()\n\nif write_version:\n write_version_py()\n\n\nsetup(name='xray',\n version=FULLVERSION,\n description='Extended arrays for working with scientific datasets',\n author='Stephan Hoyer, Alex Kleeman, Eugene Brevdo',\n author_email='TODO',\n install_requires=['scipy >= 0.13', 'numpy >= 1.8', 'netCDF4 >= 1.0.6',\n 'pandas >= 0.13.1'],\n tests_require=['mock >= 1.0.1', 'nose >= 1.0'],\n url='https://github.com/akleeman/xray',\n test_suite='nose.collector',\n packages=['xray'],\n package_dir={'': 'src'})\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"598755090","text":"from django.shortcuts import render\nfrom django.http import *\nfrom django.core import serializers\nfrom django.db import connection\nimport json\n# Modelos\nfrom panel.models import Summary as sm\nfrom panel.models import Result as rs\nfrom panel.models import Labels as lb\n\ndef index(request):\n\treturn render(request, 'panel/index.html')\n\ndef main(request):\n\treturn render(request, 'panel/main.html')\n #return HttpResponse(\"bienvenido a mi pagina en %s\" % request.path) \n\ndef configure(request):\n\treturn render(request, 'panel/graph.html', {'parametro':request.POST['parametro'], 'algoritmo': request.POST['algoritmo']})\t \n\ndef load_summary(request):\n\tif (request.GET['parametro'] and request.GET['algoritmo']):\n\t\tdespegue = sm.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase='despegue').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'grupo', 'parametro')\t\n\t\taterrizaje = sm.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase='aterrizaje').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'grupo', 'parametro')\t\n\t\treturn JsonResponse({'despegue': parse_data_summary(list(despegue)), 'aterrizaje':parse_data_summary(list(aterrizaje))})\n\telse:\n\t\treturn JsonResponse({'despegue': [], 'aterrizaje':[]})\t\n\ndef load_result(request):\n\tdespegue = sm.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'])\t\n\treturn JsonResponse(list(despegue), safe=False)\n\ndef parse_data_summary(data):\n\torder_index = ['row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'grupo']\t\n\tlist_result = []\t\n\t\n\tfor item in data:\n\t\torder_row = []\n\t\tgrupo = ''\n\t\ti = 1\n\t\tfor current in order_index:\n\t\t\tif current == 'grupo':\n\t\t\t\tgrupo = item[current]\n\t\t\telse:\n\t\t\t\torder_row.append([i,item[current]])\n\t\t\t\ti = i + 1\n\t\tlist_result.append({grupo:order_row})\n\n\treturn list_result\n \t\ndef asign_label(request):\n\tgrupos = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase']).values('cluster').distinct()\n\n\tfor current in grupos:\t\t\n\t\tcurrent_clusters = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase'], cluster=current['cluster'])\n\t\tfor flight in current_clusters:\t\t\n\t\t\tflight.etiqueta = request.GET[current['cluster']]\n\t\t\tflight.save()\n\treturn JsonResponse({}, safe=False)\n\ndef cantidad_grupo(request):\n\tgrupos = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase']).values('cluster').distinct()\n\tlistado = {}\n\tfor current in grupos:\n\t\tvalores = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase'], cluster=current['cluster'])\n\t\tlistado[current['cluster']] = len(valores)\n\treturn JsonResponse(listado, safe=False)\n\ndef 
load_resumen(request):\n\tpass\n\ndef resumen(request):\n\t\"\"\"FILTER BY THE ALGORITHM\"\"\"\n\t#if request.GET['algoritmo']:\n\t#\talgoritmo = request.GET['algoritmo']\n\t#else:\n\talgoritmo = 'K-means'\n\tparametros = ['VRTG', 'AOAC', 'FLAP', 'PTCH', 'ROLL']\n\tvuelos = rs.objects.filter(algoritmo=algoritmo, fase='despegue').values('flight','id').distinct()\n\tetiquetas_despegue = []\n\tfor vuelo in vuelos:\n\t\tlistado = {'flight':vuelo['flight'],'VRTG':'-', 'AOAC':'-', 'FLAP':'-', 'PTCH':'-', 'ROLL':'-'}\n\t\t\n\t\t\"\"\"for parametro in parametros:\n\t\t\ttry:\n\t\t\t\tetiqueta = rs.objects.get(algoritmo=algoritmo, fase='despegue', flight=vuelo['flight'], parametro=parametro)\n\t\t\texcept etiqueta.DoesNotExist, e:\n\t\t\t\tprint str(vuelo['id']) +' --- '+ str(e)\n\t\t\telse:\n\t\t\t\tlistado[parametro] = etiqueta.etiqueta\n\t\t\tfinally:\n\t\t\t\tpass\"\"\"\n\t\t\t\n\t\t\t#if hasattr(etiqueta, 'etiqueta'):\n\t\t\t#\tprint str(vuelo['id'])+' -- '+ etiqueta.etiqueta\n\t\t\t\t\n\t\tetiquetas_despegue.append(listado)\n\treturn render(request, 'panel/resumen.html', {'despegue':list(etiquetas_despegue)})\n\ndef detalle_vuelo(request, flight):\n\treturn render(request, 'panel/detalle_vuelo.html', {'vuelo':flight, 'algoritmo':''})\n\ndef load_detalle_vuelo(request):\n\tparametros = ['VRTG', 'AOAC', 'FLAP', 'PTCH', 'ROLL']\n\tlistado = {'despegue':{'VRTG':[], 'AOAC':[], 'FLAP':[], 'PTCH':[], 'ROLL':[]}, 'aterrizaje':{'VRTG':[], 'AOAC':[], 'FLAP':[], 'PTCH':[], 'ROLL':[]}}\n\tfor parametro in parametros:\n\t\tquery_object = rs.objects.filter(flight=request.GET['vuelo'], parametro=parametro, fase='despegue').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'parametro').distinct()\n\t\tlistado['despegue'][parametro] = parse_data_detalle(list(query_object), request.GET['vuelo'])\n\t\tquery_object = rs.objects.filter(flight=request.GET['vuelo'], parametro=parametro, fase='aterrizaje').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'parametro').distinct()\n\t\tlistado['aterrizaje'][parametro] = parse_data_detalle(list(query_object), request.GET['vuelo'])\n\treturn JsonResponse({'listado': listado})\n\ndef parse_data_detalle(data, vuelo):\n\torder_index = ['row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'parametro']\n\tlist_result = []\n\t\n\tfor item in data:\n\t\torder_row = []\n\t\tparametro = ''\n\t\ti = 1\n\t\tfor current in order_index:\n\t\t\tif current == 'parametro':\n\t\t\t\tparametro = item[current]\n\t\t\telse:\n\t\t\t\torder_row.append([i,item[current]])\n\t\t\t\ti = i + 1\n\t\tlist_result.append({vuelo:order_row})\n\n\treturn list_result\n\t","sub_path":"panel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"564536671","text":"\"\"\"Plugwise Water Heater component for HomeAssistant.\"\"\"\n\nimport logging\nimport plugwise\n\nfrom . import (\n DOMAIN,\n DATA_ADAM,\n PwEntity,\n)\n\nfrom homeassistant.components.climate.const import (\n CURRENT_HVAC_COOL,\n CURRENT_HVAC_HEAT,\n CURRENT_HVAC_IDLE,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nCURRENT_HVAC_DHW = \"dhw\"\nWATER_HEATER_ICON = \"mdi:thermometer\"\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Add the Plugwise Water Heater.\"\"\"\n\n if discovery_info is None:\n return\n\n api = hass.data[DATA_ADAM].data\n\n devices = []\n ctrl_id = None\n try:\n devs = api.get_devices()\n except RuntimeError:\n _LOGGER.error(\"Unable to get location info from the API\")\n return\n\n _LOGGER.info('Dev %s', devs)\n for dev in devs:\n data = None\n _LOGGER.info('Dev %s', dev)\n if dev['name'] == 'Controlled Device':\n ctrl_id = dev['id']\n dev_id = None\n name = 'adam'\n _LOGGER.info('Name %s', name)\n data = api.get_device_data(dev_id, ctrl_id, None)\n\n if data is None:\n _LOGGER.debug(\"Received no data for device %s.\", name)\n return\n\n device = PwWaterHeater(api, name, dev_id, ctrl_id)\n _LOGGER.info('Adding water_heater.%s', name)\n if not device:\n continue\n devices.append(device)\n add_entities(devices, True)\n\n\nclass PwWaterHeater(PwEntity):\n \"\"\"Representation of a Plugwise water_heater.\"\"\"\n\n def __init__(self, api, name, dev_id, ctlr_id):\n \"\"\"Set up the Plugwise API.\"\"\"\n self._api = api\n self._name = name\n self._dev_id = dev_id\n self._ctrl_id = ctlr_id\n self._cooling_status = None\n self._heating_status = None \n self._boiler_status = None\n self._dhw_status = None\n\n @property\n def name(self):\n \"\"\"Return the name of the thermostat, if any.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n if self._heating_status or self._boiler_status:\n return CURRENT_HVAC_HEAT\n if self._dhw_status:\n return CURRENT_HVAC_DHW\n if self._cooling_status:\n return CURRENT_HVAC_COOL\n return CURRENT_HVAC_IDLE\n\n @property\n def icon(self):\n \"\"\"Return the icon to use in the frontend.\"\"\"\n return WATER_HEATER_ICON\n\n def update(self):\n \"\"\"Update the data from the water_heater.\"\"\"\n _LOGGER.debug(\"Update water_heater called\")\n data = self._api.get_device_data(self._dev_id, self._ctrl_id, None)\n\n if data is None:\n _LOGGER.debug(\"Received no data for device %s.\", self._name)\n else:\n if 'central_heating_state' in data:\n self._heating_status = data['central_heating_state'] \n if 'boiler_state' in data:\n self._boiler_status = data['boiler_state']\n if 'cooling_state' in data:\n self._cooling_status = data['cooling_state'] \n if 'dhw_state' in data:\n self._dhw_status = data['dhw_state'] \n","sub_path":"custom_components/adam/water_heater.py","file_name":"water_heater.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"346376557","text":"from typing import List\nfrom os import listdir\nfrom AudioProcessing import *\nfrom collections import Counter\nimport numpy as np\nfrom FingerPrintDatabase import FingerPrintDatabase, get_fingerprints\nfrom SongDatabase import *\nfrom Spectrograms import spectrogram, local_peaks\nfrom multiprocessing import Process, Queue, Value\nimport time\n'''\npotential features:\n- real time audio\n- ratio for more accurate predictions\n- website?\n\n'''\n\n# main prediction functions should be here\n# it uses other classes for the prediction\n# todo: add background cancelling even in song file\n\nclass Predictor:\n def __init__(self) -> None:\n self.fingerprints = FingerPrintDatabase()\n self.songs = SongDatabase()\n self.pollster = Counter()\n self.percent_thres = 0\n self.store_fanout_value = 2\n self.pred_fanout_value = 30\n self.thres_ratio = 1.5\n self.store_width = 3\n self.store_length = 3\n self.store_perc = 98\n self.thickness = 10\n self.pred_length = 3\n self.pred_width = 3\n self.pred_perc = 80\n self.time_diff_grain = 10\n self.realtime_accum = []\n self.test_accum = []\n \n def tally(self, songs : List, time0):\n if not songs is None: \n self.pollster.update(Counter([(song, int((time-time0)/self.time_diff_grain)) for song, time in songs]))\n\n def get_tally_winner(self):\n # print(self.pollster.most_common()[:4])\n if len(self.pollster)==0:\n return -1\n common, ratio = self.confidence_ratio()\n if ratio < self.thres_ratio:\n return -1\n return common\n \n def confidence_ratio(self):\n # uses the built in counters to find an approximate ratio for confident guesses\n counter = self.pollster.most_common()\n # takes the \"most common\" song\n most_common = counter[0][0][0]\n print(counter[0][1])\n common_two = None\n for index in range(1, len(counter)):\n if counter[index][0][0] != most_common:\n common_two = index\n break\n if common_two is None:\n ratio = 1e9\n else:\n ratio = counter[0][1] / counter[common_two][1]\n return most_common, ratio\n\n def add_song(self, file_path : str, songname : str, artist : str):\n if songname in self.songs.name2id:\n return\n audio, sampling_rate = read_song(file_path)\n # these should read in discrete digital data\n spectro, freqs, times = spectrogram(audio)\n # returns (Frequency, Time) data\n thres = np.percentile(spectro, self.percent_thres)\n peaks = local_peaks(spectro, thres, self.store_width, self.store_length, self.store_perc, self.thickness)\n print(len(peaks))\n self.songs.save_song(peaks, songname, artist, self.fingerprints, self.store_fanout_value)\n \n def add_songs(self, *, dir_path : str):\n files = listdir(dir_path)\n for file in files:\n if 'DS_Store' in file:\n continue\n print(f'reading {file}')\n file_parts = file.split('_')\n self.add_song(dir_path+\"/\"+file, *file_parts[:2])\n \n def delete_song(self, songname : str):\n self.songs.delete_song(songname, self.store_fanout_value,self.fingerprints)\n\n def save_data(self, dir_path):\n self.songs.save_data(dir_path+\"/songs\")\n self.fingerprints.save_data(dir_path+\"/fingerprints\")\n \n def load_data(self, dir_path):\n self.songs.load_data(dir_path+\"/songs\")\n self.fingerprints.load_data(dir_path+\"/fingerprints\")\n\n def preprocess(self, audio):\n # these should read in discrete digital data\n spectro, freqs, times = spectrogram(audio)\n # returns (Frequency, Time) data\n thres = np.percentile(spectro, self.percent_thres)\n peaks = local_peaks(spectro, thres, self.pred_width, self.pred_length, self.pred_perc)\n # returns a list of peaks (f, t)\n 
return peaks, len(times)\n \n def process_peaks(self, peaks):\n fingerprints, times = get_fingerprints(peaks, self.pred_fanout_value)\n for fingerprint, time in zip(fingerprints,times):\n songs = self.fingerprints.query_fingerprint(fingerprint)\n self.tally(songs, time)\n\n def process_prediction(self, audio : np.ndarray):\n peaks, _ = self.preprocess(audio)\n self.process_peaks(peaks)\n\n def predict(self, *, file_path : str = '', record_time : float = 0, samples : np.ndarray = None):\n self.pollster = Counter()\n # this is meant to be a function that indicates the general structure of the program\n # it uses some pseudo functions that should be implemented\n if file_path!='':\n audio, sampling_rate = read_song(file_path)\n elif record_time > 0:\n audio = record_song(record_time)\n else:\n audio = samples\n self.process_prediction(audio)\n ret = self.get_tally_winner()\n if ret==-1:\n return \"Oops, did not find this song!\"\n else:\n return self.songs.id2name[ret]\n\n def process_prediction_realtime(self, queue, ret):\n tmp_ret = -1\n all_peaks = None\n offset = 0\n while True:\n self.pollster = Counter()\n data = queue.get()\n if data is None:\n ret.value = self.get_tally_winner()\n break\n peaks, time_len = self.preprocess(data)\n peaks[:,1] += offset\n if all_peaks is None:\n all_peaks = peaks\n else:\n all_peaks = np.concatenate([all_peaks,peaks])\n self.process_peaks(all_peaks)\n tmp_ret = self.get_tally_winner()\n offset += time_len + 1\n if tmp_ret != -1:\n ret.value = tmp_ret\n while not queue.empty():\n queue.get()\n break\n\n def predict_realtime(self, file_path: str='', samples: np.ndarray = None, step_size: int = 1, state:int = 1):\n if state == 0:\n self.queue = Queue()\n self.realtime_ret = Value('i',-1)\n self.process = Process(target=self.process_prediction_realtime, args=(self.queue,self.realtime_ret,))\n self.process.start()\n elif state == 1:\n if self.realtime_ret.value != -1:\n self.process.join()\n return self.songs.id2name[self.realtime_ret.value]\n if samples is None:\n audio, sampling_rate = read_song(file_path)\n else:\n audio = samples\n self.realtime_accum.append(audio)\n if len(self.realtime_accum)>=step_size:\n data = np.concatenate(self.realtime_accum)\n self.realtime_accum = []\n self.queue.put(data)\n self.test_accum.append(data)\n else:\n self.queue.put(None)\n self.process.join()\n if self.realtime_ret.value==-1:\n return \"Oops, did not find this song!\"\n else:\n return self.songs.id2name[self.realtime_ret.value]\n\n\n# predictor = Predictor()\n# predictor.load_data('song_recognition/database')\n# print(predictor.predict(record_time=5))\n\n\n# peaks = predictor.songs.database[predictor.songs.name2id['Imperial March']][\"peaks\"]\n# fingerprints, times = get_fingerprints(peaks,2)\n# print(fingerprints[:])\n\n# predictor.add_songs(dir_path='AGOP-mp3-files')\n# predictor.save_data('song_recognition/database')\n\n# first_print = (202, 831, 0)\n# print(predictor.fingerprints.database[first_print])\n# print(predictor.fingerprints.query_fingerprint(first_print))\n\n# predictor.delete_song('Imperial-March')\n# print(len(predictor.fingerprints.database))","sub_path":"song_recognition/Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
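A quick standalone illustration of the Counter-based voting and confidence ratio that get_tally_winner relies on, using made-up (song_id, time-offset) bins:

from collections import Counter

# Hypothetical tallies: keys are (song_id, aligned_offset) bins, values are votes.
pollster = Counter({(1, 3): 40, (2, 0): 10, (1, 4): 6})

counter = pollster.most_common()
most_common = counter[0][0][0]  # song id of the strongest bin
runner_up = next((c for c in counter[1:] if c[0][0] != most_common), None)
ratio = counter[0][1] / runner_up[1] if runner_up else float('inf')
print(most_common, ratio)  # -> 1 4.0; accepted when ratio >= thres_ratio (1.5)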
+{"seq_id":"110867765","text":"#!/usr/bin/python -u\n# coding: utf-8\n\nfrom flask import *\nimport json\nimport urllib.request\nimport ssl\n\napp = Flask(__name__)\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef handle_root():\n if request.method == \"GET\":\n return 'IBPS(Iraira Bo Print Server) is running'\n else:\n comm_path = '/home/pi/work/iraira/iraira_bo_print/comm.txt'\n \n if request.headers['Content-Type'] != 'application/json':\n print(request.headers['Content-Type'])\n return jsonify(res='error'), 400\n\n ssl._create_default_https_context = ssl._create_unverified_context\n url_score_site = 'https://lchika.club/scores'\n url_result_server = 'http://192.168.100.111'\n headers = {\n 'Content-Type': 'application/json',\n }\n app.logger.info(request.json)\n req = urllib.request.Request(url_score_site, json.dumps(request.json).encode(), headers)\n with urllib.request.urlopen(req) as res:\n res_html = res.read().decode('utf-8')\n print('score_site res=' + res_html)\n req = urllib.request.Request(url_result_server, json.dumps(request.json).encode(), headers)\n try:\n with urllib.request.urlopen(req) as res:\n res_html = res.read().decode('utf-8')\n print('result_sserver res=' + res_html)\n except:\n app.logger.info('Error: failed to request to result server')\n with open(comm_path, mode='a') as f:\n f.write(json.dumps(request.json) + '\\n')\n return 'score was sent'\n\nif __name__ == '__main__':\n app.run(\"0.0.0.0\", debug=True)\n #app.run(\"0.0.0.0\")\n","sub_path":"flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"303751723","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n ┏┛ ┻━━━━━┛ ┻┓\n ┃ ┃\n ┃ ━ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┃\n ┃ ┻ ┃\n ┃ ┃\n ┗━┓ ┏━━━┛\n ┃ ┃ 神兽保佑\n ┃ ┃ 代码无BUG!\n ┃ ┗━━━━━━━━━┓\n ┃ ┣┓\n ┃ ┏┛\n ┗━┓ ┓ ┏━━━┳ ┓ ┏━┛\n ┃ ┫ ┫ ┃ ┫ ┫\n ┗━┻━┛ ┗━┻━┛\n\"\"\"\n\nimport codecs\nfrom tqdm import tqdm\nfrom collections import Counter\nimport pickle\nimport os\nimport numpy as np\nimport itertools\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\n\ndef load_sentences(path):\n \"\"\"\n 从文档中读取句子\n :param path:\n :return:\n \"\"\"\n sentences = []\n sentence = []\n for line in tqdm(codecs.open(path, 'r', encoding='utf-8'), desc='数据读取'):\n line = line.strip()\n if not line:\n if len(sentence) > 0:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) == 2\n sentence.append(word)\n if len(sentence) > 0:\n sentences.append(sentence)\n return sentences\n\n\ndef change_bio_to_bioes(sentences):\n \"\"\"\n 将bio编码转换为BIOES编码\n :param sentences:\n :return:\n \"\"\"\n new_tags = []\n new_sentences = []\n for idx, sentence in tqdm(enumerate(sentences), desc='数据处理'):\n tags = [each[-1] for each in sentence]\n new_tag = ['O']\n for tag in tags:\n # 处理O前的I 改为E\n if tag == 'O' and new_tag[-1].split('-')[0] == 'I':\n new_tag[-1] = 'E-' + str(new_tag[-1].split('-')[1])\n # 处理O前的B 改为S\n if tag == 'O' and new_tag[-1].split('-')[0] == 'B':\n new_tag[-1] = 'S-' + str(new_tag[-1].split('-')[1])\n # 放行O\n if tag == 'O':\n new_tag.append(tag)\n # 放行 B\n if tag.split('-')[0] == 'B':\n new_tag.append(tag)\n # 放行 I\n if tag.split('-')[0] == 'I':\n new_tag.append(tag)\n new_tags.append(new_tag[1:])\n for i in range(len(sentence)):\n # 因为添加了一个O在最前面统一操作,因此+1对齐原句\n sentence[i][-1] = new_tag[i + 1]\n new_sentences.append(sentence)\n return new_sentences\n\n\ndef word_mapping(sentences):\n \"\"\"\n 单词映射,获取所有单词的id映射(这一步很显然是one-hot操作,可以被BERT等词嵌入操作取代)\n :param sentences:\n :return:\n \"\"\"\n words = []\n for sentence in sentences:\n for each in sentence:\n words.append(each[0])\n words_counter = Counter(words)\n words_dict = {}\n for each in words_counter:\n words_dict[each] = words_counter[each]\n words_dict[''] = 10000001\n words_dict[''] = 10000000\n sorted_items = sorted(words_dict.items(), key=lambda x: -x[1])\n id_to_word = {i: v[0] for i, v in enumerate(sorted_items)}\n word_to_id = {v[0]: i for i, v in enumerate(sorted_items)}\n return words_dict, id_to_word, word_to_id\n\n\ndef tag_mapping(sentences):\n \"\"\"\n 标签映射,类似于one-hot,这一步不可以取代,类似于输出类别索引\n :param sentences:\n :return:\n \"\"\"\n tags = []\n for sentence in sentences:\n for each in sentence:\n tags.append(each[1])\n tags_counter = Counter(tags)\n tags_dict = {}\n for each in tags_counter:\n tags_dict[each] = tags_counter[each]\n sorted_item = sorted(tags_dict.items(), key=lambda x: -x[1])\n id_to_tags = {i: v[0] for i, v in enumerate(sorted_item)}\n tags_to_id = {v[0]: i for i, v in enumerate(sorted_item)}\n return tags_dict, id_to_tags, tags_to_id\n\n\ndef prepare_dataset(sentences, word_to_id, tags_to_id):\n \"\"\"\n 准备训练数据,将sentences的word和tag全部转换为idx\n :param sentences:\n :param words_to_id:\n :param tags_to_id:\n :return:\n \"\"\"\n data = []\n for sentence in sentences:\n word_list = [word[0] for word in sentence]\n # 这里其实有一点问题,这里已经应该是idx了,不应该else UNK,应该是上面的‘10000000’,\n # 但是因为词汇表就是从数据集中获取的,一般不会在这里报错\n word_id_list = [word_to_id[word if word in word_to_id else ''] for word in word_list]\n tag_id_list = [tags_to_id[word[-1]] for word in sentence]\n 
data.append([word_list, word_id_list, tag_id_list])\n return data\n\n\ndef load_data(config):\n \"\"\"\n 读取数据(整合)\n :param config:\n :return:\n \"\"\"\n # 由于处理需要时间,因此进行序列化存储,这里检查是否有存储过的序列化文件\n if os.path.exists(config.dataset_pkt):\n dataset_pkt = pickle.load(open(config.dataset_pkt, 'rb'))\n train_sentences = dataset_pkt['train']\n dev_sentences = dataset_pkt['dev']\n test_sentences = dataset_pkt['test']\n else:\n # 加载数据集\n train_sentences = load_sentences(config.train_path)\n dev_sentences = load_sentences(config.dev_path)\n test_sentences = load_sentences(config.test_path)\n # 编码转换\n train_sentences = change_bio_to_bioes(train_sentences)\n dev_sentences = change_bio_to_bioes(dev_sentences)\n test_sentences = change_bio_to_bioes(test_sentences)\n dataset_plt = {}\n dataset_plt['train'] = train_sentences\n dataset_plt['dev'] = dev_sentences\n dataset_plt['test'] = test_sentences\n pickle.dump(dataset_plt, open(config.dataset_pkt, 'wb'))\n\n # 单词映射以及标签映射的存储\n if os.path.exists(config.map_pkt):\n map_pkt = pickle.load(open(config.map_pkt, 'rb'))\n words_dict = map_pkt['words_dict']\n id_to_word = map_pkt['id_to_word']\n word_to_id = map_pkt['word_to_id']\n tags_dict = map_pkt['tags_dict']\n id_to_tags = map_pkt['id_to_tags']\n tags_to_id = map_pkt['tags_to_id']\n else:\n words_dict, id_to_word, word_to_id = word_mapping(train_sentences)\n tags_dict, id_to_tags, tags_to_id = tag_mapping(train_sentences)\n map_pkt = {}\n map_pkt['words_dict'] = words_dict\n map_pkt['id_to_word'] = id_to_word\n map_pkt['word_to_id'] = word_to_id\n map_pkt['tags_dict'] = tags_dict\n map_pkt['id_to_tags'] = id_to_tags\n map_pkt['tags_to_id'] = tags_to_id\n pickle.dump(map_pkt, open(config.map_pkt, 'wb'))\n\n # 处理后的data文件\n if os.path.exists(config.handled_pkt):\n data_pkt = pickle.load(open(config.handled_pkt, 'rb'))\n train_data = data_pkt['train_data']\n dev_data = data_pkt['dev_data']\n test_data = data_pkt['test_data']\n else:\n train_data = prepare_dataset(train_sentences, word_to_id, tags_to_id)\n test_data = prepare_dataset(test_sentences, word_to_id, tags_to_id)\n dev_data = prepare_dataset(dev_sentences, word_to_id, tags_to_id)\n data_dict = {}\n data_dict['train_data'] = train_data\n data_dict['test_data'] = test_data\n data_dict['dev_data'] = dev_data\n pickle.dump(data_dict, open(config.handled_pkt, 'wb'))\n\n return train_data, dev_data, test_data, word_to_id, id_to_word, tags_to_id, id_to_tags\n\n\ndef load_word2vec(config, id_to_word):\n \"\"\"\n 读取word2vec词嵌入向量\n :param config:\n :param id_to_word:\n :param word_dim:\n :return:\n \"\"\"\n if os.path.exists(config.embedding_matrix_file):\n embedding_mat = np.load(config.embedding_matrix_file)\n return embedding_mat\n else:\n pre_trained = {}\n emb_invalid = 0\n for i, line in enumerate(codecs.open(config.emb_file, 'r', encoding='utf-8')):\n line = line.rstrip().split()\n if len(line) == config.embsize + 1:\n pre_trained[line[0]] = np.array(\n [float(x) for x in line[1:]]\n ).astype(np.float32)\n else:\n emb_invalid = emb_invalid + 1\n\n if emb_invalid > 0:\n print('waring: %i invalid lines' % emb_invalid)\n\n num_words = len(id_to_word)\n embedding_mat = np.zeros([num_words, config.embsize])\n for i in range(num_words):\n word = id_to_word[i]\n if word in pre_trained:\n embedding_mat[i] = pre_trained[word]\n else:\n pass\n print('加载了 %i 个字向量' % len(pre_trained))\n np.save(config.embedding_matrix_file, embedding_mat)\n return embedding_mat\n\n\ndef get_X_and_Y_data(dataset, max_len, num_classes):\n \"\"\"\n 将数据拆分为X和Y\n :param dataset:\n :param 
max_len:\n :param num_classes:\n :return:\n \"\"\"\n x_data = [data[1] for data in dataset]\n x_data = pad_sequences(x_data, maxlen=max_len, dtype='int32', padding='post', truncating='post', value=0)\n y_data = [data[2] for data in dataset]\n y_data = pad_sequences(y_data, maxlen=max_len, dtype='int32', padding='post', truncating='post', value=0)\n y_data = to_categorical(y_data, num_classes=num_classes)\n return x_data, y_data\n\n\ndef check_label(front_label, follow_label):\n \"\"\"\n 检查标签前后是否连贯\n :param front_label:\n :param follow_label:\n :return:\n \"\"\"\n tag_check = {\n \"I\": [\"B\", \"I\"],\n \"E\": [\"B\", \"I\"],\n }\n if not follow_label:\n raise Exception(\"follow label should not both None\")\n\n if not front_label:\n return True\n\n if follow_label.startswith(\"B-\"):\n return False\n\n if (follow_label.startswith(\"I-\") or follow_label.startswith(\"E-\")) and \\\n front_label.endswith(follow_label.split(\"-\")[1]) and \\\n front_label.split(\"-\")[0] in tag_check[follow_label.split(\"-\")[0]]:\n return True\n return False\n\n\ndef format_result(chars, tags):\n \"\"\"\n 将网络输出转换为字典格式\n :param chars:\n :param tags:\n :return:\n \"\"\"\n entities = []\n entity = []\n for index, (char, tag) in enumerate(zip(chars, tags)):\n entity_continue = check_label(tags[index - 1] if index > 0 else None, tag)\n if not entity_continue and entity:\n entities.append(entity)\n entity = []\n entity.append([index, char, tag, entity_continue])\n if entity:\n entities.append(entity)\n\n entities_result = []\n for entity in entities:\n if entity[0][2].startswith(\"B-\"):\n entities_result.append(\n {\"begin\": entity[0][0] + 1,\n \"end\": entity[-1][0] + 1,\n \"words\": \"\".join([char for _, char, _, _ in entity]),\n \"type\": entity[0][2].split(\"-\")[1]\n }\n )\n\n return entities_result\n\n\nif __name__ == '__main__':\n sentences = load_sentences(r'./data/dev.txt')\n bioes_tag = change_bio_to_bioes(sentences)\n words_dict, id_to_word, word_to_id = word_mapping(sentences)\n tags_dict, id_to_tags, tags_to_id = tag_mapping(sentences)\n data = prepare_dataset(sentences, word_to_id, tags_to_id)\n print()\n","sub_path":"NER/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":11489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
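A compact, self-contained check of the BIO-to-BIOES idea on one toy sentence (simplified: it only inspects the next tag and ignores entity-type changes inside a span):

def bio_to_bioes(tags):
    out = []
    for i, tag in enumerate(tags):
        nxt = tags[i + 1] if i + 1 < len(tags) else 'O'
        if tag == 'O':
            out.append(tag)
        elif tag.startswith('B-'):
            # A B with no following I is a single-token entity.
            out.append(tag if nxt.startswith('I-') else 'S-' + tag[2:])
        else:  # I-*
            # An I with no following I closes the entity.
            out.append(tag if nxt.startswith('I-') else 'E-' + tag[2:])
    return out

print(bio_to_bioes(['B-PER', 'I-PER', 'O', 'B-LOC']))
# -> ['B-PER', 'E-PER', 'O', 'S-LOC']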
+{"seq_id":"514002883","text":"\"\"\"all models migrations\n\nRevision ID: 426cef99c026\nRevises: 4cad96b74ef3\nCreate Date: 2021-07-10 17:08:58.298372\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '426cef99c026'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n 'produtos',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('descricao', sa.String(length=200), nullable=False),\n sa.Column('preco', sa.DECIMAL(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n )\n op.drop_table('produtos')\n op.drop_constraint(\n 'conta_produto_id_produto_fkey', 'conta_produto', type_='foreignkey'\n )\n op.create_foreign_key(None, 'conta_produto', 'produtos', ['id_produto'], ['id'])\n op.drop_constraint(\n 'estoque_produto_id_produto_fkey', 'estoque_produto', type_='foreignkey'\n )\n op.create_foreign_key(None, 'estoque_produto', 'produtos', ['id_produto'], ['id'])\n op.drop_constraint(\n 'fornecedor_produto_id_produto_fkey', 'fornecedor_produto', type_='foreignkey'\n )\n op.create_foreign_key(\n None, 'fornecedor_produto', 'produtos', ['id_produto'], ['id']\n )\n op.drop_constraint(\n 'produto_ordem_de_compra_id_produto_fkey',\n 'produto_ordem_de_compra',\n type_='foreignkey',\n )\n op.create_foreign_key(\n None, 'produto_ordem_de_compra', 'produtos', ['id_produto'], ['id']\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'produto_ordem_de_compra', type_='foreignkey')\n op.create_foreign_key(\n 'produto_ordem_de_compra_id_produto_fkey',\n 'produto_ordem_de_compra',\n 'produto',\n ['id_produto'],\n ['id'],\n )\n op.drop_constraint(None, 'fornecedor_produto', type_='foreignkey')\n op.create_foreign_key(\n 'fornecedor_produto_id_produto_fkey',\n 'fornecedor_produto',\n 'produto',\n ['id_produto'],\n ['id'],\n )\n op.drop_constraint(None, 'estoque_produto', type_='foreignkey')\n op.create_foreign_key(\n 'estoque_produto_id_produto_fkey',\n 'estoque_produto',\n 'produto',\n ['id_produto'],\n ['id'],\n )\n op.drop_constraint(None, 'conta_produto', type_='foreignkey')\n op.create_foreign_key(\n 'conta_produto_id_produto_fkey',\n 'conta_produto',\n 'produto',\n ['id_produto'],\n ['id'],\n )\n op.create_table(\n 'produto',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column(\n 'descricao', sa.VARCHAR(length=200), autoincrement=False, nullable=False\n ),\n sa.Column('preco', sa.NUMERIC(), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id', name='produto_pkey'),\n )\n op.drop_table('produtos')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/426cef99c026_all_models_migrations.py","file_name":"426cef99c026_all_models_migrations.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"174437264","text":"##################################\n# fichier amitie-entre-gardes-entrainement.py\n# nom de l'exercice : Amitié entre gardes\n# url : http://www.france-ioi.org/algo/task.php?idChapter=648&idTask=0&sTab=task&iOrder=5\n# type : entrainement\n#\n# Nom du chapitre : \n#\n# Compétence développée : \n#\n# auteur : \n##################################\n\n# chargement des modules\n\n\n# mettre votre code ici\n\ndebutsoldat1=int(input())\nfinsoldat1=int(input())\ndebutsoldat2=int(input())\nfinsoldat2=int(input())\n\nif (finsoldat2 str:\n \"\"\"\n Fetch a customer ID by database lookup or create one if a token is provided\n \"\"\"\n cid = current_user.customer_id\n if not cid and token:\n cid = stripe.Customer.create(\n email=current_user.email,\n source=token\n ).id\n current_user.customer_id = cid\n db.session.commit()\n return cid\n\ndef new_subscription(plan: str, token: str) -> bool:\n \"\"\"\n Create a new subscription for the current user\n \"\"\"\n cid = get_customer_id(token)\n subscription = stripe.Subscription.create(\n customer=cid,\n items=[{'plan': PLANS[plan]['id']}]\n )\n current_user.subscription_id = subscription.id\n current_user.plan = plan\n db.session.commit()\n return True\n\ndef change_subscription(plan: str) -> bool:\n \"\"\"\n Change the subscription from one plan to another\n \"\"\"\n sid = current_user.subscription_id\n if not sid or current_user.plan == plan:\n return False\n subscription = stripe.Subscription.retrieve(sid)\n subscription.modify(sid,\n cancel_at_period_end=False,\n items=[{\n 'id': subscription['items']['data'][0].id,\n 'plan': PLANS[plan]['id'],\n }]\n )\n current_user.subscription_id = subscription.id\n current_user.plan = plan\n db.session.commit()\n return True\n\ndef cancel_subscription() -> bool:\n \"\"\"\n Cancel a subscription\n \"\"\"\n sid = current_user.subscription_id\n if sid:\n subscription = stripe.Subscription.retrieve(sid)\n subscription.delete()\n cid = current_user.customer_id\n if cid:\n customer = stripe.Customer.retrieve(cid)\n customer.delete()\n current_user.customer_id = None\n current_user.subscription_id = None\n current_user.plan = None\n db.session.commit()\n return True\n","sub_path":"avwx_account/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"341660330","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"PROTPGD\")\n# ecal mapping\nprocess.load(\"Geometry.EcalMapping.EcalMapping_cfi\")\n\nprocess.load(\"Geometry.EcalMapping.EcalMappingRecord_cfi\")\n\n# magnetic field\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n# Calo geometry service model\nprocess.load(\"Geometry.CaloEventSetup.CaloGeometry_cfi\")\n\n# Calo geometry service model\nprocess.load(\"Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi\")\n\n# IdealGeometryRecord\nprocess.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\")\n\nprocess.load(\"CalibCalorimetry.Configuration.Ecal_FakeConditions_cff\")\n\n#include \"SimCalorimetry/EcalTrigPrimProducers/data/ecalTriggerPrimitiveDigis_with_suppressed.cff\"\nprocess.load(\"SimCalorimetry.EcalTrigPrimProducers.ecalTriggerPrimitiveDigis_cff\")\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring('file:/data/uberthon/tpg/elec_unsupp_pt10-100.root')\n)\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(10)\n)\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring('drop *_*_*_*', \n 'keep *_simEcalTriggerPrimitiveDigis_*_*', \n 'keep *_ecalDigis_*_*', \n 'keep *_ecalRecHit_*_*', \n 'keep *_ecalWeightUncalibRecHit_*_*', \n 'keep PCaloHits_*_EcalHitsEB_*', \n 'keep PCaloHits_*_EcalHitsEE_*', \n 'keep edmHepMCProduct_*_*_*'),\n fileName = cms.untracked.string('TrigPrim.root')\n)\n\nprocess.Timing = cms.Service(\"Timing\")\n\nprocess.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\")\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n cerr = cms.untracked.PSet(\n enable = cms.untracked.bool(False)\n ),\n cout = cms.untracked.PSet(\n DEBUG = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n EcalTPG = cms.untracked.PSet(\n limit = cms.untracked.int32(1000000)\n ),\n enable = cms.untracked.bool(True),\n threshold = cms.untracked.string('DEBUG')\n ),\n debugModules = cms.untracked.vstring('simEcalTriggerPrimitiveDigis')\n)\n\nprocess.p = cms.Path(process.simEcalTriggerPrimitiveDigis)\nprocess.outpath = cms.EndPath(process.out)\n\n\n","sub_path":"SimCalorimetry/EcalTrigPrimProducers/test/writeTP_cfg.py","file_name":"writeTP_cfg.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"553029341","text":"from src import logger\nfrom io import BytesIO\n\n\ndef make_sequence(log, msg=\"\"):\n log.debug('debug')\n log.warning('warning')\n log.info('info')\n log.error('error: {0}'.format(msg))\n log.fatal('fatal')\n try:\n raise RuntimeError(\"cos sie spieprzylo\")\n except Exception:\n log.exception('exception')\n\n\ndef test_logger():\n #print dir(logger)\n #make_sequence(logger.logger)\n\n make_sequence(logger.make_logger(\"testowy logger\", debug=True, colored=True))\n make_sequence(logger.make_logger(\"logger_nocolor\", debug=True, colored=False), False)\n","sub_path":"tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"29219895","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 4 16:08:17 2021\n\n@author: timvr\n\"\"\"\n\nfrom mastermind.game.algorithms.Mastermind_Oracle import build_mastermind_a_circuit, build_mastermind_b_circuit\nfrom mastermind.arithmetic.dradder import add, sub\nfrom mastermind.arithmetic.count import count, icount\nfrom mastermind.arithmetic.increm import increment, decrement, cnincrement, cndecrement\nfrom qiskit import QuantumCircuit\nimport numpy as np\n\ndef build_find_colour_positions_circuit(circuit, x, q, a, c, d, secret_sequence, d_positions=None):\n '''\n Builds mastermind check circuit on circuit. Requires the inputs q, a, b, c\n and secret_sequence. You can optionally choose to measure the outcomes.\n\n Parameters\n ----------\n circuit : QuantumCircuit\n Circuit to build mastermind circuit on.\n x : QuantumRegister, length n\n holds binary proto-quereis\n q : QuantumRegister, length n*ceil(log2(k))\n holds two-colour queries to the oracle\n a : QuantumRegister, length 1+ceil(log2(k))\n holds oracle 'a' outputs\n c : integer, c in {0, 1, ..., k-1}\n the colour of which we want to know the positions\n d : integer, d in {0, 1, ..., k-1}\n any colour which does not occur in the secret string\n secret_sequence: List, length n\n Secret sequence.\n\n Returns\n -------\n circuit : QuantumCircuit\n Circuit with find_colour_positions algorithm appended to it.\n\n '''\n \n #0: init\n circuit.barrier()\n \n #1: Hadamard\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n #2: build query\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #3: get Oracle a response\n build_mastermind_a_circuit(circuit, q, a, secret_sequence)\n circuit.barrier()\n \n #3.alt: sub d positions if used\n if d_positions != None:\n for (i,j) in enumerate(d_positions):\n if j == 1:\n circuit.x(x[i])\n cndecrement(circuit, [x[i]], a)\n circuit.x(x[i])\n circuit.barrier()\n \n #4: Z gate on output LSB\n circuit.z(a[0]) # should be the LSB; maybe that's actually a[-1]!!!!!!!!!!\n circuit.barrier()\n \n #5: undo step 2 & 3\n if d_positions != None:\n for (i,j) in enumerate(d_positions):\n if j == 1:\n circuit.x(x[i])\n cnincrement(circuit, [x[i]], a)\n circuit.x(x[i])\n build_mastermind_a_circuit(circuit, q, a, secret_sequence, do_inverse=True)\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #11\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n # Return the check circuit\n return circuit\n\n\ndef build_find_colour_positions_alt_circuit(circuit, x, q, a, b, c, k, secret_sequence):\n '''\n Builds mastermind check circuit on circuit. Requires the inputs q, a, b, c\n and secret_sequence. 
You can optionally choose to measure the outcomes.\n\n Parameters\n ----------\n circuit : QuantumCircuit\n Circuit to build mastermind circuit on.\n x : QuantumRegister, length n\n holds binary proto-queries\n q : QuantumRegister, length n*ceil(log2(k))\n holds two-colour queries to the oracle\n a : QuantumRegister, length 1+ceil(log2(k))\n holds oracle 'a' outputs\n b : QuantumRegister, length 1+ceil(log2(k))+ceil(log2(n))\n holds inner product outputs\n c : integer, c in {0, 1, ..., k-1}\n the colour of which we want to know the positions\n k : integer\n number of available colours\n secret_sequence: List, length n\n Secret sequence.\n\n Returns\n -------\n circuit : QuantumCircuit\n Circuit with find_colour_positions_alt algorithm appended to it.\n\n '''\n \n logk = int(np.ceil(np.log2(k)))\n \n #0: init\n circuit.barrier()\n \n #1: Hadamard\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n #2: calculate the MMa sum\n for d in range(k):\n #2a: build query\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #2b: get Oracle a response\n build_mastermind_a_circuit(circuit, q, a, secret_sequence)\n circuit.barrier()\n \n #2c: add to output reg\n add(circuit, a, b)\n circuit.barrier()\n \n #2d: undo #2a & #2b\n build_mastermind_a_circuit(circuit, q, a, secret_sequence, do_inverse=True)\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #3: add the count of c colours to the b reg\n count(circuit, a=x, b=b, step=1) # or step=-1?????\n circuit.barrier()\n \n #4: ignore the logk LSBs in the b reg\n #... which of course requires literally no code, but I'll add identity gates for clarity\n for i in range(logk):\n circuit.i(b[i])\n circuit.barrier()\n \n #5: decrement the remaining value by 1 to find the desired inner product\n decrement(circuit, b[logk::])\n circuit.barrier()\n \n #6: Z gate on output LSB (the effective LSB, not the actual one)\n circuit.z(b[logk]) # should be the remaining LSB; not exactly sure if this is the correct one!\n circuit.barrier()\n \n #7: undo steps 2 through 5\n increment(circuit, b[logk::])\n for i in range(logk):\n circuit.i(b[i])\n icount(circuit, a=x, b=b, step=1)\n for d in range(k):\n _build_query_two_colours(circuit, x, q, c, d)\n build_mastermind_a_circuit(circuit, q, a, secret_sequence)\n sub(circuit, a, b)\n build_mastermind_a_circuit(circuit, q, a, secret_sequence, do_inverse=True)\n _build_query_two_colours(circuit, x, q, c, d)\n \n #8\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n # Return the check circuit\n return circuit\n\n\ndef _build_query_two_colours(circuit, x, q, c, d):\n '''\n Performs CNOTs on the query q according to binary proto-query x:\n - if x[i]=1, then the binary version of c is applied\n - alse, d is applied.\n\n Parameters\n ----------\n circuit : QuantumCircuit\n Circuit to build mastermind circuit on.\n x : QuantumRegister, length n\n holds binary proto-queries\n q : QuantumRegister, length n*ceil(log2(k))\n holds two-colour queries to the oracle\n a : QuantumRegister, length 1+ceil(log2(k))\n holds oracle 'a' outputs\n c : integer, c in {0, 1, ..., k-1}\n the colour of which we want to know the positions\n d : integer, d in {0, 1, ..., k-1}\n any colour which does not occur in the secret string\n secret_sequence: List, length n\n Secret sequence.\n\n Returns\n -------\n circuit : QuantumCircuit\n Circuit with build_query_two_colours sub-circuit appended to it.\n\n '''\n \n n_x = len(x)\n n_q = len(q)\n \n amount_colour_bits = n_q // n_x\n \n binary_c = 
bin(c)[2:].zfill(amount_colour_bits)\n binary_d = bin(d)[2:].zfill(amount_colour_bits)\n \n for i in range(n_x):\n for (j,bit) in enumerate(binary_c[::-1]):\n if bit == '1':\n circuit.cnot(x[i], q[i*amount_colour_bits + j])\n else:\n pass\n circuit.x(x[i])\n for (j,bit) in enumerate(binary_d[::-1]):\n if bit == '1':\n circuit.cnot(x[i], q[i*amount_colour_bits + j])\n else:\n pass\n circuit.x(x[i])\n \n return circuit\n\n","sub_path":"src/mastermind/game/algorithms/Find_Colour_Positions.py","file_name":"Find_Colour_Positions.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
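A classical sketch of the bit pattern _build_query_two_colours prepares: every position where x[i] is 1 receives colour c and every other position receives colour d (endianness aside; the circuit writes each pattern LSB-first onto its qubits). The values below are hypothetical:

import math

def two_colour_query(x_bits, c, d, k):
    width = math.ceil(math.log2(k))  # bits per position, matching the register sizing
    return [format(c if b else d, '0{}b'.format(width)) for b in x_bits]

print(two_colour_query([1, 0, 1], c=2, d=0, k=4))  # -> ['10', '00', '10']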
+{"seq_id":"429463703","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 9 12:21:15 2016\n\n@author: pbva152\n\"\"\"\n\nimport convert\nimport numpy as np\nimport matasanochallenge3\n\n\ndatas = np.genfromtxt(\"mt6.txt\", dtype = str) \ndata = \"\".join(datas)\n\ndef HammDist( string1, string2):\n assert(len(string1) == len(string2))\n hammweight = 0\n for i in range(0, len(string1)):\n tempbin = bin(string1[i] ^ string2[i])\n hammweight = tempbin.count(\"1\") + hammweight\n return hammweight\n \n\nbytedata = convert.base642byte(data)\nmetricvec = []\nfinalmetric = 100000 \nfor keysize in range(2, 41):\n bytes1 =[]\n bytes2 = []\n bytes3 = []\n bytes4 = []\n for i in range(0, keysize):\n bytes1.append(bytedata[i])\n bytes2.append(bytedata[i + keysize])\n bytes3.append(bytedata[i + (2 * keysize)])\n bytes4.append(bytedata[i + (3 * keysize)])\n metric1 = HammDist(bytes1, bytes2) / keysize\n metric2 = HammDist(bytes1, bytes3) / keysize\n metric3 = HammDist(bytes1, bytes4) / keysize\n metric4 = HammDist(bytes2, bytes3) / keysize\n metric5 = HammDist(bytes2, bytes4) / keysize\n metric6 = HammDist(bytes3, bytes4) / keysize\n metric = (metric1 + metric2 + metric3 + metric4 + metric5 + metric6) / 6\n metricvec.append(metric)\n if (metric < finalmetric):\n finalmetric = metric\n finalkeysize = keysize\n \nplaintext = [''] * len(bytedata)\n\nfor i in range(0,finalkeysize):\n ciph = []\n \n for j in range(0, (len(bytedata) // finalkeysize) +1):\n if (j* finalkeysize + i) < len(bytedata):\n ciph.append(bytedata[j* finalkeysize + i])\n plain = matasanochallenge3.DecryptCaesarCipher(convert.byte2hex(ciph))\n for j in range(0, (len(bytedata) // finalkeysize)+1):\n if (j* finalkeysize + i) < len(bytedata):\n plaintext[j* finalkeysize + i] = plain[j] \nfinalplaintext = \"\".join(plaintext)\nprint(finalplaintext)\n\n\n\n\n\n\n\n\n","sub_path":"python/matasano/set1/c6.py","file_name":"c6.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"380233265","text":"\"\"\"TaxIDlink\n\nauthor: jbonet\ndate: 10/2013\n\n@oliva's lab\n\"\"\"\n\n\"\"\"\nImport Standard Libraries\n\"\"\"\nimport sys, os, re\nimport subprocess\nimport warnings\nimport urllib\n\n\"\"\"\nDependences in SBI library\n\"\"\"\nfrom SBI import SBIglobals\nfrom SBI.databases import taxIDftp\nfrom SBI.beans import Path\nfrom SBI.beans import File\n\nclass TaxIDlink(object):\n \"\"\"The TaxIDlink class controls the download and parsing of TaxID database\n\n \"\"\"\n def __init__(self, local = None):\n self._local = os.path.abspath(local)\n self.__name__ = 'databases.TaxIDlink' # This must be included in every class for the SBIglobals.alert()\n\n self._nodes = 'nodes.dmp'\n self._names = 'names.dmp'\n self._delet = 'delnodes.dmp'\n self._merged = 'merged.dmp'\n self._taxid = 'taxid.gz'\n\n if local is not None:\n self.local = local\n\n \"\"\"ATTRIBUTES\"\"\"\n @property\n def local(self): return self._local\n @local.setter\n def local(self, value):\n self._local = os.path.abspath(value)\n self._nodes = os.path.join(self._local, self._nodes)\n self._names = os.path.join(self._local, self._names)\n self._delet = os.path.join(self._local, self._delet)\n self._merged = os.path.join(self._local, self._merged)\n self._taxid = os.path.join(self._local, self._taxid)\n\n @property\n def localTaxIDs(self):\n taxFile = File(self._taxid, 'r')\n for tax_line in taxFile.descriptor:\n yield tax_line\n taxFile.close()\n\n @property\n def source(self):\n return taxIDftp['show']\n\n \"\"\"BOOLEANS\"\"\"\n @property\n def has_local(self): return self._local is not None\n\n \"\"\"METHODS\"\"\"\n def download(self):\n if not self.has_local:\n raise NameError('A local TaxID database directory must be defined.')\n\n Path.mkdir(self.local)\n destination = os.path.join(self.local, 'taxdmp.zip')\n urllib.urlretrieve(taxIDftp['global'], destination)\n command = ['unzip', '-o', destination, '-d', self.local]\n p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n out, err = p.communicate()\n\n self._process()\n\n return True\n\n def get_TaxID(self, TaxID):\n if self.has_local:\n for tax_line in self.localTaxIDs:\n if tax_line.split('\\t')[0] == TaxID:\n return tax_line\n\n else:\n raise NameError('A local TaxID database directory must be defined.')\n\n def get_TaxIDs(self, TAXset):\n if isintance(TAXset, str):\n warnings.warn('For single TaxID search the get_TaxID function is recomended.')\n yield self.get_TaxID(TAXset)\n\n if self.has_local:\n for tax_line in self.localTaxIDs:\n if tax_line.split('\\t')[0] in TAXset:\n yield tax_linej\n else:\n raise NameError('A local TaxID database directory must be defined.')\n\n \"\"\"PRIVATE METHODS\"\"\"\n def _process(self):\n inh = {}\n nodefile = File(file_name = self._nodes, action = 'r')\n for line in nodefile.descriptor:\n line = re.sub('\\'', '\\\\\\'', line)\n line_data = line.split('|')\n inh[line_data[0].strip()] = TaxID(line_data[0].strip())\n inh[line_data[0].strip()].parent = line_data[1].strip()\n inh[line_data[0].strip()].rank = line_data[2].strip()\n nodefile.close()\n\n namefile = File(file_name = self._names, action = 'r')\n for line in namefile.descriptor:\n line = re.sub('\\'', '\\\\\\'', line)\n line_data = line.split('|')\n if line_data[3].strip() == 'scientific name':\n inh[line_data[0].strip()].name = line_data[1].strip()\n namefile.close()\n\n delefile = File(file_name = self._delet, action = 'r')\n for line in delefile.descriptor:\n data = line.split('|')\n inh[data[0].strip()] = 
TaxID(data[0].strip())\n inh[data[0].strip()].old = True\n delefile.close()\n\n mrgefile = File(file_name = self._merged, action = 'r')\n for line in mrgefile.descriptor:\n data = line.split('|')\n inh[data[0].strip()] = TaxID(data[0].strip())\n inh[data[0].strip()].old = True\n inh[data[0].strip()].new = data[1].strip()\n mrgefile.close()\n\n taxFile = File(self._taxid, 'w', True)\n for taxid in inh:\n taxFile.write(str(inh[taxid]) + \"\\n\")\n taxFile.close()\n\nclass TaxID(object):\n def __init__(self, taxid = None, inline = None):\n if inline is not None:\n inline = inline.strip()\n self.taxid = taxid if inline is None else inline.split('\\t')[0]\n self.name = None if inline is None else inline.split('\\t')[1] if inline.split('\\t')[1] != 'None' else None\n self.rank = None if inline is None else inline.split('\\t')[2] if inline.split('\\t')[2] != 'None' else None\n self.parent = None if inline is None else inline.split('\\t')[3] if inline.split('\\t')[3] != 'None' else None\n self.old = False if inline is None else eval(inline.split('\\t')[4])\n self.new = None if inline is None else inline.split('\\t')[5] if inline.split('\\t')[5] != 'None' else None\n\n \"\"\"BOOLEANS\"\"\"\n @property\n def has_old(self): return self.old\n @property\n def has_new(self): return False if self.new is None else True\n\n \"\"\"OVERWRITE PARENT METHODS\"\"\"\n def __str__(self):\n return \"{0.taxid}\\t{0.name}\\t{0.rank}\\t{0.parent}\\t{0.old}\\t{0.new}\".format(self)\n","sub_path":"collision_detection_program/SBI/databases/TaxIDlink.py","file_name":"TaxIDlink.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
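The taxid.gz lines that the TaxID bean serialises are six tab-separated fields: taxid, name, rank, parent, old, new. A quick standalone parse of one such line (the sample values are the real NCBI entries for Homo sapiens):

line = '9606\tHomo sapiens\tspecies\t9605\tFalse\tNone'
taxid, name, rank, parent, old, new = line.split('\t')
print(taxid, name, rank, parent)  # -> 9606 Homo sapiens species 9605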
+{"seq_id":"432660897","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20201211\n# Problem link : https://leetcode.com/problems/merge-in-between-linked-lists/\n################################################################\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n # time: O(n), space: O(1), where n = size(list1) + size(list2).\n def mergeInBetween(self, list1: ListNode, a: int, b: int, list2: ListNode) -> ListNode:\n p1, p2 = list1, list2\n for _ in range(a-1):\n p1 = p1.next\n s1 = p1 # first stop point\n for _ in range(b-a+2):\n p1 = p1.next\n s2 = p1 # second stop point\n s1.next = p2 \n while p2.next != None:\n p2 = p2.next\n p2.next = s2 # connect stop points and list\n return list1\n \n ","sub_path":"biweekly_contest_40_virtual/1669_merge_in_between_linked_lists.py","file_name":"1669_merge_in_between_linked_lists.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"622613223","text":"'''\n======================================================================\nCreated on Jan 14, 2018\n\nPURPOSE: this module provides classes to read Maven projects from git or other repos\n specifically intended to create the graph of multiple project dependencies\n\nROADMAP: TODO - \n 1. review how properties are distributed and could break things\n 2. review subproject dependencies on top level, are props declared?\n 2. review parent POM, are props declared?\n 3. are external property files used?\n\n\n@author: Larry\n======================================================================\n'''\nimport os\nimport subprocess\n\n#import json\n#import xml.etree.ElementTree as ET\n#import urllib2\n#import csv\nimport xml.etree.cElementTree as ET\nimport re\nimport urllib.request\n\n\n#======================================================================= \n# static functions and constants\nclass Util(object):\n mvn_pom_ns = {\"mvn\":\"http://maven.apache.org/POM/4.0.0\"}\n \n def __init__(self):\n pass \n \n @staticmethod\n def get_tag_value(name, section):\n s = ('mvn:%s' % name)\n elem = section.find(s, Util.mvn_pom_ns)\n if elem ==None:\n return'' \n return elem.text\n\n @staticmethod\n def get_path(dirs):\n path = ''\n for d in dirs:\n path += d + '/' \n return path[:len(path) -1]\n\n # if hasattr(a, 'property'):\n \n @staticmethod\n def run_process_2(cmd_args):\n #result = subprocess.run(['dir', '../*.*'], stdout=subprocess.PIPE)\n #result = subprocess.run(['C:/apps/maven352/bin/mvn', 'help:effective-pom'], stdout=subprocess.PIPE)\n result = subprocess.run(['cd', '..'], stdout=subprocess.PIPE, shell=True) \n result = subprocess.run(cmd_args, stdout=subprocess.PIPE, shell=True) \n print(result.stdout.decode('utf-8'))\n\n \n @staticmethod\n def run_process(cmd_args, args_in):\n cmd = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n if (args_in):\n cmd.stdin.write(args_in.encode('utf-8'))\n cmd.stdin.flush() # Must include this to ensure data is passed to child process\n result = cmd.stdout.read()\n \n print(args_in.encode('utf-8'))\n print(result) #.stdout.decode('utf-8'))\n '''\n cmdline = [\"cmd\", \"/q\", \"/k\", \"echo off\"]\n batch = b\"\"\"\\\n rem vsinstr -coverage helloclass.exe /exclude:std::*\n vsperfcmd /start:coverage /output:run.coverage\n helloclass\n vsperfcmd /shutdown\n exit\n \"\"\" \n \n '''\n def test_map_update(self):\n A = {'a':1, 'b':2, 'c': 3}\n B = {'c':99, 'd':4, 'e':5}\n A.update(B)\n print(A)\n\n#======================================================================= \n# identifies Maven coordinates for a project or dependnecy\nclass MavenCoords(object):\n def __init__(self, element, props): \n if (not element):\n self.groupid =''\n self.artifactid = ''\n self.version = ''\n self.scope = ''\n self.relative_path = ''\n self.key ='' \n return \n \n self.groupid = Util.get_tag_value('groupId', element)\n self.artifactid = Util.get_tag_value('artifactId', element)\n self.version = Util.get_tag_value('version', element) \n self.relative_path = Util.get_tag_value('relativePath', element) \n self.scope = Util.get_tag_value('scope', element) \n self.refresh_key(props) \n \n def refresh_key(self, props):\n if (props and self.version in props):\n self.version = props[self.version]\n self.key = '%s|%s|%s' % (self.groupid, self.artifactid, self.version) \n\n \n\n#======================================================================= \n# a maven project POM complete with properties and dependencies 
\nclass MavenProject(object):\n def __init__(self, pom_url, project_map): \n #dirs = pom_url.split('/')\n\n self.pom_url = pom_url; \n self.project_map = project_map\n self.pom_file = self.get_pom_file(self.pom_url)\n self.name = Util.get_tag_value('name', self.pom_file) \n self.packaging = Util.get_tag_value('packaging', self.pom_file) \n \n self.init_from_parent() \n self.properties.update(self.get_properties(self.pom_file)) \n self.coord = MavenCoords(self.pom_file, self.properties) \n self.dependencies.update(self.get_dependencies(self.pom_file))\n self.project_map[self.coord.key] = self \n self.get_sub_modules(self.pom_file)\n self.history = []\n self.consumers = []\n #if self.packaging =='pom':\n \n # parent pom's will always be pre-existent to child pom's. they will be looked by coord key from\n # the global graph / project list \n def init_from_parent(self):\n parent_section = self.pom_file.findall('mvn:parent', Util.mvn_pom_ns) \n if (parent_section):\n self.parent_coord = MavenCoords(parent_section[0], None)\n parent = self.project_map.get(self.parent_coord.key)\n if (parent):\n self.properties = parent.properties.copy()\n self.dependencies = parent.dependencies.copy() \n else:\n print('Error: POM {} has unresolved parent POM reference {}'.format(self.name, self.parent_coord.key)) \n else:\n self.dependencies = {}\n self.properties = {} \n self.coord = MavenCoords(None, None)\n dirs = self.pom_url.split('/')\n print(dirs)\n print(Util.get_path(dirs))\n \n \n def get_sub_modules(self, pom_file):\n section = pom_file.findall('mvn:modules', Util.mvn_pom_ns) \n self.modules = {}\n if (not section):\n return \n \n for elem in section[0].findall('*'):\n sub_proj = self.get_sub_module(elem.text)\n self.modules[sub_proj.coord.key] = sub_proj \n self.project_map[sub_proj.coord.key] = sub_proj\n \n\n def get_sub_module(self, sub_dir):\n dirs = self.pom_url.split('/')\n x = len(dirs)\n dirs[x-1] = 'pom.xml'\n dirs.insert(x-1, sub_dir)\n path = Util.get_path(dirs) \n module = MavenProject(path, self.project_map) \n return module\n\n def get_properties(self, pom):\n section = pom.findall('mvn:properties', Util.mvn_pom_ns)\n props = {}\n if (len(section)==0):\n return props\n \n for elem in section[0].findall('*'):\n k = re.sub('{.*?}', '', elem.tag)\n k = '${%s}' % k\n props[k] = elem.text\n return props\n\n def get_dependencies(self, pom):\n section = pom.findall('mvn:dependencies', Util.mvn_pom_ns)\n deps_map = {}\n if (len(section)==0):\n return deps_map\n \n for dep_section in section[0].findall('mvn:dependency', Util.mvn_pom_ns): \n obj = MavenCoords(dep_section, self.properties)\n deps_map[obj.key] = obj \n return deps_map\n\n @staticmethod\n def get_pom_file(pomfile):\n if pomfile.find(\"http://\") >=0 or pomfile.find(\"https://\") >=0: \n opener = urllib.request.build_opener() \n pom = ET.parse( opener.open(pomfile) ).getroot() \n else:\n pom = ET.parse(pomfile).getroot() \n return pom\n\n def logx(self, level):\n print() \n print('---------Maven Project---------')\n #print('key: %s * Group: %s * Id: %s * Ver: %s' % (self.coord.key, self.coord.groupid, self.coord.artifactid, self.coord.version))\n print('key: {0} * Name: {1} * Group: {2} * Id: {3} * Ver: {4}'.format(self.coord.key, self.name, self.coord.groupid, self.coord.artifactid, self.coord.version))\n print() \n if level ==0:\n return \n \n print(' dependencies') \n for k, v in self.dependencies.items():\n print(' key: %s * Group: %s * Id: %s * Ver: %s' % (k, v.groupid, v.artifactid, v.version))\n \n print() \n print(' properties: ', 
self.properties)\n \n print (' consumers')\n for proj in self.consumers:\n print(' ', proj.coord.key)\n \nclass DAGerror(Exception):\n def __init__(self, arg):\n self.arg = arg\n\n#======================================================================= \n# \nclass MavenProjectGraph(object):\n \n def __init__(self, pom_url_list):\n self.pom_url_list = pom_url_list\n self.proj_list = []\n self.proj_map = {}\n #self.validation = {}\n \n def generate_pom_list(self):\n for pom_url in self.pom_url_list:\n MavenProject(pom_url, self.proj_map)\n #self.proj_list.append(proj)\n #self.proj_map[proj.coord.key] = proj\n \n self.proj_list = list(self.proj_map.values())\n \n for proj in self.proj_list:\n proj.logx(1) #$$\n print()\n \n def set_options(self):\n pass\n \n \n # PURPOSE: sort the list in DAG dependency order and capture each project consumers\n #\n #\n def resolve_graph(self):\n self.resolve_dependencies()\n self.resolve_consumers()\n \n \n # PURPOSE: reorder the project list such that each projects dependencies appear before that project\n #\n # NOTE #1: iterate thru the list looking fwd in the list for each project's dependencies\n # for each dependency found, move it behind that project\n #\n # NOTE #2: the DAG is complete when the list is scanned and no dependencies exist fwd of each project\n #\n # NOTE #3: a history of each dependency relocation is maintained for each project\n # a circular reference will be detected if that \n # \n def resolve_dependencies(self):\n try:\n while True:\n for p in self.proj_list:\n print(p.name)\n\n i = 0 \n #dependency_found = False \n while i < len(self.proj_list):\n dependency_found = False \n proj_base = self.proj_list[i]\n \n print('loop i={}, base={}'.format(i, proj_base.name))\n \n j = i + 1\n while j < len(self.proj_list):\n print(' loop j {}'.format(j))\n\n proj_scan = self.proj_list[j]\n \n # a forward project dependency is found for the base project, move it behind the base project\n if proj_scan.coord.key in proj_base.dependencies:\n \n # dejavu - a repeated reorder indicates circular dependency\n if proj_scan.coord.key in proj_base.history:\n raise DAGerror(\"Error: base project - {} - encountered duplicate reorder for dependency - {} -\".format\n ( proj_base.name, proj_scan.name))\n \n # remove the fwd item first to avoid order issues \n del self.proj_list[j] #self.proj_list.remove(j)\n \n # insert behind the base project\n self.proj_list.insert(i, proj_scan)\n \n print(' reorded scan {} from j={} to i={}'.format( proj_scan.name, j, i)) \n \n for p in self.proj_list:\n print(p.name)\n \n proj_base.history.append(proj_scan.coord.key) \n dependency_found = True\n i = i -1\n break\n \n j =j+1 # while j\n \n i=i+1 # while i \n \n # repeat outer loop until nothing is reordered \n if not dependency_found:\n break\n else:\n i = 0 \n \n except DAGerror as e:\n print(e)\n \n # PURPOSE: for each project in the list, discover the set of consuming projects\n #\n # NOTE #1: call this method AFTER the dependency graph has been properly resolved\n # consuming projects will be forward in the list\n #\n def resolve_consumers(self):\n for i in range(len(self.proj_list)):\n proj_base = self.proj_list[i]\n j = i\n while j < len(self.proj_list)-1:\n j = j+1\n proj_scan = self.proj_list[j]\n if (proj_base.coord.key in proj_scan.dependencies):\n proj_base.consumers.append(proj_scan)\n \n \n def list_projects(self): \n for proj in self.proj_list:\n proj.logx(1) \n \n \n#==========================================================================\ndef main():\n 
pom_files = ['D:\\\\devspaces\\\\wks4\\\\py1\\\\snipits2.xml', \n 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml']\n \n pom_files = ['D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-A.xml', \n 'D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-B.xml',\n 'D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-C.xml',\n 'D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-D.xml',\n ]\n \n pom_files = ['C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-A.xml', \n 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-B.xml',\n 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-C.xml',\n 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-D.xml',\n ]\n \n # C:\\Users\\Larry\\Dropbox\\gitcode\\gh\\maven_proj_graph\n \n s = ['dir', '*']\n s = ['C:/apps/maven352/bin/mvn', 'help:effective-pom']\n \n s2 = ['C:\\\\apps\\\\maven352\\\\bin\\\\mvn', 'help:effective-pom']\n \n #Util.run_process(['cd', '..'], 'C:\\\\apps\\\\maven352\\\\bin\\\\mvn help:effective-pom')\n \n #Util.run_process('C:\\\\apps\\\\maven352\\\\bin\\\\mvn help:effective-pom', '')\n \n #Util.test_map_update(None)\n \n #return()\n \n graph = MavenProjectGraph(pom_files)\n \n graph.generate_pom_list()\n \n graph.resolve_graph()\n \n graph.list_projects()\n\n\n#==========================================================================\n# see this article for opening remote xml files\n# https://stackoverflow.com/questions/28238713/python-xml-parsing-lxml-urllib-request\n \ndef main2(): \n cwd = os.getcwd()\n cwd = 'D:\\\\devspaces\\\\wks4\\\\py1\\\\'\n pom_file = cwd + 'snipits2.xml'\n \n pom_file = 'D:\\\\devspaces\\\\wks4\\\\py1\\\\snipits2.xml'\n pom = ET.parse(pom_file).getroot() \n \n # https://github.com/LeonardoZ/java-concurrency-patterns.git\n \n # this is the correct patttern for reading single files from github\n # https://raw.githubusercontent.com/user/repository/branch/filename\n \n # this is the web page containing the file \n # 'https://github.com/LeonardoZ/java-concurrency-patterns/blob/master/pom.xml'\n \n pom_file_url = 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml'\n \n opener = urllib.request.build_opener()\n \n f = opener.open(pom_file_url)\n \n \n # ng, file=urllib.urlopen(file=urllib.urlopen())\n \n #parser = ET.HTMLParser()\n\n #with urlopen('https://pypi.python.org/simple') as f:\n #tree = ET.parse(f, parser) \n\n #pom_file = urllib.request.urlopen(pom_file)\n \n pom = ET.parse(opener.open(pom_file_url)).getroot() \n\n project = MavenProject(pom)\n project.logx()\n\nif __name__ == '__main__':\n main()\n\n\n#main()\n\n'''\n=====================================================================\nnotes:\n alternatives - use maven to get equiv pom \n > mvn help:effective-pom\n\nhttps://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output\n\n\n'''\n\n","sub_path":"maven_proj_graph/pkg1/mvnsortmod1.py","file_name":"mvnsortmod1.py","file_ext":"py","file_size_in_byte":16760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
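The reorder loop above is doing dependency ordering; the same guarantee can be sanity-checked with a conventional topological sort over a toy graph (project names and dependency edges here are hypothetical):

from collections import deque

deps = {'app': {'core', 'util'}, 'core': {'util'}, 'util': set()}

indegree = {p: len(d) for p, d in deps.items()}
consumers = {p: [q for q, d in deps.items() if p in d] for p in deps}
order = []
queue = deque(p for p, n in indegree.items() if n == 0)
while queue:
    p = queue.popleft()
    order.append(p)
    for q in consumers[p]:
        indegree[q] -= 1
        if indegree[q] == 0:
            queue.append(q)
if len(order) < len(deps):
    raise RuntimeError('circular dependency detected')  # mirrors DAGerror
print(order)  # -> ['util', 'core', 'app']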
+{"seq_id":"356432877","text":"#!/usr/bin/python3\n\"\"\"RESTful API for Amenities object \"\"\"\nfrom flask import jsonify, abort, request\nfrom api.v1.views import app_views\nfrom models.base_model import BaseModel\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models import storage\n\n\n@app_views.route('/amenities', methods=['GET'],\n strict_slashes=False)\ndef get_amenities():\n \"\"\"Retrieves all Amenity objects \"\"\"\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)\n\n\n@app_views.route('/amenities/', methods=['GET'],\n strict_slashes=False)\ndef get_amenity(amenity_id):\n \"\"\" Retrieves a Amenity object \"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())\n\n\n@app_views.route('/amenities/', methods=['DELETE'],\n strict_slashes=False)\ndef delete_amenity(amenity_id):\n \"\"\" Deletes a Amenity object \"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n empty_dict = {}\n amenity.delete()\n storage.save()\n return jsonify(empty_dict), 200\n\n\n@app_views.route('/amenities', methods=['POST'],\n strict_slashes=False)\ndef create_amenity():\n \"\"\" Creates a City object \"\"\"\n my_dict = request.get_json()\n if my_dict is None:\n abort(400, \"Not a JSON\")\n elif \"name\" not in my_dict:\n abort(400, \"Missing name\")\n new_amenity = Amenity(**my_dict)\n new_amenity.save()\n return jsonify(new_amenity.to_dict()), 201\n\n\n@app_views.route('/amenities/',\n methods=['PUT'],\n strict_slashes=False)\ndef update_amenity(amenity_id):\n \"\"\"Update an Amenity object\"\"\"\n if amenity_id:\n my_dict = request.get_json()\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if my_dict is None:\n abort(400, \"Not a JSON\")\n for key, value in my_dict.items():\n if key not in [\"id\", \"created_at\", \"updated_at\"]:\n setattr(amenity, key, value)\n storage.save()\n return jsonify(amenity.to_dict()), 200\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"552361307","text":"t = int(input())\n\n\ndef bus(k, n, m):\n cnt = 0\n now = 0\n charge = [False] * (int(n) + 1)\n tmp = list(map(int, input().split()))\n for s in tmp:\n charge[s] = True\n while True:\n if now+k <= n and charge[now+k] == True:\n now = now+k\n cnt += 1\n elif now+k == n:\n break;\n else:\n for j in range(1, k):\n if now+k-j <= n and charge[now+k-j] == True:\n now = now+k-j\n cnt += 1\n break;\n elif now+k-j == n:\n return cnt\n elif j == k-1 and charge[now+k-j] != True:\n return 0\n\n\n\n return cnt\n\nfor i in range(t):\n k, n, m = map(int, input().split())\n\n ans = bus(k, n, m)\n print('#'+str(i+1)+' '+ str(ans))\n","sub_path":"05/0510/SWEA_4831.py","file_name":"SWEA_4831.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"560392010","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport sqlite3\nfrom textblob import TextBlob\nfrom unidecode import unidecode\nimport time\nimport re\nfrom nltk.tokenize import word_tokenize, RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport string\nfrom geotext import GeoText\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n# consumer key, consumer secret, access token, access secret.\nconsumerKey = \"\"\nconsumerSecret = \"\"\naccessToken = \"\"\naccessTokenSecret = \"\"\n\nconn = sqlite3.connect('db.twitterdata')\nc = conn.cursor()\n\n\nclass Listener(StreamListener):\n def processTweet(self, tweet):\n tweet = re.sub(r'\\&\\w*;', '', tweet)\n tweet = re.sub('@[^\\s]+', '', tweet)\n tweet = re.sub(r'\\$\\w*', '', tweet)\n tweet = tweet.lower()\n tweet = re.sub(r'https?:\\/\\/.*\\/\\w*', '', tweet)\n tweet = re.sub(r'#\\w*', '', tweet)\n # tweet = re.sub(r'[' + punctuation.replace('@', '') + ']+', ' ', tweet)\n tweet = re.sub(r't\\b\\w\\b', '', tweet)\n tweet = re.sub(r'\\s\\s+', ' ', tweet)\n tweet = re.sub(r\"[-()\\\"#/@;:<>{}[`+=~*|.!?,.....']\", \"\", tweet)\n tweet = re.sub(r\"rt\", '', tweet)\n tweet = re.sub(r\"1234567890\", \"\", tweet)\n tweet = tweet.lstrip(' ')\n tweet = ''.join(c for c in tweet if c <= '\\uFFFF')\n tweet = ''.join(c for c in tweet if not c.isdigit())\n return tweet\n\n def wordList(self, tweet):\n stop_words = set(stopwords.words('english') + list(string.punctuation))\n word = word_tokenize(tweet.replace('\\n', ' '))\n # freq_words = FreqDist(word)\n filtered_word = []\n for w in word:\n if len(w) > 2:\n if w not in stop_words:\n filtered_word.append(w)\n return filtered_word\n\n def location(self, userlocation):\n country = None\n city = None\n state = None\n cur = conn.cursor()\n cur.execute('SELECT * FROM worldcities') # You need to have table worldcities information.\n world = cur.fetchall()\n places = GeoText(str(userlocation))\n countries = places.countries\n cities = places.cities\n if len(cities) == 1 and len(countries) == 1:\n city = cities[0]\n country = countries[0]\n for w in world:\n if len(w[0]) != 0 and len(w[2]) != 0 and str(city).find(w[0]) != -1 and str(country).find(w[2]) != -1:\n state = w[5]\n else:\n if len(countries) == 1:\n country = countries[0]\n if len(cities) == 1:\n city = cities[0]\n population = []\n country_array = []\n state_array = []\n for w in world:\n if len(w[0]) != 0 and str(cities).find(w[0]) != -1:\n if w[6] is not None:\n population.append(int(w[6]))\n else:\n population.append(0)\n country_array.append(w[2])\n state_array.append(w[5])\n elif w[5] is not None and str(cities).find(w[5]) != -1:\n if w[6] is not None:\n population.append(int(w[6]))\n else:\n population.append(0)\n country_array.append(w[2])\n state_array.append(w[5])\n if len(population) != 0:\n p_index = population.index(max(population))\n country = country_array[p_index]\n state = state_array[p_index]\n else:\n if len(country_array) != 0:\n country = country_array[0]\n if len(state_array) != 0:\n state = state_array[0]\n else:\n splited = str(userlocation).lower().strip(',').strip('.').split()\n splited_left = str(userlocation).split(',')[0]\n if 'usa' in splited:\n country = 'United States'\n if 'uk' in splited:\n country = 'United Kingdom'\n if 'england' in splited:\n country = 'United Kingdom'\n state = 'England'\n if userlocation != splited_left:\n splited_right = str(userlocation).split(',')[1].strip()\n 
cur.execute('SELECT state FROM usa_states WHERE code LIKE ?', (splited_left,))\n code_right = cur.fetchone()\n if code_right is not None:\n state = code_right[0]\n country = 'United States'\n cur.execute('SELECT state FROM usa_states WHERE code LIKE ?', (splited_right,))\n code_left = cur.fetchone()\n if code_left is not None:\n state = code_left[0]\n country = 'United States'\n cur.execute('SELECT state FROM usa_states WHERE state LIKE ?', (splited_left,))\n state_right = cur.fetchone()\n if state_right is not None:\n state = state_right[0]\n country = 'United States'\n cur.execute('SELECT state FROM usa_states WHERE state LIKE ?', (splited_right,))\n state_left = cur.fetchone()\n if state_left is not None:\n state = state_left[0]\n country = 'United States'\n\n returnedlist = [country, state, city]\n print(userlocation)\n print(countries)\n print(cities)\n print(returnedlist)\n print(\"------------------------------\")\n return returnedlist\n\n def on_data(self, data):\n try:\n tweet_frame = json.loads(data)\n user_id = tweet_frame['user']['id']\n tweet = unidecode(tweet_frame['text'])\n user_name = tweet_frame[\"user\"][\"name\"]\n user_screen_name = tweet_frame[\"user\"][\"screen_name\"]\n user_followers_count = tweet_frame['user']['followers_count']\n user_verified = tweet_frame['user']['verified']\n user_location = tweet_frame['user']['location']\n created_at = tweet_frame['created_at']\n id_str = tweet_frame['id_str']\n verified_int = 0\n if user_verified:\n verified_int = 1\n else:\n verified_int = 0\n\n clean_tweet = self.processTweet(tweet)\n\n if user_name is not None:\n if user_followers_count is not None:\n if user_location is not None:\n if user_followers_count > 20:\n vader_analyzer = SentimentIntensityAnalyzer()\n vader_polarity = vader_analyzer.polarity_scores(clean_tweet)\n vader_compound = vader_polarity['compound']\n\n textblob_analyzer = TextBlob(clean_tweet)\n textblob_polarity = textblob_analyzer.sentiment.polarity\n textblob_subjective = textblob_analyzer.subjectivity\n\n insertedlist = self.location(user_location)\n\n country = insertedlist[0]\n state = insertedlist[1]\n city = insertedlist[2]\n word_list = ','.join(self.wordList(clean_tweet))\n\n c.execute(\n \"INSERT INTO homepage_tweets (user_id, user_name, user_screen_name, user_follower_count, created, verified, location, country, state, city, tweet, clean_tweet, word_list, polarity, subjectivity, vader_compound,id_str) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?)\",\n (user_id, user_name, user_screen_name, user_followers_count, created_at, verified_int,\n user_location, country, state, city, tweet, clean_tweet, word_list, textblob_polarity,\n textblob_subjective, vader_compound, id_str\n ))\n\n conn.commit()\n print(user_followers_count)\n except KeyError as e:\n print(str(e))\n return (True)\n\n def on_error(self, status):\n print(status)\n\ndef stream():\n while True:\n try:\n auth = OAuthHandler(consumerKey, consumerSecret)\n auth.set_access_token(accessToken, accessTokenSecret)\n twitterStream = Stream(auth, Listener())\n twitterStream.filter(track=[\"a\", \"e\", \"i\", \"o\", \"u\"], languages=['en'])\n except Exception as e:\n print(str(e))\n time.sleep(1)\nif __name__ == \"__main__\":\n stream()\n","sub_path":"streamingtweets.py","file_name":"streamingtweets.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
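The regex chain in processTweet above is easiest to follow on one concrete tweet; this standalone sketch replays just the mention, URL, hashtag, and whitespace steps (the same patterns as above, nothing else implied):

import re

def strip_noise(tweet):
    tweet = re.sub(r'@[^\s]+', '', tweet)              # drop @mentions
    tweet = re.sub(r'https?:\/\/.*\/\w*', '', tweet)   # drop URLs
    tweet = re.sub(r'#\w*', '', tweet)                 # drop hashtags
    tweet = re.sub(r'\s\s+', ' ', tweet)               # collapse whitespace
    return tweet.lower().strip()

print(strip_noise('RT @user: loving this! https://t.co/abc123 #mood'))
# -> 'rt loving this!'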
+{"seq_id":"584383917","text":"from wordcloud import WordCloud \r\nimport matplotlib.pyplot as plt \r\nimport pandas as pd \r\nfrom tweet_feed import Feed\r\nfrom basic_cleaner import BasicCleaner\r\nimport data_object\r\n\r\n\r\n\r\n\r\nfile_path = \"../DataCollection/191020-20_39_57--191020-20_40_07\" \r\nsentiment_range = [float(-1), float(-0.5)]\r\n\r\n\r\n\r\ndef get_long_tweet_string():\r\n feed = Feed()\r\n queue_stream = feed.disk_get_tweet_queue(file_path)\r\n data_objects = [data_object.get_dataobj_converted(tweet) for tweet in queue_stream]\r\n for obj in data_objects: BasicCleaner.autocleaner(obj,sentiment_range, True)\r\n long_string = [obj.text*(obj.valid_sentiment_range) for obj in data_objects]\r\n return \" \".join(long_string)\r\n\r\n\r\n\r\nWC = WordCloud(width = 800, height = 800, \r\n background_color ='white', \r\n min_font_size = 10).generate(get_long_tweet_string()) \r\n \r\n# plot WC \r\nplt.figure(figsize = (8, 8), facecolor = None) \r\nplt.imshow(WC) \r\nplt.axis(\"off\") \r\nplt.tight_layout(pad = 0) \r\n\r\n\r\nplt.show() ","sub_path":"generate_wordcloud.py","file_name":"generate_wordcloud.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"599906893","text":"from django.conf.urls import url\nfrom rest_framework.routers import DefaultRouter\nfrom views import (NotificationViewSet,\n ReportRetrieveAPIView,\n ReportViewSet)\n\n\nreport_router = DefaultRouter()\nreport_router.register(r'report', ReportViewSet, base_name='report')\n\nreport_view_url = [\n url(r'^report/(?P[0-9a-f-]+)/$', ReportRetrieveAPIView.as_view(), name='report-view')\n]\n\nnotification_router = DefaultRouter()\nnotification_router.register(r'notification', NotificationViewSet, base_name='notification')\n","sub_path":"reporting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"202629834","text":"\"\"\"Quicksort algorithm from Grokking Algorithms: An illustrated guide for \nprogrammers and other curiouse people.\n\"\"\"\n\n\ndef quicksort(items):\n \"\"\"Quicksort is a sorting algorithm. Faster than selction sort and is \n frequenly used. It splits a list on a pivot point and recursively \n sorts each resulting list.\n \"\"\"\n if len(items) < 2 or items[0] == items[1]:\n return items\n else:\n pivot = items[len(items) // 2]\n less = [i for i in items if i < pivot]\n greater = [i for i in items if i > pivot]\n equal = [i for i in items if i == pivot]\n return quicksort(less) + quicksort(equal) + quicksort(greater)\n\nif __name__ == \"__main__\":\n lst = [ 44, 2, 17, 3, 9, 18, 6, 12]\n print(quicksort(lst))\n","sub_path":"skill_builders/books/ga_quicksort.py","file_name":"ga_quicksort.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"466324563","text":"import tensorflow as tf\n\nx = tf.placeholder(dtype=tf.float32)\ny = x * 2\n\ninput_data = [1, 2]\n\nsess = tf.Session()\nresult = sess.run(y, feed_dict={x: input_data})\nprint(result)\nsess.close()","sub_path":"week01/placeholder1.py","file_name":"placeholder1.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"67368023","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport plot_setting\n\nfolder_diff =\"/home/fschubert/Master/sim_data/complete_diff_long/\"\nfolder_non_diff =\"/home/fschubert/Master/sim_data/complete_non_diff_long/\"\nfolder_diff_no_top =\"/home/fschubert/Master/sim_data/diffusive_no_distance_topology/\"\nfolder_non_diff_no_top =\"/home/fschubert/Master/sim_data/non_diffusive_no_distance_topology/\"\n\nW_diff=np.load(folder_diff+\"W_eTOe_record.npy\")\nW_non_diff=np.load(folder_non_diff+\"W_eTOe_record.npy\")\nW_diff_no_top=np.load(folder_diff_no_top+\"W_eTOe_record.npy\")\nW_non_diff_no_top=np.load(folder_non_diff_no_top+\"W_eTOe_record.npy\")\n\n\nW_diff=np.transpose(W_diff,(0,2,1))\nW_non_diff=np.transpose(W_non_diff,(0,2,1))\nW_diff_no_top=np.transpose(W_diff_no_top,(0,2,1))\nW_non_diff_no_top=np.transpose(W_non_diff_no_top,(0,2,1))\n\nt_ind=999\n\nout_sum_diff = W_diff[t_ind,:,:].sum(axis=0)*1000\nout_sum_non_diff = W_non_diff[t_ind,:,:].sum(axis=0)*1000\nout_sum_diff_no_top = W_diff_no_top[t_ind,:,:].sum(axis=0)*1000\nout_sum_non_diff_no_top = W_non_diff_no_top[t_ind,:,:].sum(axis=0)*1000\n\nsort_out_sum_diff = np.sort(out_sum_diff)\nsort_out_sum_non_diff = np.sort(out_sum_non_diff)\nsort_out_sum_diff_no_top = np.sort(out_sum_diff_no_top)\nsort_out_sum_non_diff_no_top = np.sort(out_sum_non_diff_no_top)\n\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_non_diff_no_top,'.',label=\"non-diffusive, no spatial topology\")\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_non_diff,'.',label=\"non-diffusive, spatial topology\")\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_diff_no_top,'.',label=\"diffusive, no spatial topology\")\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_diff,'.',label=\"diffusive, spatial topology\")\n\nplt.xlabel(\"Quantile of excitatory neurons\")\nplt.ylabel(\"Sum of outgoing synapse weights\") \nplt.legend()\n\nplt.ylim([0.,200])\n\n\nplt.savefig(\"/home/fschubert/Master/plots/out_weight_quantile.png\")\n\n#plt.show()\n\n#import pdb\n#pdb.set_trace()\n","sub_path":"plotting_scripts/out_weight_quantile.py","file_name":"out_weight_quantile.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"217232748","text":"#!/usr/bin/env python3\n\nimport configparser\nimport ipaddress\nimport logging\nimport logging.handlers\nimport os\nimport requests\nimport sys\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom godaddypy import Client, Account\n\nPREVIOUS_IP_FILE = 'previous-ip.txt'\n\n\ndef raise_if_invalid_ip(ip):\n ipaddress.ip_address(ip)\n\n\ndef get_public_ip():\n r = requests.get('https://api.ipify.org')\n r.raise_for_status()\n \n ip = r.text\n raise_if_invalid_ip(ip)\n\n return ip\n\n\ndef get_previous_public_ip():\n try:\n with open(PREVIOUS_IP_FILE, 'r') as f:\n ip = f.read()\n except FileNotFoundError:\n return None\n \n # Sanity check\n raise_if_invalid_ip(ip)\n\n return ip\n\n\ndef store_ip_as_previous_public_ip(ip):\n with open(PREVIOUS_IP_FILE, 'w') as f:\n f.write(ip)\n\n\ndef get_public_ip_if_changed():\n current_public_ip = get_public_ip()\n previous_public_ip = get_previous_public_ip()\n\n if current_public_ip != previous_public_ip:\n return current_public_ip\n else:\n return None\n\n\ndef get_godaddy_client():\n config = configparser.ConfigParser()\n config.read('config/godaddy-dyndns.conf')\n\n account = Account(api_key=config.get('godaddy', 'api_key'),\n api_secret=config.get('godaddy', 'api_secret'))\n if not account:\n raise RuntimeError('Could not log in into GoDaddy')\n\n client = Client(account)\n\n return client\n\n\ndef get_schedule_timer():\n config = configparser.ConfigParser()\n config.read('config/godaddy-dyndns.conf')\n timer = config.get('godaddy', 'timer')\n unit = config.get('godaddy', 'unit')\n\n if not timer or not unit:\n print('Please specify a timer and unit (seconds, minutes, hours, days) in godaddy-dyndns.conf')\n else:\n return timer, unit\n\n\ndef init_logging():\n l = logging.getLogger()\n rotater = logging.handlers.RotatingFileHandler('config/godaddy-dyndns.log', maxBytes=10000000, backupCount=2)\n l.addHandler(rotater)\n l.setLevel(logging.INFO)\n rotater.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\n\n\ndef updatedns():\n ip = get_public_ip_if_changed()\n \n # If the IP hasn't changed then there's nothing to do.\n if ip is None:\n logging.info(\"IP has not changed\")\n return None\n\n # Open config file to read\n config = configparser.ConfigParser()\n config.read('config/godaddy-dyndns.conf')\n domains = [x.strip() for x in (config.get('godaddy', 'domains').split(','))]\n records = [x.strip() for x in (config.get('godaddy', 'records').split(','))]\n \n #Initialize godaddy client\n client = get_godaddy_client()\n\n logging.info(\"Changing listed domains to %s\" % ip)\n \n for domain in client.get_domains():\n if domain in domains: #Check to make sure the domain is requested\n for dns_records in client.get_records(domain, record_type='A'):\n if dns_records[\"name\"] in records:\n full_domain = \"%s.%s\" % (dns_records[\"name\"], domain)\n \n if ip == dns_records[\"data\"]:\n # There's a race here (if there are concurrent writers),\n # but there's not much we can do with the current API.\n logging.info(\"%s unchanged\" % full_domain)\n else:\n if not client.update_record_ip(ip, domain, dns_records[\"name\"], 'A'):\n raise RuntimeError('DNS update failed for %s' % full_domain)\n \n logging.info(\"%s changed from %s to %s\" % (full_domain, dns_records[\"data\"], ip))\n \n store_ip_as_previous_public_ip(ip)\n\n\nif __name__ == '__main__':\n timer, unit = get_schedule_timer()\n \n init_logging()\n logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)\n\n #Initialize schedule\n 
sched = BlockingScheduler()\n if unit == 'seconds':\n sched.add_job(updatedns, 'interval', seconds=int(timer))\n elif unit == 'minutes':\n sched.add_job(updatedns, 'interval', minutes=int(timer))\n elif unit == 'hours':\n sched.add_job(updatedns, 'interval', hours=int(timer))\n elif unit == 'days':\n sched.add_job(updatedns, 'interval', days=int(timer))\n else:\n print('Unit of measurement needs to be either: seconds, minutes, hours or days within godaddy-dyndns.conf')\n\n\n try:\n sched.start()\n except (KeyboardInterrupt, SystemExit):\n pass\n except Exception as e:\n logging.error('Exception: %s' % e)\n logging.shutdown()\n sys.exit(1)","sub_path":"godaddy-dyndns.py","file_name":"godaddy-dyndns.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
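Every key the script reads from config/godaddy-dyndns.conf appears in the code above; a configparser-style file consistent with that parsing would look like the following (all values are placeholders, not real credentials):

[godaddy]
api_key = YOUR_GODADDY_API_KEY
api_secret = YOUR_GODADDY_API_SECRET
; comma-separated lists, matching the .split(',') calls in updatedns()
domains = example.com, example.org
records = @, www
; polling interval; unit must be seconds, minutes, hours or days
timer = 30
unit = minutes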
+{"seq_id":"23945638","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom collections import defaultdict\n\nimport os\nimport yaml\n\nfrom .state import State\nfrom .state_delegate import StateDelegate\n\n\ndef dict_constructor(loader, node):\n return defaultdict(str, loader.construct_pairs(node))\n\n\nclass StateMachine(StateDelegate):\n \"\"\"\n Describes the states available, as well as identifying the current\n state.\n\n StateMachine acts as a delegate for State objects, so that the current\n state can be updated and the current state machine can be persisted after\n each update.\n \"\"\"\n\n def __init__(self, path):\n self._machine = None\n self._current_state = None\n self._current_state_name = ''\n self._path = path\n self._states = None\n\n def did_enter_state(self, old_state, new_state_name):\n \"\"\"StateDelegate method\"\"\"\n old_state_name = old_state.name\n self.states[old_state_name] = old_state\n self._current_state_name = new_state_name\n return True\n\n def save(self):\n \"\"\"StateDelegate method\"\"\"\n states_as_dict = [state.to_dict() for state in list(self.states.values())]\n data = {\n 'current_state': self._current_state_name,\n 'states': states_as_dict,\n }\n with open(self.path, 'wt') as f:\n yaml.dump(data, f, default_flow_style=False)\n\n @property\n def machine(self):\n if self._machine is None:\n self._machine = self._read_machine()\n\n return self._machine\n\n @property\n def current_state(self):\n return self.states[self._current_state_name]\n\n @property\n def path(self):\n return self._path\n\n @property\n def states(self):\n if self._states is None:\n self._states = {}\n self._current_state_name = self.machine['current_state']\n self._define(self.machine['states'])\n\n return self._states\n\n def _define(self, definitions):\n for _, definition in enumerate(definitions):\n state = State(definition)\n state.delegate = self\n state_name = state.name\n self._states[state_name] = state\n\n def _read_machine(self):\n \"\"\"\n Reads the state machine from a YAML file.\n\n Returns:\n - State machine (dict) if read from file\n - State machine (dict) that is only an end state, if path or file\n is missing\n\n Raises:\n - RuntimeError if YAML if not properly formatted\n \"\"\"\n\n if self.path is None:\n return {\n 'name': 'no_state',\n }\n\n if os.path.exists(self.path):\n with open(self.path, 'rt') as data:\n try:\n machine = yaml.load(data)\n except yaml.YAMLError:\n raise RuntimeError(f'{self.path} is not a YAML file')\n\n return machine\n else:\n return {\n 'name': 'no_state',\n }\n","sub_path":"state_service/state_machine.py","file_name":"state_machine.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"576726295","text":"#!/usr/bin/env python\n# license removed for brevity\nimport time\nimport rospy\nimport csv\nimport os\nfrom std_msgs.msg import String\nfrom ev3_ros.msg import MotorCommands\n\n\ndef readInCsv():\n global robotNames\n robotNames = {}\n global filePath\n filePath = raw_input(\"Enter csv URI: \")\n while \".csv\" not in filePath:\n print(filePath)\n filePath = raw_input(\"not a csv file, try again: \")\n\n with open(os.getcwd() + \"/../data/\" + filePath) as csvFile:\n reader = csv.DictReader(csvFile)\n for row in reader:\n if row[\"name\"] not in robotNames:\n robotNames[row[\"name\"]] = {\"pub\": rospy.Publisher(row[\"name\"], MotorCommands, queue_size=10),\n \"commandDict\": {round(float(row[\"startTime\"]), 2):\n {\"start_time\": float(row[\"startTime\"]),\n \"end_time\": float(row[\"endTime\"]),\n \"right_speed\": float(row[\"rightMotorSpeed\"]),\n \"left_speed\": float(row[\"leftMotorSpeed\"])\n }\n }\n }\n print(row[\"name\"], \" has been added at: \", round(float(row[\"startTime\"]), 2), \" with motor speeds: \", float(row[\"rightMotorSpeed\"]), float(row[\"leftMotorSpeed\"]))\n else:\n robotNames[row[\"name\"]][\"commandDict\"][round(float(row[\"startTime\"]), 2)] = {\n \"start_time\": float(row[\"startTime\"]),\n \"end_time\": float(row[\"endTime\"]),\n \"right_speed\": float(row[\"rightMotorSpeed\"]),\n \"left_speed\": float(row[\"leftMotorSpeed\"])\n }\n print(row[\"name\"], \" new motor speeds at: \", round(float(row[\"startTime\"]), 2), \" with motor speeds: \", float(row[\"rightMotorSpeed\"]), float(row[\"leftMotorSpeed\"]))\n\n return filePath\n\n\ndef mc_pub_csv():\n pub = rospy.Publisher('default', MotorCommands, queue_size=10)\n rospy.init_node('mc_pub_csv', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n\n #reader = csv.DictReader(csvFile)\n\n for x in range(0, 10):\n for robot in robotNames:\n pub = robotNames[robot][\"pub\"]\n mc = MotorCommands()\n mc.right_speed = 0.0\n mc.left_speed = 0.0\n pub.publish(mc)\n rospy.loginfo(robot)\n rospy.loginfo(mc)\n rate.sleep()\n\n start = \"\"\n first = True\n start_time = time.clock()\n\n while not rospy.is_shutdown():\n while \"y\" not in start:\n if \"n\" in start:\n exit(0)\n start = raw_input(\"start the dance: (y/n)\")\n if \"y\" in start and not first:\n start_time = time.clock()\n first = False\n\n time_diff = time.clock() - start_time\n\n for robot in robotNames:\n\n #print(\" current_time: \", round(current_time,2),\n # \"robot: \", robot,\n # \" startTime: \", robotNames[robot][\"commandDict\"])\n if (round(float(time_diff), 4)*100) in robotNames[robot][\"commandDict\"]:\n #print(\" startTime: \", robotNames[robot][\"commandDict\"],\n # \" pub: \", robotNames[robot][\"pub\"],\n # \" mc.right_speed: \", robotNames[robot][\"commandDict\"][round(current_time, 2)][\"right_speed\"],\n # \" mc.left_speed: \", robotNames[robot][\"commandDict\"][round(current_time, 2)][\"left_speed\"]\n # )\n pub = robotNames[robot][\"pub\"]\n right_speed = robotNames[robot][\"commandDict\"][(round(float(time_diff), 4)*100)][\"right_speed\"]\n left_speed = robotNames[robot][\"commandDict\"][(round(float(time_diff), 4)*100)][\"left_speed\"]\n #right_speed = max(-1, min(right_speed, 1))\n #left_speed = max(-1, min(left_speed, 1))\n while right_speed < 0.1:\n right_speed = right_speed * 10\n while left_speed < 0.1:\n left_speed = left_speed * 10\n mc.right_speed = right_speed\n mc.left_speed = left_speed\n print(\"time diff: \", (round(float(time_diff), 4)*100))\n print(\"New speed at time: \", time.clock(), \" for 
Robot: \", robot, \" with pub: \", pub, \" rs: \", mc.right_speed, \" ls: \", mc.left_speed)\n\n\n rospy.loginfo(mc)\n pub.publish(mc)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n readInCsv()\n except Exception:\n print(Exception)\n pass\n\n try:\n mc_pub_csv()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/mc_pub_csv_speed_fix.py","file_name":"mc_pub_csv_speed_fix.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"150699492","text":"# *- encoding=utf-8 -*-\n'''\n用于转换markdown 为 html\n'''\nimport os\nimport markdown\nfrom bs4 import BeautifulSoup\nclass MarkdownToHtml(object):\n \"\"\"\n 一个转换类\n \"\"\"\n def __init__(self, cssFilePath=None):\n if cssFilePath != None:\n # 读取外部css文件的内容\n self.get_style(cssFilePath)\n self.head_tag = ''\n\n def get_style(self, css):\n '''\n 读取css文件\n '''\n with open(css, 'r') as file:\n string = file.read()\n new_css = '