diff --git "a/4889.jsonl" "b/4889.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4889.jsonl"
@@ -0,0 +1,744 @@
+{"seq_id":"385355481","text":"\"\"\"\n * Author - danish\n * Date - 16/11/20\n * Time - 1:31 AM\n * Title - Find Power Of 2 between 0 to 31\n\"\"\"\nclass Power:\n # Definning constructor method\n def __init__(self,number):\n self.number = number\n\n # Method to check number is valid or not\n def checkValidNumber(self):\n if 0 <= self.number < 31:\n print(f\"Power Of Two Is: {Power.powerOf2(self.number)}\")\n else:\n print(\"Not a valid power.\")\n\n # Method to calculate power of 2\n def powerOf2(number):\n if number == 0:\n return 1\n else:\n return Power.powerOf2(number-1)*2\n\nif __name__ == \"__main__\":\n number = int(input(\"Enter a Number: \"))\n powerObject = Power(number)\n powerObject.checkValidNumber()\n","sub_path":"PowerOfTwo.py","file_name":"PowerOfTwo.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"210455116","text":"#test.py\nimport data_loader as dl\nimport os\nimport models\n#import modeling\nimport numpy as np\nimport time\n\n\n\nsvd=models.SoRec(num_epochs=20)\nfit_time=[]\nrmse_record=[]\ndata1=dl.loader_100k_1()\ndata1=dl.dataset_to_matrix(data1)\ntest1=dl.loader_100k_t1()\ndata2=dl.loader_100k_2()\ndata2=dl.dataset_to_matrix(data2)\ntest2=dl.loader_100k_t2()\ndata3=dl.loader_100k_3()\ndata3=dl.dataset_to_matrix(data3)\ntest3=dl.loader_100k_t3()\ndata4=dl.loader_100k_4()\ndata4=dl.dataset_to_matrix(data4)\ntest4=dl.loader_100k_t4()\ndata5=dl.loader_100k_5()\ndata5=dl.dataset_to_matrix(data5)\ntest5=dl.loader_100k_t5()\n\n\nstart_time=time.time()\nsvd.fit(data1)\nend_time=time.time()\nfit_time.append(str(end_time-start_time))\nsvd.save_parameters(\"svd1.txt\")\nestimate=svd.predict(test1)\nrmse=accuracy.RMSE(estimate)\nrmse_record.append(rmse)\nstart_time=time.time()\nsvd.fit(data2)\nend_time=time.time()\nfit_time.append(str(end_time-start_time))\nsvd.save_parameters(\"svd2.txt\")\nestimate=svd.predict(test2)\nrmse=accuracy.RMSE(estimate)\nrmse_record.append(rmse)\nstart_time=time.time()\nsvd.fit(data3)\nend_time=time.time()\nfit_time.append(str(end_time-start_time))\nsvd.save_parameters(\"svd3.txt\")\nestimate=svd.predict(test3)\nrmse=accuracy.RMSE(estimate)\nrmse_record.append(rmse)\nstart_time=time.time()\nsvd.fit(data4)\nend_time=time.time()\nfit_time.append(str(end_time-start_time))\nsvd.save_parameters(\"svd4.txt\")\nestimate=svd.predict(test4)\nrmse=accuracy.RMSE(estimate)\nrmse_record.append(rmse)\nstart_time=time.time()\nsvd.fit(data5)\nend_time=time.time()\nfit_time.append(str(end_time-start_time))\nsvd.save_parameters(\"svd5.txt\")\nestimate=svd.predict(test5)\nrmse=accuracy.RMSE(estimate)\nrmse_record.append(rmse)\n\nfor i in range(5):\n\tprint(i+1,\": fitting_time: \", fit_time[i],\" RMSE: \", rmse_record[i])\n\n","sub_path":"CF_recommender_system/SoRec/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"505320976","text":"#Main module.\r\n\r\nimport tkinter as tk\r\nimport numpy as np\r\nfrom scipy.sparse.linalg import eigsh\r\nimport scipy.sparse as spa\r\nimage_path = 'Images/'\r\n\r\n#Grid start coordinates, grid spacings and number of grid squares.\r\nx_start = 20\r\ny_start = 20\r\ndx_grid = 65\r\ndy_grid = 65\r\nx_number = 12\r\ny_number = 6\r\n\r\n#Initial spin position\r\nspin_posx_init = 900\r\nspin_posy_init = 150\r\n\r\n#Canvas Size.\r\ncanvas_width = 1000\r\ncanvas_height = 500\r\n\r\n#Spin matrices and identity matrix.\r\nI2 = spa.bsr_matrix(np.eye(2))\r\nSx = spa.bsr_matrix([[0,0.5],[0.5,0]])\r\nSy = spa.bsr_matrix([[0,0.5*1j],[-0.5*1j,0]])\r\nSz = spa.bsr_matrix([[0.5,0],[0,-0.5]])\r\n\r\n#J-value.\r\nJ = 1\r\n\r\n#Spin Image.\r\nspin_type = 'Test.gif'\r\nspin_raised = 'Spin_Raised.gif'\r\nspin_sunken = 'Spin_Sunken.gif'\r\n\r\n\r\n#################Calculation Functions###########################\r\n\r\n\r\n#Creates matrix for a particular set of spins. N: Total number of spins.\r\ndef Create_Int(spin1,spin2,N):\r\n Int_Matx = 1\r\n Int_Maty = 1\r\n Int_Matz = 1\r\n for i in range(spin1):\r\n Int_Matx = spa.kron(Int_Matx,I2)\r\n Int_Maty = spa.kron(Int_Maty,I2)\r\n Int_Matz = spa.kron(Int_Matz,I2)\r\n \r\n Int_Matx = spa.kron(Int_Matx,Sx)\r\n Int_Maty = spa.kron(Int_Maty,Sy)\r\n Int_Matz = spa.kron(Int_Matz,Sz)\r\n \r\n diff = spin2 - spin1\r\n for i in range(diff-1):\r\n Int_Matx = spa.kron(Int_Matx,I2)\r\n Int_Maty = spa.kron(Int_Maty,I2)\r\n Int_Matz = spa.kron(Int_Matz,I2)\r\n \r\n Int_Matx = spa.kron(Int_Matx,Sx)\r\n Int_Maty = spa.kron(Int_Maty,Sy)\r\n Int_Matz = spa.kron(Int_Matz,Sz) \r\n \r\n for i in range(N-1-spin2):\r\n Int_Matx = spa.kron(Int_Matx,I2)\r\n Int_Maty = spa.kron(Int_Maty,I2)\r\n Int_Matz = spa.kron(Int_Matz,I2) \r\n \r\n Int = Int_Matx + Int_Maty + Int_Matz\r\n return Int\r\n\r\n#############Main UI#################\r\n\r\nclass Spin_Object(object):\r\n def __init__(self, spin_type, xpos, ypos):\r\n self.xpos, self.ypos = xpos, ypos\r\n self.spin_type = spin_type\r\n \r\n #Spin ID number.\r\n self.spin_number = main_canvas.spin_count\r\n \r\n #Set spin status (Locked: Due to interaction arrow, need to lock the spin. -- 0 is unlocked.)\r\n self.lock_status = 0\r\n \r\n #Create image.\r\n self.spin_image = tk.PhotoImage(\r\n file = '{}{}'.format(image_path, spin_type))\r\n \r\n self.spin = main_canvas.create_image(\r\n xpos, ypos, image = self.spin_image)\r\n \r\n #On click, create a new spin at the back.\r\n main_canvas.tag_bind(self.spin, '', self.createnew) \r\n\r\n def createnew(self, event):\r\n x_init, y_init = main_canvas.coords(self.spin)\r\n \r\n #If starting position of the spin is at its original position, create a new spin.\r\n if (x_init == self.xpos and \r\n y_init == self.ypos):\r\n \r\n #First add a count to spin number.\r\n main_canvas.spin_count += 1\r\n \r\n #Then create the spin. 
\r\n main_canvas.spin_list.append(Spin_Object(\r\n self.spin_type, self.xpos, self.ypos)) \r\n \r\n #Assigning the initial click position to an attribute.\r\n self.x = event.x\r\n self.y = event.y\r\n \r\n #Moving the spin.\r\n main_canvas.tag_bind(self.spin, '', \r\n self.startmove)\r\n #End moving the spin, delete the 2nd spin if necessary.\r\n main_canvas.tag_bind(self.spin, '', self.endmove) \r\n\r\n def startmove(self, event):\r\n \r\n print('{0}'.format(self.lock_status))\r\n \r\n if self.lock_status == 0:\r\n #Calculating how much to move the spin.\r\n rel_xpos = event.x - self.x\r\n rel_ypos = event.y - self.y\r\n \r\n #Updating the start motion value.\r\n self.x, self.y = event.x, event.y\r\n \r\n main_canvas.move(self.spin, rel_xpos, rel_ypos)\r\n \r\n else:\r\n self.error_lock_window = tk.Toplevel()\r\n self.error_lock = tk.Label(self.error_lock_window, \r\n text = 'Error!\\nSpin is linked via an interaction.\\nPlease remove interaction arrow first.')\r\n self.error_lock.grid(row = 0, column = 0)\r\n \r\n def endmove(self, event):\r\n final_x, final_y = main_canvas.coords(self.spin)\r\n \r\n #Code in if cycle is to snap the spin to grid.\r\n #If spin is outside of grid lines, return it to the original position\r\n if (final_x > main_canvas.grid_xlines[-1] or \r\n final_y > main_canvas.grid_ylines[-1]):\r\n \r\n x_spot = self.xpos\r\n y_spot = self.ypos\r\n \r\n elif (final_x < main_canvas.grid_xlines[0]):\r\n \r\n #Return it to the first column if spin is outside first column.\r\n x_spot = main_canvas.grid_xlines[0] + dx_grid / 2\r\n \r\n if (final_y < main_canvas.grid_ylines[0]):\r\n \r\n #If spin is at top left corner, but outside grid, move it to 1st square.\r\n y_spot = main_canvas.grid_ylines[0] + dx_grid / 2 \r\n \r\n else:\r\n #Find the square the spin should be placed in.\r\n y_vec = [y for y in main_canvas.grid_ylines if y >= final_y]\r\n y_spot = y_vec[0] - dy_grid / 2\r\n \r\n elif (final_y < main_canvas.grid_ylines[0]):\r\n \r\n y_spot = main_canvas.grid_ylines[0] + dy_grid / 2\r\n \r\n #Find square which the spin should be placed in.\r\n x_vec = [x for x in main_canvas.grid_xlines if x >= final_x]\r\n x_spot = x_vec[0] - dx_grid / 2\r\n \r\n else:\r\n #Find correct box.\r\n x_vec = [x for x in main_canvas.grid_xlines if x >= final_x]\r\n x_spot = x_vec[0] - dx_grid / 2 \r\n y_vec = [y for y in main_canvas.grid_ylines if y >= final_y]\r\n y_spot = y_vec[0] - dy_grid / 2 \r\n\r\n #Find relative amount to move spin by.\r\n rel_xpos = x_spot - final_x\r\n rel_ypos = y_spot - final_y \r\n \r\n #Move spin to new position.\r\n main_canvas.move(self.spin, rel_xpos, rel_ypos)\r\n \r\n #Check the positions of all other spins.\r\n for i in main_canvas.spin_list:\r\n spin_id = i.spin_number\r\n \r\n #Don't compare spin to itself.\r\n if spin_id != self.spin_number:\r\n x_init, y_init = main_canvas.coords(i.spin)\r\n \r\n #If have two spins on the same spot,\r\n if x_init == x_spot and y_init == y_spot:\r\n \r\n #Delete the spin (that is not moved) & change the spin_count.\r\n main_canvas.delete(i.spin)\r\n del main_canvas.spin_list[spin_id]\r\n main_canvas.spin_count -= 1\r\n \r\n #Re-number all spins.\r\n for j in range(0, len(main_canvas.spin_list)):\r\n main_canvas.spin_list[j].spin_number = j\r\n\r\n\r\nclass Spin_Button(object):\r\n def __init__(self, spin_type, xpos, ypos, button_id):\r\n self.id = button_id\r\n self.xpos, self.ypos = xpos, ypos\r\n \r\n #Create image of spin.\r\n self.spin_raised = tk.PhotoImage(\r\n file = '{}{}'.format(image_path, spin_raised))\r\n 
self.spin_sunken = tk.PhotoImage(\r\n file = '{}{}'.format(image_path, spin_sunken))\r\n \r\n #Create the button on canvas.\r\n self.sp_button = main_canvas.create_image(\r\n xpos, ypos, image = self.spin_raised, tag = 'sp_button')\r\n \r\n main_canvas.tag_bind(self.sp_button, '', self.activate)\r\n \r\n \r\n def activate(self,event):\r\n print(main_canvas.link_status)\r\n print(self.id)\r\n #If no spin is clicked yet.\r\n if main_canvas.link_status == -1:\r\n \r\n #Then set link status to active (Own ID).\r\n main_canvas.link_status = self.id\r\n \r\n #Sink & change button background.\r\n main_canvas.itemconfig(self.sp_button, image = self.spin_sunken)\r\n\r\n \r\n #If select the same spin again while activated.\r\n elif main_canvas.link_status == self.id:\r\n #Lift the button up & reset link status.\r\n main_canvas.itemconfig(self.sp_button, image = self.spin_raised)\r\n main_canvas.link_status = -1\r\n \r\n #If one spin is already clicked.\r\n else:\r\n \r\n #Reset the spin buttons to raised states.\r\n main_canvas.itemconfig(main_canvas.s_button_list[main_canvas.link_status].sp_button,\r\n image = self.spin_raised)\r\n \r\n #Add in spin arrows.\r\n main_canvas.int_list.append(Int_Arrow(\r\n self.xpos, self.ypos, main_canvas.s_button_list[main_canvas.link_status].xpos,\r\n main_canvas.s_button_list[main_canvas.link_status].ypos))\r\n \r\n #Lock spins.\r\n main_canvas.spin_list[main_canvas.link_status].lock_status += 1\r\n main_canvas.spin_list[self.id].lock_status += 1\r\n \r\n #Reset spin list status to no clicked spin.\r\n main_canvas.link_status = -1\r\n\r\nclass Int_Arrow(object):\r\n def __init__(self, xpos1, ypos1, xpos2, ypos2):\r\n #Retain interaction positions.\r\n self.xpos1 = xpos1\r\n self.xpos2 = xpos2\r\n self.ypos1 = ypos1\r\n self.ypos2 = ypos2\r\n \r\n #Get the positions of the spin buttons.\r\n xpos_left, xpos_right = min(xpos1, xpos2), max(xpos1, xpos2)\r\n ypos_down, ypos_up = min(ypos1, ypos2), max(ypos1, ypos2)\r\n \r\n if ((xpos1 == xpos_left and ypos1 == ypos_up) or \r\n (xpos1 == xpos_right and ypos1 == ypos_down)):\r\n switch = 0\r\n \r\n else:\r\n switch = 1\r\n \r\n #Check for error: If spins are too close, can't draw arrow.\r\n if (((xpos_right == xpos_left + dx_grid) or xpos_right == xpos_left) and \r\n ((ypos_down == ypos_up + dy_grid) or ypos_down == ypos_up)):\r\n \r\n #Put up error message.\r\n self.Error_Close_Msg()\r\n \r\n #Delete the interaction term created.\r\n main_canvas.int_list.pop()\r\n \r\n else:\r\n \r\n if xpos_left + dx_grid >= xpos_right :\r\n xpos_left = xpos_left + dx_grid / 2\r\n xpos_right = xpos_right - dx_grid / 2\r\n\r\n if ypos_up + dy_grid >= ypos_down:\r\n ypos_down = ypos_down + dy_grid / 2\r\n ypos_up = ypos_up - dy_grid / 2\r\n \r\n if switch == 1:\r\n #Switch to match spin coordinates.\r\n y_temp = ypos_up\r\n ypos_up = ypos_down\r\n ypos_down = y_temp \r\n \r\n #Creates arrow and binds key to delete the particular interaction.\r\n self.arrow = main_canvas.create_line(xpos_left, ypos_up, xpos_right, ypos_down,\r\n arrow = tk.BOTH, arrowshape = (5,5,5), width = 3)\r\n main_canvas.tag_bind(self.arrow, '', self.delete_popup)\r\n \r\n def delete_popup(self,event):\r\n self.del_query = tk.Toplevel()\r\n \r\n self.query_msg = tk.Label(self.del_query, text = 'Confirm delete?')\r\n self.query_msg.grid(row = 0, column = 0)\r\n\r\n yes_button = tk.Button(self.del_query, text = 'Yes', command = self.delete)\r\n yes_button.grid(row = 1, column = 0)\r\n \r\n no_button = tk.Button(self.del_query, text = 'No', command = self.resume)\r\n 
no_button.grid(row = 1, column = 1) \r\n\r\n def delete(self):\r\n #Close message.\r\n self.del_query.destroy()\r\n \r\n #Delete Interaction Arrow.\r\n main_canvas.delete(self.arrow)\r\n \r\n #Delete corresponding term in int_list.\r\n z = -1;\r\n for j in main_canvas.int_list:\r\n xint1, yint1 = j.xpos1, j.ypos1\r\n xint2, yint2 = j.xpos2, j.ypos2\r\n z += 1\r\n \r\n if xint1 == self.xpos1 and xint2 == self.xpos2 and yint1 == self.ypos1 and yint2 == self.ypos2:\r\n del main_canvas.int_list[z]\r\n \r\n elif xint2 == self.xpos1 and xint1 == self.xpos2 and yint2 == self.ypos1 and yint1 == self.ypos2:\r\n del main_canvas.int_list[z]\r\n \r\n #Help to remove one locking value on spin object. If lock_status = 0 after this, the spin is officially\r\n #unlocked.\r\n for j in main_canvas.spin_list:\r\n spinx, spiny = main_canvas.coords(j.spin)\r\n if spinx == self.xpos1 and spiny == self.ypos1:\r\n j.lock_status -= 1\r\n \r\n if spinx == self.xpos2 and spiny == self.ypos2:\n j.lock_status -= 1\r\n \r\n \r\n def resume(self):\r\n self.del_query.destroy()\r\n \r\n def Error_Close_Msg(self):\r\n self.error_win = tk.Toplevel()\r\n \r\n self.error_msg = tk.Label(self.error_win, text = \r\n 'Error! Please place spins at least one space apart!')\r\n self.error_msg.grid(row = 1, column = 0)\r\n\r\n ok_button = tk.Button(self.error_win, text = 'Ok', command = self.delete_err)\r\n ok_button.grid(row = 2, column = 0)\r\n \r\n def delete_err(self):\r\n self.error_win.destroy()\r\n\r\nclass Main_Screen(tk.Frame):\r\n\r\n def __init__(self, master):\r\n \r\n tk.Frame.__init__(self, master)\r\n \r\n #Create main canvas on application page.\r\n global main_canvas \r\n main_canvas = tk.Canvas(self, width = canvas_width, height = canvas_height, bg = 'white')\r\n \r\n #Canvas position.\r\n main_canvas.grid(column = 0, row = 0)\r\n \r\n main_canvas.grid_xlines = []\r\n main_canvas.grid_ylines = []\r\n x_end = x_start + dx_grid * x_number\r\n y_end = y_start + dy_grid * y_number\r\n \r\n #Store grid information onto main canvas and create the grid.\r\n for i in range(x_number + 1):\r\n val = i * dx_grid + x_start\r\n main_canvas.grid_xlines.append(val)\r\n main_canvas.create_line(val, y_start, val, y_end, fill = 'grey')\r\n \r\n for j in range(y_number + 1):\r\n val = j * dy_grid + y_start\r\n main_canvas.grid_ylines.append(val)\r\n main_canvas.create_line(x_start, val, x_end, val, fill = 'grey')\r\n \r\n #Initialise the spin count variable.\r\n main_canvas.spin_count = 0\r\n \r\n #Creates spin 1 as the first element of the list.\r\n main_canvas.spin_list = [Spin_Object(\r\n spin_type, spin_posx_init, spin_posy_init)] \r\n\r\n #Create button widget for mode change.\r\n main_canvas.mode_button = tk.Button(\r\n self, text = 'Interaction', command = self.mode_to_int)\r\n main_canvas.mode_window = main_canvas.create_window(\r\n 900, 300, window = main_canvas.mode_button)\r\n \r\n #Initialise interaction matrix.\r\n main_canvas.int_list = []\r\n \r\n #Button to click for calculations.\r\n main_canvas.calc_button = tk.Button(\r\n self, text = 'Tabulate', command = self.calc_initiate)\r\n main_canvas.calc_window = main_canvas.create_window(\r\n 900, 350, window = main_canvas.calc_button)\r\n \r\n def mode_to_int(self):\r\n #Change button to \"pressed down\" mode.\r\n main_canvas.mode_button.configure(relief = 'sunken', command = self.mode_to_spin)\r\n main_canvas.itemconfigure(main_canvas.mode_window, window = main_canvas.mode_button)\r\n \r\n for i in main_canvas.int_list:\r\n main_canvas.tag_bind(i.arrow, '', 
i.delete_popup)\r\n \r\n #Create variable link status to track clicks on spin buttons.\r\n main_canvas.link_status = -1\r\n \r\n #Initialise variables.\r\n main_canvas.s_button_list = []\r\n \r\n for i in main_canvas.spin_list:\r\n x_pos, y_pos = main_canvas.coords(i.spin)\r\n \r\n #Check that the spin we are looking at is not at its original position (outside grid).\r\n if x_pos != i.xpos:\r\n main_canvas.s_button_list.append(\r\n Spin_Button(spin_type, x_pos, y_pos, i.spin_number))\r\n \r\n def mode_to_spin(self):\r\n #Raise button.\r\n main_canvas.mode_button.configure(relief = 'raised', command = self.mode_to_int)\r\n main_canvas.itemconfigure(main_canvas.mode_window, window = main_canvas.mode_button)\r\n \r\n main_canvas.s_button_list = []\r\n main_canvas.delete('sp_button')\r\n \r\n for i in main_canvas.int_list:\r\n main_canvas.tag_unbind(i.arrow, '')\r\n \r\n def calc_initiate(self):\r\n #To start calculations, need to extract all spins & interaction info.\r\n \r\n main_canvas.spin_info = np.zeros((len(main_canvas.spin_list)-1,3))\r\n z = int(0)\r\n \r\n #Extract xpos & ypos info for spin.\r\n for i in main_canvas.spin_list:\r\n \r\n xpos, ypos = main_canvas.coords(i.spin)\r\n \r\n #Ignore spin that is at original position.\r\n if xpos != spin_posx_init or ypos != spin_posy_init:\r\n main_canvas.spin_info[z] = [z, xpos, ypos]\r\n z += 1\r\n \r\n z = 0\r\n main_canvas.int_info = np.zeros((len(main_canvas.int_list),3))\r\n \r\n #Extract spin numbers the interaction is linking.\r\n for i in main_canvas.int_list:\r\n for j in range(len(main_canvas.spin_info)):\r\n if (i.xpos1 == main_canvas.spin_info[j][1]) and (i.ypos1 == main_canvas.spin_info[j][2]):\r\n spin1 = main_canvas.spin_info[j][0]\r\n if (i.xpos2 == main_canvas.spin_info[j][1]) and (i.ypos2 == main_canvas.spin_info[j][2]):\r\n spin2 = main_canvas.spin_info[j][0]\r\n if spin1 > spin2:\r\n temp = spin2\r\n spin2 = spin1\r\n spin1 = temp \r\n main_canvas.int_info[z] = [z,spin1,spin2]\r\n z += 1\r\n \r\n #Initialising matrix.\r\n D = spa.bsr_matrix(np.zeros((2**len(main_canvas.spin_info),2**len(main_canvas.spin_info))))\r\n \r\n #Extract spins corresponding to each interaction arrow. Then create and interaction term and add it to \r\n #the Hamiltoninan (D).\r\n for i in range(len(main_canvas.int_info)):\r\n spin1 = int(main_canvas.int_info[i][1])\r\n spin2 = int(main_canvas.int_info[i][2]) \r\n Int = Create_Int(spin1, spin2, len(main_canvas.spin_info))\r\n D = D + J * Int\r\n \r\n #If there is only two spins, use eig to solve for eigenvalues.\r\n if len(main_canvas.spin_info) == 2:\r\n D = D.todense()\r\n val, vec = np.linalg.eig(D)\r\n \r\n #Else, use eigsh.\r\n else:\r\n val, vec = eigsh(D, k = 6, which = 'SA')\r\n \r\n #Sorts the eigenvalues & eigenvectors.\r\n ind = np.argsort(val)\r\n val = np.real(val[ind].round(decimals = 2))\r\n vec = vec[:,ind]\r\n \r\n \r\n #Create the result window that outputs the lowest 4 energy values.\r\n self.result_window = tk.Toplevel()\r\n self.result = tk.Label(self.result_window, \r\n text = 'E1 = {0}\\nE2 = {1}\\nE3 = {2}\\nE4 = {3}'.format(val[0], val[1], val[2], val[3]))\r\n self.result.grid(row = 0, column = 0)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = tk.Tk()\r\n Main_Screen(app).pack()\r\n app.mainloop()\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":20099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"1012226","text":"v1=0\r\nv2=0\r\nop=0\r\nwhile (op<=3 and op==5):\r\n\tn1=(int(input(\"Digirte um numero:\")))\r\n\tn2=int(input(\"Digite um numero:\"))\r\n\tv1=n1\r\n\tv2=n2\r\n\tprint(\"Digite a operação:\")\r\n\tprint(''' \r\n\t\t[1] somar\r\n\t\t[2] multiplicar\r\n\t\t[3] monstrar o maior valor\r\n\t\t[4]digitar novos numeros\r\n\t\t[5]sair''')\r\n\toperacao=int(input(''))\r\n\top=operacao\r\n\tif operacao=='1':\r\n\t\tv1=n1+n2\r\n\telif operacao=='2':\r\n\t\tv1=n1*n2\r\n\telif operacao=='3':\r\n\t\tif n1>n2:\r\n\t\t\tprint(\"n1 é maior\")\r\n\t\telse: \r\n\t\t\tprint(\"n2 é maior\")\r\n\t\r\n\telse:\r\n\t\tbreak\r\nprint(\"Os valores digitados foram: {} e {} \".format(v1,v2))\r\n","sub_path":"estrutura de repeticao/exxx3.py","file_name":"exxx3.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"405425514","text":"import pickle\nimport copy\n\n# 通过在数据前增加数据长度段来分割粘连包\n# 数据长度段的长度为4字节\n# 数据域统一为字典经过utf-8编码后的字符串\n\nclass TCPpackage:\n def __init__(self, pck=None):\n if pck:\n self._content = bytearray(pck)\n else:\n self._content = bytearray(0)\n\n def get_content(self):\n \"\"\"\n 将数据域数据转化为字典返回\n :return:\n \"\"\"\n try:\n content = self._content\n content = content.decode(encoding='utf-8')\n return eval(content)\n except:\n raise Exception(\"解析数据异常!\")\n\n def set_content(self, data):\n data = str(data)\n self._content = bytearray(data.encode(encoding='utf-8'))\n\n def get_pck_with_head(self):\n pck_length = bytearray(len(self._content).to_bytes(4, byteorder='little'))\n return pck_length + self._content\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"bin/TCPpackage.py","file_name":"TCPpackage.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"604152597","text":"# Embedded file name: ./oneOffs.py\r\nimport subprocess\r\nimport logging\r\nimport re\r\n\r\nclass OneOffs:\r\n \"\"\"\r\n This function is used to remove RPMs.\r\n \"\"\"\r\n\r\n def removeRPMs(self, rpmsToRemove, loggerName):\r\n logger = logging.getLogger(loggerName)\r\n logger.info('Removing the RPMs, which were identified by the csur resource file for removal.')\r\n rpmsToRemoveList = re.sub(',\\\\s*', ' ', rpmsToRemove).split()\r\n rpmList, result = self.__checkRPMsForRemoval(rpmsToRemoveList, loggerName)\r\n if not result:\r\n logger.error('Problems were encountered while getting the updated list of RPMs to remove.')\r\n return (False, rpmsToRemove)\r\n rpmsToRemove = ' '.join(rpmList)\r\n if len(rpmsToRemove) != 0:\r\n command = 'rpm -e ' + rpmsToRemove\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.info('The output of the command (' + command + ') used to remove the pre-identified RPMs for removal was: ' + out.strip())\r\n if result.returncode == 0:\r\n logger.info('Successfully removed the following RPMs: ' + rpmsToRemove)\r\n else:\r\n logger.error('Problems were encountered while removing the RPMs which were identified by the patch resource file for removal.\\n' + err)\r\n return (False, re.sub('\\\\s+', ', ', rpmsToRemove))\r\n else:\r\n logger.info('There were no RPMs that needed to be removed.')\r\n logger.info('Done removing the RPMs, which were identified by the csur resource file for removal.')\r\n return (True, '')\r\n\r\n def __checkRPMsForRemoval(self, rpmsToRemoveList, loggerName):\r\n updatedRPMList = []\r\n result = True\r\n logger = logging.getLogger(loggerName)\r\n logger.info('Checking the installed RPMs for removal.')\r\n command = 'rpm -qa'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.info('The output of the command (' + command + ') used to get a list of the installed RPMs was: ' + out.strip())\r\n if result.returncode != 0:\r\n logger.error('Problems were encountered while getting a list of the installed RPMs.\\n' + err)\r\n result = False\r\n else:\r\n rpmList = out.split()\r\n for rpm in rpmsToRemoveList:\r\n for installedRPM in rpmList:\r\n if re.match(rpm, installedRPM) != None:\r\n updatedRPMList.append(installedRPM)\r\n\r\n logger.info('Done checking the installed RPMs for removal.')\r\n return (updatedRPMList, result)","sub_path":"2018.02/CS500/csur-1.5.3-sles11/modules/oneOffs.py","file_name":"oneOffs.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"168547472","text":"from urllib.parse import quote_plus as quote\nfrom argparse import ArgumentParser\nfrom arrow import Arrow\nfrom fs.osfs import OSFS\nfrom yattag import Doc, indent\nfrom shared_code import FullPaths, is_dir\n\ndots = False\n\n\ndef accept(name, moreDots=dots):\n if moreDots:\n return True\n else:\n return not name.startswith('.')\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description=\"Create a sitemap of a directory on your local file system\", prog='sitemap',\n usage='%(prog)s [options]')\n parser.add_argument('-dir', '--directory', help='directory to use', action=FullPaths, type=is_dir)\n parser.add_argument('-dots', help='include dot files', action='store_true')\n args = parser.parse_args()\n dir = OSFS(args.directory)\n dots = args.dots\n dirs = []\n skipFirst = True\n doc, tag, text = Doc().tagtext()\n with tag('urlSet', xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"):\n for name, stats in dir.listdirinfo(files_only=True):\n if accept(name):\n with tag('url'):\n with tag('loc'):\n text('http://www.example.com/%s' % quote(name))\n with tag('lastmod'):\n text(str(Arrow.utcfromtimestamp(stats['modified_time'].timestamp())))\n for aDir in dir.walkdirs():\n if skipFirst:\n skipFirst = False\n continue\n if accept(aDir[1:None]):\n for name, stats in dir.listdirinfo(path=aDir, files_only=True):\n if accept(name):\n with tag('url'):\n with tag('loc'):\n text('http://www.example.com%s/%s' %(aDir,quote(name)))\n with tag('lastmod'):\n text(str(Arrow.utcfromtimestamp(stats['modified_time'].timestamp())))\n\n\n dir.close()\n result = indent(\n doc.getvalue(),\n indentation=' ' * 2,\n newline='\\r\\n'\n )\n\n print(result)\n","sub_path":"assignments/a1/code/siteMap.py","file_name":"siteMap.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"7550207","text":"# get top posts in a subreddit\r\nimport praw # pip install this\r\n\r\nreddit = praw.Reddit( # my application\r\n client_id='UctJAJCE-8-nvA',\r\n client_secret='1FsIcCXLLrKqxcAU3bRISfzuUrI',\r\n user_agent = 'my user agent'\r\n\r\n)\r\nsb = input(\"Enter subreddit to go to\")\r\nn = int(input(\"Enter the number of titles needed\"))\r\nfor submission in reddit.subreddit(sb).hot(limit=n):\r\n print(submission.title)\r\n print('=========================')\r\n","sub_path":"RedditBotNews.py","file_name":"RedditBotNews.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"196511494","text":"import tweepy as tp\nimport time\nimport os\n\n# Credenciales para acceder a la API de Twitter\nconsumer_key = 'LLAVE DEL CONSUMIDOR'\nconsumer_secret = 'SECRETO DEL CONSUMIDOR'\naccess_token = 'ACCESO SIMBÓLICO'\naccess_secret = 'ACCESO SECRETO'\n\n# Acceder a la cuenta de desarrollador\nauth = tp.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tp.API(auth)\n\n# Iteración sobre las fotos en el directorio\nos.chdir('modelos')\nfor img_modelo in os.listdir('.'):\n\tapi.update_with_media(img_modelo)\n\ttime.sleep(3)","sub_path":"twitter-bot.py","file_name":"twitter-bot.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"97826925","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef test(request, *args, **kwargs):\n id_list = request.GET.getlist('id')\n id_html = ''\n for id in id_list:\n id_html += '%s ' % id\n response = HttpResponse(\n content = '' % id_html,\n content_type = 'text/html',\n status = 200\n )\n return response\n","sub_path":"ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"580594730","text":"\"\"\"transactions extension client\"\"\"\n\nimport json\nimport logging\nfrom typing import Dict, Optional, Type\n\nimport attr\n\nfrom stac_api.clients.base import BaseBulkTransactionsClient, BaseTransactionsClient\nfrom stac_api.clients.postgres.session import Session\nfrom stac_api.errors import NotFoundError\nfrom stac_api.models import database, schemas\n\nlogger = logging.getLogger(__name__)\n\n\n@attr.s\nclass TransactionsClient(BaseTransactionsClient):\n \"\"\"Transactions extension specific CRUD operations\"\"\"\n\n session: Session = attr.ib(default=attr.Factory(Session.create_from_env))\n collection_table: Type[database.Collection] = attr.ib(default=database.Collection)\n item_table: Type[database.Item] = attr.ib(default=database.Item)\n\n def create_item(self, model: schemas.Item, **kwargs) -> schemas.Item:\n \"\"\"create item\"\"\"\n data = self.item_table.from_schema(model)\n with self.session.writer.context_session() as session:\n session.add(data)\n data.base_url = str(kwargs[\"request\"].base_url)\n return schemas.Item.from_orm(data)\n\n def create_collection(\n self, model: schemas.Collection, **kwargs\n ) -> schemas.Collection:\n \"\"\"create collection\"\"\"\n data = self.collection_table.from_schema(model)\n with self.session.writer.context_session() as session:\n session.add(data)\n data.base_url = str(kwargs[\"request\"].base_url)\n return schemas.Collection.from_orm(data)\n\n def update_item(self, model: schemas.Item, **kwargs) -> schemas.Item:\n \"\"\"update item\"\"\"\n with self.session.reader.context_session() as session:\n query = session.query(self.item_table).filter(\n self.item_table.id == model.id\n )\n if not query.scalar():\n raise NotFoundError(f\"Item {model.id} not found\")\n # SQLAlchemy orm updates don't seem to like geoalchemy types\n data = self.item_table.get_database_model(model)\n data.pop(\"geometry\", None)\n query.update(data)\n\n response = self.item_table.from_schema(model)\n response.base_url = str(kwargs[\"request\"].base_url)\n return schemas.Item.from_orm(response)\n return model\n\n def update_collection(\n self, model: schemas.Collection, **kwargs\n ) -> schemas.Collection:\n \"\"\"update collection\"\"\"\n with self.session.reader.context_session() as session:\n query = session.query(self.collection_table).filter(\n self.collection_table.id == model.id\n )\n if not query.scalar():\n raise NotFoundError(f\"Item {model.id} not found\")\n # SQLAlchemy orm updates don't seem to like geoalchemy types\n data = self.collection_table.get_database_model(model)\n data.pop(\"geometry\", None)\n query.update(data)\n return model\n\n def delete_item(self, id: str, **kwargs) -> schemas.Item:\n \"\"\"delete item\"\"\"\n with self.session.writer.context_session() as session:\n query = session.query(self.item_table).filter(self.item_table.id == id)\n data = query.first()\n if not data:\n raise NotFoundError(f\"Item {id} not found\")\n query.delete()\n data.base_url = str(kwargs[\"request\"].base_url)\n return schemas.Item.from_orm(data)\n\n def delete_collection(self, id: str, **kwargs) -> schemas.Collection:\n \"\"\"delete collection\"\"\"\n with self.session.writer.context_session() as session:\n query = session.query(self.collection_table).filter(\n self.collection_table.id == id\n )\n data = query.first()\n if not data:\n raise NotFoundError(f\"Collection {id} not found\")\n query.delete()\n data.base_url = str(kwargs[\"request\"].base_url)\n return schemas.Collection.from_orm(data)\n\n\n@attr.s\nclass 
BulkTransactionsClient(BaseBulkTransactionsClient):\n \"\"\"postgres bulk transactions\"\"\"\n\n session: Session = attr.ib(default=attr.Factory(Session.create_from_env))\n debug: bool = attr.ib(default=False)\n\n def __attrs_post_init__(self):\n \"\"\"create sqlalchemy engine\"\"\"\n self.engine = self.session.writer.cached_engine\n\n @staticmethod\n def _preprocess_item(item: schemas.Item) -> Dict:\n \"\"\"\n preprocess items to match data model\n # TODO: dedup with GetterDict logic (ref #58)\n \"\"\"\n item = item.dict(exclude_none=True)\n item[\"geometry\"] = json.dumps(item[\"geometry\"])\n item[\"collection_id\"] = item.pop(\"collection\")\n item[\"datetime\"] = item[\"properties\"].pop(\"datetime\")\n return item\n\n def bulk_item_insert(\n self, items: schemas.Items, chunk_size: Optional[int] = None, **kwargs\n ) -> str:\n \"\"\"\n bulk item insertion using sqlalchemy core\n https://docs.sqlalchemy.org/en/13/faq/performance.html#i-m-inserting-400-000-rows-with-the-orm-and-it-s-really-slow\n \"\"\"\n # Use items.items because schemas.Items is a model with an items key\n processed_items = [self._preprocess_item(item) for item in items.items]\n return_msg = f\"Successfully added {len(processed_items)} items.\"\n if chunk_size:\n for chunk in self._chunks(processed_items, chunk_size):\n self.engine.execute(database.Item.__table__.insert(), chunk)\n return return_msg\n\n self.engine.execute(database.Item.__table__.insert(), processed_items)\n return return_msg\n","sub_path":"stac_api/clients/postgres/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"483551001","text":"import tweepy\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nfrom textblob import TextBlob \nimport re\nimport json\nimport sqlite3\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\n# VALUES FOUND IN TWITTER DEV ACCOUNT: \n# consumer key, consumer secret, access token, access secret.\nckey=\"\"\ncsecret=\"\"\natoken=\"\"\nasecret=\"\"\n# handles OAuth twitter authorization\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\napi = tweepy.API(auth)\n\nprint(\"What would you like to name your database?\")\ndatabase = input()\n\n'''\nCreates an SQLite databse with the inputted name\n'''\nconn = sqlite3.connect(database + \".db\")\nc = conn.cursor()\nc.execute('''CREATE TABLE tweets\n (tweetText text,\n user text,\n followers integer,\n date text,\n sentiment, text)''')\nconn.commit()\nconn.close()\n\n# DB stuff\nconn = sqlite3.connect(database + \".db\")\nc = conn.cursor()\n \n# Class for defining a Tweet\nclass Tweet():\n\n def clean_tweet(self, tweet): \n ''' \n Utility function to clean tweet text by removing links, \n special characters using simple regex statements. \n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n \n # Data on the tweet\n def tweet_sentiment(self, tweet): \n ''' \n Utility function to classify sentiment of passed tweet \n using textblob's sentiment method \n '''\n analyser = SentimentIntensityAnalyzer()\n score = analyser.polarity_scores(self.clean_tweet(tweet))\n sentscore = analyser.polarity_scores(tweet) \n return sentscore['compound']\n \n \n def __init__(self, text, user, followers, date, sentiment):\n self.text = text\n self.user = user\n self.followers = followers\n self.date = date\n self.sentiment = self.tweet_sentiment(self.text)\n \n \n \n\n # Inserting that data into the DB\n def insertTweet(self):\n\n c.execute(\"INSERT INTO tweets (tweetText, user, followers, date, sentiment) VALUES (?, ?, ?, ?, ?)\",\n (self.text, self.user, self.followers, self.date, self.sentiment))\n conn.commit()\n \n#override tweepy.StreamListener to add logic to on_status\nclass MyStreamListener(tweepy.StreamListener):\n \n def clean_tweet(self, tweet): \n ''' \n Utility function to clean tweet text by removing links, \n special characters using simple regex statements. 
\n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split()) \n \n \n def get_tweet_sentiment(self, tweet): \n ''' \n Utility function to classify sentiment of passed tweet \n using textblob's sentiment method \n '''\n # create TextBlob object of passed tweet text \n analyser = SentimentIntensityAnalyzer()\n score = analyser.polarity_scores(self.clean_tweet(tweet))\n # set sentiment \n if score['compound'] > 0.05: \n return '\\033[1;35;48m Positive' # weird numbers are for color coding\n elif score['compound'] >= -.05 and score['compound'] >= -.05:\n return '\\033[1;36;48m Neutral' \n else: \n return '\\033[1;30;48m Negative'\n\n # creates listener that prints each tweet\n def on_data(self, data):\n \n # Error handling because teachers say to do this\n try:\n\n # Make it JSON\n tweet = json.loads(data)\n\n # filter out retweets\n if not tweet['retweeted'] and 'RT @' not in tweet['text']:\n\n # Get user via Tweepy so we can get their number of followers\n user_profile = api.get_user(tweet['user']['screen_name'])\n\n # assign all data to Tweet object\n tweet_data = Tweet(\n str(tweet['text']),\n tweet['user']['screen_name'],\n user_profile.followers_count,\n tweet['created_at'],\n tweet['user']['location'])\n\n # Insert that data into the DB\n tweet_data.insertTweet()\n print(\"Added to DB \\n\")\n \n sent = self.get_tweet_sentiment(tweet_data.text)\n print(\"\\033[0;30;48m This tweet is:\" + sent)\n print(tweet_data.text)\n print()#line break\n print()#line break\n\n # Error handler\n except Exception as e:\n print(e)\n pass\n\n return True\n \n \n\n \n \n \nmyStreamListener = MyStreamListener()\nmyStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)\n\nprint(\"Enter keyword to stream:\")\nkeyword = input()\n\n\nmyStream.filter(track=[keyword])\n","sub_path":"SentStream.py","file_name":"SentStream.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"320343130","text":"import logging\n\nfrom airflow.models import BaseOperator\nfrom airflow.plugins_manager import AirflowPlugin\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.hooks.postgres_hook import PostgresHook\n\nlog = logging.getLogger(__name__)\n\n\nclass CheckTableOperator(BaseOperator):\n \"\"\" operator to check table exist\"\"\"\n\n @apply_defaults\n def __init__(self, table_name, schema_name, *args, **kwargs):\n \"\"\"\n\n :param table_name: table name\n :param schema_name: database schema name\n example, how to get schema name:\n SELECT * FROM pg_tables;\n \"\"\"\n self.table_name = table_name\n self.schema_name = schema_name\n self.hook = PostgresHook()\n super(CheckTableOperator, self).__init__(*args, **kwargs)\n\n def execute(self, context):\n check_schema_sql = 'SELECT schema_name ' \\\n 'FROM information_schema.schemata ' \\\n f'WHERE schema_name=\\'{self.schema_name}\\''\n\n check_table_sql = 'SELECT table_name FROM information_schema.tables ' \\\n f'WHERE table_schema=\\'{self.schema_name}\\' ' \\\n f'AND table_name=\\'{self.table_name}\\';'\n\n schema = self.hook.get_first(check_schema_sql)\n if not schema:\n raise ValueError(f'Schema {self.schema_name} not found!')\n\n table = self.hook.get_first(check_table_sql)\n if table:\n log.info(f'Table {self.schema_name}.{self.table_name} found!')\n return True\n log.info(f'Table {self.schema_name}.{self.table_name} not found!')\n return False\n\n\nclass CountRowsOperator(BaseOperator):\n \"\"\" operator to check table exist\"\"\"\n\n @apply_defaults\n def __init__(self, table_name, *args, **kwargs):\n \"\"\"\n\n :param table_name: table name\n \"\"\"\n self.table_name = table_name\n self.hook = PostgresHook()\n super(CountRowsOperator, self).__init__(*args, **kwargs)\n\n def execute(self, context):\n sql = f'SELECT COUNT(*) FROM {self.table_name}'\n result = self.hook.get_first(sql)\n log.info(f\"Result: {result}\")\n return result\n\n\nclass PostgreSQLCustomOperatorsPlugin(AirflowPlugin):\n name = \"postgres_custom\"\n operators = [CheckTableOperator, CountRowsOperator]\n","sub_path":"plugins/gridu_airflow_plugin.py","file_name":"gridu_airflow_plugin.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"29574395","text":"import datetime\nimport random\nfrom carddirector.cd_card_reg.tests import dummy_data_utils\nfrom carddirector.cd_card_reg import repos, card_upgrade_kyc_services\nfrom carddirector.cd_card_reg.constants import card_upgrade_types\nfrom carddirector.cd_card_reg.models import CdCardProfileUpgradeInfo\nfrom carddirector.cd_master.constants import kyc_check_status_constants\nfrom carddirector.cd_utils import string_utils\nfrom carddirector.cd_utils.repo_utils import save_model\nfrom carddirector.cd_utils.test_utils import CardDirectorDjangoTest\nfrom carddirector.tps_account.constants import card_profile_types\nfrom carddirector.tps_account.repos import find_card_holder_by_card_id, find_card_profile_by_code\n\n# class IntegrationTest(unittest.TestCase):\nclass IntegrationTest(CardDirectorDjangoTest):\n\n def _create_card_upgrade_profile(self, card_id, card_profile_code, card_upgrade_type_name):\n upgrade_profile = CdCardProfileUpgradeInfo()\n\n upgrade_profile.card_holder = find_card_holder_by_card_id(card_id)\n upgrade_profile.card_profile = find_card_profile_by_code(card_profile_code)\n upgrade_profile.card_upgrade_type = repos.get_card_upgrade_type_by_name(card_upgrade_type_name)\n upgrade_profile.revision = 0\n\n upgrade_profile.title = 'Mr.'\n upgrade_profile.given_name = 'RichardUnit'\n upgrade_profile.family_name = 'Choi'\n\n upgrade_profile.birth_date = datetime.datetime.now().date()\n upgrade_profile.gender = random.choice('MF')\n upgrade_profile.address1 = string_utils.random_string(size=35)\n upgrade_profile.address2 = string_utils.random_string(size=34)\n upgrade_profile.address3 = string_utils.random_string(size=34)\n upgrade_profile.city = string_utils.random_string(size=20)\n upgrade_profile.post_code = string_utils.random_string(size=10)\n upgrade_profile.county_or_state = string_utils.random_string(size=20)\n upgrade_profile.country = 'HK' #string_utils.random_string(size=2)\n upgrade_profile.email = 'nunit@dummy.com'\n upgrade_profile.home_phone = string_utils.random_string(size=20)\n upgrade_profile.mobile_phone = string_utils.random_string(size=20)\n upgrade_profile.is_fis_submitted=False\n upgrade_profile.membership_number = string_utils.random_string(size=10)\n\n save_model(upgrade_profile)\n return upgrade_profile\n\n def _create_ndd_to_sdd_upgrade_info(self):\n return dummy_data_utils.create_dummy_card_upgrade_profile('NDD_001', card_profile_types.NDD, card_upgrade_types.NDD_TO_SDD)\n\n def test_submit_kyc_checks(self):\n upgrade_info = self._create_ndd_to_sdd_upgrade_info()\n save_model(upgrade_info)\n kyc_check_record = card_upgrade_kyc_services.submit_kyc_checks(upgrade_info)\n self.assertIsNotNone(kyc_check_record)\n self.assertEquals(kyc_check_status_constants.KYC_CHECK_STATUS_APPROVED, kyc_check_record.status.name)\n\n","sub_path":"apps/carddirector/cd_card_reg/tests/test_card_upgrade_kyc_services.py","file_name":"test_card_upgrade_kyc_services.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"405062180","text":"import requests\nimport json\n\nSRC_URL = 'http://stash.compciv.org/2017/mapzen-search-stanford-university-single.json'\n\n\n# Download and parse the data\nresp = requests.get(SRC_URL)\ntxt = resp.text\njdata = json.loads(txt)\n\nfeatures = jdata['features']\n\nbest_feature = features[0]\ngeo = best_feature['geometry']\ncoords = geo['coordinates']\nlng = coords[0]\nlat = coords[1]\n\n\nprint(\"Longitude:\", lng)\nprint(\"Latitude:\", lat)\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nrequests.get(SRC_DATA_URL)\nresp = json.loads(resp.text)\n\ndata = get_and_parse_data()\ndata.keys()\n\nval = data['type']\ntype(val)\nval\n\n\nval = data['geocoding']\ntype(val)\nval\nval['query']\n\n\nval = data['features']\ntype(val)\nlen(val)\nthing = val[0]\n\n# get geo\ngeo = thing['geometry']\ngeo.keys()\n\ncoords = geo['coordinates']\ntype(coords)\nlng = coords[0]\nlat = coords[1]\n\n\n# get properties\nprops = thing['properties']\ntype(props)\nprops.keys()\n\nprops['confidence']\nprops['country']\nprops['country']\n\"\"\"\n\n","sub_path":"docs/code/python/mapzen_geocoder/alpha_response_parsing.py","file_name":"alpha_response_parsing.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"134521886","text":"import os\n\nfrom leapp.libraries.common.utils import makedirs\n\nLEAPP_HOME = '/root/tmp_leapp_py3'\n\n\ndef apply_python3_workaround():\n py3_leapp = os.path.join(LEAPP_HOME, 'leapp3')\n makedirs(LEAPP_HOME)\n leapp_lib_symlink_path = os.path.join(LEAPP_HOME, 'leapp')\n if not os.path.exists(leapp_lib_symlink_path):\n os.symlink('/usr/lib/python2.7/site-packages/leapp', leapp_lib_symlink_path)\n with open(py3_leapp, 'w') as f:\n f_content = [\n '#!/usr/bin/python3',\n 'import sys',\n 'sys.path.append(\\'{}\\')'.format(LEAPP_HOME),\n '',\n 'import leapp.cli',\n 'sys.exit(leapp.cli.main())',\n ]\n f.write('{}\\n\\n'.format('\\n'.join(f_content)))\n os.chmod(py3_leapp, 0o770)\n","sub_path":"repos/system_upgrade/el7toel8/actors/preparepythonworkround/libraries/workaround.py","file_name":"workaround.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"649486702","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/podchecker/utils/logger_builder.py\n# Compiled at: 2019-07-14 06:02:10\n# Size of source mod 2**32: 5050 bytes\nimport logging\n\nclass LoggerBuilder(object):\n s_logger: logging.Logger\n msg_only: bool\n\n @classmethod\n def build(cls, name='default', level=logging.DEBUG, msgOnly=False):\n\n def _logger(name, level):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n return logger\n\n builder = cls()\n builder.msg_only = msgOnly\n builder.s_logger = _logger(name, level)\n return builder\n\n def addConsole(self):\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.s_logger.level)\n console_handler.setFormatter(ConsoleColoredFormatter.standardFormatter(self.msg_only))\n self.s_logger.addHandler(console_handler)\n return self\n\n def addFile(self, filepath=''):\n if len(filepath) > 0:\n file_handler = logging.FileHandler(filepath)\n file_handler.setLevel(self.s_logger.level)\n file_handler.setFormatter(FileNormalFormatter.standardFormatter(self.msg_only))\n self.s_logger.handlers = [\n file_handler] + self.s_logger.handlers\n return self\n\n def logger(self):\n return self.s_logger\n\n\nclass ConsoleColoredFormatter(logging.Formatter):\n\n @classmethod\n def standardFormatter(cls, msgOnly=False):\n if msgOnly:\n fmt = '%(message)s'\n else:\n fmt = '{1}%(asctime)s.%(msecs)03d{0} {2}[%(name)s] %(filename)s:%(lineno)s{0} [%(levelname)s] {3}:{0} %(message)s'.format(kLoggerFore.RESET, kLoggerFore.GREEN, kLoggerFore.CYAN, kLoggerFore.WHITE)\n return ConsoleColoredFormatter(fmt, '%Y-%m-%d %H:%M:%S')\n\n def format(self, record):\n if record.levelno in Level2ColorMap:\n record.levelname, record.msg = ('{}{}{}'.format(Level2ColorMap[record.levelno], x, kLoggerFore.RESET) for x in (record.levelname, record.msg))\n return super().format(record)\n\n\nclass FileNormalFormatter(logging.Formatter):\n\n @classmethod\n def standardFormatter(cls, msgOnly=False):\n if msgOnly:\n fmt = '%(message)s'\n else:\n fmt = '%(asctime)s.%(msecs)03d [%(name)s] %(filename)s:%(lineno)s [%(levelname)s] : %(message)s'\n return FileNormalFormatter(fmt, '%Y-%m-%d %H:%M:%S')\n\n\nLTCSI = '\\x1b['\n\ndef code_to_chars(code):\n return LTCSI + str(code) + 'm'\n\n\nclass LTCodes(object):\n\n def __init__(self):\n for name in dir(self):\n if not name.startswith('_'):\n value = getattr(self, name)\n setattr(self, name, code_to_chars(value))\n\n\nclass LTFore(LTCodes):\n RESET = 0\n BLACK = 30\n RED = 31\n GREEN = 32\n YELLOW = 33\n BLUE = 34\n MAGENTA = 35\n CYAN = 36\n WHITE = 37\n\n\nkLoggerFore = LTFore()\nLevel2ColorMap = {logging.CRITICAL: kLoggerFore.MAGENTA, \n logging.ERROR: kLoggerFore.RED, \n logging.WARNING: kLoggerFore.YELLOW, \n logging.INFO: kLoggerFore.WHITE, \n logging.DEBUG: kLoggerFore.BLUE}","sub_path":"pycfiles/podcraft-0.1.0-py3-none-any/logger_builder.cpython-37.py","file_name":"logger_builder.cpython-37.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"134105818","text":"import cv2\nimport numpy as np\nimport pandas as pd\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef get_features_rgb(src, i):\n src_row1 = cv2.hconcat([src, src, src, src])\n src_row2 = cv2.hconcat([src, src, src, src])\n src_row3 = cv2.hconcat([src, src, src, src])\n src_row4 = cv2.hconcat([src, src, src, src])\n src = cv2.vconcat([src_row1, src_row2, src_row3, src_row4])\n cv2.imwrite('./Frame_generated/' + i + '.jpg', src)\n src_count = src.size\n histSize = 256\n histRange = (0, 256)\n accumulate = False\n b_hist = cv2.calcHist(src, [0], None, [histSize], histRange, accumulate=accumulate)\n g_hist = cv2.calcHist(src, [1], None, [histSize], histRange, accumulate=accumulate)\n r_hist = cv2.calcHist(src, [2], None, [histSize], histRange, accumulate=accumulate)\n b_hp = [b * 100 / src_count for b in b_hist]\n g_hp = [g * 100 / src_count for g in g_hist]\n r_hp = [r * 100 / src_count for r in r_hist]\n return b_hp, g_hp, r_hp\n\ndef get_images(paths):\n images = []\n for path in paths:\n image = cv2.imread(path)\n image = cv2.resize(image, (150, 150))\n if(image is None):\n print('Could not open or find the image')\n exit(0)\n images.append(image)\n return images\n\ndf = pd.read_csv('../../Full Samples Dataset/dataset_register.csv')\npaths = df['paths']\nimages = get_images(paths)\nbgr_features = []\n\nfor i, image in enumerate(images):\n b, g, r = get_features_rgb(image, str(i))\n bgr_feature = b + g + r\n bgr_features.append(bgr_feature)\nbgr_features = np.array(bgr_features)\nbgr_features = np.squeeze(bgr_features)\nnp.savetxt('./image_hist_features.csv', bgr_features, delimiter=\",\", header='')\nprint('Success...')","sub_path":"Train Model/SVR_REGRESSOR/generate_features.py","file_name":"generate_features.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"18123306","text":"\"\"\"Plot profiles of the velocity at different locations in the x direction.\"\"\"\n\nfrom matplotlib import pyplot\nimport pathlib\n\nimport rodney\n\n\ndef get_velocity_profiles(datadir,config, time, xlocs):\n \"\"\"Get the velocity profiles at given time and x locations.\"\"\"\n profiles = {'u': {'locs': None, 'vals': []},\n 'v': {'locs': None, 'vals': []},\n 'w': {'locs': None, 'vals': []}}\n get_profile = {'u': rodney.get_vertical_profile_xy,\n 'v': rodney.get_vertical_profile_xy,\n 'w': rodney.get_spanwise_profile_xz}\n S = config.S # spanwise length\n yzloc = {'u': S / 2, 'v': S / 2, 'w': 0.0}\n for name in profiles.keys():\n for iloc, xloc in enumerate(xlocs):\n filepath = datadir / f'probe{iloc + 1}-{name}.h5'\n locs, vals = get_profile[name](filepath, name, time,\n xloc, yzloc[name])\n profiles[name]['vals'].append(vals)\n profiles[name]['locs'] = locs\n return profiles\n\n\nargs = rodney.parse_command_line()\nmaindir = pathlib.Path(__file__).absolute().parents[1]\n\nif args.save_figures:\n # Create directory for output figures.\n figdir = maindir / 'figures'\n figdir.mkdir(parents=True, exist_ok=True)\n figname_suffix = '_compare_dx_dt'\n\ntime = 6.528332 # time value to get data over the last cycle\nxlocs = [1.0, 2.0, 3.0, 4.0, 5.0] # locations along the x-direction\n\nall_profiles = [] # profiles for all simulations\nplot_kwargs = [] # parameters for pyplot.plot function\n\n# Compute velocity profiles obtained on nominal grid.\nlabel = 'Nominal'\ndatadir = maindir / 'run3' / 'output'\nconfig = rodney.WingKinematics(Re=200.0, St=0.6, psi=90.0, nt_period=2000)\nall_profiles.append(get_velocity_profiles(datadir, config, time, xlocs))\nplot_kwargs.append(dict(label=label, color='C3', linestyle='-'))\n\n# Compute velocity profiles obtained on finer grid.\nlabel = 'Finer in space'\ndatadir = maindir / 'run4' / 'output'\nconfig = rodney.WingKinematics(Re=200.0, St=0.6, psi=90.0, nt_period=2000)\nall_profiles.append(get_velocity_profiles(datadir, config, time, xlocs))\nplot_kwargs.append(dict(label=label, color='black', linestyle='--'))\n\n# Compute velocity profiles obtained on grid with coarser time-step size.\nlabel = 'Coarser in time'\ndatadir = maindir / 'run6' / 'output'\nconfig = rodney.WingKinematics(Re=200.0, St=0.6, psi=90.0, nt_period=1000)\nall_profiles.append(get_velocity_profiles(datadir, config, time, xlocs))\nplot_kwargs.append(dict(label=label, color='C0', linestyle='--'))\n\n# Set default font style and size for Matplotlib figures.\npyplot.rc('font', family='serif', size=12)\n\n# Plot the x-velocity profiles in the x/y plane at z=S/2.\nfig, ax = pyplot.subplots(figsize=(6.0, 5.0))\nax.set_xlabel('x/c')\nax.set_ylabel('y/c')\nax.axhline(0.0, color='grey', linestyle='--')\nfor iloc, xloc in enumerate(xlocs):\n for profiles, kwargs in zip(all_profiles, plot_kwargs):\n if iloc > 0:\n kwargs = kwargs.copy()\n kwargs['label'] = None\n ax.plot(xloc + profiles['u']['vals'][iloc] - config.U_inf,\n profiles['u']['locs'], **kwargs)\nif args.extra_data:\n # Add digitized data from Li & Dong (2016).\n ax.scatter(*rodney.li_dong_2016_load_ux_profiles(),\n label='Li & Dong (2016)',\n s=10, marker='o', edgecolor='black', color='none')\nax.legend(frameon=False, prop={'size': 10}, scatterpoints=3)\nax.set_xlim(-2.0, 6.0)\nax.set_ylim(-3.0, 3.0)\nfig.tight_layout()\nif args.save_figures:\n # Save Matplotlib figure to PNG file.\n filepath = figdir / f'ux_profiles{figname_suffix}.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\n# Plot the 
y-velocity profiles in the x/y plane at z=S/2.\nfig, ax = pyplot.subplots(figsize=(6.0, 5.0))\nax.set_xlabel('x/c')\nax.set_ylabel('y/c')\nax.axhline(0.0, color='grey', linestyle='--')\nfor iloc, xloc in enumerate(xlocs):\n for profiles, kwargs in zip(all_profiles, plot_kwargs):\n if iloc > 0:\n kwargs = kwargs.copy()\n kwargs['label'] = None\n ax.plot(xloc + profiles['v']['vals'][iloc],\n profiles['v']['locs'], **kwargs)\nif args.extra_data:\n # Add digitized data from Li & Dong (2016).\n ax.scatter(*rodney.li_dong_2016_load_uy_profiles(),\n label='Li & Dong (2016)',\n s=10, marker='o', edgecolor='black', color='none')\nax.set_xlim(-2.0, 6.0)\nax.set_ylim(-3.0, 3.0)\nfig.tight_layout()\nif args.save_figures:\n # Save Matplotlib figure to PNG file.\n filepath = figdir / f'uy_profiles{figname_suffix}.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\n# Plot the z-velocity profiles in the x/z plane at y=0.\nfig, ax = pyplot.subplots(figsize=(6.0, 5.0))\nax.set_xlabel('x/c')\nax.set_ylabel('z/c')\nax.axhline(0.0, color='grey', linestyle='--')\nfor iloc, xloc in enumerate(xlocs):\n for profiles, kwargs in zip(all_profiles, plot_kwargs):\n if iloc > 0:\n kwargs = kwargs.copy()\n kwargs['label'] = None\n ax.plot(xloc + profiles['w']['vals'][iloc],\n profiles['w']['locs'] - config.S / 2, **kwargs)\nif args.extra_data:\n # Add digitized data from Li & Dong (2016).\n ax.scatter(*rodney.li_dong_2016_load_uz_profiles(),\n label='Li & Dong (2016)',\n s=10, marker='o', edgecolor='black', color='none')\nax.set_xlim(-2.0, 6.0)\nax.set_ylim(-2.0, 2.0)\nfig.tight_layout()\nif args.save_figures:\n # Save Matplotlib figure to PNG file.\n filepath = figdir / f'uz_profiles{figname_suffix}.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\nif args.show_figures:\n # Display Matplotlib figure.\n pyplot.show()\n","sub_path":"runs/independence/scripts/plot_velocity_profiles_compare_dx_dt.py","file_name":"plot_velocity_profiles_compare_dx_dt.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"386776573","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n Class that reads the off event data and calculates the off-time per modem.\r\n\r\n\"\"\"\r\n\r\n\r\n__author__ = \"Matthias.Stuebner\"\r\n__copyright__ = \"Copyright 2012, Matthias.Stuebner\"\r\n__credits__ = [\"Matthias.Stuebner\"]\r\n__license__ = \"GPL\"\r\n__version__ = \"$Rev$\"\r\n__maintainer__ = \"Matthias.Stuebner\"\r\n__email__ = \"Matthias.Stuebner|kabeldeutschland.de\"\r\n__status__ = \"Dev\"\r\n\r\n\r\nfrom datetime import date, timedelta\r\n\r\nimport sna_offevent_based as sna_offevent_based\r\nfrom KPI.service_non_availability_definitions import kpi_service_non_availability\r\nfrom Tools.date_tools import find_last_monday \r\nimport Tools.pickle_file_helper as ph\r\n\r\n\r\ndef _analysis_function(node, data_kinds, data, flags):\r\n '''\r\n Funktion shall detect whether or not offtime is caused by absence of customer:\r\n - clean up offevent data (of duplicate events)\r\n - look through the off-data to detect gaps longer than vac_duration (ie. 2 days)\r\n - create nodestates='vacationer' with priority and timestamps (which?)\r\n \r\n :param node:\r\n :param data_kinds:\r\n :param data:\r\n :param flags:\r\n '''\r\n\r\n# use_data = data[0][:]\r\n# use_flags = flags[0][:]\r\n# frac = data_kinds[0][1]\r\n# #use_flags = [analysis.get_minflag(flag) for flag in use_flags]\r\n#\r\n# \r\n# onoff = data[0][:]\r\n# sk_tickets = data[1][:]\r\n# #sk_tickets = data_kinds[1][0]\r\n# \r\n# \"\"\"\r\n# data = [[onoff_liste],[sk_ticket_liste]]\r\n# \"\"\"\r\n# global first\r\n# print('>>>>>>>>>>>>>>>>>> Number of nodes: {}'.format(len(node)))\r\n# \r\n# if first < 11:\r\n# print(node)\r\n# print(data_kinds)\r\n# print('OnOff: {}'.format(onoff))\r\n# print('SK_Tickets: {}'.format(sk_tickets))\r\n# print(flags)\r\n# print('----')\r\n#\r\n# first += 1\r\n\r\n \"\"\"\r\n Kunde 431799000 - 204721 # Kunde kundennummer - grundstuecks_id\r\n \r\n ['onoff']\r\n \r\n [\r\n [\r\n Datapoint(\r\n data=None, \r\n value=u'OFF', \r\n starttime=datetime.datetime(2013, 8, 26, 20, 43, 8, 772000), \r\n stoptime=datetime.datetime(2013, 8, 26, 20, 43, 8, 772000), \r\n node_name=u'MAC c44619dc488c'), \r\n Datapoint(\r\n data=None, \r\n value=u'ON', \r\n starttime=datetime.datetime(2013, 8, 26, 20, 43, 23, 42000), \r\n stoptime=datetime.datetime(2013, 8, 26, 20, 43, 23, 42000), \r\n node_name=u'MAC c44619dc488c')\r\n ]\r\n ]\r\n \r\n [\r\n [\r\n [], []\r\n ]\r\n ]\r\n \"\"\"\r\n\r\n first = False\r\n \r\n \r\n# \r\n# use_flags = [analysis.get_minflag(flag) for flag in use_flags]\r\n# \r\n# for i, entry in enumerate(use_data):\r\n# totalon += 1 * use_flags[i] if entry.value == 'ON' else 0\r\n# totalon /= frac * week_multiple\r\n# percent = max(0, 1 - totalon ** 0.55 / 100)\r\n# count_text = 'einige'\r\n# if percent >= gw2:\r\n# count_text = 'wenige'\r\n# elif percent <= gw1:\r\n# count_text = 'viele'\r\n# desc_coll_short = (u'Der Kunde hatte {} potentiell durch Störungen vor Ort hervorgerufene '\r\n# u'Verbindungsabbrüche in den letzten {} Tagen.'.format(count_text, period.days))\r\n#\r\n# return node, percent, desc_coll_short\r\n return 1,1,1\r\n\r\nif __name__ == \"__main__\":\r\n\r\n data = ph.pickle_file()._read_pickle('vacationer_data.pcl')\r\n\r\n# nodes, data_kinds, data, flags = data\r\n#\r\n# print('Number of nodes: {}'.format(len(nodes)))\r\n# cnt = 0\r\n# resultlist = []\r\n# \r\n# for i, node in enumerate(nodes):\r\n# if len(data[i][0]) > 0:\r\n# cnt += 1\r\n#\r\n# resultlist.append(_analysis_function(\r\n# 
node.name, \r\n# data_kinds, \r\n# data[i], \r\n# flags[i]\r\n# )\r\n# )\r\n#\r\n# print('Customers with OnOff: {}'.format(cnt))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# max_weeks = 10 # 10 weeks max\r\n# max_days = (7 * max_weeks) \r\n# \r\n# vacation_days = 2\r\n# num_weeks = 1\r\n# \r\n# # select data for dates <= end_date (so end_date is included)\r\n# end_date = find_last_monday(date.today() - timedelta(days=-1))\r\n## start_date = end_date - timedelta(days=(num_weeks if num_weeks < max_weeks else max_weeks) * 7)\r\n# # for testing\r\n# start_date = end_date - timedelta(days=1)\r\n# \r\n# sob = sna_offevent_based.sna_offevent_based(\r\n# start_date=start_date, \r\n# end_date=end_date, \r\n# off_duration=vacation_days\r\n# )\r\n#\r\n# # Get off-events by cmmac, cleans events and removes OFF-gaps longer than vacation_days\r\n# offdict = sob.get_offdata_by_cmmac()\r\n \r\n# startDatum = '07.01.2013' # Data since this date will be used\r\n#\r\n# kpi = kpi_service_non_availability()\r\n# res = kpi.kpi_service_non_availability_actual_weekly(start_date=startDatum,\r\n# without_KIP_SDSM=False)\r\n\r\n#res = []\r\n#print('Len nodes: ' + str(len(nodes)))\r\n#\r\n#for i,node in enumerate(nodes):\r\n# \r\n# res += [[node, calc_off_fraction(events=data[i][0], \r\n# weights=flags[i][0], \r\n# start=(datetime.datetime.now()-datetime.timedelta(seconds=num_days)), \r\n# stop=datetime.datetime.now(), \r\n# numbins=1)[0]]]\r\n#\r\n# \r\n## [mac_adress, off time in %]\r\n#\r\n#for node in res:\r\n# print(node)","sub_path":"naos-python/Source/MS/SNA/sna_offevents.py","file_name":"sna_offevents.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"538451951","text":"\"\"\"Scrapes tweets from Twitter and svaes the results in a dictionary that is the pickled\"\"\"\n\nfrom pattern.en import *\nfrom pattern.web import Twitter\nimport pickle\nMY_LICENSE = ('WgRmLC6IAhx27bRIG54ngxaRp', 'ldjhjaWF2G6jtPlg3mudc1IZV0V7PN7YZaSjuDqlw7QpvwF7ra', ('700461301575905284-PMu8wIBN2Qt1dW2T1nrytKjC0GYPgF3', 'OszrgU2gVUyBuNAmQc70CAARcpbqvu26DKwEKE0lAQ1ZG'))\n\n# creates dictionary with weather conditions (ex. #snow) as keys and a list of 1000 tweet strings as the value\n\ndictionary = {}\nweather_conditions = ['#snow', '#rain', '#cold', '#storm', \"#blizzard\", '#sun', '#warm', '#drizzle', '#cloudy']\nt = Twitter(license = MY_LICENSE)\n\nfor hashtag in weather_conditions:\n\tdictionary[hashtag] = []\n\tfor tweet in t.search(hashtag, start = None, count = 1000):\n\t\tdictionary[hashtag].append(tweet.text)\n\n# pickles the tweet dictionary\n\nf = open('weather2.pickle', 'w')\npickle.dump(dictionary, f)\nf.close()","sub_path":"tweet_miner.py","file_name":"tweet_miner.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"649915892","text":"import os, sys\nfrom osgeo import gdal\n\n'''this bash script clips a tiff (from gdal_learn.py) using as inputs: west, east,\nnorth and south extents'''\n\ndset = gdal.Open(sys.argv[1])\nwest_ext, east_ext, north_ext, south_ext = int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5])\ntilesize = 30\ngdaltransString = 'gdal_translate -of GTiff -srcwin '+str(west_ext)+', '+str(north_ext)+', '+str(east_ext-west_ext)+', ' \\\n +str(south_ext-north_ext)+' '+sys.argv[1]+' landF_BRB_cropped.tif'\nos.system(gdaltransString)\n\n# gdaltranString = \"gdal_translate -of GTIFF -srcwin \"+str(i)+\", \"+str(j)+\", \"+str(w)+\", \" \\\n# +str(h)+\" \" + sys.argv[1] + \" \" + sys.argv[2] + \"_\"+str(i)+\"_\"+str(j)+\".tif\"\n# os.system(gdaltranString)\n","sub_path":"Crop_Raster_CL.py","file_name":"Crop_Raster_CL.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"350409180","text":"# game screen class\nimport pygame\nfrom state import State\nfrom button import Button\nfrom buttonListener import ButtonListener\nfrom stateHomeScreen import StateHomeScreen\nfrom constants import Constants\n\nclass StateGameScreen(State, ButtonListener):\n HOME_BUTTON = 0\n\n def __init__(self, screen, inputManager, anteater):\n State.__init__(self, screen, inputManager)\n pygame.display.set_caption(\"Game Screen\")\n\n self.anteater = anteater\n\n self.screen.fill(Constants.SCREEN_COLOR)\n\n #Used for all fonts\n self.text = pygame.font.Font('freesansbold.ttf', 35)\n\n #buttons\n self.home_button = self.create_button(self.HOME_BUTTON, Constants.HOME, (300,10,135,50))\n self.home_button.font_object = self.text\n self.home_button.set_padding(25,15)\n\n self.next_state = \"Continue\"\n \n\n def receiveInput(self, event):\n State.receiveInput(self, event)\n\n def destroy(self):\n self.inputManager.detach(self.home_button)\n\n def update(self):\n State.update(self)\n self.home_button.update()\n self.anteater.update()\n return self.next_state\n\n def render(self):\n State.render(self)\n self.display_background_picture(Constants.BACKGROUND_PICTURE)\n self.display_message('2350', (25,25), Constants.BLUE)\n self.home_button.render(self.screen)\n self.anteater.render(self.screen)\n \n\n def display_message(self, message, topleft, color):\n display_message = self.text.render(message, False, color)\n display_message_rect = display_message.get_rect()\n display_message_rect.topleft = topleft\n self.screen.blit(display_message, display_message_rect)\n\n def display_character(self, image, coords):\n anteater = pygame.image.load(image)\n anteater = pygame.transform.scale(anteater, (75,75))\n self.screen.blit(anteater, coords)\n\n def display_background_picture(self, image):\n width, height = self.screen.get_width(), self.screen.get_height()\n background = pygame.image.load(image)\n background = pygame.transform.scale(background, (width, height))\n background_rect = background.get_rect()\n self.screen.blit(background, background_rect)\n \n\n def create_button(self, button_ID, message, rect):\n button = Button(button_ID, self, message, Constants.BLUE, rect)\n self.inputManager.attach(button)\n button.font_object = self.text\n button.set_color(Constants.BLUE, Constants.YELLOW, Constants.WHITE)\n return button\n\n def clickPerformed(self, button_ID):\n if (button_ID == self.HOME_BUTTON):\n self.next_state = Constants.STATE_HOME\n \n","sub_path":"stateGameScreen.py","file_name":"stateGameScreen.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"611175477","text":"\"\"\"This module contains simple helper functions \"\"\"\nfrom __future__ import print_function\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport os\nfrom cv2 import applyColorMap, COLORMAP_JET \n\ndef dcp_norm(I, vis=False, flag=\"stbd\", pair=0):\n if pair==1:\n a,b,c,d = 210.5, -0.006692, 57.32, -0.001335 # left pair left\n elif pair==0: \n a,b,c,d = 229.4, -0.005883, 81.63, -0.001001 # left overall\n elif pair==2:\n a,b,c,d = 248.3, -0.005074, 105.9, -0.0006677 # left pair right\n \n h,w = I.shape\n if flag == \"port\":\n z = np.arange(w-1,-1,-1).reshape(1,-1)\n elif flag == \"stbd\":\n z = np.arange(w).reshape(1,-1)\n elif flag==\"whole\":\n z1 = np.arange(w//2-1,-1,-1).reshape(1,-1)\n z2 = np.arange(w//2).reshape(1,-1)\n z = np.hstack((z1,z2))\n else:\n print(\"wrong flag!\")\n return\n f = 1 / (a * np.exp(b*z) + c * np.exp(d*z))\n f = np.repeat(f,h,axis=0)\n return I*f\n\ndef multi_tensor2im(input_image, imtype=np.float64):\n \"\"\"\"Converts a 4D Tensor array into a numpy array. For cal scores\n\n Parameters:\n input_image (tensor) -- the input image tensor array\n imtype (type) -- the desired type of the converted numpy array\n \"\"\"\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n \n print_multi_numpy(image_numpy, val=True, shp=True)# max 255.0 min 0.0\n \n image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1)) + 1.0) / 2.0 * 255.0 # post-processing: tranpose and scaling\n \n else: # if it is a numpy array, do nothing\n image_numpy = input_image\n return image_numpy.astype(imtype)\n\n\ndef tensor2im_raw(input_image, imtype=np.float64):\n \"\"\"\"Converts a Tensor array into a numpy image array.\n\n Parameters:\n input_image (tensor) -- the input image tensor array\n imtype (type) -- the desired type of the converted numpy array\n \"\"\"\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n # print(image_numpy.shape)\n # image_numpy = np.transpose(image_numpy, (1, 2, 0)) \n \n return image_numpy.astype(imtype)\ndef tensor2im_raw_sss(input_image, imtype=np.float64):\n \"\"\"\"Converts a Tensor array into a numpy image array.\n\n Parameters:\n input_image (tensor) -- the input image tensor array\n imtype (type) -- the desired type of the converted numpy array\n \"\"\"\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n # print(image_numpy.shape)\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\n \n return image_numpy.astype(imtype)\ndef tensor2im(input_image, imtype=np.uint8, keep_grayscale=False, color_map=False):\n \"\"\"\"Converts a Tensor array into a numpy image array.\n\n Parameters:\n input_image (tensor) -- the input image tensor array\n imtype (type) -- the desired type of the converted numpy array\n \"\"\"\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a 
variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n if image_numpy.shape[0] == 1: # grayscale to RGB\n if not keep_grayscale:\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n# print(\"before post-processing\\n\")\n# print_numpy(image_numpy, val=True, shp=True)\n \n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1.0) / 2.0 * 255.0 # post-processing: tranpose and scaling\n \n if color_map:\n# import matplotlib.pyplot as plt\n# cm = plt.get_cmap('jet')\n# print('before colormap: ')\n# print_numpy(image_numpy, val=True, shp=True)\n# image_numpy = cm(image_numpy[:,:,0])[:,:,:3]\n image_numpy = applyColorMap(image_numpy.astype(np.uint8), COLORMAP_JET)\n# print('after colormap \\n')\n# print_numpy(image_numpy, val=True, shp=True)\n \n else: # if it is a numpy array, do nothing\n image_numpy = input_image\n return image_numpy.astype(imtype)\n\n\ndef diagnose_network(net, name='network'):\n \"\"\"Calculate and print the mean of average absolute(gradients)\n\n Parameters:\n net (torch network) -- Torch network\n name (str) -- the name of the network\n \"\"\"\n mean = 0.0\n count = 0\n for param in net.parameters():\n if param.grad is not None:\n mean += torch.mean(torch.abs(param.grad.data))\n count += 1\n if count > 0:\n mean = mean / count\n print(name)\n print(mean)\n\n\ndef save_numpy_array(image_numpy, image_path):\n \"\"\"Save a numpy array to the disk\n\n Parameters:\n image_numpy (numpy array) -- input numpy array\n image_path (str) -- the path of the image\n \"\"\"\n\n np.save(image_path, image_numpy)\n\ndef save_image(image_numpy, image_path, aspect_ratio=1.0, color_map=False):\n \"\"\"Save a numpy image to the disk\n\n Parameters:\n image_numpy (numpy array) -- input numpy array\n image_path (str) -- the path of the image\n \"\"\"\n if color_map:\n import matplotlib.pyplot as plt\n cm = plt.get_cmap('jet')\n colored_image = cm(image_numpy[:,:,0])[:,:,:3]\n# print_numpy(colored_image, val=True, shp=True) # max 1.0 min 0.0 shape (256,256,3)\n \n image_pil = Image.fromarray((colored_image*255.).astype(np.uint8))\n else:\n# print_numpy(image_numpy, val=True, shp=True)\n image_pil = Image.fromarray(image_numpy)\n h, w, _ = image_numpy.shape\n\n if aspect_ratio > 1.0:\n image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)\n if aspect_ratio < 1.0:\n image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)\n image_pil.save(image_path)\n\n\ndef print_numpy(x, val=True, shp=False):\n \"\"\"Print the mean, min, max, median, std, and size of a numpy array\n\n Parameters:\n val (bool) -- if print the values of the numpy array\n shp (bool) -- if print the shape of the numpy array\n \"\"\"\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n \ndef print_multi_numpy(x_, val=True, shp=False):\n \"\"\"Print the mean, min, max, median, std, and size of a numpy array\n\n Parameters:\n val (bool) -- if print the values of the numpy array\n shp (bool) -- if print the shape of the numpy array\n \"\"\"\n for i in range(x_.shape[0]):\n x = x_[i].astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n\n\ndef 
mkdirs(paths):\n \"\"\"create empty directories if they don't exist\n\n Parameters:\n paths (str list) -- a list of directory paths\n \"\"\"\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)\n\n\ndef mkdir(path):\n \"\"\"create a single empty directory if it didn't exist\n\n Parameters:\n path (str) -- a single directory path\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"541123190","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (C) 2011 Midokura KK\n# Copyright (C) 2011 Nicira, Inc\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"VIF drivers for libvirt.\"\"\"\n\nfrom nova import context\nfrom nova import exception\nfrom nova import flags\nfrom nova.network import linux_net\nfrom nova.network import quantumv2\nfrom nova.openstack.common import cfg\nfrom nova.openstack.common import importutils\nfrom nova.openstack.common import log as logging\nfrom nova import utils\nfrom nova.virt import netutils\nfrom nova.virt import vif\n\nfrom nova.virt.libvirt import config\n\nLOG = logging.getLogger(__name__)\n\nmeta_vif_opts = [\n cfg.StrOpt('meta_flavor_driver_mappings',\n help='Mapping between flavor and BridgeDriver')\n]\n\nFLAGS = flags.FLAGS\nFLAGS.register_opts(meta_vif_opts)\n\n\nclass MetaBridgeDriver(vif.VIFDriver):\n def __init__(self):\n ctxt = context.get_admin_context()\n self.quantum = quantumv2.get_client(ctxt)\n self.flavor_driver_map = {}\n for flavor, driver_name in [\n driver_set.split(':')\n for driver_set in\n FLAGS.meta_flavor_driver_mappings.split(',')]:\n self.flavor_driver_map[flavor] =\\\n self._load_driver(driver_name)\n\n def _get_driver_by_network_id(self, network_id):\n network = self.quantum.show_network(network_id)\n if not 'network' in network:\n raise Exception('Specified network is not found.')\n flavor = network['network']['flavor:network']\n return self.flavor_driver_map[flavor]\n\n def plug(self, instance, vif):\n network, _ = vif\n driver = self._get_driver_by_network_id(network['id'])\n return driver.plug(instance, vif)\n\n def unplug(self, instance, vif):\n network, _ = vif\n driver = self._get_driver_by_network_id(network['id'])\n return driver.unplug(instance, vif)\n\n def _load_driver(self, driver_provider):\n LOG.debug(\"Driver location:%s\", driver_provider)\n # If the plugin can't be found let them know gracefully\n try:\n LOG.info(\"Loading Driver: %s\" % driver_provider)\n plugin_klass = importutils.import_class(driver_provider)\n except ClassNotFound:\n LOG.exception(\"Error loading driver\")\n raise Exception(\"driver_provider not found. You can install a \"\n \"Driver with: pip install \\n\"\n \"Example: pip install quantum-sample-driver\")\n return plugin_klass()\n","sub_path":"metaplugin/vif.py","file_name":"vif.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"639498505","text":"# coding: utf-8\r\n# YYeTsBot - messenger.py\r\n# 2019/11/6 12:42\r\n\r\n__author__ = 'Benny '\r\n\r\nfrom config import TOKEN\r\nimport telebot\r\nimport sys\r\n\r\nbot = telebot.TeleBot(TOKEN)\r\n\r\n\r\ndef send_msg(argv):\r\n uid = argv[1]\r\n msg = argv[2]\r\n bot.send_chat_action(uid, 'typing')\r\n bot.send_message(uid, msg, parse_mode='html')\r\n\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 3:\r\n print(\"Need ID and message!\")\r\n sys.exit(2)\r\n\r\n send_msg(sys.argv)\r\n","sub_path":"messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"18323086","text":"import sys\nimport json\nimport base64\nimport requests\nimport csv\n\n\nfile = open('/Users/edmondpetres/Cphbusiness/HackerNewsG7/utils/failed_imports.csv', 'a')\ndef log_user(user):\n file.write(user['username'] + ',' + user['password'])\n\ndef sendRequests(user):\n try:\n headers = {'Connection': 'close',\n 'Content-Type': 'application/json'}\n response = requests.post(receiver, data=json.dumps(user),\n headers=headers,\n timeout=1)\n \n if response.status_code != 200:\n print('Hov, I would like a 200 status code. Your system'\n 'returned {}'.format(response.status_code))\n else :\n response.close()\n return True\n\n\n except requests.exceptions.ConnectionError as e:\n log_user(user)\n print('Hov, it seems I cannot connect to your system!')\n except requests.exceptions.ReadTimeout as e:\n log_user(user)\n print('Hov, your system seems to be a bit slow in responding!')\n except Exception as e:\n log_user(user)\n print('Hov, something else went wrong!')\n print(e)\n\n\nif __name__ == '__main__':\n host = sys.argv[1]\n\n receiver = '{}/user/registration'.format(host)\n\n with open('/Users/edmondpetres/Cphbusiness/HackerNewsG7/utils/untracked_users.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = -1\n success_count = 0\n\n for row in csv_reader:\n if line_count == -1:\n print('Column names are {\", \".join(row)}\\n')\n line_count += 1\n else:\n last_line_processed = 0\n if line_count > last_line_processed:\n print('Processing line {} - user {}'.format(line_count, row[0]))\n success = sendRequests({'username': row[0], 'password': row[1]})\n \n if success == True:\n success_count += 1\n \n line_count += 1\n \n print(f'Successfully imported {success_count} users out of {line_count} rows.')\n\n\n","sub_path":"utils/users/import_users.py","file_name":"import_users.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"328828484","text":"# -*- coding: utf-8 -*-\n\nfrom os.path import join\n\nfrom numpy import pi, empty, zeros, array, isinf, diff, cumsum\n\nfrom pyrail.testers import gyro\nfrom pyrail.parsers import can_txt as ct\nfrom pyrail.parsers import server_conf as sc\nfrom pyrail.common.smooth import gauss_smooth_scipy as gauss_smooth\nfrom numpy import roll as shift\n#from pyrail.common.shift import shift\nfrom pyrail.common.bunch import Bunch\nfrom pyrail.common.diff import win_diff\n\n\nclass __ConfigAdapter__:\n\n def __init__(self, conf, cal):\n\n if type(conf) is str:\n self.conf = sc.load_server_conf(conf, key_mode='lower')\n else:\n self.conf = conf\n\n if type(cal) is str:\n self.cal = sc.load_server_cal(cal, key_mode='lower')\n else:\n self.cal = cal\n\n self._dl = None\n self._kdz = None\n self._z_smd = None\n self._filt_params = None\n\n def __inds__():\n raise NotImplementedError()\n\n def lir_bits(self):\n raise NotImplementedError()\n\n def lir_radius(self):\n raise NotImplementedError()\n\n def kdz(self):\n if self._kdz is None:\n self._kdz = self.conf.coefficients.kdz\n #return self.conf.coefficients.kdz\n return self._kdz\n def set_kdz(self, val):\n self._kdz = val\n\n def input_coeff(self):\n return self.conf.nodes['lir.inputcoef']\n\n def dir_event(self):\n raise NotImplementedError\n\n def filt_params(self, key):\n win = self.conf.system.filtering[self.__inds__()[key]]\n # min_delay = min(min(self.conf.system.extradelayfwd), min(self.conf.system.extradelayrev))\n delay_fwd = self.conf.system.extradelayfwd[self.__inds__()[key]] # - min_delay\n delay_rev = self.conf.system.extradelayrev[self.__inds__()[key]] # - min_delay\n return win, delay_fwd, delay_rev\n\n def z_smd(self):\n return [self.conf.filtering[i] for i in self.__inds__()['z']]\n\n def set_z_smd(self, val):\n self._z_smd = val\n\n def z_bits(self):\n return self.lir_bits()[:8]\n\n def z_base(self):\n return self.cal.n[:8]\n\n def z_coeff(self):\n res = [1 << b for b in self.lir_bits()[:8]]\n result = [2 * pi * r / rs * inp * zc\n for r, rs, inp, zc in\n zip(self.lir_radius()[:8], res,\n self.input_coeff()[:8], self.conf.coefficients.zcoeff[:8])]\n return [result[i] if i < 4 else -result[i] for i in range(8)]\n\n def trolley_for_level(self):\n raise NotImplementedError()\n\n def level_shift(self):\n return self.cal.dz\n\n def dl(self):\n if self._dl is None:\n print(\"dl()\", id(self))\n self._dl = self.cal.dx\n return self._dl\n\nclass __ConfigAdapterSyn__(__ConfigAdapter__):\n\n def __init__(self, conf, cal):\n super(__ConfigAdapterSyn__, self).__init__(conf, cal)\n\n def lir_bits(self):\n return [13] * 16\n\n def lir_radius(self):\n return [35] * 16\n\n def __inds__(self):\n return {\n 'z': list(range(8, 16)),\n 'acs': 31,\n 'gyr_angle': 32,\n 'gyr_sm': 33,\n 'speed_corr': 34,\n 'hor_gyro': 37,\n 'speed': 30}\n\n def dir_event(self):\n node_id, mask, mode = self.conf.events['8']\n node_id |= 0x280\n return node_id, mask, mode\n\n def trolley_for_level(self):\n if 'trolleyforlevel' in self.conf:\n return self.conf.system.trolleyforlevel - 1\n else:\n return 1\n\n\nclass __ConfigAdapter7374__(__ConfigAdapter__):\n\n def __init__(self, conf, cal):\n super(__ConfigAdapter7374__, self).__init__(conf, cal)\n\n def lir_bits(self):\n return [12] * 16\n\n def lir_radius(self):\n return [35] * 16\n\n def __inds__(self):\n return {\n 'z': list(range(8, 16)),\n 'acs': 31,\n 'gyr_angle': 32,\n 'gyr_sm': 33,\n 'speed_corr': 34,\n 'speed': 30}\n\n def trolley_for_level(self):\n return 1\n\nclass __Level__:\n\n def 
__init__(self, config_adapter, gyro_calc, angle_delta_correct):\n self.__config_adapter__ = config_adapter\n self.__gyro__ = gyro_calc\n self.__angle_delta_correct__ = angle_delta_correct\n\n def __direction__(self, can, conf):\n raise NotImplementedError()\n\n def __smd__(self, values, win):\n return gauss_smooth(values, win, 0.5)\n\n def __norm__(self, i, j, bits):\n base = 1 << (bits - 1)\n mask = base * 2 - 1\n a = base - i\n b = (j + a) & mask\n return b - base\n\n def __shift__(self, values, direction, delay_fwd, delay_rev):\n dir_fwd = direction\n dir_rev = True - direction\n result = empty(len(values))\n result_fwd = shift(values, delay_fwd)\n result_rev = shift(values, delay_rev)\n result[dir_fwd] = result_fwd[dir_fwd]\n result[dir_rev] = result_rev[dir_rev]\n return result\n\n def __filt__(self, values, direction, win, delay_fwd, delay_rev):\n return self.__shift__(self.__smd__(values, win), direction, delay_fwd, delay_rev)\n\n def __time__(self, can):\n ts = array([m.time_stamp for m in can.messages if m.id == 0x208])\n t = self.__norm__(ts[0], ts, 32) * 1e-6\n return t\n\n def __speed__(self, can, conf):\n raise NotImplementedError()\n\n def set_data(self, can, conf, cal):\n self.can = can\n self.gyr = self.__gyro__(conf, cal, can)\n self.conf = self.__config_adapter__(conf, cal)\n self.inds = can.get_indexes(0x1A9, 8)\n self.direction = self.__direction__(can, self.conf)\n\n def __call__(self):\n\n z = [\n self.__norm__(raw, base, bits) * coeff\n if raw is not None\n else zeros(len(self.inds))\n for raw, base, bits, coeff in\n zip(self.can.z, self.conf.z_base(), self.conf.z_bits(), self.conf.z_coeff())\n ]\n\n win, delay_fwd, delay_rev = self.conf.filt_params('z')\n z = [\n self.__filt__(z[i], self.direction, win[i], delay_fwd[i], delay_rev[i])\n for i in range(8)\n ]\n\n dz = [(z[i + 4] - z[i]) * self.conf.kdz() for i in range(4)]\n\n gyro_x_inds = self.gyr.gyr_angle.x[self.inds]\n\n # AngleShift в mathrail.cpp:\n if self.__angle_delta_correct__ is not None and \\\n self.__angle_delta_correct__ > 0:\n gyr_delta = diff(gyro_x_inds)\n gyr_delta[abs(gyr_delta) <= self.__angle_delta_correct__] = 0\n gyro_x_inds[1:] -= cumsum(gyr_delta)\n\n gyr_angle = self.__filt__(\n gyro_x_inds,\n self.direction,\n *self.conf.filt_params('gyr_angle'))\n\n gyr_sm = self.__filt__(\n gyro_x_inds,\n self.direction,\n *self.conf.filt_params('gyr_sm'))\n\n hi_freq = gyr_angle - gyr_sm\n\n hor_gyro = self.gyr.gyro.z[self.inds]\n\n speed = self.__speed__(self.can, self.conf) * (2 * self.direction - 1)\n\n speed_corr = self.__filt__(\n self.__speed_corr__(hor_gyro, speed),\n self.direction,\n *self.conf.filt_params('speed_corr'))\n\n speed = self.__filt__(\n speed,\n self.direction,\n *self.conf.filt_params('speed'))\n\n acs = self.__filt__(\n self.gyr.acs.y[self.inds],\n self.direction,\n *self.conf.filt_params('acs'))\n\n acs_corr = acs + speed_corr\n\n hi_freq_mm = hi_freq * 1600\n acs_corr_mm = acs_corr * 1600\n\n level = [dz[i] + acs_corr_mm + hi_freq_mm + self.conf.level_shift()[i]\n for i in range(len(dz))]\n\n result = Bunch(\n level=level,\n z=z,\n dz=dz,\n gyro=self.gyr,\n hor_gyro=hor_gyro,\n speed=speed,\n gyr_angle=gyr_angle,\n gyr_sm=gyr_sm,\n acs=acs,\n acs_corr=acs_corr,\n speed_corr=speed_corr,\n acs_corr_mm=acs_corr_mm,\n hi_freq_mm=hi_freq_mm,\n hi_freq=hi_freq,\n direction=self.direction,\n inds=self.inds,\n can=self.can,\n conf=self.conf,\n )\n\n return result\n\n\nclass LevelSyn(__Level__):\n\n def __init__(self):\n super(LevelSyn, self).__init__(\n __ConfigAdapterSyn__,\n 
#LevelParams,\n            gyro.integral,\n            0)\n\n    def __direction__(self, can, conf):\n        node_id, mask, mode = conf.dir_event()\n        direction = can.get_event(node_id, mask)\n        if mode & 1 == 0:\n            direction = True - direction\n        return direction[can.get_indexes(node_id, 2)]\n\n    def __speed__(self, can, conf):\n        t = self.__time__(can)\n        dt = win_diff(t, 30) / 30\n        speed = conf.dl() / dt\n        speed[isinf(speed)] = 0\n        return speed\n\n    def __speed_corr__(self, hor_gyro, speed):\n        return hor_gyro * speed / 9.8\n\n\nclass Level7374(__Level__):\n\n    def __init__(self):\n        super(Level7374, self).__init__(\n            __ConfigAdapter7374__,\n            gyro.Metro7374(alpha=1e-4, beta=1e-4),\n            0.001)\n\n    def __direction__(self, can, conf):\n        e = can.parse(0x28F, 8, 6, 1)\n        inds = can.get_indexes(0x28F, 8)\n        return True - (e[inds] != 0)\n\n    def __speed__(self, can, conf):\n        t = self.__time__(can)\n        dt = win_diff(t, 99) / 100 # the server code does it this way (todo: fix!)\n        speed = conf.dl() / dt\n        speed[isinf(speed)] = 0\n        speed[:50] = speed[51]\n        speed[-50:] = speed[-51]\n        return speed\n\n    def __speed_corr__(self, hor_gyro, speed):\n        return hor_gyro * speed * 0.08\n\n\nclass Level7374True(__Level__):\n\n    def __init__(self):\n        super(Level7374True, self).__init__(\n            __ConfigAdapter7374__,\n            gyro.Metro7374(alpha=1e-4, beta=1e-4),\n            0.001)\n\n    def __direction__(self, can, conf):\n        e = can.parse(0x28F, 8, 6, 1)\n        inds = can.get_indexes(0x28F, 8)\n        return True - (e[inds] != 0)\n\n    def __speed__(self, can, conf):\n        t = self.__time__(can)\n        dt = win_diff(t, 30) / 30\n        speed = conf.dl() / dt\n        speed[isinf(speed)] = 0\n        return speed\n\n    def __speed_corr__(self, hor_gyro, speed):\n        return hor_gyro * speed / 9.8\n\n\n\nif __name__ != '__main__':\n    syn = LevelSyn()\n    metro7374 = Level7374()\n    metro7374_true = Level7374True()\n","sub_path":"calc3.py","file_name":"calc3.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"360970305","text":"import sys\nimport json\nfrom flask import Blueprint, request\nfrom Controller.API import Controllers\nfrom Model.Gateway.DashboardGateway import DashboardGateway\nfrom Model.Object.DataTransferObject import DataTransferObject\n\nDashboardController = Blueprint('DashboardController', __name__)\n\n#Get All Serves\n@DashboardController.route('/api/Dashboards', methods=['GET'])\ndef getServers():\n return json.dumps(DashboardGateway().getAllDashboards(), default=Controllers.default)\n\n#Get Server based on ID\n@DashboardController.route('/api/Dashboards/View/', methods=['GET'])\ndef getServer(ID):\n if ID is None:\n abort(404)\n else:\n return json.dumps(DashboardGateway().getDashboard(ID), default=Controllers.default)\n\n#Get Charts based on Dashboard ID\n@DashboardController.route('/api/Dashboards/View/Charts/', methods=['GET'])\ndef getCharts(ID):\n if ID is None:\n abort(404)\n else:\n return json.dumps(DashboardGateway().getCharttoDashboard(ID), default=Controllers.default)\n\n\n@DashboardController.route('/api/Dashboards/Create/', methods=['POST'])\ndef createDashboard():\n DashboardDTO = DataTransferObject()\n DashboardDTO.Name = request.form['name']\n DashboardDTO.Description = request.form['description']\n DashboardDTO.ID = 0\n DashboardID = DashboardGateway().addDashboard(DashboardDTO)\n\n Charts = request.form['charts']\n Charts = json.loads(Charts)\n gateway = DashboardGateway()\n for chart in Charts:\n newChart = DataTransferObject()\n newChart.Position = chart['Position']\n newChart.ChartID = chart['ChartID']\n gateway.addCharttoDashboard(DashboardID,newChart)\n return json.dumps(\"OKAY\", default=Controllers.default)\n\n@DashboardController.route('/api/Dashboards/Update/', methods=['POST'])\ndef updateServer(ID):\n if ID is None:\n abort(404)\n else:\n ServerDTO = DataTransferObject()\n ServerDTO.ID = ID\n ServerDTO.Name = request.form['name']\n ServerDTO.Description = request.form['description']\n ServerDTO.HostName = request.form['host']\n ServerDTO.Schema = request.form['schema']\n ServerDTO.Port = request.form['port']\n ServerDTO.UserName = request.form['user']\n ServerDTO.Password = request.form['password']\n return json.dumps(ServerGateway().modifyServer(ServerDTO), default=Controllers.default)\n\n@DashboardController.route('/api/Dashboards/Delete/', methods=['POST'])\ndef deleteServer(ID):\n if ID is None:\n abort(404)\n else:\n return json.dumps(ServerGateway().deleteServer(ID), default=Controllers.default)\n","sub_path":"src/Controller/API/DashboardController.py","file_name":"DashboardController.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"439105627","text":"from adapt.engine import IntentDeterminationEngine\nfrom DB_Skill.task_manager.IntentsGrouper import all_entities_dic as entities, all_intents as intents, all_MRA as multi_regex_entities\nfrom DB_Skill.task_manager import Handler\n\nengine = IntentDeterminationEngine()\n\n\n# Register entities in engine\nfor entity, keywords in entities.items():\n for keyword in keywords:\n engine.register_entity(keyword, entity)\n\nfor entity in multi_regex_entities:\n for regex in entity:\n engine.register_regex_entity(regex)\n\n# Register intents on engine\nfor intent in intents:\n engine.register_intent_parser(intent)\n\n\ntext1= 'what is the number of active jobs today?'\ntext2= 'create assignment buy mic for john'\n\n\nfor intent in engine.determine_intent(text1):\n\tprint (intent)\n\nfor intent in engine.determine_intent(text2):\n\tprint (intent)\n\n\ndef f1():\n\tprint(' i am connecting todo')\n\nh1 = Handler.Handler(intents[0], f1)\n\n\nh1.compute() #\n","sub_path":"DB_Skill/task_manager/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"307988732","text":"def ratio(x,y):\n \"jjjjj\"\n return x/y\ndef answertolife():\n \"\"\n return 42\n\ndef think():\n \"\"\n return ('fuck yeah!')\n\ndef comple_base(base):\n \"\"\n if base in 'Aa':\n return 'T'\n elif base in 'Tt':\n return 'A'\n elif base in 'Gg':\n return 'C'\n else:\n return 'G'\n\nseq = 'ATCGCGGGCGTTTAGATGTAGT'\n\ndef comple_seq(seq):\n \"\"\n rev_comp=''\n for base in reversed(seq):\n rev_comp += comple_base(base)\n return rev_comp\n\ndef complement_base(base, material='DNA'):\n \"\"\"Returns the Watson-Crick complement of a base.\"\"\"\n\n if base == 'A' or base == 'a':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base == 'T' or base == 't' or base == 'U' or base == 'u':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'\n\ndef reverse_complement(seq, material='DNA'):\n \"\"\"Compute reverse complement of a sequence.\"\"\"\n\n # Initialize reverse complement\n rev_seq = ''\n\n # Loop through and populate list with reverse complement\n for base in reversed(seq):\n rev_seq += complement_base(base, material=material)\n\n return rev_seq\n\n\ndef add_three_numbers(a,b,c):\n \"\"\n return a+b+c\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"242009211","text":"#example of list comprehensions, simple and fast how to make list with implementet algorithm\n\nsqrt = [x**2 for x in range(10)]\n\nprint(sqrt)\n\n#to get the same results you can use for loop, but it is longer than abowe way\n\n#using multi args with logical expressions\n\ntup = [(x, y) for x in [1, 2, 3] for y in [1, 2, 4] if x != y]\nprint(tup)\n\n#filter with simple comprehation list\n\narray = range(10)\n\nfilteredArray = [x**3 for x in array if x%2 == 0]\nprint(filteredArray)\n\nx = []\nd = [x for j in array]\n\nprint(d)\n\nfor i in range(10):\n d.append(i)\n\nprint(d)\n\n#nested comprehetion lists -> fast transposition algorithm\nmatrix = [[1, 2, 3], [5, 6, 7], [9, 10, 11], [13, 14, 15]]\nmatrix = [[row[i] for row in matrix]for i in range(3)]\n\nprint(matrix)\n\n#deletions elements form lists\ndel matrix[1]\nprint(matrix)\ndel d[1:4] #deletion of range\ndel d #it will delete d valeu form computer momery\n\n","sub_path":"basics/list_comprehensions.py","file_name":"list_comprehensions.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"525794858","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2, 2018\n\n@author: tom verguts\npics and tables from chapter 6\n\"\"\"\n\nimport numpy as np\n\n\n \n#%% table: illustration of AIC, BIC, and transfer test\n\"\"\"\nAIC and BIC illustration\nmodel 1: There is just one p-value\nmodel 2: Part 1 and part 2 have a different p-value\n\"\"\"\nn_trials = 1000\np = [0.2, 0.8]\nK = [1, 2] # nr of parameters\n\nfor loop in range(2):\n data = np.zeros(n_trials)\n data_new = np.zeros(n_trials) # these are the transfer data\n for data_loop in range(n_trials):\n data[data_loop] = (np.random.rand() > p[data_loop%2])\n data_new[data_loop] = (np.random.rand() > p[data_loop%2])\n \n n_heads = np.array([np.sum(data[:n_trials//2+1]), np.sum(data[n_trials//2:])])\n n_heads_new = np.array([np.sum(data_new[:n_trials//2+1]), np.sum(data_new[n_trials//2:])])\n p1 = np.sum(n_heads)/data.size # p-values estimates only on data (not data_new)\n p2 = np.array(n_heads/(data.size//2))\n\n lik1 = np.sum(n_heads)*np.log(p1) + (n_trials-np.sum(n_heads))*np.log(1-p1)\n lik2 = ( n_heads[0]*np.log(p2[0]) + (n_trials//2-n_heads[0])*np.log(1-p2[0])+\n n_heads[1]*np.log(p2[1]) + (n_trials//2-n_heads[1])*np.log(1-p2[1]) )\n\n aic1 = -2*lik1 + 2*K[0]\n aic2 = -2*lik2 + 2*K[1]\n\n bic1 = -2*lik1 + K[0]*np.log(n_trials)/2\n bic2 = -2*lik2 + K[1]*np.log(n_trials)/2\n\n lik1_new = np.sum(n_heads_new)*np.log(p1) + (n_trials-np.sum(n_heads_new))*np.log(1-p1)\n lik2_new = ( n_heads_new[0]*np.log(p2[0]) + (n_trials//2-n_heads_new[0])*np.log(1-p2[0])+\n n_heads_new[1]*np.log(p2[1]) + (n_trials//2-n_heads_new[1])*np.log(1-p2[1]) )\n print(\"Dataset {0}\\n LogL model1: {1:.2f} model 2: {2:.2f}\\n AIC model1: {3:.2f} \\\n model2: {4:.2f}\\n BIC model1 {5:.2f} BIC model2 {6:.2f} LogLtransf model1: {7:.2f} LogLtransf model2: {8:.2f}\".\n format(loop+1,lik1, lik2, aic1, aic2, bic1, bic2, lik1_new, lik2_new))","sub_path":"AY 2018 - 2019/ch6_exam_question.py","file_name":"ch6_exam_question.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"427177934","text":"#!/usr/bin/python3\n\n# Watch for xscreensaver lock / unlock events to pause notifications etc\n\nimport subprocess\nimport sys\n\nfor line in sys.stdin:\n state = line.strip().split()[0]\n if state == 'LOCK':\n subprocess.run('killall -SIGUSR1 dunst', shell=True)\n subprocess.run('echo RELOADAGENT | gpg-connect-agent', shell=True)\n subprocess.run('ssh-add -D', shell=True)\n elif state == 'UNBLANK':\n subprocess.run('killall -SIGUSR2 dunst', shell=True)\n","sub_path":"modern-metapackages/debian/modern/usr/share/modern/bin/lock_watch.py","file_name":"lock_watch.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"265580223","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nreadme = ''\n\nif not os.path.exists('VERSION'):\n os.system(\"git describe --tags | cut -c 2- | cut -d'-' -f1 > VERSION\")\n\nversion = open(os.path.join(HERE, 'VERSION')).read()[:-1]\n\nsetup(\n name='test',\n version=version,\n long_description=readme,\n packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),\n license='MIT',\n platforms='any'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"485436069","text":"# -*- coding:utf-8 -*-\n\nimport psutil\nimport re\nimport os\nimport sendmail\na=[]\nc=[]\n\ndef process(x):\n p = psutil.pids()\n for i in p:\n pid=psutil.Process(i)\n name = str(pid.name())\n if re.match(x, name):\n a.append('%i' %i)\n\n\ntry:\n process('java')\n if len(a) < 16:\n for g in a:\n pid=psutil.Process(int(g))\n l=pid.cwd()\n if re.match(\"^.*\\-tdx.*$\", l):\n c.append('%i' %int(g))\n sendmail.send_mail('maintenance@xinyusoft.com@xinyusoft.com','交易程序故障报警','现有进程数:%i\\ntdx进程数:%i' %(len(a),len(c)))\n sendmail.send_mail('zengjianfeng@xinyusoft.com@xinyusoft.com','交易程序故障报警','现有进程数:%i\\ntdx进程数:%i' %(len(a),len(c)))\n sendmail.send_mail('gusheng@xinyusoft.com@xinyusoft.com','交易程序故障报警','现有进程数:%i\\ntdx进程数:%i' %(len(a),len(c)))\nexcept Exception as e:\n sendmail.send_mail('xitongjiankong@xinyusoft.com','交易程序故障报警','监控程序异常,请处理\\nerror%s' %e)","sub_path":"num_process.py","file_name":"num_process.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"121037986","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^dashboard/', views.dashboard, name='dashboard'),\n url(r'^vizes/(?P[\\w.@+-]+)', views.vizes, name='vizes'),\n url(r'^project_management/', views.project_management, name='project_management'),\n url(r'^response/(?P-?\\d+\\.\\d{2})', views.response, name='response'),\n]\n","sub_path":"ihc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"454997919","text":"from django.conf.urls import url, include\nfrom apps.settingsSICAP.views import *\n\nurlpatterns = [\n \n url(r'ajax/settingsOP/(?P\\d+)/', GetOriginSettings.as_view(), name='settingsOP'),\n url(r'ajax/settingsOG/(?P\\d+)/', CreateOriginSettings.as_view(), name='settingsOG'),\n url(r'ajax/settingsCreateOP/(?P\\d+)/', CreateOperationSettings.as_view(), name='settingsCreateOP'),\n url(r'ajax/settingsInfDetall/(?P\\d+)/', CreateInformDetall.as_view(), name='settingsInfDetall'),\n url(r'ajax/createInform/(?P\\d+)/$', CreateInform.as_view(), name='createInform'), \n url(r'settings/(?P\\d+)/(?P\\d+)/$', ListAccountPeriod.as_view() , name='settings'),\n url(r'settings/listInform/(?P\\d+)/(?P\\d+)/$', ListInform.as_view() , name='listInform'),\n url(r'settings/createTypeAgreement/(?P\\d+)/', CreateTypeAgreement.as_view(), name='createTypeAgreement'), \n url(r'settings/listTypeAgreement/(?P\\d+)/(?P\\d+)/$', ListTypeAgreement.as_view() , name='listTypeAgreement'),\n url(r'ajax/updateTypeAgreement/(?P\\d+)/', UpdateTipeAgreement.as_view(), name='updateTypeAgreement'),\n url(r'settings/deleteAll/(?P\\d+)/', DeleteAll.as_view() , name='deleteAll'),\n url(r'ajax/updateAccountPeriod/(?P\\d+)/', UpdateAccountPeriod.as_view(), name='updateAccountPeriod'),\n url(r'ajax/updateInform/(?P\\d+)/', UpdateInform.as_view(), name='updateInform'),\n url(r'settings/listOperations/(?P\\d+)/(?P\\d+)/', ListOperations.as_view() , name='listOperations'),\n url(r'ajax/updateOperation/(?P\\d+)/', UpdateOperation.as_view(), name='updateOperation'),\n url(r'ajax/getOperationsContra/(?P\\d+)/', GetOperationsContra.as_view(), name='getOperationsContra'),\n url(r'ajax/updateContraOperation/(?P\\d+)/', UpdateContraOperation.as_view(), name='updateContraOperation'),\n url(r'ajax/getOriginOperation/(?P\\d+)/', GetOriginOperation.as_view(), name='getOriginOperation'),\n url(r'ajax/changeWindowsOperation/(?P\\d+)/', ChangeWindowsOperation.as_view(), name='changeWindowsOperation'),\n url(r'settings/listDiscount/(?P\\d+)/(?P\\d+)/$', ListDiscount.as_view() , name='listDiscount'),\n url(r'ajax/createDiscount/(?P\\d+)/$', CreateDiscount.as_view(), name='createDiscount'),\n\n url(r'settings/listAccount/(?P\\d+)/(?P\\d+)/$', ListAccount.as_view() , name='listAccount'),\n url(r'ajax/createAccount/(?P\\d+)/$', CreateAccount.as_view(), name='createAccount'),\n url(r'ajax/updateAccount/(?P\\d+)/', UpdateAccount.as_view(), name='updateAccount'),\n\n url(r'settings/generateAccounting/(?P\\d+)/', generateAccounting , name='generateAccounting'),\n url(r'ajax/getAccountSettings/(?P\\d+)/', GetAccountSettings.as_view(), name='getAccountSettings'),\n url(r'ajax/createAccountingOpTip/(?P\\d+)/$', CreateAccountingOpTip.as_view(), name='createAccountingOpTip'),\n url(r'ajax/getBudget/(?P\\d+)/', GetBudget.as_view(), name='getBudget'),\n url(r'ajax/createAccountRubro/(?P\\d+)/', CreateAccountRubro.as_view(), name='createAccountRubro'),\n url(r'ajax/getAccountsByRubro/(?P\\d+)/', GetAccountsByRubro.as_view(), name='getAccountsByRubro'),\n url(r'ajax/searchAccount/(?P\\d+)/', SearchAccount.as_view(), name='searchAccount'),\n \n url(r'settings/listInformBank/(?P\\d+)/(?P\\d+)/$', ListInformBank.as_view() , name='listInformBank'),\n url(r'ajax/createInformBank/(?P\\d+)/$', CreateInformBank.as_view(), name='createInformBank'), \n url(r'ajax/settingsInfDetailBank/(?P\\d+)/', CreateInformDetailBank.as_view(), name='settingsInfDetailBank'),\n url(r'ajax/updateInformBank/(?P\\d+)/', UpdateInformBank.as_view(), 
name='updateInformBank'),\n url(r'ajax/changeWindowsInformDetailBank/(?P\\d+)/', ChangeWindowsInformDetailBank.as_view(), name='changeWindowsInformDetailBank'),\n url(r'ajax/importAccountsBD/(?P\\d+)/', ImportAccountsBD.as_view(), name='importAccountsBD'),\n\n url(r'ajax/searchAccountButton/(?P\\d+)/', SearchAccountButton.as_view(), name='searchAccountButton'),\n\n url(r'ajax/getSearchAccountButton/(?P\\d+)/', GetSearchAccountButton.as_view(), name='getSearchAccountButton'),\n \n]","sub_path":"apps/settingsSICAP/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"207454277","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ArtifactSourcePropertiesModel(Model):\n \"\"\"The properties that define the source location where the artifacts are\n located.\n\n All required parameters must be populated in order to send to Azure.\n\n :param source_type: Required. The type of artifact source used.\n :type source_type: str\n :param artifact_root: The path from the location that the 'authentication'\n property [say, a SAS URI to the blob container] refers to, to the location\n of the artifacts. This can be used to differentiate different versions of\n the artifacts. Or, different types of artifacts like binaries or\n templates. The location referenced by the authentication property\n concatenated with this optional artifactRoot path forms the artifact\n source location where the artifacts are expected to be found.\n :type artifact_root: str\n :param authentication: Required. The authentication method to use to\n access the artifact source.\n :type authentication: ~azure.mgmt.deploymentmanager.models.Authentication\n \"\"\"\n\n _validation = {\n 'source_type': {'required': True},\n 'authentication': {'required': True},\n }\n\n _attribute_map = {\n 'source_type': {'key': 'sourceType', 'type': 'str'},\n 'artifact_root': {'key': 'artifactRoot', 'type': 'str'},\n 'authentication': {'key': 'authentication', 'type': 'Authentication'},\n }\n\n def __init__(self, **kwargs):\n super(ArtifactSourcePropertiesModel, self).__init__(**kwargs)\n self.source_type = kwargs.get('source_type', None)\n self.artifact_root = kwargs.get('artifact_root', None)\n self.authentication = kwargs.get('authentication', None)\n","sub_path":"azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/models/artifact_source_properties_model.py","file_name":"artifact_source_properties_model.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"495307913","text":"from . import has_scope_name\nfrom . import constants\nfrom . import settings\nfrom .models import Token\nfrom .utils import TimestampGenerator\nfrom .exceptions import OAuth2Exception\n\nclass OAuth2Backend(object):\n '''\n Authentication backend using OAuth2 access tokens. Currently supports\n bearer tokens only.\n '''\n def authenticate(self, **credentials):\n if 'authentication_method' in credentials:\n authentication_method = credentials['authentication_method']\n \n if authentication_method == 'bearer':\n if settings.AUTHENTICATION_METHOD & constants.BEARER:\n if 'access_token' in credentials:\n access_token = credentials['access_token']\n \n try:\n token = Token.objects.get(\n access_token=access_token\n )\n \n now = TimestampGenerator()()\n if token.expire >= now:\n if has_scope_name(\n settings.AUTHENTICATION_SCOPE,\n token.scopes.all()\n ):\n return token.user\n \n except OAuth2Exception:\n pass\n \n return None\n","sub_path":"oauth2/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"564359396","text":"# coding: utf-8\n#########################################################################\n# 网站: 疯狂Java联盟 #\n# author yeeku.H.lee kongyeeku@163.com #\n# #\n# version 1.0 #\n# #\n# Copyright (C), 2001-2018, yeeku.H.Lee #\n# #\n# This program is protected by copyright laws. #\n# #\n# Program Name: #\n# #\n# Date: #\n#########################################################################\nfrom tkinter import *\n# 导入ttk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nclass App:\n def __init__(self, master):\n self.master = master\n self.initWidgets()\n def initWidgets(self):\n # 创建一个Label组件\n ttk.Label(self.master, text='选择您喜欢的人物:')\\\n .pack(fill=BOTH, expand=YES)\n self.chars = []\n # 定义元组\n characters = ('孙悟空', '猪八戒','唐僧', '牛魔王')\n # 采用循环创建多个Checkbutton\n for ch in characters:\n intVar = IntVar()\n self.chars.append(intVar)\n cb = ttk.Checkbutton(self.master, \n text = ch,\n variable = intVar, # 将Checkbutton绑定到intVar变量\n command = self.change) # 将选中事件绑定到self.change方法\n cb.pack(anchor=W)\n # 创建一个Label组件\n ttk.Label(self.master, text='选择您喜欢的图书:')\\\n .pack(fill=BOTH, expand=YES)\n # --------------下面是第二组Checkbutton---------------\n self.books = []\n # 定义两个元组\n books = ('疯狂Python讲义', '疯狂Kotlin讲义','疯狂Swift讲义', '疯狂Java讲义')\n vals = ('python', 'kotlin','swift', 'java')\n i = 0\n # 采用循环创建多个Checkbutton\n for book in books:\n strVar = StringVar()\n self.books.append(strVar)\n cb = ttk.Checkbutton(self.master, \n text = book,\n variable = strVar, # 将Checkbutton绑定到strVar变量\n onvalue = vals[i],\n offvalue = '无',\n command = self.books_change) # 将选中事件绑定到books_change方法\n cb.pack(anchor=W)\n i += 1\n def change(self):\n # 将self.chars列表转换成元素为str的列表\n new_li = [str(e.get()) for e in self.chars]\n # 将new_li列表连接成字符串\n st = ', '.join(new_li)\n messagebox.showinfo(title=None, message=st)\n def books_change(self):\n # 将self.books列表转换成元素为str的列表\n new_li = [e.get() for e in self.books]\n # 将new_li列表连接成字符串\n st = ', '.join(new_li)\n messagebox.showinfo(title=None, message=st)\nroot = Tk()\nroot.title(\"Checkbutton测试\")\n# 改变窗口图标\nroot.iconbitmap('images/fklogo.ico')\nApp(root)\nroot.mainloop()\n","sub_path":"官方配套代码/11/11.5/Checkbutton_test.py","file_name":"Checkbutton_test.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"263913371","text":"'''\n File name: main.py\n Python Version: 3.6\n'''\n\nimport csv\nimport re\nimport argparse\nimport time\nimport datetime\nimport logging\n\nimport unittest\n\nclass Tag(object):\n def __init__(self, iTag, iTerms):\n self.tag = iTag\n self.searchTerms = iTerms\n self.searchRegex = None\n self.setRegexFromSearchTerms()\n\n def setRegexFromSearchTerms(self):\n aListWordsUpdated = [r\"\\b(?i)\" + aOneOriginalWord + r\"\\b\" for aOneOriginalWord in self.searchTerms]\n aTermToHighlightAsRegexString='|'.join(aListWordsUpdated )\n self.searchRegex = re.compile(aTermToHighlightAsRegexString)\n\n def prettyPrint(self):\n aRetStr = \"\"\n aRetStr = aRetStr + \"The Tag: \\\"\" + self.tag + \"\\\"\"\n aRetStr = aRetStr + \" will match searchTerms: \\\"\" + str('\",\"'.join(self.searchTerms)) + \"\\\"\"\n aRetStr = aRetStr + \" and use regex: \\\"\" + str(self.searchRegex) + \"\\\"\"\n return aRetStr\n\n\nclass MyTest(unittest.TestCase):\n\n def test_createRegex(self):\n aFakeTagFromCsv={\"String to look for (seperated by ; )\":\"aaa;bbb;ccc\",\"Tag to add on the post\":\"aaa\"}\n aFakeTagObj = Tag(aFakeTagFromCsv[\"Tag to add on the post\"], aFakeTagFromCsv[\"String to look for (seperated by ; )\"].split(';'))\n aRegexResult = re.compile(r\"\\b(?i)aaa\\b|\\b(?i)bbb\\b|\\b(?i)ccc\\b\")\n self.assertEqual(aFakeTagObj.searchRegex, aRegexResult)\n\n def test_nominal(self):\n aFakePost={\"post_uuid\":\"1\",\"content\":\"test aaa test bbb test\"}\n\n aFakeTagFromCsv={\"String to look for (seperated by ; )\":\"aaa;aa aa\",\"Tag to add on the post\":\"AAA\"}\n aFakeTagObj = Tag(aFakeTagFromCsv[\"Tag to add on the post\"], aFakeTagFromCsv[\"String to look for (seperated by ; )\"].split(';'))\n aSmartTagList=[aFakeTagObj]\n\n aTagsRes = smartHighlight(aFakePost,aSmartTagList)\n\n #https://stackoverflow.com/questions/12813633/how-to-assert-two-list-contain-the-same-elements-in-python/35095881#35095881\n self.assertEqual(aTagsRes,[\"AAA\"])\n\n def test_case_sensitive(self):\n aFakePost={\"post_uuid\":\"1\",\"content\":\"test eee test bbb test AAa test\"}\n\n aFakeTagFromCsv={\"String to look for (seperated by ; )\":\"aaa;aa aa\",\"Tag to add on the post\":\"AAA\"}\n aFakeTagObj = Tag(aFakeTagFromCsv[\"Tag to add on the post\"], aFakeTagFromCsv[\"String to look for (seperated by ; )\"].split(';'))\n aSmartTagList=[aFakeTagObj]\n\n aTagsRes = smartHighlight(aFakePost,aSmartTagList)\n\n self.assertEqual(aTagsRes,[\"AAA\"])\n\n def test_several_tags(self):\n aFakePost={\"post_uuid\":\"1\",\"content\":\"test eee test bbb test AAa test\"}\n\n aFakeTagFromCsv={\"String to look for (seperated by ; )\":\"aaa;aa aa\",\"Tag to add on the post\":\"AAA\"}\n aFakeTagObj = Tag(aFakeTagFromCsv[\"Tag to add on the post\"], aFakeTagFromCsv[\"String to look for (seperated by ; )\"].split(';'))\n aFakeTag2FromCsv={\"String to look for (seperated by ; )\":\"bbb;bb bb\",\"Tag to add on the post\":\"BBB\"}\n aFakeTag2Obj = Tag(aFakeTag2FromCsv[\"Tag to add on the post\"], aFakeTag2FromCsv[\"String to look for (seperated by ; )\"].split(';'))\n aSmartTagList=[aFakeTagObj,aFakeTag2Obj]\n\n aTagsRes = smartHighlight(aFakePost,aSmartTagList)\n\n self.assertEqual(aTagsRes,[\"AAA\",\"BBB\"])\n\n def test_no_match(self):\n aFakePost={\"post_uuid\":\"1\",\"content\":\"test eee test bbb test AAa test\"}\n\n aFakeTagFromCsv={\"String to look for (seperated by ; )\":\"ccc;cc cc\",\"Tag to add on the post\":\"CCC\"}\n aFakeTagObj = Tag(aFakeTagFromCsv[\"Tag to add on the post\"], aFakeTagFromCsv[\"String to look 
for (seperated by ; )\"].split(';'))\n aFakeTag2FromCsv={\"String to look for (seperated by ; )\":\"ddd;dd dd\",\"Tag to add on the post\":\"DDD\"}\n aFakeTag2Obj = Tag(aFakeTag2FromCsv[\"Tag to add on the post\"], aFakeTag2FromCsv[\"String to look for (seperated by ; )\"].split(';'))\n aSmartTagList=[aFakeTagObj,aFakeTag2Obj]\n\n aTagsRes = smartHighlight(aFakePost,aSmartTagList)\n\n self.assertEqual(aTagsRes,[])\n\n\nFILE_FORMAT=\"utf-8-sig\"\nFILE_FORMAT=\"ISO-8859-1\"\n#wafaa use utf-8 and jenny utf-8-sig\n\n#https://stackoverflow.com/questions/1557571/how-do-i-get-time-of-a-python-programs-execution\nstart_time = time.time()\n\nnow = datetime.datetime.now()\naCurrentDateTimeString = f'{now:%B_%d_%Y_%H_%M_%S}'\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# create a file handler\n\nhandler = logging.FileHandler('Highlighter_'+aCurrentDateTimeString+'.log',mode='w')\nhandler.setLevel(logging.INFO)\n\n# create a logging format\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\n# add the handlers to the logger\nlogger.addHandler(handler)\n\nparser = argparse.ArgumentParser(description='Look for some keyword in csv file and highlight them.')\n\n# https://stackoverflow.com/questions/15753701/argparse-option-for-passing-a-list-as-option\n# This is the correct way to handle accepting multiple arguments.\n# '+' == 1 or more.\n# '*' == 0 or more.\n# '?' == 0 or 1.\n# An int is an explicit number of arguments to accept.\nparser.add_argument('--FilePathListToProcess', nargs='+')\nparser.add_argument('--utest', action='store_true')\nparser.add_argument('--termToHighlightFilePath', help='File with term to hightlight. One term by line')\n\nargs = parser.parse_args()\n\ndef smartHighlight(aPost, iSmartTagsList):\n \"\"\"Extract some specific keyword from a post content (text)\n # @param aPost The post object (dict).\n # @param iTermToHighlightRegex The keyword regex to use (regex).\n # @return Returns list of tags matching the post\n \"\"\"\n aTags=[]\n for aOneTag in iSmartTagsList:\n aHighlightMatch=aOneTag.searchRegex.search(aPost[\"content\"])\n if(aHighlightMatch):\n aTags.append(aOneTag.tag)\n #logger.info(\"Match for \" + str(aOneTag.tag))\n return aTags\n\ndef extractTags(iFilePath):\n aStructuredTags=[]\n with open(iFilePath, encoding=FILE_FORMAT) as f:\n reader = csv.DictReader(f)\n for aOneEntry in reader:\n aNewTag = Tag(aOneEntry[\"Tag to add on the post\"], aOneEntry[\"String to look for (seperated by ; )\"].split(';'))\n aStructuredTags.append(aNewTag)\n return aStructuredTags\n\ndef smartFilterFile(iSmartTags, iFilename):\n \"\"\"Process all the post in a text file to extract their keyword\n # @param iFilename The filename path (string).\n \"\"\"\n aIndexForThisFile=0\n with open(iFilename, encoding=FILE_FORMAT) as f:\n reader = csv.DictReader(f)\n\n with open(iFilename + \"_output\"+aCurrentDateTimeString+\".csv\",\"w\", encoding=FILE_FORMAT,newline='') as filteredFile:\n fieldnames = reader.fieldnames + [\"TAGS\"]\n writer = csv.DictWriter(filteredFile, fieldnames=fieldnames)\n writer.writeheader()\n\n for aOneEntry in reader:\n aTags = smartHighlight(aOneEntry,iSmartTags)\n aOneEntry[\"TAGS\"]= '-'.join(aTags)\n writer.writerow(aOneEntry)\n aIndexForThisFile=aIndexForThisFile+1\n logger.info(\"aIndexForThisFile \" + str(aIndexForThisFile))\n \n\nif __name__== \"__main__\":\n if (args.utest):\n logger.info(\"Unit test\")\n runner = unittest.TextTestRunner()\n itersuite = 
unittest.TestLoader().loadTestsFromTestCase(MyTest)\n runner.run(itersuite)\n quit()\n\n aIndex=0\n\n aStrTags = extractTags(args.termToHighlightFilePath)\n for aOneTag in aStrTags:\n logger.info(aOneTag.prettyPrint())\n\n for aOneFile in args.FilePathListToProcess:\n logger.info(\"Current file: \" + str(aOneFile))\n smartFilterFile(aStrTags,aOneFile)\n\n logger.info(\"Ending in \" + str((time.time() - start_time)) + \" ms\")\n","sub_path":"highlighter.py","file_name":"highlighter.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"212495120","text":"class MGUplinkSettings(object):\n def __init__(self, session):\n super(MGUplinkSettings, self).__init__()\n self._session = session\n \n def getNetworkCellularGatewaySettingsUplink(self, networkId: str):\n \"\"\"\n **Returns the uplink settings for your MG network.**\n https://api.meraki.com/api_docs#returns-the-uplink-settings-for-your-mg-network\n \n - networkId (string)\n \"\"\"\n\n metadata = {\n 'tags': ['MG uplink settings'],\n 'operation': 'getNetworkCellularGatewaySettingsUplink',\n }\n resource = f'/networks/{networkId}/cellularGateway/settings/uplink'\n\n return self._session.get(metadata, resource)\n\n def updateNetworkCellularGatewaySettingsUplink(self, networkId: str, **kwargs):\n \"\"\"\n **Updates the uplink settings for your MG network.**\n https://api.meraki.com/api_docs#updates-the-uplink-settings-for-your-mg-network\n \n - networkId (string)\n - bandwidthLimits (object): The bandwidth settings for the 'cellular' uplink\n \"\"\"\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['MG uplink settings'],\n 'operation': 'updateNetworkCellularGatewaySettingsUplink',\n }\n resource = f'/networks/{networkId}/cellularGateway/settings/uplink'\n\n body_params = ['bandwidthLimits']\n payload = {k: v for (k, v) in kwargs.items() if k in body_params}\n\n return self._session.put(metadata, resource, payload)\n\n","sub_path":"meraki/api/mg_uplink_settings.py","file_name":"mg_uplink_settings.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"297882841","text":"# -*- coding: utf-8 -*-\n\nimport jieba\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.cluster import KMeans\nfrom apriori import *\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport re\nimport pandas as pd\nclass wordf :\n def __init__(self,word,freq):\n self.word = word\n self.freq = freq\ndef find_chinese(file):\n pattern = re.compile(r'[u4e00-u9fa5]')#先compile一个正则表达式\n chinese = re.sub(pattern, '', file)#在使用re.sub进行匹配找到合理的子串\n return chinese\n\nclass KmeansClustering():\n def __init__(self, stopwords_path=None):\n self.stopwords = self.load_stopwords(stopwords_path)\n self.vectorizer = CountVectorizer()\n self.transformer = TfidfTransformer()\n\n def load_stopwords(self, stopwords=None):\n \"\"\"\n 加载停用词\n :param stopwords:\n :return:\n \"\"\"\n if stopwords:\n with open(stopwords, 'r', encoding='utf-8') as f:\n return [line.strip() for line in f]\n else:\n return []\n \n\n def frequence_key_word(self,corpus_path):\n '''\n 统计文档中的词\n :param corpus_path:\n :return:\n '''\n all =[]\n corpus = self.preprocess_data(corpus_path)\n xx = 0\n for item in corpus:\n print(item)\n L = []\n vectorizer = CountVectorizer()\n #norm不对词频进行归一化,关闭idf进行计算\n transformer = TfidfTransformer(norm = None,use_idf =False)\n tf = transformer.fit_transform(vectorizer.fit_transform([item]))\n word = vectorizer.get_feature_names()\n\n weight = tf.toarray()\n\n\n for i in range(len(weight)):\n for j in range(len(word)):\n s1 = wordf(word[j],weight[i][j])\n # print(word[j],weight[i][j])\n L.append(s1)\n\n L.sort(key=lambda t:t.freq,reverse=True)\n\n l = []\n for w in range(10):\n l.append((L[w].word,L[w].freq))\n print(l)\n xx+=1\n self.plot_keyword(l,xx)\n all.append(l)\n\n def preprocess_data(self, corpus_path):\n \"\"\"\n 文本预处理,每行一个文本\n :param corpus_path:\n :return:\n \"\"\"\n corpus = []\n with open(corpus_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = find_chinese(line)\n line = ' '.join([word for word in jieba.lcut(line.replace(' ','')) if word not in self.stopwords])\n corpus.append(line)\n return corpus\n\n\n def get_text_conunt_matrix(self,corpus):\n pass\n def get_text_tfidf_matrix(self, corpus):\n \"\"\"\n 获取tfidf矩阵\n :param corpus:\n :return:\n \"\"\"\n tfidf = self.transformer.fit_transform(self.vectorizer.fit_transform(corpus))\n\n # 获取词袋中所有词语\n # words = self.vectorizer.get_feature_names()\n\n # 获取tfidf矩阵中权重\n weights = tfidf.toarray()\n return weights\n\n def kmeans(self, corpus_path, n_clusters=5):\n \"\"\"\n KMeans文本聚类\n :param corpus_path: 语料路径(每行一篇),文章id从0开始\n :param n_clusters: :聚类类别数目\n :return: {cluster_id1:[text_id1, text_id2]}\n \"\"\"\n index = 0\n dict ={}\n with open('../data/class.txt','r',encoding='utf-8') as f:\n for i in f:\n dict.update({str(index):i.strip()})\n index += 1\n\n print(\"财经:1\",\"时政:2\",\"娱乐:3\",\"民生:4\",\"房产:5\")\n print(\"文档对应的分类如下\")\n print(dict)\n\n\n corpus = self.preprocess_data(corpus_path)\n weights = self.get_text_tfidf_matrix(corpus)\n\n clf = KMeans(n_clusters=n_clusters)\n\n # clf.fit(weights)\n\n y = clf.fit_predict(weights)\n\n # 中心点\n # centers = clf.cluster_centers_\n\n # 用来评估簇的个数是否合适,距离约小说明簇分得越好,选取临界点的簇的个数\n # score = clf.inertia_\n\n # 每个样本所属的簇\n result = {}\n for text_idx, label_idx in enumerate(y):\n if label_idx not in result:\n result[label_idx] = [text_idx]\n else:\n result[label_idx].append(text_idx)\n return result\n def plot_keyword(self,L,xx):\n #画出这个文档关键词的内容并保存图\n dict ={}\n for i in L:\n 
print(i)\n dict.update({i[0]:i[1]})\n\n s = sorted(dict.items(),key = lambda x:x[1],reverse = False)#直接对字典排序\n\n x_x = []\n y_y = []\n for i in s:\n x_x.append(i[0])\n y_y.append(i[1])\n fig,ax = plt.subplots()\n x = x_x\n y = y_y\n ax.barh(x,y,color = 'deepskyblue')\n labels = ax.get_xticklabels()\n\n plt.setp(labels, rotation=0, horizontalalignment='right')\n for a,b in zip(x,y):\n plt.text(b+1,a,b,ha = 'center',va = 'center')\n plt.xlim(0, max(y)+5)\n ax.legend(['label'],loc = 'lower right')\n plt.rcParams['font.sans-serif'] = ['SimHei']#正常显示中文\n plt.ylabel('关键词')\n plt.xlabel('出现次数')\n plt.rcParams['savefig.dpi'] =300\n plt.rcParams['figure.dpi'] = 300\n plt.rcParams['figure.figsize'] =(20,8.0)#尺寸\n plt.title('文档出现次数前十的词')\n plt.savefig('../data/keyword/key'+str(xx)+'.png')\n # plt.show()\n\n\n\n\n\n\nif __name__ == '__main__':\n\n Kmeans = KmeansClustering(stopwords_path='../data/stop_words.txt')\n\n Kmeans.frequence_key_word('../data/news')\n","sub_path":"text_clustering-master/Kmeans/词频统计.py","file_name":"词频统计.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"463948258","text":"import math\n\ndef create_k_fold(X, k_fold=5):\n X_len = len(X)\n one_fold_len = int(math.floor(X_len/k_fold))\n last_fold_len = X_len - (k_fold-1)*one_fold_len\n for i in range(k_fold):\n if i == k_fold - 1:\n X_test = X[X_len-last_fold_len:]\n X_train = X[:X_len-last_fold_len]\n else:\n X_test = X[i * one_fold_len:(i + 1) * one_fold_len]\n X_train = X[0: i * one_fold_len] + X[(i + 1) * one_fold_len:]\n yield X_train, X_test\n\n\nif __name__ == '__main__':\n X = list(range(100))\n for X_train, X_test in create_k_fold(X, k_fold=10):\n print (\"X_train: \", X_train)\n print (\"X_test: \", X_test)\n\n\n\n","sub_path":"other_funcs/cv.py","file_name":"cv.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"545414471","text":"#Python Flowchart depicting a flow control(conditional) statement\n#This material was used for educational purposes only\n#Many thanks to Al Swegart\n\nname = input(\"What is your name? \")\nage = int(input(\"How old are you? \"))\nif name == 'Alice':\n print('Hi, Alice.')\nelif age < 12:\n print('You are not Alice, kiddo.')\nelse:\n print(\"You are neither Alice nor a little kid\")\n","sub_path":"flowcharts/flowchart6/flowchart6.py","file_name":"flowchart6.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"388562384","text":"import sys\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\nclass Simple_drawing_window1(QWidget):\n def __init__(self):\n QWidget.__init__(self,None)\n self.setWindowTitle(\"draw\")\n\n def paintEvent(self,e):\n p = QPainter()\n p.begin(self)\n\n p.setPen(QColor(0, 0, 0))\n p.setBrush(QColor(0, 127, 0))\n p.drawPolygon([QPoint(70, 100), QPoint(100, 110),\n QPoint(130, 100), QPoint(100, 150),\n ])\n\n p.setPen(QColor(0, 0, 0))\n p.setBrush(QColor(255, 127, 0))\n p.drawPie(50, 150, 100, 100, 0, 180 * 16)\n\n p.drawPolygon([\n QPoint(50, 200), QPoint(150, 200), QPoint(100, 400),\n ])\nclass Simple_drawing(Simple_drawing_window1):\n def __init__(self):\n Simple_drawing_window1.__init__(self)\n \n\n def paintEvent(self,e):\n p = QPainter()\n p.begin(self)\n\n p.setPen(QColor(0, 0, 0))\n p.setBrush(QColor(0, 221, 0))\n p.drawPolygon([QPoint(250, 100), QPoint(250, 300),\n QPoint(300, 190)\n ])\n\n p.setPen(QColor(255, 255, 0))\n p.setBrush(QColor(255, 255, 0))\n p.drawPolygon([\n QPoint(50, 100), QPoint(250, 100), QPoint(50, 300),QPoint(250, 300)\n ])\n\n\n\ndef main():\n app = QApplication(sys.argv)\n\n w = Simple_drawing()\n w.show()\n \n\n return app.exec_()\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"Simple_drawing_window2.py","file_name":"Simple_drawing_window2.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"647726304","text":"# -*- coding: utf-8 -*-\nimport time\nimport sys\nfrom lib import qiot\n\nsys.path.insert(0, '/usr/lib/python2.7/bridge/') \nfrom bridgeclient import BridgeClient as bridgeclient\n\nbridge_client = bridgeclient()\n\n\"\"\"\n\tThis sample code demo button send to QIoT Suite \n\trequirement:\n\t-- opkg update\n\t-- opkg install distribute\n\t-- opkg install python-openssl\n\t-- easy_install pip\n\t-- pip install paho-mqtt\n\n configure the system to allow various Bridge related services to run,command as follow \n\t-- uci set yunbridge.config.disabled=’0’\n\t-- uci commit\n\t-- reboot\n\trun command: python button.py\n\"\"\"\n\n\"\"\"\n\tSetup connection options\n\"\"\"\nconnection = None\nconnection = qiot.connection(qiot.protocol.MQTT)\nconnection_options = connection.read_resource('./res/resourceinfo.json', '/ssl/')\nconnection.connect(connection_options)\n\n\"\"\"\n\tSend sensor's data to QIoT Suite Lite by Resourcetype.\n\"\"\"\nwhile 1:\t\n\tbutton_state = bridge_client.get(\"button\")\n\tconnection.publish_by_id(\"button\", str(button_state))\n\ttime.sleep(1)\n","sub_path":"python/device/mtk-linkit-7688-duo/examples/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"430200923","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExamples of use:\n$ python benchmark-word-embeddings.py -a models/wiki.fr.bin -c embeddings/wiki.fr.vec -q benchmarks/questions-words-fr.txt -o results/wiki-benchmark.txt -m exception -v 100000\n$ python benchmark-word-embeddings.py -a models/wiki.fr.bin -b models/cc.fr.300.bin -c embeddings/wiki.fr.vec -d cc.fr.300.vec -q benchmarks/questions-words-fr.txt -o results/aggregated-benchmark.txt -m top -t 1 -v 100000\n\"\"\"\n__author__ = 'Brice Olivier'\n\n\nimport io\nimport numpy as np\nfrom optparse import OptionParser\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\n\ndef load_vec(emb_path, nmax=50000):\n \"\"\"Load word embeddings file and returns a numpy.matrix with embeddings,\n an id2word dictionnary and a word2id dictionnary.\"\"\"\n vectors = []\n word2id = {}\n with io.open(emb_path, 'r', encoding='utf-8',\n newline='\\n', errors='ignore') as f:\n next(f)\n for i, line in enumerate(f):\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n assert word not in word2id, 'word found twice'\n vectors.append(vect)\n word2id[word] = len(word2id)\n if len(word2id) == nmax:\n break\n id2word = {v: k for k, v in word2id.items()}\n embeddings = np.vstack(vectors)\n return embeddings, id2word, word2id\n\n\ndef get_nn(word_embedding, embeddings, id2word, k=1):\n \"\"\"take as input a word embedding and find its K nearest neighbors\n within a numpy.matrix of embeddings and its id2word dictionnary.\"\"\"\n scores = (embeddings / np.linalg.norm(embeddings, 2, 1)[:, None]).dot(\n word_embedding / np.linalg.norm(word_embedding))\n k_best = scores.argsort()[-k:][::-1]\n return [id2word[idx] for _, idx in enumerate(k_best)], scores[k_best]\n\n\ndef get_custom_nn(word_embedding1, word_embedding2, embeddings1, embeddings2, id2word, k=1):\n \"\"\"embeddings1 and embeddings2 must have shared id2word\n \"\"\"\n scores1 = (embeddings1 / np.linalg.norm(embeddings1, 2, 1)[:, None]).dot(\n word_embedding1 / np.linalg.norm(word_embedding1))\n scores2 = (embeddings2 / np.linalg.norm(embeddings2, 2, 1)[:, None]).dot(\n word_embedding2 / np.linalg.norm(word_embedding2))\n scores = (scores1 + scores2) / 2\n k_best = scores.argsort()[-k:][::-1]\n return [id2word[idx] for _, idx in enumerate(k_best)], scores[k_best]\n\n\ndef evaluate_predicted_embedding(embeddings, id2word, word2id, question, method=\"top\", k=1):\n \"\"\"test\"\"\"\n if method == \"exception\":\n k = 2\n if type(embeddings) is tuple:\n pred_embedding1 = embeddings[0][word2id[question[1]]] - \\\n embeddings[0][word2id[question[0]]] + \\\n embeddings[0][word2id[question[2]]]\n pred_embedding2 = embeddings[1][word2id[question[1]]] - \\\n embeddings[1][word2id[question[0]]] + \\\n embeddings[1][word2id[question[2]]]\n nn, _ = get_custom_nn(pred_embedding1, pred_embedding2,\n embeddings[0], embeddings[1], id2word, k)\n else:\n pred_embedding = embeddings[word2id[question[1]]] - \\\n embeddings[word2id[question[0]]] + \\\n embeddings[word2id[question[2]]]\n nn, _ = get_nn(pred_embedding, embeddings, id2word, k)\n nn = np.array(nn)\n # print(\"question ^ top predictions: %s - %s + %s = %s ^ %s\" %\n # (question[1], question[0], question[2], question[3], nn))\n if method == \"top\":\n return question[3] in nn\n elif method == \"exception\":\n return question[3] == nn[nn != question[2]][0]\n\n\ndef load_benchmark(benchmark_path):\n \"\"\"Load word embeddings file and returns a numpy.matrix with embeddings,\n an id2word dictionnary and a word2id 
dictionnary.\"\"\"\n questions_words = {}\n preset_results = {}\n with io.open(benchmark_path, 'r', encoding='utf-8',\n newline='\\n', errors='ignore') as f:\n for _, line in enumerate(f):\n line = line.split(\"\\n\")[0]\n if \":\" in line:\n key = line.rstrip().split(\":\")[1][1:]\n questions_words[key] = []\n preset_results[key] = []\n else:\n question = line.rstrip().split(\" \")\n assert(len(question) == 4)\n questions_words[key].append(question)\n preset_results[key].append(0)\n return questions_words, preset_results\n\n\ndef compute_embeddings_for_oov(embeddings, oov_words, id2word, word2id, model_path):\n _, oov_words_queries_file_name = tempfile.mkstemp()\n _, oov_words_file_name = tempfile.mkstemp()\n print(\"model %s\" % model_path)\n print(\"oov words: %s\" % oov_words_queries_file_name)\n print(\"oov words embeddings: %s\" % oov_words_file_name)\n with io.open(oov_words_queries_file_name, 'w', encoding='utf-8') as f:\n for w in oov_words:\n f.write(w + '\\n')\n subprocess.call('./fastText-0.1.0/fasttext' +\n ' print-word-vectors ' +\n model_path +\n '<' +\n oov_words_queries_file_name +\n '>' + oov_words_file_name,\n shell=True)\n vectors = []\n with io.open(oov_words_file_name, 'r', encoding='utf-8') as f:\n for line in f:\n fields = line.strip().split(\" \")\n word = fields[0]\n vec = [float(v) for v in fields[1:]]\n if len(vec) != embeddings.shape[1]:\n # print(\"Warning: embedding size %d, with field[0] %s, line starting with %s\" %\n # (len(vec), word, line[0:10]), file=sys.stderr)\n pass\n elif np.linalg.norm(vec) == 0:\n # print(\"Warning: embedding for word %s has norm 0\" % word, file=sys.stderr)\n pass\n else:\n word2id[word] = len(word2id) - 1\n id2word[len(word2id) - 1] = word\n vectors.append(vec)\n embeddings = np.row_stack((embeddings, np.vstack(vectors)))\n return embeddings, id2word, word2id\n\n\ndef run_benchmark(questions_words, preset_results, embeddings, id2word, word2id, output_path, method=\"top\", k=3):\n t0 = time.time()\n for key in questions_words:\n print(\"\\n____________________________________________________\")\n print(\"Questions-Words category%s\" % key)\n i = 0\n t1 = time.time()\n for question in questions_words[key]:\n preset_results[key][i] = evaluate_predicted_embedding(\n embeddings, id2word, word2id, question, method, k)\n sys.stdout.write(\"\\rProgress: %.4f\\tAccuracy: %.4f\" %\n (float(i + 1) / len(preset_results[key]),\n float(np.sum(preset_results[key])) / (i + 1)))\n sys.stdout.flush()\n i += 1\n t2 = time.time()\n with open(output_path, 'a') as f:\n f.write(\"____________________________________________________\\n\")\n f.write(\"Questions-Words category%s\\n\" % key)\n f.write(\"Accuracy: %.4f\\n\" %\n (float(np.sum(preset_results[key])) /\n len(preset_results[key])))\n f.write(\"Time: %.0fs\\n\" % (t1 - t0))\n print('\\nTime: %.4fs' % (t2 - t1))\n t3 = time.time()\n overall_accuracy = float(np.sum([np.sum(preset_results[key])\n for key in questions_words])) / np.sum(\n [len(preset_results[key])\n for key in questions_words])\n overall_time = t3 - t0\n print(\"____________________________________________________\")\n print(\"Overall accuracy %.4f\\n\" % overall_accuracy)\n print(\"Overall time %.0f\\n\" % overall_time)\n with open(output_path, 'a') as f:\n f.write(\"____________________________________________________\\n\")\n f.write(\"Overall accuracy %.4f\\n\" % overall_accuracy)\n f.write(\"Overall time %.0f\\n\" % overall_time)\n\n\nparser = OptionParser()\n\nparser.add_option(\"-a\", \"--model1\", dest=\"model_path1\",\n 
help=\"Path to the model1 with embedding (.bin).\",\n default=None)\n\nparser.add_option(\"-b\", \"--model2\", dest=\"model_path2\",\n help=\"Path to the model2 with embedding (.bin).\",\n default=None)\n\nparser.add_option(\"-c\", \"--embeddings1\", dest=\"embeddings_path1\",\n help=\"Path to the embeddings1 (.vec).\",\n default=None)\n\nparser.add_option(\"-d\", \"--embeddings2\", dest=\"embeddings_path2\",\n help=\"Path to the embeddings2 (.vec).\",\n default=None)\n\nparser.add_option(\"-q\", \"--questions\", dest=\"questions_path\",\n help=\"Path to the benchmark questions (.txt).\",\n default=None)\n\nparser.add_option(\"-o\", \"--output\", dest=\"output_path\",\n help=\"Path to the benchmark output (.txt).\",\n default=None)\n\nparser.add_option(\"-v\", \"--vocabulary_size\", dest=\"vocabulary_size\",\n help=\"Size of the vocabulary to import from .vec (int).\",\n default=100000)\n\nparser.add_option(\"-m\", \"--method\", dest=\"method\",\n help=\"method to evaluate predicted embedding\"\n \"(\\\"top\\\" or \\\"exception\\\").\",\n default=\"top\")\n\nparser.add_option(\"-t\", \"--top\", dest=\"top\",\n help=\"argument for top method (int)\",\n default=3)\n\nif __name__ == \"__main__\":\n (options, args) = parser.parse_args()\n options.top = int(options.top)\n options.vocabulary_size = int(options.vocabulary_size)\n print(\"loading benchmark... \", end=\"\")\n questions_words, preset_results = load_benchmark(options.questions_path)\n print(\"ok\\nloading embeddings1... \", end=\"\")\n embeddings1, id2word1, word2id1 = load_vec(options.embeddings_path1,\n options.vocabulary_size)\n print(\"ok\")\n word_list = np.unique([item for sublist in questions_words.values()\n for subsublist in sublist for item in subsublist])\n oov_words1 = np.setdiff1d(word_list, id2word1.values())\n print(\"%d question words not in embeddings1\" % len(oov_words1))\n if all(v is not None for v in [options.model_path2, options.embeddings_path2]):\n print(\"loading embeddings2... \", end=\"\")\n embeddings2, id2word2, word2id2 = load_vec(options.embeddings_path2,\n options.vocabulary_size)\n print(\"ok\")\n # embeddings2 = embeddings2[id2word1.keys()]\n # id2word2 = id2word1\n # word2id2 = word2id1\n words_embeddings_1 = word2id1.keys()\n words_embeddings_2 = word2id2.keys()\n oov_words1 = np.concatenate((\n oov_words1, np.setdiff1d(words_embeddings_2, words_embeddings_1)))\n oov_words2 = np.setdiff1d(word_list, words_embeddings_2)\n print(\"%d question words not in embeddings2\" % len(oov_words2))\n oov_words2 = np.concatenate((\n oov_words2, np.setdiff1d(words_embeddings_1, words_embeddings_2)))\n print(\"computing oov words for embeddings2... \", end=\"\")\n embeddings2, id2word2, word2id2 = compute_embeddings_for_oov(\n embeddings2, oov_words2, id2word2, word2id2, options.model_path2)\n print(\"ok\\ncomputing oov words for embeddings1... 
\", end=\"\")\n\n embeddings1, id2word1, word2id1 = compute_embeddings_for_oov(\n embeddings1, oov_words1, id2word1, word2id1, options.model_path1)\n print(\"ok\")\n if all(v is not None for v in [options.model_path2, options.embeddings_path2]):\n words = np.intersect1d(id2word1.values(), id2word2.values())\n id1 = [word2id1[w] for w in words]\n id2 = [word2id2[w] for w in words]\n embeddings1 = embeddings1[id1]\n embeddings2 = embeddings2[id2]\n id2word = dict(zip(range(0, len(words)), words))\n word2id = {v: k for k, v in id2word.items()}\n print(\"running benchmark...\")\n run_benchmark(questions_words, preset_results,\n (embeddings1, embeddings2), id2word, word2id,\n options.output_path, options.method, options.top)\n else:\n run_benchmark(questions_words, preset_results, embeddings1, id2word1,\n word2id1, options.output_path, options.method,\n options.top)\n","sub_path":"benchmark_word_embeddings.py","file_name":"benchmark_word_embeddings.py","file_ext":"py","file_size_in_byte":12429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"152586407","text":"# This action is \"checkable\" as defined in the INI settings. \n# Therefore, we can use the AttachCamera station parameter to detect if the button is checked or unchecked.\n# This script will be triggered when the button is clicked (checked or unchecked)\n\nfrom robolink import * # RoboDK API\nfrom robodk import * # Robot toolbox\nimport os\n\nclass RunApplication:\n \"\"\"Class to detect when the terminate signal is emited.\n Example:\n run = RunApplication()\n while run.run:\n # your loop\n\n \"\"\"\n run = True\n def __init__(self): \n import signal\n signal.signal(signal.SIGTERM, self.clean_exit)\n signal.signal(signal.SIGINT, self.clean_exit) # ctrl + c \n\n def clean_exit(self,signum, frame):\n self.run = False\n \nrun = RunApplication()\n\ndef AttachCamera():\n RDK = Robolink()\n\n item2station_pose = eye(4)\n view_pose_last = eye(4)\n last_item = None\n\n # Get the file name of this file/script\n filename = getFileName(__file__)\n\n # Allow running an infinite loop if this script is run without the parameter AttachCamera\n infinite_loop = False\n if RDK.getParam(filename) is None:\n infinite_loop = True\n\n RDK.Render()\n \n #run = RunApplication()\n\n # Run until the station parameter AttachCamera is set to 0\n while infinite_loop or RDK.getParam(filename) == 1: \n #while run.run:\n # Retrieve user selection\n selected_items = RDK.Selection()\n if len(selected_items) <= 0:\n last_item = None\n continue\n \n # Use the first selected item to attach the camera\n item = selected_items[0]\n \n # Prevent selecting programs or instructions or anything that doesn't move\n if item.type == ITEM_TYPE_ROBOT or item.type == ITEM_TYPE_TOOL or item.type == ITEM_TYPE_FRAME or item.type == ITEM_TYPE_OBJECT or item.type == ITEM_TYPE_TARGET: \n item_pose = item.PoseAbs() # Selected item pose with respect to the station reference \n item2station_pose = item_pose.inv()\n\n if last_item != item:\n # If it is the first time we select this item: update the relationship camera 2 item pose\n view_pose = RDK.ViewPose() # View Pose (camera pose with respect to the station reference)\n camera2item_pose = (item2station_pose * view_pose.inv()).inv()\n msg = 'Camera attached to %s' % item.Name()\n print(msg)\n RDK.ShowMessage(msg, False)\n last_item = item\n \n else: \n # calculate the new view pose and udpate it\n view_pose = camera2item_pose * item2station_pose\n \n # Only update if the view pose changed\n if view_pose != view_pose_last:\n view_pose_last = view_pose\n RDK.setViewPose(view_pose)\n RDK.Render()\n\ndef runmain():\n # Make sure we don't run this file if we are unchecking it\n if len(sys.argv) >= 2:\n if sys.argv[1] == \"Unchecked\":\n print(\"This action is triggered by the uncheck action\")\n quit()\n \n # Important: This setting will tell RoboDK App loader to not kill the process a few seconds after the terminate function is called\n # This is needed if we want the user input to save the file\n #print(\"App Setting: Skip kill\")\n #sys.stdout.flush()\n \n AttachCamera()\n\n# Function to run when this module is executed on its own or by selecting the action button in RoboDK\nif __name__ == \"__main__\": \n runmain()\n","sub_path":"PluginAppLoader/Apps/Record/AttachCamera.py","file_name":"AttachCamera.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"307893287","text":"## Material Views for CRUD\nfrom .forms import *;\n\n\ndef opt(clase, name, href):\n return {\"name\":name, \"href\":href, \"class\": clase}\n\nclass context:\n def __init__(self,request=None):\n self.contexto = {}\n if request is not None:\n self.set_options(request)\n\n def set_page_info(self,title=\"\",page_name=\"\",page_description=\"\",breadcrumbs=\"\",model_name=\"\",action_url=\"\", submit_text=\"\"):\n self.contexto[\"title\"] = title;\n self.contexto[\"page_name\"] = page_name;\n self.contexto[\"page_description\"] = page_description;\n self.contexto[\"breadcrumbs\"] = breadcrumbs;\n self.contexto[\"model_name\"] = model_name;\n self.contexto[\"action_url\"] = action_url;\n self.contexto[\"submit_text\"] = submit_text;\n\n def set_form(self, form):\n self.contexto[\"form\"] = form\n\n def set_model_list(self,model_list):\n self.contexto[\"model_list\"]=model_list\n\n def set_name_value(self, name, value):\n self.contexto[name] = value\n\n def get(self):\n return self.contexto\n\n def set_options(self,request):\n perfil =[\n opt(\"fa fa-edit\", \"Editar Perfil\", \"/welcome/profile\")\n ]\n turnos =[\n opt(\"fa fa-desktop\", \"Lobby Turnos\", \"/turnos/\")\n ]\n crud_usuarios = [\n opt(\"fa fa-user-plus\", \"Registrar Usuario\", \"/admin/crear/usuario/\"),\n opt(\"fa fa-list-alt\", \"Consultar Usuarios\", \"/admin/consultar/usuarios\")]\n\n crud_sucursales = [\n opt(\"fa fa-bank\", \"Registrar Sucursal\", \"/admin/crear/sucursal/\"),\n opt(\"fa fa-list-alt\", \"Consultar Sucursales\", \"/admin/consultar/sucursales/\"),\n ]\n\n crud_clientes = [\n opt(\"fa fa-users\", \"Registrar Cliente\", \"/registrar/cliente/\"),\n opt(\"fa fa-list-alt\", \"Consultar Clientes\", \"/consultar/cliente/\")\n ]\n\n crud_asignacion =[\n opt(\"fa fa-child\", \"Asignación Usuario\", \"/admin/asignar/usuario/sucursal/\")\n ]\n\n atencion_turnos = [\n opt(\"fa fa-desktop\", \"Atención de turnos\", \"/turnos/cajero/atencion/\")\n ]\n\n gerente = [\n opt(\"fa fa-bar-chart\", \"Reportes\", \"/turnos/gerente/reportes/\")\n ]\n\n crud_publicidad = [\n opt(\"fa fa-video-camera\", \"Registrar publicidad\", \"/admin/registrar/publicidad/\"),\n opt(\"fa fa-list-alt\", \"Consultar publicidad\", \"/admin/consultar/publicidad\")\n\n ]\n\n usuario = UsuarioSucursal.objects.all().filter(user_id=request.user.id)[:1]\n if usuario:\n usuario = usuario.get()\n if usuario.rol==\"C\":\n self.contexto[\"options\"] = perfil + crud_clientes + atencion_turnos\n else:\n self.contexto[\"options\"] = perfil + gerente + crud_usuarios + crud_asignacion + crud_clientes\n\n\n else:\n if request.user.is_superuser:\n self.contexto[\"options\"] = perfil + turnos + crud_usuarios + crud_sucursales + crud_publicidad + crud_asignacion + crud_clientes\n\n\n\n","sub_path":"banco_xyz/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"164474194","text":"#!/usr/bin/python \n\nimport time\nfrom SimpleCV import *\nfrom SimpleCV.Display import Display, pg\n\ndisplay = Display(resolution = (800, 600)) #create a new display to draw images on\ncam = Camera() #initialize the camera\ndone = False # setup boolean to stop the program\n\n# Loop until not needed\nwhile not display.isDone():\n cam.getImage().flipHorizontal().save(display) # get image, flip it so it looks mirrored, save to display\n time.sleep(0.01) # Let the program sleep for 1 millisecond so the computer can do other things\n if display.mouseLeft:\n display.done = True #if the left arrow is pressed, close the program\n","sub_path":"SimpleCV/examples/displaycam.py","file_name":"displaycam.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"639576696","text":"# -*- coding: utf-8 -*-\n\"\"\"\n tests/test_carrier.py\n\n :copyright: (C) 2014 by Openlabs Technologies & Consulting (P) Limited\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport unittest\nimport datetime\nfrom decimal import Decimal\nfrom dateutil.relativedelta import relativedelta\n\nimport pycountry\n\nimport trytond.tests.test_tryton\nfrom trytond.tests.test_tryton import POOL, USER, DB_NAME, CONTEXT\nfrom trytond.transaction import Transaction\n\n\nclass CarrierTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n Set up data used in the tests.\n this method is called before each test function execution.\n \"\"\"\n trytond.tests.test_tryton.install_module('carrier_pricelist')\n\n self.Currency = POOL.get('currency.currency')\n self.Company = POOL.get('company.company')\n self.Party = POOL.get('party.party')\n self.User = POOL.get('res.user')\n self.ProductTemplate = POOL.get('product.template')\n self.Uom = POOL.get('product.uom')\n self.ProductCategory = POOL.get('product.category')\n self.Product = POOL.get('product.product')\n self.Country = POOL.get('country.country')\n self.Subdivision = POOL.get('country.subdivision')\n self.Employee = POOL.get('company.employee')\n self.Carrier = POOL.get('carrier')\n self.PriceList = POOL.get('product.price_list')\n self.Sale = POOL.get('sale.sale')\n\n def _create_fiscal_year(self, date=None, company=None):\n \"\"\"\n Creates a fiscal year and requried sequences\n \"\"\"\n FiscalYear = POOL.get('account.fiscalyear')\n Sequence = POOL.get('ir.sequence')\n SequenceStrict = POOL.get('ir.sequence.strict')\n Company = POOL.get('company.company')\n\n if date is None:\n date = datetime.date.today()\n\n if company is None:\n company, = Company.search([], limit=1)\n\n invoice_sequence, = SequenceStrict.create([{\n 'name': '%s' % date.year,\n 'code': 'account.invoice',\n 'company': company,\n }])\n fiscal_year, = FiscalYear.create([{\n 'name': '%s' % date.year,\n 'start_date': date + relativedelta(month=1, day=1),\n 'end_date': date + relativedelta(month=12, day=31),\n 'company': company,\n 'post_move_sequence': Sequence.create([{\n 'name': '%s' % date.year,\n 'code': 'account.move',\n 'company': company,\n }])[0],\n 'out_invoice_sequence': invoice_sequence,\n 'in_invoice_sequence': invoice_sequence,\n 'out_credit_note_sequence': invoice_sequence,\n 'in_credit_note_sequence': invoice_sequence,\n }])\n FiscalYear.create_period([fiscal_year])\n return fiscal_year\n\n def _create_coa_minimal(self, company):\n \"\"\"Create a minimal chart of accounts\n \"\"\"\n AccountTemplate = POOL.get('account.account.template')\n Account = POOL.get('account.account')\n\n account_create_chart = POOL.get(\n 'account.create_chart', type=\"wizard\")\n\n account_template, = AccountTemplate.search([('parent', '=', None)])\n\n session_id, _, _ = account_create_chart.create()\n create_chart = account_create_chart(session_id)\n create_chart.account.account_template = account_template\n create_chart.account.company = company\n create_chart.transition_create_account()\n\n receivable, = Account.search([\n ('kind', '=', 'receivable'),\n ('company', '=', company),\n ])\n payable, = Account.search([\n ('kind', '=', 'payable'),\n ('company', '=', company),\n ])\n create_chart.properties.company = company\n create_chart.properties.account_receivable = receivable\n create_chart.properties.account_payable = payable\n create_chart.transition_create_properties()\n\n def _get_account_by_kind(self, kind, company=None, silent=True):\n 
\"\"\"Returns an account with given spec\n\n :param kind: receivable/payable/expense/revenue\n :param silent: dont raise error if account is not found\n \"\"\"\n Account = POOL.get('account.account')\n Company = POOL.get('company.company')\n\n if company is None:\n company, = Company.search([], limit=1)\n\n accounts = Account.search([\n ('kind', '=', kind),\n ('company', '=', company)\n ], limit=1)\n if not accounts and not silent:\n raise Exception(\"Account not found\")\n return accounts[0] if accounts else False\n\n def _create_payment_term(self):\n \"\"\"Create a simple payment term with all advance\n \"\"\"\n PaymentTerm = POOL.get('account.invoice.payment_term')\n\n return PaymentTerm.create([{\n 'name': 'Direct',\n 'lines': [('create', [{'type': 'remainder'}])]\n }])\n\n def _create_countries(self, count=5):\n \"\"\"\n Create some sample countries and subdivisions\n \"\"\"\n for country in list(pycountry.countries)[0:count]:\n countries = self.Country.create([{\n 'name': country.name,\n 'code': country.alpha2,\n }])\n try:\n divisions = pycountry.subdivisions.get(\n country_code=country.alpha2\n )\n except KeyError:\n pass\n else:\n for subdivision in list(divisions)[0:count]:\n self.Subdivision.create([{\n 'country': countries[0].id,\n 'name': subdivision.name,\n 'code': subdivision.code,\n 'type': subdivision.type.lower(),\n }])\n\n def setup_defaults(self):\n \"\"\"Creates default data for testing\n \"\"\"\n self.currency, = self.Currency.create([{\n 'name': 'US Dollar',\n 'code': 'USD',\n 'symbol': '$',\n }])\n\n with Transaction().set_context(company=None):\n company_party, = self.Party.create([{\n 'name': 'openlabs'\n }])\n employee_party, = self.Party.create([{\n 'name': 'Jim'\n }])\n\n self.company, = self.Company.create([{\n 'party': company_party,\n 'currency': self.currency,\n }])\n\n self.employee, = self.Employee.create([{\n 'party': employee_party.id,\n 'company': self.company.id,\n }])\n\n self.User.write([self.User(USER)], {\n 'company': self.company,\n 'main_company': self.company,\n 'employees': [('add', [self.employee.id])],\n })\n # Write employee separately as employees needs to be saved first\n self.User.write([self.User(USER)], {\n 'employee': self.employee.id,\n })\n\n CONTEXT.update(self.User.get_preferences(context_only=True))\n\n # Create Fiscal Year\n self._create_fiscal_year(company=self.company.id)\n # Create Chart of Accounts\n self._create_coa_minimal(company=self.company.id)\n # Create a payment term\n self.payment_term, = self._create_payment_term()\n\n # Create carrier\n carrier_price_list, = self.PriceList.create([{\n 'name': 'PL 1',\n 'company': self.company.id,\n 'lines': [\n ('create', [{\n 'formula': '(unit_price * 0.0) + 5',\n }])\n ],\n }])\n carrier_party, = self.Party.create([{\n 'name': 'Pricelist Carrier',\n }])\n\n day, = self.Uom.search([('name', '=', 'Day')])\n carrier_product_template, = self.ProductTemplate.create([{\n 'name': 'Carrier Pricelist',\n 'type': 'service',\n 'salable': True,\n 'default_uom': day.id,\n 'sale_uom': day.id,\n 'account_revenue': self._get_account_by_kind('revenue').id,\n 'list_price': Decimal('50'),\n 'cost_price': Decimal('40'),\n }])\n carrier_product, = self.Product.create([{\n 'template': carrier_product_template.id,\n }])\n self.carrier, = self.Carrier.create([{\n 'party': carrier_party.id,\n 'carrier_cost_method': 'pricelist',\n 'carrier_product': carrier_product.id,\n 'price_list': carrier_price_list.id,\n }])\n\n unit, = self.Uom.search([('name', '=', 'Unit')])\n\n self.template1, = 
self.ProductTemplate.create([{\n 'name': 'Product 1',\n 'type': 'goods',\n 'salable': True,\n 'default_uom': unit.id,\n 'sale_uom': unit.id,\n 'list_price': Decimal('100'),\n 'cost_price': Decimal('90'),\n 'account_revenue': self._get_account_by_kind('revenue').id,\n 'products': [('create', [{\n 'code': 'product-1'\n }])]\n }])\n\n self.template2, = self.ProductTemplate.create([{\n 'name': 'Product 2',\n 'type': 'goods',\n 'salable': True,\n 'default_uom': unit.id,\n 'sale_uom': unit.id,\n 'list_price': Decimal('50'),\n 'cost_price': Decimal('40'),\n 'account_revenue': self._get_account_by_kind('revenue').id,\n 'products': [('create', [{\n 'code': 'product-1'\n }])]\n }])\n\n self.product1 = self.template1.products[0]\n self.product2 = self.template2.products[0]\n\n # Create sale party\n self.sale_party, = self.Party.create([{\n 'name': 'Test Sale Party',\n 'addresses': [('create', [{\n 'name': 'John Doe',\n 'street': '123 Main Street',\n 'zip': '83702',\n 'city': 'Boise',\n }])]\n }])\n\n def test_0010_test_shipping_price(self):\n \"\"\"Test shipping price\n \"\"\"\n with Transaction().start(DB_NAME, USER, context=CONTEXT):\n self.setup_defaults()\n\n # Create sale order\n sale, = self.Sale.create([{\n 'reference': 'S-1001',\n 'company': self.company,\n 'currency': self.currency,\n 'payment_term': self.payment_term,\n 'party': self.sale_party.id,\n 'invoice_address': self.sale_party.addresses[0].id,\n 'shipment_address': self.sale_party.addresses[0].id,\n 'carrier': self.carrier.id,\n 'lines': [\n ('create', [{\n 'type': 'line',\n 'quantity': 2,\n 'product': self.product1,\n 'unit_price': Decimal('100.00'),\n 'description': 'Test Description1',\n 'unit': self.product1.template.default_uom,\n }, {\n 'type': 'line',\n 'quantity': 2,\n 'product': self.product2,\n 'unit_price': Decimal('50.00'),\n 'description': 'Test Description2',\n 'unit': self.product2.template.default_uom,\n }]),\n ]\n }])\n\n self.assertEqual(sale.total_amount, Decimal('300'))\n\n with Transaction().set_context(company=self.company.id):\n # Quote the sale\n self.Sale.quote([sale])\n self.Sale.confirm([sale])\n self.Sale.process([sale])\n\n # Shipping line is added and total amount got updated.\n self.assertEqual(len(sale.lines), 3)\n self.assertEqual(sale.total_amount, Decimal('320'))\n\n self.assertEqual(len(sale.shipments), 1)\n\n shipment, = sale.shipments\n self.assertEqual(shipment.cost, Decimal(20))\n\n\ndef suite():\n \"\"\"\n Define suite\n \"\"\"\n test_suite = trytond.tests.test_tryton.suite()\n test_suite.addTests(\n unittest.TestLoader().loadTestsFromTestCase(CarrierTestCase)\n )\n return test_suite\n\nif __name__ == '__main__':\n unittest.TextTestRunner(verbosity=2).run(suite())\n","sub_path":"tests/test_carrier.py","file_name":"test_carrier.py","file_ext":"py","file_size_in_byte":12329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"396894985","text":"_base_ = [\n '../_base_/datasets/coco_caption.py',\n '../_base_/default_runtime.py',\n]\n\n# model settings\nmodel = dict(\n type='Blip2Caption',\n tokenizer=dict(\n type='AutoTokenizer', name_or_path='facebook/opt-2.7b',\n use_fast=False),\n vision_backbone=dict(\n type='BEiTViT',\n # eva-g without the final layer\n arch=dict(\n embed_dims=1408,\n num_layers=39,\n num_heads=16,\n feedforward_channels=6144,\n ),\n img_size=364,\n patch_size=14,\n out_indices=-2,\n layer_scale_init_value=0.0,\n use_abs_pos_emb=True,\n use_rel_pos_bias=False,\n frozen_stages=39,\n final_norm=False,\n use_shared_rel_pos_bias=False,\n out_type='raw'),\n text_backbone=dict(\n type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'),\n multimodal_backbone=dict(\n type='Qformer',\n model_style='bert-base-uncased',\n vision_model_width=1408,\n add_cross_attention=True,\n cross_attention_freq=2,\n num_query_token=32),\n vision_neck=dict(\n type='LinearClsHead',\n in_channels=768,\n num_classes=2560,\n ),\n prompt='a photo of',\n max_txt_len=30)\n\n# schedule settings\noptim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05))\n\nparam_scheduler = [\n dict(\n type='CosineAnnealingLR',\n by_epoch=True,\n begin=0,\n end=10,\n )\n]\n\ntrain_cfg = dict(by_epoch=True, max_epochs=10)\nval_cfg = dict()\ntest_cfg = dict()\n\n# dataset settings\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='Resize',\n scale=(364, 364),\n interpolation='bicubic',\n backend='pillow'),\n dict(type='PackInputs', meta_keys=['image_id']),\n]\n\nval_dataloader = dict(dataset=dict(pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n","sub_path":"configs/blip2/blip2-opt2.7b_8xb32_caption.py","file_name":"blip2-opt2.7b_8xb32_caption.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"285441976","text":"import os\nimport requests\nimport csv\nimport json\nimport pytz\n\nfrom cairosvg import svg2png\nfrom datetime import date, datetime, timedelta\nimport numpy as np\n\n# source: https://data.cityofchicago.org/Health-Human-Services/COVID-19-Vaccine-Doses-by-ZIP-Code-Series-Complete/8u6c-48j3\nvax_url = \"https://data.cityofchicago.org/api/views/553k-3xzc/rows.json?accessType=DOWNLOAD\"\nvax_svg_path = os.path.join(os.getcwd(), \"data\", \"zipcodes-vax.svg\")\n\n# source: https://data.cityofchicago.org/Health-Human-Services/COVID-19-Cases-Tests-and-Deaths-by-ZIP-Code/yhhz-zm2v\ndeaths_url= \"https://data.cityofchicago.org/api/views/yhhz-zm2v/rows.json?accessType=DOWNLOAD\"\ndeaths_svg_path = os.path.join(os.getcwd(), \"data\", \"zipcodes-deaths.svg\")\n\nvax_colorscale = [\"#feebe2\", \"#fbb4b9\", \"#f768a1\", \"#c51b8a\", \"#7a0177\"]\ndeaths_colorscale = [\"#feebe2\", \"#f3cea3\", '#f3b875', '#C83302', '#992702']\n\nnow = datetime.now(pytz.timezone('America/Chicago'))\nyesterday = (now - timedelta(days = 1))\nvax_output_path = os.path.join(os.getcwd(), \"exports\", \"vax-{}.png\".format(\n now.strftime(\"%Y-%m-%d-%H%M\")\n))\ndeaths_output_path = os.path.join(os.getcwd(), \"exports\", \"deaths-{}.png\".format(\n now.strftime(\"%Y-%m-%d-%H%M\")\n))\n\ndef get_tweet():\n vax_res = requests.get(vax_url)\n vax_res_json = json.loads(vax_res.text)\n\n deaths_res = requests.get(deaths_url)\n deaths_res_json = json.loads(deaths_res.text)\n\n # loop over city data and return a dictionary of zipcodes and their vax rates\n # also, a sum of all vaccinations\n vax_perc = {}\n vax_sum = 0\n population_sum = 0\n max_date = max([datetime.strptime(i[9], '%Y-%m-%dT00:00:00') for i in vax_res_json[\"data\"]])\n for row in vax_res_json[\"data\"]:\n # we only want dose cumulatives from the latest date\n # res date should be in the format 2021-01-18T00:00:00\n if max_date == datetime.strptime(row[9], '%Y-%m-%dT00:00:00'):\n vax_perc[row[8]] = float(row[17])\n vax_sum += int(row[16])\n population_sum += int(row[18])\n\n deaths_perc = {}\n deaths_sum = 0\n max_week = max([int(i[9]) for i in deaths_res_json[\"data\"]])\n for row in deaths_res_json[\"data\"]:\n if max_week == int(row[9]):\n # take \"death rate per 100,000 population through the week\"\n deaths_perc[row[8]] = float(row[25])\n deaths_sum += int(row[23])\n\n # then, create a dictionary of zip codes and colors\n vax_colors = get_colors_dict(vax_perc, vax_colorscale, \"vax\")\n deaths_colors = get_colors_dict(deaths_perc, deaths_colorscale, \"deaths\")\n\n write_svg(vax_svg_path, vax_output_path, vax_colors)\n write_svg(deaths_svg_path, deaths_output_path, deaths_colors)\n\n percent_vaccinated = vax_sum / population_sum * 100\n tweet_text = \"As of {date}, Chicago is reporting {vaccinations} people fully vaccinated: {percent}% of the population.\\n\\nWho is dying: Who is vaccinated:\".format(\n date=now.strftime(\"%B %d, %Y\"), # January 26, 2021\n vaccinations=f'{vax_sum:,}',\n percent=round(percent_vaccinated, 1),\n )\n\n alt_text = '''\n Two maps of Chicago, side by side. The map on the left shows COVID-19 deaths\n per capita by ZIP code. The map on the right shows completed COVID-19\n vaccination per capita by ZIP code. 
The maps reveal a disconnect between\n where residents are getting vaccinated and where COVID-19 deaths are\n concentrated.\n '''\n\n return {\n \"tweet_text\": tweet_text,\n \"deaths_map_path\": deaths_output_path,\n \"vax_map_path\": vax_output_path,\n \"alt_text\": alt_text\n }\n\n\ndef get_colors_dict(values_dict, colorscale, data_type):\n colors_dict = {}\n arr = list(values_dict.values())\n\n colors_dict[\"key_color1\"] = colorscale[0]\n colors_dict[\"key_color2\"] = colorscale[1]\n colors_dict[\"key_color3\"] = colorscale[2]\n colors_dict[\"key_color4\"] = colorscale[3]\n colors_dict[\"key_color5\"] = colorscale[4]\n\n key_label1_raw = np.percentile(arr, 20)\n key_label2_raw = np.percentile(arr, 40)\n key_label3_raw = np.percentile(arr, 60)\n key_label4_raw = np.percentile(arr, 80)\n key_label5_raw = np.percentile(arr, 100)\n\n if data_type == \"deaths\":\n colors_dict[\"key_label1\"] = round(key_label1_raw, 1)\n colors_dict[\"key_label2\"] = round(key_label2_raw, 1)\n colors_dict[\"key_label3\"] = round(key_label3_raw, 1)\n colors_dict[\"key_label4\"] = round(key_label4_raw, 1)\n colors_dict[\"key_label5\"] = round(key_label5_raw, 1)\n elif data_type == \"vax\":\n colors_dict[\"key_label1\"] = \"{}%\".format(round(key_label1_raw * 100, 1))\n colors_dict[\"key_label2\"] = \"{}%\".format(round(key_label2_raw * 100, 1))\n colors_dict[\"key_label3\"] = \"{}%\".format(round(key_label3_raw * 100, 1))\n colors_dict[\"key_label4\"] = \"{}%\".format(round(key_label4_raw * 100, 1))\n colors_dict[\"key_label5\"] = \"{}%\".format(round(key_label5_raw * 100, 1))\n else:\n raise Exception(\"Unexpected key passed to function. Choose 'vax' or 'deaths'\")\n\n for name, value in values_dict.items():\n # prepend \"zip\" to make these names less confusing\n # when they appear in the SVG\n svg_name = \"zip{}\".format(name)\n\n # divide results into 5 even percentiles\n if (value < key_label1_raw):\n colors_dict[svg_name] = colors_dict[\"key_color1\"]\n elif (value < key_label2_raw):\n colors_dict[svg_name] = colors_dict[\"key_color2\"]\n elif (value < key_label3_raw):\n colors_dict[svg_name] = colors_dict[\"key_color3\"]\n elif (value < key_label4_raw):\n colors_dict[svg_name] = colors_dict[\"key_color4\"]\n elif (value <= key_label5_raw):\n colors_dict[svg_name] = colors_dict[\"key_color5\"]\n else:\n colors_dict[svg_name] = \"white\"\n\n return colors_dict\n\n\ndef write_svg(svg_path, output_path, colors_dict):\n # write colors into the SVG file and export\n with open(svg_path, \"r\") as svg_file:\n svg_string = svg_file.read().format(**colors_dict)\n svg2png(\n bytestring=svg_string,\n write_to=output_path,\n background_color=\"white\",\n )\n","sub_path":"chivaxbot.py","file_name":"chivaxbot.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"250484552","text":"# -*- coding: utf-8 -*-\n\nimport logging\nlogger = logging.getLogger('ruautogui.bezier')\nlogger.setLevel(logging.DEBUG)\n\nlogConsoleHandler = logging.StreamHandler()\nformatterConsole = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\nlogConsoleHandler.setFormatter(formatterConsole)\nlogger.addHandler(logConsoleHandler)\n\nlogFileHandler = logging.FileHandler(f'ruautogui.log')\nformatterFile = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogFileHandler.setFormatter(formatterFile)\nlogger.addHandler(logFileHandler) \n\nimport math\nimport random\nimport time\nimport ctypes\n\ndef get_mouse_cursor_position():\n cursor = POINT()\n ctypes.windll.user32.GetCursorPos(ctypes.byref(cursor))\n return (cursor.x, cursor.y)\n \nclass POINT(ctypes.Structure):\n _fields_ = [(\"x\", ctypes.c_long),\n (\"y\", ctypes.c_long)]\n\ndef get_curve_points(\n begin_pos=None, \n end_pos=(100,100),\n order=2,\n control_points=[],\n transition_time=None\n ):\n \"\"\" This function gets the following arguments:\n begin_pos - a tuple of x and y coordinates of the starting point \n (default = None - the starting point will be the current cursor\n position);\n end_pos - a tuple of x and y coordinates of the ending point \n (default = (100,100));\n order - an integer that indicates the order of the Bezier curve \n (default=2)\n (if order is 1 the resulting curve is a straight line,\n if order is 2 the resulting curve is quadratic etc.);\n control_points - a list of tuples of x and y coordinates of the control points\n (default is an empty list - the control points will be calculated\n randomily).\n transition_time - an integer - transition time in milliseconds\n (default is None - the transition time will be calculated randomily)\nReturns:\n points - a tuple of tuples of x and y coordinates of the Bezier curve;\n control_points - a tuple of tuples of x and y coordinates of the control\n points, including the starting and the ending points;\n transition_time - an integer in milliseconds of the transition time.\n \"\"\"\n\n if begin_pos == None:\n begin_pos = get_mouse_cursor_position()\n\n # Make sure that the order equal to at least one.\n if order < 2:\n order = 1\n\n # The transition time that is used for calculating the points of the curve.\n if transition_time == None:\n transition_time = get_random_travel_time(begin_pos, end_pos)\n\n logger.debug(f'The transition time is {transition_time}')\n\n # The optimal number of points is calculated keeping in mind that the points of the curve\n # could be used for automatic mouse movement. Therefore, the sleep time between\n # the movements should be greater than 13 milliseconds.\n optimal_number_of_points = int(transition_time / 13)\n if optimal_number_of_points == 0:\n optimal_number_of_points = 1\n logger.debug(f'Optimal number of points: {optimal_number_of_points}')\n\n # The controls point are not passed as an argument to the function.\n # Therefore, they will be chosen randomily. The number of control points\n # are one less than the order of the curve.\n if len(control_points) == 0:\n logger.debug('The control points will be determined randomly')\n control_points.append(begin_pos)\n for i in range(1, order):\n random_control_point = get_random_control_point(begin_pos, end_pos)\n control_points.append(random_control_point)\n control_points.append(end_pos)\n\n # The control points have been passed to the function. 
We need to add the starting point and \n # ending point to the list of control points.\n else:\n control_points.insert(0, begin_pos)\n control_points.append(end_pos)\n\n logger.debug(f'Control points: {control_points}')\n logger.debug(f'ORDER: {order}')\n\n # Calculated points of the Bezier curve are added to the following list.\n points = []\n\n STEP = int(transition_time / optimal_number_of_points)\n if STEP == 0:\n STEP = 1\n logger.debug(f'The optimal step for calculating the points is {STEP} milliseconds.')\n time_stamps = [t / (transition_time) for t in range(0, transition_time + 1, STEP)]\n\n for time_stamp in time_stamps:\n point_x = 0\n point_y = 0 \n for i in range(0, order + 1):\n coeff = number_of_combinations(order, i) * (1 - time_stamp) ** (order - i) * time_stamp ** i\n point_x += coeff * control_points[i][0] \n point_y += coeff * control_points[i][1]\n points.append((int(point_x), int(point_y))) \n\n return tuple(points), tuple(control_points), transition_time\n\ndef get_random_control_point(\n begin_pos=(300,10), \n end_pos=(280,100),\n ):\n \"\"\" This function gets the coordinates of two points both as a tuple,\ncalculates a random control point for a Bezier curve.\nReturns the coordinates of the random control point as a tuple of coordinates.\"\"\"\n \n # Random offsets are calculated. The offsets show how far the control\n # point is from the straight line that connects the two initial points.\n # If the offsets equal to zero, the control point is on the line.\n offset_x = round(random.uniform(0.25, 0.55), 2)\n offset_y = round(random.uniform(0.25, 0.55), 2)\n\n # The following block calculates the coordinates of the middle of the straight line.\n middle_x = abs((end_pos[0] - begin_pos[0]) // 2)\n if middle_x < 30:\n middle_x = random.randint(30, 45)\n middle_y = abs((end_pos[1] - begin_pos[1]) // 2)\n if middle_y < 30:\n middle_y = random.randint(30, 45)\n\n # Variable direction shows the relative position of the control point.\n # If direction is 1, the control point will be to the right of the straight line.\n # If direction is -1, the control point will be to the left.\n direction = random.choice((-1,1))\n\n # The x coordinate of the control point\n control_point_x = int(min(begin_pos[0], end_pos[0]) + middle_x + direction * middle_x * offset_x)\n \n # The y coordinate of the control point depends on the inclination of the straight line.\n if (begin_pos[0] >= end_pos[0] and begin_pos[1] >= end_pos[1]) \\\n or (end_pos[0] >= begin_pos[0] and end_pos[1] >= begin_pos[1]\n ):\n control_point_y = int(min(begin_pos[1], end_pos[1]) + middle_y - direction * middle_y * offset_y) \n elif (begin_pos[0] > end_pos[0] and begin_pos[1] < end_pos[1]) \\\n or (end_pos[0] > begin_pos[0] and end_pos[1] < begin_pos[1]\n ): \n control_point_y = int(min(begin_pos[1], end_pos[1]) + middle_y + direction * middle_y * offset_y)\n \n return (control_point_x, control_point_y)\n\ndef get_random_travel_time(begin_pos, end_pos):\n \"\"\" This function gets the coordinates of two points both as a tuple.\nReturns a random duration of transition between the two points.\nThe transition time slightly correlates with the distance.\"\"\"\n MINIMUM_TRANSITION_TIME_MILLISECONDS = 150\n MAXIMUM_TRANSITION_TIME_MILLISECONDS = 850\n # The distance is calculated using the Pythagorean theorem.\n distance = int(math.sqrt((end_pos[0] - begin_pos[0]) ** 2 + (end_pos[1] - begin_pos[1]) ** 2))\n \n logger.debug(f'The distance between the two points is {distance}')\n resulting_random_transition_time = distance // 
(random.randint(3,4))\n if resulting_random_transition_time < MINIMUM_TRANSITION_TIME_MILLISECONDS:\n resulting_random_transition_time = MINIMUM_TRANSITION_TIME_MILLISECONDS + random.randint(10,100)\n \n if resulting_random_transition_time > MAXIMUM_TRANSITION_TIME_MILLISECONDS:\n resulting_random_transition_time = MAXIMUM_TRANSITION_TIME_MILLISECONDS - random.randint(10,100)\n \n logger.debug(f'Random transition time has been chosen: {resulting_random_transition_time} milliseconds.')\n return resulting_random_transition_time\n\ndef number_of_combinations(n, i):\n \"\"\" This function gets the binominal coefficients n and i.\n Returns the number of ways the i objects can be chosen from among n objects.\"\"\"\n return int((math.factorial(n)) / (math.factorial(i) * math.factorial(n - i)))","sub_path":"ruautogui/bezier.py","file_name":"bezier.py","file_ext":"py","file_size_in_byte":8298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"443165894","text":"from colorama import Fore\nfrom scipy import spatial\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\"\"\"\"\nProgram to find the sentence similarity between two sentences using CountVectorizer\n\"\"\"\n\n\nif __name__ == '__main__':\n\n sentence_list = []\n print('Enter two sentences:')\n\n sentence_list.append(input())\n sentence_list.append(input())\n\n # Initializing CountVectorizer and training on the sentences\n vectorizer = CountVectorizer()\n tf = vectorizer.fit_transform(sentence_list)\n tf = tf.toarray()\n\n # tf[0] implies the vectorized form of the first input sentence\n # tf[1] implies the vectorized form of the second input sentence\n\n print(Fore.BLUE + 'Similarity based metrics')\n\n # Computing the cosine similarity\n cosine = 1 - spatial.distance.cosine(tf[0], tf[1])\n print(Fore.YELLOW + 'Cosine similarity: ', round(cosine, 2))\n\n # Computing the Jaccard similarity\n jaccard = 1 - spatial.distance.jaccard(tf[0], tf[1])\n print(Fore.YELLOW + 'Jaccard similarity: ', round(jaccard, 2))\n\n print(Fore.BLUE + 'Distance based metrics')\n\n # Computing the Euclidean distance\n euclidean = spatial.distance.euclidean(tf[0], tf[1])\n print(Fore.YELLOW + 'Euclidean distance: ', round(euclidean, 2))\n\n # Computing the Manhattan distance\n manhattan = spatial.distance.cityblock(tf[0], tf[1])\n print(Fore.YELLOW + 'Manhattan distance: ', round(manhattan, 2))\n","sub_path":"count_vectorizer.py","file_name":"count_vectorizer.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"460283218","text":"from tkinter import *\nfrom NeuronNetwork_NetGen import StandardRun\n\nclass Application(Frame):\n\tdef __init__(self, Master = None):\n\t\tFrame.__init__(self, Master)\n\t\tself.grid()\n\t\tself.createWidgets()\n\n\tdef createWidgets(self):\n\t\tself.n_total_box = Entry(self)\n\t\tself.n_total_box_label = Label(self, text = \"Total Number of Neurons:\")\n\t\tself.n_total_box_label.grid()\n\t\tself.n_total_box.grid()\n\t\tself.n_total_box.insert(0,\"1000\")\n\n\t\tself.n_excit_box = Entry(self)\n\t\tself.n_excit_box_label = Label(self, text = \"Number of Excitatory Neurons:\")\n\t\tself.n_excit_box_label.grid()\n\t\tself.n_excit_box.grid()\n\t\tself.n_excit_box.insert(0,\"800\")\n\n\t\tself.Type_E_var = StringVar()\n\t\tself.Type_E_var.set(\"RS\")\n\t\tself.Type_E_dropdown=OptionMenu(self, self.Type_E_var, \"RS\", \"LTS\", \"CP\", \"IB\", \"FS\")\n\t\tself.Type_E_label = Label(self, text = \"Type of Excitatory Neurons:\")\n\t\tself.Type_E_label.grid()\n\t\tself.Type_E_dropdown.grid()\n\n\t\tself.Type_I_var = StringVar()\n\t\tself.Type_I_var.set(\"LTS\")\n\t\tself.Type_I_dropdown=OptionMenu(self, self.Type_I_var, \"RS\", \"LTS\", \"CP\", \"IB\", \"FS\")\n\t\tself.Type_I_label = Label(self, text = \"Type of Inhibitory Neurons:\")\n\t\tself.Type_I_label.grid()\n\t\tself.Type_I_dropdown.grid()\n\n\t\tself.p_excit_box = Entry(self)\n\t\tself.p_excit_box_label = Label(self, text = \"Probability of Excitatory Neuron Synapse:\")\n\t\tself.p_excit_box_label.grid()\n\t\tself.p_excit_box.grid()\n\t\tself.p_excit_box.insert(0,\".1\")\n\n\t\tself.p_inhib_box = Entry(self)\n\t\tself.p_inhib_box_label = Label(self, text = \"Probability of Inhibitory Neuron Synapse:\")\n\t\tself.p_inhib_box_label.grid()\n\t\tself.p_inhib_box.grid()\n\t\tself.p_inhib_box.insert(0,\".2\")\n\n\t\tself.w_excitmax_box = Entry(self)\n\t\tself.w_excitmax_box_label = Label(self, text = \"Max Weight of Excitatory Neuron Synapse:\")\n\t\tself.w_excitmax_box_label.grid()\n\t\tself.w_excitmax_box.grid()\n\t\tself.w_excitmax_box.insert(0,\"150\")\n\n\t\tself.w_inhibmax_box = Entry(self)\n\t\tself.w_inhibmax_box_label = Label(self, text = \"Max Weight of Inhibitory Neuron Synapse:\")\n\t\tself.w_inhibmax_box_label.grid()\n\t\tself.w_inhibmax_box.grid()\n\t\tself.w_inhibmax_box.insert(0,\"-200\")\n\n\t\tself.d_excitmax_box = Entry(self)\n\t\tself.d_excitmax_box_label = Label(self, text = \"Max Delay of Excitatory Neuron Synapse:\")\n\t\tself.d_excitmax_box_label.grid()\n\t\tself.d_excitmax_box.grid()\n\t\tself.d_excitmax_box.insert(0,\"15\")\n\n\t\tself.d_inhibmax_box = Entry(self)\n\t\tself.d_inhibmax_box_label = Label(self, text = \"Max Delay of Inhibitory Neuron Synapse:\")\n\t\tself.d_inhibmax_box_label.grid()\n\t\tself.d_inhibmax_box.grid()\n\t\tself.d_inhibmax_box.insert(0,\"10\")\n\n\t\tself.xmax_box = Entry(self)\n\t\tself.xmax_box_label = Label(self, text = \"Max X Value (mm):\")\n\t\tself.xmax_box_label.grid()\n\t\tself.xmax_box.grid()\n\t\tself.xmax_box.insert(0,\"3\")\n\n\t\tself.ymax_box = Entry(self)\n\t\tself.ymax_box_label = Label(self, text = \"Max Y Value (mm):\")\n\t\tself.ymax_box_label.grid()\n\t\tself.ymax_box.grid()\n\t\tself.ymax_box.insert(0,\"3\")\n\n\t\tself.Pmax_box = Entry(self)\n\t\tself.Pmax_box_label = Label(self, text = \"Maximum Probabilty of Connection:\")\n\t\tself.Pmax_box_label.grid()\n\t\tself.Pmax_box.grid()\n\t\tself.Pmax_box.insert(0,\".6\")\n\n\t\tself.tau_box = Entry(self)\n\t\tself.tau_box_label = Label(self, text = \"Spacial Coefficient of 
Connectivity:\")\n\t\tself.tau_box_label.grid()\n\t\tself.tau_box.grid()\n\t\tself.tau_box.insert(0,\".7\")\n\n\t\tself.d_velocity_box = Entry(self)\n\t\tself.d_velocity_box_label = Label(self, text = \"Transmission speed (m/s):\")\n\t\tself.d_velocity_box_label.grid()\n\t\tself.d_velocity_box.grid()\n\t\tself.d_velocity_box.insert(0,\".2\")\n\n\n\t\tself.simulateButton = Button (self, text=\"Simulate\", command=self.simulate)\n\t\tself.simulateButton.grid()\n\t\tself.quitButton = Button (self, text=\"Quit\", command=self.quit)\n\t\tself.quitButton.grid()\n\n\n\tdef simulate(self):\n\t\tn_excit=int(self.n_excit_box.get())\n\t\tn_total=int(self.n_total_box.get())\n\t\tp_excit=float(self.p_excit_box.get())\n\t\tp_inhib=float(self.p_inhib_box.get())\n\t\tw_excitmax=int(self.w_excitmax_box.get())\n\t\tw_inhibmax=int(self.w_inhibmax_box.get())\n\t\td_excitmax=int(self.d_excitmax_box.get())\n\t\td_inhibmax=int(self.d_inhibmax_box.get())\n\n\t\txmax=float(self.xmax_box.get())\n\t\tymax=float(self.ymax_box.get())\n\t\tPmax=float(self.Pmax_box.get())\n\t\ttau=float(self.tau_box.get())\n\t\td_velocity=float(self.d_velocity_box.get())\n\n\t\tk=30\n\t\tp_rewire=.2\n\n\t\tStandardRun(k, p_rewire,\n n_excit, p_excit, w_excitmax, d_excitmax,\n n_total, p_inhib,w_inhibmax, d_inhibmax, self.Type_E_var.get(), self.Type_I_var.get(),\n xmax, ymax, Pmax, tau, d_velocity)\t\t\n\napp=Application()\napp.master.title=\"Sample Network\"\napp.mainloop()\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"615116637","text":"import torch.utils.data as data\nimport torch\nimport numpy as np\nfrom datasets.humans36m import Humans36mDataset\n\ndef flatten_views(img):\n return img.view(-1, *img.shape[2:])\n\nclass Batch:\n def __init__(self, data):\n (img, annot, ref_img, unaligned_annot) = list(zip(*data))\n self.imgs = flatten_views(torch.stack(img, 0))\n self.ref_imgs = flatten_views(torch.stack(ref_img, 0))\n self.annots = flatten_views(torch.from_numpy(np.stack(annot, 0))) / (self.imgs.shape[2]-1)\n self.unaligned_annots = flatten_views(torch.from_numpy(np.stack(unaligned_annot, 0))) / (self.imgs.shape[2]-1)\n\n\ndef process_batch(data):\n def shift_to_interval(x):\n '''\n Make return \\in [-1, 1], assuming x \\in [0,1]\n '''\n return (2*x - 1)\n\n ziped_data = list(zip(*data))\n #len(ziped_data)\n #print(ziped_data[0])\n (img, annot, ref_img, ref_annot, unaligned_annot) = list(zip(*data))\n imgs = flatten_views(torch.stack(img, 0))\n ref_imgs = flatten_views(torch.stack(ref_img, 0))\n annots = flatten_views(torch.from_numpy(np.stack(annot, 0))) / (imgs.shape[2]-1)\n ref_annots = flatten_views(torch.from_numpy(np.stack(ref_annot, 0))) / (imgs.shape[2]-1)\n unaligned_annots = flatten_views(torch.from_numpy(np.stack(unaligned_annot, 0))) / (imgs.shape[2]-1)\n\n out = {\n \"imgs\":imgs,\n \"ref_imgs\":ref_imgs,\n \"ref_annots\":shift_to_interval(ref_annots),\n \"annots\":shift_to_interval(annots),\n \"unaligned_annots\":shift_to_interval(unaligned_annots)\n }\n return out \n\ndef process_batch_class(data):\n return Batch(data)\n\ndef LoadUnalignedH36m(subjects_data, subjects_gt, rgb,\n nImages, nViews, batch_size=1, img_size=128, workers=4, ref_interval=[3,30]):\n data_h36m = Humans36mDataset(nViews, '', rgb, \n nImages, subjects_data, -1, \n img_size=img_size, normalized=False, ref_interval=ref_interval)\n gt_h36m = Humans36mDataset(nViews, '', rgb, \n nImages, subjects_gt, -1, \n img_size=img_size, normalized=False, gt_only=True)\n print('Len data: %d' % len(data_h36m))\n print('Len unaligned: %d' % len(gt_h36m))\n dset = UnalignedH36m(data_h36m, gt_h36m) \n loader = data.DataLoader(dset, batch_size=batch_size, shuffle=False, \n num_workers=workers, pin_memory=False, collate_fn=process_batch)\n #loader = data.DataLoader(dset, batch_size=batch_size, shuffle=False, \n # num_workers=workers, pin_memory=False)\n \n return loader\n\nclass UnalignedH36m(data.Dataset):\n \"\"\"\n Class unifying the loading process of data and unaligned gt\n \"\"\"\n def __init__(self, data_sampler, gt_sampler):\n self.data_sampler = data_sampler\n self.gt_sampler = gt_sampler\n\n def shuffle_unaligned(self):\n print('shuffling unaligned\\n')\n self.gt_sampler.shuffle()\n\n def __getitem__(self, i):\n j = i % len(self.data_sampler)\n k = i % len(self.gt_sampler)\n img, annots, ref_img, ref_annots = self.data_sampler.__getitem__(j)\n _, unalign_gt, _, _ = self.gt_sampler.__getitem__(k)\n return (img, annots, ref_img, ref_annots, unalign_gt)\n\n def __len__(self):\n return len(self.data_sampler)\n\n\n","sub_path":"datasets/unaligned_loader.py","file_name":"unaligned_loader.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"247778291","text":"# imports\nimport sys\nimport time\nimport atexit\n\n######################################################################\n### Description #######\n### ######\n### Created by: Jimmy Vang #####\n### Date Created: 3/03/2017 ####\n### ###\n######################################################################\n\n######################################################################\n### Platform/OS compatibility Checks ###\n#######################################\n\n# Create platform variables/constants\n_OS_ = sys.platform\nRASPBERRY_PI_3 = \"linux\"\n\n# Check for OS compatibility (ONLY WORKS ON RASPBERRY PI).\nif _OS_ == RASPBERRY_PI_3:\n \n # Import Raspberry PI packages.\n print(\"This OS platform \\\"\" + _OS_ + \"\\\" is OK.\")\n import RPi.GPIO as GPIO\n import pigpio\n \nelse:\n\n # Exit this program if the OS is not compatible.\n print(\"This OS platform \\\"\" + _OS_ + \"\\\" is not supported.\")\n sys.exit()\n\n##################################################################\nclass DriverController(object):\n\n # Constants\n UP = 1\n DOWN = -1\n LEFT = -1\n RIGHT = 1\n\n ######################################################################\n ### Initialization ###\n #####################\n def __init__(self):\n #print(\"init test\")\n ### User Configuration ###\n \n ### End of User Configuration ###\n\n ### GPIO Setup ###\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.rightMotorAIN1, GPIO.OUT)\n GPIO.setup(self.rightMotorAIN2, GPIO.OUT)\n GPIO.setup(self.leftMotorBIN1, GPIO.OUT)\n GPIO.setup(self.leftMotorBIN2, GPIO.OUT)\n\n GPIO.setup(self.rightMotorPwmA_Pin, GPIO.OUT)\n GPIO.setup(self.leftMotorPwmB_Pin, GPIO.OUT)\n \n ### Connect to local Pi. ###\n self.pi = pigpio.pi()\n if not self.pi.connected:\n print(\"pigpio.pi was unable to connect to the pi.\")\n sys.exit()\n\n ### Software PWM Setup ###\n self.leftMotor = GPIO.PWM(self.rightMotorPwmA_Pin, self.rightMotorPwmA_Freq)\n self.rightMotor = GPIO.PWM(self.leftMotorPwmB_Pin, self.leftMotorPwmB_Freq)\n\n ######################################################################\n ### Drive Functions ###\n ######################\n\n # Drive the robot forwards\n def driveForward(self):\n print(\"Going forwards\")\n GPIO.output(self.rightMotorAIN1, GPIO.LOW)\n GPIO.output(self.rightMotorAIN2, GPIO.HIGH)\n self.leftMotor.start(self.rightMotorPwmA_DutyCycle)\n\n GPIO.output(self.leftMotorBIN1, GPIO.LOW)\n GPIO.output(self.leftMotorBIN2, GPIO.HIGH)\n self.rightMotor.start(self.leftMotorPwmB_DutyCycle)\n return\n\n # Drive the robot in reverse\n def driveReverse(self):\n print(\"Going backwards\")\n GPIO.output(self.rightMotorAIN1, GPIO.HIGH)\n GPIO.output(self.rightMotorAIN2, GPIO.LOW)\n self.leftMotor.start(self.rightMotorPwmA_DutyCycle)\n\n GPIO.output(self.leftMotorBIN1, GPIO.HIGH)\n GPIO.output(self.leftMotorBIN2, GPIO.LOW)\n self.rightMotor.start(self.leftMotorPwmB_DutyCycle)\n return\n\n # Rotate the robot counter-clockwise\n def rotateLeft(self):\n print(\"Turning Left\")\n GPIO.output(self.rightMotorAIN1, GPIO.HIGH)\n GPIO.output(self.rightMotorAIN2, GPIO.LOW)\n self.leftMotor.start(self.rightMotorPwmA_DutyCycle)\n\n GPIO.output(self.leftMotorBIN1, GPIO.LOW)\n GPIO.output(self.leftMotorBIN2, GPIO.HIGH)\n self.rightMotor.start(self.leftMotorPwmB_DutyCycle)\n return\n\n # Rotate the robot clockwise\n def rotateRight(self):\n print(\"Turning Right\")\n GPIO.output(self.rightMotorAIN1, GPIO.LOW)\n GPIO.output(self.rightMotorAIN2, GPIO.HIGH)\n self.leftMotor.start(self.rightMotorPwmA_DutyCycle)\n\n 
GPIO.output(self.leftMotorBIN1, GPIO.HIGH)\n GPIO.output(self.leftMotorBIN2, GPIO.LOW)\n self.rightMotor.start(self.leftMotorPwmB_DutyCycle)\n return\n \n ######################################################################\n ### Stop Functions ###\n #####################\n \n # Stop All\n def stop(self):\n print(\"Stopping All Parts\")\n self.stopMotors()\n return\n\n # Stop Motors\n def stopMotors(self):\n print(\"Stopping Motors\")\n self.leftMotor.stop()\n self.rightMotor.stop()\n return\n\n # Cleanup Handler\n def cleanUp(self):\n print(\"CLEANING GPIO\")\n GPIO.cleanup()\n self.pi.stop()\n print(\"DONE CLEANING\")\n\n # atexit cleaner\n if _OS_ == RASPBERRY_PI_3:\n atexit.register(cleanUp)\n\n### End of class ###\n#print(\"Done\")\n\n##################################################################\n","sub_path":"Robot Car/Old Scripts/DriverController.py","file_name":"DriverController.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"600594120","text":"import os\nimport time\nimport tensorflow as tf\nimport argparse\n\nfrom Model.model import Model\nfrom Runner.Online.runner import Runner\n\n\n\ntry:\n from SwergioUtility.CommunicationEnviroment import CommunicationEnviroment\nexcept ImportError:\n import pip\n pip.main(['install','-e','/shared/MessageUtilities'])\n time.sleep(10)\n from SwergioUtility.CommunicationEnviroment import CommunicationEnviroment\n\n\ndef run():\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-bs','--batchsize', help='size of batch and steps per learning', type=int, default=5)\n parser.add_argument('-ms','--memorysize', help='size of memory batches', type=int, default=10)\n parser.add_argument('-ls','--latentsize', help='size of latent vektor', type=int, default=30)\n\n parser.add_argument('-lr','--learningrate', help='learning rate', type=float, default=7e-4)\n parser.add_argument('--lrschedule', help='Learning rate schedule', choices=['constant', 'linear'], default='constant')\n\n parser.add_argument('--alpha', help='alpha', type=float, default=0.99)\n parser.add_argument('--gamma', help='gamma', type=float, default=0.99)\n parser.add_argument('--gaelambda', help='lambda for genralized advatage estimate', type=float, default=0.96)\n parser.add_argument('--epsilon', help='epsilon', type=float, default=1e-5)\n parser.add_argument('--maxgradnorm', help='max_grad_norm', type=float, default=0.5)\n\n parser.add_argument('--actprobability', help='Probability of the actor to act', type=float, default=1)\n parser.add_argument('--explorprobability', help='Probability of the actor to explor', type=float, default=0.05)\n\n\n\n parser.add_argument('--entcoef', help='entropy coefficient', type=float, default=0.01)\n \n parser.add_argument('--latentlossweight', help='weight latent loss', type=float, default=1)\n parser.add_argument('--generationlossweight', help='weight generation loss', type=float, default=1)\n parser.add_argument('--GANGlossweight', help='weight GAN G loss', type=float, default=1)\n parser.add_argument('--GANDlossweight', help='weight GAM D loss', type=float, default=1)\n parser.add_argument('--policylossweight', help='weight policy loss', type=float, default=1)\n parser.add_argument('--criticlossweight', help='weight critic loss', type=float, default=0.5)\n\n parser.add_argument('-li','--logint', help='log intervall', type=int, default=100)\n parser.add_argument('-si','--saveint', help='save intervall', type=int, default=500)\n\n parser.add_argument('--logdir', help='path to log directory')\n parser.add_argument('--savedir', help='path to save directory')\n\n parser.add_argument('--expertdata', help='path to expertdata file')\n\n args = parser.parse_args()\n\n batch_size = args.batchsize \n memory_size = args.memorysize\n latent_size = args.latentsize\n\n lr = args.learningrate\n lrschedule = args.lrschedule\n\n alpha=args.alpha\n gamma = args.gamma\n gae_lambda = args.gaelambda\n epsilon = args.epsilon\n max_grad_norm = args.maxgradnorm\n ent_coef = args.entcoef\n\n latent_loss_weight = args.latentlossweight\n generation_loss_weight = args.generationlossweight\n GAN_G_loss_weight = args.GANGlossweight\n GAN_D_loss_weight = args.GANDlossweight\n policy_loss_weight = args.policylossweight\n critic_loss_weight = args.criticlossweight\n\n log_interval= args.logint\n save_interval = args.saveint\n\n if args.logdir is None:\n logpath = os.getenv('LOG_PATH')\n else:\n logpath = args.logdir\n\n if args.savedir 
is None:\n modelsavepath = os.getenv('MODELSAVE_PATH')\n else:\n modelsavepath = args.savedir\n\n if args.expertdata is None:\n expertdata_path = os.getenv('EXPERTDATA_PATH')\n else:\n expertdata_path = args.expertdata\n\n env = CommunicationEnviroment(expertdata_file_path = expertdata_path)\n\n tf.reset_default_graph()\n print('load model')\n model = Model(env=env, \n batch_size=batch_size, \n memory_size = memory_size, \n latent_size = latent_size,\n ent_coef=ent_coef, \n max_grad_norm=max_grad_norm, \n alpha=alpha,\n epsilon=epsilon,\n latent_loss_weight = latent_loss_weight,\n generation_loss_weight =generation_loss_weight,\n GAN_G_loss_weight = GAN_G_loss_weight,\n GAN_D_loss_weight = GAN_D_loss_weight,\n policy_loss_weight = policy_loss_weight,\n critic_loss_weight = critic_loss_weight )\n\n print('load runner')\n runner = Runner(env, model,\n modelsavepath = modelsavepath,\n logpath = logpath,\n lr = lr,\n gamma = gamma,\n gae_lambda = gae_lambda,\n log_interval = log_interval, \n save_interval = save_interval )\n \n time.sleep(1)\n print('start')\n runner.listen()\n\n\nif __name__ == '__main__':\n run()\n\n\n","sub_path":"Runner/Online/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"234499110","text":"# gets top 1000 movies\nimport time as t\nimport tmdbsimple as tmdb\ntmdb.API_KEY = '3b688e8bee27473c3ed2c1caeeab204e'\n\ndef main():\n movies = {}\n for j in range(1,40):\n temp = tmdb.Movies(tmdb).popular(**{'page': j})['results']\n for i in temp:\n movies[i['title']] = []\n t.sleep(12)\n for j in range(40,51):\n temp = tmdb.Movies(tmdb).popular(**{'page': j})['results']\n for i in temp:\n movies[i['title']] = []\n return movies\n","sub_path":"TMDB_connection/getMovies.py","file_name":"getMovies.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"73837699","text":"import logging\n\nfrom timeit import default_timer as timer\nfrom typing import List, Dict, Any, Optional, Union\n\nfrom health_check import get_cause_string\nfrom health_check.constants import OKAY, UNAVAILABLE, WARNING\nfrom health_check.exceptions import ServiceWarning, ServiceUnavailable\n\nlogger = logging.getLogger('health-check')\n\n\nclass HealthCheckError:\n def __init__(self, severity, message):\n # type: (str, str) -> None\n assert severity in [UNAVAILABLE, WARNING]\n assert message\n self.severity = severity # type: str\n self.message = message # type: str\n\n def __str__(self):\n # type: () -> str\n return \"{0}: {1}\".format(self.severity, self.message)\n\n\nclass HealthCheckResult:\n def __init__(self):\n # type: (str) -> None\n self.errors = [] # type: List[HealthCheckError]\n self.time_taken = None # type: Optional[float]\n\n def add_service_error(self, message):\n # type: (str) -> None\n self.errors.append(HealthCheckError(UNAVAILABLE, message))\n\n def add_service_warning(self, message):\n # type: (str) -> None\n self.errors.append(HealthCheckError(WARNING, message))\n\n @property\n def status(self):\n \"\"\"\n If there are no errors, status is 'okay'.\n If there are only warnings, status is 'warning'.\n Otherwise, status is 'unavailable'\n \"\"\"\n # type: () -> str\n if not self.errors:\n return OKAY\n for error in self.errors:\n if error.severity != WARNING:\n return UNAVAILABLE\n return WARNING\n\n def as_dict(self):\n # type: () -> Dict[str, Any]\n\n latency = '{0} seconds'.format(round(self.time_taken, 4)) if self.time_taken else 'unknown'\n return {\n 'latency': latency,\n 'status': self.status,\n 'errors': [str(error) for error in self.errors]\n }\n\n\nclass BaseHealthCheckBackend(object):\n\n def __init__(self):\n self.result = HealthCheckResult() # type: HealthCheckResult\n self._required = None # type: Optional[bool]\n\n @property\n def required(self):\n # type: () -> bool\n assert self._required is not None, \"required not set\"\n return self._required\n\n @required.setter\n def required(self, value):\n # type: (bool) -> None\n assert value is not None\n self._required = value\n\n def check_status(self):\n raise NotImplementedError\n\n def run_check(self):\n start = timer() # type: float\n try:\n self.check_status()\n except ServiceWarning as e:\n self.add_service_warning(e.message, exc_info=False)\n except ServiceUnavailable as e:\n self.add_service_error(e.message, exc_info=False)\n except BaseException as e:\n logger.exception(\n dict(msg='Unexpected exception in {0}.check_status()\"'.format(self.__class__.__name__),\n type='health_check_service_exception',\n exc_class=e.__class__.__name__,\n component_name=self.component_name))\n raise\n finally:\n time_taken = timer() - start # type: float\n self.result.time_taken = time_taken\n\n def add_service_error(self, cause, log_it=True, exc_info=True):\n # type: (Union[str, BaseException], bool, bool) -> None\n if log_it:\n _cause = get_cause_string(cause) # type: str\n logger.info(\n dict(msg='Service Error in {0} ({1}): {2}'.format(self.__class__.__name__, self.component_name, _cause),\n type='health_check_service_error',\n cause=_cause,\n component_name=self.component_name),\n **dict(exc_info=exc_info))\n self.result.add_service_error(get_cause_string(cause))\n\n def add_service_warning(self, cause, log_it=True, exc_info=True):\n # type: (Union[str, BaseException], bool, bool) -> None\n if log_it:\n _cause = get_cause_string(cause) # type: str\n logger.info(\n dict(msg='Service 
Warning in {0} ({1}): {2}'.format(self.__class__.__name__, self.component_name, _cause),\n type='health_check_service_warning',\n cause=_cause,\n component_name=self.component_name),\n **dict(exc_info=exc_info))\n self.result.add_service_warning(get_cause_string(cause))\n\n @property\n def component_name(self):\n # type: () -> str\n return self.__class__.__name__\n\n def result_as_dict(self):\n # type: () -> Dict[str, Any]\n ret_val = self.result.as_dict()\n ret_val['name'] = self.component_name\n ret_val['required'] = self.required\n return ret_val\n","sub_path":"health_check/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"172527150","text":"# Copyright 2018 Samuel Payne sam_payne@byu.edu\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nfrom cptac.cancers.source import Source\n\nclass BcmUcec(Source):\n \"\"\"Define the BcmUcec class, inherited from the Source class. This class will manage the loading of the UCEC data from the BCM source.\"\"\"\n\n def __init__(self, no_internet=False):\n \"\"\"\n Initialize the BcmUcec object with the specified parameters.\n This object represents the UCEC data from the BCM source.\n\n Parameters:\n no_internet (bool, optional): If True, skip the index update step. Useful when internet connection is spotty or not available. Default is False.\n \"\"\"\n\n # Define the data files associated with this dataset\n self.data_files = {\n \"mapping\" : \"gencode.v34.basic.annotation-mapping.txt.gz\",\n \"circular_RNA\" : \"UCEC-circRNA_rsem_tumor_normal_UQ_log2(x+1)_BCM.txt.gz\",\n \"transcriptomics\" : \"UCEC-gene_rsem_removed_circRNA_tumor_normal_UQ_log2(x+1)_BCM.txt.gz\"\n }\n \n # Define the load functions for each data type\n self.load_functions = {\n 'circular_RNA' : self.load_circular_RNA,\n 'transcriptomics' : self.load_transcriptomics,\n }\n \n # Initialize the Source parent class\n super().__init__(cancer_type=\"ucec\", source='bcm', data_files=self.data_files, load_functions=self.load_functions, no_internet=no_internet)\n\n def load_circular_RNA(self):\n \"\"\"Load the circular RNA dasta from the defined file.\"\"\"\n\n df_type = 'circular_RNA'\n \n if df_type not in self._data:\n # If the data is not already loaded, load it\n file_path = self.locate_files(df_type)\n \n df = pd.read_csv(file_path, sep=\"\\t\")\n df = df.rename_axis('INDEX').reset_index()\n df[[\"circ\",\"chrom\",\"start\",\"end\",\"gene\"]] = df.INDEX.str.split('_', expand=True)\n df[\"circ_chromosome\"] = df[\"circ\"] +\"_\" + df[\"chrom\"]\n df = df.set_index('gene')\n \n # Add gene names to circular RNA data\n self.load_mapping()\n gene_key = self._helper_tables[\"gene_key\"]\n df = gene_key.join(df, how = \"inner\")\n df = df.reset_index()\n df = df.rename(columns= {\"gene_name\": \"Name\",\"gene\":\"Database_ID\"}) # change names to match cptac package\n df = df.set_index([\"Name\",\"circ_chromosome\", \"start\",\"end\",\"Database_ID\"]) #create multi-index\n df.drop(['INDEX', 'circ', 'chrom'], axis=1, inplace=True) \n df = df.sort_index()\n df = df.T\n df.index = df.index.str.replace(r\"_T\", \"\", regex=True) # remove Tumor label\n df.index = df.index.str.replace(r\"_A\", \".N\", regex=True)# Normal samples labeled with .N\n df.index.name = \"Patient_ID\"\n\n # save df in self._data\n self.save_df(df_type, df)\n\n def load_mapping(self):\n \"\"\"Load the mapping data from the defined file.\"\"\"\n\n df_type = 'mapping'\n\n if not self._helper_tables:\n # If the mapping data is not already loaded, load it\n file_path = self.locate_files(df_type)\n \n df = pd.read_csv(file_path, sep=\"\\t\")\n df = df[[\"gene\",\"gene_name\"]] #only need gene (database gene id) and gene_name 
(common gene name)\n df = df.set_index(\"gene\")\n df = df.drop_duplicates()\n self._helper_tables[\"gene_key\"] = df\n\n def load_transcriptomics(self):\n \"\"\"Load the transcriptomics data from the defined file.\"\"\"\n\n df_type = 'transcriptomics'\n\n if df_type not in self._data:\n # If the data is not already loaded, load it\n file_path = self.locate_files(df_type)\n \n df = pd.read_csv(file_path, sep=\"\\t\")\n df.index.name = 'gene'\n \n # Add gene names to transcriptomic data\n self.load_mapping()\n gene_key = self._helper_tables[\"gene_key\"]\n transcript = gene_key.join(df, how = \"inner\") #keep only gene_ids with gene names\n transcript = transcript.reset_index()\n transcript = transcript.rename(columns={\"gene_name\":\"Name\",\"gene\":\"Database_ID\"})\n transcript = transcript.set_index([\"Name\", \"Database_ID\"])\n transcript = transcript.sort_index() #alphabetize\n transcript = transcript.T\n transcript.index = transcript.index.str.replace(r\"_T\", \"\", regex=True)\n transcript.index = transcript.index.str.replace(r\"_A\", \".N\", regex=True)# Normal samples labeled with .N\n transcript.index.name = \"Patient_ID\"\n\n df = transcript\n # save df in self._data\n self.save_df(df_type, df)\n","sub_path":"cptac/cancers/bcm/bcmucec.py","file_name":"bcmucec.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"233244326","text":"import logging\nimport os\nimport sys\nimport requests\nimport urllib.parse\nimport validators\n\nclass ErrorParser:\n\n def __init__(self, data=None, ogdata=None, tdata=None):\n try:\n self.ogdata = ['title', 'type', 'description', 'image', 'url', 'locale']\n self.tdata = ['card', 'site', 'site:id', 'creator', 'creator:id', 'description', 'title', 'image',\n 'image:alt']\n\n if ogdata is not None:\n self.ogdata = ogdata\n\n if tdata is not None:\n self.tdata = tdata\n\n if data is not None:\n\n self.errors = {}\n\n self.data = data\n self.url = self.data['url']\n\n self.title_errors()\n self.image_errors()\n self.h1_errors()\n self.body_text()\n self.og_errors()\n self.meta_errors()\n self.error_errors()\n else:\n self.errors = None\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n\n def title_errors(self):\n try:\n if self.data['title']:\n\n if len(self.data['title']) <= 0:\n self.errors['title'] = {'url': self.url, 'severity': 1, 'title': self.data['title'], 'error': \"No Title\"}\n return True\n\n if len(self.data['title']) <= 20:\n self.errors['title'] = {'url': self.url, 'severity': 2,'title': self.data['title'], 'error': \"Title to short\"}\n return True\n\n if len(self.data['title']) >= 60:\n self.errors['title'] = {'url': self.url, 'severity': 2, 'title': self.data['title'], 'error': \"Title to Long\"}\n return True\n\n #40 is about right\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n\n\n def image_errors(self):\n try:\n imgerrors = []\n if self.data['images']:\n images = self.data['images']\n for img in images:\n\n src = images[img].get(\"src\")\n\n if src is None or src == '':\n imgerrors.append({'url': self.url, 'severity': 1, 'img': src,\n 'error': \"No imgage src\"})\n\n base_url = self.data['base_url']\n if src is not None and src != '':\n if not validators.url(src):\n imgscr = urllib.parse.urljoin(base_url, src)\n print(src)\n\n # check for https:// or http or //\n # NO ?\n # ADD URL\n #\n # check again for // in URL (not https://)\n\n\n\n\n r = requests.head(imgscr)\n\n if 200 != r.status_code:\n errorstr = \"status code : {}\".format(r.status_code)\n imgerrors.append({'url': self.url, 'severity': 1, 'img': src,\n 'error': errorstr})\n\n alt = images[img].get(\"alt\")\n if alt is None or alt == '':\n imgerrors.append({'url': self.url, 'severity': 3, 'img': src,\n 'error': \"No Alt-text\"})\n\n height = images[img].get(\"height\")\n if height is None or int(height) <= 0:\n imgerrors.append({'url': self.url, 'severity': 3, 'img': src,\n 'error': \"No height\"})\n\n width = images[img].get(\"width\")\n if width is None or int(width) <= 0:\n imgerrors.append({'url': self.url, 'severity': 3, 'img': self.data['src'],\n 'error': \"No width\"})\n\n self.errors['images'] = imgerrors\n\n except KeyError as e:\n self.errors['images'] = [{'url': self.url, 'severity': 1, 'img': \"0\", 'error': \"Error reading images\"}]\n return True\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + 
str(exc_tb.tb_lineno))\n\n\n def h1_errors(self):\n try:\n i = 0\n if len(self.data['h1']) <= 0:\n raise KeyError('none')\n\n for h in self.data['h1']:\n if len(h) <= 5:\n self.errors['h1'][i] = {'url': self.url, 'severity': 4, 'h1': self.data['h1'], 'error': \"Short H1-text\"}\n if len(h) > 50:\n self.errors['h1'][i] = {'url': self.url, 'severity': 4, 'h1': self.data['h1'],\n 'error': \"Long H1-text\"}\n i += 1\n\n if i > 1:\n self.errors['h1'][i] = {'url': self.url, 'severity': 1, 'h1': \">1\", 'error': \"Multiple H1-text\"}\n\n except KeyError as e:\n self.errors['h1'][i] = {'url': self.url, 'severity': 1, 'h1': \"0\", 'error': \"No H1-text\"}\n return True\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n\n\n def body_text(self):\n try:\n if self.data['site_text']:\n\n if self.data['word_count'] <= 0:\n raise KeyError('none')\n\n if self.data['word_count'] <= 300:\n self.errors['site_text'] = {'url': self.url, 'severity': 1, 'content': \"<300\", 'error': \"To little content\"}\n return True\n\n if self.data['word_count'] > 2000:\n self.errors['site_text'] = {'url': self.url, 'severity': 4, 'content': \">2000\", 'error': \"Much content\"}\n return True\n\n except KeyError as e:\n self.errors['site_text'] = {'url': self.url, 'severity': 1, 'content': \"0\", 'error': \"No content\"}\n return True\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n\n\n def og_errors(self):\n try:\n i = 0\n for f in self.ogdata:\n try:\n field = \"og:{}\".format(f)\n if len(self.data[field]) <= 0:\n raise KeyError('None')\n except KeyError as e:\n error = \"No og {}\".format(f)\n self.errors['og'][i] = {'url': self.url, 'severity':5,'title': self.data['title'], 'error': error}\n i += 1\n\n for f in self.tdata:\n try:\n field = \"twitter:{}\".format(f)\n if len(self.data[field]) <= 0:\n raise KeyError('None')\n except KeyError as e:\n error = \"No twitter {}\".format(f)\n self.errors['twitter'][i] = {'url': self.url, 'severity': 5, 'title': self.data['title'], 'error': error}\n i += 1\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n\n\n def meta_errors(self):\n try:\n try:\n if len(self.data['metadescription']) <= 0:\n raise KeyError('None')\n except KeyError as e:\n self.errors['metadescription'] = {'url': self.url, 'severity': 2, 'content': \"0\", 'error': \"No Meta Description\"}\n\n try:\n if len(self.data['metakeywords']) <= 0:\n raise KeyError('None')\n except KeyError as e:\n self.errors['metakeywords'] = {'url': self.url, 'severity': 2, 'content': \"0\", 'error': \"No Meta keywords\"}\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n\n def error_errors(self):\n try:\n i = 0\n for e in self.data['pageerrors']:\n self.errors['global'][i] = e\n i += 1\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.warning(str(e) + \" | \" + str(exc_type) + \" | \" + str(fname) + \" | \" + str(exc_tb.tb_lineno))\n","sub_path":"py_seonizer/errrorparser.py","file_name":"errrorparser.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"523001713","text":"import random\nimport game_framework\nfrom pico2d import *\n\n# Game object class here\n\nclass monWater:\n\n PIXEL_PER_METER = (10.0 / 0.3) # 10 pixel 30 cm\n RUN_SPEED_KMPH = 3.0 # Km / Hour\n RUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)\n RUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)\n RUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)\n\n TIME_PER_ACTION = 0.1\n ACTION_PER_TIME = 1.0 / TIME_PER_ACTION\n FRAMES_PER_ACTION = 7\n RIGHT_RUN, ATTACK = 0, 2\n\n def __init__(self):\n self.x, self.y = random.randint(-200,0), random.randint(100,500)\n self.attack_x, self.attack_y = self.x, self.y\n self.frame = random.randint(0, 8)\n self.attack_frame = random.randint(0, 4)\n self.bullet_frame = 0\n self.total_frames = 0.0\n self.hp = 3\n self.state = self.RIGHT_RUN\n\n monWater.image1 = load_image('mon_Water_1.png')\n monWater.image2 = load_image('mon_Water_2.png')\n monWater.image3 = load_image('mon_Water_3.png')\n\n def return_x(self):\n return self.x\n\n def return_y(self):\n return self.y\n\n def return_state(self):\n return self.state\n\n def change_state(self, state):\n self.state = state\n\n def update(self, frame_time):\n if self.state == self.RIGHT_RUN:\n distance = monWater.RUN_SPEED_PPS * frame_time\n self.total_frames += \\\n monWater.FRAMES_PER_ACTION * monWater.ACTION_PER_TIME * frame_time\n self.frame = int(self.total_frames) % 5\n elif self.state == self.ATTACK:\n distance = monWater.RUN_SPEED_PPS * frame_time\n self.total_frames += \\\n monWater.FRAMES_PER_ACTION * monWater.ACTION_PER_TIME * frame_time\n self.frame = int(self.total_frames) % 8\n self.attack_frame = int(self.total_frames) % 4\n\n if self.x < 600:\n self.x += distance\n self.attack_x = self.x\n elif self.x >= 600:\n self.x = 600\n self.state=self.ATTACK\n self.attack_x += distance\n\n def draw(self):\n if self.state == self.RIGHT_RUN:\n self.image1.clip_draw(self.frame*80, 0, 80, 100, self.x, self.y)\n elif self.state == self.ATTACK:\n self.image2.clip_draw(self.attack_frame*80, 0, 80, 100, self.x, self.y)\n for i in range(0, 10):\n if(self.attack_x - i*80 > 600):\n self.image3.clip_draw(self.bullet_frame*30, 0, 30, 30, self.attack_x - i*80, self.attack_y)\n\n\n def draw_bb(self):\n draw_rectangle(*self.get_bb())\n\n def get_bb(self):\n return self.x - 30, self.y - 40, self.x + 20, self.y + 0\n\n def draw_attack_bb(self):\n draw_rectangle(*self.get_attack_bb())\n\n def get_attack_bb(self):\n return self.attack_x - 15, self.attack_y - 15, self.attack_x + 15, self.attack_y + 15\n\n","sub_path":"monwater.py","file_name":"monwater.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"581436391","text":"'''\n다음과 같이 정수 N을 입력받아서 1부터 N까지의 정수를 키로 하고,\n\n그 정수의 제곱을 값으로 하는 딕셔너리 객체를 만드는 코드를 작성하십시오.\n'''\n\ninput_number = int(input())\n\npow_dict = {num:pow(num,2) for num in range(1,input_number+1)}\n\nprint(pow_dict)\n","sub_path":"PYTHON/파이썬_프로그래밍_기초_문제풀이/13/13-05.py","file_name":"13-05.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"593940847","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom shortner.forms import urlForm\nfrom shortner.models import urlShortner, generate_public_id\n\n\n@csrf_exempt\ndef list(request):\n if request.method==\"GET\":\n form=urlForm()\n return render(\n request, \"shortner/input.html\", {\"form\": form}\n )\n if request.method==\"POST\":\n data=request.POST['url']\n public_id=generate_public_id()\n url=urlShortner()\n url.public_key=public_id\n url.url=data\n url.save()\n form = urlForm()\n host = request.get_host()\n print(host)\n new_url=host+\"/\"+public_id\n print(data)\n return render(\n request, \"shortner/input.html\", {\"form\": form,\"new_url\":new_url}\n )\n\n\n@csrf_exempt\ndef search(request, public_id):\n url=urlShortner.objects.filter(public_key=public_id).first()\n\n return redirect(url.url)\n","sub_path":"shortner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"230947874","text":"\"\"\"Module for processing force-constant.\"\"\"\nimport numpy as np\n\n\ndef qpoint(force_constant=[], qpt=[0.1, 0.1, 0.1]):\n \"\"\"Get FC as natons x natons x 3 x3.\"\"\"\n qpt = np.array(qpt)\n exp_iqpt = np.exp(1.0j * qpt)\n dmat = force_constant * exp_iqpt\n vals, vects = np.linalg.eigh(dmat)\n return vals, vects\n","sub_path":"jarvis/analysis/phonon/force_constants.py","file_name":"force_constants.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"65522227","text":"import requests\n\nfrom src.sync.runelite.utils.endpoints import Endpoint\nfrom src.sync.runelite.utils.timestep import timestep\nfrom src.sync.runelite.utils.user_agent import userAgent\nfrom src.utils.ratelimiter import RateLimiter\n\n\nclass runelitePrices:\n def __init__(\n self,\n user_agent: userAgent,\n n_calls: int = 60,\n interval: int = 60,\n endpoint: Endpoint = Endpoint.OSRS,\n ) -> None:\n self.rate_limiter = RateLimiter(n_calls, interval)\n self.user_agent = user_agent\n self.endpoint = endpoint\n pass\n\n def get_latest(\n self,\n id: int = None,\n ) -> dict:\n \"\"\"\n Get the latest price for all available osrs\n \"\"\"\n self.rate_limiter.call()\n\n url = f\"{self.endpoint.value}/latest\"\n params = dict(id=id) if id else {}\n\n response = requests.get(\n url=url,\n params=params,\n headers=self.user_agent.to_dict(),\n )\n response.raise_for_status()\n return response.json()\n\n def get_time_series(self, id: int, timestep: timestep = timestep.ONE_HOUR) -> dict:\n \"\"\"Gives a list of the high and low prices of item with the given id at the given interval, up to 365 maximum.\"\"\"\n self.rate_limiter.call()\n\n url = f\"{self.endpoint.value}/timeseries\"\n params = {\n \"id\": id,\n \"timestep\": timestep.value,\n }\n\n response = requests.get(\n url=url,\n params=params,\n headers=self.user_agent.to_dict(),\n )\n response.raise_for_status()\n return response.json()\n\n def get_avg_price(self, timestep: timestep, unix_time: int = None) -> dict:\n \"\"\"Gives timestep average of item high and low prices as well as the number traded for the items that we have data on. Comes with a Unix timestamp indicating the timestep block the data is from\"\"\"\n self.rate_limiter.call()\n\n url = f\"{self.endpoint.value}/{timestep.value}\"\n params = {\"timestamp\": unix_time}\n params = {k: v for k, v in params.items() if v is not None}\n\n response = requests.get(\n url=url,\n params=params,\n headers=self.user_agent.to_dict(),\n )\n response.raise_for_status()\n return response.json()\n","sub_path":"src/sync/runelite/runelite_prices.py","file_name":"runelite_prices.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"533492480","text":"import pandas as pd\nimport json\nfrom flask import jsonify, request\nfrom app.api import bp\nfrom app import db\nfrom app.api import responses\n\n\n@bp.route(\"/cell-kpis/\", methods=[\"GET\"])\ndef list_cell_kpis():\n end_timestamp = request.args.get(\"end_timestamp\")\n try:\n if end_timestamp:\n res = pd.read_sql_query(\n f\"select * from cell_kpi where cell_kpi.interval_end_timestamp = {end_timestamp};\",\n con=db.engine,\n )\n else:\n res = pd.read_sql_table(table_name=\"cell_kpi\", con=db.engine)\n json_res = json.loads(res.to_json(orient=\"records\"))\n except (ValueError, Exception) as e:\n return responses.internal_server_error(message=str(e))\n return jsonify(json_res)\n","sub_path":"app/api/cell_kpi.py","file_name":"cell_kpi.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"233556292","text":"# coding=gbk\r\nimport cv2\r\nimport numpy as np\r\n\r\nface_cascade = cv2.CascadeClassifier(r'X:\\Study\\deeplearning\\dataset\\haarcascades\\haarcascade_frontalface_default.xml')\r\n\r\nvideocapture = cv2.VideoCapture(0)\r\n\r\nsuccess,frame = videocapture.read()\r\ncount = 0\r\nwhile success and cv2.waitKey(1000) == -1:\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n\r\n for (x, y, w, h) in faces:\r\n frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n while count <=20:\r\n img = cv2.resize(gray[y:y+w,x:x+h],(200,200))\r\n picename = 'X:\\Study\\deeplearning\\deeplearnertest\\data\\DKG_{0}.jpg'.format(count)\r\n count += 1\r\n cv2.imwrite(picename,img)\r\n\r\n cv2.imshow('face_matching', frame)\r\n success,frame = videocapture.read()\r\n\r\ncv2.destroyWindow('face_matching')\r\n\r\nvideocapture.release()\r\n\r\ncv2.waitKey()","sub_path":"Python/CompterVision/opencv-practice/cv2-face_matching.py","file_name":"cv2-face_matching.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"377465034","text":"'''\nCreated on Aug 28, 2013\n\n@author: loski\n'''\nimport os\nimport subprocess\n\nclass DiscSpaceManager(object):\n '''\n Class with operations to manage the disk space.\n '''\n\n def calculate_available_size(self, path):\n \"\"\"\n Calculates the available space of the device in which the path is mounted.\n \n @param path: path to check\n @return: free disk space in KB\n \"\"\"\n df = subprocess.Popen(['df', '-k', path], stdout=subprocess.PIPE)\n output = df.communicate()[0]\n i = 0\n for x in output.split('\\n')[0].split():\n if x.lower() in ['available', 'free']:\n return float(output.split('\\n')[1].split()[i])\n else:\n i += 1\n \n def calculate_folder_size(self, path):\n \"\"\"\n Calculates the size of all the files (*.mp3 or *.wma in the path)\n @param param: path to check\n @return: size in KB\n \"\"\"\n total_size = 0.0\n for x in os.listdir(path):\n if not x.startswith('.'):\n subitem = os.path.join(path, x)\n if os.path.isdir(subitem):\n total_size += self.calculate_folder_size(subitem)\n else:\n if subitem[-4:].lower() in ['.mp3', '.wma']:\n total_size += os.path.getsize(subitem)\n return total_size/1024","sub_path":"controller/discSpaceManager.py","file_name":"discSpaceManager.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"468584305","text":"#import cv2\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nimport matplotlib.path as path\r\nfrom pylab import *\r\nimport cv2\r\nimport Test1\r\n\r\ndef displayHistogram(n,bins):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n \r\n # get the corners of the rectangles for the histogram\r\n left = np.array(bins[:-1])\r\n right = np.array(bins[1:])\r\n bottom = np.zeros(len(left))\r\n top = bottom + n\r\n \r\n \r\n # we need a (numrects x numsides x 2) numpy array for the path helper\r\n # function to build a compound path\r\n XY = np.array([[left,left,right,right], [bottom,top,top,bottom]]).T\r\n \r\n # get the Path object\r\n barpath = path.Path.make_compound_path_from_polys(XY)\r\n \r\n # make a patch out of it\r\n patch = patches.PathPatch(barpath, facecolor='blue', edgecolor='gray', alpha=0.8)\r\n ax.add_patch(patch)\r\n \r\n # update the view limits\r\n ax.set_xlim(left[0], right[-1])\r\n ax.set_ylim(bottom.min(), top.max())\r\n \r\n plt.show()\r\n\r\n\r\n\r\n\r\n#-------------------------------Main body\r\n#loading an image\r\nI=cv2.imread(\"colorProblem.jpg\") \r\n#Creating the image histogram\r\nn,bins = histogram(array(I),256,normed=True)\r\n#displaing the histogram\r\ndisplayHistogram(n,bins)\r\n\r\n\r\n\r\n#------Example of histogram equalization\r\n\r\n#loading an image\r\nI=cv2.imread(\"colorProblem.jpg\") \r\nI=cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)\r\nI_eq = cv2.equalizeHist(I)\r\nTest1.show2_OpenCV(I,I_eq)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"exercises3/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"88473965","text":"import os\nimport aylien_news_api\n\nconfiguration = aylien_news_api.Configuration()\nconfiguration.api_key['X-AYLIEN-NewsAPI-Application-ID'] = os.environ.get(\n 'NEWSAPI_APP_ID')\nconfiguration.api_key['X-AYLIEN-NewsAPI-Application-Key'] = os.environ.get(\n 'NEWSAPI_APP_KEY')\n\nclient = aylien_news_api.ApiClient(configuration)\napi_instance = aylien_news_api.DefaultApi(client)\n\n\ndef get_stories():\n \"\"\"\n Returns a list of story objects\n \"\"\"\n response = api_instance.list_stories(\n title='Donald Trump',\n published_at_start='NOW-6HOURS',\n per_page=100\n )\n\n return response.stories\n\n\nstories = get_stories()\nclustered_stories = {}\nclusters = []\n\nfor story in stories:\n if len(story.clusters) > 0:\n cluster = story.clusters[0]\n if cluster not in clusters:\n clustered_stories[cluster] = [story.title]\n else:\n clustered_stories[cluster].append(story.title)\n\nfor cluster in clustered_stories:\n print(cluster, len(\n clustered_stories[cluster]), clustered_stories[cluster][0])\n","sub_path":"samples/python/stories_clusters.py","file_name":"stories_clusters.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"300990244","text":"# coding: utf-8\r\nfrom tr import *\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport cv2\r\n\r\nif __name__ == \"__main__\":\r\n # img_path = \"imgs/name_card.jpg\"\r\n img_path = \"imgs/id_card.jpeg\"\r\n\r\n img_pil = Image.open(img_path)\r\n MAX_SIZE = 2000\r\n if img_pil.height > MAX_SIZE or img_pil.width > MAX_SIZE:\r\n scale = max(img_pil.height / MAX_SIZE, img_pil.width / MAX_SIZE)\r\n\r\n new_width = int(img_pil.width / scale + 0.5)\r\n new_height = int(img_pil.height / scale + 0.5)\r\n img_pil = img_pil.resize((new_width, new_height), Image.BICUBIC)\r\n\r\n print(img_pil.width, img_pil.height)\r\n\r\n color_pil = img_pil.convert(\"RGB\")\r\n gray_pil = img_pil.convert(\"L\")\r\n\r\n rect_arr = detect(gray_pil, FLAG_ROTATED_RECT)\r\n\r\n img_draw = ImageDraw.Draw(color_pil)\r\n colors = ['red', 'green', 'blue', \"purple\"]\r\n\r\n for i, rect in enumerate(rect_arr):\r\n x, y, w, h, a = rect\r\n box = cv2.boxPoints(((x, y), (w, h), a))\r\n box = numpy.int0(box)\r\n img_draw.line(xy=(box[0][0], box[0][1], box[1][0], box[1][1]), fill=colors[i % len(colors)], width=2)\r\n img_draw.line(xy=(box[1][0], box[1][1], box[2][0], box[2][1]), fill=colors[i % len(colors)], width=2)\r\n img_draw.line(xy=(box[2][0], box[2][1], box[3][0], box[3][1]), fill=colors[i % len(colors)], width=2)\r\n img_draw.line(xy=(box[3][0], box[3][1], box[0][0], box[0][1]), fill=colors[i % len(colors)], width=2)\r\n\r\n color_pil.save(\"~color_pil.png\")\r\n color_pil.show()\r\n\r\n\r\n","sub_path":"test_angle.py","file_name":"test_angle.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"115486072","text":"import json\nimport pymysql\nfrom flask import jsonify\nimport os\n\nfrom flask import Flask, request, Response\n\nfrom util.error import ErrorHandler\n\nimport dropbox\n\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\nINVALID_TOKEN = 401\nSERVER_ERROR = 500\nBAD_REQUEST = 400\nNOT_FOUND = 404\nVERIFY_EMAIL = 600\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nclient = dropbox.Dropbox(\"ut9MzqycHAAAAAAAAAAAM-HAvZ8JWgqcOSvr5e3VdjJnlPoTByUGs11BUsUzFl1T\")\n\n\ndef conn():\n return pymysql.connect(host='us-cdbr-iron-east-03.cleardb.net',\n user='bbc01008ee8dff',\n password='7c06ebdf',\n db='heroku_aa2646e5ac763f9',\n use_unicode=True,\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n\n\n@app.route('/getChartMusic')\ndef getChartMusic():\n connection = conn()\n cursor = connection.cursor()\n try:\n cursor.execute(\n \"SELECT * FROM week\")\n weeks = cursor.fetchall()\n return Response(json.dumps(weeks), mimetype='application/json')\n except Exception as e:\n return error_return(ErrorHandler(str(e), status_code=500))\n finally:\n cursor.close()\n connection.close()\n\n\n@app.route('/getSongWeek')\ndef getSongWeek():\n weekId = request.args.get('weekId')\n connection = conn()\n cursor = connection.cursor()\n try:\n datas = []\n cursor.execute(\n \"SELECT * FROM week_song WHERE id_week = '%s'\" % weekId)\n week_songs = cursor.fetchall()\n for item in week_songs:\n data = {}\n cursor.execute(\n \"SELECT * FROM song WHERE id = '%s'\" % item['id_song'])\n song = cursor.fetchone()\n if \"https://firebasestorage.googleapis.com\" not in song['link_local']:\n song['link_local'] = client.files_get_temporary_link(song['link_local']).link\n cursor.execute(\n \"SELECT id_singer FROM singer_song WHERE id_song = '%s'\" % song['id'])\n idSinger = cursor.fetchone()\n cursor.execute(\"SELECT * FROM singer where id = '%s'\" % idSinger['id_singer'])\n singer = cursor.fetchone()\n song['singer'] = singer\n data['song'] = song\n data['position'] = item['position']\n data['hierarchical'] = item['hierarchical']\n data['hierarchical_number'] = item['hierarchical_number']\n datas.append(data)\n return Response(json.dumps(datas), mimetype='application/json')\n except Exception as e:\n return error_return(ErrorHandler(str(e), status_code=500))\n finally:\n cursor.close()\n connection.close()\n\n\n@app.route('/getSinger')\ndef getSinger():\n connection = conn()\n cursor = connection.cursor()\n try:\n cursor.execute(\n \"SELECT * FROM singer\")\n singers = cursor.fetchall()\n return Response(json.dumps(singers), mimetype='application/json')\n except Exception as e:\n return error_return(ErrorHandler(str(e), status_code=500))\n finally:\n cursor.close()\n connection.close()\n\n\n@app.route('/getSongSinger')\ndef getSongSinger():\n singerId = request.args.get('singerId')\n connection = conn()\n cursor = connection.cursor()\n try:\n data = []\n cursor.execute(\n \"SELECT id_song FROM singer_song WHERE id_singer = '%s'\" % singerId)\n listIdSongs = cursor.fetchall()\n for song in listIdSongs:\n cursor.execute(\n \"SELECT * FROM song WHERE id = '%s'\" % song['id_song'])\n song = cursor.fetchone()\n if \"https://firebasestorage.googleapis.com\" not in song['link_local']:\n song['link_local'] = client.files_get_temporary_link(song['link_local']).link\n data.append(song)\n return Response(json.dumps(data), mimetype='application/json')\n except Exception as e:\n return error_return(ErrorHandler(str(e), status_code=500))\n finally:\n 
cursor.close()\n connection.close()\n\n\n@app.errorhandler(ErrorHandler)\ndef error_return(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"486086323","text":"from django.contrib import admin\r\nfrom django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns=[\r\n \r\n\r\n path('',views.index,name=\"home\"),\r\n path('student.html',views.student,name=\"student\"),\r\n path('students_attendence.html',views.students_attendence,name=\"students_attendence\"),\r\n path('students_marks.html',views.students_marks,name=\"students_marks\"),\r\n path('teacher_marks.html',views.teacher_marks,name=\"teacher_marks\"),\r\n path('teacher_update_attendence.html',views.teacher_update_attendence,name=\"teacher_update_attendence\"),\r\n path('teacher.html',views.teacher,name=\"teacher\"),\r\n #path('accounts/',include('accounts.urls')\r\n]","sub_path":"erp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"63967767","text":"# https://www.oschina.net/translate/playing-around-with-await-async-in-python-3-5\n\nimport types\n\n\n@types.coroutine\ndef switch():\n yield\n\n\nasync def coroutine1():\n print(\"C1: Start\")\n await switch()\n print(\"C1: Stop\")\n\n\nasync def coroutine2():\n print(\"C2: Start\")\n print(\"C2: a\")\n await switch()\n print(\"C2: b\")\n print(\"C2: c\")\n print(\"C2: Stop\")\n\n\nasync def main():\n await c1\n\n\ndef run(coroutine):\n coroutine = list(coroutine)\n\n while coroutine:\n for cor in list(coroutine):\n try:\n cor.send(None)\n except StopIteration:\n coroutine.remove(cor)\n\n\nc1 = coroutine1()\nc2 = coroutine2()\n\nrun([c1, c2])\n","sub_path":"L1/1_AsyncAndAwait.py","file_name":"1_AsyncAndAwait.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"425756846","text":"from typing import Optional\r\n\r\nfrom numpy import ndarray\r\n\r\nfrom src.algorithm.domain.node import Node\r\nfrom src.util.constants import Direction, Status\r\nfrom src.util.extensions import compute_manhattan_distance, add_tuples\r\n\r\n\r\nclass HillClimbing:\r\n def __init__(self, maze: ndarray, start: tuple[int, int], end: tuple[int, int]):\r\n self.__maze = maze\r\n self.__start_node = Node(position=start)\r\n self.__end_node = Node(position=end)\r\n self.__visited = set()\r\n self.__define_iterations()\r\n self.__current_node = None\r\n\r\n def __define_iterations(self):\r\n self.height = len(self.__maze)\r\n self.width = len(self.__maze[0])\r\n\r\n self.__iterations = 0\r\n self.__maximum_iterations = self.height * self.width // 2\r\n\r\n def __set_current_node(self, node: Node):\r\n self.__current_node = node\r\n self.__visited.add(node.position)\r\n self.__current_node.total_cost = compute_manhattan_distance(self.__end_node.position, node.position)\r\n\r\n def __increment_and_check_exceeding_iterations(self) -> bool:\r\n self.__iterations += 1\r\n\r\n return self.__iterations > self.__maximum_iterations\r\n\r\n def __did_find_goal(self) -> bool:\r\n return self.__current_node and self.__current_node == self.__end_node\r\n\r\n def __is_x_in_range(self, x) -> bool:\r\n return 0 <= x < self.height\r\n\r\n def __is_y_in_range(self, y) -> bool:\r\n return 0 <= y < self.width\r\n\r\n def __is_position_in_range(self, position: tuple[int, int]) -> bool:\r\n return self.__is_x_in_range(position[0]) and self.__is_y_in_range(position[1])\r\n\r\n def __are_coordinates_valid(self, coordinates) -> bool:\r\n return self.__maze[coordinates[0]][coordinates[1]] == Status.EMPTY\r\n\r\n def __get_best_neighbour(self) -> Node:\r\n best_node = Node()\r\n best_node.total_cost = self.width * self.height\r\n\r\n for new_position in Direction.DIRECTIONS:\r\n node_position = add_tuples(self.__current_node.position, new_position)\r\n\r\n if not (self.__is_position_in_range(node_position) and self.__are_coordinates_valid(node_position)):\r\n continue\r\n\r\n if node_position in self.__visited:\r\n continue\r\n\r\n cost = compute_manhattan_distance(self.__end_node.position, node_position)\r\n if cost < best_node.total_cost:\r\n best_node = Node(position=node_position)\r\n best_node.total_cost = cost\r\n\r\n return best_node\r\n\r\n def execute(self) -> Optional[tuple[int, int]]:\r\n if self.__did_find_goal() or self.__increment_and_check_exceeding_iterations():\r\n return None\r\n\r\n if not self.__current_node:\r\n self.__set_current_node(self.__start_node)\r\n return self.__current_node.position\r\n\r\n best_node = self.__get_best_neighbour()\r\n if best_node.position:\r\n self.__set_current_node(best_node)\r\n return self.__current_node.position\r\n\r\n return None\r\n\r\n\r\nhill_climbing = None\r\n\r\n\r\ndef execute_hill_climbing(maze: ndarray, start: tuple[int, int], end: tuple[int, int]) -> Optional[tuple[int, int]]:\r\n global hill_climbing\r\n if not hill_climbing:\r\n hill_climbing = HillClimbing(maze, start, end)\r\n return hill_climbing.execute()\r\n","sub_path":"2nd year/AI/Assignments/Assignment2/src/algorithm/hill_climbing.py","file_name":"hill_climbing.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"582632348","text":"import requests\nfrom github import Github\n\ng = Github(\"username\", \"password\")\n\noutput = \"\"\nlibraries = []\nheader = \"\"\nwith open('library_list.txt', 'r') as f:\n line = f.readline()\n while line:\n target_library = line.strip()\n libraries.append(target_library)\n line = f.readline()\n f.close()\n\nfor target_library in libraries:\n repositories = g.search_repositories(query=target_library+' language:swift')\n org = repositories[0].full_name.split('/')[0]\n\n header += \"・{} by {} \".format(target_library,org)\n for tail in [\"\",\".md\"]: #LICENSE / LICENSE.mdどっちかのパターンがあるっぽい\n license_url = 'https://raw.githubusercontent.com/' + repositories[0].full_name +'/master/LICENSE' + tail\n print(repositories)\n print(license_url)\n\n req = requests.get(license_url)\n if req.status_code == 200:\n raw_license = req.text\n output += \"{} {}\".format(target_library, raw_license) + ' '*4\n # else:\n # print(\"**ERROR!**≠\\nstatus code: not 200⇛\",license_url)\n\noutput = header + \" \"*4 + output\nwith open('COMBINED_LICENSE', mode='w') as f:\n f.write(output)\n","sub_path":"python_utls/licenser.py","file_name":"licenser.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"9965969","text":"def shorten(text, max_char, max_newline):\n\tshortened = False\n\n\tif len(text) > max_char:\n\t\ttext = text[0:max_char]\n\t\tshortened = True\n\n\tif text.count('\\n') > max_newline:\n\t\ttext = text[0:find_nth(text, '\\n', max_newline)]\n\t\tshortened = True\n\n\tif shortened:\n\t\ttext = text[0:len(text) - 4] + ' ...'\n\n\treturn text\n\ndef find_nth(haystack, needle, n):\n\tstart = haystack.find(needle)\n\twhile start >= 0 and n > 1:\n\t\tstart = haystack.find(needle, start + len(needle))\n\t\tn -= 1\n\treturn start","sub_path":"utils/shorten.py","file_name":"shorten.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"147353379","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Parses raw BMS logs \"\"\"\n\nimport argparse\nimport os\n\nfrom matplotlib import pyplot as plt\n\nfrom bms.models import BmsLog\nfrom bms.plotter import build_plot_bms\n\n\ndef create_args():\n \"\"\"\n :return: ArgumentParser\n Parser that handles cmd arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser(usage='-f '\n '-o '\n '-h for full usage')\n\n parser.add_argument('-f', dest='file',\n help='log file to parse', required=True)\n parser.add_argument('-o', dest='out',\n help='output folder', required=True)\n\n return parser\n\n\ndef parse_args(parser):\n \"\"\"\n :param parser: ArgumentParser\n Object that holds cmd arguments.\n :return: tuple\n Values of arguments.\n \"\"\"\n\n args = parser.parse_args()\n\n input_file = str(args.file)\n assert os.path.exists(input_file)\n\n output_folder = str(args.out)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n assert os.path.exists(output_folder)\n\n return input_file, output_folder\n\n\ndef run(msgs, output_folder):\n bms_to_plot = [\n 9, 10, 11\n ] # plots just these\n\n for bms in bms_to_plot:\n print('\\nBMS', bms)\n build_plot_bms(bms, msgs) # create plot\n\n # plt.show() # show plot\n\n out_file = 'BMS-' + str(bms) + '.png'\n out_file = os.path.join(output_folder, out_file)\n fig = plt.gcf() # get reference to figure\n fig.set_size_inches(8.27, 11.69) # A4 vertical\n plt.savefig(out_file, dpi=120) # save plot\n\n\ndef main():\n input_file, output_folder = parse_args(create_args())\n lines = open(input_file).readlines()\n\n msgs = [\n BmsLog(line)\n for line in lines\n ] # parse logs\n\n run(msgs, output_folder)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"parsers/bms/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"369553984","text":"__author__: \"Aditya Kalyan Jayanti\"\n__email__: \"aj8582@rit.edu\"\n\nimport csv\nimport math\n\n\n# Stopping condition\nPURITY = 0.95\n\n# Classes for attributes\nCUPCAKE = \"0\"\nMUFFIN = \"1\"\n\n\n# 2.a)\ndef read_input(filename):\n \"\"\"\n To read the input of the file\n :param filename: File path of the input file\n :return: Data for the attributes, attribute names, row indices\n \"\"\"\n # A flag to get all only the headers\n flag = 1\n\n # To store all the data\n columns_with_data = {}\n\n # List of names of the attributes\n attributes = []\n with open(filename) as file:\n file_reader = csv.reader(file, delimiter=\",\")\n for rows in file_reader:\n if flag:\n for row in rows:\n\n # Creating lists for column names in the dictionary\n if row != \"\":\n temp = row.strip()\n columns_with_data[temp] = []\n attributes.append(temp)\n flag = 0\n else:\n\n # Appending values to the list corresponding to the column names in the dictionary\n for values in range(len(attributes)):\n if values == 0:\n columns_with_data[attributes[values]].append(rows[values])\n else:\n quantity = float(rows[values])\n columns_with_data[attributes[values]].append(quantity)\n\n # To create a list of indices for the rows\n no_of_rows = [x for x in range(len(columns_with_data[attributes[1]]))]\n\n return columns_with_data, attributes, no_of_rows\n\n\n# 2.b)\ndef emit_header(filename):\n \"\"\"\n Opens up a file pointer to classifier file and writes header\n :param filename: Filename of the file to write to\n :return: file pointer\n \"\"\"\n file = open(filename, \"w\")\n file.write('__author__: \"Aditya Kalyan Jayanti & Rohit Ravishankar\"\\n__email__: \"aj8582@rit.edu & rr9105@rit.edu\" ')\n file.write(\"\\n\\ndef classifier(csv_file):\\n\")\n file.write(\"\\n\\t# To write the result of classification\\n\")\n file.write(\"\\tfile = open('HW_05_Jayanti_AdityaKalyan_Ravishankar_Rohit_MyClassifications.csv','w')\\n\")\n file.write(\"\\trows = csv_file.readlines()\\n\")\n file.write(\"\\n\\t# Dictionary to store each row of data\\n\")\n file.write(\"\\tcolumns_with_data = {}\\n\")\n file.write(\"\\n\\t# To store column headers in a list\\n\")\n file.write(\"\\tattributes = []\\n\")\n file.write(\"\\tattributes_name = rows[0].split(',')\\n\")\n file.write(\"\\tfor attribute in attributes_name:\\n\")\n file.write(\"\\t\\tcolumns_with_data[attribute]=0\\n\")\n file.write(\"\\t\\tattributes.append(attribute)\\n\")\n file.write(\"\\n\\t# Iterating from 1 through all rows because the first row has only headers \\n\")\n file.write(\"\\tfor index in range(1,len(rows)):\\n\")\n file.write(\"\\t\\tline = rows[index].split(',')\\n\")\n file.write(\"\\t\\tline=line[0:len(rows[0])]\\n\")\n file.write(\"\\t\\tfor i in range(1,len(line)):\\n\")\n file.write(\"\\t\\t\\tcolumns_with_data[attributes[i]]= float(line[i])\\n\")\n\n return file\n\n\n# 2.c)\ndef emit_classifier(file):\n \"\"\"\n Opens up a file pointer to classifier file and writes classifier\n :param file: Filename of the file to write to\n :return: None\n \"\"\"\n file.write(\"\\t\\tmy_classifier_function(file, columns_with_data)\\n\")\n file.write(\"\\n\\ndef my_classifier_function(file, columns_with_data):\\n\")\n\n\n# 2.d)\ndef emit_trailer(file):\n \"\"\"\n Opens up a file pointer to classifier file and writes trailer\n :param file: Filename of the file to write to\n :return: None\n \"\"\"\n file.write(\"\\ndef main():\\n\")\n file.write(\"\\n\\t# for any new validation set, the path must be entered here\\n\")\n 
file.write(\"\\tcsv_file = open('Recipes_For_VALIDATION_2181_RELEASED_v202.csv', 'r')\\n\")\n file.write(\"\\tclassifier(csv_file)\\n\\n\")\n file.write(\"if __name__ == '__main__':\\n\")\n file.write(\"\\tmain()\\n\")\n\n\n# 2.e)\ndef emit_decision_tree(file, columns_with_data, attributes, no_of_rows, level=2, left_or_right=\"N\"):\n \"\"\"\n To emit the decision tree code by recursively splitting\n :param file: File object to which the decision tree writes\n :param columns_with_data: Dictionary containing all the data\n :param attributes: The names of the attributes\n :param no_of_rows: The list of indices for each subtree\n :param level: The level of the tree the current recursive call is at\n :param left_or_right: The attribute which tells which subtree the call is for. By default it is \"N\" for neither.\n :return: None\n \"\"\"\n # To store the best Gini Index, threshold value and attribute name\n best_gini = math.inf\n best_threshold = math.inf\n best_attribute_name = \"\"\n\n # The alternative names for the classes\n cupcake = \"cupcake\"\n Cupcake = \"Cupcake\"\n muffin = \"muffin\"\n Muffin = \"Muffin\"\n\n # To store the number of cupcakes and muffins greater and lesser than the threshold\n greater_cupcake = 0\n greater_muffin = 0\n smaller_cupcake = 0\n smaller_muffin = 0\n\n # To store the different splits of the data\n smaller_indices = []\n greater_indices = []\n\n # Type of item, muffin or cupcake\n item_type = columns_with_data[attributes[0]]\n\n # Iterate over all the attributes to find the attribute with the least weighted gini index to split on\n for index in range(1, len(attributes)):\n gini_impurity, gini_threshold = calculate_weighted_gini_index(item_type, columns_with_data[attributes[index]],\n no_of_rows)\n # To find the best gini and best threshold\n if best_gini >= gini_impurity:\n best_gini = gini_impurity\n best_threshold = gini_threshold\n best_attribute_name = attributes[index]\n\n # Getting the entire column of data for the best attribute\n best_attribute_column = columns_with_data[best_attribute_name]\n\n # Finding the number of muffins and cupcakes above and below the threshold\n for rows in no_of_rows:\n if best_attribute_column[rows] <= best_threshold:\n if item_type[rows] == cupcake or item_type[rows] == Cupcake:\n smaller_cupcake += 1\n elif item_type[rows] == muffin or item_type[rows] == Muffin:\n smaller_muffin += 1\n smaller_indices.append(rows)\n else:\n if item_type[rows] == cupcake or item_type[rows] == Cupcake:\n greater_cupcake += 1\n elif item_type[rows] == muffin or item_type[rows] == Muffin:\n greater_muffin += 1\n greater_indices.append(rows)\n\n # To decide the level the of indentation\n spaces = \"\\t\" * level\n\n file.write(spaces + 'if columns_with_data[\"' + str(best_attribute_name) + '\"]<=' + str(best_threshold) + \":\\n\")\n\n recurse_further, winner = should_recurse_further(smaller_cupcake, smaller_muffin)\n\n # Decision to recurse the left subtree further or not\n if recurse_further:\n emit_decision_tree(file, columns_with_data, attributes, smaller_indices, level + 1, \"L\")\n else:\n file.write(spaces + \"\\t\" + \"file.write('\" + winner + \"\\\\n')\\n\")\n\n file.write(spaces + \"else:\\n\")\n\n recurse_further, winner = should_recurse_further(greater_cupcake, greater_muffin)\n\n # Decision to recurse the right subtree further or not\n if recurse_further:\n emit_decision_tree(file, columns_with_data, attributes, greater_indices, level + 1, \"R\")\n else:\n file.write(spaces + \"\\t\" + \"file.write('\" + winner + 
\"\\\\n')\\n\")\n\n\ndef should_recurse_further(cupcake_count, muffin_count):\n \"\"\"\n To define the stopping condition\n :param cupcake_count: The number of cupcakes\n :param muffin_count: The number of muffins\n :return: Should recurse further or not based on purity and the winner\n \"\"\"\n # To get the purity for each node\n purity = max(cupcake_count, muffin_count)/(cupcake_count + muffin_count)\n\n # To decide the winner\n if cupcake_count > muffin_count:\n winner = CUPCAKE\n else:\n winner = MUFFIN\n\n if purity > PURITY:\n return False, winner\n else:\n return True, winner\n\n\ndef calculate_weighted_gini_index(item_type, data, no_of_rows):\n \"\"\"\n To find the weighted gini index for a given attribute\n :param item_type: Contains name of the attribute\n :param data: Contains data for the\n :param no_of_rows: Contains the number of rows in terms of indices\n :return: Weighted gini index and the threshold value\n \"\"\"\n cupcake = \"cupcake\"\n Cupcake = \"Cupcake\"\n\n best_gini = math.inf\n best_threshold = 0\n\n for index in range(int(min(data)), int(max(data)) + 1):\n\n # To store the number of values below and above the threshold\n number_type_below_1 = 0\n number_type_below_2 = 0\n number_type_above_1 = 0\n number_type_above_2 = 0\n\n threshold = index\n\n # To calculate the number of points below and and number of points above the threshold\n for row_idx in no_of_rows:\n if data[row_idx] <= threshold:\n if item_type[row_idx] == cupcake or item_type[row_idx] == Cupcake:\n number_type_below_1 += 1\n else:\n number_type_below_2 += 1\n else:\n if item_type[row_idx] == cupcake or item_type[row_idx] == Cupcake:\n number_type_above_1 += 1\n else:\n number_type_above_2 += 1\n\n # To calculate fraction of points below the threshold and above the threshold for the gini index\n if (number_type_below_1 + number_type_below_2) != 0:\n probability_points_below = (number_type_below_1/(number_type_below_1 + number_type_below_2)) ** 2 + \\\n (number_type_below_2/(number_type_below_1 + number_type_below_2)) ** 2\n else:\n probability_points_below = 0.5\n\n if (number_type_above_1 + number_type_above_2) != 0:\n probability_points_above = (number_type_above_1/(number_type_above_1 + number_type_above_2)) ** 2 + \\\n (number_type_above_2 / (number_type_above_1 + number_type_above_2)) ** 2\n else:\n probability_points_above = 0.5\n\n # To find the gini indices for the fraction of points above and below the threshold\n gini_above = 1 - probability_points_above\n gini_below = 1 - probability_points_below\n\n total_points = number_type_below_2 + number_type_below_1 + number_type_above_1 + number_type_above_2\n\n if total_points != len(no_of_rows):\n return False\n\n weighted_gini = (((number_type_above_2 + number_type_above_1)/total_points) * gini_above) + \\\n (((number_type_below_1 + number_type_below_2)/total_points) * gini_below)\n\n if weighted_gini <= best_gini:\n best_gini = weighted_gini\n best_threshold = threshold\n\n return best_gini, best_threshold\n\n\ndef main():\n \"\"\"\n Main calling function of the program\n :return: None\n \"\"\"\n # The file name of the training data\n file_name = \"Recipes_For_Release_2181_v202.csv\"\n\n # Emitted file\n emit_filename = \"Jayanti_AdityaKalyan_Classifier.py\"\n\n # To emit the header\n emit_file = emit_header(emit_filename)\n\n # To emit the classifier code\n emit_classifier(emit_file)\n\n # To return the columns of data as a dictionary, attribute names and row indices\n columns_with_data, attributes, no_of_rows = read_input(file_name)\n\n # To 
emit the decision tree\n emit_decision_tree(emit_file, columns_with_data, attributes, no_of_rows)\n\n # To emit the trailer to the file\n emit_trailer(emit_file)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"Jayanti_AdityaKalyan_Trainer.py","file_name":"Jayanti_AdityaKalyan_Trainer.py","file_ext":"py","file_size_in_byte":11775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"394633308","text":"import click\nfrom sayhello import app,db\n\n@app.cli.command(help='initlize database')\n@click.option('--drop',is_flag=True,help='create after drop')\ndef initdb(drop):\n if drop:\n click.confirm('continue dropping ?',abort=True)\n db.drop_all()\n click.echo('drop done ..')\n db.create_all()\n click.echo('Initlize Database ..')\n","sub_path":"sayhello/sayhello/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"560274883","text":"import netfilterqueue\nimport scapy.all as scapy\nimport os\n\n#os.system(\"iptables -I FORWARD -j NFQUEUE --queue-num 0\")\nos.system(\"iptables -I OUTPUT -j NFQUEUE --queue-num 0\")\nos.system(\"iptables -I INPUT -j NFQUEUE --queue-num 0\")\ndef proses_paket(paket):\n\tscapy_paket = scapy.IP(paket.get_payload())\n\t\"\"\"DNSRR => DNS respons, DNSQR => request\"\"\"\n\t\n\tif scapy_paket.haslayer(scapy.DNSRR):\n\t\tname_website = scapy_paket[scapy.DNSQR].qname\n\t\t#print(scapy_paket.show())\n\t\tif 'www.bing.com' in name_website:\n\t\t\tprint(\"[+] Spoofing target\")\n\t\t\tprint(scapy_paket.show())\n\t\t\t\"\"\" Membuat jawaban palsu/spoof answer \"\"\"\n\t\t\tanswer = scapy.DNSRR(rrname=name_website , rdata=\"192.168.1.1\")\n\t\t\t\"\"\" Mengubah paket answer \"\"\" \n\t\t\tscapy_paket[scapy.DNS].an = answer\n\t\t\t\"\"\" Mengubah agar hanya satu jumlah answer \"\"\" \n\t\t\tscapy_paket[scapy.DNS].ancount = 1\t\n\t\t\t\"\"\" Menghapus beberapa field paket,\n\t\t\t\tTidak menggangu paket yang telah di ubah\"\"\"\n\t\t\tdel scapy_paket[scapy.IP].len\n\t\t\tdel scapy_paket[scapy.IP].chksum\n\t\t\tdel scapy_paket[scapy.UDP].len\n\t\t\tdel scapy_paket[scapy.UDP].chksum\n\t\t\t\n\t\t\t\"\"\" Mengubah paket payload \"\"\"\n\t\t\t\"\"\" Harus di covert ke str \"\"\"\n\t\t\tpaket.set_payload(str(scapy_paket))\n\n\tpaket.accept()\n\ntry:\n\tqueue = netfilterqueue.NetfilterQueue()\n\tqueue.bind(0,proses_paket)\n\tqueue.run()\nexcept KeyboardInterrupt:\n\tprint(\"\\n[-] Ctrl+C ditekan...\")\n\tos.system(\"iptables --flush\")","sub_path":"dns_spoof.py","file_name":"dns_spoof.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"129390970","text":"from django.contrib.auth.models import User\n\nfrom hammertime.hammers import ViewHammer, hammer, MultiProcessingHammer\n\nfrom blog.models import Post\nfrom blog.views import post\nfrom blog.utils import get_user\n\n\npost_slugs = Post.objects.values('slug')\nViewHammer.register(post, params=post_slugs)\n\n\ndef do_something_time_consuming(**kwargs):\n some_number = kwargs.pop('some_number', 4242)\n some_number ** some_number\n\nmp_param_list = [{'some_number': x} for x in range(4242)]\n\nMultiProcessingHammer.register(do_something_time_consuming, mp_param_list)\n\n\n@hammer\ndef flush_users():\n user_pks = User.objects.values_list('id', flat=True)\n for user_pk in user_pks:\n get_user(user_pk)\n","sub_path":"examples/blog/hammers.py","file_name":"hammers.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"537367550","text":"import logging\nimport socket\nfrom logging.handlers import SysLogHandler\nimport configparser\nimport os\n\nbase_path = os.path.dirname(os.path.realpath(__file__))\nconfig = configparser.RawConfigParser()\nconfig.read(os.path.join(base_path, \"site.cfg\"))\n\n\nclass ContextFilter(logging.Filter):\n hostname = socket.gethostname()\n\n def filter(self, record):\n record.hostname = ContextFilter.hostname\n return True\n\n\nsyslog = SysLogHandler(address=(config.get(\n \"Logger\", \"remote_host\"), int(config.get(\"Logger\", \"remote_port\"))))\n\nsyslog.addFilter(ContextFilter())\n\nformat = '%(asctime)s %(hostname)s RUUVI-SCANNER: [%(levelname)s] %(message)s'\nformatter = logging.Formatter(format, datefmt='%b %d %H:%M:%S')\nsyslog.setFormatter(formatter)\n\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(formatter)\n\nlogger = logging.getLogger()\nlogger.addHandler(syslog)\nlogger.addHandler(consoleHandler)\nlogger.setLevel(logging.INFO)\n","sub_path":"setup_logging.py","file_name":"setup_logging.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"563121992","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom random import choice\n\ndef get_url(url,agent,proxy):\n\tget=requests.get(url,headers=agent,proxies=proxy)\n\tprint(get)\n\treturn get.text\n\t\n\n#https://ru.wikipedia.org/wiki/Вода\ndef vik_text(vik_html):\n\tsoup=BeautifulSoup(vik_html,\"html.parser\")\n\ttex=soup.find_all(\"div\")\n\tfor i in tex:\n\t\tcountry=i.find_next('h2').text\n\t\tagent=i.find_next('h2').find_next('h2').text\n\t\tip=i.find_next('h1').text.split(\" \")[4]\n\t#country=country.find('span').text\n\tprint('my ip='+str(ip)+'\\n'+str(country)+\"\\n\"+str(agent))\n\t\n\t\n\n\t\t\n\nuser=open('user.txt',).read().split('\\n')\npro=open('proxy.txt',).read().split('\\n')\n\ni=1\nwhile i!=0:\n\ttry:\n\t\tch=choice(pro)\n\t\tproxy={'http':'http://'+ch}\n\t\tagent={'User-Agent':choice(user)}\n\t\tvik_html=get_url('http://mybrowserinfo.com/',agent,proxy)\n\t\tvik_text(vik_html)\n\t\ti-=1\n\texcept:\n\t\tpro.remove(ch)\n\t\tprint('error')\n\t\ti=1\n\n\n\n\n\n","sub_path":"vpn.py","file_name":"vpn.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"601594959","text":"from math import sqrt\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.stats import f,friedmanchisquare, rankdata\n\ndef X_F_sqr(k,N,R):\n return ((12*N)/(k*(k+1)))*(np.sum(R**2)-(k*(k+1)**2)/4)\n\ndef F_F(k,N,X_F):\n return ((N-1)*X_F)/(N*(k-1)-X_F)\n\ndef critical_value(k, N, a=0.05):\n d1 = k - 1\n d2 = (k-1)*(N-1)\n return f.isf(a, d1, d2)\n\ndef cd(k,N,q_a):\n return q_a * sqrt((k*(k+1))/(6*N))\n\ndef main(df, a=0.01):\n # df = pd.read_csv('Results - friedman table.csv')\n df['classifier'] = df['classifier'] + ' ' + df['features']\n df.drop(['features',], axis=1, inplace=True)\n df = df.T\n classifiers = df.loc['classifier'].values\n df.columns = classifiers\n df.drop('classifier', axis=0, inplace=True)\n df['Dataset'] = df.index.values\n df.reset_index(level=0, inplace=True)\n df.drop(['index',], axis=1, inplace=True)\n df = df[['Dataset',] + list(classifiers)]\n scores = df\n\n classifiers = list(set(scores.columns) - set(['Dataset']))\n scores_data = scores[list(scores.columns)[1:]].values\n\n # parameters\n k = scores_data.shape[1]\n N=scores_data.shape[0]\n a = 0.01\n\n ranks = np.zeros(scores_data.shape)\n for i,scores_ in enumerate(scores_data):\n ranks[i] = len(scores_)+1 - rankdata(scores_)\n\n R = np.average(ranks, axis=0)\n\n X_F = X_F_sqr(k=k,N=N,R=R)\n print('k:', k, ' '*5, 'N:', N, ' '*5, 'a:', a)\n print('chi2: ', X_F)\n print(\"Friedman's F: \", F_F(k=k,N=N,X_F=X_F))\n print('F({},{})|{}: '.format(k-1,(k-1)*(N-1),a), critical_value(k=k,N=N, a=a))\n\n t = pd.DataFrame(columns=list(scores.columns)[1:], index=[0])\n t.loc[0] = R\n return t, t.T.sort_values(0)\n","sub_path":"code/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"321263479","text":"from time import sleep\nprint('-=-' * 20)\nprint('Soma dos pares')\nprint('-=-' * 20)\n# Aqui eu criei duas listas porque vou fazer uma lista só com os pares.\n#Usei uma variavel SOMAPAR apenas para pegar a soma fora do for. porque irei usa-la mais tarde.\n#Se ela ficar dentro do for eu não poderei usa-la depois\nnumbList = []\nnumbPar = []\nsomaPar = 0\n#aqui carrego minha lista de numeros INTEIROS\nfor i in range(1, 7):\n numbList.append(int(input('DIGITE O {}º NUMERO INTEIRO: '.format(i))))\n# aqui eu carrego minha lista com numeros PARES APENAS\nfor i in numbList:\n if i % 2 == 0:\n somaPar +=i\n numbPar.append(i)\n#AQUI EU IMPRIMO MINHA LISTA COM OS NUMERO PARES QUE FOI USADO NA SOMA\nprint('NUMEROS SOMADOS SÃO:')\nfor i in numbPar:\n print(i, end=\"-\")\n#AQUI EU IMPRIMO A SOMA DOS PARES.\nsleep(2)\nprint('')\nprint('-=-' * 15)\nprint('\\nA SOMA DE TODOS OS PARES É: {}'.format(somaPar))","sub_path":"ExMundo_02/50.py","file_name":"50.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"469274160","text":"from math import sqrt\n\nfrom vec_search.lemmatizer import lemmatize\n\n# Косинусное сходство\nfrom vec_search.loader import load_indexed\n\n\ndef cos_sim(v1, v2):\n len_v1 = sqrt(sum([x * x for x in v1]))\n len_v2 = sqrt(sum([x * x for x in v2]))\n\n mult = 0\n for x, y in zip(v1, v2):\n mult += x * y\n\n return mult / (len_v1 * len_v2) if len_v1 * len_v2 != 0 else 0\n\n\ndef decode_output(results):\n decoded = []\n docs = load_indexed()\n for r in results:\n if r[1] > 0:\n decoded.append((r[1], docs[r[0]][2], '{}.pdf'.format(docs[r[0]][2])))\n decoded.sort(reverse=True)\n return decoded\n\n\ndef vector_search(q):\n q_lemmas = lemmatize(q)\n docs = load_indexed()\n\n # Загрузка TF-IDF индексов из файла\n tf_idf = {}\n idf = {}\n tf_idf_index = 'data/indexes/tf_idf_index.csv'\n file = open(tf_idf_index, 'r')\n for l in file.readlines():\n lemma = l.split(' ')[0]\n idf[lemma] = float(l.split(' ')[1])\n tf_idf[lemma] = [float(x) for x in l.split(' ')[2:]]\n file.close()\n\n # Вычисление вектора запроса\n ql_counts = {}\n for ql in q_lemmas:\n if ql in ql_counts.keys():\n ql_counts[ql] += 1\n else:\n ql_counts[ql] = 1\n\n max_count = 0\n for k in ql_counts.keys():\n if ql_counts[k] > max_count:\n max_count = ql_counts[k]\n\n q_vector = []\n for k in idf.keys():\n if k in ql_counts.keys():\n q_vector.append((ql_counts[k] / max_count) * idf[k])\n else:\n q_vector.append(0)\n\n # Рассчёт длин векторов\n vectors = []\n for i in range(len(docs)):\n v = []\n for k in tf_idf.keys():\n v.append(tf_idf[k][i])\n vectors.append(v)\n\n results = []\n for i, v in enumerate(vectors):\n results.append((i, cos_sim(v, q_vector)))\n\n results.sort(reverse=True, key=lambda tup: tup[1])\n doc_indexes = []\n for i, x in results:\n if x > 0:\n doc_indexes.append((i, x))\n\n return decode_output(results)\n\n\nif __name__ == '__main__':\n print(vector_search('Перестановки'))\n","sub_path":"vec_search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"628312774","text":"import string\n\nfrom diggly.util.text_processor.text_process import Text_Process\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ntext_proc = Text_Process()\n\ndef score_topics(source_id, topics_desc_dict):\n token_dict = {}\n indices = {}\n res_dict = {}\n index = 0\n\n for tid, text in topics_desc_dict.iteritems():\n lowers = text.lower()\n remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\n no_punctuation = lowers.translate(remove_punctuation_map)\n token_dict[tid] = no_punctuation\n\n for tok in token_dict.keys():\n indices.update({tok: index})\n index += 1\n\n main_index = indices[source_id]\n\n # this can take some time\n tf_idf = TfidfVectorizer(tokenizer=text_proc.tokenize, stop_words='english')\n tfidf_matrix = tf_idf.fit_transform(token_dict.values())\n res = cosine_similarity(tfidf_matrix[main_index], tfidf_matrix)\n\n for tok, ind in indices.iteritems():\n if tok == main_index:\n continue;\n res_dict.update({tok: res[0][ind]})\n\n return res_dict\n\ndef score_outlinks(main_text, title_list):\n main_title = \"current_selected_topic\"\n token_dict = {}\n len_titles = {}\n indices = {}\n res_dict = {}\n index = 0\n\n for title in title_list:\n lowers = title.lower().replace(\"_\", \" \").replace(\"-\", \" \")\n len_titles.update({title: len(lowers.split(\" \"))})\n token_dict[title] = lowers\n\n len_titles[main_title] = 1\n token_dict[main_title] = main_text\n\n for tok in token_dict.keys():\n indices.update({tok: index})\n index += 1\n\n main_index = indices[main_title]\n\n tf_idf = TfidfVectorizer(tokenizer=text_proc.tokenize, stop_words='english')\n tfidf_matrix = tf_idf.fit_transform(token_dict.values())\n res = cosine_similarity(tfidf_matrix[main_index], tfidf_matrix)\n\n for tok, ind in indices.iteritems():\n if tok == main_title:\n continue;\n res_dict.update({tok: (res[0][ind] * 100 / len_titles[tok]) })\n\n return res_dict\n\n\n\n","sub_path":"diggly/util/text_processor/score_process.py","file_name":"score_process.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"82402004","text":"import logging\nfrom selenium.webdriver.chrome import options\nfrom bcolor import bcolors # Clase contenedora de los colores.\nfrom atributos import Device # Clase atributos.\nfrom pymongo import MongoClient # Conexión a la base de datos.\nfrom pymongo.errors import ServerSelectionTimeoutError\nimport sys\nimport getpass # Obtener información del usuario\n# pip install alive_progress & pip install tqdm\nfrom alive_progress import alive_bar\nfrom time import sleep\nfrom icecream import ic # Debug de codigo.\nimport colorama # Imprime texto en colores.\nimport pyfiglet # Modificar la forma del Título.\nfrom dns import reversename # Para obtener el DNS.\n# Para calcular la diferencia de fechas cuando la ip está en la BD.\nfrom datetime import datetime, timedelta\n# Comprobar sockets abiertos.\nfrom socket import socket, AF_INET, SOCK_STREAM, setdefaulttimeout, getfqdn\nfrom selenium import webdriver # Abrir FireFox para capturas de pantallas.\nfrom ipwhois import IPWhois # Whois.\nimport pygeoip # Para la geolcalización de las direcciones ip.\nfrom ipaddress import IPv4Address # Manejos de IPv4.\nfrom random import randint # Para la generación de ipv4 al azar.\nhostname = getpass.getuser() # Obtener el nombre de la maquina local.\n\nfrom funcionamiento import herramienta\n\n# Generar información de diagnostico para scripts con el módulo logging.\nlogging.basicConfig(filename='logs/iotInfo.log', level='INFO',\n format='%(asctime)s: %(levelname)s: %(message)s')\n\nclient = 'edison'\npassdb = 'GnzNw2aAyJjKGOs7'\ndbname = 'iotecuador'\n\n# Conexión MongoAtlas.\n\n\ndef get_db():\n try:\n url_client = MongoClient(\"mongodb+srv://\"+client+\":\"+passdb +\n \"@iotecuador.qbeh8.mongodb.net/\"+dbname+\"?retryWrites=true&w=majority\")\n mydb = url_client.iotecuador\n\n except Exception:\n logging.error(\n 'No se puede conectar con la DataBase: %s. Verifique el cliente de conexion: get_db()', dbname)\n exit(1)\n\n except ServerSelectionTimeoutError as e:\n logging.error(\n 'No se puede conectar con la DataBase: %s. Verifique su conexion', dbname)\n exit(1)\n return mydb\n\n\n\n\n\n# Valida la existencia de la Ipv4 en la BD.\n# 0: No Existe la IPv4 en la BD.\n# 1: Existe la dirección IPv4, supera el tiempo limite en días.\n# -1: Existe la dirección IPv4, No! supera el tiempo limite en días.\n# Estado True: Contiene puertos activos asignados.\n# Estado False: No! contiene puertos activos asignados.\n\ndef find_devices(IPV4):\n try:\n db = get_db() # Conexiíon a la BD\n valor = 0\n Ipv4Bd = ''\n\n search = db.Devices.find({'Direccion': IPV4})\n for r in search:\n Ipv4Bd = r['Direccion']\n ic.disable()\n ic(Ipv4Bd)\n estadoBd = r['Estado']\n ic.disable()\n ic(estadoBd)\n fechaBd = r['Fecha']\n ic.disable()\n ic(fechaBd)\n\n if(Ipv4Bd != ''): # Existe!\n\n if(estadoBd == True): # Existen Puertos Abiertos\n ic.disable()\n ic(estadoBd)\n Tiempoconsulta = 30 # Tiempo en días.\n\n valor = DateTime(fechaBd, Tiempoconsulta)\n ic.enable()\n ic(valor)\n\n else:\n ic.disable()\n ic(estadoBd)\n Tiempoconsulta = 15 # Tiempo en días.\n\n valor = DateTime(fechaBd, Tiempoconsulta)\n ic.enable()\n ic(valor)\n\n else: # No Existe!\n valor = 0\n\n #print (\"No existe la direccion IPV4 ingresada\",band)\n\n return valor\n\n except Exception:\n logging.error(\n \"Al buscar la Direccion IPv4 : %s en la base de datos. 
find_devices()\", IPV4)\n exit(1)\n\n# Fecha de la Base de datos.\n\n\ndef DateTime(FechaBD, days):\n try:\n # Válida los paremetros de la fecha y hora\n cadena = datetime.strptime(FechaBD, \"%Y-%m-%d %H:%M:%S\")\n ahora = datetime.now() # Obtener la hora actual de equipo\n # Establecer los días máximos a superar.\n treintadias = timedelta(days=days)\n fechaacomparar = ahora - treintadias\n\n ic(cadena, fechaacomparar)\n\n if cadena < fechaacomparar: # Supera el limite de días establecidos.\n estadoFecha = 1\n\n else:\n estadoFecha = -1\n\n ic.enable()\n ic(estadoFecha)\n\n return estadoFecha\n\n except Exception as e:\n logging.error(\n \"Se ha producido un error al validar la fecha. DateTime()\")\n exit(1)\n\n# Impresión de Texto Principal.\n\n\ndef cabecera():\n # install pip install pyfiglet\n try:\n Title = pyfiglet.figlet_format(\n \"IOT ECUADOR \\n\", font=\"epic\", justify=\"center\")\n Users = \":.HERRAMIENTA DE ANÁLISIS DE VULNERABILIDADES EN DISPOSITIVOS IOT EN ECUADOR.:\\n\\n\"\n inicio = 'Bienvenido! >>>' + hostname + '<<<'\n\n print(bcolors.WARNING + Title + bcolors.ENDC)\n print(typewrite(Users))\n print(typewrite(inicio))\n\n except Exception:\n logging.error(\"Cabecera()\")\n exit(1)\n\n# Validar el número a entero.\n\n\ndef lee_entero():\n try:\n\n while True:\n entrada = input('Introduce la cantidad:')\n try:\n entrada = int(entrada)\n return entrada\n\n except ValueError:\n wow = \"Wow! >>> \" + entrada + \" <<< no es un número entero: \"\n ic(typewrite(wow))\n\n except Exception:\n logging.error(\"lee_entero()\")\n exit(1)\n\n# Velocidad de escritura de los prints.\n\n\ndef typewrite(text):\n try:\n\n for char in text:\n sys.stdout.write(char)\n sys.stdout.flush()\n\n if char != \"\\n\":\n sleep(0.04)\n else:\n sleep(0.7)\n return char\n\n except Exception:\n logging.error(\"Typewrite()\")\n exit(1)\n\n\ndef opc1():\n pr = \" \\nOk!. Cúantas direcciones Ipv4 Aleatorias deseas Analizar: \\n\"\n print(typewrite(pr))\n cant = lee_entero()\n maxCant = repeat(cant)\n agregar(int(maxCant))\n\n\ndef main():\n try:\n\n while True:\n pr = \"\\nCuéntame, que deseas hacer el día de hoy? \\n\"\n print(typewrite(pr))\n\n op1 = \" 1)\\tAnalizar direcciones Ipv4 en Ecuador \"\n print(typewrite(op1))\n sleep(1)\n op2 = \" 2)\\tConocer como funciona la herramienta? \"\n print(typewrite(op2))\n sleep(1)\n op3 = \" 3)\\tSalir\\n\"\n print(typewrite(op3))\n\n num = input('Introduce el Opción: ')\n\n if num == str(1):\n opc1()\n break\n\n if num == str(2):\n\n Obj = herramienta()\n print((typewrite(Obj) + \"\\n\"))\n main()\n\n break\n\n if num == str(3):\n print(\"\\n\\n\\t Gracias por usar el sistemas de Busqueda \\n\\n\")\n exit(1)\n\n if num == '':\n print('No has ingresado una opción ')\n print('Favor de volverlo a intentar.')\n\n else:\n print('La opcion ingresada no es la corecta')\n print('Favor de volverlo a intentar.')\n\n return num\n\n except Exception as e:\n logging.warning(\n \"Se ha producido un error al introducir la opción: %s. 
main()\", num)\n exit(1)\n\n\n# Direcciones IPV4 de Ecuador aleatorias.\n\ndef Generar_IP_Ecuador_Aleatoria():\n try:\n while True: # Bucle que se cierra una ves obtenga la direcciones ipv4 de Ecuador\n\n ip = IPv4Address('{0}.{1}.{2}.{3}'.format(\n randint(0, 255), randint(0, 255), randint(0, 255), randint(0, 255)))\n\n obj = pygeoip.GeoIP('Geo/GeoLiteCity.dat')\n\n # Validar que la direccion ipv4 es de ecuador\n if(obj.country_code_by_addr(str(ip)) == \"EC\"):\n\n break\n\n return str(ip) # guardar ipv4 de Ecuador\n\n except Exception as e:\n logging.error(\n \"Se ha producido un error al crear una dirección Ipv4 randomica. Generar_IP_Ecuador_Aleatoria()\")\n exit(1)\n\n\n# Recibe un host y los puertos que queremos comprobar y devuelve los puertos abiertos\n\ndef OpenPort(host, puerto):\n try:\n setdefaulttimeout(0.5) # Tiempo de conexión segundos\n s = socket(AF_INET, SOCK_STREAM) # Puerto IPv4, TCP PROTOCOL\n resultado = s.connect_ex((str(host), puerto))\n if resultado == 0:\n return True # Puerto abierto\n else:\n return False # Puerto cerrado\n\n except Exception as e:\n logging.error(\"Al crear la conexión desde el host: %s \",\n host, \" con el puerto: %s. OpenPort()\", puerto)\n exit(1)\n\n\n# Captura la pantalla de la ip y el puerto dado.\n# Al existir una imagen con el mismo nombre, simplemente lo actualiza.\n# En caso que la ruta del directorio contenedor de sea incorrecta, se envia un mensaje con el recpectivo error!.\n# El nombre que toma la img es la dirección Ipv4.\n\ndef capturadepantalla(ip, puerto):\n setdefaulttimeout(30)\n try:\n\n nombreimagen = \"Noimagen.png\"\n #browser=\"\"#UnboundLocalError: local variable 'browser' referenced before assignment\n optionsChr = webdriver.ChromeOptions()\n optionsChr.add_argument(\"--headless\")\n optionsChr.add_argument('--disable-gpu')\n optionsChr.add_argument('--log-level=3')\n optionsChr.set_capability(\"acceptInsecureCerts\", True)\n optionsChr.add_argument(\"--incognito\")\n optionsChr.add_argument('--ignore-certificate-errors')\n optionsChr.add_argument('--version')\n\n browser = webdriver.Chrome(\n executable_path=r'C:\\\\IoT_Divices_ESFOT\\\\FirefoxDriver\\\\chromedriver.exe', options=optionsChr)\n \n browser.implicitly_wait(10)\n browser.set_page_load_timeout(10)\n browser.get(\"http://{0}\".format(ip)+\":\"+str(puerto))\n nombreimagen = str(ip)+\",\"+str(puerto)+\".png\" # Nombre de la Img.\n sleep(1)\n ic.enable()\n ic(nombreimagen)\n screenshot = browser.get_screenshot_as_file(\n\n r\"C:\\\\IoT_Divices_ESFOT\\\\capturas\\\\\" + str(nombreimagen)) # Bool\n ic.disable()\n ic(screenshot)\n\n\n state = screenshot\n ic.disable()\n ic(\"screenshot\", state)\n browser.close()\n\n except Exception:\n state = False\n nombreimagen = \"Noimagen.png\"\n return nombreimagen\n\n print(\"Captura Exitosa!\")\n return nombreimagen\n\n \n\n\n# Obtiene la información correspondiente a esos puertos y añadirlos o actualizarlos.\n\ndef addNewDevices(ip, portOpen, exist):\n try:\n puertoList = []\n\n for puerto in portOpen:\n try:\n connection = socket(AF_INET, SOCK_STREAM)\n connection.connect((ip, puerto))\n connection.send(b'HEAD / HTTP/1.0\\r\\n\\r\\n')\n banner = \"\" # Inicializamos banner por si al final hay error en el siguiente paso\n banner = connection.recv(1024) # Max 1024 Bytes contenido\n aux = str(banner).replace('\\\\r\\\\n', ' ')\n # Quitamos el espacio incial y los finales que no interesan. 
Ya tenemos el banner\n banner = aux[2:len(aux)-3]\n\n except Exception:\n logging.warning(\n \"Al realizar la conexion con el banner, puerto: %s. \", puerto)\n banner = None\n\n connection.close()\n\n # adñadir información de la direccion Ipv4\n obj = pygeoip.GeoIP('Geo/GeoLiteCity.dat')\n location = obj.record_by_addr(str(ip))\n\n ic.disable()\n ic('location: ', location)\n for key, val in location.items():\n ic.disable()\n ic('%s : %s' % (key, val))\n\n #Realizar la captura.\n imagen = capturadepantalla(ip, puerto)\n\n\n # Almacena 'Documentos' dentro de un arreglo, usando append.\n puerto = {'Puerto': str(puerto), 'Banner': str(\n banner), 'Imagen': str(imagen)}\n puertoList.append(puerto)\n ic(puerto)\n\n # Información de los puertos:\n dominio = getfqdn(ip) # Dominio\n whois = IPWhois(ip).lookup_whois() # Whois\n dns = reversename.from_address(ip) # DNS\n # Fecha y hora del Equipo.\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n ic.disable()\n ic(banner)\n ic.disable()\n ic(dominio)\n ic.disable()\n ic(whois)\n ic.disable()\n ic(dns)\n ic.disable()\n ic(date)\n ic.disable()\n ic(puertoList)\n\n # Agrega la infromacion a la base de datos por primera vez.\n # Los atributos que se asignan son los siguientes: (ip, img, fecha ,location, whois, dominio, dns, puerto)\n if exist == 0:\n estado = True\n db = get_db()\n datos = Device(str(ip), estado, date, location,\n whois, str(dominio), str(dns), puertoList)\n db.Devices.insert_one(datos.toCollection())\n logging.info(\"Ipv4: %s, Agregada!\", ip)\n\n return \"Se agrego correctamente!\\n\"\n\n # Paso el límite los días esblecidos\n if exist == 1:\n db = get_db()\n db.Devices.update_one({\"Direccion\": str(ip)}, {\"$set\": {\"Estado\": True, \"Fecha\": date,\n \"Whois\": whois, \"Dominio\": str(dominio), \"Dns\": str(dns), \"puerto\": puertoList}})\n\n logging.info(\"Ipv4: %s, Actualizada!\", ip)\n return \"Se actualizo correctamente!\\n\"\n\n except Exception:\n logging.error(\n \"La direccion IPv4: %s no puede agregar o actualizar.\", ip, \"Conexion: Fallida! addNewDevices\")\n exit(1)\n\n# finalización de la busqueda.\n\n\ndef new_search(valor):\n try:\n if ((valor == \"Si\") or (valor == \"si\") or (valor == \"s\") or (valor == \"S\")):\n return opc1()\n else:\n print(bcolors.HEADER +\n \"\\n\\n\\t Gracias por usar el sistemas de Busqueda \\n\\n\" + bcolors.ENDC)\n exit(1)\n\n except Exception:\n logging.error(\n \"Se ha producido un error al generar una nueva busqueda. new_search()\", )\n exit(1)\n # Si se recibe un parámetro se comprobaran tantas direcciones ip como es parámetro (limitando a 1000)\n\n# Número de busquedas.\n\n\ndef repeat(repeticiones):\n try:\n # repeticiones=1 ## si usuario no ingresa ningun valor, por defecto es 1 direción ip\n # Realizara una busqueda de 100 direciones ipv4.\n if int(repeticiones) > 1000:\n repeticiones = 1000\n\n ic.enable()\n ic(\"Se van a examinar:\", repeticiones)\n return repeticiones\n\n except Exception:\n logging.error(\n \"Se ha producido un error en la cantidad de repeticiones. 
\", )\n exit(1)\n\n# No existen puertos abiertos.\n\n\ndef EmptyPort(IPv4, exist):\n try:\n estadoBd = False # Se agrege la nueva direccion IPv4\n db = get_db() # Conexiíon a la BD\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n search = db.Devices.find({'Direccion': IPv4})\n for r in search:\n estadoBd = r['Estado']\n\n if(exist == 1 and estadoBd == False): # Actualizacíon de los puertos\n db.Devices.update_one({\"Direccion\": str(IPv4)}, {\n \"$set\": {\"Fecha\": date}})\n return \"Se actualizo correctamente!\\n\"\n\n if(exist == 1 and estadoBd == True): # Actualizacíon de los puertos\n db.Devices.update_one({\"Direccion\": str(IPv4)}, {\n \"$set\": {\"puerto\": None, \"Estado\": False}})\n\n return \"Se actualizo correctamente!\\n\"\n\n if(exist == 0): # Agregar\n estado = False\n obj = pygeoip.GeoIP('Geo/GeoLiteCity.dat')\n location = obj.record_by_addr(str(IPv4))\n datos = Device(str(IPv4), estado, date, location,\n None, None, None, None)\n db.Devices.insert_one(datos.toCollection())\n return \"Se agrego correctamente!\\n\"\n\n except Exception:\n logging.warning(\n \"La direccion IPv4: %s, PuertosActivos: 0 no puede agregarse o actualizarse.\", IPv4, \"Conexion: Fallida\")\n exit(1)\n\n\n\n\n\ndef agregar(repeticiones):\n\n try:\n\n PortList = [22, 23, 25, 53, 80, 81, 110, 180, 443, 873, 2323, 5000, 5001, 5094, 5150, 5160, 7547, 8080, 8100, 8443, 8883, 49152, 52869, 56000,\n 1728, 3001, 8008, 8009, 10001, 223, 1080, 1935, 2332, 8888, 9100, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 21, 554, 888, 1159, 1160, 1161,\n 1435, 1518, 3389, 4550, 5005, 5400, 5550, 6550, 7000, 8000, 8081, 8090, 8150, 8866, 9000, 9650, 9999, 10000, 18004, 25001, 30001, 34567, 37777,\n 69, 135, 161, 162, 4786, 5431, 8291, 37215, 53413]\n\n\n # agregarle en una funcion\n #print(\"repeticiones\", repeticiones)\n for contador in range(0, int(repeticiones)):\n # validar el tipo de busqueda.\n ip = Generar_IP_Ecuador_Aleatoria() # llamamos a la funcion, ip aleatorias\n ic.enable()\n Num = contador+1\n ic(Num, ip)\n # Comprobamos si la IPv4 está en la base de datos MongpAtlas\n findDeviceBD = find_devices(ip)\n\n ic.enable()\n ic(findDeviceBD)\n\n if(findDeviceBD == 0 or findDeviceBD == 1):\n portOpen = []\n\n num = len(PortList)\n with alive_bar(num) as bar:\n for port in PortList:\n bar()\n\n estadoPort = OpenPort(ip, port)\n\n if estadoPort == True:\n\n ic.disable()\n ic(port, estadoPort)\n portOpen.append(port)\n\n else:\n ic.disable()\n ic(port, estadoPort)\n\n portsNumbers = len(portOpen)\n\n if int(portsNumbers) != 0:\n ic.enable()\n ic(portOpen)\n Estado = addNewDevices(ip, portOpen, findDeviceBD)\n ic.enable()\n ic(Estado)\n\n else:\n ic.enable()\n ic(portsNumbers)\n Estado = EmptyPort(ip, findDeviceBD)\n ic.enable()\n ic(Estado)\n ic.enable()\n\n else:\n print(\"La dirección IPv4\", ip,\n \" ya existe y es menor a los días establecidos\")\n\n print(\"\\n\\nBusqueda Finalizada :) \\n\\n\")\n return final()\n\n except Exception as e:\n print(\"Se ha producido un error al agregar o actualizar la dirección IPv4:\" +\n bcolors.WARNING + e + bcolors.ENDC)\n exit(1)\n\n # resultado\n\n\ndef final():\n\n try:\n print(\"Desea realizar una nueva busqueda \\n\")\n valor = input(\"Ingrese Si / No: \")\n ic.disable()\n ic(new_search(valor))\n\n except Exception:\n logging.error(\"Validar la opción (Si / No):\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n colorama.init()\n cabecera()\n 
main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":19495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"424849912","text":"from mininet.topo import Topo\n\ndef getPort(link, node): return link['port1'] if link['node1'] == node else link['port2']\n\nclass CamusTopo(Topo):\n \"\"\" Abstract topology routing using Camus.\n Assumes that each host is connected to exactly one switch.\n \"\"\"\n\n def __init__(self, **opts):\n Topo.__init__(self, **opts)\n self.subscriptions_for_sw = {}\n self.rules_for_sw = {}\n\n def hostSwitch(self, host):\n \"\"\" Return edge switch to which `host` is connected \"\"\"\n for a,b in self.links():\n if host == a and self.isSwitch(b):\n return b\n if host == b and self.isSwitch(a):\n return a\n raise Exception(\"Could not find a link to any switch from host %s\" % host)\n\n def switchPortForHost(self, host):\n \"\"\" Return the switch port to which `host` is connected \"\"\"\n switch = self.hostSwitch(host)\n return getPort(self.linkInfo(host, switch), switch)\n\n def subscribe(self, host, queries):\n if isinstance(queries, str):\n queries = [queries]\n\n switch = self.hostSwitch(host)\n self.addSubscriptionRec(host, queries, switch, self.linkInfo(host, switch))\n\n def addSubscriptionRec(self, host, queries, switch, down_link):\n raise NotImplementedError()\n\n\nclass SingleSwitchTopo(CamusTopo):\n def __init__(self, n, **opts):\n CamusTopo.__init__(self, **opts)\n\n switch = self.addSwitch('s1')\n self.rules_for_sw['s1'] = []\n\n for i in xrange(1, n+1):\n host = self.addHost('h%d' % i,\n ip = \"10.0.0.%d\" % i,\n mac = '00:00:00:00:00:%02x' % i)\n self.addLink(host, switch, port2=i)\n\n def addSubscriptionRec(self, host, queries, switch, down_link):\n port = getPort(down_link, switch)\n for q in queries:\n self.rules_for_sw[switch].append('%s: fwd(%d);' % (q, port))\n\n\n# Based on https://github.com/howar31/MiniNet/blob/master/topo-fat-tree.py\nclass FatTreeTopo(CamusTopo):\n\n def __init__(self, K, **opts):\n CamusTopo.__init__(self, **opts)\n\n self.pod_count = K\n self.core_count = (K/2) ** 2\n self.aggr_count = (K/2) * K\n self.edge_count = (K/2) * K\n\n self.upstream_for_sw = {}\n\n for core in range(int(self.core_count)):\n core_sw = 'cs_%d' % core\n self.addSwitch(core_sw)\n self.upstream_for_sw[core_sw] = []\n\n for pod in range(int(self.pod_count)):\n\n for aggr in range(int(self.aggr_count / self.pod_count)):\n aggr_sw = self.addSwitch('as_%d_%d' % (pod, aggr))\n self.upstream_for_sw[aggr_sw] = []\n for core in range(int((K/2)*aggr), int((K/2)*(aggr+1))):\n core_sw = 'cs_%d' % core\n self.addLink(aggr_sw, core_sw)\n self.upstream_for_sw[aggr_sw].append(core_sw)\n\n for edge in range(int(self.edge_count / self.pod_count)):\n edge_sw = self.addSwitch('es_%d_%d' % (pod, edge))\n self.upstream_for_sw[edge_sw] = []\n for aggr in range(int(self.edge_count / self.pod_count)):\n aggr_sw = 'as_%d_%d' % (pod, aggr)\n self.addLink(edge_sw, aggr_sw)\n self.upstream_for_sw[edge_sw].append(aggr_sw)\n\n for h in range(int(K/2)):\n host = self.addHost('h_%d_%d_%d' % (pod, edge, h),\n ip = '10.%d.%d.%d' % (pod, edge, h+1),\n mac = '00:00:00:%02x:%02x:%02x' % (pod, edge, h+1))\n self.addLink(edge_sw, host)\n\n\n for sw in self.switches():\n self.rules_for_sw[sw] = []\n\n\n def addSubscriptionRec(self, host, queries, switch, down_link):\n port = getPort(down_link, switch)\n for q in queries:\n self.rules_for_sw[switch].append('%s: fwd(%d);' % (q, port))\n for sw2 in self.upstream_for_sw[switch]:\n self.addSubscriptionRec(host, queries, sw2, self.linkInfo(switch, 
sw2))\n","sub_path":"examples/itch.p4app/camus_topo.py","file_name":"camus_topo.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"269419069","text":"import pandas as pd\nimport ezdxf\nimport os\n\n\ndef get_frames(filename):\n frames = pd.read_excel(filename, 'Connectivity - Frame', index_col= 0, skiprows= 1)\n frames = frames[1:]\n frame_sections = pd.read_excel(filename, 'Frame Section Assignments', index_col= 0, skiprows= 1)\n frame_sections = frame_sections[1:]\n frames = pd.merge(frames, frame_sections, on = 'Frame')\n coordinates = pd.read_excel(filename, 'Joint Coordinates', index_col= 0, skiprows= 1)\n coordinates = coordinates[1:]\n coordinates['Coor'] = coordinates.apply(lambda x: (x.GlobalX, x.GlobalY, x.GlobalZ), axis=1)\n frames.JointI = frames.JointI.apply(lambda x: coordinates.Coor[x])\n frames.JointJ = frames.JointJ.apply(lambda x: coordinates.Coor[x])\n frames = frames[['JointI','JointJ',\"AnalSect\"]]\n return(frames)\n\n\ndef to_dxf(filename):\n def create_layers_fonts(section_name_list,dwg):\n dwg.styles.new('custom', dxfattribs={'font': 'times.ttf', 'width': 0.8})\n for i, each in enumerate(section_name_list):\n dwg.layers.new(name = each, dxfattribs={'color': i + 1})\n frames = get_frames(filename)\n dwg = ezdxf.new('AC1024')\n msp = dwg.modelspace()\n create_layers_fonts(frames.AnalSect.unique(), dwg)\n for each in frames.index:\n row = frames.loc[each]\n msp.add_line(row.JointI, row.JointJ, dxfattribs= {'layer': row.AnalSect})\n filename = filename.replace('.xlsx','.dxf')\n dwg.saveas(filename)","sub_path":"sap_to_dxf.py","file_name":"sap_to_dxf.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"27415088","text":"import numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\ncount = []\r\n\r\n\r\ndef Initialize(n, board):\r\n for key in ['queen', 'row', 'col', 'd1', 'd2']:\r\n board[key] = {}\r\n for i in range(n):\r\n board['queen'][i] = -1\r\n board['row'][i] = 0\r\n board['col'][i] = 0\r\n for i in range(0, 2*(n+1)):\r\n board['d1'][i] = 0\r\n for i in range(-(n-1), n):\r\n board['d2'][i] = 0\r\n\r\n\r\ndef free(i, j, board):\r\n return(board['row'][i] == 0 and board['col'][j] == 0 and board['d1'][i+j] == 0 and board['d2'][i-j] == 0)\r\n\r\n\r\ndef addqueen(i, j):\r\n board['queen'][i] = j\r\n board['row'][i] = 1\r\n board['col'][j] = 1\r\n board['d1'][i+j] = 1\r\n board['d2'][i-j] = 1\r\n\r\n\r\ndef undoqueen(i, j):\r\n board['queen'][i] = -1\r\n board['row'][i] = 0\r\n board['col'][j] = 0\r\n board['d1'][i+j] = 0\r\n board['d2'][i-j] = 0\r\n\r\n\r\ndef placequeen(i):\r\n n = len(board['queen'].keys())\r\n for j in range(n):\r\n if free(i, j, board):\r\n addqueen(i, j)\r\n if i == n-1:\r\n printboard(board)\r\n\r\n else:\r\n placequeen(i+1)\r\n undoqueen(i, j)\r\n\r\n\r\ndef printboard(board):\r\n count.append(1)\r\n lst = []\r\n n = len(board['queen'].keys())\r\n for x in range(n):\r\n lst.append((x, board['queen'][x]))\r\n\r\n matrix = []\r\n for i in range(n):\r\n inner = []\r\n for j in range(n):\r\n if(i, j) == lst[i]:\r\n inner.append(1)\r\n else:\r\n inner.append(0)\r\n matrix.append(inner)\r\n print(matrix)\r\n \"\"\"\r\n sns.heatmap(matrix, linewidth=0.5, cbar=False,\r\n xticklabels=False, yticklabels=False)\r\n plt.show()\r\n \"\"\"\r\n\r\n\r\nboard = {}\r\nn = int(input(\"Enter Number of Queens: \"))\r\nInitialize(n, board)\r\nif placequeen(0):\r\n printboard(board)\r\nprint(len(count))\r\n","sub_path":"everthing python/n-queens-allpossiblesolns.py","file_name":"n-queens-allpossiblesolns.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"589932915","text":"#exercise 3\r\n\r\ngroceries = ['banana','orange','apple']\r\n\r\nstock = {\r\n 'banana':6,\r\n 'apple':0,\r\n 'orange':32,\r\n 'pear':15\r\n}\r\n\r\nprices = {\r\n 'banana':4,\r\n 'apple':2,\r\n 'orange':1.5,\r\n 'pear':3\r\n}\r\n\r\ndef compute_bill(food):\r\n total = 0\r\n for ele in food:\r\n if ele in stock:\r\n if stock[ele] > 0:\r\n total += (stock[ele] * prices[ele])\r\n stock[ele] -= 1\r\n print(ele,\"added to bill!\")\r\n print('Total Bill:',total)\r\n else:\r\n print(ele,'no stock')\r\n else:\r\n print('noe')\r\n\r\n print('Total Bill:',total)\r\n\r\ncompute_bill(groceries)\r\n\r\n","sub_path":"Codes/Python/SEM 1/List_Dictionary_Exercise/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"570550600","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 25 16:18:25 2020\n\n@author: piphi\n\"\"\"\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport csv\nimport urllib\n\n\nclass Worldometer_Scraper:\n def __init__(self, output_path):\n self.path = output_path\n world_o_meter_url = \"https://www.worldometers.info/coronavirus/\"\n self.table_list = []\n req = urllib.request.Request(\n world_o_meter_url, headers={\"User-Agent\": \"Magic Browser\"}\n )\n world_o_meter_page = urllib.request.urlopen(req).read()\n self.world_o_meter_soup = BeautifulSoup(world_o_meter_page, \"html.parser\")\n\n def run_scraper(self):\n first_row = self.world_o_meter_soup.find(\"thead\")\n categories = [a.text for a in first_row.find_all(\"th\")]\n self.table_list.append(categories)\n\n table = self.world_o_meter_soup.find(\"tbody\")\n rows = table.find_all(\"tr\", {\"style\": \"\"})\n\n for row in rows:\n\n temp_list = [a.text for a in row.find_all(\"td\")]\n\n # clean a bit of the data\n for i in range(0, len(temp_list)):\n temp = temp_list[i]\n if temp:\n temp_list[i] = temp.rstrip()\n\n self.table_list.append(temp_list)\n\n self._to_csv()\n\n def _to_csv(self):\n with open(self.path, \"w\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerows(self.table_list)\n print(\"completed!\")\n\n\nif __name__ == \"__main__\":\n scraper = Worldometer_Scraper(\"worldometer.csv\")\n scraper.run_scraper()\n","sub_path":"scrapers/WorldometerScraper.py","file_name":"WorldometerScraper.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"258694511","text":"\nimport logging\nimport matplotlib.pyplot as plt\nimport pymc3 as pm\nfrom sklearn.datasets import make_regression\nimport seaborn as sns\n\n# note: also see https://towardsdatascience.com/\n# markov-chain-monte-carlo-in-python-44f7e609be98\n\n\nclass Pymc3RegressionExample(object):\n def __init__(self):\n logging.basicConfig(level=logging.INFO)\n self.logger = logging.getLogger(__name__)\n self.logger.info('initialising a {} instance'\n .format(self.__class__.__name__))\n\n # helpers for properties\n self._dataset = None\n\n @property\n def dataset(self):\n \"\"\"Random data set for a regression\"\"\"\n if self._dataset is None:\n X, y = make_regression(n_samples=1000, n_features=2, noise=10.)\n self._dataset = {'X': X, 'y': y}\n\n return self._dataset\n\n def run_mcmc(self, spec_method='flexible'):\n with pm.Model() as mdl:\n if spec_method == 'flexible':\n # specify priors\n self.logger.info('specifying priors')\n intercept = pm.Normal('intercept', mu=0., sd=1000.)\n x1_coef = pm.Normal('x1_coef', mu=0., sd=1000.)\n x2_coef = pm.Normal('x2_coef', mu=0., sd=1000.)\n # residual_std = pm.HalfCauchy('sigma', beta=10, testval=1.)\n residual_std = pm.Gamma('residual_std', mu=1., sd=1000.,\n testval=1.)\n\n # specify likelihood\n self.logger.info('specifying likelihood')\n mu = (intercept +\n x1_coef * self.dataset['X'][:, 0] +\n x2_coef * self.dataset['X'][:, 1])\n likelihood = pm.Normal(\n 'y', mu=mu, sd=residual_std, observed=self.dataset['y'])\n\n elif spec_method == 'patsy_glm':\n data_dict = {\n 'y': self.dataset['y'],\n 'x1': self.dataset['X'][:, 0],\n 'x2': self.dataset['X'][:, 1],\n }\n\n self.logger.info('specifying model using patsy glm method')\n pm.glm.GLM.from_formula('y ~ x1 + x2', data_dict)\n\n else:\n raise ValueError('unrecognised spec_method {}'\n .format(spec_method))\n\n # run mcmc (using automatically chosen sampler, e.g. NUTS sampling)\n self.logger.info('running mcmc')\n trace = pm.sample(6000, njobs=1, tune=1000)\n # note: 'tune' argument handles the burn-in\n\n # show results (with no thinning)\n n_burnin_samples = 0 # burn-in handled above\n msg = ('summary of marginal posteriors (no thinning):\\n{}'\n .format(pm.summary(trace, start=n_burnin_samples).round(2)))\n self.logger.info(msg)\n pm.traceplot(trace, skip_first=n_burnin_samples)\n plt.show()\n\n self._show_custom_plots(\n trace=trace,\n params=['intercept', 'x1_coef', 'x2_coef', 'residual_std'],\n burnin=n_burnin_samples)\n\n @staticmethod\n def _show_custom_plots(trace, params, burnin=0, thinning=5):\n trace_dict = {\n param: trace.get_values(param, burn=burnin, thin=thinning)\n for param in params}\n\n # plot traces of Markov chains\n for param in params:\n plt.plot(trace_dict[param], label=param)\n plt.xlabel('Markov chain iteration')\n plt.ylabel('param value')\n\n plt.legend()\n plt.show()\n\n # plot marginal posterior probability densities for parameters\n assert (len(params) == 4)\n for i, param in enumerate(params):\n plt.subplot(2, 2, i + 1)\n sns.distplot(trace_dict[param], label=param)\n plt.legend()\n\n plt.show()\n\n\nif __name__ == '__main__':\n reg = Pymc3RegressionExample()\n reg.run_mcmc(spec_method='flexible')\n","sub_path":"pymc3_examples.py","file_name":"pymc3_examples.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"384266276","text":"import os\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom subprocess import run\nfrom pymatgen.io.lammps.outputs import parse_lammps_dumps\n\n'''\nThis script converts a single dump file (with one or more frames) into dump files (with a single frame)\nand xyz files (with a single frame)\n'''\n\ncwd = os.getcwd()\ndump_file_path_pattern = os.path.join(cwd,'dump.*.dump')\n\nDump_files = glob.glob(dump_file_path_pattern)\n\nif len(Dump_files) == 1:\n run(['mkdir','-p','trj_files/rdf_files'])\n run(['mkdir','xyz_files'])\n Dump_file = Dump_files[0]\n wd, Dump_file_name = os.path.split(Dump_file)\n Dumps = parse_lammps_dumps(Dump_file)\n\n for Dump in Dumps:\n trj_name = Dump_file_name[:-4] + str(Dump.timestep) + '.lammpstrj'\n xyz_name = Dump_file_name[:-4] + 'alt.' + str(Dump.timestep) + '.xyz'\n Dump.as_txt_file(trj_name,output=True)\n Dump.as_txt_file(xyz_name,convert='xyz',output=True)\n if Dump.timestep % 500000 == 0:\n run(['cp',trj_name,'trj_files/rdf_files'])\n run(['mv',trj_name,'trj_files'])\n run(['mv',xyz_name,'xyz_files'])\n","sub_path":"pymatgen/io/lammps/scripts/Dump_to_dumps_and_xyz.py","file_name":"Dump_to_dumps_and_xyz.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"250414275","text":"import DSAsorts, csv, timeit\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnames_list = []\n\nmax_n = 2000\n\nwith open('RandomNames.csv', 'rt') as file:\n reader = csv.reader(file)\n for row in reader:\n names_list.append(row[0])\n\nbtimes = {}\nfor i in range(1, max_n, 50):\n sorting_list = names_list.copy()\n print(\"Bubble Sorting \" + str(i) + \" items...\")\n start_time = timeit.default_timer()\n DSAsorts.bubble_sort(sorting_list[0:i])\n end_time = timeit.default_timer()\n total_time = (end_time - start_time)\n btimes[i] = total_time\n print(total_time)\n\nprint(btimes)\n\nstimes = {}\nfor i in range(1, max_n, 50):\n sorting_list = names_list.copy()\n print(\"Selection Sorting \" + str(i) + \" items...\")\n start_time = timeit.default_timer()\n DSAsorts.selection_sort(sorting_list[0:i])\n end_time = timeit.default_timer()\n total_time = (end_time - start_time)\n stimes[i] = total_time\n print(total_time)\n\nprint(stimes)\n\nitimes = {}\nfor i in range(1, max_n, 50):\n sorting_list = names_list.copy()\n print(\"Insertion Sorting \" + str(i) + \" items...\")\n start_time = timeit.default_timer()\n DSAsorts.insertion_sort(sorting_list[0:i])\n end_time = timeit.default_timer()\n total_time = (end_time - start_time)\n itimes[i] = total_time\n print(total_time)\n\nprint(itimes)\n\n\nx = np.arange(len(itimes))\nwidth = 0.35\n\nfig, ax = plt.subplots()\nirect = ax.bar(x - width, list(itimes.values()), width, alpha=0.7,\n label=\"Insertion\")\nbrect = ax.bar(x, list(btimes.values()), width, alpha=0.7,\n label=\"Bubble\")\nsrect = ax.bar(x + width, list(stimes.values()), width, alpha=0.7,\n label=\"Selection\")\n\nax.set_ylabel('Time (s)')\nax.set_title('Sort Time Complexity Growth')\n\n\nax.legend()\n\nfig.tight_layout()\nplt.xticks(range(len(itimes)), list(itimes.keys()), rotation=70)\nplt.xlabel(\"n\")\nplt.savefig(\"bsi.png\")\nplt.show()\n","sub_path":"Prac1/SortFilesPython/bsi_test.py","file_name":"bsi_test.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"594764756","text":"class UrPiece:\n def __init__(self, color, symbol):\n self.color = color\n self.position = None\n self.complete = False\n self.symbol = symbol\n\n def can_move(self, num_moves):\n pass\n\n\nclass BoardSquare:\n def __init__(self, x, y, entrance=False, _exit=False, rosette=False, forbidden=False):\n self.piece = None\n self.position = (x, y)\n self.next_white = None\n self.next_black = None\n self.exit = _exit\n self.entrance = entrance\n self.rosette = rosette\n self.forbidden = forbidden\n\n def load_from_json(self, json_string):\n import json\n loaded_position = json.loads(json_string)\n self.piece = None\n self.position = loaded_position['position']\n self.next_white = loaded_position['next_white']\n self.next_black = loaded_position['next_black']\n self.exit = loaded_position['exit']\n self.entrance = loaded_position['entrance']\n self.rosette = loaded_position['rosette']\n self.forbidden = loaded_position['forbidden']\n\n def jsonify(self):\n next_white = self.next_white.position if self.next_white else None\n next_black = self.next_black.position if self.next_black else None\n return {'position': self.position, 'next_white': next_white, 'next_black': next_black, 'exit': self.exit, 'entrance': self.entrance, 'rosette': self.rosette, 'forbidden': self.forbidden}\n \n \n","sub_path":"board_square.py","file_name":"board_square.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"185650972","text":"#Aaron Davis\ndef greeter():\n \"\"\"This function takes input about a person's name and the time of day,\n and returns a greeting based on that input, it also counts the number of greetings\"\"\"\n import random as rand\n time = rand.randint(1,24)\n print('The current hour is', time)\n #this determines what the output will be, based on variable inputs\n if 5 <= time <= 10:\n return'Have a good breakfast,'+' '+first+' '+initial\n elif 11 <= time <=15:\n return'Have a good lunch,'+' '+first+' '+initial\n elif 16 <= time <= 20:\n return'Have a good dinner,'+' '+first+' '+initial\n else:\n return'Have a good one,'+' '+first+' '+initial\n \n \n\ncount = 1 #this gives a base number for the number of greetings\nwhile True: #this while loop lets the code ask for multiple greetings\n #main variables\n first = input('What is your first name?: ')\n last = input('What is your last name?: ')\n initial = last[:1]\n print(greeter())\n answer = input('would you like another greeting?: ')\n if answer == 'yes':\n count = count +1 #this will update the number of greetings that are recieved\n continue\n elif answer == 'no':\n break\n #this just states how many greetings the code has ran\nprint('You recieved '+ str(count)+ ' greetings')\n","sub_path":"ChatbotProject/chatbot-phase4-AaronDavis.py","file_name":"chatbot-phase4-AaronDavis.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"154617148","text":"charizardMoves = {\n 1: \"Scratch\",\n 2: \"Air Slash\",\n 3: \"Flare Blitz\",\n 4: \"Growl\",\n 5: \"Heat Wave\",\n 6: \"Ember\",\n 7: \"Shadow Claw\",\n 8: \"Smokescreen\",\n 9: \"Dragon Claw\",\n 10: \"Dragon Rage\",\n 11: \"Scary Face\",\n 12: \"Fire Fang\",\n 13: \"Flame Burst\",\n 14: \"Wing Attack\",\n 15: \"Slash\",\n 16: \"Flamethrower\",\n 17: \"Fire Spin\",\n 18: \"Inferno\",\n}\n\nHP = 360\nAtk = 293\nDef = 280\nSpAtk = 348\nSpDef = 295\nSpeed = 328\nLevel = 100\nType = \"Fire\"\nType2 = \"Flying\"\n\nfrom moves import *\ndefMoves = {\n \"Scratch\": [ScratchPower, ScratchPP, ScratchAccuracy, ScratchType, ScratchName],\n \"Air Slash\": [Air_SlashPower, Air_SlashPP, Air_SlashAccuracy, Air_SlashType, Air_SlashName],\n \"Flare Blitz\": [Flare_BlitzPower, Flare_BlitzPP, Flare_BlitzAccuracy, Flare_BlitzType, Flare_BlitzName],\n \"Growl\": [GrowlPower, GrowlPP, GrowlAccuracy, GrowlType, GrowlName],\n \"Heat Wave\": [Heat_WavePower, Heat_WavePP, Heat_WaveAccuracy, Heat_WaveType, Heat_WaveName],\n \"Ember\": [EmberPower, EmberPP, EmberAccuracy, EmberType, EmberName],\n \"Shadow Claw\": [Shadow_ClawPower, Shadow_ClawPP, Shadow_ClawAccuracy, Shadow_ClawType, Shadow_ClawName],\n \"Smokescreen\": [SmokescreenPower, SmokescreenPP, SmokescreenAccuracy, SmokesreenType, SmokescreenName],\n \"Dragon Claw\": [Dragon_ClawPower, Dragon_ClawPP, Dragon_ClawAccuracy, Dragon_ClawType, Dragon_ClawName],\n \"Dragon Rage\": [Dragon_RagePower, Dragon_RagePP, Dragon_RageAccuracy, Dragon_RageType, Dragon_RageName],\n \"Scary Face\": [Scary_FacePower, Scary_FacePP, Scary_FaceAccuracy, Scary_FaceType, Scary_FaceName],\n \"Fire Fang\": [Fire_FangPower, Fire_FangPP, Fire_FangAccuracy, Fire_FangType, Fire_FangName],\n \"Flame Burst\": [Flame_BurstPower, Flame_BurstPP, Flame_BurstAccuracy, Flame_BurstType, Flame_BurstName],\n \"Wing Attack\": [Wing_AttackPower, Wing_AttackPP, Wing_AttackAccuracy, Wing_AttackType, Wing_AttackName],\n \"Slash\": [SlashPower, SlashPP, SlashAccuracy, SlashType, SlashName],\n \"Flamethrower\": [FlamethrowerPower, FlamethrowerPP, FlamethrowerAccuracy, FlamethrowerType, FlamethrowerName],\n \"Fire Spin\": [Fire_SpinPower, Fire_SpinPP, Fire_SpinAccuracy, Fire_SpinType, Fire_SpinName],\n \"Inferno\": [InfernoPower, InfernoPP, InfernoAccuracy, InfernoType, InfernoName],\n}\n","sub_path":"charizard.py","file_name":"charizard.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"623342112","text":"#!/usr/bin/python\nimport sys\nimport fileinput\nimport re\nimport numpy as np\nimport pandas as pd\n\neth={}\nfor each_line_of_text in fileinput.input(\"/home/jupyter-aomidsal/data/Homo_sapiens.GRCh37.75.gtf\"):\n gene=re.findall(r'^.*?\\t.*?\\t(gene)\\t', each_line_of_text, re.I)\n gene_id=re.findall(r'gene_id \"(ENSG\\d*?)\"', each_line_of_text, re.I)\n gene_name=re.findall(r'gene_name \"(.*?)\"', each_line_of_text, re.I)\n if gene:\n if gene_id:\n if gene_name:\n eth[gene_id[0]] = gene_name[0]\n \nif sys.argv[1][:2] == '-f':\n columnnumber = sys.argv[1][2]\n data = sys.argv[2]\n column = int(columnnumber) - 1\nelse:\n column = 1\n data = sys.argv[1] \n\ndata = pd.read_csv(data)\ndata.iloc[:, column]=data.iloc[:, column].astype(str).str.replace(r'(\\.\\d*)', '')\ndata.iloc[:, column]=data.iloc[:, column].str.strip('\"')\ndata.iloc[:, column]=data.iloc[:, column].replace(eth)\nprint(data)\n","sub_path":"ensg2hugo.py","file_name":"ensg2hugo.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"639200765","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for Louvain\"\"\"\n\nimport unittest\n\nfrom scipy import sparse\n\nfrom sknetwork import is_numba_available\nfrom sknetwork.clustering import Louvain, BiLouvain, modularity\nfrom sknetwork.data import simple_directed_graph, karate_club, bow_tie, painters, star_wars_villains\nfrom sknetwork.utils.adjacency_formats import directed2undirected\n\n\n# noinspection PyMissingOrEmptyDocstring\nclass TestLouvainClustering(unittest.TestCase):\n\n def setUp(self):\n self.louvain = Louvain(engine='python')\n self.bilouvain = BiLouvain(engine='python')\n if is_numba_available:\n self.louvain_numba = Louvain(engine='numba')\n self.bilouvain_numba = BiLouvain(engine='numba')\n else:\n with self.assertRaises(ValueError):\n Louvain(engine='numba')\n\n def test_unknown_types(self):\n with self.assertRaises(TypeError):\n self.louvain.fit(sparse.identity(1))\n\n def test_single_node_graph(self):\n self.assertEqual(self.louvain.fit_transform(sparse.identity(1, format='csr')), [0])\n\n def test_simple_graph(self):\n self.simple_directed_graph = simple_directed_graph()\n self.louvain.fit(directed2undirected(self.simple_directed_graph))\n self.assertEqual(len(self.louvain.labels_), 10)\n\n def test_undirected(self):\n self.louvain_high_resolution = Louvain(engine='python', resolution=2)\n self.louvain_null_resolution = Louvain(engine='python', resolution=0)\n self.karate_club = karate_club()\n self.louvain.fit(self.karate_club)\n labels = self.louvain.labels_\n self.assertEqual(labels.shape, (34,))\n self.assertAlmostEqual(modularity(self.karate_club, labels), 0.42, 2)\n if is_numba_available:\n self.louvain_numba.fit(self.karate_club)\n labels = self.louvain_numba.labels_\n self.assertEqual(labels.shape, (34,))\n self.assertAlmostEqual(modularity(self.karate_club, labels), 0.42, 2)\n self.louvain_high_resolution.fit(self.karate_club)\n labels = self.louvain_high_resolution.labels_\n self.assertEqual(labels.shape, (34,))\n self.assertAlmostEqual(modularity(self.karate_club, labels), 0.34, 2)\n self.louvain_null_resolution.fit(self.karate_club)\n labels = self.louvain_null_resolution.labels_\n self.assertEqual(labels.shape, (34,))\n self.assertEqual(len(set(self.louvain_null_resolution.labels_)), 1)\n\n def test_directed(self):\n self.painters = painters(return_labels=False)\n\n self.louvain.fit(self.painters)\n labels = self.louvain.labels_\n self.assertEqual(labels.shape, (14,))\n self.assertAlmostEqual(modularity(self.painters, labels), 0.32, 2)\n\n self.bilouvain.fit(self.painters)\n n1, n2 = self.painters.shape\n row_labels = self.bilouvain.row_labels_\n col_labels = self.bilouvain.col_labels_\n self.assertEqual(row_labels.shape, (n1,))\n self.assertEqual(col_labels.shape, (n2,))\n\n def test_bipartite(self):\n star_wars_graph = star_wars_villains()\n self.bilouvain.fit(star_wars_graph)\n row_labels = self.bilouvain.row_labels_\n col_labels = self.bilouvain.col_labels_\n self.assertEqual(row_labels.shape, (4,))\n self.assertEqual(col_labels.shape, (3,))\n if is_numba_available:\n self.bilouvain_numba.fit(star_wars_graph)\n row_labels = self.bilouvain_numba.row_labels_\n col_labels = self.bilouvain_numba.col_labels_\n self.assertEqual(row_labels.shape, (4,))\n self.assertEqual(col_labels.shape, (3,))\n\n def test_shuffling(self):\n self.louvain_shuffle_first = Louvain(engine='python', shuffle_nodes=True, random_state=0)\n self.louvain_shuffle_second = Louvain(engine='python', shuffle_nodes=True, random_state=123)\n 
self.bow_tie = bow_tie()\n self.louvain_shuffle_first.fit(self.bow_tie)\n self.assertEqual(self.louvain_shuffle_first.labels_[1], 1)\n self.louvain_shuffle_second.fit(self.bow_tie)\n self.assertEqual(self.louvain_shuffle_second.labels_[1], 1)\n","sub_path":"sknetwork/clustering/tests/test_louvain.py","file_name":"test_louvain.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"52386944","text":"import os\nimport csv\nimport datetime as dt\nimport numpy as np\nfrom keras.utils import np_utils\nfrom keras.layers import Input, Dense, Conv2D, Conv1D, MaxPooling2D, MaxPooling1D, Flatten, BatchNormalization, LSTM, \\\n Reshape, Dropout, TimeDistributed, Lambda\nfrom keras.models import Model\nimport tensorflow as tf\nimport sklearn.metrics as metrics\nimport pandas as pd\nfrom keras.layers.merge import _Merge\nfrom keras.utils import Sequence\nimport os\nimport keras\nimport random\n\nrandom.seed(0)\nnp.random.seed(1)\n\nactivity_list = ['01', '02', '03', '04', '05', '06', '07']\nid_list = range(len(activity_list))\nactivity_id_dict = dict(zip(activity_list, id_list))\n\n#path = '/Volumes/1708903/MEx/Data/pm_scaled/1.0min/'\n#path = '/home/mex/data/pm_1.0/'\nresults_file = '/Volumes/1708903/MEx/Data/p_pm_1.0.csv'\npm_mn_stream_folder = '/Volumes/1708903/MEx/Data/p_pm_mn_stream_500_5_7_720_5_2_1'\n\nheight = 32\nwidth = 16\nframe_size = height*width\nsamples_per_class = 5\nclasses_per_set = len(activity_list)\nembedding_length = 100\n\nnumber_of_users = 5\ntrain_size = 500 * number_of_users\nframes_per_second = 1\nwindow = 5\nincrement = 2\n\npm_min_length = 14*window\npm_max_length = 15*window\ndc_min_length = 10*window\ndc_max_length = 15*window\nac_min_length = 95*window\nac_max_length = 100*window\n\n\nclass MNGenerator(Sequence):\n\n def __init__(self, image_filenames, labels, batch_size, is_test):\n self.image_filenames, self.labels = image_filenames, labels\n self.batch_size = batch_size\n self.is_test = is_test\n\n def __len__(self):\n return np.ceil(len(self.image_filenames) / float(self.batch_size))\n\n def read_mn(self, _file):\n reader = csv.reader(open(_file, \"r\"), delimiter=\",\")\n _slice_x = []\n _slice_y = []\n for row in reader:\n if len(row) == ((samples_per_class*classes_per_set)+1) * height * width*window * 1:\n _slice_x = [float(f) for f in row]\n if len(row) == samples_per_class*classes_per_set:\n _slice_y = [float(f) for f in row]\n _slice_x = np.array(_slice_x)\n _slice_y = np.array(_slice_y)\n _slice_y = keras.utils.to_categorical(_slice_y, classes_per_set)\n _slice_x = np.reshape(_slice_x, (((samples_per_class*classes_per_set)+1), height, width*window, 1))\n return [_slice_x, _slice_y]\n\n def __getitem__(self, idx):\n batch_x = self.image_filenames[idx * self.batch_size:(idx + 1) * self.batch_size]\n batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]\n\n batch_y = keras.utils.to_categorical(batch_y, classes_per_set)\n\n slices = [self.read_mn(file_name) for file_name in batch_x]\n slice_x = np.array([xx[0] for xx in slices])\n slice_y = np.array([xx[1] for xx in slices])\n if self.is_test:\n print(slice_x.shape)\n print(slice_y.shape)\n print(batch_y.shape)\n return [slice_x, slice_y]\n return [slice_x, slice_y], batch_y\n\n\ndef get_batch_data(_fold, mode):\n folder = os.path.join(pm_mn_stream_folder, _fold+'/'+mode+'/')\n files = os.listdir(folder)\n train_file_names = [os.path.join(folder, f) for f in files if f != 'target_y.csv']\n\n reader = csv.reader(open(os.path.join(folder, 'target_y.csv'), \"r\"), delimiter=\",\")\n labels = []\n for row in reader:\n labels.append(float(row[0]))\n return train_file_names, labels\n\n\nclass CosineSimilarity(_Merge):\n def __init__(self, nway=5, n_samp=1, **kwargs):\n super(CosineSimilarity, self).__init__(**kwargs)\n self.eps = 1e-10\n self.nway = nway\n self.n_samp = n_samp\n\n def build(self, input_shape):\n if not isinstance(input_shape, list) or 
len(input_shape) != self.nway * self.n_samp + 2:\n raise ValueError(\n 'A ModelCosine layer should be called on a list of inputs of length %d' % (self.nway * self.n_samp + 2))\n\n def call(self, inputs):\n self.nway = (len(inputs) - 2) / self.n_samp\n similarities = []\n\n targetembedding = inputs[-2]\n numsupportset = len(inputs) - 2\n for ii in range(numsupportset):\n supportembedding = inputs[ii]\n\n sum_support = tf.reduce_sum(tf.square(supportembedding), 1, keep_dims=True)\n supportmagnitude = tf.rsqrt(tf.clip_by_value(sum_support, self.eps, float(\"inf\")))\n\n sum_query = tf.reduce_sum(tf.square(targetembedding), 1, keep_dims=True)\n querymagnitude = tf.rsqrt(tf.clip_by_value(sum_query, self.eps, float(\"inf\")))\n\n dot_product = tf.matmul(tf.expand_dims(targetembedding, 1), tf.expand_dims(supportembedding, 2))\n dot_product = tf.squeeze(dot_product, [1])\n\n cosine_similarity = dot_product * supportmagnitude * querymagnitude\n similarities.append(cosine_similarity)\n\n similarities = tf.concat(axis=1, values=similarities)\n softmax_similarities = tf.nn.softmax(similarities)\n preds = tf.squeeze(tf.matmul(tf.expand_dims(softmax_similarities, 1), inputs[-1]))\n\n preds.set_shape((inputs[0].shape[0], self.nway))\n return preds\n\n def compute_output_shape(self, input_shape):\n input_shapes = input_shape\n return input_shapes[0][0], self.nway\n\n\ndef write_data(file_path, data):\n if os.path.isfile(file_path):\n f = open(file_path, 'a')\n f.write(data + '\\n')\n else:\n f = open(file_path, 'w')\n f.write(data + '\\n')\n f.close()\n\n\ndef embedding_2D(x):\n x = Conv2D(32, kernel_size=(1,5), activation='relu')(x)\n x = MaxPooling2D(pool_size=2, strides=1, data_format='channels_last')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, kernel_size=(1,5), activation='relu')(x)\n x = MaxPooling2D(pool_size=2, strides=1, data_format='channels_last')(x)\n x = BatchNormalization()(x)\n x = Flatten()(x)\n x = Dense(embedding_length, activation='relu')(x)\n x = BatchNormalization()(x)\n return x\n\n\ndef run_model_mn(fold):\n numsupportset = samples_per_class * classes_per_set\n model_input = Input((numsupportset + 1, height, width * window * frames_per_second, 1))\n model_inputs = []\n for lidx in range(numsupportset):\n model_inputs.append(embedding_2D(Lambda(lambda x: x[:, lidx, :, :, :])(model_input)))\n targetembedding = embedding_2D(Lambda(lambda x: x[:, -1, :, :, :])(model_input))\n model_inputs.append(targetembedding)\n support_labels = Input((numsupportset, classes_per_set))\n model_inputs.append(support_labels)\n\n knn_similarity = CosineSimilarity(nway=classes_per_set, n_samp=samples_per_class)(model_inputs)\n\n model = Model(inputs=[model_input, support_labels], outputs=knn_similarity)\n model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])\n train_file_names, train_labels = get_batch_data(str(fold), 'train')\n train_gen = MNGenerator(train_file_names, train_labels, 16, False)\n model.fit_generator(train_gen, epochs=15, verbose=1)\n\n test_file_names, test_labels = get_batch_data(str(fold), 'test')\n test_gen = MNGenerator(test_file_names, test_labels, len(test_labels)/5, True)\n _predict_labels = model.evaluate_generator(test_gen, steps=5)\n #score = model.evaluate_generator(test_gen)\n #print(score)\n #write_data(results_file, ','.join(score))\n write_data(results_file, 'label lengths:'+str(len(test_labels))+','+str(len(_predict_labels)))\n _test_labels = keras.utils.to_categorical(test_labels, classes_per_set)\n f_score = 
metrics.f1_score(_test_labels.argmax(axis=1), _predict_labels.argmax(axis=1), average='macro')\n accuracy = metrics.accuracy_score(_test_labels.argmax(axis=1), _predict_labels.argmax(axis=1))\n results = 'pm,' + str(accuracy)+',' + str(f_score)\n print(results)\n write_data(results_file, 'results:'+results)\n _test_labels = pd.Series(_test_labels.argmax(axis=1), name='Actual')\n _predict_labels = pd.Series(_predict_labels.argmax(axis=1), name='Predicted')\n df_confusion = pd.crosstab(_test_labels, _predict_labels)\n print(df_confusion)\n write_data(results_file, 'confusion matrix:'+str(df_confusion))\n\n\ndef run():\n for i in range(6):\n tf.set_random_seed(2)\n run_model_mn(i)\n\n\nrun()\n","sub_path":"v1/p/1m/pm_mn.py","file_name":"pm_mn.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"21951697","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 16 14:27:19 2019\r\n\r\n@author: arena\r\n\"\"\"\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\"\"\"\r\n這題跟第二題有些類似,只是改成要merge\r\n一樣分為三種情況:\r\n1.L1不存在\r\n 輸出l2\r\n2.L2不存在\r\n 輸出l1\r\n3.l1 l2兩個都存在\r\n 把l1跟l2當前節點的值比大小,小的就放入l3中,直到最後,一樣需要考慮l1或l2較長的問題\r\nRuntime: 40 ms, faster than 83.96% of Python3 online submissions for Merge Two Sorted Lists.\r\nMemory Usage: 13.3 MB, less than 12.87% of Python3 online submissions for Merge Two Sorted Lists.\r\n\"\"\"\r\nclass Solution:\r\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\r\n # chek l1 l2\r\n while l1 == None:\r\n return l2\r\n while l2 == None:\r\n return l1\r\n ans = l3 = ListNode(0)\r\n # normal case\r\n while l1 and l2:\r\n if l1.val > l2.val:\r\n l3.next = ListNode(l2.val)\r\n l3 = l3.next\r\n l2 = l2.next\r\n else:\r\n l3.next = ListNode(l1.val)\r\n l3 = l3.next\r\n l1 = l1.next\r\n # l1 longer\r\n while l1:\r\n l3.next = ListNode(l1.val)\r\n l3 = l3.next\r\n l1 = l1.next\r\n # l2 longer\r\n while l2:\r\n l3.next = ListNode(l2.val)\r\n l3 = l3.next\r\n l2 = l2.next\r\n return ans.next","sub_path":"LeetCode_21.py","file_name":"LeetCode_21.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"183495834","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom backstage.view import home_views as views\n\nurlpatterns = [\n url(r'^test/$', views.test, name='home_test'),\n url(r'^count_hosts/$', views.count_hosts, name='count_hosts'),\n url(r'^count_idcs/$', views.count_idcs, name='count_idcs'),\n url(r'^count_colony/$', views.count_colony, name='count_colony'),\n url(r'^count_virtual/$', views.count_virtual, name='count_virtual'),\n url(r'^count_images/$', views.count_images, name='count_images'),\n]\n","sub_path":"python/vir_manager/backstage/url/home_urls.py","file_name":"home_urls.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"589460178","text":"import numpy as np\nfrom sklearn import preprocessing, model_selection, discriminant_analysis, metrics\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import Embedding\nfrom keras.layers import LSTM\nfrom keras.utils import to_categorical\nfrom keras import regularizers\nfrom extract_features import *\n\n\nif __name__ == '__main__':\n\n #Load Data\n train_data = np.load('X_train_kaggle.npy')\n all_id_classes = np.genfromtxt('y_train_final_kaggle.csv', delimiter=',', dtype='str')\n groups_csv = np.genfromtxt('groups.csv', delimiter=',', dtype='str')\n\n\n le = preprocessing.LabelEncoder()\n le.fit(all_id_classes[:, 1])\n all_id_classes_transformed = le.transform(all_id_classes[:, 1])\n classes_array = np.array(all_id_classes_transformed)\n\n #Transform labels to n x 9 vectors\n target_classes = to_categorical(classes_array)\n\n\n # Split the groups to training and validation data.\n gss = model_selection.GroupShuffleSplit(n_splits=1, test_size=0.2)\n data_split = gss.split(groups_csv[:, 0], groups_csv[:, 2], groups_csv[:, 1])\n\n\n #Feature Data\n ravel_data = np.array(extract_ravel(train_data))\n mean_data = np.array(extract_mean(train_data))\n var_mean_data = np.array(extract_var_mean(train_data))\n chanel_var_mean = np.array(extract_chanel_var_mean(train_data))\n\n\n #Reshape mean data from (1703, 10) to (1703, 10, 1)\n mean_data = mean_data.reshape([int(len(mean_data)),10,1])\n var_mean_data = var_mean_data.reshape(int(len(var_mean_data)),2,1)\n\n weight_l1 = 0.001\n\n #LSTM Structure for raw training data\n #input Shape\n model = Sequential()\n model.add(LSTM(64, return_sequences= True, input_shape=train_data.shape[1:],activity_regularizer= regularizers.l1(weight_l1)))\n model.add(LSTM(64, activity_regularizer= regularizers.l1(weight_l1)))\n model.add(Dense(target_classes.shape[1], activation='softmax', activity_regularizer= regularizers.l1(weight_l1)))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n #LSTM Structure for mean data\n model2 = Sequential()\n model2.add(LSTM(64, return_sequences= True, input_shape=mean_data.shape[1:], activity_regularizer= regularizers.l1(weight_l1)))\n model2.add(LSTM(64, activity_regularizer= regularizers.l1(weight_l1)))\n model2.add(Dense(target_classes.shape[1], activation='softmax', activity_regularizer= regularizers.l1(weight_l1)))\n model2.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n #LSTM Structure for mean and var data\n model3 = Sequential()\n model3.add(LSTM(64, return_sequences= True, input_shape=var_mean_data.shape[1:], activity_regularizer= regularizers.l1(weight_l1)))\n model3.add(LSTM(64, activity_regularizer= regularizers.l1(weight_l1)))\n model3.add(Dense(target_classes.shape[1], activation='softmax', activity_regularizer= regularizers.l1(weight_l1)))\n model3.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n\n #LSTM Structure for chanel mean and var data\n\n model4 = Sequential()\n model4.add(LSTM(64, return_sequences= True, input_shape=chanel_var_mean.shape[1:], activity_regularizer= regularizers.l1(weight_l1)))\n model4.add(LSTM(64, activity_regularizer= regularizers.l1(weight_l1)))\n model4.add(Dense(target_classes.shape[1], activation='softmax', activity_regularizer= regularizers.l1(weight_l1)))\n model4.compile(loss='categorical_crossentropy',\n optimizer='adam',\n 
metrics=['accuracy'])\n\n\n round = 0\n scores = []\n scores2 = []\n scores3 = []\n scores4 = []\n\n epochs = 20\n for train, test in data_split:\n\n #RAW DATA\n\n #Get raw training data and labels with correct indices\n F_train = train_data[train]\n y_train = target_classes[train]\n\n #Split test into test and validation data\n test_input, validation_input, test_target, validation_target = train_test_split(train_data[test],\n target_classes[test],\n test_size=0.33)\n\n model.fit(F_train,y_train, validation_data=(validation_input,validation_target), epochs=epochs)\n\n score = model.evaluate(test_input,test_target)\n scores.append(score[1])\n\n\n #MEAN DATA\n\n F_train = mean_data[train]\n y_train = target_classes[train]\n\n test_input, validation_input, test_target, validation_target = train_test_split(mean_data[test],\n target_classes[test],\n test_size=0.33)\n\n model2.fit(F_train, y_train, validation_data=(validation_input, validation_target), epochs=epochs)\n\n score2 = model2.evaluate(test_input, test_target)\n scores2.append(score2[1])\n\n\n #VAR AND MEAN DATA\n\n F_train = var_mean_data[train]\n y_train = target_classes[train]\n\n test_input, validation_input, test_target, validation_target = train_test_split(var_mean_data[test],\n target_classes[test],\n test_size=0.33)\n\n model3.fit(F_train, y_train, validation_data=(validation_input, validation_target), epochs=epochs)\n\n score3 = model3.evaluate(test_input, test_target)\n scores3.append(score3[1])\n\n\n #CHANEL VAR AND MEAN DATA\n\n F_train = chanel_var_mean[train]\n y_train = target_classes[train]\n\n test_input, validation_input, test_target, validation_target = train_test_split(chanel_var_mean[test],\n target_classes[test],\n test_size=0.33)\n\n model4.fit(F_train, y_train, validation_data=(validation_input, validation_target), epochs=epochs)\n\n score4 = model4.evaluate(test_input, test_target)\n scores4.append(score4[1])\n\n print(scores)\n print(scores2)\n print(scores3)\n print(scores4)","sub_path":"task2b.py","file_name":"task2b.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"109670719","text":"import tkinter as tk\r\n\r\n# for map_tiles\r\nmap_tiles = [0, 0, 1, 1, 2, 2, 3, 3,\r\n 2, 0, 1, 1, 1, 3, 3, 3,\r\n 2, 0, 0, 0, 0, 1, 3, 3,\r\n 2, 2, 2, 0, 1, 1, 1, 1,\r\n 2, 2, 2, 0, 1, 1, 1, 3,\r\n 2, 2, 0, 0, 0, 1, 1, 3,\r\n 2, 0, 0, 1, 0, 0, 1, 3,\r\n 1, 1, 1, 1, 1, 1, 1, 1]\r\n\r\ntk_images = []\r\nrow = 8\r\ncolumn = 8\r\n\r\n# for drawing\r\nstart_x = 64\r\nstart_y = 64\r\nspan_x = 64\r\nspan_y = 64\r\n\r\n\r\n# for move\r\nimage_ids = []\r\nspeed_x = 8\r\nspeed_y = 8\r\n\r\n#########################################\r\n\r\n\r\ndef key_press_handler(event):\r\n #print('ok')\r\n if event.keysym == 'Right':\r\n move_map(-speed_x, 0)\r\n elif event.keysym == 'Down':\r\n move_map(0, -speed_y)\r\n elif event.keysym == 'Left':\r\n move_map(speed_x, 0)\r\n elif event.keysym == 'Up':\r\n move_map(0, speed_y)\r\n\r\n\r\ndef move_map(amount_x, amount_y):\r\n for iid in image_ids:\r\n canvas_show_image.move(iid, amount_x, amount_y)\r\n\r\n\r\n#########################################\r\nroot = tk.Tk()\r\nroot.title('Map 2D')\r\nroot.resizable(False, False)\r\n\r\nframe_left = tk.Frame(root, width=200, height=600)\r\nframe_right = tk.Frame(root, width=600, height=600, bg='white')\r\n#\r\nframe_left.grid_propagate(0)\r\nframe_left.grid(row=0, column=0)\r\nframe_right.grid_propagate(0)\r\nframe_right.grid(row=0, column=1)\r\n\r\n\r\ncanvas_show_image = tk.Canvas(frame_right, width=600, height=600, bg='#00E4E4', takefocus=1)\r\ncanvas_show_image.grid()\r\ncanvas_show_image.focus_set()\r\ncanvas_show_image.bind_all('', key_press_handler)\r\ncanvas_show_image.bind_all('', key_press_handler)\r\ncanvas_show_image.bind_all('', key_press_handler)\r\ncanvas_show_image.bind_all('', key_press_handler)\r\n\r\n\r\nfor i in range(0, row):\r\n for j in range(0, column):\r\n #file_name = 'maptile_' + str(map_tiles[i*column+j]) + '.gif'\r\n file_name = 'gif/maptile_{}.gif'.format(map_tiles[i*column+j])\r\n tk_images.append(tk.PhotoImage(file=file_name))\r\n pos_x = start_x + span_x * j\r\n pos_y = start_y + span_y * i\r\n image_id = canvas_show_image.create_image(pos_x, pos_y, image=tk_images[i*column+j])\r\n image_ids.append(image_id)\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"119/CGP-006-Map_2D.py","file_name":"CGP-006-Map_2D.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"134019612","text":"# 체스판 다시 칠하기\n\n'''\nW로 시작할때와 B로 시작할 때를 구분해야함.\n'''\n\nimport sys\nN, M = map(int, input().split())\nMap = [input() for i in range(N)]\n\n\ndef check(u, v):\n a = 0\n b = 0\n for i in range(u, u+8):\n for j in range(v, v+8):\n if (i+j) % 2 == 0:\n if Map[i][j] == 'B':\n a += 1\n else:\n b += 1\n else:\n if Map[i][j] == 'W':\n a += 1\n else:\n b += 1\n return min(a, b)\n\n\nmin_num = sys.maxsize\nfor i in range(N-7):\n for j in range(M-7):\n num = check(i, j)\n min_num = min(min_num, num)\n\nprint(min_num)\n","sub_path":"백준/Python/카테고리/완전탐색/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"239455090","text":"import os, cv2\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\n\r\ndef make_person_classes(dir_P): \r\n dir_C = dir_P + '_classes'\r\n if not os.path.isdir(dir_C):\r\n os.makedirs(dir_C)\r\n \r\n img_items = os.listdir(dir_P)\r\n img_items.sort()\r\n cur_person_name = 'None'\r\n cur_bundle = []\r\n loop = tqdm(img_items)\r\n for item in loop:\r\n person_name = item[:item.rfind('_')] #something like 'fashionMENDenimid0000056501_1front.jpg'\r\n img = cv2.imread(os.path.join(dir_P, item))\r\n loop.set_description(person_name)\r\n if person_name == cur_person_name:\r\n cur_bundle.append(img)\r\n else:\r\n if cur_person_name != 'None':\r\n npy_bundle = np.stack(cur_bundle, axis=0)\r\n np.save(os.path.join(dir_C, cur_person_name + '.npy'), npy_bundle)\r\n cur_person_name = person_name\r\n cur_bundle = [img]\r\n \r\n\r\nif __name__ == '__main__':\r\n make_person_classes('./fashion_data/train')\r\n make_person_classes('./fashion_data/test')","sub_path":"temp_script.py","file_name":"temp_script.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"250577725","text":"import numpy as np\nimport pandas as pd\nimport warnings, random\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import fbeta_score\n\n\nclass Vectorizer():\n def __init__(self):\n self.vectorizer = TfidfVectorizer(lowercase=False, max_features=2000, ngram_range=(1, 2))\n\n def transform(self, X):\n return self.vectorizer.transform(X).toarray()\n\n def fit(self, X):\n self.vectorizer.fit(X)\n\n def fit_transform(self, X):\n return self.vectorizer.fit_transform(X).toarray()\n\n\nclass CrowdSimulator:\n\n @staticmethod\n def crowdsource_items(item_ids, gt_items, predicate, crowd_acc, n, crowd_votes_counts):\n '''\n :param gt_items: list of ground truth values fo items to crowdsource\n :param crowd_acc: crowd accuracy range on predicate given\n :param n: n crowd votes per predicate\n :param predicate: predicate name for\n :return: aggregated crwodsourced label on items\n '''\n crodsourced_items = []\n for item_id, gt in zip(item_ids, gt_items):\n in_votes, out_votes = 0, 0\n for _ in range(n):\n worker_acc = random.uniform(crowd_acc[0], crowd_acc[1])\n worker_vote = np.random.binomial(1, worker_acc if gt == 1 else 1 - worker_acc)\n if worker_vote == 1:\n in_votes += 1\n else:\n out_votes += 1\n item_label = 1 if in_votes >= out_votes else 0\n crowd_votes_counts[item_id][predicate]['in'] += in_votes\n crowd_votes_counts[item_id][predicate]['out'] += out_votes\n crodsourced_items.append(item_label)\n return crodsourced_items\n\n @staticmethod\n def crowdsource_items_scope_mode(item_ids, gt_items, predicates, crowd_acc, n, crowd_votes_counts):\n '''\n :param gt_items: list of ground truth values fo items to crowdsource\n :param crowd_acc: crowd accuracy range on predicate given\n :param n: n crowd votes per predicate\n :param predicate: name of predicates\n :return: aggregated crwodsourced label on items\n '''\n crodsourced_items = []\n for item_ind, item_id in enumerate(item_ids):\n item_pred_val = {}\n for pr in predicates:\n gt = gt_items[pr][item_ind]\n in_votes, out_votes = 0, 0\n for _ in range(n):\n worker_acc = random.uniform(crowd_acc[pr][0], crowd_acc[pr][1])\n worker_vote = np.random.binomial(1, worker_acc if gt == 1 else 1 - worker_acc)\n if worker_vote == 1:\n in_votes += 1\n else:\n out_votes += 1\n pred_label = 1 if in_votes >= out_votes else 0\n item_pred_val[pr] = pred_label\n crowd_votes_counts[item_id][pr]['in'] += in_votes\n crowd_votes_counts[item_id][pr]['out'] += out_votes\n item_label = 0 if 0 in item_pred_val.values() else 1\n crodsourced_items.append(item_label)\n return crodsourced_items\n\n\n# screening metrics, aimed to obtain high recall\nclass MetricsMixin:\n\n @staticmethod\n def compute_screening_metrics(gt, predicted, lr, beta):\n '''\n FP == False Inclusion\n FN == False Exclusion\n '''\n item_ids = gt.keys()\n fp = 0.\n fn = 0.\n tp = 0.\n tn = 0.\n for item_id in item_ids:\n gt_val, pred_val = gt[item_id], predicted[item_id]\n if gt_val and not pred_val:\n fn += 1\n if not gt_val and pred_val:\n fp += 1\n if gt_val and pred_val:\n tp += 1\n if not gt_val and not pred_val:\n tn += 1\n loss = (fn * lr + fp) / len(gt)\n try:\n recall = tp / (tp + fn)\n precision = tp / (tp + fp)\n beta = beta\n fbeta = (beta ** 2 + 1) * precision * recall / (recall + beta ** 2 * precision)\n except ZeroDivisionError:\n warnings.warn('ZeroDivisionError -> recall, precision, fbeta = 0., 0., 0')\n recall, precision, fbeta = 0., 0., 0\n\n return precision, recall, 
fbeta, loss, fn, fp\n\n\ndef load_data(file_name, predicates, path_to_project):\n path_dict = {\n '100000_reviews_lemmatized_old.csv': path_to_project + 'data/amazon-sentiment-dataset/',\n '5000_reviews_lemmatized.csv': path_to_project + 'data/amazon-sentiment-dataset/',\n 'ohsumed_C04_C12_1grams.csv': path_to_project + 'data/ohsumed_data/',\n 'ohsumed_C10_C23_1grams.csv': path_to_project + '/data/ohsumed_data/',\n 'ohsumed_C14_C23_1grams.csv': path_to_project + 'data/ohsumed_data/',\n 'loneliness-dataset-2018.csv': path_to_project + 'data/loneliness-dataset-2018/'\n }\n path = path_dict[file_name]\n data = pd.read_csv(path + file_name)\n X = data['tokens'].values\n y_screening = data['Y'].values\n y_predicate = {} # gt labels per predicate\n for pr in predicates:\n y_predicate[pr] = data[pr].values\n\n return X, y_screening, y_predicate\n\n\ndef get_init_training_data_idx(y_screening, y_predicate_train, init_train_size):\n # initial training data\n pos_idx_all = (y_screening == 1).nonzero()[0]\n # all predicates are negative\n neg_idx_all = (sum(list(y_predicate_train.values())) == 0).nonzero()[0]\n # randomly select initial balanced training dataset\n train_idx = np.concatenate([np.random.choice(pos_idx_all, init_train_size // 2, replace=False),\n np.random.choice(neg_idx_all, init_train_size // 2, replace=False)])\n\n return train_idx\n\n\n# random sampling strategy for modAL\ndef random_sampling(_, X, n_instances=1):\n query_idx = random.sample(range(X.shape[0]), n_instances)\n\n return query_idx, X[query_idx]\n\n\n# sampling takes into account conjunctive expression of predicates\ndef objective_aware_sampling(classifier, X, learners_, n_instances=1, **uncertainty_measure_kwargs):\n from modAL.uncertainty import classifier_uncertainty, multi_argmax\n uncertainty = classifier_uncertainty(classifier, X, **uncertainty_measure_kwargs)\n l_prob_in = np.ones(X.shape[0])\n if learners_:\n for l in learners_.values():\n l_prob_in *= l.learner.predict_proba(X)[:, 1]\n uncertainty_weighted = l_prob_in * uncertainty\n else:\n uncertainty_weighted = uncertainty\n\n query_idx = multi_argmax(uncertainty_weighted, n_instances=n_instances)\n\n return query_idx, X[query_idx]\n\n\n# sampling takes into account conjunctive expression of predicates\ndef mix_sampling(classifier, X, learners_, n_instances=1, **uncertainty_measure_kwargs):\n from modAL.uncertainty import classifier_uncertainty, multi_argmax\n epsilon = 0.5\n uncertainty = classifier_uncertainty(classifier, X, **uncertainty_measure_kwargs)\n\n if np.random.binomial(1, epsilon):\n query_idx = np.array(random.sample(range(0, X.shape[0]-1), n_instances))\n else:\n l_prob_in = np.ones(X.shape[0])\n if learners_:\n for l in learners_.values():\n l_prob_in *= l.learner.predict_proba(X)[:, 1]\n uncertainty_weighted = l_prob_in * uncertainty\n else:\n uncertainty_weighted = uncertainty\n\n query_idx = multi_argmax(uncertainty_weighted, n_instances=n_instances)\n\n return query_idx, X[query_idx]\n\n\n# Mixin for ScreeningActiveLearner if to use adaptive_policy for learning-exploitation\nclass ChoosePredicateMixin:\n\n def init_stat(self):\n # initialize statistic for predicates\n self.stat = {}\n for predicate in self.predicates:\n self.stat[predicate] = {\n 'num_items_queried': [],\n 'f_beta': [],\n }\n\n # compute and update performance statistic for predicate-based classifiers\n def update_stat(self):\n # do cross validation\n # estimate and save statistics for extrapolation\n window = 5\n for predicate in self.predicates:\n s = 
self.stat[predicate]\n assert (len(s['num_items_queried']) == len(s['f_beta'])), 'Stat attribute error'\n\n l = self.learners[predicate]\n X, y = l.learner.X_training, l.learner.y_training\n tpr_list, tnr_list, f_beta_list = [], [], []\n k = 5\n skf = StratifiedKFold(n_splits=k)\n for train_idx, val_idx in skf.split(np.empty(y.shape[0]), y):\n X_train, X_val = X[train_idx], X[val_idx]\n y_train, y_val = y[train_idx], y[val_idx]\n clf = l.learner\n clf.fit(X_train, y_train)\n f_beta_list.append(fbeta_score(y_val, clf.predict(X_val), beta=self.beta, average='binary'))\n l.learner.fit(X, y)\n\n f_beta_mean = np.mean(f_beta_list)\n try:\n num_items_queried_prev = self.stat[predicate]['num_items_queried'][-1]\n except IndexError:\n num_items_queried_prev = 0\n\n if len(self.stat[predicate]['num_items_queried']) >= window - 1:\n f_beta_avg = (sum(self.stat[predicate]['f_beta'][-(window-1):]) + f_beta_mean) / window\n self.stat[predicate]['f_beta'].append(f_beta_avg)\n else:\n self.stat[predicate]['f_beta'].append(f_beta_mean)\n self.stat[predicate]['num_items_queried'].append((num_items_queried_prev + self.n_instances_query))\n\n def select_predicate_stop(self, param):\n predicates_to_train = []\n for predicate in self.predicates:\n if (self.stat[predicate]['f_beta'][-1] - self.stat[predicate]['f_beta'][-10]) >= 0.02:\n predicates_to_train.append(predicate)\n if not predicates_to_train:\n return None\n else:\n n = len(predicates_to_train)\n return predicates_to_train[param % n]\n","sub_path":"scopeAL_and_SMR/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"192065195","text":"#!/usr/bin/env python\n\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-i',\n '--csv',\n help=\"Provide your tab-delimited file(s).\",\n nargs=\"+\",\n required=True)\n parser.add_argument(\n '-o',\n '--outfile',\n help=\"Output file for merged csv filtered for freq <=0.1\",\n default=\"default\")\n args = parser.parse_args()\n tmpStr = \"\"\n if(args.outfile == \"default\"):\n csvBasenames = [i.split('/')[-1] for i in args.csv]\n for csvfname in csvBasenames:\n tmpStr += csvfname.split('.')[0] + '.'\n tmpStr += \".merged.10pctfilt.csv\"\n\n # move this to the top before the new loop and name it something like\n # outfile = open(output.csv, 'w')\n outfile = open(tmpStr, 'w')\n\n writeHeader = True\n # You could encase this whole thing in a loop that would take all\n # arguments (sys.argv[1], sys.argv[2], etc) each as a file to read in and\n # then write them all out to the same file like\n for path in args.csv:\n thiscsv = open(path, 'r')\n if(writeHeader is True):\n header = \"Filename\\t\" + thiscsv.readline()\n outfile.write(header)\n writeHeader = False\n else:\n thiscsv.readline()\n\n for line in thiscsv:\n if line.strip().split('\\t')[19] == '-':\n outfile.write(\n path +\n '\\t' +\n line) # add the file path as the first column\n elif float(line.strip().split('\\t')[19]) <= 0.1:\n outfile.write(path + '\\t' + line)\n\n # Have the outer loop end here\n\n thiscsv.close()\n outfile.close()\n return\n\nif(__name__ == \"__main__\"):\n main()\n","sub_path":"python/validations/mergeallfiles_lowMAF.py","file_name":"mergeallfiles_lowMAF.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"357829525","text":"#\n# Project: MXCuBE\n# https://github.com/mxcube.\n#\n# This file is part of MXCuBE software.\n#\n# MXCuBE is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# MXCuBE is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with MXCuBE. If not, see .\n\nimport logging\nfrom AbstractAperture import AbstractAperture\n\n\n__credits__ = [\"MXCuBE colaboration\"]\n__license__ = \"LGPLv3\"\n\n\n\"\"\"\nxml example:\n\n\n [\"BEAM\", \"OFF\", \"PARK\"] \n [5, 10, 20, 30, 50, 100] \n \n\"\"\"\n\nDEFAULT_POSITION_LIST = (\"BEAM\", \"OFF\", \"PARK\")\nDEFAULT_DIAMETER_SIZE_LIST = (5, 10, 20, 30, 50, 100)\n\n\nclass ApertureMockup(AbstractAperture):\n def __init__(self, name):\n AbstractAperture.__init__(self, name)\n\n def init(self):\n try:\n self._diameter_size_list = eval(self.getProperty(\"diameter_size_list\"))\n except BaseException:\n self._diameter_size_list = DEFAULT_DIAMETER_SIZE_LIST\n logging.getLogger(\"HWR\").error(\n \"Aperture: no diameter size list defined, using default list\"\n )\n\n try:\n self._position_list = eval(self.getProperty(\"position_list\"))\n except BaseException:\n self._position_list = DEFAULT_POSITION_LIST\n logging.getLogger(\"HWR\").error(\n \"Aperture: no position list defined, using default list\"\n )\n\n self.set_position_index(0)\n self.set_diameter_index(0)\n\n def set_in(self):\n \"\"\"\n Sets aperture in the beam\n \"\"\"\n self.set_position(\"BEAM\")\n\n def set_out(self):\n \"\"\"\n Removes aperture from the beam\n \"\"\"\n self.set_position(\"OFF\")\n\n def is_out(self):\n \"\"\"\n Returns:\n bool: True if aperture is in the beam, otherwise returns false\n \"\"\"\n return self._current_position_name != \"BEAM\"\n","sub_path":"HardwareObjects/mockup/ApertureMockup.py","file_name":"ApertureMockup.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"384949967","text":"class Cartel:\n alto=0\n largo=0\n matriz=[]\n frases=[]\n def __init__(self,alto=0,largo=0):\n self.alto=alto\n self.largo=largo\n self.CrearCartel()\n \n def IntroducirFrases(self):\n for i in range (self.alto):\n print(\"Introduce frase \",i+1, \" de \",self.alto)\n a=input()\n if self.ComprobarFrase(a):\n self.frases.append(a)\n continue\n else:\n #deshacer for\n pass\n\n def PruebaAtributoYVariable(self,variable):\n print(self.alto)\n print(variable)\n \n def CrearCartel(self):\n for i in range(self.alto):\n n=[]\n for j in range(self.largo):\n n.append('')\n self.matriz.append(n)\n\n def ConvertirFraseArray(frase):\n return strip.frase\n\n def LlenarCartel(self):\n self.IntroducirFrases()\n for i in range(self.alto):\n longitud_frase=len(self.frases[i])\n for j in range(longitud_frase):\n self.matriz[i][j]=self.frases[i][j]\n\n def ComprobarFrase(self,frase):\n if (len(frase)>self.largo):\n return False\n else:\n return True\n\n def ImprimirCartel(self):\n for i in range(self.alto):\n print(self.matriz[i])","sub_path":"Python/Practicas/Clases/Cartel.py","file_name":"Cartel.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"342686186","text":"import numpy as np\nimport glob,os\n\nfrom Time_from_node_to_node.time_mat import time_weighted_matrix, th_num_mat, n_times_random_walk\nimport networkx as nx\n\nfrom time_delay_index.average_hcp_tdi import calc_avg_mat\n\nmain_fol = 'F:\\Hila\\TDI\\LeftOutAgeAnalysis'\nall_subj_fol = glob.glob(f'{main_fol}{os.sep}[0-9]*{os.sep}')\natlas = 'yeo7_100'\nmat_type = 'time_th3'\nn=500\nmax_steps = 100 # 100 or 300\nmax_path_weight = 1000 # 1000 or 3000\nfor subj_fol in all_subj_fol:\n mat2_save = rf'{subj_fol}cm{os.sep}{atlas}_{mat_type}_cm_ord.npy'\n if os.path.exists(mat2_save):\n continue\n subj = subj_fol.split(os.sep)[-2]\n print(subj)\n num_mat = np.load(f'{subj_fol}cm{os.sep}{atlas}_num_cm_ord.npy')\n add_mat = np.load(f'{subj_fol}cm{os.sep}{atlas}_add_cm_ord.npy')\n dist_mat = np.load(f'{subj_fol}cm{os.sep}{atlas}_dist_cm_ord.npy')\n time_mat = time_weighted_matrix(add_mat, dist_mat) #1.6 is the voxel dimensions\n\n num_mat = th_num_mat(num_mat, 3)\n time_mat[num_mat == 0] = 0\n\n graph_num = nx.from_numpy_matrix(num_mat)\n graph_weights = nx.from_numpy_matrix(time_mat)\n time_from_node_to_node = np.zeros(num_mat.shape)\n node_list = list(graph_num.nodes())\n for start_node in node_list:\n node_vec_mean = n_times_random_walk(graph_num, graph_weights, start_node, node_list, n=500, max_steps=300, max_path_weight=3000)\n time_from_node_to_node[start_node, :] = node_vec_mean\n np.save(mat2_save, time_from_node_to_node)\n\n\n#calc_avg_mat(all_subj_fol, mat_type, main_fol + os.sep + 'cm', 'median', atlas, '')","sub_path":"time_delay_index/tdi_thebase4ever.py","file_name":"tdi_thebase4ever.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"177651381","text":"import datetime\nimport unittest\nimport os\nimport random\nfrom appium import webdriver\nfrom attrs import timeout\nfrom db_helper import test_insert_data\nfrom appium.webdriver.connectiontype import ConnectionType\n\nclass KQIbase(unittest.TestCase):\n def setUp(self):\n self.time_to_wait = timeout\n self.attrs = self.setup_init()\n self.drivers = []\n for attr in self.attrs:\n driver = webdriver.Remote(\"http://localhost:\" + \"4725\" + \"/wd/hub\", attr)\n driver.implicitly_wait(self.time_to_wait['default'])\n self.drivers.append(driver)\n\n def tearDown(self):\n for driver in self.drivers:\n driver.quit()\n\n def insert(self, bussiness, data_type, data_value, remark=''):\n product = self.attrs[0][\"product\"]\n client = self.attrs[0][\"platformName\"]\n if self.drivers[0].network_connection == 4:\n network = '4G'\n else:\n network = 'wifi'\n data_dict = {\n \"product_name\": product,\n \"client\": client,\n \"bussiness\": bussiness,\n \"data_type\": data_type,\n \"data_value\": data_value,\n \"network\": network,\n \"remark\": remark,\n \"test_time\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n test_insert_data('139_2018', data_dict)\n print(data_dict)\n\n def setup_init(self):\n return []\n\n def file_exist(self, filename, filepath):\n try:\n f = os.popen('adb -s ' + self.attrs[0]['udid'] + ' shell ls ' + filepath)\n content = f.readlines()\n f.close()\n for i in content:\n if filename in i:\n return True\n return False\n except:\n return False\n\n def change_network(self, driver):\n temp = random.randint(0, 9)\n if 0 <= temp <= 3:\n driver.set_network_connection(ConnectionType.DATA_ONLY)\n else:\n driver.set_network_connection(ConnectionType.ALL_NETWORK_ON)\n # print(driver.network_connection)\n\n","sub_path":"kqibase.py","file_name":"kqibase.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"498594772","text":"import os\nfrom numpy import *\nimport operator\ndef img2vector(filename):\n returnVect = zeros((1,1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0,32*i+j] = int(lineStr[j])\n return returnVect\ndef classify0(inX, dataSet, labels, k):# 其中inX我们需要求得分类的features的list,dataSet为训练数据,labels为训练数据对应的标签。k为提取前k个最相似的值\n dataSetSize = dataSet.shape[0]#多少行\n diffMat = tile(inX, (dataSetSize,1)) - dataSet#计算inX与整个dataset的差值\n sqDiffMat = diffMat**2 #计算矩阵的平方\n sqDistances = sqDiffMat.sum(axis=1)#将同一行的值相加,保持行数不变\n distances = sqDistances**0.5#开方\n sortedDistIndicies = distances.argsort()#argsort()函数是将x中的元素从小到大排列,提取其对应的index(索引)组成一个新的ndarray\n classCount={}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]] #\n classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1# get() 函数返回指定键的值,如果值不在字典中返回默认值,第二个参数即为默认值。\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)#从大到小排序,itemgetter函数用于获取对象的哪些维的数据,参数为一些序号\n return sortedClassCount[0][0]\ndef handwritingClassTest():\n hwLabels = []\n trainingFileList = os.listdir('./digits/trainingDigits') #load the training set\n m = len(trainingFileList)\n trainingMat = zeros((m,1024))#生成训练样本个的vector组成的matrix\n for i in range(m): #循环获取图片对应的vector以及真正的数字\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0] #take off .txt\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i,:] = img2vector('./digits/trainingDigits/%s' % fileNameStr)\n testFileList = os.listdir('./digits/testDigits') #iterate through the test set\n errorCount = 0.0\n mTest = len(testFileList)#获取测试的数量\n for i in range(mTest):\n fileNameStr = testFileList[i]\n fileStr = fileNameStr.split('.')[0] #take off .txt\n classNumStr = int(fileStr.split('_')[0])\n vectorUnderTest = img2vector('./digits/testDigits/%s' % fileNameStr)\n classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)\n print(\"the classifier came back with: %d, the real answer is: %d\" % (classifierResult, classNumStr))\n if (classifierResult != classNumStr): errorCount += 1.0\n print(\"\\nthe total number of errors is: %d\" % errorCount)\n print(\"\\nthe total error rate is: %f\" % (errorCount/float(mTest)))\nhandwritingClassTest()\n","sub_path":"Machine Learning/源码/第二章/KNN2.py","file_name":"KNN2.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"607775867","text":"'''\n@Author: zhaoyang.liang\n@Github: https://github.com/LzyRapx\n@Date: 2020-01-19 22:05:58\n'''\nclass Solution:\n def fairCandySwap(self, A: List[int], B: List[int]) -> List[int]:\n s1 = sum(A)\n s2 = sum(B)\n B = set(B)\n for a in A:\n b = (s2 - s1 + 2 * a) // 2\n if b in B:\n return [a,b]\n ","sub_path":"LeetCode/Easy/888.py","file_name":"888.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"570253476","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom helpers import SqlQueries\n\n'''\nFact and Dimension Operators\n\nWith dimension and fact operators, you can utilize the provided SQL helper class to run data transformations. Most of the logic is within the SQL transformations and the operator is expected to take as input a SQL statement and target database on which to run the query against. You can also define a target table that will contain the results of the transformation.\n\nDimension loads are often done with the truncate-insert pattern where the target table is emptied before the load. Thus, you could also have a parameter that allows switching between insert modes when loading dimensions. Fact tables are usually so massive that they should only allow append type functionality.\n'''\n\nclass LoadFactOperator(BaseOperator):\n\n ui_color = '#F98866'\n\n @apply_defaults\n def __init__(self,\n table=\"\",\n redshift_conn_id=\"\",\n sql=\"\",\n *args, **kwargs):\n\n super(LoadFactOperator, self).__init__(*args, **kwargs)\n self.table = table\n self.redshift_conn_id = redshift_conn_id\n self.sql = sql\n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n formatted_sql = getattr(SqlQueries,self.sql)\n \n self.log.info('Loading Fact table...')\n redshift.run(formatted_sql)\n self.log.info('\\nFact table loaded!\\n')\n","sub_path":"plugins/operators/load_fact.py","file_name":"load_fact.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"400183109","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\nimport re\nimport collections\n\ndef trans_info_to_dict(res):\n try:\n res_dict = collections.OrderedDict()\n list_info = str( res ).splitlines()\n for info_line in list_info:\n mathObj = re.search(':',info_line,re.S | re.M)\n if mathObj:\n line_list = info_line.split( ':' )\n res_dict[line_list[0].strip()] = line_list[1].strip()\n except Exception as e:\n raise Exception(\"*** trans_info_to_dict failed.***\",e)\n else:\n return res_dict","sub_path":"OltLib/HwOltLibrary/_CommApi.py","file_name":"_CommApi.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"542818043","text":"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Methods for running the Official Models with TensorRT.\n\nPlease note that all of these methods are in development, and subject to\nrapid change.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport imghdr\nimport json\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.saved_model.python.saved_model import reader\nimport cv2\n\nfrom official.resnet import imagenet_preprocessing # pylint: disable=g-bad-import-order\n\n_GPU_MEM_FRACTION = 0.50\n_WARMUP_NUM_LOOPS = 5\n_LOG_FILE = \"log.txt\"\n_LABELS_FILE = \"labellist.json\"\n_GRAPH_FILE = \"frozen_graph.pb\"\n\n\n################################################################################\n# Prep the image input to the graph.\n################################################################################\ndef preprocess_image(file_name, output_height=224, output_width=224,\n num_channels=3):\n \"\"\"Run standard ImageNet preprocessing on the passed image file.\n\n Args:\n file_name: string, path to file containing a JPEG image\n output_height: int, final height of image\n output_width: int, final width of image\n num_channels: int, depth of input image\n\n Returns:\n Float array representing processed image with shape\n [output_height, output_width, num_channels]\n\n Raises:\n ValueError: if image is not a JPEG.\n \"\"\"\n if imghdr.what(file_name) != \"jpeg\":\n raise ValueError(\"At this time, only JPEG images are supported. 
\"\n \"Please try another image.\")\n\n image_buffer = tf.read_file(file_name)\n normalized = imagenet_preprocessing.preprocess_image(\n image_buffer=image_buffer,\n bbox=None,\n output_height=output_height,\n output_width=output_width,\n num_channels=num_channels,\n is_training=False)\n\n with tf.Session(config=get_gpu_config()) as sess:\n result = sess.run([normalized])\n\n return result[0]\n\n\ndef batch_from_image(file_name, batch_size, output_height=224, output_width=224,\n num_channels=3):\n \"\"\"Produce a batch of data from the passed image file.\n\n Args:\n file_name: string, path to file containing a JPEG image\n batch_size: int, the size of the desired batch of data\n output_height: int, final height of data\n output_width: int, final width of data\n num_channels: int, depth of input data\n\n Returns:\n Float array representing copies of the image with shape\n [batch_size, output_height, output_width, num_channels]\n \"\"\"\n image_array = preprocess_image(\n file_name, output_height, output_width, num_channels)\n return np.expand_dims(image_array, axis=0)\n\n\n################################################################################\n# Utils for handling Frozen Graphs.\n################################################################################\n\ndef get_frozen_graph(graph_file):\n \"\"\"Read Frozen Graph file from disk.\"\"\"\n with tf.gfile.FastGFile(graph_file, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n return graph_def\n\n\n################################################################################\n# Run the graph in various precision modes.\n################################################################################\ndef get_gpu_config():\n \"\"\"Share GPU memory between image preprocessing and inference.\"\"\"\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=_GPU_MEM_FRACTION)\n return tf.ConfigProto(gpu_options=gpu_options)\n\n\ndef get_iterator(data):\n \"\"\"Wrap numpy data in a dataset.\"\"\"\n dataset = tf.data.Dataset.from_tensors(data).repeat()\n return dataset.make_one_shot_iterator()\n\n\ndef time_graph(graph_def, data, input_node, output_node):\n \"\"\"Run and time the inference graph.\n\n This function sets up the input and outputs for inference, warms up by\n running inference for _WARMUP_NUM_LOOPS, then times inference for num_loops\n loops.\n\n Args:\n graph_def: GraphDef, the graph to be timed.\n data: ndarray of shape [batch_size, height, width, depth], data to be\n predicted.\n input_node: string, the label of the input node where data will enter the\n graph.\n output_node: string, the names of the output node that will\n be returned during inference.\n num_loops: int, number of batches that should run through for timing.\n\n Returns:\n A tuple consisting of a list of num_loops inference times, and the\n predictions that were output for the batch.\n \"\"\"\n tf.logging.info(\"Starting execution\")\n\n tf.reset_default_graph()\n g = tf.Graph()\n\n with g.as_default():\n iterator = get_iterator(data)\n return_tensors = tf.import_graph_def(\n graph_def=graph_def,\n input_map={input_node: iterator.get_next()},\n return_elements=[output_node]\n )\n # Unwrap the returned output node. 
For now, we assume we only\n # want the tensor with index `:0`, which is the 0th element of the\n # `.outputs` list.\n output = return_tensors[0].outputs[0]\n\n with tf.Session(graph=g, config=get_gpu_config()) as sess:\n tf.logging.info(\"Starting Warmup cycle\")\n\n for _ in range(_WARMUP_NUM_LOOPS):\n sess.run([output])\n\n tf.logging.info(\"Starting timing.\")\n\n for _ in range(10):\n val = sess.run([output])\n\n return val[0]\n\n\ndef run_graph(graph_def, data, input_node, output_node):\n tf.reset_default_graph()\n g = tf.Graph()\n\n with g.as_default():\n return_tensors = tf.import_graph_def(\n graph_def=graph_def,\n input_map={input_node: data},\n return_elements=[output_node]\n )\n # Unwrap the returned output node. For now, we assume we only\n # want the tensor with index `:0`, which is the 0th element of the\n # `.outputs` list.\n output = return_tensors[0].outputs[0]\n\n with tf.Session(graph=g, config=get_gpu_config()) as sess:\n tf.logging.info(\"Starting Warmup cycle\")\n\n for _ in range(_WARMUP_NUM_LOOPS):\n sess.run([output])\n\n tf.logging.info(\"Starting timing.\")\n\n for _ in range(10):\n val = sess.run([output])\n\n return val[0]\n\n\n################################################################################\n# Parse predictions\n################################################################################\ndef get_labels():\n \"\"\"Get the set of possible labels for classification.\"\"\"\n with open(_LABELS_FILE, \"r\") as labels_file:\n labels = json.load(labels_file)\n\n return labels\n\n\ndef top_predictions(result, n):\n \"\"\"Get the top n predictions given the array of softmax results.\"\"\"\n # We only care about the first example.\n probabilities = result[0]\n # Get the ids of most probable labels. Reverse order to get greatest first.\n ids = np.argsort(probabilities)[::-1]\n return ids[:n]\n\n\ndef get_labels_for_ids(labels, ids, ids_are_one_indexed=False):\n \"\"\"Get the human-readable labels for given ids.\n\n Args:\n labels: dict, string-ID to label mapping from ImageNet.\n ids: list of ints, IDs to return labels for.\n ids_are_one_indexed: whether to increment passed IDs by 1 to account for\n the background category. 
See ArgParser `--ids_are_one_indexed`\n for details.\n\n Returns:\n list of category labels\n \"\"\"\n return [labels[str(x + int(ids_are_one_indexed))] for x in ids]\n\n\ndef print_predictions(result, ids_are_one_indexed=False, preds_to_print=5):\n \"\"\"Given an array of mode, graph_name, predicted_ID, print labels.\"\"\"\n labels = get_labels()\n\n print(\"Predictions:\")\n pred_ids = top_predictions(result, preds_to_print)\n pred_labels = get_labels_for_ids(labels, pred_ids, ids_are_one_indexed)\n print(\"Precision: \", pred_labels, np.argmax(result[0]))\n\n\n################################################################################\n# Run this script\n################################################################################\ndef main(argv):\n parser = TensorRTParser()\n flags = parser.parse_args(args=argv[1:])\n\n # Load the data.\n data = batch_from_image(flags.image_file, flags.batch_size)\n\n # Load the graph def\n frozen_graph_def = get_frozen_graph(flags.frozen_graph)\n\n # Run inference in all desired modes.\n result = time_graph(frozen_graph_def, data, flags.input_node, flags.output_node)\n \n # image = cv2.imread(flags.image_file)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # image = cv2.resize(image, (224, 224))\n # image = image.astype(np.float32)\n # image_np_expanded = np.expand_dims(image, axis=0)\n # result = time_graph(frozen_graph_def, image_np_expanded, flags.input_node, flags.output_node)\n\n # Print prediction results to the command line.\n print_predictions(result, flags.ids_are_one_indexed,\n flags.predictions_to_print)\n\n\nclass TensorRTParser(argparse.ArgumentParser):\n \"\"\"Parser to contain flags for running the TensorRT timers.\"\"\"\n\n def __init__(self):\n super(TensorRTParser, self).__init__()\n\n self.add_argument(\n \"--frozen_graph\", \"-fg\", default=None,\n help=\"[default: %(default)s] The location of a Frozen Graph \"\n \"protobuf file that will be used for inference. Note that either \"\n \"savedmodel_dir or frozen_graph should be passed in, and \"\n \"frozen_graph will take precedence.\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--output_node\", \"-on\", default=\"softmax_tensor\",\n help=\"[default: %(default)s] The names of the graph output node \"\n \"that should be used when retrieving results. Assumed to be a softmax.\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--input_node\", \"-in\", default=\"input_tensor\",\n help=\"[default: %(default)s] The name of the graph input node where \"\n \"the float image array should be fed for prediction.\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--batch_size\", \"-bs\", type=int, default=128,\n help=\"[default: %(default)s] Batch size for inference. If an \"\n \"image file is passed, it will be copied batch_size times to \"\n \"imitate a batch.\",\n metavar=\"\"\n )\n\n self.add_argument(\n \"--image_file\", \"-if\", default=None,\n help=\"[default: %(default)s] The location of a JPEG image that will \"\n \"be passed in for inference. This will be copied batch_size times to \"\n \"imitate a batch. If not passed, random data will be used.\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--workspace_size\", \"-ws\", type=int, default=2 << 10,\n help=\"[default: %(default)s] Workspace size in megabytes.\",\n metavar=\"\"\n )\n\n self.add_argument(\n \"--ids_are_one_indexed\", action=\"store_true\",\n help=\"[default: %(default)s] Some ResNet models include a `background` \"\n \"category, and others do not. 
If the model used includes `background` \"\n \"at index 0 in the output and represents all 1001 categories, \"\n \"this should be False. If the model used omits the `background` label \"\n \"and has only 1000 categories, this should be True.\"\n )\n\n self.add_argument(\n \"--predictions_to_print\", \"-pp\", type=int, default=5,\n help=\"[default: %(default)s] Number of predicted labels to predict.\",\n metavar=\"\"\n )\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n main(argv=sys.argv)\n","sub_path":"research/tensorrt/my_trt.py","file_name":"my_trt.py","file_ext":"py","file_size_in_byte":12576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"334831788","text":"# Zadanie 3\n\n\ndef delete_letters(text, letter):\n text = str(text)\n for i in text:\n if i == letter:\n text = text.replace(i, '')\n return text\n\n\nstring = input(\"Wprowadź dowolny ciąg znaków: \")\nchar = input(\"Której litery chciałbyś się pozbyć? \")\nnew_string = delete_letters(string, char)\nprint('Wynik:', new_string)\n","sub_path":"cwiczenia02/zadanie3.py","file_name":"zadanie3.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"380254430","text":"from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.forms import widgets\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.safestring import mark_safe\n\nGLOBAL_OPTIONS = getattr(settings, 'SIMDITOR_OPTIONS', {})\n\n\nclass SimditorWidgets(widgets.Textarea):\n def __init__(self, *args, **kwargs):\n self.upload_to = kwargs.pop('upload_to', '')\n self.custom_options = kwargs.pop('options', {})\n self.image_upload = kwargs.pop('image_upload', False)\n super(SimditorWidgets, self).__init__(*args, **kwargs)\n\n @property\n def options(self):\n options = GLOBAL_OPTIONS.copy()\n options.update(self.custom_options)\n if self.image_upload:\n options['image_upload'] = reverse_lazy('simditor_upload_image', kwargs={'upload_to': self.upload_to})\n return options\n\n def render(self, name, value, attrs=None):\n if value is None:\n value = ''\n if attrs is None:\n attrs = {}\n final_attrs = self.build_attrs(attrs, name=name)\n options = self.options.copy()\n options.update({\n 'name': name,\n 'value': final_attrs.get('placeholder', '') or value,\n })\n context = {\n 'Simditor': options,\n }\n html = render_to_string('simditor.html', context)\n return mark_safe(html)\n\n def _media(self):\n js = [\n 'js/jquery.min.js',\n 'js/module.min.js',\n 'js//hotkeys.min.js',\n 'js/simditor.min.js',\n ]\n if self.image_upload:\n js.append('js/uploader.min.js')\n css = {\n 'all': ('css/font-awesome.css', 'css/simditor.css')\n }\n return forms.Media(js=js, css=css)\n media = property(_media)\n","sub_path":"simditor/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"231981936","text":"import numpy as np\nimport pybindingcurve as pbc\n\nmySystem = pbc.BindingCurve(pbc.systemsequations.eq06_2com_fractionXboundAAorA_a_x_ax_axa.all_solutions)\n\nsystem_parameters = {\n 'x' : 2,\n 'kdax' : 1,\n 'kdaxa' : 0.05,\n 'a' : np.linspace(0, 10,1000),\n 'ymin':0,\n 'ymax':1,\n}\n\ncurve1=mySystem.simulate_curve(system_parameters, \"Complex 2 site ligand\", use_all_solutions=True)\nmySystem.show_plot(max_y=1.1)\n","sub_path":"example-1Ligand2Proteins.py","file_name":"example-1Ligand2Proteins.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"321239484","text":"class Temporal:\n \"\"\"For plotting summary plots\"\"\"\n\n def __init__(self):\n self.fig, (self.ax1, self.ax2, self.ax3) = plt.subplots(nrows=3)\n plot_size(8, 12, 80)\n\n def set_properties(self):\n \"\"\"We manually change the axes for every subfigure\"\"\"\n\n ax1_array = np.linspace(0, 23, 24)\n ax1_labels = [np.array2string(i)[:-1] for i in ax1_array]\n self.ax1.set_xticks(range(24))\n self.ax1.set_xticklabels(ax1_labels)\n\n ax2_labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\n 'Jul', 'Agu', 'Sep', 'Oct', 'Nov', 'Dec']\n self.ax2.set_xticks(range(12))\n self.ax2.set_xticklabels(ax2_labels)\n\n ax3_array = np.linspace(1998, 2019, 22)\n ax3_labels = [np.array2string(i) for i in ax3_array]\n ax3_labels = [\"'\" + i[2:4] for i in ax3_labels]\n self.ax3.set_xticks(range(22))\n self.ax3.set_xticklabels(ax3_labels)\n\n def plot(self, df, c):\n \"\"\"Creating summary plots\"\"\"\n period = ['hour', 'month', 'year']\n for i, ax in enumerate(self.fig.axes):\n sns.pointplot(data=df,\n x=period[i],\n y='dni',\n ax=ax,\n color=c,\n ci=None)\n self.set_properties()","sub_path":"utils/temporal.py","file_name":"temporal.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"59213488","text":"import pandas as pd\nimport numpy as np\nimport datetime\nimport os\nimport time\nimport sys\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\nsys.path.append('E:\\\\workspace\\\\work\\\\Correct\\\\CorrectClass\\\\')\nfrom get_now_time import GetNowTime\nfrom FNP import FindNearestPoint\n\n'''\n此代码旨在用逐小时实况选取最高最低实况\nGGOB是GetGribObversationMost的缩写\n'''\n\n\ndef get_listTime():\n global previousTimeS, previousTimeSUTC\n listTimeBJT, listTimeUTC = [], []\n for eHour in range(24):\n getTimeBJT = datetime.datetime.strptime(previousTimeS, '%Y%m%d%H')\n getTimeUTC = datetime.datetime.strptime(previousTimeSUTC, '%Y%m%d%H')\n getTimeBJT = getTimeBJT - datetime.timedelta(hours=eHour)\n getTimeUTC = getTimeUTC - datetime.timedelta(hours=eHour)\n getTimeBJTStr = getTimeBJT.strftime('%Y%m%d%H')\n getTimeUTCStr = getTimeUTC.strftime('%Y%m%d%H')\n listTimeBJT.append(getTimeBJTStr)\n listTimeUTC.append(getTimeUTCStr)\n return listTimeBJT, listTimeUTC\n\n\ndef get_Tmost_Grid(listTime, pathG):\n print(' 【开始格点实况最高最低气温筛选】')\n arZero = np.zeros((161, 274), dtype=float)\n arOne = np.ones((161, 274), dtype=float)\n arBase = np.array([arZero, arOne])\n for eTime in listTime:\n pathGfile = pathG + eTime[:4] + '\\\\' + eTime[:-2] + '\\\\'\n try:\n arTemp = np.loadtxt(pathGfile + 'TMP_' + eTime + '.txt')\n # print('【已载入%s时刻格点实况】' % eTime)\n except Exception as ee:\n print(eTime + '时次文件有误')\n with open(logPath + '实况格点最值获取报错_' + getTimeOb.nowBJTStr + '.log', 'a+') as logfo1:\n logfo1.writelines(eTime + '时次文件有误' + '\\n')\n logfo1.writelines(str(ee) + '\\n')\n data1 = np.append(arBase, arTemp)\n dim1 = arBase.shape\n dataComb = data1.reshape(dim1[0] + 1, dim1[1], dim1[2])\n arBase = dataComb.copy()\n dataComb = np.delete(dataComb, [0, 1], axis=0)\n listmax = []\n listmin = []\n if dataComb.shape[0] > 21:\n for n in range(dataComb.shape[1]):\n for m in range(dataComb.shape[2]):\n aaa = np.max(dataComb[:, n, m])\n bbb = np.min(dataComb[:, n, m])\n listmax.append(aaa)\n listmin.append(bbb)\n TmaxArray = np.array(listmax).reshape(dataComb.shape[1], dataComb.shape[2])\n TminArray = np.array(listmin).reshape(dataComb.shape[1], dataComb.shape[2])\n print(' 【格点实况最高/最低气温筛选执行完成】')\n return TmaxArray, TminArray\n\n\ndef get_Tmost_Station(listTimeUTC, pathS):\n print(' 【开始站点实况最高最低气温筛选】')\n stationInfoFilePath = 'E:\\\\work\\\\2020Correct\\\\data\\\\StationInfo_648.txt' # 站点信息文件,此文件中只有站点信息,为了DF索引而设\n dfSt = pd.read_csv(stationInfoFilePath, encoding='utf-8', sep=',', engine='python').set_index('Station_Num') # 读取站点信息文件\n dfSt = dfSt['Station_Name']\n for i in listTimeUTC:\n # print('【已载入%s时刻格点实况】' % i)\n try:\n dfO = pd.read_csv(pathS + i + '.txt', sep=' ', engine='python', header=1).set_index('Station_Id_C') # 读取各时次的逐小时文件\n except Exception as e2:\n print(i + '时次文件有误')\n with open(logPath + '实况站点最值获取报错_' + getTimeOb.nowBJTStr + '.log', 'a+') as logfo2:\n logfo2.writelines(i + '时次文件有误' + '\\n')\n logfo2.writelines(str(e2) + '\\n')\n dfO = dfO[~dfO.index.duplicated(keep='first')]\n dfO[dfO['TEM'] > 100] = np.nan # 过滤较大值\n dfO = dfO['TEM']\n dfMandO = pd.concat([dfSt, dfO], axis=1) # 将24个文件拼接在一起\n dfSt = dfMandO\n dfSt['TMAX'] = dfSt.max(axis=1)\n dfSt['TMIN'] = dfSt.min(axis=1)\n dfTMAXFinal = dfSt[['TMAX']]\n dfTMINFinal = dfSt[['TMIN']]\n print(' 【站点实况最高/最低气温筛选执行完成】')\n return dfTMAXFinal, dfTMINFinal\n\n\nif __name__ == '__main__':\n timeStart = time.time()\n print('【开始运行最高气温最低气温筛选】')\n #########路径及参数设置\n # 一些路径\n pathGrib1h = 
'F:\\\\work\\\\2020Correct\\\\data\\\\TEM_ob_1h_CLDAS_GRID\\\\'\n pathStation1h = 'F:\\\\work\\\\2020Correct\\\\data\\\\TEM_ob_1h_648\\\\'\n pathGrib24h = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_ob_24h_Grid\\\\'\n pathStation24h = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_ob_24h_648\\\\'\n logPath = 'F:\\\\work\\\\2020Correct\\\\data\\\\log\\\\'\n # 获取时间参数\n getTimeOb = GetNowTime()\n # getTimeOb.print_all_attribute_of_object() # 此对象所有属性的print\n nowTimeStr = getTimeOb.nowTimeStr\n nowTimeStrUTC = getTimeOb.nowTimeStrUTC\n previousTimeS = getTimeOb.previousTimeS\n previousTimeSUTC = getTimeOb.previousTimeSUTC\n listTimeBJT, listTimeUTC = get_listTime()\n ##########################筛选程序\n # 筛选格点最高最低实况\n finalGridData = get_Tmost_Grid(listTimeBJT, pathGrib1h)\n np.savetxt(pathGrib24h + previousTimeS + '_TMAX.txt', finalGridData[0], fmt='%.2f')\n np.savetxt(pathGrib24h + previousTimeS + '_TMIN.txt', finalGridData[1], fmt='%.2f')\n # 筛选站点最高最低实况\n\n finalStationData = get_Tmost_Station(listTimeUTC, pathStation1h)\n finalStationData[0].to_csv(pathStation24h + previousTimeS + '_TMAX.txt', float_format='%.2f')\n finalStationData[1].to_csv(pathStation24h + previousTimeS + '_TMIN.txt', float_format='%.2f')\n\n timeEnd = time.time() - timeStart\n print('【最高气温最低气温筛选运行结束,运行时间%.2f秒】' % timeEnd)\n","sub_path":"FinalCode24/S3GetMostRes.py","file_name":"S3GetMostRes.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"178333889","text":"\"\"\"@package app_reusabilityToken_simulator\r\nSimulates a market where reusability tokens are at work\r\n\r\nFor usage: python app_reusabilityToken_simulator.py --help\r\n\"\"\"\r\nimport argparse\r\nfrom SimulationEngine import SimulationEngine\r\n\r\nnum_iterations = 100\r\nnum_customers = 3\r\nnum_shops = 2\r\n\r\n\r\ndef setup_args():\r\n global num_iterations, num_customers, num_shops\r\n parser = argparse.ArgumentParser('ReusabiliToken Simulator')\r\n parser.add_argument('--num_iterations', type=int, help='Number of iterations', default=100)\r\n parser.add_argument('--num_customers', type=int, help='Number of customers', default=100)\r\n parser.add_argument('--num_shops', type=int, help='Number of shops', default=5)\r\n args = parser.parse_args()\r\n num_iterations = args.num_iterations\r\n num_customers = args.num_customers\r\n num_shops = args.num_shops\r\n\r\n\r\ndef run_simulator():\r\n global num_iterations, num_customers, num_shops\r\n sim_engine = SimulationEngine(num_customers=num_customers,\r\n num_shops=num_shops,\r\n sim_iters=num_iterations,\r\n coin_limit=200000,\r\n rep_limit=20,\r\n coin_rep_factor=1.0,\r\n payment_due=30)\r\n sim_engine.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n setup_args()\r\n run_simulator()\r\n\r\n","sub_path":"ReusabiliTokenSimulator/src/app_reusabilityToken_simulator.py","file_name":"app_reusabilityToken_simulator.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"445481726","text":"\"\"\"\n**Challenge 7**\n\n*AES in ECB mode*\n\nThe base64-encoded content in this file has been encrypted via\nAES-128 in ECB mode under the key\n\n``\"YELLOW SUBMARINE\"``\n\n(case-sensitive, without the quotes; exactly 16 characters).\n\nDecrypt it. You know the key, after all.\n\"\"\"\nfrom Crypto.Cipher import AES\nimport c1\nimport unittest\n\n## Uses the AES library function to decrypt\ndef aes_128_ecb_decrypt(txt, key):\n \"\"\"\n Decrypts AES-128 under ECB mode.\n\n Args:\n txt: The ciphertext to be decrypted.\n key: The key for decryption\n\n Returns:\n The decrypted plaintext.\n \"\"\"\n return AES.new(key, AES.MODE_ECB).decrypt(txt)\n\n## Uses the AES library function to encrypt\ndef aes_128_ecb_encrypt(txt, key):\n \"\"\"\n Encrypts AES-128 under ECB mode.\n\n Args:\n txt: The plaintext to be encrypted.\n key: The key for encryption\n\n Returns:\n The encrypted ciphertext.\n \"\"\"\n return AES.new(key, AES.MODE_ECB).encrypt(txt)\n\nclass TestAESECB(unittest.TestCase):\n def setUp(self):\n self.DEBUG = False\n f = open('../../testdata/7.txt')\n self.txt = c1.base64toascii(f.read())\n f.close()\n self.key = b'YELLOW SUBMARINE'\n def test_challenge_7(self):\n result = aes_128_ecb_decrypt(self.txt, self.key)\n if self.DEBUG:\n print(result)\n enc = aes_128_ecb_encrypt(result, self.key)\n self.assertEqual(enc, self.txt)\n\nif __name__ == '__main__' :\n unittest.main()\n","sub_path":"cryptopals-py/set1/c7.py","file_name":"c7.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"405546501","text":"# -*- coding:utf-8 -*-\nimport time\nimport TimeAnlz\nimport json\nimport datetime\ndef getCurrentTime():\n\treturn time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n\n#时间间隔处理\ndef getVaildTime(vaildtime):\n\ttn = TimeAnlz.TimeAnlz()\n\tres = tn.parse(unicode(vaildtime))\n\tdic = json.loads(res)\n\tif 'timedelta' in dic.keys():\n\t\tdic = json.loads(res)\n\t\tnowTime = dic['timedelta']\n\t\ttimedelta = nowTime[0].split(\", \")\n\t\tday = timedelta[0].split(\" \")\n\t\tday2second = int(day[0]) *24*3600\n\t\tsec = timedelta[1].split(\":\")\n\t\tif sec[1] == \"00\":\n\t\t\tsec[1] = 0\n\t\telif sec[1] == \"01\":\n\t\t\tsec[1] = 1;\n\t\telif sec[1] == \"02\":\n\t\t\tsec[1] =2\n\t\telif sec[1] == \"03\":\n\t\t\tsec[1] = 3\n\t\telif sec[1] == \"04\":\n\t\t\tsec[1] = 4\n\t\telif sec[1] == \"05\":\n\t\t\tsec[1] = 5\n\t\telif sec[1] == \"06\":\n\t\t\tsec[1] = 6\n\t\telif sec[1] == \"07\":\n\t\t\tsec[1] = 7\n\t\telif sec[1] == \"08\":\n\t\t\tsec[1] = 8\n\t\telif sec[1] == \"09\":\n\t\t\tsec[1] = 9\n\t\thour2second = (int(sec[0])*60 + int(sec[1]))*60\n\t\treturn hour2second + day2second\n\treturn 3600\n\ndef isVaild(datetime, validTime):\n\ttry:\n\t\tdate = time.strptime(datetime, \"%Y-%m-%d %H:%M:%S\")\n\t\tdate = int(time.mktime(date))\n\t\tcur = int(time.time())\n\t\tvalid = getVaildTime(validTime)\n\t\tif cur - date < valid: #有效期1小时\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\n# if __name__ == \"__main__\":\n# \tisVaild(\"2017-12-26 10:28:00\",\"一小时\")\n\n","sub_path":"Utils_time.py","file_name":"Utils_time.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"349606271","text":"#\n# Copyright 2013 Simone Campagna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n__author__ = 'Simone Campagna'\n\nfrom structparser.core import *\n\nclass Params(Struct):\n __fields__ = (\n FieldType(\"length\",\n field_type=Int,\n default=3,\n description=\"length of lst0\"),\n FieldType(\"coeff\",\n field_type=Float,\n default=1.2,\n description=\"coeff\"),\n FieldType(\"lst0\",\n field_type=IntList(BIND.length),\n description=\"lst0\"),\n FieldType(\"lst1\",\n field_type=FloatList(1 + BIND.lst0[IntCast(BIND.coeff)]),\n description=\"lst1\"),\n )\n\n","sub_path":"examples/lst.py","file_name":"lst.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"578858224","text":"import os\nimport pkg_resources\nfrom urlparse import urljoin\nfrom urlparse import urlparse\n\nfrom paste import httpexceptions\nfrom paste import request\nfrom paste.httpheaders import ETAG\nfrom paste.urlparser import StaticURLParser\n\nfrom zope.interface import implements\n\nfrom pyramid.asset import resolve_asset_spec\nfrom pyramid.interfaces import IStaticURLInfo\nfrom pyramid.path import caller_package\nfrom pyramid.request import call_app_with_subpath_as_path_info\nfrom pyramid.url import route_url\n\nclass PackageURLParser(StaticURLParser):\n \"\"\" This probably won't work with zipimported resources \"\"\"\n def __init__(self, package_name, resource_name, root_resource=None,\n cache_max_age=None):\n self.package_name = package_name\n self.resource_name = os.path.normpath(resource_name)\n if root_resource is None:\n root_resource = self.resource_name\n self.root_resource = root_resource\n self.cache_max_age = cache_max_age\n\n def __call__(self, environ, start_response):\n path_info = environ.get('PATH_INFO', '')\n if not path_info:\n return self.add_slash(environ, start_response)\n if path_info == '/':\n # @@: This should obviously be configurable\n filename = 'index.html'\n else:\n filename = request.path_info_pop(environ)\n resource = os.path.normcase(os.path.normpath(\n self.resource_name + '/' + filename))\n if not resource.startswith(self.root_resource):\n # Out of bounds\n return self.not_found(environ, start_response)\n if not pkg_resources.resource_exists(self.package_name, resource):\n return self.not_found(environ, start_response)\n if pkg_resources.resource_isdir(self.package_name, resource):\n # @@: Cache?\n return self.__class__(\n self.package_name, resource, root_resource=self.resource_name,\n cache_max_age=self.cache_max_age)(environ, start_response)\n pi = environ.get('PATH_INFO')\n if pi and pi != '/':\n return self.error_extra_path(environ, start_response) \n full = pkg_resources.resource_filename(self.package_name, resource)\n if_none_match = environ.get('HTTP_IF_NONE_MATCH')\n if if_none_match:\n mytime = os.stat(full).st_mtime\n if str(mytime) == if_none_match:\n headers = []\n ETAG.update(headers, mytime)\n start_response('304 Not Modified', headers)\n return [''] # empty body\n\n fa = self.make_app(full)\n if self.cache_max_age:\n fa.cache_control(max_age=self.cache_max_age)\n return fa(environ, start_response)\n\n def not_found(self, environ, start_response, debug_message=None):\n comment=('SCRIPT_NAME=%r; PATH_INFO=%r; looking in package %s; '\n 'subdir %s ;debug: %s' % (environ.get('SCRIPT_NAME'),\n environ.get('PATH_INFO'),\n self.package_name,\n self.resource_name,\n debug_message or '(none)'))\n exc = httpexceptions.HTTPNotFound(\n 'The resource at %s could not be found'\n % request.construct_url(environ),\n comment=comment)\n return exc.wsgi_application(environ, start_response)\n\n def __repr__(self):\n return '<%s %s:%s at %s>' % (self.__class__.__name__, self.package_name,\n self.root_resource, id(self))\n\nclass StaticURLInfo(object):\n implements(IStaticURLInfo)\n\n route_url = staticmethod(route_url) # for testing only\n\n def __init__(self, config):\n self.config = config\n self.registrations = []\n\n def generate(self, path, request, **kw):\n for (name, spec, is_url) in self.registrations:\n if path.startswith(spec):\n subpath = path[len(spec):]\n if is_url:\n return urljoin(name, subpath)\n else:\n kw['subpath'] = subpath\n return self.route_url(name, request, **kw)\n\n raise ValueError('No static URL 
definition matching %s' % path)\n\n def add(self, name, spec, **extra):\n # This feature only allows for the serving of a directory and\n # the files contained within, not of a single asset;\n # appending a slash here if the spec doesn't have one is\n # required for proper prefix matching done in ``generate``\n # (``subpath = path[len(spec):]``).\n if not spec.endswith('/'):\n spec = spec + '/'\n\n # we also make sure the name ends with a slash, purely as a\n # convenience: a name that is a url is required to end in a\n # slash, so that ``urljoin(name, subpath))`` will work above\n # when the name is a URL, and it doesn't hurt things for it to\n # have a name that ends in a slash if it's used as a route\n # name instead of a URL.\n if not name.endswith('/'):\n # make sure it ends with a slash\n name = name + '/'\n\n names = [ t[0] for t in self.registrations ]\n\n if name in names:\n idx = names.index(name)\n self.registrations.pop(idx)\n\n if urlparse(name)[0]:\n # it's a URL\n self.registrations.append((name, spec, True))\n else:\n # it's a view name\n cache_max_age = extra.pop('cache_max_age', None)\n # create a view\n view = static_view(spec, cache_max_age=cache_max_age,\n use_subpath=True)\n\n # Mutate extra to allow factory, etc to be passed through here.\n # Treat permission specially because we'd like to default to\n # permissiveness (see docs of config.add_static_view). We need\n # to deal with both ``view_permission`` and ``permission``\n # because ``permission`` is used in the docs for add_static_view,\n # but ``add_route`` prefers ``view_permission``\n permission = extra.pop('view_permission', None)\n if permission is None:\n permission = extra.pop('permission', None)\n if permission is None:\n permission = '__no_permission_required__'\n\n context = extra.pop('view_context', None)\n if context is None:\n context = extra.pop('view_for', None)\n if context is None:\n context = extra.pop('for_', None)\n\n renderer = extra.pop('view_renderer', None)\n if renderer is None:\n renderer = extra.pop('renderer', None)\n\n attr = extra.pop('view_attr', None)\n\n # register a route using the computed view, permission, and \n # pattern, plus any extras passed to us via add_static_view\n pattern = \"%s*subpath\" % name # name already ends with slash\n self.config.add_route(name, pattern, **extra)\n self.config.add_view(route_name=name, view=view,\n permission=permission, context=context,\n renderer=renderer, attr=attr)\n self.registrations.append((name, spec, False))\n\nclass static_view(object):\n \"\"\" An instance of this class is a callable which can act as a\n :app:`Pyramid` :term:`view callable`; this view will serve\n static files from a directory on disk based on the ``root_dir``\n you provide to its constructor.\n\n The directory may contain subdirectories (recursively); the static\n view implementation will descend into these directories as\n necessary based on the components of the URL in order to resolve a\n path into a response.\n\n You may pass an absolute or relative filesystem path or a\n :term:`asset specification` representing the directory\n containing static files as the ``root_dir`` argument to this\n class' constructor.\n\n If the ``root_dir`` path is relative, and the ``package_name``\n argument is ``None``, ``root_dir`` will be considered relative to\n the directory in which the Python file which *calls* ``static``\n resides. 
If the ``package_name`` name argument is provided, and a\n relative ``root_dir`` is provided, the ``root_dir`` will be\n considered relative to the Python :term:`package` specified by\n ``package_name`` (a dotted path to a Python package).\n\n ``cache_max_age`` influences the ``Expires`` and ``Max-Age``\n response headers returned by the view (default is 3600 seconds or\n five minutes).\n\n ``use_subpath`` influences whether ``request.subpath`` will be used as\n ``PATH_INFO`` when calling the underlying WSGI application which actually\n serves the static files. If it is ``True``, the static application will\n consider ``request.subpath`` as ``PATH_INFO`` input. If it is ``False``,\n the static application will consider request.path_info as ``PATH_INFO``\n input. By default, this is ``False``.\n\n .. note:: If the ``root_dir`` is relative to a :term:`package`, or\n is a :term:`asset specification` the :app:`Pyramid`\n :class:`pyramid.config.Configurator` method can be\n used to override assets within the named ``root_dir``\n package-relative directory. However, if the ``root_dir`` is\n absolute, configuration will not be able to\n override the assets it contains. \"\"\"\n \n def __init__(self, root_dir, cache_max_age=3600, package_name=None,\n use_subpath=False):\n # package_name is for bw compat; it is preferred to pass in a\n # package-relative path as root_dir\n # (e.g. ``anotherpackage:foo/static``).\n if package_name is None:\n package_name = caller_package().__name__\n package_name, root_dir = resolve_asset_spec(root_dir, package_name)\n if package_name is None:\n app = StaticURLParser(root_dir, cache_max_age=cache_max_age)\n else:\n app = PackageURLParser(\n package_name, root_dir, cache_max_age=cache_max_age)\n self.app = app\n self.use_subpath = use_subpath\n\n def __call__(self, context, request):\n if self.use_subpath:\n return call_app_with_subpath_as_path_info(request, self.app)\n return request.get_response(self.app)\n","sub_path":"app/lib/dist/pyramid/static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":10459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"34243963","text":"import numpy as np\nimport csv\nfrom demand import gen_customer_data\nfrom routing import TSP\nfrom copy import deepcopy\n\n\nclass Technician:\n def __init__(self, travel_speed, customer_data, square_length, hours_per_day):\n self.travel_speed = travel_speed\n self.customer_data = customer_data\n self.hub_loc = (square_length / 2, square_length / 2)\n self.cur_loc = (square_length / 2, square_length / 2)\n self.elapsed_time = 0\n self.mins_per_day = hours_per_day * 60\n self.idle_time = 0\n self.wait_time = []\n self.queue = []\n\n def work(self):\n for new_customer in self.customer_data:\n if self.elapsed_time >= self.mins_per_day:\n break\n\n if self.elapsed_time >= new_customer[\"arrive_time\"]:\n self.add_customer(new_customer)\n else:\n self.create_optimal_plan()\n while len(self.queue) != 0:\n cur_customer = self.queue[0]\n travel_time = self.travel(\n self.cur_loc, self.get_customer_loc(cur_customer)\n )\n\n if self.elapsed_time + travel_time < new_customer[\"arrive_time\"]:\n self.update_wait_time(cur_customer, travel_time)\n self.elapsed_time += travel_time + cur_customer[\"serve_time\"]\n self.cur_loc = self.get_customer_loc(cur_customer)\n self.remove_customer()\n\n else:\n travelled = new_customer[\"arrive_time\"] - self.elapsed_time\n self.elapsed_time += travelled\n self.cur_loc = self.get_cur_loc(\n self.cur_loc,\n self.get_customer_loc(cur_customer),\n travelled,\n )\n self.add_customer(new_customer)\n break\n\n if len(self.queue) == 0:\n self.add_customer(new_customer)\n if self.elapsed_time < new_customer[\"arrive_time\"]:\n travel_time = self.travel(self.cur_loc, self.hub_loc)\n if (\n self.elapsed_time + travel_time\n < new_customer[\"arrive_time\"]\n ):\n self.cur_loc = deepcopy(self.hub_loc)\n self.elapsed_time += travel_time\n self.idle_time += (\n new_customer[\"arrive_time\"] - self.elapsed_time\n )\n else:\n travelled = new_customer[\"arrive_time\"] - self.elapsed_time\n self.cur_loc = self.get_cur_loc(\n self.cur_loc, self.hub_loc, travelled,\n )\n self.elapsed_time = max(\n new_customer[\"arrive_time\"], self.elapsed_time\n )\n\n self.create_optimal_plan()\n while len(self.queue) and (self.elapsed_time < self.mins_per_day):\n cur_customer = self.queue[0]\n travel_time = self.travel(self.cur_loc, self.get_customer_loc(cur_customer))\n self.update_wait_time(cur_customer, travel_time)\n self.elapsed_time += travel_time + cur_customer[\"serve_time\"]\n self.cur_loc = self.get_customer_loc(cur_customer)\n self.remove_customer()\n\n if self.elapsed_time < self.mins_per_day:\n self.idle_time += self.mins_per_day - self.elapsed_time\n\n result = (\n len(self.customer_data),\n len(self.customer_data) - len(self.queue),\n len(self.queue),\n self.idle_time,\n self.wait_time,\n )\n\n return result\n\n def get_cur_loc(self, from_loc, to_loc, travelled):\n if travelled != 0:\n a = np.array(from_loc)\n b = np.array(to_loc)\n v = a - b\n u = v / np.linalg.norm(v)\n return tuple(a + travelled * u)\n else:\n return from_loc\n\n def get_customer_loc(self, customer):\n return (customer[\"loc_x\"], customer[\"loc_y\"])\n\n def travel(self, from_loc, to_loc):\n from_loc = np.array(from_loc)\n to_loc = np.array(to_loc)\n return np.linalg.norm(from_loc - to_loc) * self.travel_speed\n\n def create_optimal_plan(self):\n trip = self.create_trip()\n optimal_trip = TSP(trip, self.travel_speed)\n self.queue = [self.queue[i - 1] for i in optimal_trip[1:-1]]\n\n def create_trip(self):\n trip = [deepcopy(self.cur_loc)]\n for customer in self.queue:\n 
trip.append(self.get_customer_loc(customer))\n return trip\n\n def add_customer(self, customer):\n self.queue.append(customer)\n\n def remove_customer(self):\n self.queue = self.queue[1:]\n\n def update_wait_time(self, customer, travel_time):\n self.wait_time.append(\n max(self.elapsed_time + travel_time - customer[\"arrive_time\"], 0)\n )\n\n\ndef main():\n np.random.seed(42)\n total_area = 125 * 125\n num_technicians_arr = [15, 20, 25]\n\n num_of_days = 100\n hours_per_day = 8\n avg_demand_per_hour_arr = [15, 25, 35]\n avg_serve_time = 15\n travel_speed = 1\n\n for num_technicians in num_technicians_arr:\n for avg_demand_per_hour in avg_demand_per_hour_arr:\n result = []\n for day in range(num_of_days):\n day_result = [0, 0, 0, 0, []]\n area_per_technician = total_area / num_technicians\n square_length = np.sqrt(area_per_technician)\n all_customer_data = gen_customer_data(\n hours_per_day, avg_demand_per_hour, avg_serve_time, square_length\n )\n day_result[0] = len(all_customer_data)\n assigned_technicians = np.random.randint(\n low=0, high=num_technicians, size=len(all_customer_data)\n )\n\n for i in range(num_technicians):\n customer_data = all_customer_data[assigned_technicians == i]\n technician = Technician(\n travel_speed, customer_data, square_length, hours_per_day\n )\n temp = technician.work()\n day_result[1] += temp[1]\n day_result[2] += temp[2]\n day_result[3] += temp[3]\n day_result[4].extend(temp[4])\n\n result.append(\n (\n day,\n *day_result[:4],\n np.min(day_result[4]),\n np.max(day_result[4]),\n np.average(day_result[4]),\n np.std(day_result[4]),\n )\n )\n\n url = (\n \"result/\"\n + \"zone_\"\n + str(avg_demand_per_hour)\n + \"demand\"\n + \"_\"\n + str(num_technicians)\n + \"technician\"\n + \".csv\"\n )\n with open(url, \"w\") as test_file:\n file_writer = csv.writer(test_file)\n file_writer.writerow(\n (\n \"day\",\n \"num of customer\",\n \"num of served\",\n \"num of not served\",\n \"idle_time\",\n \"min_wait_time\",\n \"max_wait_time\",\n \"avg_wait_time\",\n \"std_wait_time\",\n )\n )\n for row in result:\n file_writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"multiple_servers_zonal.py","file_name":"multiple_servers_zonal.py","file_ext":"py","file_size_in_byte":7974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"304129516","text":"import asyncio\nfrom datetime import datetime\nfrom typing import Optional, Union\n\nimport discord\nfrom discord.ext import commands\n\nimport re\n\nfrom dateutil import parser\nfrom natural.date import duration\n\nfrom core import checks\nfrom core.decorators import trigger_typing\nfrom core.models import Bot\nfrom core.paginator import PaginatorSession\nfrom core.time import UserFriendlyTime, human_timedelta\nfrom core.utils import format_preview, User\n\n\nclass Modmail:\n \"\"\"Commands directly related to Modmail functionality.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command()\n @trigger_typing\n @checks.has_permissions(administrator=True)\n async def setup(self, ctx):\n \"\"\"Sets up a server for Modmail\"\"\"\n if self.bot.main_category:\n return await ctx.send(\n f'{self.bot.modmail_guild} is already set up.'\n )\n\n category = await self.bot.modmail_guild.create_category(\n name='Modmail',\n overwrites=self.bot.overwrites(ctx)\n )\n\n await category.edit(position=0)\n\n log_channel = await self.bot.modmail_guild.create_text_channel(\n name='bot-logs', category=category\n )\n\n embed = discord.Embed(\n title='Friendly Reminder:',\n description='You may use the `config set log_channel_id '\n '` command to set up a custom log channel'\n ', then you can delete the default '\n f'{log_channel.mention} channel.',\n color=self.bot.main_color\n )\n\n embed.set_footer(text=f'Type \"{self.bot.prefix}help\" '\n 'for a complete list of commands.')\n await log_channel.send(embed=embed)\n\n self.bot.config['main_category_id'] = category.id\n self.bot.config['log_channel_id'] = log_channel.id\n\n await self.bot.config.update()\n await ctx.send('Successfully set up server.')\n\n @commands.group()\n @checks.has_permissions(manage_messages=True)\n async def snippets(self, ctx):\n \"\"\"Returns a list of snippets that are currently set.\"\"\"\n if ctx.invoked_subcommand is not None:\n return\n\n embeds = []\n\n if self.bot.snippets:\n embed = discord.Embed(color=self.bot.main_color,\n description='Here is a list of snippets '\n 'that are currently configured.')\n else:\n embed = discord.Embed(\n color=discord.Color.red(),\n description='You dont have any snippets at the moment.'\n )\n embed.set_footer(\n text=f'Do {self.bot.prefix}help snippets for more commands.'\n )\n\n embed.set_author(name='Snippets', icon_url=ctx.guild.icon_url)\n embeds.append(embed)\n\n for name, value in self.bot.snippets.items():\n if len(embed.fields) == 5:\n embed = discord.Embed(color=self.bot.main_color,\n description=embed.description)\n embed.set_author(name='Snippets', icon_url=ctx.guild.icon_url)\n embeds.append(embed)\n embed.add_field(name=name, value=value, inline=False)\n\n session = PaginatorSession(ctx, *embeds)\n await session.run()\n\n @snippets.command(name='add')\n @checks.has_permissions(manage_messages=True)\n async def add_(self, ctx, name: str.lower, *, value):\n \"\"\"Add a snippet to the bot config.\"\"\"\n if 'snippets' not in self.bot.config.cache:\n self.bot.config['snippets'] = {}\n\n self.bot.config.snippets[name] = value\n await self.bot.config.update()\n\n embed = discord.Embed(\n title='Added snippet',\n color=self.bot.main_color,\n description=f'`{name}` points to: {value}'\n )\n\n await ctx.send(embed=embed)\n\n @snippets.command(name='del')\n @checks.has_permissions(manage_messages=True)\n async def del_(self, ctx, *, name: str.lower):\n \"\"\"Removes a snippet from bot config.\"\"\"\n\n if self.bot.config.snippets.get(name):\n 
embed = discord.Embed(\n title='Removed snippet',\n color=self.bot.main_color,\n description=f'`{name}` no longer exists.'\n )\n del self.bot.config['snippets'][name]\n await self.bot.config.update()\n\n else:\n embed = discord.Embed(\n title='Error',\n color=discord.Color.red(),\n description=f'Snippet `{name}` does not exist.'\n )\n\n await ctx.send(embed=embed)\n\n @commands.command()\n @checks.has_permissions(manage_messages=True)\n async def move(self, ctx, *, category: discord.CategoryChannel):\n \"\"\"Moves a thread to a specified category.\"\"\"\n thread = ctx.thread\n if not thread:\n embed = discord.Embed(\n title='Error',\n description='This is not a Modmail thread.',\n color=discord.Color.red()\n )\n return await ctx.send(embed=embed)\n\n await thread.channel.edit(category=category, sync_permissions=True)\n await ctx.message.add_reaction('✅')\n\n @staticmethod\n async def send_scheduled_close_message(ctx, after, silent=False):\n human_delta = human_timedelta(after.dt)\n\n silent = '*silently* ' if silent else ''\n\n embed = discord.Embed(\n title='Scheduled close',\n description=f'This thread will close {silent}in {human_delta}.',\n color=discord.Color.red()\n )\n\n if after.arg and not silent:\n embed.add_field(name='Message', value=after.arg)\n\n embed.set_footer(text='Closing will be cancelled '\n 'if a thread message is sent.')\n embed.timestamp = after.dt\n\n await ctx.send(embed=embed)\n\n @commands.command(usage='[after] [close message]')\n @checks.thread_only()\n async def close(self, ctx, *, after: UserFriendlyTime = None):\n \"\"\"\n Close the current thread.\n\n Close after a period of time:\n - `close in 5 hours`\n - `close 2m30s`\n\n Custom close messages:\n - `close 2 hours The issue has been resolved.`\n - `close We will contact you once we find out more.`\n\n Silently close a thread (no message)\n - `close silently`\n - `close in 10m silently`\n\n Stop a thread from closing:\n - `close cancel`\n \"\"\"\n\n thread = ctx.thread\n\n now = datetime.utcnow()\n\n close_after = (after.dt - now).total_seconds() if after else 0\n message = after.arg if after else None\n silent = str(message).lower() in {'silent', 'silently'}\n cancel = str(message).lower() == 'cancel'\n\n if cancel:\n\n if thread.close_task is not None:\n await thread.cancel_closure()\n embed = discord.Embed(color=discord.Color.red(),\n description='Scheduled close '\n 'has been cancelled.')\n else:\n embed = discord.Embed(\n color=discord.Color.red(),\n description='This thread has not already '\n 'been scheduled to close.'\n )\n\n return await ctx.send(embed=embed)\n\n if after and after.dt > now:\n await self.send_scheduled_close_message(ctx, after, silent)\n\n await thread.close(\n closer=ctx.author,\n after=close_after,\n message=message,\n silent=silent,\n )\n\n @commands.command(aliases=['alert'])\n @checks.thread_only()\n async def notify(self, ctx, *, role=None):\n \"\"\"\n Notify a given role or yourself to the next thread message received.\n\n Once a thread message is received you will be pinged once only.\n \"\"\"\n thread = ctx.thread\n\n if not role:\n mention = ctx.author.mention\n elif role.lower() in ('here', 'everyone'):\n mention = '@' + role\n else:\n converter = commands.RoleConverter()\n role = await converter.convert(ctx, role)\n mention = role.mention\n\n if str(thread.id) not in self.bot.config['notification_squad']:\n self.bot.config['notification_squad'][str(thread.id)] = []\n\n mentions = self.bot.config['notification_squad'][str(thread.id)]\n\n if mention in mentions:\n embed 
= discord.Embed(color=discord.Color.red(),\n description=f'{mention} is already '\n 'going to be mentioned.')\n else:\n mentions.append(mention)\n await self.bot.config.update()\n embed = discord.Embed(color=self.bot.main_color,\n description=f'{mention} will be mentioned '\n 'on the next message received.')\n return await ctx.send(embed=embed)\n\n @commands.command(aliases=['sub'])\n @checks.thread_only()\n async def subscribe(self, ctx, *, role=None):\n \"\"\"\n Notify yourself or a given role for every thread message received.\n\n You will be pinged for every thread message\n received until you unsubscribe.\n \"\"\"\n thread = ctx.thread\n\n if not role:\n mention = ctx.author.mention\n elif role.lower() in ('here', 'everyone'):\n mention = '@' + role\n else:\n converter = commands.RoleConverter()\n role = await converter.convert(ctx, role)\n mention = role.mention\n\n if str(thread.id) not in self.bot.config['subscriptions']:\n self.bot.config['subscriptions'][str(thread.id)] = []\n\n mentions = self.bot.config['subscriptions'][str(thread.id)]\n\n if mention in mentions:\n embed = discord.Embed(color=discord.Color.red(),\n description=f'{mention} is already '\n 'subscribed to this thread.')\n else:\n mentions.append(mention)\n await self.bot.config.update()\n embed = discord.Embed(\n color=self.bot.main_color,\n description=f'{mention} will now be '\n 'notified of all messages received.'\n )\n return await ctx.send(embed=embed)\n\n @commands.command(aliases=['unsub'])\n @checks.thread_only()\n async def unsubscribe(self, ctx, *, role=None):\n \"\"\"Unsubscribe yourself or a given role from a thread.\"\"\"\n thread = ctx.thread\n\n if not role:\n mention = ctx.author.mention\n elif role.lower() in ('here', 'everyone'):\n mention = '@' + role\n else:\n converter = commands.RoleConverter()\n role = await converter.convert(ctx, role)\n mention = role.mention\n\n if str(thread.id) not in self.bot.config['subscriptions']:\n self.bot.config['subscriptions'][str(thread.id)] = []\n\n mentions = self.bot.config['subscriptions'][str(thread.id)]\n\n if mention not in mentions:\n embed = discord.Embed(color=discord.Color.red(),\n description=f'{mention} is not already '\n 'subscribed to this thread.')\n else:\n mentions.remove(mention)\n await self.bot.config.update()\n embed = discord.Embed(color=self.bot.main_color,\n description=f'{mention} is now unsubscribed '\n 'to this thread.')\n return await ctx.send(embed=embed)\n\n @commands.command()\n @checks.thread_only()\n async def nsfw(self, ctx):\n \"\"\"Flags a Modmail thread as nsfw.\"\"\"\n await ctx.channel.edit(nsfw=True)\n await ctx.message.add_reaction('✅')\n\n @commands.command()\n @checks.thread_only()\n async def loglink(self, ctx):\n \"\"\"Return the link to the current thread's logs.\"\"\"\n log_link = await self.bot.api.get_log_link(ctx.channel.id)\n await ctx.send(\n embed=discord.Embed(\n color=self.bot.main_color,\n description=log_link\n )\n )\n\n def format_log_embeds(self, logs, avatar_url):\n embeds = []\n logs = tuple(logs)\n title = f'Total Results Found ({len(logs)})'\n\n for entry in logs:\n\n key = entry['key']\n\n created_at = parser.parse(entry['created_at'])\n\n log_url = self.bot.config.log_url.strip('/') + f'/logs/{key}'\n\n username = entry['recipient']['name'] + '#'\n username += entry['recipient']['discriminator']\n\n embed = discord.Embed(color=self.bot.main_color,\n timestamp=created_at)\n embed.set_author(name=f'{title} - {username}',\n icon_url=avatar_url,\n url=log_url)\n embed.url = log_url\n 
embed.add_field(name='Created',\n value=duration(created_at, now=datetime.utcnow()))\n embed.add_field(name='Closed By',\n value=f\"<@{entry['closer']['id']}>\")\n\n if entry['recipient']['id'] != entry['creator']['id']:\n embed.add_field(name='Created by',\n value=f\"<@{entry['creator']['id']}>\")\n\n embed.add_field(name='Preview',\n value=format_preview(entry['messages']),\n inline=False)\n embed.add_field(name='Link', value=log_url)\n embed.set_footer(\n text='Recipient ID: ' + str(entry['recipient']['id'])\n )\n embeds.append(embed)\n return embeds\n\n @commands.group(invoke_without_command=True)\n @checks.has_permissions(manage_messages=True)\n async def logs(self, ctx, *, member: User = None):\n \"\"\"Shows a list of previous Modmail thread logs of a member.\"\"\"\n\n await ctx.trigger_typing()\n\n if not member:\n thread = ctx.thread\n if not thread:\n raise commands.UserInputError\n user = thread.recipient\n else:\n user = member\n\n default_avatar = 'https://cdn.discordapp.com/embed/avatars/0.png'\n icon_url = getattr(user, 'avatar_url', default_avatar)\n\n logs = await self.bot.api.get_user_logs(user.id)\n\n if not any(not log['open'] for log in logs):\n embed = discord.Embed(color=discord.Color.red(),\n description='This user does not '\n 'have any previous logs.')\n return await ctx.send(embed=embed)\n\n logs = reversed([e for e in logs if not e['open']])\n\n embeds = self.format_log_embeds(logs, avatar_url=icon_url)\n\n session = PaginatorSession(ctx, *embeds)\n await session.run()\n\n @logs.command(name='closed-by')\n @checks.has_permissions(manage_messages=True)\n async def closed_by(self, ctx, *, user: User = None):\n \"\"\"Returns all logs closed by a user.\"\"\"\n user = user or ctx.author\n\n query = {\n 'guild_id': str(self.bot.guild_id),\n 'open': False,\n 'closer.id': str(user.id)\n }\n\n projection = {\n 'messages': {'$slice': 5}\n }\n\n entries = await self.bot.db.logs.find(query, projection).to_list(None)\n\n embeds = self.format_log_embeds(entries,\n avatar_url=self.bot.guild.icon_url)\n\n if not embeds:\n embed = discord.Embed(\n color=discord.Color.red(),\n description='No log entries have been found for that query'\n )\n return await ctx.send(embed=embed)\n\n session = PaginatorSession(ctx, *embeds)\n await session.run()\n\n @logs.command(name='search')\n @checks.has_permissions(manage_messages=True)\n async def search(self, ctx, limit: Optional[int] = None, *, query):\n \"\"\"Searches all logs for a message that contains your query.\"\"\"\n\n await ctx.trigger_typing()\n\n query = {\n 'guild_id': str(self.bot.guild_id),\n 'open': False,\n '$text': {\n '$search': f'\"{query}\"'\n }\n }\n\n projection = {\n 'messages': {'$slice': 5}\n }\n\n entries = await self.bot.db.logs.find(query, projection).to_list(limit)\n\n embeds = self.format_log_embeds(entries,\n avatar_url=self.bot.guild.icon_url)\n\n if not embeds:\n embed = discord.Embed(\n color=discord.Color.red(),\n description='No log entries have been found for that query'\n )\n return await ctx.send(embed=embed)\n\n session = PaginatorSession(ctx, *embeds)\n await session.run()\n\n @commands.command()\n @checks.thread_only()\n async def reply(self, ctx, *, msg=''):\n \"\"\"Reply to users using this command.\n\n Supports attachments and images as well as\n automatically embedding image URLs.\n \"\"\"\n ctx.message.content = msg\n async with ctx.typing():\n await ctx.thread.reply(ctx.message)\n\n @commands.command()\n @checks.thread_only()\n async def anonreply(self, ctx, *, msg=''):\n \"\"\"Reply to a thread 
anonymously.\n\n You can edit the anonymous user's name,\n avatar and tag using the config command.\n\n Edit the `anon_username`, `anon_avatar_url`\n and `anon_tag` config variables to do so.\n \"\"\"\n ctx.message.content = msg\n async with ctx.typing():\n await ctx.thread.reply(ctx.message, anonymous=True)\n\n @commands.command()\n @checks.thread_only()\n async def note(self, ctx, *, msg=''):\n \"\"\"Take a note about the current thread, useful for noting context.\"\"\"\n ctx.message.content = msg\n async with ctx.typing():\n await ctx.thread.note(ctx.message)\n\n @commands.command()\n @checks.thread_only()\n async def edit(self, ctx, message_id: Optional[int] = None,\n *, new_message):\n \"\"\"Edit a message that was sent using the reply command.\n\n If no `message_id` is provided, the\n last message sent by a mod will be edited.\n\n `[message_id]` the id of the message that you want to edit.\n `new_message` is the new message that will be edited in.\n \"\"\"\n thread = ctx.thread\n\n linked_message_id = None\n\n async for msg in ctx.channel.history():\n if message_id is None and msg.embeds:\n embed = msg.embeds[0]\n if isinstance(self.bot.mod_color, discord.Color):\n mod_color = self.bot.mod_color.value\n else:\n mod_color = self.bot.mod_color\n if embed.color.value != mod_color or not embed.author.url:\n continue\n # TODO: use regex to find the linked message id\n linked_message_id = str(embed.author.url).split('/')[-1]\n break\n elif message_id and msg.id == message_id:\n url = msg.embeds[0].author.url\n linked_message_id = str(url).split('/')[-1]\n break\n\n if not linked_message_id:\n raise commands.UserInputError\n\n await asyncio.gather(\n thread.edit_message(linked_message_id, new_message),\n self.bot.api.edit_message(linked_message_id, new_message)\n )\n\n await ctx.message.add_reaction('✅')\n\n @commands.command()\n @trigger_typing\n @checks.has_permissions(manage_messages=True)\n async def contact(self, ctx,\n category: Optional[discord.CategoryChannel] = None, *,\n user: Union[discord.Member, discord.User]):\n \"\"\"Create a thread with a specified member.\n\n If the optional category argument is passed, the thread\n will be created in the specified category.\n \"\"\"\n\n if user.bot:\n embed = discord.Embed(\n color=discord.Color.red(),\n description='Cannot start a thread with a bot.'\n )\n return await ctx.send(embed=embed)\n\n exists = await self.bot.threads.find(recipient=user)\n if exists:\n embed = discord.Embed(\n color=discord.Color.red(),\n description='A thread for this user already '\n f'exists in {exists.channel.mention}.'\n )\n\n else:\n thread = self.bot.threads.create(user, creator=ctx.author,\n category=category)\n await thread.wait_until_ready()\n embed = discord.Embed(\n title='Created thread',\n description=f'Thread started in {thread.channel.mention} '\n f'for {user.mention}.',\n color=self.bot.main_color\n )\n\n await ctx.send(embed=embed)\n\n @commands.command()\n @trigger_typing\n @checks.has_permissions(kick_members=True)\n async def blocked(self, ctx):\n \"\"\"Returns a list of blocked users\"\"\"\n embed = discord.Embed(title='Blocked Users',\n color=self.bot.main_color,\n description='Here is a list of blocked users.')\n\n users = []\n not_reachable = []\n\n for id_, reason in self.bot.blocked_users.items():\n user = self.bot.get_user(int(id_))\n if user:\n users.append((user, reason))\n else:\n not_reachable.append((id_, reason))\n\n if users:\n val = '\\n'.join(u.mention + (f' - `{r}`' if r else '')\n for u, r in users)\n 
embed.add_field(name='Currently Known', value=val)\n if not_reachable:\n val = '\\n'.join(f'`{i}`' + (f' - `{r}`' if r else '')\n for i, r in not_reachable)\n embed.add_field(name='Unknown', value=val, inline=False)\n\n if not users and not not_reachable:\n embed.description = 'Currently there are no blocked users.'\n\n await ctx.send(embed=embed)\n\n @commands.command()\n @trigger_typing\n @checks.has_permissions(kick_members=True)\n async def block(self, ctx, user: Optional[User] = None, *,\n after: UserFriendlyTime = None):\n \"\"\"\n Block a user from using Modmail.\n\n Note: reasons that start with \"System Message: \" are reserved for internal\n use only.\n \"\"\"\n reason = ''\n\n if user is None:\n thread = ctx.thread\n if thread:\n user = thread.recipient\n else:\n raise commands.UserInputError\n\n if after is not None:\n reason = after.arg\n if reason.startswith('System Message: '):\n raise commands.UserInputError\n elif re.search(r'%(.+?)%$', reason) is not None:\n raise commands.UserInputError\n elif after.dt > after.now:\n reason = f'{reason} %{after.dt.isoformat()}%'\n\n if not reason:\n reason = None\n\n mention = user.mention if hasattr(user, 'mention') else f'`{user.id}`'\n\n extend = f' for `{reason}`' if reason is not None else ''\n msg = self.bot.blocked_users.get(str(user.id))\n if msg is None:\n msg = ''\n\n if str(user.id) not in self.bot.blocked_users or extend or msg.startswith('System Message: '):\n if str(user.id) in self.bot.blocked_users:\n\n old_reason = msg.strip().rstrip('.') or 'no reason'\n embed = discord.Embed(\n title='Success',\n description=f'{mention} was previously blocked for '\n f'\"{old_reason}\". {mention} is now blocked{extend}.',\n color=self.bot.main_color\n )\n else:\n embed = discord.Embed(\n title='Success',\n color=self.bot.main_color,\n description=f'{mention} is now blocked{extend}.'\n )\n self.bot.config.blocked[str(user.id)] = reason\n await self.bot.config.update()\n else:\n embed = discord.Embed(\n title='Error',\n color=discord.Color.red(),\n description=f'{mention} is already blocked.'\n )\n\n return await ctx.send(embed=embed)\n\n @commands.command()\n @trigger_typing\n @checks.has_permissions(kick_members=True)\n async def unblock(self, ctx, *, user: User = None):\n \"\"\"\n Unblocks a user from using Modmail.\n\n Note: reasons start with \"System Message: \" are reserved for internal\n use only.\n \"\"\"\n\n if user is None:\n thread = ctx.thread\n if thread:\n user = thread.recipient\n else:\n raise commands.UserInputError\n\n mention = user.mention if hasattr(user, 'mention') else f'`{user.id}`'\n\n if str(user.id) in self.bot.blocked_users:\n msg = self.bot.blocked_users.get(str(user.id))\n if msg is None:\n msg = ''\n del self.bot.config.blocked[str(user.id)]\n await self.bot.config.update()\n\n if msg.startswith('System Message: '):\n # If the user is blocked internally (for example: below minimum account age)\n # Show an extended message stating the original internal message\n reason = msg[16:].strip().rstrip('.') or 'no reason'\n embed = discord.Embed(\n title='Success',\n description=f'{mention} was previously blocked internally due to '\n f'\"{reason}\". 
{mention} is no longer blocked.',\n color=self.bot.main_color\n )\n else:\n embed = discord.Embed(\n title='Success',\n color=self.bot.main_color,\n description=f'{mention} is no longer blocked.'\n )\n else:\n embed = discord.Embed(\n title='Error',\n description=f'{mention} is not blocked.',\n color=discord.Color.red()\n )\n\n return await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(Modmail(bot))\n","sub_path":"cogs/modmail.py","file_name":"modmail.py","file_ext":"py","file_size_in_byte":26910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"405730798","text":"\"\"\"\ncompute the number of inversions of the given array\nrunning time is O(nlogn)\nusing divide and conquer\n\"\"\"\n\n\ndef count_split_inv(array, left, right):\n count = 0\n i = j = 0\n length = len(left) + len(right)\n # sentinal variable\n left.append(float('inf'))\n right.append(float('inf'))\n\n for k in range(length):\n if left[i] < right[j]:\n array[k] = left[i]\n i += 1\n else:\n array[k] = right[j]\n # len(left) counts the additional sentinal 'inf'\n count += len(left) - i - 1\n j += 1\n return count\n\n\ndef count_inversion(array):\n \"\"\"divide and conquer to count the inversion in an array\"\"\"\n if len(array) == 1:\n return 0\n\n mid = len(array) // 2\n left = array[:mid]\n right = array[mid:]\n a = count_inversion(left)\n b = count_inversion(right)\n c = count_split_inv(array, left, right)\n return a + b + c\n\n\ndef read_input():\n with open('IntegerArray.txt', 'r') as f:\n while True:\n try:\n num = int(f.readline().strip('\\n'))\n yield num\n except ValueError:\n break\n\n\nif __name__ == '__main__':\n with open('IntegerArray.txt', 'r') as f:\n inputs = list()\n for line in f.readlines():\n inputs.append(int(line))\n\n print(len(inputs))\n res = count_inversion(inputs)\n print(res) # 2407905288\n","sub_path":"count_inversion.py","file_name":"count_inversion.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"403614310","text":"import os\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import scoped_session, sessionmaker\r\nfrom uti.confi import Var\r\nimport motor.motor_asyncio\r\nmongo_dbb = motor.motor_asyncio.AsyncIOMotorClient(Var.MONGO_DB)\r\ndbb = mongo_dbb[\"KING\"]\r\ndef start() -> scoped_session:\r\n engine = create_engine(Var.DB_URL)\r\n BASE.metadata.bind = engine\r\n BASE.metadata.create_all(engine)\r\n return scoped_session(sessionmaker(bind=engine, autoflush=False))\r\n\r\ndef start() -> scoped_session:\r\n dbi_url=Var.DB_URL\r\n engine = create_engine(dbi_url)\r\n BASE.metadata.bind = engine\r\n BASE.metadata.create_all(engine)\r\n return scoped_session(sessionmaker(bind=engine, autoflush=False))\r\n\r\ntry:\r\n BASE = declarative_base()\r\n SESSION = start()\r\nexcept AttributeError as e:\r\n print(\r\n \"DB_URI is not configured. Features depending on the database might have issues.\"\r\n )\r\n print(str(e))\r\n","sub_path":"sql_helper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"144153993","text":"import unittest\nimport os\nimport sys\nimport subprocess\nimport numpy\nfrom util.full import matrix, unit\nfrom . import two\nfrom two import vb\n\nclass TestBase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n root, ext = n, e = os.path.splitext(__file__)\n cls.base_dir = root + \".d\"\n \nclass TestH2O(TestBase):\n\n @classmethod\n def setUpClass(cls):\n super(TestH2O, cls).setUpClass()\n cls.subdir = \"H2O\"\n cls.dal = \"hf\"\n cls.mol = \"H2O_STO3G\"\n cls.filename = \"%s_%s.AOTWOINT\" % (cls.dal, cls.mol)\n cls.tmpdir = os.path.join(cls.base_dir, cls.subdir)\n cls.aotwoint = os.path.join(cls.tmpdir, cls.filename)\n if not os.path.exists(cls.aotwoint):\n os.chdir(cls.tmpdir)\n args = ['dalton', '-get', 'AOTWOINT', cls.dal, cls.mol]\n #subprocess.call(args)\n\n def setUp(self):\n numpy.random.seed(0)\n da = numpy.random.random((7, 2)).view(matrix)\n db = numpy.random.random((7, 2)).view(matrix)\n Da = numpy.random.random((7, 7)).view(matrix)\n Db = numpy.random.random((7, 7)).view(matrix)\n \n self.d = (da, db)\n self.dT = (da.T, db.T)\n self.D = (Da, Db)\n self.H1 = numpy.load(os.path.join(self.tmpdir, 'H1.npy'))\n self.H2 = numpy.load(os.path.join(self.tmpdir, 'H2.npy'))\n\n\n def test_vb_transform(self):\n H = vb.vb_transform(self.d, self.D, filename=self.aotwoint)\n numpy.testing.assert_almost_equal(H, self.H1)\n\n def test_vb_transform2(self):\n H = vb.vb_transform2(self.dT, self.d, self.D, self.D, filename=self.aotwoint)\n numpy.testing.assert_almost_equal(H, self.H2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n \n","sub_path":"tests/test_vb.py","file_name":"test_vb.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"639957660","text":"#队列是先进先出\nimport Node\nclass Queue:\n def __init__(self):\n self.first = Node.Node()\n self.N=0\n self.last = Node.Node()\n\n def isEmpty(self):\n #print(self.first)\n return self.N == 0\n\n def size(self):\n return self.N\n\n def enqueue(self,item):\n oldlast = self.last\n self.last = Node.Node()\n self.last.item = item\n if self.isEmpty():\n \n self.first = self.last\n else:\n #不知道有什么用\n oldlast.next = self.last\n self.N+=1\n\n def dequeue(self):\n item = self.first.item\n self.first = self.first.next\n if self.isEmpty():\n self.last.item = None\n self.N-=1\n return item\n \n \n","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"140790083","text":"\"\"\"\nDay 4 - Testing real life classification\n\"\"\"\n\nimport keras\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils as utils\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom random import shuffle\nimport numpy as np\n\nimport csv\n\n# Reproducibility\nseed = 7\nnp.random.seed(seed)\n\n# Loading data\ndataset = []\nx_train = []\ny_train = []\nx_test = []\ny_test = []\nremove_header = True\ni = 0\n\nwith open('train-test-logan.csv', newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in file_reader:\n if not(i == 0 and remove_header):\n dataset.append(row)\n i = i + 1\n\ndata = []\ntarget = []\nfor i in dataset:\n data.append(i[0: len(i) - 1])\n target.append(i[len(i) - 1])\n\n# Fix classes\nle = LabelEncoder()\ntarget = le.fit_transform(target)\n\n# Shuffle\nc = list(zip(data, target))\nshuffle(c)\ndata, target = zip(*c)\n\n# Split\nnumTotal = len(data)\npercent = round(numTotal * 0.8)\n\ndataTrain = data[0:percent]\ntargetTrain = target[0:percent]\ndataTest = data[percent+1:]\ntargetTest = target[percent+1:]\n\n# Design the MLP\nmodel = Sequential()\nmodel.add(Dense(64, activation='relu', input_dim=16))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(64, activation='sigmoid'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(64, activation='sigmoid'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='sigmoid'))\n\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=sgd,\n metrics=['accuracy'])\n\n# Train the MLP\nmodel.fit(np.asarray(dataTrain),\n np.asarray(targetTrain),\n epochs=100,\n verbose=1,\n batch_size=10)\n\n# Apply test on the model\nscore = model.evaluate(np.asarray(dataTest), np.asarray(targetTest))\nprint(score)\n\n","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"587959925","text":"# -*- coding: utf-8 -*-\n'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.\nCopyright (C) 2016, Caleb Bell \n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .'''\n\nfrom __future__ import division\nfrom fluids import *\nfrom numpy.testing import assert_allclose\nimport pytest\n\n\ndef test_mixing():\n t1 = agitator_time_homogeneous(D=36*.0254, N=56/60., P=957., T=1.83, H=1.83, mu=0.018, rho=1020, homogeneity=.995)\n t2 = agitator_time_homogeneous(D=1, N=125/60., P=298., T=3, H=2.5, mu=.5, rho=980, homogeneity=.95)\n t3 = agitator_time_homogeneous(N=125/60., P=298., T=3, H=2.5, mu=.5, rho=980, homogeneity=.95)\n\n assert_allclose([t1, t2, t3], [15.143198226374668, 67.7575069865228, 51.70865552491966])\n\n Kp = Kp_helical_ribbon_Rieger(D=1.9, h=1.9, nb=2, pitch=1.9, width=.19, T=2)\n assert_allclose(Kp, 357.39749163259256)\n\n t = time_helical_ribbon_Grenville(357.4, 4/60.)\n assert_allclose(t, 650.980654028894)\n\n CoV = size_tee(Q1=11.7, Q2=2.74, D=0.762, D2=None, n=1, pipe_diameters=5)\n assert_allclose(CoV, 0.2940930233038544)\n\n CoV = COV_motionless_mixer(Ki=.33, Q1=11.7, Q2=2.74, pipe_diameters=4.74/.762)\n assert_allclose(CoV, 0.0020900028665727685)\n\n K = K_motionless_mixer(K=150, L=.762*5, D=.762, fd=.01)\n assert_allclose(K, 7.5)\n","sub_path":"tests/test_mixing.py","file_name":"test_mixing.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"468669700","text":"# _*_ coding: utf-8 _*_\r\n# 程式 13-10 (Python 3 Version)\r\nimport sys, os, glob\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\ntext_msg = 'Hello, world!'\r\nim = Image.open('sample_s.jpg')\r\nim_w, im_h = im.size\r\n\r\nfont = ImageFont.truetype('font/Arimo-Bold.ttf', 80)\r\ndw = ImageDraw.Draw(im)\r\nfn_w, fn_h = dw.textsize(text_msg, font=font)\r\nx = im_w/2-fn_w/2\r\ny = im_h/2-fn_h/2\r\ndw.text((x+5, y+5), text_msg, font=font, fill=(25,25,25))\r\ndw.text((x, y), text_msg, font=font, fill=(128,255,255))\r\nim.show()","sub_path":"books/Python程式設計實務_博碩/書中附檔/13-11.py","file_name":"13-11.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"507187950","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom cookielib import Cookie\nfrom util import hook, http\n\n\n@hook.regex(r'steamcommunity.com/(profiles)/([0-9]+)')\n@hook.regex(r'steamcommunity.com/(id)/([A-Za-z0-9]+)')\ndef link_steam_user(match):\n if match.group(1) == 'profiles':\n url = 'http://steamcommunity.com/profiles/%d' % match.group(2)\n elif match.group(1) == 'id':\n url = 'http://steamcommunity.com/id/%s' % match.group(2)\n else:\n return None\n\n try:\n doc = http.get_html(url)\n except http.HTTPError:\n return None\n\n persona_name_elements = doc.find_class('actual_persona_name')\n persona_level_elements = doc.find_class('persona_level')\n\n if len(persona_name_elements) > 0:\n name = persona_name_elements[0].text_content().strip()\n else:\n name = 'Unknown'\n\n if len(persona_level_elements) > 0:\n level = persona_level_elements[0].text_content().strip()\n else:\n level = 'Unknown Level'\n\n game_status_elements = doc.find_class('profile_in_game_header')\n game_name_elements = doc.find_class('profile_in_game_name')\n\n if len(game_status_elements) > 0:\n game_status = game_status_elements[0].text_content().strip()\n\n if len(game_name_elements) > 0:\n game_name = game_name_elements[0].text_content().strip()\n\n if game_status == 'Currently Offline':\n game_status = game_name\n\n if game_status == 'Currently In-Game':\n game_status = 'Playing %s' % game_name\n\n message = \"\\x02%s\\x02 - %s - %s\" % (name, game_status, level)\n\n bans = doc.find_class('profile_ban')\n\n if (len(bans) > 0):\n bans = bans[0].text_content().split('|')[0].strip()\n\n message += \" - \\x0304%s\" % bans\n\n return message\n\n\n@hook.regex(r'store.steampowered.com/app/([0-9]+)')\ndef link_steam_app(match):\n # Cookie(\n # version, name, value, port, port_specified, domain, domain_specified,\n # domain_initial_dot, path, path_specified, secure, expiry, comment, comment_url, rest)\n age_gate_cookie = Cookie(\n None, 'birthtime', '473403601', '80', '80', 'store.steampowered.com', 'store.steampowered.com',\n None, '/', '/', False, '2147483600', None, None, None, None\n )\n\n mature_content_cookie = Cookie(\n None, 'mature_content', '1', '80', '80', 'store.steampowered.com', 'store.steampowered.com',\n None, '/', '/', False, '2147483600', None, None, None, None\n )\n\n http.jar.set_cookie(age_gate_cookie)\n http.jar.set_cookie(mature_content_cookie)\n\n try:\n doc = http.get_html(\n 'http://store.steampowered.com/app/%d' % int(match.group(1)),\n cookies=True\n )\n except http.HTTPError:\n return None\n\n try:\n title = doc.find_class('apphub_AppName')[0].text_content().strip()\n except:\n return 'Nothing found'\n\n try:\n rating = doc.find_class('game_review_summary')[0].text_content().strip()\n except:\n rating = 'Unknown'\n\n price = 'Unknown'\n\n game_purchase_elements = doc.find_class('game_area_purchase_game_wrapper')\n\n if len(game_purchase_elements) > 0:\n discount_price_elements = game_purchase_elements[0].find_class('discount_final_price')\n game_purchase_price = game_purchase_elements[0].find_class('game_purchase_price')\n\n if len(discount_price_elements) > 0:\n original_price_elements = game_purchase_elements[0].find_class('discount_original_price')\n discount_percent_elements = game_purchase_elements[0].find_class('discount_pct')\n\n discount_price = discount_price_elements[0].text_content().strip()\n\n if len(original_price_elements) > 0:\n full_price = original_price_elements[0].text_content().strip()\n else:\n full_price = 'something'\n\n if 
len(discount_percent_elements) > 0:\n discount_percent = discount_percent_elements[0].text_content().strip()\n else:\n discount_percent = 'A'\n\n price = '\\x0307%s\\x0f (%s discount off %s)' % (discount_price, discount_percent, full_price)\n elif len(game_purchase_price) > 0:\n price = game_purchase_price[0].text_content().strip()\n\n # Limit to only 5 tags\n tags_elements = doc.find_class('app_tag')[0:5]\n\n tags = [tag.text_content().strip() for tag in tags_elements if not 'add_button' in tag.get('class')]\n\n if len(tags) > 0:\n tags = ' - %s' % (', '.join(tags))\n else:\n tags = ''\n\n return '\\x02%s\\x02 - %s - User rating is %s%s' % (title, price, rating, tags)\n\n\n@hook.command\ndef steam(inp):\n try:\n doc = http.get_html(\n 'http://store.steampowered.com/search',\n cookies=True,\n term=inp\n )\n except http.HTTPError as e:\n return None\n\n search_result_elements = doc.find_class('search_result_row')\n\n if len(search_result_elements) == 0:\n return 'app not found'\n\n app_url = search_result_elements[0].attrib['href'].strip()\n\n match = re.search(r'store.steampowered.com/app/([0-9]+)', app_url)\n\n if not match:\n return None\n\n return '%s - https://store.steampowered.com/app/%d' % (link_steam_app(match), int(match.group(1)))\n\n\n# old bad code\n# from util import hook, http\n\n\n# @hook.command(autohelp=False)\n# def steam(inp, say=None):\n# \".steam - list the current daily deal on Steam\"\n# url = 'http://store.steampowered.com/feeds/news.xml'\n# result = http.get_xml(url)\n\n# def checkoffer(offer):\n# x = 0\n# for i in offer:\n# if \"daily deal\" in i[0].text.lower():\n# try:\n# say(\"%s - %s\" % (i[0].text, i[1].text))\n# x = 1\n# break\n# except (UnicodeEncodeError, UnicodeDecodeError):\n# say(\"aaaa unicode problems again! :(\")\n# return x\n\n# if not checkoffer(result):\n# say(\"No current daily offer!\")\n\n# return\n","sub_path":"plugins/steam.py","file_name":"steam.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"396341203","text":"\"\"\"Support for Sense HAT sensors.\"\"\"\nfrom datetime import timedelta\nimport logging\nfrom pathlib import Path\n\nfrom sense_hat import SenseHat\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity\nfrom homeassistant.const import (\n CONF_DISPLAY_OPTIONS,\n CONF_NAME,\n DEVICE_CLASS_TEMPERATURE,\n PERCENTAGE,\n TEMP_CELSIUS,\n)\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.util import Throttle\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_NAME = \"sensehat\"\nCONF_IS_HAT_ATTACHED = \"is_hat_attached\"\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)\n\nSENSOR_TYPES = {\n \"temperature\": [\"temperature\", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE],\n \"humidity\": [\"humidity\", PERCENTAGE, None],\n \"pressure\": [\"pressure\", \"mb\", None],\n}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Required(CONF_DISPLAY_OPTIONS, default=list(SENSOR_TYPES)): [\n vol.In(SENSOR_TYPES)\n ],\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_IS_HAT_ATTACHED, default=True): cv.boolean,\n }\n)\n\n\ndef get_cpu_temp():\n \"\"\"Get CPU temperature.\"\"\"\n t_cpu = Path(\"/sys/class/thermal/thermal_zone0/temp\").read_text().strip()\n return float(t_cpu) * 0.001\n\n\ndef get_average(temp_base):\n \"\"\"Use moving average to get better readings.\"\"\"\n if not hasattr(get_average, \"temp\"):\n get_average.temp = [temp_base, temp_base, temp_base]\n get_average.temp[2] = get_average.temp[1]\n get_average.temp[1] = get_average.temp[0]\n get_average.temp[0] = temp_base\n temp_avg = (get_average.temp[0] + get_average.temp[1] + get_average.temp[2]) / 3\n return temp_avg\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the Sense HAT sensor platform.\"\"\"\n data = SenseHatData(config.get(CONF_IS_HAT_ATTACHED))\n dev = []\n for variable in config[CONF_DISPLAY_OPTIONS]:\n dev.append(SenseHatSensor(data, variable))\n\n add_entities(dev, True)\n\n\nclass SenseHatSensor(SensorEntity):\n \"\"\"Representation of a Sense HAT sensor.\"\"\"\n\n def __init__(self, data, sensor_types):\n \"\"\"Initialize the sensor.\"\"\"\n self.data = data\n self._name = SENSOR_TYPES[sensor_types][0]\n self._unit_of_measurement = SENSOR_TYPES[sensor_types][1]\n self.type = sensor_types\n self._state = None\n self._attr_device_class = SENSOR_TYPES[sensor_types][2]\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return self._unit_of_measurement\n\n def update(self):\n \"\"\"Get the latest data and updates the states.\"\"\"\n self.data.update()\n if not self.data.humidity:\n _LOGGER.error(\"Don't receive data\")\n return\n\n if self.type == \"temperature\":\n self._state = self.data.temperature\n if self.type == \"humidity\":\n self._state = self.data.humidity\n if self.type == \"pressure\":\n self._state = self.data.pressure\n\n\nclass SenseHatData:\n \"\"\"Get the latest data and update.\"\"\"\n\n def __init__(self, is_hat_attached):\n \"\"\"Initialize the data object.\"\"\"\n self.temperature = None\n self.humidity = None\n self.pressure = None\n self.is_hat_attached = is_hat_attached\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self):\n \"\"\"Get the latest data from Sense HAT.\"\"\"\n\n 
sense = SenseHat()\n temp_from_h = sense.get_temperature_from_humidity()\n temp_from_p = sense.get_temperature_from_pressure()\n t_total = (temp_from_h + temp_from_p) / 2\n\n if self.is_hat_attached:\n t_cpu = get_cpu_temp()\n t_correct = t_total - ((t_cpu - t_total) / 1.5)\n t_correct = get_average(t_correct)\n else:\n t_correct = get_average(t_total)\n\n self.temperature = t_correct\n self.humidity = sense.get_humidity()\n self.pressure = sense.get_pressure()\n","sub_path":"homeassistant/components/sensehat/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
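The three-sample moving average in get_average keeps its window on a function attribute, so repeated calls smooth successive readings; a standalone illustration, assuming get_average as defined above.

for reading in (20.0, 21.0, 23.0, 22.0):
    print(round(get_average(reading), 2))
# 20.0, 20.33, 21.33, 22.0 -- each value averages the last three readings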
+{"seq_id":"630946307","text":"# _*_ coding: utf-8 _*_\n\"\"\"\n Created by Allen7D on 2018/12/4.\n\"\"\"\nfrom app.libs.swagger_filed import BodyField\nfrom app.config.setting import token_value\n\n__author__ = 'Allen7D'\n\nget_token = {\n \"parameters\": [\n {\n \"name\": \"body\",\n \"in\": \"body\",\n \"description\": '''登录的基本信息: 账号、密码、登录类型:\n - 邮箱账号登录(type:100)\n - 手机账号登录(type:101)\n - 小程序登录(type:200)\n - 微信扫码登录(type:201)''',\n \"schema\": {\n \"properties\": {\n \"account\": {\n \"type\": \"string\",\n \"description\": \"用户名(此处可以传邮箱,或者微信登录的code)\",\n \"enum\": [\"999@qq.com\", \"888@qq.com\", \"777@qq.com\"],\n \"default\": \"999@qq.com\"\n },\n \"secret\": {\n \"type\": \"string\",\n \"description\": \"密码\",\n \"enum\": [\"123456\"],\n \"default\": \"123456\"\n },\n \"type\": {\n \"type\": \"integer\",\n \"description\": \"登录方式(100: 邮箱登录; 200: 微信登录)\",\n \"enum\": [100, 200],\n \"default\": 100\n }\n }\n }\n }\n ]\n}\n\ntoken = BodyField('token', 'string', 'Token', [token_value])\naccount = BodyField('account', 'string', '用户名(此处可以传邮箱,或者微信登录的code)', [\"777@qq.com\"])\nsecret = BodyField('secret', 'string', '密码', [\"123456\"])\ntype = BodyField('type', 'integer', '登录方式', [100])\n","sub_path":"app/api_docs/v1/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"20337437","text":"# -------------------------------------------------------------------------------\n# Description: This file provides methods for removing plural copies from the\n# reuslt set. For instance, we do not want to suggest both \"book\" and \"books\".\n# We also want to avoid suggesting words which are the plural versions of words\n# found in the board. For instance, saying \"books\" and a clue when \"book\" is on\n# the board is clearly against the rules, so those words should be avoided.\n#\n# Author: Ryan Hood (ryanchristopherhood@gmail.com)\n# -------------------------------------------------------------------------------\n\nimport inflect\nimport helper_methods\n\n# This method removes plural copies from the result set.\ndef remove_plural_copies(result_set):\n # We use the inflect library to get plural forms.\n p = inflect.engine()\n\n # trouble_indices will hold the indices that need to be removed.\n trouble_indices = []\n # The first step is to figure out which entries need to be removed from the result set.\n for first_index in range(0, len(result_set)):\n # If index is in trouble indices then move along.\n if first_index in trouble_indices:\n continue\n\n # If not... start by getting the word.\n word = result_set[first_index][0]\n\n # Get the plural opposite. That is, if word is already plural, then the plural\n # opposite will be singular.\n plural_opposite = p.plural(word)\n\n # Figure out if and where the plural opposite shows up in result set.\n for second_index in range(0, len(result_set)):\n if plural_opposite == result_set[second_index][0]:\n # Then add it to trouble_indices.\n trouble_indices.append(second_index)\n\n # Now trouble_indices should contain the indices we need to remove from the result set.\n new_result_set = []\n for number in range(0, len(result_set)):\n if number not in trouble_indices:\n new_result_set.append(result_set[number])\n return new_result_set\n\n\n# This method removes the plural versions of board words from the reuslt set.\ndef remove_board_words(board, result_set):\n # As before, we use inflect to get plural forms.\n p = inflect.engine()\n\n # Get a list of all the board words.\n board_words = helper_methods.get_board_words(board)\n\n # The first step is to pluralize all of the board words.\n plural_board_words = []\n for word in board_words:\n plural_word = p.plural(word)\n plural_board_words.append(plural_word)\n\n # Now, we need to go through and make sure that none are in our result set.\n trouble_indices = []\n for plural_word in plural_board_words:\n for index in range(0, len(result_set)):\n if plural_word == result_set[index][0]:\n trouble_indices.append(index)\n\n # Second, we go through the regular board words.\n for word in board_words:\n for index in range(0, len(result_set)):\n if (word == result_set[index][0]) and (index not in trouble_indices):\n trouble_indices.append(index)\n\n # Now we have the bad indices and we can remove them from the results set.\n new_result_set = []\n for num in range(0, len(result_set)):\n if num not in trouble_indices:\n new_result_set.append(result_set[num])\n return new_result_set\n","sub_path":"plural_transformations.py","file_name":"plural_transformations.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"449557335","text":"import os\nimport shutil\nimport uuid\n\nfrom django.conf import settings\n\nPATH = os.path.split(__file__)[0]\nORIG_IMG_PATH = os.path.join(PATH, 'data')\n\n\nclass CropdusterTestCaseMediaMixin(object):\n\n def setUp(self):\n super(CropdusterTestCaseMediaMixin, self).setUp()\n\n random = uuid.uuid4().hex\n self.TEST_IMG_ROOT = os.path.join(settings.MEDIA_ROOT, random)\n self.TEST_IMG_DIR = os.path.join(self.TEST_IMG_ROOT, 'data')\n self.TEST_IMG_DIR_RELATIVE = os.path.join(random, 'data')\n\n # Create directory for test images\n shutil.copytree(ORIG_IMG_PATH, self.TEST_IMG_DIR)\n\n def tearDown(self):\n super(CropdusterTestCaseMediaMixin, self).tearDown()\n\n # Remove all generated images\n shutil.rmtree(self.TEST_IMG_ROOT, ignore_errors=True)\n\n def create_unique_image(self, image):\n image_uuid = uuid.uuid4().hex\n image_dir = os.path.join(self.TEST_IMG_DIR, image_uuid)\n\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n\n ext = os.path.splitext(image)[1]\n image_name = os.path.join(\n self.TEST_IMG_DIR_RELATIVE, image_uuid, \"original%s\" % ext)\n\n shutil.copyfile(\n os.path.join(self.TEST_IMG_DIR, image),\n os.path.join(settings.MEDIA_ROOT, image_name))\n return image_name\n","sub_path":"cropduster/tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"461210823","text":"from hearthstone.enums import BnetGameType\nfrom sqlalchemy import Date, Float, Integer, String\nfrom sqlalchemy.sql import bindparam, column, text\n\nfrom hsredshift.analytics.filters import GameType, TimeRange\nfrom hsredshift.analytics.library.base import BaseRedshiftQuery, QueryDisplayVisual\n\n\nclass CardIncludedPopularityReport(BaseRedshiftQuery):\n\tname = \"card_included_popularity_report\"\n\tenabled = True\n\trequired_parameters = []\n\tavailable_parameters = [\"TimeRange\", \"RankRange\", \"GameType\", \"Region\"]\n\tdisplay_visual = QueryDisplayVisual.TABLE\n\tstmt = text(\"\"\"\n\t\tWITH total_games AS (\n\t\t\tSELECT\n\t\t\t\tp.player_class,\n\t\t\t\tcount(*) AS total,\n\t\t\t\tcount(DISTINCT p.deck_id) AS num_distinct_decks\n\t\t\tFROM player p\n\t\t\tWHERE p.game_date BETWEEN :start_date AND :end_date\n\t\t\t\tAND p.game_type = :game_type\n\t\t\t\tAND p.region BETWEEN :min_region AND :max_region\n\t\t\t\tAND p.rank BETWEEN (CASE WHEN :game_type = 2 or :game_type = 30 THEN :min_rank ELSE -1 END) AND :max_rank\n\t\t\t\tAND p.full_deck_known\n\t\t\tGROUP BY p.player_class\n\t\t\tUNION ALL\n\t\t\tSELECT\n\t\t\t\t-1 AS player_class,\n\t\t\t\tcount(*) AS total,\n\t\t\t\tcount(DISTINCT p.deck_id) AS num_distinct_decks\n\t\t\tFROM player p\n\t\t\tWHERE p.game_date BETWEEN :start_date AND :end_date\n\t\t\t\tAND p.game_type = :game_type\n\t\t\t\tAND p.region BETWEEN :min_region AND :max_region\n\t\t\t\tAND p.rank BETWEEN (CASE WHEN :game_type = 2 or :game_type = 30 THEN :min_rank ELSE -1 END) AND :max_rank\n\t\t\t\tAND p.full_deck_known\n\t\t), player_include AS (\n\t\t\tSELECT\n\t\t\t\ti.dbf_id,\n\t\t\t\tp.player_class,\n\t\t\t\ti.count,\n\t\t\t\ti.deck_id,\n\t\t\t\tp.final_state\n\t\t\tFROM player p\n\t\t\t\tJOIN include i ON i.deck_id = p.deck_id AND p.game_date = i.game_date\n\t\t\tWHERE p.full_deck_known\n\t\t\t\tAND p.game_date BETWEEN :start_date AND :end_date\n\t\t\t\tAND i.game_date BETWEEN :start_date AND :end_date\n\t\t\t\tAND p.game_type = :game_type\n\t\t\t\tAND p.rank BETWEEN (CASE WHEN :game_type = 2 or :game_type = 30 THEN :min_rank ELSE -1 END) AND :max_rank\n\t\t\t\tAND p.region BETWEEN :min_region AND :max_region\n\t\t)\n\t\tSELECT\n\t\t\tpi.dbf_id,\n\t\t\tf_enum_name('CardClass', pi.player_class) AS player_class,\n\t\t\tavg(pi.count::decimal(5,2)) AS avg_count,\n\t\t\tcount(distinct pi.deck_id) AS num_distinct_decks,\n\t\t\tcount(*) AS times_included,\n\t\t\t(100.0 * sum(decode(pi.final_state, 4, 1, 0)) / count(*)::decimal) AS winrate,\n\t\t\t(100.0 * count(DISTINCT pi.deck_id) / max(tg.num_distinct_decks)::decimal) AS in_percent_of_decks,\n\t\t\t(100.0 * count(*) / max(tg.total)::decimal) AS popularity\n\t\tFROM player_include pi\n\t\t\tJOIN total_games tg ON tg.player_class = pi.player_class\n\t\tGROUP BY pi.dbf_id, pi.player_class\n\t\tUNION ALL\n\t\tSELECT\n\t\t\tpi.dbf_id,\n\t\t\t'ALL' AS player_class,\n\t\t\tavg(pi.count::decimal(5,2)) AS avg_count,\n\t\t\tcount(distinct pi.deck_id) AS num_distinct_decks,\n\t\t\tcount(*) AS times_included,\n\t\t\t(100.0 * sum(decode(pi.final_state, 4, 1, 0)) / count(*)::decimal) AS winrate,\n\t\t\t(100.0 * count(DISTINCT pi.deck_id) / max(tg.num_distinct_decks)::decimal) AS in_percent_of_decks,\n\t\t\t(100.0 * count(*) / max(tg.total)::decimal) AS popularity\n\t\tFROM player_include pi\n\t\t\tJOIN total_games tg ON tg.player_class = -1\n\t\tGROUP BY pi.dbf_id\n\t\tUNION ALL\n\t\tSELECT\n\t\t\t-1 AS dbf_id,\n\t\t\t'ALL' AS player_class,\n\t\t\tNULL::float AS avg_count,\n\t\t\ttg.total AS 
num_distinct_decks,\n\t\tNULL::integer AS times_included,\n\t\tNULL::float AS winrate,\n\t\tNULL::float AS in_percent_of_decks,\n\t\tNULL::float AS popularity\n\t\tFROM total_games tg\n\t\tWHERE player_class = -1\n\t\"\"\").bindparams(\n\t\tbindparam(\"start_date\", type_=Date),\n\t\tbindparam(\"end_date\", type_=Date),\n\t\tbindparam(\"game_type\", value=BnetGameType.BGT_RANKED_STANDARD.value, type_=Integer),\n\t\tbindparam(\"min_rank\", value=0, type_=Integer),\n\t\tbindparam(\"max_rank\", value=20, type_=Integer),\n\t\tbindparam(\"min_region\", value=1, type_=Integer),\n\t\tbindparam(\"max_region\", value=5, type_=Integer),\n\t).columns(\n\t\tcolumn(\"dbf_id\", Integer),\n\t\tcolumn(\"player_class\", String),\n\t\tcolumn(\"avg_count\", Float),\n\t\tcolumn(\"num_distinct_decks\", Integer),\n\t\tcolumn(\"times_included\", Integer),\n\t\tcolumn(\"winrate\", Float),\n\t\tcolumn(\"in_percent_of_decks\", Float),\n\t\tcolumn(\"popularity\", Float),\n\t)\n\n\tdef get_supported_filter_members(self, filter):\n\t\tresult = super(CardIncludedPopularityReport, self).get_supported_filter_members(filter)\n\t\tif filter == TimeRange:\n\t\t\treturn [\n\t\t\t\tTimeRange.LAST_1_DAY,\n\t\t\t\tTimeRange.LAST_3_DAYS,\n\t\t\t\tTimeRange.LAST_7_DAYS,\n\t\t\t\tTimeRange.LAST_14_DAYS,\n\t\t\t\tTimeRange.CURRENT_EXPANSION,\n\t\t\t\tTimeRange.CURRENT_PATCH,\n\t\t\t\tTimeRange.ARENA_EVENT,\n\t\t\t]\n\t\telse:\n\t\t\treturn result\n\n\tdef get_default_value_for_filter(self, filter):\n\t\tif filter == TimeRange:\n\t\t\treturn TimeRange.LAST_14_DAYS\n\t\telse:\n\t\t\treturn filter.get_default_member()\n\n\tdef filter_member_is_premium(self, filter, filter_member):\n\t\tif filter == GameType and filter_member == GameType.ARENA:\n\t\t\treturn False\n\t\telif filter == TimeRange and filter_member == TimeRange.ARENA_EVENT:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn super(CardIncludedPopularityReport, self).filter_member_is_premium(filter, filter_member)\n\n\tdef to_chart_series(self, params, result_set):\n\t\t# result maps each player class to a list of per-card stat dicts,\n\t\t# plus a metadata block carrying the total deck count\n\t\tresult = {\n\t\t\t\"metadata\": {\n\t\t\t},\n\t\t\t\"data\": {\n\t\t\t\t\"ALL\": [],\n\t\t\t\t\"DRUID\": [],\n\t\t\t\t\"HUNTER\": [],\n\t\t\t\t\"MAGE\": [],\n\t\t\t\t\"PALADIN\": [],\n\t\t\t\t\"PRIEST\": [],\n\t\t\t\t\"ROGUE\": [],\n\t\t\t\t\"SHAMAN\": [],\n\t\t\t\t\"WARLOCK\": [],\n\t\t\t\t\"WARRIOR\": [],\n\t\t\t}\n\t\t}\n\n\t\tfor row in result_set:\n\t\t\tif row[\"dbf_id\"] == -1:\n\t\t\t\tresult[\"metadata\"][\"total_played_decks_count\"] = row[\"num_distinct_decks\"]\n\t\t\t\tcontinue\n\t\t\tif row[\"player_class\"] in result[\"data\"]:\n\t\t\t\tresult[\"data\"][row[\"player_class\"]].append(dict(\n\t\t\t\t\tdbf_id=row[\"dbf_id\"],\n\t\t\t\t\tpopularity=row[\"popularity\"],\n\t\t\t\t\twinrate=row[\"winrate\"],\n\t\t\t\t\tcount=row[\"avg_count\"],\n\t\t\t\t\tdecks=row[\"num_distinct_decks\"]\n\t\t\t\t))\n\n\t\treturn result\n\n\tdef example_parameters(self):\n\t\treturn dict(\n\t\t\tTimeRange=\"LAST_14_DAYS\",\n\t\t\tRankRange=\"ALL\",\n\t\t\tGameType=\"RANKED_STANDARD\",\n\t\t)\n\n\nif __name__ == \"__main__\":\n\tfrom hsredshift.utils.runners import LocalQueryRunner\n\trunner = LocalQueryRunner(locals())\n\trunner.run()\n","sub_path":"hsredshift/analytics/library/card_included_popularity_report.py","file_name":"card_included_popularity_report.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"507598729","text":"__author__ = 'Girish'\nfrom tkinter import *\nfrom tkinter import ttk\n\nclass MainFrame:\n def __init__(self,master):\n self.frame = ttk.Frame(master)\n self.frame.pack()\n self.frame.config(width =300,height=250,relief=SOLID)\n self.button = ttk.Button(self.frame,text=\"Start\").pack()\n self.frame.config(padding=(100,75))\n\nroot = Tk(\"girish\")\nframe = MainFrame(root)\nroot.mainloop()\n","sub_path":"MainFrame.py","file_name":"MainFrame.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"562707481","text":"# -*- coding = utf-8 -*-\nclass Solution:\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return \"\"\n if len(strs) == 1:\n return strs[0]\n ans = []\n n = len(strs)\n minLen = min(len(s) for s in strs)\n for index in range(minLen):\n tmp = []\n for i in range(n):\n tmp.append(strs[i][index])\n if len(set(tmp)) == 1:\n ans.append(tmp[0])\n else:\n break\n result = \"\".join(ans)\n return result","sub_path":"字符串--最长公共前缀.py","file_name":"字符串--最长公共前缀.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"110527276","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport datetime\r\nfrom datetime import timedelta\r\nimport time\r\nimport random\r\nimport pprint\r\nimport mysql.connector\r\n\r\ndef formData(input='default', *subcase_info):\r\n# print(\"formData\")\r\n if subcase_info != ():\r\n subcase_info = subcase_info[0]\r\n case_no = subcase_info[0]\r\n defe= subcase_info[1]\r\n loc = subcase_info[2]\r\n crt = subcase_info[3].replace(\" \", \"+\")\r\n else :\r\n [case_no, defe, loc, crt] = ['', '', '', '']\r\n# print(subcase_info)\r\n# print(crt)\r\n# print([case_no, defe, loc, crt])\r\n form_data = {\r\n 'agree' : {\r\n 'ctl00$ctl00$siteMasterHolder$basicCategoryHolder$ddlLanguages' : 'en-us',\r\n 'ctl00$ctl00$siteMasterHolder$basicBodyHolder$btnAgree' : 'I+Agree'\r\n },\r\n 'search' : {\r\n 'ctl00$ctl00$siteMasterHolder$basicCategoryHolder$ddlLanguages' : 'en-us',\r\n 'ctl00$ctl00$siteMasterHolder$basicBodyHolder$caseNumber' : case_no,\r\n 'ctl00$ctl00$siteMasterHolder$basicBodyHolder$loc' : '',\r\n 'subcase' : 'Submit'\r\n },\r\n 'subcase' : {\r\n 'ctl00$ctl00$siteMasterHolder$basicCategoryHolder$ddlLanguages' : 'en-us',\r\n 'csn': case_no,\r\n 'loc': loc,\r\n 'def': defe,\r\n 'crt': crt\r\n }\r\n }\r\n return form_data[input]\r\n\r\ndef timeDelay(minimum, maximum):\r\n while True:\r\n a = random.gammavariate(3, .9)\r\n if a>=minimum and aAn Error Has Occured.
\\r\\n \\r\\n An unexpected error occured on our website. The website administrator has been notified.\\r\\n
\r\n return state, soup\r\n\r\ndef eachCaseLoop(state, soup, subcase_info):\r\n state = 'new form'\r\n while (state != 'full results' and state!= \"error\"):\r\n#will this always terminate at selection. even if only one case result?\r\n req = nextRequest(state, soup, subcase_info)\r\n timeDelay(.5, 1.5)\r\n print(state)\r\n response = sendNext(req)\r\n state, soup = parseResponse(response)\r\n return response, state, soup\r\n\r\n#need to clean up subcase table - some duplicates entered \r\n# also need to clean up compound code section entries\r\n### ideas\r\n# rank cities by municipal code violation\r\n# most common code violations\r\n# statutes most often invoked\r\n# proportions of pleas & dispositions, perhaps by location or code section\r\n# oddball code violations\r\n# jury trials\r\n# cases with most defendants\r\n# defendants with most charges\r\n# \r\n###\r\ndef caseLoop():\r\n soup, state = setup()\r\n print('scraping:')\r\n cnx = mysql.connector.connect(user='root', password='password', database='criminal_case_calendar')\r\n cnx2 = mysql.connector.connect(user='root', password='password', database='criminal_case_calendar')\r\n curA = cnx.cursor()\r\n subcase_qry = (\"SELECT DISTINCT csn, def, loc, crt FROM subcases where concat(loc, csn, '-', def) not in (select case_numb from case_info) and crt not like '%;%'\")\r\n curA.execute(subcase_qry)\r\n subc = curA.fetchone()\r\n while subc is not None:\r\n subcase_info = [subc[0], subc[1], subc[2], subc[3]]\r\n print(\"caseLoop\")\r\n print(subcase_info)\r\n response, state, soup = eachCaseLoop(state, soup, subcase_info)\r\n subcase_no = subcase_info[2]+subcase_info[0]+'-'+subcase_info[1]\r\n if state == 'full results':\r\n case_info = extractCaseInfo(subcase_no, soup)\r\n saveResult(cnx2, response, case_info)\r\n else:\r\n print('error')\r\n saveResult(cnx2, response, (subcase_no, '', '', '', '', '', ''))\r\n# print(state)\r\n subc = curA.fetchone()\r\n cnx.commit()\r\n cnx.close()\r\n cnx2.close()\r\n\r\ndef extractCaseInfo(subcase_no, soup):\r\n#from 'full result'\r\n if subcase_no == soup.find('div', id=\"caseNumb\").text:\r\n table = [soup.find('div', id=\"caseNumb\").text]\r\n results = soup.find('table', id=\"FillChargeInfo_tabCaseList\")\r\n if results is not None:\r\n results = results.find_all('td')\r\n for result in results:\r\n table.append(result.text)\r\n out = (table[0], table[1], table[2], table[3], table[4], table[5], table[6]) \r\n else:\r\n out = (table[0], '', '', '', '', '', '')\r\n else:\r\n out = (subcase_no, '', '', '', '', '', '')\r\n return out \r\n\r\ndef saveResult(cnx2, response, case_info):\r\n curB = cnx2.cursor()\r\n curC = cnx2.cursor()\r\n insert_result = (\r\n \"INSERT INTO html (case_numb, result_html)\"\r\n \"VALUES (%s, %s)\"\r\n )\r\n data = (case_info[0], response.text)\r\n curB.execute(insert_result, data)\r\n cnx2.commit()\r\n insert_case_info= (\r\n \"INSERT INTO case_info (case_numb, def, code_sect, statute, plea, disposition, date)\"\r\n \"VALUES (%s, %s, %s, %s, %s, %s, %s)\"\r\n )\r\n curC.execute(insert_case_info, case_info)\r\n cnx2.commit()\r\n\r\n\r\n\r\n","sub_path":"subcase_summary.py","file_name":"subcase_summary.py","file_ext":"py","file_size_in_byte":11127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"289874967","text":"from datetime import datetime as dt\nimport gspread\nimport imp\nimport pandas as pd\nimport pickle as pkl\n\ndpath = 'config/data/'\ntab3 = imp.load_source('online_campaigns', 'modules/tab3.py')\ntab5 = imp.load_source('offline_campaigns', 'modules/tab5.py')\ntab8 = imp.load_source('strat_segmentation', 'modules/tab8.py')\n\nclass Patcher():\n methods = {0: None,\n 1: \"get_group_increments\",\n 2: \"get_campaigns_online\",\n 3: \"get_campaigns_offline_monthly\",\n 4: \"get_campaigns_offline_weekly\",\n 5: None,\n 6: None,\n 7: \"get_strat_segmentation\"}\n\n def __init__(self, df, type, limit, gdf=None):\n \"\"\"\n Patcher for Google spreadsheets\n -> type: numerical index for spreadsheet\n -> data: dataframe object\n -> limit: max row or column to put new cells\n -> gdf: gspread dataframe (optional)\n \"\"\"\n self.df = df\n self.type = type\n self.limit = limit\n self.gdf = gdf if isinstance(gdf, pd.DataFrame) else None\n self.d = get_dict('data/cell_dicts.pkl')\n self.t = get_dict_value('triggers.pkl', type)\n self.patch = self.switch()\n\n def switch(self):\n \"\"\"Performs tabwise patch update\"\"\"\n return getattr(self, self.methods[self.type])()\n\n def get_group_increments(self):\n \"\"\"Incremental test/control weekly, tab 2 on dashboard\"\"\"\n patch = []\n for group_type, info in self.d[self.type].items():\n for name, row in info.items():\n x = self.t.get(name)\n patch.append(gspread.models.Cell(row, self.limit,\n str(self.df.loc[self.df.group == group_type, x].values[0])))\n return patch\n\n def get_campaigns_online(self):\n \"\"\"Online campaigns tab 3 (weekly) on dashboard\"\"\"\n attr = get_dict_value('attribution.pkl', self.type)\n\n def create_new_campaigns(gdf, new_campaigns, max_cols):\n \"\"\"User interface to update campaign-trigger dictionary\n -> gdf: gspread-dataframe\n -> campaigns: list of unknown campaigns\n -> max_cols: column for current period (row 3)\n \"\"\"\n # Slice google sheet array based on start indices for channels\n outlay = get_dict(dpath+'outlay3.pkl')\n main = gdf.iloc[:outlay['email']]\n email = gdf.iloc[outlay['email']:outlay['wp']]\n wp = gdf.iloc[outlay['wp']:outlay['seasonal']]\n seasonal = gdf.iloc[outlay['seasonal']:outlay['sms']]\n sms = tab3.filter_na(gdf.iloc[outlay['sms']:])\n del gdf\n self.gdf = tab3.check_new_groups(new_campaigns, main, email, wp,\n seasonal, sms, self.t, self.type)\n tab3.reindex_outlay(self.gdf, self.t)\n del main, email, wp, seasonal, sms\n\n actual_values = []\n for channel, values in self.t.items():\n for df_trigger in values.keys():\n actual_values.append(df_trigger)\n new_campaigns = list(self.df.name.loc[~self.df.name.isin(actual_values)])\n create_new_campaigns(self.gdf, new_campaigns, self.limit - 1)\n self.gdf = tab3.update_campaigns(self.df, self.gdf, self.limit - 1, self.t, attr)\n self.gdf = tab3.fill_main(self.gdf, self.limit - 1)\n self.format = tab3.build_format_patch(self.gdf, self.limit)\n self.gdf = self.gdf.fillna('')\\\n .replace('#DIV/0! 
(Function DIVIDE parameter 2 cannot be zero.)', '')\n self.gdf.iloc[2:, 4:] = self.gdf.iloc[2:,4:]\\\n .astype(str).replace('\\.', ',', regex=True)\n\n def get_pct(cell):\n if str(cell) != '': return cell + '%'\n else: return cell\n\n values = ['OR', 'CR', 'CTR', \"UR\", 'Заказов от трафика',\n 'Конверсия в заказы', 'Отношение выручки к предыдущей неделе',\n 'Процент отработанных корзин']\n idx = self.gdf.loc[self.gdf.iloc[:,0].isin(values)].index\n self.gdf.iloc[idx, 4:] = self.gdf.iloc[idx, 4:].applymap(get_pct)\n return self.gdf\n\n def get_campaigns_offline_weekly(self):\n \"\"\"Offline campaigns tab 5 (weekly) on dashboard\"\"\"\n attr = get_dict_value('attribution.pkl', self.type)\n self.df = tab5.format_data(self.df)\n patch = []\n rows = [x for x in range(len(self.df.name.unique()))]\n for ind, name in zip(rows, self.df.name.unique()):\n for key, column in zip(attr.keys(), self.d[self.type].values()):\n val = str(self.df.loc[self.df.name == name, key].values[0])\n patch.append(gspread.models.Cell(self.limit+ind, column, val))\n return patch\n\n def get_strat_segmentation(self):\n \"\"\"Strategic segmentation weekly, tab 8 on dashboard\"\"\"\n res = tab8.build_segmentation(self.df)\n patch = []\n for (name, row), val in zip(self.d[self.type].items(), res):\n patch.append(gspread.models.Cell(row, self.limit, str(val)))\n return patch\n\n\ndef put_dict(filename, obj):\n with open(filename, 'wb') as f:\n pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)\n\n\ndef get_dict(filename):\n with open(filename, 'rb') as f:\n return pkl.load(f)\n\n\ndef update_dictionary(filename, key, value):\n tmp = get_dict(filename)\n tmp[key] = value\n put_dict(filename, tmp)\n\n\ndef get_dict_value(filename, key):\n return get_dict(dpath + filename).get(key)\n","sub_path":"modules/patcher.py","file_name":"patcher.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"587081789","text":"import numpy as np\nimport six\nfrom ..expressions import deconstruct_jaggedness, reconstruct_jaggedness\n\n\n__all__ = [\"get_pandas_reduction\"]\n\n\nclass BadReductionConfig(Exception):\n pass\n\n\nclass JaggedNth(object):\n def __init__(self, index, fill_missing, force_float=True):\n self.index = index\n self.fill_missing = fill_missing\n self.dtype = None\n if fill_missing is True or fill_missing is False:\n self.dtype = bool\n elif force_float or isinstance(fill_missing, float):\n self.dtype = float\n else:\n self.dtype = int\n\n def __call__(self, array):\n # The next two lines ought to be enough\n # result = array.pad(abs(self.index) + int(self.index >= 0))\n # result = result[..., self.index]\n\n # Flatten out the first K-1 dimensions:\n flat, counts = deconstruct_jaggedness(array, [])\n result = reconstruct_jaggedness(flat, counts[:1])\n\n # Now get the Nth item on the last dimension\n result = result.pad(abs(self.index) + int(self.index >= 0))\n result = result[..., self.index]\n\n # Now replay the remaining dimensions on this\n result = reconstruct_jaggedness(result, counts[1:])\n\n if self.dtype is not None:\n result = result.astype(self.dtype)\n result = result.fillna(self.fill_missing)\n return result\n\n\nclass JaggedMethod(object):\n def __init__(self, method):\n self.method_name = method\n\n def __call__(self, array):\n return getattr(array, self.method_name)()\n\n\nclass JaggedProperty(object):\n def __init__(self, prop_name):\n self.prop_name = prop_name\n\n def __call__(self, array):\n return getattr(array, self.prop_name)\n\n\n_jagged_methods = [\"sum\", \"prod\", \"any\", \"all\", \"count_nonzero\",\n \"max\", \"min\", \"argmin\", \"argmax\"]\n_jagged_properties = [\"counts\"]\n\n\ndef get_awkward_reduction(stage_name, reduction, fill_missing=np.nan):\n if isinstance(reduction, six.integer_types):\n return JaggedNth(int(reduction), fill_missing)\n\n if not isinstance(reduction, six.string_types):\n msg = \"{}: requested reduce method is not a string or an int\"\n raise BadReductionConfig(msg.format(stage_name))\n\n if reduction in _jagged_methods:\n return JaggedMethod(reduction)\n if reduction in _jagged_properties:\n return JaggedProperty(reduction)\n\n msg = \"{}: Unknown method to reduce: '{}'\"\n raise BadReductionConfig(msg.format(stage_name, reduction))\n\n\n_pandas_aggregates = [\"sum\", \"prod\", \"max\", \"min\", \"argmax\", \"argmin\"]\n_numpy_ops = [\"count_zero\"]\n\n\nclass PandasAggregate(object):\n def __init__(self, method):\n self.method = method\n\n def __call__(self, groups):\n return groups.agg(self.method)\n\n\nclass PandasNth(object):\n def __init__(self, index):\n self.index = index\n\n def __call__(self, groups):\n return groups.nth(self.index)\n\n\ndef get_pandas_reduction(stage_name, reduction):\n if not isinstance(reduction, (six.string_types, six.integer_types)):\n msg = \"{}: requested reduce method is not a string or an int\"\n raise BadReductionConfig(msg.format(stage_name))\n\n if reduction in _pandas_aggregates:\n return PandasAggregate(reduction)\n elif reduction in _numpy_ops:\n op = getattr(np, reduction)\n return PandasAggregate(op)\n else:\n try:\n index = int(reduction)\n except ValueError:\n pass\n else:\n return PandasNth(index)\n\n msg = \"{}: Unknown method to reduce: '{}'\"\n raise BadReductionConfig(msg.format(stage_name, 
reduction))\n","sub_path":"fast_carpenter/define/reductions.py","file_name":"reductions.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"262902416","text":"import os\nimport face_recognition\nimport cv2\n\nknown_face_encodes = []\nknown_face_names = []\n\ndef encode_face(filename=None):\n # 提取照片中最明显的人脸并进行特征提取, 如果输入照片为空则打开摄像机取照片\n if filename is not None:\n image = cv2.imread(filename)\n else:\n # Get a reference to webcam #0 (the default one)\n video_capture = cv2.VideoCapture(0)\n print(\"press the space or enter if ready!\")\n while True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n # Display the resulting image\n cv2.imshow('Video', frame)\n k = cv2.waitKey(1)\n if k in [13, 32]: # enter or space\n break\n # else:\n # print(\"you pressed %d \" % k)\n # Convert the image from BGR color (which OpenCV uses) to RGB color\n # (which face_recognition uses)\n image = frame # [:, :, ::-1] # mac unnecessary\n # Release handle to the webcam\n video_capture.release()\n cv2.destroyAllWindows()\n\n face_locations = face_recognition.face_locations(image)\n face_encodings = face_recognition.face_encodings(image, face_locations)\n top, right, bottom, left = face_locations[0]\n face_img = image[top:bottom, left:right, :]\n face_encode = face_encodings[0]\n return face_encode, face_img\n\ndef list_file(filedir, sufix=None):\n # 根据输入的后缀来列举文件夹中的文件\n file_list = os.listdir(filedir)\n file_list = [filedir + f for f in file_list] # add path for files\n if sufix is not None:\n ret_list = [f for f in file_list if f.endswith(sufix)]\n return ret_list\n else:\n return file_list\n\n\ndef load_faces_img(dirname):\n # 从人脸图像加载人脸数据\n for img_file in list_file(dirname, '.jpg'):\n known_face_names.append(img_file.split(\"/\")[-1].split(\".\")[0])\n known_face_encode = encode_face(img_file)\n known_face_encodes.append(known_face_encode)\n print(known_face_encodes, known_face_names)\n\nif __name__ == \"__main__\":\n load_faces_img(\"F:/MyPaper/upload/\")\n","sub_path":"app/find_exist_img.py","file_name":"find_exist_img.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"36028141","text":"import argparse\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom model import *\nfrom data_loader import *\nfrom utils.util import ensure_dir, to_variable, save_imgs\nimport os\nimport subprocess\nimport re\nimport cv2\n\n\ndef main(args):\n checkpoint_path = os.path.join(\"saved/\", args.name, args.checkpoint)\n checkpoint = torch.load(checkpoint_path)\n config = checkpoint['config']\n\n model = eval(config['arch'])(config)\n\n model.load_state_dict(checkpoint['state_dict'])\n with_cuda = not args.no_cuda\n if with_cuda:\n model.cuda()\n model.eval()\n model.summary()\n\n gen_images = []\n torch.manual_seed(3150505454) # DCGAN seed\n # torch.manual_seed(1234) # WGAN-GP seed\n for i in range(25):\n input_noise = torch.randn(1, config['model']['noise_dim'], 1, 1)\n input_noise = to_variable(with_cuda, input_noise)\n gen_image = model.generator(input_noise).cpu().data.numpy()[0]\n gen_image = np.transpose(gen_image, (1, 2, 0))\n gen_image = (gen_image+1)/2\n gen_image = cv2.cvtColor(gen_image, cv2.COLOR_BGR2RGB)\n gen_images.append(gen_image)\n\n save_imgs(np.array(gen_images), args.output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='HW3-1 Testing')\n parser.add_argument('--name', required=True, type=str,\n help='Specify the name of folder')\n parser.add_argument('--checkpoint', required=True, type=str,\n help='model checkpoint file name')\n parser.add_argument('--output', required=True, type=str,\n help='output filename')\n parser.add_argument('--no-cuda', action=\"store_true\",\n help='use CPU instead of GPU')\n\n args = parser.parse_args()\n main(args)\n","sub_path":"hw3/test_gan.py","file_name":"test_gan.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"595815284","text":"ACTION_DO_NOTHING = 'donothing'\nACTION_SELECT_SCV = 'selectscv'\nACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'\nACTION_BUILD_BARRACKS = 'buildbarracks'\nACTION_SELECT_BARRACKS = 'selectbarracks'\nACTION_BUILD_MARINE = 'buildmarine'\nACTION_SELECT_ARMY = 'selectarmy'\nACTION_ATTACK = 'attack'\n\nai_actions = [\n ACTION_DO_NOTHING,\n ACTION_SELECT_SCV,\n ACTION_BUILD_SUPPLY_DEPOT,\n ACTION_BUILD_BARRACKS,\n ACTION_SELECT_BARRACKS,\n ACTION_BUILD_MARINE,\n ACTION_SELECT_ARMY,\n ACTION_ATTACK,\n]","sub_path":"learningmodules/possibleactions.py","file_name":"possibleactions.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"262916532","text":"import vtk\nimport itk\nimport argparse\nimport glob\nimport os\nimport shutil\n\nimport numpy as np\n\n\ndef Write(vtkdata, output_name):\n\toutfilename = output_name\n\tprint(\"Writting:\", outfilename)\n\tpolydatawriter = vtk.vtkPolyDataWriter()\n\tpolydatawriter.SetFileName(outfilename)\n\tpolydatawriter.SetInputData(vtkdata)\n\tpolydatawriter.Write()\n\n\ndef main(args):\n\timg_fn_array = []\n\n\tif args.image:\n\t\timg_obj = {}\n\t\timg_obj[\"img\"] = args.image\n\t\timg_obj[\"out\"] = args.out\n\t\timg_fn_array.append(img_obj)\n\n\telif args.dir:\n\t\tnormpath = os.path.normpath(\"/\".join([args.dir, '**', '*']))\n\t\tfor img_fn in glob.iglob(normpath, recursive=True):\n\t\t\tif os.path.isfile(img_fn) and True in [ext in img_fn for ext in [\".nrrd\"]]:\n\t\t\t\timg_obj = {}\n\t\t\t\timg_obj[\"img\"] = img_fn\n\t\t\t\timg_obj[\"out\"] = os.path.normpath(\"/\".join([args.out]))\n\t\t\t\timg_fn_array.append(img_obj)\n\n\t\n\tfor img_obj in img_fn_array:\n\t\timage = img_obj[\"img\"]\n\t\tout = img_obj[\"out\"]\n\t\tprint(\"Reading:\", image)\n\n\t\tsurf = vtk.vtkNrrdReader()\n\t\tsurf.SetFileName(image)\n\t\tsurf.Update()\n\n\n\t\tdmc = vtk.vtkDiscreteMarchingCubes()\n\t\tdmc.SetInputConnection(surf.GetOutputPort())\n\t\tdmc.GenerateValues(100, 1, 100)\n\n\t\t# LAPLACIAN smooth\n\t\tSmoothPolyDataFilter = vtk.vtkSmoothPolyDataFilter()\n\t\tSmoothPolyDataFilter.SetInputConnection(dmc.GetOutputPort())\n\t\tSmoothPolyDataFilter.SetNumberOfIterations(10)\n\t\tSmoothPolyDataFilter.SetFeatureAngle(120.0)\n\t\tSmoothPolyDataFilter.SetRelaxationFactor(0.6)\n\t\tSmoothPolyDataFilter.Update()\n\t\t\n\t\t# SINC smooth\n\t\t# smoother = vtk.vtkWindowedSincPolyDataFilter()\n\t\t# smoother.SetInputConnection(dmc.GetOutputPort())\n\t\t# smoother.SetNumberOfIterations(30)\n\t\t# smoother.BoundarySmoothingOff()\n\t\t# smoother.FeatureEdgeSmoothingOff()\n\t\t# smoother.SetFeatureAngle(120.0)\n\t\t# smoother.SetPassBand(0.001)\n\t\t# smoother.NonManifoldSmoothingOn()\n\t\t# smoother.NormalizeCoordinatesOn()\n\t\t# smoother.Update()\n\n\t\toutputFilename = out+\"/\"+os.path.splitext(os.path.basename(image))[0]+\".vtk\"\n\t\tWrite(SmoothPolyDataFilter.GetOutput(), outputFilename)\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='create RootCanal object from a segmented file', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\tin_group_features = parser.add_mutually_exclusive_group(required=True)\n\tin_group_features.add_argument('--image', type=str, help='input file')\n\tin_group_features.add_argument('--dir', type=str, help='input dir')\n\n\tparser.add_argument('--out', type=str, help='output dir', default='')\n\n\targs = parser.parse_args()\n\n\tmain(args)","sub_path":"src/py/nrrd2vtk.py","file_name":"nrrd2vtk.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"298761544","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/3/25 16:15\n# @Author : hyang\n# @Site :\n# @File : main.py\n# @Software: PyCharm\nimport os\nimport sys\nimport logging\n\n\n\"\"\"\n日志文件设置\n\"\"\"\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# 数据库日志\nDATABASE = {\n 'engine': 'file_storage', #support mysql,postgresql in the future\n 'name': 'account',\n 'path': \"%s/db\" % BASE_DIR\n}\n\n# 日志等级\nLOG_LEVEL = logging.INFO\nLOG_TYPES = {\n 'transaction': 'transactions.log',\n 'access': 'access.log',\n}\n\n# 交易类型\nTRANSACTION_TYPE = {\n 'repay':{'action':'plus', 'interest':0}, # 还款\n 'withdraw':{'action':'minus', 'interest':0.05}, # 取款\n 'transfer':{'action':'minus', 'interest':0.05}, # 转账\n 'consume':{'action':'minus', 'interest':0}, # 消费\n\n}\n\n","sub_path":"学员作业/ATM/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"85782020","text":"#! /usr/bin/python\n\n#\n# Description:\n# ================================================================\n# Time-stamp: \"2021-06-10 20:13:51 trottar\"\n# ================================================================\n#\n# Author: Richard L. Trotta III \n#\n# Copyright (c) trottar\n#\n\nimport numpy as np\n\ndef Lumi(cross_sec,binevt,xbinwidth,qbinwidth,tbinwidth,xLbinwidth):\n\n # Luminosity\n\n # all binned events\n sig_all = cross_sec*1e5 # the 1e5 corrects the scaling up top\n evts_all = binevt\n\n lumi = [(e)/((s)*(qbinwidth)*(xbinwidth)*(tbinwidth)*(xLbinwidth)) for e,s in zip(evts_all,sig_all)]\n #print(\"---------------------------------\\n\")\n # print(\"\\nLuminosity: \", lumi)\n\n nevt = [100/(l*1e-6) for l in lumi] # The 1e-6 converts properly, integrated luminosiy: 100 fb^-1\n nevt = np.asarray(nevt)\n #print(\"\\nEvents expected running at 100 $fb^{-1}$: \", nevt)\n\n return nevt","sub_path":"mesonMC/src/process/luminosity.py","file_name":"luminosity.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"154683128","text":"#while\ni =1\nwhile i < 10:\n print(i)\n i +=1\n \n#print in one line\ni =1\nwhile i < 10:\n print(i,end=',')\n i +=1\n\n#print in rev\ni =10\nwhile i>0:\n print(i)\n i -=1\n\n#print all odd numbers betwee 1 to 30\ni =1\nwhile i<30:\n print(i)\n i +=2\n\n#wap to get sum of all even and odd numbers between 1 to 100\ni =1\nse =0\nso =0\nwhile i<=100:\n if i % 2==0:\n se +=i\n else:\n so +=i\n\n i+=1\n\nprint('sum of all even :',se)\nprint('sum of all odd :',so)\n\n#wap to print table of given no\nn = int(input('enter num :'))\ni =1\nwhile i<11:\n print(n,'*',i,'=',(n*i))\n i +=1\n\n\n \n\n\n\n\n\n \n\n\n\n \n\n\n\n\n \n\n\n\n\n\n\n","sub_path":"2/WhileLoopExample.py","file_name":"WhileLoopExample.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"598627893","text":"from django.forms import *\nfrom .models import Supplier\nfrom company.models import Benefit\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, HTML, Field\n\n\nclass ProfileEditForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(ProfileEditForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.form_action = 'submit_survey'\n self.helper.add_input(Submit('submit', 'Kaydet'))\n self.helper.layout = Layout(\n Field('name'),\n Field('email'),\n Field('image'),\n HTML(\n \"\"\"{% if form.image.value %} {% endif %}\"\"\"),\n )\n\n class Meta:\n model = Supplier\n fields = ('name', 'email', 'image')\n\n\nclass ServiceUseForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(ServiceUseForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Kaydet'))\n\n class Meta:\n model = Benefit\n fields = ('service','usage')\n\n def save(self, **kwargs):\n benefit = super(ServiceUseForm, self).save(commit=False)\n employe_id = kwargs.get('employe_id')\n service = kwargs.get(\"service\")\n employe = kwargs.get('employe')\n usage = kwargs.get('usage')\n if Benefit.objects.filter(employe_id=employe_id,service_id=service.id).exists():\n used_benefit=Benefit.objects.get(employe_id=employe_id, service_id=service.id)\n used_benefit.usage+=usage\n used_benefit.save()\n employe.credit-=(service.credit*usage)\n employe.save()\n else:\n benefit.employe_id = kwargs.get('employe_id')\n benefit.save()\n service = kwargs.get('service')\n employe.credit-=(service.credit*usage)\n employe.save()","sub_path":"supplier/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"297807180","text":"import logging\n\nimport pytest\nimport re\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture\ndef server_roles_categories(cfme_data):\n \"\"\" Provides the ``server_roles`` section from cfme_data\n \"\"\"\n return cfme_data.get(\"server_roles\", {})\n\n\n@pytest.fixture\ndef default_roles_list(server_roles_categories):\n \"\"\" Provides the list of default roles enabled from cfme_data\n \"\"\"\n return server_roles_categories.get(\"default\", [])\n\n\n@pytest.fixture\ndef server_roles(fixtureconf, cfme_data, default_roles_list, cnf_configuration_pg):\n \"\"\"Set the server roles based on a list of roles attached to the test using this fixture\n\n Usage examples:\n\n If you want to specify certain roles that have to be set,\n you can use this type of decoration:\n\n @pytest.mark.fixtureconf(server_roles=\"+automate\")\n def test_appliance_roles(server_roles, default_roles_list):\n assert len(server_roles) == len(default_roles_list) + 1\n\n This takes the default list from cfme_data.yaml and modifies\n it by the server_roles keyword. If prefixed with + or nothing, it adds,\n if prefixed with -, it removes the role. It can be combined either\n in string and in list, so these lines are functionally equivalent:\n\n \"+automate -foo bar\" # (add automate and bar, remove foo)\n [\"+automate\", \"-foo\", \"bar\"]\n\n If you specify the keyword ``clear_default_roles=True``, then all roles\n are flushed and the list contains only user_interface role.\n\n Roles can be pulled from the cfme_data fixture using yaml selectors,\n which will do a 'set' with the list of roles found at the target path:\n\n @pytest.mark.fixtureconf(server_roles_cfmedata=('level1', 'sublevel2'))\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 3\n\n Which corresponds to this yaml layout:\n\n level1:\n sublevel2:\n - database_operations\n - user_interface\n - web_services\n\n To ensure the appliance has the default roles:\n\n @pytest.mark.fixtureconf(server_roles=None)\n def test_appliance_roles(server_roles):\n do(test)\n\n This works because if a ``None`` parameter for server_roles is passed,\n default roles are used and no modification will be done.\n\n List of server role names currently exposed in the CFME interface:\n\n - automate\n - ems_metrics_coordinator\n - ems_metrics_collector\n - ems_metrics_processor\n - database_operations\n - database_synchronization\n - event\n - ems_inventory\n - ems_operations\n - notifier\n - reporting\n - scheduler\n - smartproxy\n - smartstate\n - user_interface\n - web_services\n\n \"\"\"\n\n if 'server_roles' in fixtureconf:\n roles_list = default_roles_list[:]\n if \"clear_default_roles\" in fixtureconf:\n if fixtureconf['clear_default_roles']:\n roles_list = [\"user_interface\"] # This must be\n # Modify it according to the server_roles\n server_roles_list = fixtureconf['server_roles']\n # Break the string down to the list\n if isinstance(server_roles_list, str):\n server_roles_list = [item.strip()\n for item\n in re.split(r\"\\s+\", server_roles_list.strip())\n if len(item) > 0] # Eliminate multiple spaces\n if server_roles_list is not None:\n # Process the prefixes to determine whether add or remove\n # Resulting format [(remove?, \"role\"), ...]\n server_roles_list = [(item[0] == \"-\", # 1) Bool whether remove?\n item[1:] # 2) Removing the prefix +,-\n if item.startswith((\"+\", \"-\")) # 2) If present\n else item) # 2) Else not\n for item\n in server_roles_list\n if len(item) > 0] # Ensure it is not empty\n for 
remove, role in server_roles_list:\n if remove and role in roles_list:\n roles_list.remove(role)\n elif not remove and role not in roles_list:\n roles_list.append(role)\n else:\n role_message = (\"+\", \"-\")[remove] + role # False = 0, True = 1\n logger.info(\"FIXTURE[server_roles]: No change with role setting %s\" %\n role_message)\n elif 'server_roles_cfmedata' in fixtureconf:\n roles_list = cfme_data\n # Drills down into cfme_data YAML by selector, expecting a list\n # of roles at the end. A KeyError here probably means the YAML\n # selector is wrong\n for selector in fixtureconf['server_roles_cfmedata']:\n roles_list = roles_list[selector]\n else:\n raise Exception('server_roles config not found on test callable')\n\n # Deselecting the user interface role is really un-fun, and is\n # counterproductive in the middle of user interface testing.\n if 'user_interface' not in roles_list:\n raise Exception('Refusing to remove the user_interface role')\n\n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n # Workaround to rudely bypass a popup that sometimes appears for\n # unknown reasons.\n # See also: https://github.com/RedHatQE/cfme_tests/issues/168\n from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings\n server_settings_pg = ServerSettings(settings_pg.testsetup)\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n\n # Check whether we specified correct roles\n # Copy it to prevent excessive selenium querying\n # and we need also only the names\n possible_roles = [item.name for item in sst.server_roles]\n for role in roles_list:\n if role not in possible_roles:\n raise Exception(\"Role '%s' does not exist!\" % role)\n\n # Set the roles!\n if sorted(sst.selected_server_role_names) != sorted(roles_list):\n sst.set_server_roles(roles_list)\n sst.save()\n sst._wait_for_results_refresh()\n else:\n logger.info('FIXTURE[server_roles]: Server roles already match configured fixture roles,' +\\\n ' not changing server roles')\n\n # If this assert fails, check role names for typos or other minor differences\n assert sorted(sst.selected_server_role_names) == sorted(roles_list)\n\n return sst.selected_server_role_names\n","sub_path":"fixtures/server_roles.py","file_name":"server_roles.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"73442814","text":"\n\n#calss header\nclass _GREET():\n\tdef __init__(self,): \n\t\tself.name = \"GREET\"\n\t\tself.definitions = [u'to welcome someone with particular words or a particular action, or to react to something in the stated way: ', u'If you are greeted by a sight, sound, or smell, you notice it immediately when you arrive somewhere: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_greet.py","file_name":"_greet.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"162740012","text":"import unittest\nfrom utils import constructor_str\n\nclass InitClass:\n def __init__(self, a, b):\n self.a = a \n self.b = b\n\nclass NewClass:\n def __new__(cls, a, b, c):\n self = super().__new__(cls)\n self.a = a \n self.b = b\n self.c = c\n return self\n\nclass ArgsClass:\n def __init__(self, *args):\n self.a = args[0]\n self.b = args[1]\n\nclass TestConstructorStr(unittest.TestCase):\n def test_constructor_str(self):\n value = InitClass(('1', None, True, 3.4), '2')\n desired = \"InitClass(('1', None, True, 3.4), '2')\"\n self.assertEqual(constructor_str(value), desired)\n\n value = NewClass(1, InitClass(1, '2'), [1, 2, '3'])\n desired = \"NewClass(1, InitClass(1, '2'), [1, 2, '3'])\"\n self.assertEqual(constructor_str(value), desired)\n\n value = NewClass(1, [InitClass(1, '2'), 1], (InitClass('1', False), True))\n desired = \"NewClass(1, [InitClass(1, '2'), 1], (InitClass('1', False), True))\"\n self.assertEqual(constructor_str(value), desired)\n\n with self.assertRaises(ValueError):\n constructor_str(ArgsClass(1, 2))","sub_path":"tests/test_constructor_str.py","file_name":"test_constructor_str.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"587466435","text":"# -*- coding: utf-8 -*-\n\n\nimport time\nimport logging\nimport requests\n\n\n__all__ = [\"debug_request\", \"Client\"]\n\n\ndef debug_request(name, resp, start):\n \"\"\"\n 打印请求调试信息\n 并且生成curl格式方便测试\n\n :param name: 导入包名\n :param resp: :class:`~requests.Response` 对象\n :param start: 请求开始时间,方便统计请求耗时\n \"\"\"\n logger = logging.getLogger(name)\n\n command = \"curl -X {method} -H {headers} -d '{data}' '{uri}'\"\n req = resp.request\n method = req.method\n uri = req.url\n data = req.body\n headers = ['\"{0}: {1}\"'.format(k, v) for k, v in req.headers.items()]\n headers = \" -H \".join(headers)\n curl_info = command.format(method=method, headers=headers, data=data or '', uri=uri)\n spent = (time.time() - start) * 1000\n log = logger.warning\n content = resp.content\n if resp.status_code >= 200 and resp.status_code < 300:\n log = logger.info\n content = \"\"\n log(\"request [{0}] spent [{1:0.3f}]ms status [{2} - {3}] content [[{4}]]\".format(curl_info, spent, resp.status_code, resp.reason, content))\n\n\nclass _Client(object):\n session = requests.Session()\n\n @classmethod\n def do(self, method, url, params=None, data=None, headers=None, timeout=20,\n cert=None, verify=True, files=None, *args, **kwargs):\n start = time.time()\n req = requests.Request(method, url, params=params, data=data, files=files, headers=headers)\n prepped = req.prepare()\n resp = self.session.send(prepped, verify=verify, cert=cert, timeout=timeout)\n debug_request(__name__, resp, start)\n return resp\n\n\nclass Client(_Client):\n \"\"\"\n HTTP请求客户端\n 可以通过连缀形式生成http请求地址,并且请求\n\n 基本的使用形式\n\n ::\n\n client = Client(\"http://example.com\")\n resp = client.v1.users.get(params={\"user_id\": 1})\n print(resp.status_code)\n print(resp.content)\n\n 连缀的时候如果有特殊符号,可以使用括号代替.\n\n ::\n\n resp = client.v1(\"to path\").delete()\n\n\n :param path: 默认传入host\n \"\"\"\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Client(\"{}/{}\".format(self._path, path))\n\n def __str__(self):\n return self._path\n\n __call__ = __getattr__\n __repr__ = __str__\n\n def get(self, *args, **kwargs):\n \"\"\"\n GET请求\n \"\"\"\n return self.do(\"GET\", self._path, *args, **kwargs)\n\n def post(self, *args, **kwargs):\n \"\"\"\n POST请求\n \"\"\"\n return self.do(\"POST\", self._path, *args, **kwargs)\n\n def delete(self, *args, **kwargs):\n \"\"\"\n DELETE请求\n \"\"\"\n return self.do(\"DELETE\", self._path, *args, **kwargs)\n\n def put(self, *args, **kwargs):\n \"\"\"\n PUT请求\n \"\"\"\n return self.do(\"PUT\", self._path, *args, **kwargs)\n","sub_path":"pyweixin/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"151091740","text":"\"\"\"part 1\"\"\"\n# set up input file\ninput_path = './input3.txt'\nwith open(input_path) as f:\n paths = f.readlines()\n f.close()\n\npaths = [line.strip('\\n').split(',') for line in paths]\n\nclass Wire:\n def __init__(self):\n self.currX, self.currY = 0, 0\n self.coords = []\n\n def move(self, path):\n #print(path)\n dir = path[0]\n units = int(path[1:])\n\n if dir == 'U':\n self.coords = [(self.currX, self.currY + y) for y in range(1, units + 1)]\n self.currY += units\n elif dir == 'D':\n self.coords = [(self.currX, self.currY - y) for y in range(1, units + 1)]\n self.currY -= units\n elif dir == 'R':\n self.coords = [(self.currX + x, self.currY) for x in range(1, units + 1)]\n self.currX += units\n else:\n self.coords = [(self.currX - x, self.currY) for x in range(1, units + 1)]\n self.currX -= units\n return self.coords\n\ntemp = ['U10','R5', 'U3', 'L10']\ntemp2 = ['D5','R2','U20']\nwire1 = Wire()\nwire2 = Wire()\n#wire1.coords += [wire1.move(path) for path in temp if type(path) == str]\n#wire2.coords += [wire2.move(path) for path in temp2 if type(path) == str]\n\nwire1.coords += [wire1.move(path) for path in paths[0] if type(path) == str]\nwire2.coords += [wire2.move(path) for path in paths[1] if type(path) == str]\n\n# flatten lists\nwire1.coords = [item for sublist in wire1.coords for item in sublist]\nwire2.coords = [item for sublist in wire2.coords for item in sublist]\n# make sets\nwire1_set = set(wire1.coords)\nwire2_set = set(wire2.coords)\n# find intersections\ncommon = set(wire1_set & wire2_set)\nprint(common)\n\ndef distance(coord):\n return abs(coord[0]) + abs(coord[1])\n\ndistances = [distance(coord) for coord in common]\n#print(distances)\nprint(min(distances))\n\n\"\"\"part 2\"\"\"\n#print(wire1.coords)\n#print(wire2.coords)\nwire_dists = [wire1.coords.index(x) + wire2.coords.index(x) + 2 for x in common]\n#print(wire_dists)\nprint(min(wire_dists))\n","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"82380138","text":"#BACK PROPAGATION #supervised learning\nimport math,time\n\ntoleranceValue = 0.09\nlearningRate = 0.6\nsquaredError = []\n\ndataset = [[0.4,-0.7,0.1],\n [0.3,-0.5,0.05],\n [0.6,0.1,0.3],\n [0.2,0.4,0.25],\n [0.1,-0.2,0.12]]\n#print(\"\\nDataset\") \nfor data in dataset:\n #print(data)\n pass\n\n#the input is normalized(-1<=input<=1)\nV_weightVector = [[0.1,0.4],\n [-0.2,0.2]]\n \n\nTW_weightVector = [0.2, -0.5] \n\ndef batch(x1,x2,VweightVector, TWweightVector,targetOutput):\n #transpose of V weight vector\n TVweightVector = [ [VweightVector[0][0],VweightVector[1][0]],\n [VweightVector[0][1],VweightVector[1][1]] ] \n\n #input of input layer == output of output layer\n outputOfInputLayer = [x1, x2] \n #print(f\"\\nV- weight vector transpose \\n {TVweightVector}\")\n #print(f\"\\nW- weight vector transpose \\n {TWweightVector}\")\n #print(f\"\\nTARGET OUTPUT : {targetOutput}\")\n #input of hidden layer \n firstRow = round((((TVweightVector[0][0])*x1)+((TVweightVector[0][1])*x2)), 2)\n secondRow = round(((TVweightVector[1][0])*x1)+((TVweightVector[1][1])*x2),2)\n hiddenLayerInput = [firstRow,secondRow]\n #print(f\"\\nHidden layer input\\n{hiddenLayerInput}\")\n #output of hidden layer \n rowOne = round((1/(1+math.exp(-(firstRow)))), 4)\n rowTwo = round((1/(1+math.exp(-(secondRow)))), 4)\n hiddenLayerOutput = [rowOne, rowTwo]\n #print(f\"\\nHidden layer output\\n{hiddenLayerOutput}\")\n #input of output layer\n inputOfOutputLayer = round((TW_weightVector[0]*rowOne) + (TW_weightVector[1]*rowTwo), 5)\n #print(f\"\\nInput Of output layer\\n{inputOfOutputLayer}\\n\")\n #output of output layer\n outputOfOutputLayer = round(1/(1+math.exp(-(inputOfOutputLayer))), 4) \n #print(f\"\\nOutput Of output layer\\n{outputOfOutputLayer}\\n\")\n #print(\"*\"*30)\n #print(f\"\\nOutput Of System : {outputOfOutputLayer}\")\n #print(f\"\\nTarget output : {targetOutput}\")\n #print(\"*\"*30)\n #getting the e rror\n error = targetOutput - outputOfOutputLayer\n # print(f\"Error : {error}\")\n # time.sleep(2)\n\n errorSquared = round(pow((error),2), 5)\n #print(f\"(Error)^2: {errorSquared}\")\n squaredError.append(errorSquared)\n #new Wvector \n #change in Wvector = neX using Delta rule(Wildrow-Hoff): e=d\n d = error*outputOfOutputLayer*(1-outputOfOutputLayer)\n partOne = round((learningRate*d*rowOne),5)\n partTwo = round((learningRate*d*rowTwo),5)\n changeIn_Wvector = [partOne,partTwo] \n new_Wvector = [[TW_weightVector[0]+partOne],[TW_weightVector[1]+partTwo]]\n #print(f\"\\nNew Wvector\\n{new_Wvector}\")\n #new Vvector\n #calculating quantities e and d*\n eRowOne = TW_weightVector[0]*d\n eRowTwo = TW_weightVector[1]*d \n e = [eRowOne,eRowTwo]\n\n dStarRowOne = eRowOne*hiddenLayerOutput[0]*(1-hiddenLayerOutput[0])\n dStarRowTwo = eRowTwo*hiddenLayerOutput[1]*(1-hiddenLayerOutput[1])\n dStar = [dStarRowOne,dStarRowTwo]\n\n #calculating Xprime {O}i*Transpose(dStar)\n Xprime = [[x1*dStar[0],x1*dStar[1]],[x2*dStar[0],x2*dStar[1]]]\n\n #change in Vvector = nXprime\n changeIn_Vvector = [[learningRate*Xprime[0][0],learningRate*Xprime[0][1]],[learningRate*Xprime[1][0],learningRate*Xprime[1][1]]]\n new_Vvector = [[VweightVector[0][0]+changeIn_Vvector[0][0],VweightVector[0][1]+changeIn_Vvector[0][1]],[VweightVector[1][0]+changeIn_Vvector[1][0],VweightVector[1][1]+changeIn_Vvector[1][1]]]\n #print(f\"\\nNew Vvector\\n{new_Vvector}\")#confirm with slide 15 BP NN values(wrong values)\n newWeights =[new_Vvector, new_Wvector] \n return newWeights\n\n\nweights = [V_weightVector,TW_weightVector]\n#$for 
epoch in range(100):\nepoch = 1\nwhile(True):\n #print(\"*\"*30)\n #print(\"*\"*30)\n #print(f\"\\nEPOCH {epoch} \")\n #print(\"*\"*30)\n #print(\"*\"*30)\n for i in range(len(dataset)):\n #print(f\"\\nBatch {i+1}\")\n \n #print(f\"Weights before\\n\\n{weights}\\n\\n\")\n \n weights = batch(dataset[i][0],dataset[i][1],weights[0],weights[1], dataset[i][2])\n weights = list(weights) #converting tuple to list\n #print(f\"Weights after\\n\\n{weights}\\n\\n\")\n \n #print(f\"\\nNew Vvector\\n: {weights[0]}\")\n #print(f\"\\nNew Wvector\\n: {weights[1]}\")\n \n errorRate = (sum(squaredError))/len(dataset)\n print(f\"\\nError Rate : {squaredError}\") \n time.sleep(2)\n\n #print(f\"\\nTolerance Value : {toleranceValue}\")\n #print(f\"\\nEnd of epoch {epoch}\")\n # input() pause here to analyze epoch\n squaredError.clear()\n if(errorRatemaxProd:\r\n\t\tmaxProd=temp\r\n\t\t\r\nprint(maxProd)","sub_path":"Euler Projekt 008 - Largest Product in a Series/EulerProjekt_8.py","file_name":"EulerProjekt_8.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"194314606","text":"t = int(input())\n\nfor x in range(t):\n n = input()\n nList = [ord(i) for i in n]\n loopStatus = 0\n while loopStatus < 1:\n for i in nList:\n if nList[0] == 52:\n nList.append(51)\n del nList[0]\n else:\n nList.append(nList[0])\n del nList[0]\n nList = [chr(i) for i in nList]\n a = \"\".join(nList)\n a = int(a)\n b = int(n)-a\n bList = [ord(i) for i in str(b)]\n try:\n bList.index(52)\n except ValueError:\n print ('Case #' + str(x+1) + ': ' + str(a) + ' ' + str(b))\n loopStatus = 1\n","sub_path":"codejam/Problem_01/GCJ2019_01.py","file_name":"GCJ2019_01.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"40704377","text":"\n\nfrom xai.brain.wordbase.nouns._scotsman import _SCOTSMAN\n\n#calss header\nclass _SCOTSMEN(_SCOTSMAN, ):\n\tdef __init__(self,): \n\t\t_SCOTSMAN.__init__(self)\n\t\tself.name = \"SCOTSMEN\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"scotsman\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_scotsmen.py","file_name":"_scotsmen.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"321742612","text":"class Solution:\n \"\"\"\n @param grid: a 2D grid\n @return: An integer\n \"\"\"\n def shortestDistance(self, grid):\n # 将所有房子作为起始点,对所有空地进行搜索,记录下他们到空地的距离,并相加,返回距离最短的点\n shortest_path = -1\n # 判断边界\n if grid is None:\n return shortest_path\n n = len(grid)\n if n == 0:\n return shortest_path\n m = len(grid[0]) \n if m == 0:\n return shortest_path\n \n houses = [] \n empty_area = {} \n for i in range(n):\n for j in range(m):\n if grid[i][j] == 1:\n houses.append((i,j))\n if grid[i][j] == 0:\n empty_area[(i,j)] = 0\n num_houses = len(houses)\n \n deltaX = [1,0,-1,0]\n deltaY = [0,1,0,-1]\n \n count_visit = [[0 for j in range(m)] for i in range(n)]\n for house in houses:\n q = collections.deque()\n q.append(house)\n \n # 标记是否访问过\n visited = [[False for j in range(m)] for i in range(n)]\n step = 0\n \n while q:\n level_size = len(q)\n step += 1\n for iter_num in range(level_size):\n house = q.popleft()\n x, y = house\n \n for i in range(4):\n new_x = x + deltaX[i]\n new_y = y + deltaY[i]\n \n if self.isEmptyArea(grid, new_x, new_y) and not visited[new_x][new_y]:\n count_visit[new_x][new_y] += 1\n empty_area[(new_x, new_y)] += step\n q.append((new_x, new_y))\n visited[new_x][new_y] = True\n \n postOffice = None\n shortest_path = float(\"inf\")\n for key, value in empty_area.items():\n x, y = key\n if count_visit[x][y] == num_houses:\n if value < shortest_path:\n shortest_path = value\n postOffice = (x, y)\n\n return shortest_path if shortest_path != float(\"inf\") else -1\n \n \n def isEmptyArea(self, grid, x, y):\n n = len(grid)\n m = len(grid[0])\n if (x >= 0) and (x < n) and (y >= 0) and (y < m) and grid[x][y] == 0:\n return True\n else:\n return False","sub_path":"专题学习/BFS/BuildPostOfficeII.py","file_name":"BuildPostOfficeII.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"317763250","text":"from collections import OrderedDict\n\nfrom django.core.urlresolvers import reverse\n\n\nclass Sidebar():\n\n def __init__(self):\n self.sidebar = OrderedDict()\n self.sidebar['main'] = OrderedDict()\n\n self.sidebar['main']['dashboard'] = {\n 'name': 'Dashboard',\n 'icon': 'dashboard',\n 'link': reverse('home'),\n 'roles': ['admin', 'manager', 'user']\n }\n self.sidebar['main']['assessments'] = {\n 'name': 'Assessments',\n 'icon': 'check-square-o',\n 'link': reverse('view-assessment'),\n 'roles': ['admin', 'manager', 'user']\n }\n self.sidebar['main']['document'] = {\n 'name': 'Document library',\n 'icon': 'folder-open-o',\n 'link': reverse('document-home'),\n 'roles': ['admin', 'manager', 'user']\n }\n self.sidebar['main']['invites'] = {\n 'name': 'Invitations',\n 'icon': 'plus-square',\n 'link': reverse('survey-invite'),\n 'roles': ['admin', 'manager']\n }\n self.sidebar['main']['directory'] = {\n 'name': 'Directory',\n 'icon': 'book',\n 'link': reverse('directory'),\n 'roles': ['admin', 'manager', 'user']\n }\n self.sidebar['main']['faq'] = {\n 'name': 'Help & FAQ',\n 'icon': 'question-circle',\n 'link': reverse('faq'),\n 'roles': ['admin', 'manager', 'user']\n }\n\n self.sidebar['settings'] = OrderedDict()\n\n self.sidebar['settings']['profile'] = {\n 'name': 'My profile',\n 'icon': 'user',\n 'link': reverse('edit-profile'),\n 'roles': ['admin', 'manager', 'user']\n }\n self.sidebar['settings']['users'] = {\n 'name': 'Organization & users',\n 'icon': 'users',\n 'link': reverse('organization'),\n 'roles': ['admin']\n }\n self.sidebar['settings']['subscription'] = {\n 'name': 'Subscription & billing',\n 'icon': 'dollar',\n 'link': reverse('subscription'),\n 'roles': ['admin']\n }\n self.sidebar['settings']['logout'] = {\n 'name': 'Logout',\n 'icon': 'power-off',\n 'link': reverse('logout'),\n 'roles': ['admin', 'manager', 'user']\n }\n\n def get(self):\n return self.sidebar\n\n def set_sidebar_item_active(self, section, item):\n self.sidebar[section][item]['class'] = 'active'\n return self.sidebar\n","sub_path":"testAutomation/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"515640556","text":"\"\"\"\nhttps://coder-coder.com/build-flexbox-website-layout/\n\"\"\"\nfrom flask import Flask, render_template, url_for\nfrom pathlib import Path\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef landing():\n return render_template('landing.html')\n\n\n@app.route('/graph')\ndef graph():\n with open(Path(__file__).parent / \"data\" / \"price.csv\", \"r\") as f:\n price = f.read()\n\n price_labels = []\n price_data = []\n for pr in price.split('\\n'):\n try:\n d = pr.split(',')\n\n price_labels.append(\n d[0]\n )\n\n price_data.append(\n d[4]\n )\n except Exception:\n pass\n\n return render_template(\n 'graph.html',\n price_labels=price_labels,\n price_data=price_data\n )\n\n\n@app.route('/news')\ndef news():\n return render_template('news.html')\n\n\n@app.route('/stats')\ndef stats():\n return render_template('stats.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"29370463","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport pigpio\r\n\r\nfrom phy import phytogpio\r\n\r\n\r\nclass stepper:\r\n\r\n pi = None\r\n enable = 0\r\n physenable = 0\r\n physcw = 0\r\n physccw = 0\r\n pin1 = 0\r\n pin2 = 0\r\n pin3 = 0\r\n pin4 = 0\r\n conv360 = 540 # need to find proper value\r\n\r\n def __init__(self, pi, enable, pin1, pin2, pin3, pin4):\r\n self.pi = pi\r\n (self.physenable, self.physcw, self.physccw) = (enable, cw, ccw)\r\n self.enable = phytogpio[enable]\r\n (self.pin1, self.pin2, self.pin3, self.pin4) = \\\r\n (phytogpio[pin1], phytogpio[pin2], phytogpio[pin3],\r\n phytogpio[pin4])\r\n self.pi.set_mode(self.enable, pigpio.OUTPUT)\r\n self.pi.set_mode(self.pin1, pigpio.OUTPUT)\r\n self.pi.set_mode(self.pin2, pigpio.OUTPUT)\r\n self.pi.set_mode(self.pin3, pigpio.OUTPUT)\r\n self.pi.set_mode(self.pin4, pigpio.OUTPUT)\r\n self.pi.write(self.enable, 1)\r\n\r\n def stepForward(self, delay, steps):\r\n for i in range(0, steps):\r\n setStep(1, 0, 0, 1)\r\n time.sleep(delay)\r\n setStep(0, 1, 1, 0)\r\n time.sleep(delay)\r\n setStep(0, 1, 0, 1)\r\n time.sleep(delay)\r\n setStep(1, 0, 0, 1)\r\n time.sleep(delay)\r\n\r\n def stepBackward(self, delay, steps):\r\n for i in range(0, steps):\r\n setStep(1, 0, 0, 1)\r\n time.sleep(delay)\r\n setStep(0, 1, 0, 1)\r\n time.sleep(delay)\r\n setStep(0, 1, 1, 0)\r\n time.sleep(delay)\r\n setStep(1, 0, 1, 0)\r\n time.sleep(delay)\r\n\r\n def setStep(v1, v2, v3, v4):\r\n self.pi.write(self.pin1, v1)\r\n self.pi.write(self.pin2, v2)\r\n self.pi.write(self.pin3, v3)\r\n self.pi.write(self.pin4, v4)\r\n\r\n def angleMovement(self, angle, timeDiff):\r\n numSteps = conv360 / 360 * angle\r\n if angle > 0:\r\n self.stepForward(timeDiff, numSteps)\r\n else:\r\n numSteps = -1 * numSteps\r\n self.stepBackward(timeDiff, numSteps)\r\n\r\n def stop(self):\r\n self.setStep(0, 0, 0, 0)","sub_path":"stepper.py","file_name":"stepper.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"381927429","text":"\n\n# 433. Minimum Genetic Mutation\n\n# A gene string can be represented by an 8-character long string, with choices from \"A\", \"C\", \"G\", \"T\".\n\n# Suppose we need to investigate about a mutation (mutation from \"start\" to \"end\"), where ONE mutation is defined as ONE single character changed in the gene string.\n\n# For example, \"AACCGGTT\" -> \"AACCGGTA\" is 1 mutation.\n\n# Also, there is a given gene \"bank\", which records all the valid gene mutations. A gene must be in the bank to make it a valid gene string.\n\n# Now, given 3 things - start, end, bank, your task is to determine what is the minimum number of mutations needed to mutate from \"start\" to \"end\".\n# If there is no such a mutation, return -1.\n\n# Note:\n\n# Starting point is assumed to be valid, so it might not be included in the bank.\n# If multiple mutations are needed, all mutations during in the sequence must be valid.\n# You may assume start and end string is not the same.\n# Example 1:\n\n# start: \"AACCGGTT\"\n# end: \"AACCGGTA\"\n# bank: [\"AACCGGTA\"]\n\n# return: 1\n\n# Example 2:\n\n# start: \"AACCGGTT\"\n# end: \"AAACGGTA\"\n# bank: [\"AACCGGTA\", \"AACCGCTA\", \"AAACGGTA\"]\n\n# return: 2\n\n# Example 3:\n\n# start: \"AAAAACCC\"\n# end: \"AACCCCCC\"\n# bank: [\"AAAACCCC\", \"AAACCCCC\", \"AACCCCCC\"]\n\n# return: 3\n\n\nimport collections\nclass minMutation(object):\n\n\n def doit(self, start, end, bank):\n \"\"\"\n :type start: str\n :type end: str\n :type bank: List[str]\n :rtype: int\n \"\"\"\n def mutation(current, next):\n if len(current) != len(next):\n return False\n mutation = False\n for i in range(len(current)):\n if current[i] != next[i]:\n if mutation == True:\n return False\n mutation = True\n\n return mutation\n\n\n #########\n queue = collections.deque()\n while queue:\n current, prev, step = queue.popleft()\n \n if current == end:\n return step\n \n for c in bank:\n if mutation(c, current) and c != prev:\n queue.append([c, current, step+1])\n\n return -1\n\n\n\n\n def doit(self, start, end, bank):\n \"\"\"\n :type start: str\n :type end: str\n :type bank: List[str]\n :rtype: int\n \"\"\"\n def viableMutation(current_mutation, next_mutation):\n changes = 0\n for i in range(len(current_mutation)):\n if current_mutation[i] != next_mutation[i]:\n changes += 1\n return changes == 1\n\n queue = collections.deque()\n queue.append([start, start, 0]) # current, previous, num_steps\n\n while queue:\n current, previous, num_steps = queue.popleft()\n if current == end: # in BFS, the first instance of current == end will yield the minimum\n return num_steps\n for string in bank:\n if viableMutation(current, string) and string != previous:\n queue.append([string, current, num_steps+1])\n\n return -1\n\n\n\n def doit1(self, start, end, bank):\n \"\"\"\n :type start: str\n :type end: str\n :type bank: List[str]\n :rtype: int\n \"\"\"\n def mutation(s, e):\n if len(s) != len(e):\n return False\n\n ismutation = False\n for i in range(len(s)):\n if s[i] != e[i]:\n if ismutation:\n return False\n ismutation = True\n\n return ismutation\n\n\n maxstep = len(bank)\n sSteps, eSteps = 0, 0\n sGroup, eGroup = [start], [end]\n sBank, eBank = set(), set()\n\n for i, c in enumerate(bank):\n if start == c:\n sBank.add(i)\n\n if end == c:\n eBank.add(i)\n\n if not eBank:\n return -1\n\n for _ in range(maxstep):\n\n stmp = set()\n for s in sGroup:\n for i, a in enumerate(bank):\n if i not in sBank and mutation(s, a):\n stmp.add(a)\n sBank.add(i)\n\n etmp = set()\n for e in 
eGroup:\n for i, a in enumerate(bank):\n if i not in eBank and mutation(e, a):\n etmp.add(a)\n eBank.add(i)\n\n for c in eGroup:\n if c in stmp:\n return sSteps + eSteps + 1\n\n for c in sGroup:\n if c in etmp:\n return sSteps + eSteps + 1\n\n for c in stmp:\n if c in etmp:\n return sSteps + eSteps + 2\n\n sSteps += 1\n eSteps += 1\n \n sGroup, eGroup = list(stmp), list(etmp)\n\n if not sGroup or not eGroup:\n break\n \n return -1\n \n\n\nif __name__==\"__main__\":\n\n\n res = minMutation().doit(\"AACCGGTT\", \"AACCGGTA\", [\"AACCGGTA\"])\n\n \n res = minMutation().doit(\"AACCGGTT\", \"AAACGGTA\", [\"AACCGGTA\", \"AACCGCTA\", \"AAACGGTA\"])\n\n\n res = minMutation().doit(\"AAAAACCC\", \"AACCCCCC\", [\"AAAACCCC\", \"AAACCCCC\", \"AACCCCCC\"])\n\n\n res = minMutation().doit1(\"AAAAAAAA\", \"CCCCCCCC\", [\"AAAAAAAA\",\"AAAAAAAC\",\"AAAAAACC\",\"AAAAACCC\",\"AAAACCCC\",\"AACACCCC\",\"ACCACCCC\",\"ACCCCCCC\",\"CCCCCCCA\"])\n\n pass","sub_path":"PythonLeetcode/LeetCodeE/433_MinimumGeneticMutation.py","file_name":"433_MinimumGeneticMutation.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"212789127","text":"import datetime\nimport random\nimport time\nimport seg\nimport checkinput\n\ndef hraj():\n\tr = random.randrange(8,12)\n\tfinal = datetime.datetime.now() + datetime.timedelta(seconds = r);\n\tr = random.randrange(3,6)\n\n\twhile True:\n\t\tnow = datetime.datetime.now()\n\t\tif now < final and (final-now).seconds > r:\n\t\t\ta = (final - now).seconds\n\t\t\tx = \"\"\n\t\t\tif a < 10:\n\t\t\t\tx += \" \" + str(a)\n\t\t\telse:\n\t\t\t\tx += str(a)\n\t\t\tx += \" \"\n\t\t\ta = int(round((final-now).microseconds / 10000))\n\t\t\tif a < 10:\n\t\t\t\tx += '0' + str(a)\n\t\t\telse:\n\t\t\t\tx+=str(a)\n\t\t\tseg.seg(x)\n\n\t\tbutton = checkinput.stlacene_tlacitko()\n\t\tif button != 0:\n\t\t\tif now > final:\n\t\t\t\treturn button, (now - final).microseconds / 1000000 + (now - final).seconds\t\t\t\t#tu vyhral ten kto stlacil\n\t\t\telse:\n\t\t\t\treturn (button % 2) + 1, (now - final).microseconds / 1000000 + (now - final).seconds\t\t#tu vyhral ten kto nestlacil\n\t\tif now > final and (now - final).seconds > 10: #ked dlho nikto nic nestlaci\n\t\t\treturn 0, 0","sub_path":"hra.py","file_name":"hra.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"363196754","text":"class Solution:\n def plusOne(self, digits: list) -> list:\n for i in range(len(digits) - 1, -1, -1):\n digits[i] += 1\n digits[i] = digits[i] % 10\n if digits[i] % 10 != 0:\n return digits\n digits.append(0)\n digits[0] = 1\n return digits\n\n\ns = Solution()\nprint(s.plusOne([9, 9, 9]))\n","sub_path":"leetcode/shuzu/05_加1.py","file_name":"05_加1.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"538221411","text":"for t in range(1, 1+int(input())):\n trucks = []\n for n in range(int(input())):\n s, e = map(int, input().split())\n trucks.append((s, e))\n\n # 이게 키 포인트인듯;; 나는 계속 start순으로 정렬했는데, 일찍 끝나는 순으로 정렬\n trucks = sorted(trucks, key=lambda x: x[1])\n\n cnt = 0\n finish = 0\n\n # print(trucks)\n\n while trucks:\n start, end = trucks.pop(0)\n # 그래서 이전 작업시간(finish)랑 다음 작업의 시작시간을 비교해서 시작시간이 큰 경우에만 count\n if start >= finish:\n cnt += 1\n finish = end\n\n print(\"#{} {}\".format(t, cnt))\n","sub_path":"python/SWEC/09_Greedy/SWEA5202_화물도크.py","file_name":"SWEA5202_화물도크.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"355272318","text":"#coding:utf-8\nimport numpy as np\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, Activation, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\n\nfrom keras.datasets import mnist\n\nepochs = 40\nbatch_size = 100\n\n\ndef load_data():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train= np.asarray(x_train, dtype='float32')\n x_test = np.asarray(x_test,dtype='float32')\n x_train = x_train/255.0\n x_test = x_test/255.0\n x_train = x_train.reshape(x_train.shape[0], 28,28,1)\n x_test = x_test.reshape(x_test.shape[0], 28,28,1)\n\n y_train = np_utils.to_categorical(y_train,10)\n y_test= np_utils.to_categorical(y_test, 10)\n return (x_train, y_train), (x_test, y_test)\n\n\ndef set_model():\n model = Sequential()\n model.add(Conv2D(4, kernel_size=(2,2), input_shape=(28,28,1)))\n model.add(Activation('tanh'))\n model.add(MaxPooling2D((2,2)))\n model.add(Conv2D(8, kernel_size=(2,2)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Flatten())\n\n model.add(Dense(100))\n model.add(Activation('relu'))\n model.add(Dropout(0.7))\n model.add(Dense(10))\n model.add(Activation('softmax'))\n sgd = SGD(lr=0.01,momentum=0.9,nesterov=True)\n model.compile(sgd,loss=keras.losses.categorical_crossentropy)\n return model\n\n\ndef train_model(model, x_train, y_train):\n model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)\n model.save('mnist.h5')\n return model\n\n\nif __name__ == '__main__':\n (x_train, y_train), (x_test, y_test) = load_data()\n # model = set_model()\n # model = train_model(model, x_train, y_train)\n\n model = load_model('mnist.h5')\n print(model.layers[0].input_shape)\n print(model.summary())\n (_, _), (_, y_test_ori) = mnist.load_data()\n classes = model.predict_classes(x_test) # 输出的是最终分类结果,如5、7\n acc = np.mean(np.equal(classes, y_test_ori))\n print(acc)\n\n res = model.predict(x_test) # 输出的是概率\n print(np.argmax(res[0]), y_test_ori[0])\n","sub_path":"mnist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"338283171","text":"import sys\nfrom collections import deque\nsys.stdin = open('연산.txt','r')\n\nT = int(input())\nfor tc in range(1,T+1):\n S,G = map(int,input().split())\n Q = deque()\n Q.append((S,0))\n visit = set()\n while Q:\n w,cnt = Q.popleft()\n cnt += 1\n if w == G:\n break\n else:\n if w-1 not in visit:\n if 0<=w-1<=1000000:\n visit.add(w-1)\n Q.append((w-1,cnt))\n if w+1 not in visit:\n if 0<=w+1<=1000000:\n visit.add(w+1)\n Q.append((w+1,cnt))\n if w*2 not in visit:\n if 0<=w*2<=1000000:\n visit.add(w*2)\n Q.append((w*2,cnt))\n if w-10 not in visit:\n if 0<=w-10<=1000000:\n visit.add(w-10)\n Q.append((w-10,cnt))\n print('#{} {}'.format(tc,cnt-1))\n \n\n\n \n \n ","sub_path":"10월/1016/연산.py","file_name":"연산.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"75033110","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport json\nimport pathlib\n\n\nTEMPLATE_PROJECT_DIR = (\n pathlib.Path(__file__).parent\n / \"..\"\n / \"..\"\n / \"..\"\n / \"apps\"\n / \"microtvm\"\n / \"zephyr\"\n / \"template_project\"\n).resolve()\n\nBOARDS = TEMPLATE_PROJECT_DIR / \"boards.json\"\n\n\ndef zephyr_boards() -> dict:\n \"\"\"Returns a dict mapping board to target model\"\"\"\n with open(BOARDS) as f:\n board_properties = json.load(f)\n\n boards_model = {board: info[\"model\"] for board, info in board_properties.items()}\n return boards_model\n\n\nZEPHYR_BOARDS = zephyr_boards()\n\n\ndef qemu_boards(board: str):\n \"\"\"Returns True if board is QEMU.\"\"\"\n with open(BOARDS) as f:\n board_properties = json.load(f)\n\n qemu_boards = [name for name, board in board_properties.items() if board[\"is_qemu\"]]\n return board in qemu_boards\n\n\ndef has_fpu(board: str):\n \"\"\"Returns True if board has FPU.\"\"\"\n with open(BOARDS) as f:\n board_properties = json.load(f)\n\n fpu_boards = [name for name, board in board_properties.items() if board[\"fpu\"]]\n return board in fpu_boards\n","sub_path":"tests/micro/zephyr/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"350680752","text":"import heapq\nclass Solution:\n \"\"\"\n @param nums: an integer array\n @param k: An integer\n @return: the top k largest numbers in array\n \"\"\"\n def topk(self, nums, k):\n # write your code here\n self.heap = []\n for num in nums:\n heapq.heappush(self.heap,num)\n if len(self.heap) > k:\n heapq.heappop(self.heap)\n res = []\n while len(self.heap)>0:\n item = heapq.heappop(self.heap)\n res.append(item)\n res.reverse()\n return res\na = Solution()\nprint(a.topk([3,10,1000,-99,4,100],3))","sub_path":"lintcode544.py","file_name":"lintcode544.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"351394885","text":"#!/usr/bin/env python\n\nimport scrollphat as sp\nimport sys, time, random, math\nfrom random import randint\n\nsp.set_rotate(False)\nsp.set_brightness(50)\n\nlines = open('/home/pi/Pimoroni/scrollphat/my_scrolls/text_to_scroll.txt').read().splitlines()\n\n#while True:\ntry: \n line_to_scroll = random.choice(lines)\n sp.write_string(\" * * * \" + line_to_scroll + \" \")\n string_length = sp.buffer_len()\n while string_length > 0:\n sp.scroll()\n time.sleep(0.065)\n string_length -= 1\nexcept KeyboardInterrupt:\n sp.clear()\n sys.exit(-1)\n\n","sub_path":"my_scrolls/scroll_rand_1line_inverted.py","file_name":"scroll_rand_1line_inverted.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"22597500","text":"# Questo script e' il core del codice che faccimo girare sulle schede wifi con micropython\n# il sistema \n\nimport network as net\nimport socket as sock\nimport machine \nimport os\nimport time\nimport ubinascii\nimport urandom\nimport math\n\n\ndef translate(value, leftMin, leftMax, rightMin, rightMax):\n # Figure out how 'wide' each range is\n leftSpan = leftMax - leftMin\n rightSpan = rightMax - rightMin\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = float(value - leftMin) / float(leftSpan)\n\n # Convert the 0-1 range into a value in the right range.\n return rightMin + (valueScaled * rightSpan)\n\ndef get_random_time():\n num = translate(urandom.getrandbits(16), 0, 65535, 0, 10000)\n integer = math.floor(num)/1000 # the translate function returns a float, which we gotta deal with somehow\n return integer\n\ndef log(message):\n print(message)\n\ndef get_ip_address():\n log('get local ip')\n ip_address = ''\n sta_if = net.WLAN(net.STA_IF)\n temp = sta_if.ifconfig()\n ip_address = temp[0]\n return ip_address\n\ndef toggle(p):\n log('toggle pin')\n p.value(not p.value())\n\ndef stripInMessage(inline):\n inline = str(inline)\n inline = inline.replace('b\\'','')\n outline = inline.replace('\\'','')\n return outline\n\ndef checkFileAndCreate(dir,filename):\n dirfilename = dir+'/'+filename\n log('check file ' + dirfilename)\n listfile = os.listdir(dir)\n if not (filename in listfile):\n log('file ' + dirfilename + ' not found, it will created')\n file = open(dirfilename,'w')\n file.close()\n\ndef FileGetServerIP(filename):\n log('getting SERVER IP from file: ' + filename)\n global ServerIP\n file = open(filename,'r')\n strServerIP = file.readline()\n file.close()\n if strServerIP!='':\n log('SERVER IP found: ' + strServerIP)\n elements = strServerIP.split(',')\n ServerIP[0] = elements[0]\n ServerIP[1] = int(elements[1])\n log('And set')\n return ServerIP\n\ndef FileSetServerIP(IP,filename):\n log('putting SERVER IP to file: ' + filename)\n try:\n file = open(filename,'w')\n file.write(IP[0]+','+str(IP[1]))\n file.close()\n log('done')\n except Exception as ex:\n log(str(ex))\n \n\ndef statusPin(pins,idpin):\n log('Getting status pin')\n pinval = pins[idpin].value()\n if pinval==0:\n message = mac + ',PIN,' + elements[1] + ',OFF'\n elif pinval==1:\n message = mac + ',PIN,' + elements[1] + ',ON'\n return message\n\ndef closeSocketServer():\n global sockServer\n log('Closing socket to receive form server')\n try: \n sockServer.close()\n sockServer = None\n sockServer = sock.socket(sock.AF_INET,sock.SOCK_STREAM)\n sockServer.settimeout(0.1)\n except Exception as ex:\n log(str(ex))\n\ndef bindSocketServer():\n global sockServer\n global ServerIP\n log('Opening socket to receive form server at: ' + str(ServerIP))\n try:\n closeSocketServer()\n sockServer.bind(('',ServerIP[1]))\n sockServer.listen(1)\n except Exception as ex:\n log(str(ex))\n\ndef sendSocketBroadcast(message):\n global ServerIP\n try:\n log('Try to send message to the server: '+ message + ' at : ' + str((ServerIP[0],BROADCASTPORT))) \n sockBroadCast.sendto(message,(ServerIP[0],BROADCASTPORT))\n log('Sent message to the server: '+ message) \n except Exception as ex:\n log(str(ex))\n\nlog('start initialization')\nmac = ubinascii.hexlify(net.WLAN().config('mac'),':').decode()\nip = get_ip_address()\n\nServerIP = ['',0]\n\nfiledata = 'serverip.data'\ndirdata = 'data'\ncheckFileAndCreate(dirdata,filedata)\nIPfile = dirdata + '/' + filedata\nServerIP = 
FileGetServerIP(IPfile)\nsockServerRun = False\n\nboardKind = 'H'\n\nBROADCASTPORT = 51082\nsockBroadCast=sock.socket(sock.AF_INET,sock.SOCK_DGRAM)\nsockBroadCast.settimeout(0.1)\nsockBroadCast.bind(('',BROADCASTPORT))\n\nip = get_ip_address()\nmessage = 'D,' + mac+','+ip+','+boardKind\n\n\nsockServer=sock.socket(sock.AF_INET,sock.SOCK_STREAM)\nsockServer.settimeout(0.1)\n\nif ServerIP[1]!=0:\n bindSocketServer()\n sendSocketBroadcast(message)\n\npins = [machine.Pin(12, machine.Pin.OUT)]\npins[0].on()\n\nsec2millis = 1000\nmin2sec = 60\nmin2millis = min2sec*sec2millis\n\nmillis_old = 0\nwaitTime = 0\nlog('start main loop')\nwhile True:\n try:\n millis_new = int(round(time.time()*sec2millis))\n millis_pass = millis_new-millis_old\n if (millis_pass/min2millis>=waitTime):\n millis_old = millis_new\n waitTime = 20\n ip = get_ip_address()\n message = 'D,' + mac+','+ip+','+boardKind\n sendSocketBroadcast(message)\n try:\n m=sockBroadCast.recvfrom(1024)\n line = stripInMessage(m[0])\n log('Recived message from broadcast: ' + line)\n elements = list(line.split(','))\n \n if elements[0].strip()=='S':\n waitTime = get_random_time()\n log('New waiting time: ' + str(waitTime))\n if (ServerIP[0] != elements[1]):\n log('build SERVERIP')\n ServerIP[0] = elements[1]\n ServerIP[1] = int(elements[2])\n\n FileSetServerIP(ServerIP,IPfile)\n\n bindSocketServer()\n else:\n log('nothong change, server ip is: ' +str(ServerIP[0]))\n log('nothong change, server port is: ' +str(ServerIP[1]))\n\n \n except Exception as ex:\n pass\n # if str(ex).strip()=='[Errno 110] ETIMEDOUT':\n # pass\n # if str(ex).strip()=='[Errno 22] EINVAL':\n # pass\n # else:\n # log('loop1')\n # log(str(ex))\n\n try:\n conn, addr = sockServer.accept()\n conn.settimeout(2)\n log('Connection address:' + str(addr))\n # time.sleep(0.01)\n while 1:\n try:\n temp = conn.recv(1024)\n if not temp:\n break\n messageFromServer = stripInMessage(temp)\n log('Recived message from server: ' + messageFromServer)\n elements = messageFromServer.split(',')\n messageToServer = 'ERROR'\n if elements[0]=='PIN':\n idpin = int(elements[1])\n if(len(pins)-1>=idpin):\n if elements[2]=='ON':\n pins[idpin].on()\n messageToServer = statusPin(pins,idpin)\n elif elements[2]=='OFF':\n pins[idpin].off()\n messageToServer = statusPin(pins,idpin)\n elif elements[2]=='STAT':\n messageToServer = statusPin(pins,idpin)\n conn.send(messageToServer)\n break\n except Exception as ex:\n pass\n conn.close()\n except Exception as ex:\n pass\n # if str(ex).strip()=='[Errno 110] ETIMEDOUT':\n # pass\n # if str(ex).strip()=='[Errno 22] EINVAL':\n # pass\n # else:\n # log('loop2')\n # log(str(ex))\n \n \n # if (millis_new-millis_old>=500):\n # toggle(pin)\n # # time.sleep_messageFromServer(1000)\n # sS.sendto('the value is ' + str(pin.value()), (ServerIP[0],12345))\n except KeyboardInterrupt:\n log('Detected KeyboardInterrupt')\n break\n except Exception as ex:\n if str(ex)=='[Errno 110] ETIMEDOUT':\n pass\n if str(ex)=='[Errno 11] EAGAIN':\n pass\n if str(ex)=='[Errno 22] EINVAL':\n pass\n else:\n log('loop3')\n log(str(ex))\n\nsockBroadCast.close()\ncloseSocketServer()\nlog('Exiting')\n","sub_path":"WemosD1Mini/mainMP_heater.py","file_name":"mainMP_heater.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"198295724","text":"import pytest\nfrom . import conftest as fix\nimport connaisseur.workload_object as wl\nimport connaisseur.exceptions as exc\nfrom connaisseur.image import Image\n\n\nstatic_k8s = [\n {\n \"kind\": \"Deployment\",\n \"apiVersion\": \"apps/v1\",\n \"namespace\": \"default\",\n \"name\": \"charlie-deployment\",\n },\n {\n \"kind\": \"Pod\",\n \"apiVersion\": \"v1\",\n \"namespace\": \"default\",\n \"name\": \"charlie-deployment-76fbf58b7d-\",\n },\n {\n \"kind\": \"ReplicaSet\",\n \"apiVersion\": \"apps/v1\",\n \"namespace\": \"default\",\n \"name\": \"charlie-deployment-558576bf6c\",\n },\n {\n \"kind\": \"CronJob\",\n \"apiVersion\": \"batch/v1beta1\",\n \"namespace\": \"default\",\n \"name\": \"yooob\",\n },\n {},\n {\n \"kind\": \"Deployment\",\n \"apiVersion\": \"apps/v1\",\n \"namespace\": \"default\",\n \"name\": \"test\",\n },\n]\n\n\n@pytest.fixture()\ndef adm_req_sample_objects():\n return [\n fix.get_admreq(t)[\"request\"][\"object\"]\n for t in (\n \"deployments\",\n \"pods\",\n \"replicasets\",\n \"cronjob\",\n \"wrong_version\",\n \"deployments_multi_image\",\n )\n ]\n\n\n@pytest.mark.parametrize(\n \"index, wl_class\",\n [\n (0, wl.WorkloadObject),\n (1, wl.Pod),\n (2, wl.WorkloadObject),\n (3, wl.CronJob),\n (5, wl.WorkloadObject),\n ],\n)\ndef test_k8s_object_new(adm_req_sample_objects, index, wl_class):\n obj = wl.WorkloadObject(adm_req_sample_objects[index], \"default\")\n assert isinstance(obj, wl_class)\n\n\n@pytest.mark.parametrize(\n \"index, exception\",\n [\n (0, fix.no_exc()),\n (1, fix.no_exc()),\n (2, fix.no_exc()),\n (3, fix.no_exc()),\n (4, pytest.raises(exc.UnknownAPIVersionError)),\n (5, fix.no_exc()),\n ],\n)\ndef test_k8s_object_init(adm_req_sample_objects, index, exception):\n with exception:\n obj = wl.WorkloadObject(adm_req_sample_objects[index], \"default\")\n assert obj.kind == static_k8s[index][\"kind\"]\n assert obj.api_version == static_k8s[index][\"apiVersion\"]\n assert obj.namespace == static_k8s[index][\"namespace\"]\n assert obj.name == static_k8s[index][\"name\"]\n\n\n@pytest.mark.parametrize(\n \"index, parent_list, exception\",\n [\n (0, {}, fix.no_exc()),\n (\n 1,\n {\n (\"containers\", 0): Image(\n \"securesystemsengineering/charlie-image@sha256\"\n \":91ac9b26df583762234c1cdb2fc930364754ccc59bc7\"\n \"52a2bfe298d2ea68f9ff\"\n ),\n },\n fix.no_exc(),\n ),\n (2, {}, pytest.raises(exc.ParentNotFoundError)),\n (3, {}, fix.no_exc()),\n ],\n)\ndef test_k8s_object_parent_containers(\n adm_req_sample_objects, m_request, index, parent_list, exception\n):\n obj = wl.WorkloadObject(adm_req_sample_objects[index], \"default\")\n with exception:\n assert obj.parent_containers == parent_list\n\n\n@pytest.mark.parametrize(\n \"index, images\",\n [\n (0, {(\"containers\", 0): Image(\"securesystemsengineering/alice-image:test\")}),\n (\n 1,\n {\n (\"containers\", 0): Image(\n \"securesystemsengineering/charlie-image@sha256:\"\n \"91ac9b26df583762234c1cdb2fc930364754ccc59bc752a2bfe298d2ea68f9ff\"\n )\n },\n ),\n (\n 2,\n {(\"containers\", 0): Image(\"securesystemsengineering/sample-san-sama:hai\")},\n ),\n (3, {(\"containers\", 0): Image(\"busybox\")}),\n (\n 5,\n {\n (\"containers\", 0): Image(\"redis:alpine\"),\n (\"containers\", 1): Image(\"mysql:8\"),\n (\"initContainers\", 0): Image(\"busybox:1.32\"),\n },\n ),\n ],\n)\ndef test_k8s_object_container_images(adm_req_sample_objects, index, images):\n obj = wl.WorkloadObject(adm_req_sample_objects[index], \"default\")\n assert obj.containers == 
images\n\n\n@pytest.mark.parametrize(\n \"index, image, image_index, image_type, patches\",\n [\n (\n 0,\n Image(\"redis:alpine\"),\n 0,\n \"containers\",\n {\n \"op\": \"replace\",\n \"path\": \"/spec/template/spec/containers/0/image\",\n \"value\": \"docker.io/library/redis:alpine\",\n },\n ),\n (\n 0,\n Image(\"redis:alpine\"),\n 1,\n \"containers\",\n {\n \"op\": \"replace\",\n \"path\": \"/spec/template/spec/containers/1/image\",\n \"value\": \"docker.io/library/redis:alpine\",\n },\n ),\n (\n 0,\n Image(\"redis:alpine\"),\n 1,\n \"initContainers\",\n {\n \"op\": \"replace\",\n \"path\": \"/spec/template/spec/initContainers/1/image\",\n \"value\": \"docker.io/library/redis:alpine\",\n },\n ),\n (\n 1,\n Image(\"redis:alpine\"),\n 1,\n \"containers\",\n {\n \"op\": \"replace\",\n \"path\": \"/spec/containers/1/image\",\n \"value\": \"docker.io/library/redis:alpine\",\n },\n ),\n (\n 3,\n Image(\"redis:alpine\"),\n 0,\n \"initContainers\",\n {\n \"op\": \"replace\",\n \"path\": \"/spec/jobTemplate/spec/template/spec/initContainers/0/image\",\n \"value\": \"docker.io/library/redis:alpine\",\n },\n ),\n ],\n)\ndef test_k8s_object_json_patch(\n adm_req_sample_objects, index, image, image_type, image_index, patches\n):\n obj = wl.WorkloadObject(adm_req_sample_objects[index], \"default\")\n assert obj.get_json_patch(image, image_type, image_index) == patches\n","sub_path":"tests/test_workload_object.py","file_name":"test_workload_object.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
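The parametrized tests above expect wl.WorkloadObject(...) to hand back a kind-specific subclass; a hypothetical standalone sketch of that dispatch pattern (illustrative names, not Connaisseur's actual implementation):

class WorkloadObject:
    SPECIAL = {}  # kind -> subclass, filled in after the subclasses exist

    def __new__(cls, request_object, namespace):
        # pick the subclass by the admission object's "kind"
        subclass = cls.SPECIAL.get(request_object.get("kind"), WorkloadObject)
        return super().__new__(subclass)

    def __init__(self, request_object, namespace):
        self.kind = request_object.get("kind")
        self.namespace = namespace

class Pod(WorkloadObject):
    pass

class CronJob(WorkloadObject):
    pass

WorkloadObject.SPECIAL = {"Pod": Pod, "CronJob": CronJob}

obj = WorkloadObject({"kind": "CronJob"}, "default")
assert isinstance(obj, CronJob)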
+{"seq_id":"395782577","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse,JsonResponse\nfrom rest_framework import serializers\nimport json\nimport twitter_scraper as ts\nimport numpy as np\nimport pandas as pd\nfrom textblob import TextBlob\nfrom rest_framework.views import APIView\nimport re\nimport os\nimport pickle as pkl\nimport warnings\nimport tweepy\nwarnings.simplefilter('ignore')\nfrom blacknet.settings import BASE_DIR\nfrom .models import SuspectList\n\nconsumer_key = 'nblgiZGKmb9XhK1IrRJI8IYJg'\nconsumer_secret = 'gttJPns2RfpYz0KNr3CWzK8apIbTT0jGHLegDSJBdR8fXEt0UJ'\n\naccess_token = '984160572739543040-7K8YTQirqEXqGm5VQRrBn4wpE5QRH1C'\naccess_token_secret = 'LkTEgW4mGJN7mNbNE1JDqQ9dCmc7QGdxNUIhnoHjJvY2S'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n# Create your views here.\ndef clean_tweet(tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\ndef sentiment(tweet):\n analysis = TextBlob(clean_tweet(tweet))\n return analysis.sentiment.polarity\n\ndef get_profile(request,username):\n context = ts.Profile(username).to_dict()\n return JsonResponse(context,safe=False)\n\ndef hate_speech_detection(tweet):\n # loaded_model = pkl.load(open(\"/static/approach_1.sav\", 'rb'))\n loaded_model = pkl.load(open(os.path.join(BASE_DIR, 'approach_1.sav'), 'rb'))\n ans = loaded_model.predict([clean_tweet(tweet)])\n if ans == [1]:\n return \"Offensive\"\n if ans == [0]:\n return \"Non-Offensive\"\n\ndef profile_hatespeech_analyzer(keyword):\n tweet_data = []\n for tweet in ts.get_tweets(keyword):\n tweet_data.append(tweet)\n\n df = pd.DataFrame(data = [tweet['time'] for tweet in tweet_data], columns=[\"time\"])\n df['isRetweet'] = pd.DataFrame([tweet['isRetweet'] for tweet in tweet_data])\n df['text'] = np.array([tweet['text'] for tweet in tweet_data])\n df['replies'] = np.array([tweet['replies'] for tweet in tweet_data])\n df['retweets'] = np.array([tweet['retweets'] for tweet in tweet_data])\n df['likes'] = np.array([tweet['likes'] for tweet in tweet_data])\n df['sentiment_polarity'] = np.array([sentiment(tweet) for tweet in df['text']])\n df['offensive_or_not'] = np.array([hate_speech_detection(tweet) for tweet in df['text']])\n # return [[df.loc[idx,\"time\"],df.loc[idx,\"isRetweet\"],df.loc[idx,\"text\"],\n # df.loc[idx,\"replies\"],df.loc[idx,\"retweets\"],df.loc[idx,\"likes\"],\n # df.loc[idx,\"sentiment_polarity\"],df.loc[idx,\"offensive_or_not\"]] \n # for idx in df.index]\n return df\n\ndef profile_hashtag_analyzer(keyword):\n tweets = []\n for tweet in tweepy.Cursor(api.search, q=\"{} -filter:retweets\".format(keyword),tweet_mode='extended',lang=\"en\",since=\"2020-01-31\", until=\"2020-02-06\").items(10):\n tweets.append([tweet.created_at, tweet.user.profile_image_url,\n tweet.user.screen_name, tweet.user.followers_count, tweet.user.friends_count,\n tweet.in_reply_to_screen_name,tweet.full_text, tweet.favorite_count,tweet.retweet_count])\n df = pd.DataFrame(tweets,columns=[\"time\",\"profile_img_url\",\"username\",\"followers_count\",\"following_count\",\"reply_to\",\"text\",\"likes\",\"retweet_count\"])\n df['sentiment_polarity'] = np.array([sentiment(tweet) for tweet in df['text']])\n df['offensive_or_not'] = np.array([hate_speech_detection(tweet) for tweet in df['text']])\n return df\n\ndef TwitterHashTagProcess(hashtag):\n df = profile_hashtag_analyzer(hashtag)\n df = 
df.to_json(orient='index')\n df = json.loads(df)\n tweet_arr = []\n for i in df:\n tweet_arr.append(df[i])\n # print(df)\n return json.dumps(tweet_arr)\n\ndef TwitterUserNameProcess(userName):\n df = profile_hatespeech_analyzer(userName)\n df = df.to_json(orient='index')\n df = json.loads(df)\n tweet_arr = []\n for i in df:\n tweet_arr.append(df[i])\n return json.dumps(tweet_arr)\n\ndef home(request):\n if request.method == \"POST\":\n page = request.POST.get(\"page_request\")\n if page == \"twitter\":\n return redirect(\"/api/twitter\")\n context = {}\n return render(request,\"index.html\",context)\n\ndef twitterApi(request,userName):\n resp = TwitterUserNameProcess(userName)\n resp = json.loads(resp)\n return JsonResponse(resp,safe=False)\n\ndef twitterHashTagApi(request,hashtag):\n resp = TwitterHashTagProcess(\"#{}\".format(hashtag))\n resp = json.loads(resp)\n return JsonResponse(resp,safe=False)\n\ndef twitter(request):\n context = {}\n if request.method == \"POST\":\n resp_Arr = []\n user_input = request.POST.get(\"usernames\")\n user_input = user_input.strip()\n if user_input != \"\":\n if user_input[:1] == \"#\":\n twitter_resp_data = TwitterHashTagProcess(user_input)\n context[\"data1\"] = json.loads(twitter_resp_data)\n context[\"hashtag\"] = user_input\n # print(context)\n return render(request,\"twitter.html\",context)\n else:\n user_List = user_input.split(\",\")\n for user_name in user_List:\n user_name = user_name.strip()\n if user_name != \"\":\n # print(user_name)\n try:\n twitter_resp_data = TwitterUserNameProcess(user_name)\n user_profile = ts.Profile(user_name).to_dict()\n resp_Arr.append({\n \"username\":user_name,\n \"data\":json.loads(twitter_resp_data),\n \"user_profile\":user_profile\n })\n except Exception as e:\n context[\"error\"] = \"{}\".format(e)\n context[\"data\"] = resp_Arr\n\n else:\n context[\"error\"] = \"Please type a twitter username in the input field.\"\n\n return render(request,\"twitter.html\",context)\n\ndef addSuspect(request,username):\n context = {}\n prevList = SuspectList.objects.filter(name=\"raghav\")[0].suspect_list\n if username in prevList:\n context[\"code\"] = 1\n context[\"response\"] = \"User already exists\"\n return JsonResponse(context,safe=False)\n prevList.append(username)\n SuspectList.objects.filter(name=\"raghav\").update(suspect_list=prevList)\n context[\"response\"] = \"{} added to the suspect list\".format(username)\n context[\"code\"] = 0\n return JsonResponse(context,safe=False)\n\ndef deleteSuspect(request,username):\n context = {}\n prevList = SuspectList.objects.filter(name=\"raghav\")[0].suspect_list\n if username in prevList:\n prevList.remove(username)\n SuspectList.objects.filter(name=\"raghav\").update(suspect_list=prevList)\n context[\"code\"] = 0\n context[\"response\"] = \"{} removed from the suspect list\".format(username)\n else:\n context[\"code\"] = 1\n context[\"response\"] = \"User doesnot exist in the suspect list\"\n return JsonResponse(context,safe=False)\n\nclass SuspectSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = SuspectList\n fields = '__all__'\n\nclass SuspectListView(APIView):\n def get(self,request):\n member = SuspectList.objects.all()\n serializer = SuspectSerializer(member, many=True)\n return JsonResponse(serializer.data,safe=False)\n def post(self):\n pass\n\ndef suspect(request):\n context = {}\n context[\"data\"] = []\n suspect_username = SuspectList.objects.filter(name=\"raghav\")[0].suspect_list\n for user in suspect_username:\n resp = 
ts.Profile(user).to_dict()\n context[\"data\"].append(resp)\n # print(context)\n return render(request,\"suspect.html\",context)\n\n# \"/api/twitter/raghav/suspect-list\"\n# \"for every user there should be an suspect list\"\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
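TwitterHashTagProcess and TwitterUserNameProcess both convert a DataFrame to an index-keyed dict and then flatten it; a standalone sketch showing that pandas' orient='records' produces the same list in one step:

import json
import pandas as pd

df = pd.DataFrame({"text": ["tweet a", "tweet b"], "likes": [3, 7]})

# original approach: index-keyed dict, then flattened into a list
by_index = json.loads(df.to_json(orient="index"))
tweet_arr = [by_index[key] for key in by_index]

# equivalent one-liner
assert tweet_arr == json.loads(df.to_json(orient="records"))
print(json.dumps(tweet_arr))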
+{"seq_id":"175072469","text":"from graph_business_trip import __version__\nfrom graph_business_trip.graph_business_trip import *\nimport pytest\n\n\ndef test_version():\n    assert __version__ == '0.1.0'\n\n\n@pytest.fixture\ndef data():\n    ver=Vertex('Metroville')\n    ver2=Vertex('Pandora')\n    ver3=Vertex('Arendelle')\n    ver4=Vertex('Monstropolis')\n    ver5=Vertex('Naboo')\n    graph=Graph()\n    graph.add_vertex(ver)\n    graph.add_vertex(ver2)\n    graph.add_vertex(ver3)\n    graph.add_vertex(ver4)\n    graph.add_vertex(ver5)\n    graph.add_edges(ver,ver2,50)\n    graph.add_edges(ver3,ver4,150)\n    graph.add_edges(ver5,ver4,250)\n    return graph\n\n\ndef test_correct_cost(data):\n    graph=data\n    arr=['Arendelle','Monstropolis', 'Naboo']\n    assert business_trip(graph,arr) == (True,400)\n\n\ndef test_unable_to_trip(data):\n    graph=data\n    arr=['Arendelle','Amman', 'Naboo']\n    assert business_trip(graph,arr) == (False,0)\n\n\ndef test_not_connected_city(data):\n    graph=data\n    arr=['Arendelle','Monstropolis', 'Metroville']\n    assert business_trip(graph,arr) == (False,0)\n\n\ndef test_empty_list_city(data):\n    graph=data\n    arr=[]\n    assert business_trip(graph,arr) == (False,0)\n","sub_path":"python/graph-business-trip/tests/test_graph_business_trip.py","file_name":"test_graph_business_trip.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
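The tests receive the graph through pytest's fixture injection; a minimal standalone reminder that a fixture is only injected when it is declared as a test parameter:

import pytest

@pytest.fixture
def numbers():
    return [1, 2, 3]

def test_sum(numbers):  # 'numbers' here is the list the fixture returned
    assert sum(numbers) == 6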
+{"seq_id":"562513933","text":"#-*- encoding:utf-8 -*-\n\nimport shelve\nimport datetime\n\nd = shelve.open('shelve_test') # open (or create) the shelf file\n# print(d.get('hello'))\n# print(d.get('world'))\n# print(d.get('date'))\n\ninfo = {'age':22,'job':'it'}\n\nname = [\"alex\", \"rain\", \"test\"]\n\nd['hello'] = name\nd['world'] = info\nd['date'] = datetime.datetime.now()\n\nd.close()","sub_path":"pycharm/day5/shelve模块.py","file_name":"shelve模块.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
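A companion sketch reading the same shelf back, assuming the writer above has already created 'shelve_test' in the working directory:

import shelve

d = shelve.open('shelve_test')
print(d.get('hello'))   # ["alex", "rain", "test"]
print(d.get('world'))   # {'age': 22, 'job': 'it'}
print(d.get('date'))    # the datetime stored at save time
d.close()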
+{"seq_id":"359282895","text":"import json \nimport argparse\nimport toMolar\n\ndef moad_parse(input_file):\n    \"\"\"Take the MOAD database and parse it into a JSON with a Ligand->PDB->Residues hierarchy.\"\"\"\n    input_file=open(input_file,\"r\")\n    input_file_lines=input_file.readlines()\n    input_file.close()\n\n    compound_dict={}\n\n    for line in input_file_lines:\n        line_split=line.split(\",\")\n\n        if all(value == \"\" or value == \"\\n\" for value in line_split):\n            continue\n\n        if line_split[0]!=\"\":\n            continue\n\n        if line_split[2]!=\"\":\n            pdb=line_split[2]\n            continue\n\n        compound=line_split[3].split(\":\")[0]\n\n        chain=line_split[3].split(\":\")[1]\n        resid=line_split[3].split(\":\")[2]\n        status=line_split[4]\n        if line_split[7] !=\"\":\n            afinity_standard=line_split[7]+line_split[8]\n            afinity = toMolar.toMolar(float(line_split[7]),line_split[8])\n            type_afinity=line_split[5]\n            standard_relation=line_split[6]\n        else:\n            afinity=\"None\"\n            afinity_standard=\"None\"\n            type_afinity=\"None\"\n            standard_relation=\"None\"\n\n        if compound not in compound_dict.keys():\n            compound_dict[compound]= { \"pdbs\": [] }\n            record = { \"name\": pdb, \"residues\": [] }\n            if type_afinity==\"None\":\n                residues = {\"chain\": chain, \"resid\" : resid, \"status\": status, \"standard_value\": afinity, \"standard_relation\": standard_relation}\n            else:\n                residues = {\"chain\": chain, \"resid\" : resid, \"status\": status, \"standard_value\": afinity, type_afinity:afinity_standard, \"standard_relation\": standard_relation}\n            record[\"residues\"].append(residues)\n            compound_dict[compound][\"pdbs\"].append(record)\n\n        else:\n            check=0\n            for element in compound_dict[compound][\"pdbs\"]:\n                if pdb == element[\"name\"]:\n                    if type_afinity==\"None\":\n                        residues = {\"chain\": chain, \"resid\" : resid, \"status\": status, \"standard_value\": afinity, \"standard_relation\": standard_relation}\n                    else:\n                        residues = {\"chain\": chain, \"resid\" : resid, \"status\": status, \"standard_value\": afinity, type_afinity:afinity_standard, \"standard_relation\": standard_relation}\n                    element[\"residues\"].append(residues)\n                    check=1\n            if check==0:\n                if type_afinity==\"None\":\n                    residues = {\"chain\": chain, \"resid\" : resid, \"status\": status, \"standard_value\": afinity, \"standard_relation\": standard_relation}\n                else:\n                    residues = {\"chain\": chain, \"resid\" : resid, \"status\": status, \"standard_value\": afinity, type_afinity:afinity_standard, \"standard_relation\": standard_relation}\n                record = { \"name\": pdb, \"residues\": [] }\n                record[\"residues\"].append(residues)\n                compound_dict[compound][\"pdbs\"].append(record)\n\n    print(json.dumps(compound_dict, indent=4, sort_keys=True))\n    return 0\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(description='Create Ligand->PDB->Residues structure json database from MOAD file')\n    parser.add_argument(\"-i\", '--moad_file', default='every_bind.csv', help=\"Provide input moad file and get json file\")\n    return parser\n\ndef main():\n    parser=parse_arguments()\n    args=parser.parse_args()\n    moad_parse(args.moad_file)\n    return 0\n\nif __name__=='__main__':\n    main()\n\n\n\n","sub_path":"MOAD_PDBIND/MOAD.py","file_name":"MOAD.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
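The local toMolar module the script imports is not shown; a plausible minimal stand-in, assuming it simply scales an affinity value by its unit prefix (the unit table here is an assumption):

UNIT_SCALE = {"M": 1.0, "mM": 1e-3, "uM": 1e-6, "nM": 1e-9, "pM": 1e-12, "fM": 1e-15}

def toMolar(value, unit):
    # e.g. toMolar(50.0, "nM") -> 5e-08
    return value * UNIT_SCALE[unit]

print(toMolar(50.0, "nM"))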
+{"seq_id":"195327479","text":"# VMOS\n# Team Lead: Varun Dhanraj\n# Team Members: Varun Dhanraj, Mohammed Laota, Owen Neale, Samia Muqeem, Aravind Vellora Vayalapra\n# Created May 26th, 2018\n\nimport json\nimport time # use this library to measure runtime of certain parts of the code, or to pause the program using time.sleep(60)\nimport os\nimport random\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\n\ntf.enable_eager_execution()\n\n# print(\"TensorFlow version: {}\".format(tf.VERSION))\n# print(\"Eager execution: {}\".format(tf.executing_eagerly()))\n\ndef main():\n pathToData = \"/Users/VarunDhanraj/Desktop/mpd.v1/data/\" # change this to the path to the folder that contains the .json playlist files\n\n inputFiles = os.listdir(pathToData) \n \n inputFiles = inputFiles[0:1] # change this according to how many files you wish to analyze \n \n uniqueTracks = set() # a set of all the unique tracks in the given data sample \n totalTrackNum = 0 # counts the total number of tracks (ie, not just unique tracks) \n playlists = [] # this groups songs according to their playlist \n \n for file in inputFiles: # loops through all the various files in inputFiles\n\n data = json.load(open(pathToData + file))\n \n for i in data[\"playlists\"]:\n playlists.append([])\n for j in i[\"tracks\"]:\n uniqueTracks.add(j[\"track_name\"])\n playlists[-1].append(j[\"track_name\"])\n totalTrackNum += 1\n \n # use this syntax to print json data\n # this would print 'playlists'\n # print(\"playlists:\\n\", json.dumps(playlists, indent=2, sort_keys=True))\n\n NumUnique = len(uniqueTracks)\n uniqueList = list(uniqueTracks)\n sr = pd.Series(uniqueList) \n\n model = tf.keras.Sequential([tf.keras.layers.Dense(100, activation='relu', input_shape=(NumUnique, )), \n tf.keras.layers.Dense(NumUnique)]) \n # this is the object which should contain the information about the neural network, such as the number of layers, size of input, activation function, etc\n \n print(\"Model Created\")\n \n learningRate = 0.01\n \n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learningRate)\n \n print(\"Optimizer Created\")\n \n Dict = CreateDict(uniqueList)\n \n i = random.randint(0, len(playlists) - 1) \n examplePlaylist = playlists.pop(i)\n\n j = random.randint(0, len(examplePlaylist) - 1)\n expectedOutput = examplePlaylist.pop(j)\n\n expectedIndex = bitfield(OneHotEncode([expectedOutput], Dict), NumUnique).index(1)\n\n trainingExample = bitfield(OneHotEncode(examplePlaylist, Dict), NumUnique)\n \n trainingInput = tf.convert_to_tensor([trainingExample], dtype=tf.float32)\n \n trainingOutput = tf.convert_to_tensor([expectedIndex], dtype=tf.int32) # choose the expected value to be the index of the removed element from the playlist\n\n print(trainingInput, trainingOutput)\n \n train_loss_results = []\n train_accuracy_results = []\n\n num_epochs = 101\n\n for epoch in range(num_epochs):\n\n epoch_loss_avg = tfe.metrics.Mean()\n epoch_accuracy = tfe.metrics.Accuracy()\n\n grads = grad(model, trainingInput, trainingOutput) \n\n # print(\"Gradient Calculated\")\n\n optimizer.apply_gradients(zip(grads, model.variables),\n global_step=tf.train.get_or_create_global_step())\n epoch_loss_avg(loss(model, trainingInput, trainingOutput)) # add current batch loss\n epoch_accuracy(tf.argmax(model(trainingInput), axis=1, output_type=tf.int32), trainingOutput)\n train_loss_results.append(epoch_loss_avg.result())\n train_accuracy_results.append(epoch_accuracy.result())\n\n if epoch % 10 
== 0:\n print(\"Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}\".format(epoch,\n epoch_loss_avg.result(),\n epoch_accuracy.result()))\n \n\ndef CreateDict(uniqueList): # returns a dictionary of, with each key being the string corresponding to a song in uniqueList, and a value corresponding to a index which corresponds to that unique song \n indices = []\n count = 0\n for i in uniqueList:\n indices.append(2**count)\n count += 1\n DictionaryOfSongs = dict(zip(uniqueList, indices))\n return DictionaryOfSongs\n\ndef OneHotEncode(playlist, Dict): # this esentially creates a list of 0s and 1s. If playlist has the i'th song of Dict, the return list will have a 1 at its i'th element. Otherwise it will be 0 at the element \n x = 0\n for song in playlist:\n x = x | (Dict[song])\n return x\n\ndef bitfield(n, length):\n return [int(digit) for digit in bin(n)[2:].zfill(length)] # change int to bool if you want True or False instead of 0 and 1 \n\ndef loss(model, x, y):\n out = model(x)\n # print(out) # optional debuggint print statement\n return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=out)\n\ndef grad(model, inputs, targets):\n with tf.GradientTape() as tape:\n loss_value = loss(model, inputs, targets)\n return tape.gradient(loss_value, model.variables)\n\n\n\nmain()\n","sub_path":"makeModel.py","file_name":"makeModel.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
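A worked example of the bitmask one-hot scheme above (CreateDict, OneHotEncode, bitfield) on a tiny vocabulary, to make the round trip concrete:

songs = ["a", "b", "c", "d"]
Dict = {song: 2 ** i for i, song in enumerate(songs)}   # {'a': 1, 'b': 2, 'c': 4, 'd': 8}

x = 0
for song in ["b", "d"]:
    x |= Dict[song]                                     # x == 0b1010 == 10

bits = [int(digit) for digit in bin(x)[2:].zfill(len(songs))]
print(bits)  # [1, 0, 1, 0] -- note bin() puts the highest-index song first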
+{"seq_id":"113514098","text":"import random\nfrom itertools import product\nfrom oops_utils.queue_using_linked_list import Queue\n\n\nclass IndividualPlayerDetails:\n    \"\"\"It will save each player's details\"\"\"\n    def __init__(self, qu):\n        self.qu = qu\n\n\nclass DeckOfCards:\n    def __init__(self):\n        self.player = []\n        self.suit = [\"Club\", \"Diamond\", \"Heart\", \"Spade\"]\n        self.rank = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\"]\n\n    def total_cards_list(self):\n        \"\"\"It will return a list of 36 randomly ordered (suit, rank) tuples\"\"\"\n        cartesian_product = product(self.suit, self.rank)\n        list_of_cards = list(cartesian_product)\n        return random.sample(list_of_cards, 36)\n\n    def card_distribution(self):\n        \"\"\"It will distribute the 36 cards between the four players\"\"\"\n        cards = self.total_cards_list()  # draw from one shuffled deck so no card is dealt twice\n        count = 0\n        for number in range(4):\n            lst = []\n            for card in range(9):\n                lst.append(cards[count])\n                count += 1\n            self.player.append(lst)\n\n    def sort_players_card(self):\n        \"\"\"It will sort cards by rank\"\"\"\n        for player_list in self.player:\n            for num in range(0, len(player_list)):\n                for n in range(0, len(player_list) - num - 1):\n                    pos = player_list[n]\n                    next_pos = player_list[n + 1]\n                    if self.rank.index(pos[1]) < self.rank.index(next_pos[1]):\n                        player_list[n], player_list[n + 1] = player_list[n + 1], player_list[n]\n\n    def queue_insertion(self):\n        \"\"\"It will insert all the cards of players in a queue\"\"\"\n        count = 1\n        print(\"After inserting each player in a queue, we got: \")\n        for lst in self.player:\n            obj = Queue()\n            obj.en_queue(lst)\n            print(f\"The cards of player {count} are: \")\n            count += 1\n            obj.print_list()\n            print()\n\n    def individual(self):\n        \"\"\"que stores each player object (player1, player2, ...)\"\"\"\n        que = Queue()\n        for lst in self.player:\n            if self.player.index(lst) == 0:\n                obj = Queue()\n                obj.en_queue(lst)\n                player1 = IndividualPlayerDetails(obj)\n                que.en_queue(player1)\n\n            elif self.player.index(lst) == 1:\n                obj = Queue()\n                obj.en_queue(lst)\n                player2 = IndividualPlayerDetails(obj)\n                que.en_queue(player2)\n\n            elif self.player.index(lst) == 2:\n                obj = Queue()\n                obj.en_queue(lst)\n                player3 = IndividualPlayerDetails(obj)\n                que.en_queue(player3)\n\n            elif self.player.index(lst) == 3:\n                obj = Queue()\n                obj.en_queue(lst)\n                player4 = IndividualPlayerDetails(obj)\n                que.en_queue(player4)\n\n        print()\n        print(\"Finally, when each player gets the cards and after arranging by rank, the cards in all hands are: \")\n        for i in range(4):\n            x = que.de_queue_data()\n            player_queue = x.qu\n            print(player_queue.de_queue_data())\n\n\ndef main():\n    obj_of_player = DeckOfCards()\n    obj_of_player.total_cards_list()\n    obj_of_player.card_distribution()\n    obj_of_player.sort_players_card()\n    obj_of_player.queue_insertion()\n    obj_of_player.individual()\n\n\nmain()\n","sub_path":"deck_of_cards_extended.py","file_name":"deck_of_cards_extended.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
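A compact standalone check of the deal logic: sampling the shuffled deck once guarantees the four nine-card hands partition 36 distinct cards.

import random
from itertools import product

suits = ["Club", "Diamond", "Heart", "Spade"]
ranks = ["2", "3", "4", "5", "6", "7", "8", "9", "10",
         "Jack", "Queen", "King", "Ace"]

deck = random.sample(list(product(suits, ranks)), 36)    # shuffle once
hands = [deck[i * 9:(i + 1) * 9] for i in range(4)]      # four hands of nine

assert len({card for hand in hands for card in hand}) == 36  # no duplicates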
+{"seq_id":"500224720","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n# Parameters\nN=1e6 # points to simulate\ndiv=100 # increments to evaluate stats at\nN=int(N)\ninc=int(N/div)\ndiv=int(div)\ndata = np.zeros( (div-1, 3) ) # hold stats\n\nrandom.seed() # seed from system time\ntally=0\ndatacounter=0\nfor i in range(N):\n    x = 4*random.random()-2\n    y = 4*random.random()-2\n    z = 4*random.random()-2\n    if (x-0.25)**2 + y**2 +z**2 <= 1:\n        tally += 1\n    if (x+0.25)**2 + y**2 +z**2 <= 1:\n        tally += 1\n    if (x-0.25)**2 + y**2 +z**2 <= 1 and (x+0.25)**2 + y**2 +z**2 <= 1:\n        tally -= 1\n    if i % inc == 0 and i != 0:\n        vol = 64*tally/float(i) # box volume (4^3) times the hit fraction\n        # propagate the error into the std of the mean\n        std = 64*np.sqrt( ( tally/float(i)**2 )*( 1 - tally/float(i) ) )\n        data[datacounter] = i, vol, std\n        datacounter += 1\n        print(vol)\n\n\n# 2 sigma error bars\nplt.errorbar(data[:,0], data[:,1], yerr=2*data[:,2], capsize = 3, capthick = 2, ecolor='r', errorevery = 5, label='Approx' )\nplt.legend(loc='upper right')\nplt.show()\n","sub_path":"hw2/volumes.py","file_name":"volumes.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
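A quick analytic cross-check for the estimator above: the tally counts hits in the union of two unit spheres centred at (+/-0.25, 0, 0) inside a 4x4x4 box, so the Monte Carlo volume should converge to the sphere union.

import math

R, d = 1.0, 0.5                                    # radius, centre separation
lens = math.pi * (4 * R + d) * (2 * R - d) ** 2 / 12   # equal-sphere overlap volume
union = 2 * (4 / 3) * math.pi * R ** 3 - lens
print(union)                                        # ~5.727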
+{"seq_id":"21715360","text":"import wso\n\n# Turn on debugging to see more info\nUEM = wso.WSO(debug=False)\n\n# OGS = UEM.get_all_ogs()\n\n# for OG in OGS[\"OrganizationGroups\"]:\n# print(OG)\n\ndevices = UEM.get_all_devices(pagesize=9999999)[\"Devices\"]\n\nprint(\"OG,SerialNumber,ModelId,LastSeen\")\n\nfor device in devices:\n print(\"%s,%s,%s,%s\" %\n (device[\"LocationGroupId\"][\"Name\"], device[\"SerialNumber\"],\n device[\"ModelId\"][\"Name\"], device[\"LastSeen\"]))\n","sub_path":"examples/get_number_of_devices_og.py","file_name":"get_number_of_devices_og.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
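The loop above prints CSV by hand; a sketch of the same output through the csv module, which also handles quoting if a field ever contains a comma (the sample record is illustrative):

import csv
import sys

devices = [{"LocationGroupId": {"Name": "Corp"}, "SerialNumber": "ABC123",
            "ModelId": {"Name": "iPad"}, "LastSeen": "2020-01-01"}]

writer = csv.writer(sys.stdout)
writer.writerow(["OG", "SerialNumber", "ModelId", "LastSeen"])
for device in devices:
    writer.writerow([device["LocationGroupId"]["Name"], device["SerialNumber"],
                     device["ModelId"]["Name"], device["LastSeen"]])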
+{"seq_id":"594392025","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\nclass TimberBeam:\n \"\"\"Timber structural calculations\"\"\"\n \"\"\"Must have the CSV timber propeties file to function\"\"\"\n \"\"\"Must have the kmod data file\"\"\"\n \n def __init__(self, b, h, grade='C16', ksys=1, \n service_class=1, load_duration='permanent', ley=3000, lez=3000):\n \"\"\" Form an instance of the timber beam class \"\"\"\n self.b = b\n self.h = h\n self.grade = grade\n self.kcrit = 1\n self.ksys = ksys\n self.service_class = service_class\n self.load_duration = load_duration\n self.ley= ley\n self.lez = lez\n \n \n # get all the timber materials and assign the grade passed when\n # forming the instance of the class\n all_timber_materials = pd.read_csv('./data/timberProperties.csv', index_col=0)\n self.timber = all_timber_materials.loc[grade]\n \n # set the partial factor on material\n if self.timber['Type'] == 'Solid':\n self.partial_factor = 1.3\n elif self.timber['Type'] == 'Glulam':\n self.partial_factor = 1.25\n else:\n self.partial_factor = 1.20\n # set the kcr factor for shear to allow for splitting\n self.kcr = 0.67\n \n # set the kh value\n if self.h < 150:\n self.kh = min(1.3, (150/self.h)**0.20)\n else:\n self.kh = 1\n \n # set the kmod value\n type = self.timber['Type']\n kmod_values = pd.read_csv('./data/kmod.csv', index_col=0)\n service_class_filter = kmod_values[kmod_values['service_class']\n == self.service_class]\n self.kmod = service_class_filter.loc[type, self.load_duration]\n \n # calculate the design bending strength\n self.fmd = (self.kcrit * self.ksys * self.kh * self.kmod * \n self.timber['fmk']\n / self.partial_factor)\n \n # calculate the geometric properties of the section\n self.A = self.b * self.h\n self.Zy = self.b * self.h**2 / 6\n self.Iy = self.b * self.h**3 / 12\n self.Iz = self.h * self.b**3 / 12\n self.ry = math.sqrt(self.Iy / self.A)\n self.rz = math.sqrt(self.Iz / self.A)\n \n # calculate the shear strength of the section\n self.fvd = (self.kmod * self.timber['fvk']\n * self.ksys / self.partial_factor)\n \n # calculate the design compressive strength parallel to the grain\n self.fc0d = self.ksys * self.kmod * self.timber['fc0k'] / self.partial_factor\n \n \n def k_i(lam_rel, beta_c):\n \"\"\" Calculates the ky factor from BS EN 1995-1-1 \"\"\"\n k = 0.50*(1 + beta_c * (lam_rel - 0.30) + lam_rel**2)\n return k\n \n \n def k_ci(k_i, lam_rel):\n \"\"\" Calculates the strength reduction factor for slenderness \"\"\"\n k = 1 / (k_i + math.sqrt(k_i**2 - lam_rel**2))\n return k\n \n\n self.beta_c = 0.20 # only solid sections considered at present\n lam_y = self.ley / self.ry\n lam_z = self.lez / self.rz\n e = self.timber['E005']\n f = self.timber['fc0k']\n lam_rely = (lam_y / math.pi * math.sqrt(f / e))\n lam_relz = (lam_z / math.pi * math.sqrt(f / e))\n k_y = k_i(lam_rely, self.beta_c)\n k_z = k_i(lam_relz, self.beta_c)\n self.k_cy = k_ci(k_y, lam_rely)\n self.k_cz = k_ci(k_z, lam_relz)\n \n # calculate the design compressive strength paralle to the grain\n # with allowance for buckling\n self.fc0dy = self.k_cy * self.fc0d\n self.fc0dz = self.k_cz * self.fc0d\n \n def capacity_check(self, M, V, F):\n \"\"\" Compare the design strength to the applied stress \"\"\"\n # calculate the applied stresses\n smd = M * 10**6 / self.Zy\n td = V * 10**3 / (self.A * self.kcr) * (3/2)\n sc0d = F * 10**3 / self.A\n \n # unity checks\n km = 0.70 # for rectangular timber sections\n u1 = (smd/(self.fmd * self.kcrit))**2 + (sc0d / self.fc0dz)\n u2 = 
smd/(self.fmd * self.kcrit) + (sc0d / self.fc0dy)\n u3 = smd/(self.fmd * self.kcrit) * km + (sc0d / self.fc0dz)\n \n # check the status of the beam\n if u1 <= 1 and u2 <= 1 and u3 <=1:\n uls_status = 'PASS'\n else:\n uls_status = 'FAIL'\n \n # plot results\n # print('Material Properties:\\n',self.timber)\n fig, (ax1, ax2) = plt.subplots(1,2)\n fig.set_size_inches(11,4)\n fig.suptitle(f'STATUS = {uls_status}')\n x_values = ['Bending', 'Shear', 'y Comp',\n 'z Comp']\n y_values = [self.fmd, self.fvd, self.fc0dy, self.fc0dz]\n colour=['red', 'green', 'orange', 'orange']\n ax1.bar(x_values, y_values, color=colour)\n ax1.set_xlabel('Design Capacities')\n ax1.set_ylabel('Stress MPa')\n ax2.bar(['U1', 'U2', 'U3'], [u1, u2, u3])\n ax1.axhline(smd, color='red', label=f'Applied bending stress { smd :.2f}MPa')\n ax1.axhline(td, color='green', label=f'Applied shear stress {td :.2f}MPa')\n ax1.axhline(sc0d, color='orange', label=f'Applied comp stress {sc0d :.2f}MPa')\n ax1.legend()\n ax1.set_title('Action Stresses')\n ax2.set_title('ULS Unity Ratio Checks')\n ax1.grid()\n ax2.grid()\n plt.show()\n ","sub_path":".ipynb_checkpoints/timber-checkpoint.py","file_name":"timber-checkpoint.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
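A hypothetical usage of the class, assuming the two CSV data files exist with a 'C24' grade row and a 'medium-term' load-duration column (both names are assumptions about the data files):

beam = TimberBeam(b=47, h=195, grade='C24', service_class=2,
                  load_duration='medium-term', ley=2400, lez=2400)
beam.capacity_check(M=2.5, V=4.0, F=1.0)  # kNm, kN, kN, per the unit scaling above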
+{"seq_id":"298676633","text":"import sklearn_crfsuite\r\nfrom sklearn_crfsuite import metrics\r\nfrom helper_functions import *\r\n\r\n\r\ndef step_one(file_name, file_2):\r\n train_sentences = file_opener(file_name)\r\n dev_sentences = file_opener(file_2)\r\n\r\n x_train = [sentence_features(s) for s in train_sentences]\r\n y_train = [sentence_labels(s, step_one=True) for s in train_sentences]\r\n\r\n x_dev = [sentence_features(s) for s in dev_sentences]\r\n y_dev = [sentence_labels(s, step_one=True) for s in dev_sentences]\r\n\r\n crf = sklearn_crfsuite.CRF(\r\n algorithm='lbfgs',\r\n c1=0.09684573395986483,\r\n c2=0.0800864058815976,\r\n max_iterations=100,\r\n all_possible_transitions=True\r\n )\r\n crf.fit(x_train, y_train)\r\n labels = list(crf.classes_)\r\n labels.remove('O')\r\n y_predicted = crf.predict(x_dev)\r\n\r\n f1 = metrics.flat_f1_score(y_dev, y_predicted, average='weighted', labels=labels)\r\n print(\"IOB Score:\", f1)\r\n return step_two(train_sentences, dev_sentences, y_predicted)\r\n\r\n\r\ndef step_two(train_sentences, dev_sentences, y_predicted_iob):\r\n x_train = [sentence_features(s, step_two=True) for s in train_sentences]\r\n y_train = [sentence_labels(s, step_two=True) for s in train_sentences]\r\n\r\n x_dev = []\r\n for ii in range(len(dev_sentences)):\r\n x_dev.append(sentence_features(dev_sentences[ii], step_two=True, predictions=y_predicted_iob[ii]))\r\n y_dev = [sentence_labels(s, step_two=True) for s in dev_sentences]\r\n dev_key = [sentence_labels(s) for s in dev_sentences]\r\n\r\n crf = sklearn_crfsuite.CRF(\r\n algorithm='lbfgs',\r\n c1=0.09684573395986483,\r\n c2=0.0800864058815976,\r\n max_iterations=100,\r\n all_possible_transitions=True\r\n )\r\n\r\n crf.fit(x_train, y_train)\r\n labels = list(crf.classes_)\r\n labels.remove('O')\r\n print(labels)\r\n y_predicted = crf.predict(x_dev)\r\n\r\n f1 = metrics.flat_f1_score(y_dev, y_predicted, average='weighted', labels=labels)\r\n print(\"Class Score:\", f1)\r\n\r\n combined = []\r\n for ii in range(len(y_predicted)):\r\n combo = list(zip(y_predicted_iob[ii], y_predicted[ii]))\r\n combined.append(list(map(lambda j: j[0] + \"-\" + j[1] if j[0] != 'O' else 'O', combo)))\r\n\r\n y_pred_flat = []\r\n for x in dev_key:\r\n y_pred_flat += x\r\n\r\n labels = list(set(y_pred_flat))\r\n labels.remove('O')\r\n\r\n final_f1 = metrics.flat_f1_score(combined, dev_key, average='weighted', labels=labels)\r\n print(\"Overall Score:\", final_f1)\r\n\r\n\r\nif __name__ == '__main__':\r\n step_one(\"./nel-labeled/train\", \"./nel-labeled/dev\")\r\n","sub_path":"Development Code/crf_sequential_approach.py","file_name":"crf_sequential_approach.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
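helper_functions is not shown; sklearn-crfsuite wants one feature dict and one label per token, so plausible minimal stand-ins for sentence_features and sentence_labels could look like this (the feature names are assumptions):

def sentence_features(sentence, step_two=False, predictions=None):
    feats = []
    for i, (token, _label) in enumerate(sentence):
        f = {"word.lower()": token.lower(), "word.istitle()": token.istitle()}
        if step_two and predictions is not None:
            f["iob"] = predictions[i]   # step one's prediction feeds step two
        feats.append(f)
    return feats

def sentence_labels(sentence, step_one=False, step_two=False):
    # full label "B-PER": IOB part "B" for step one, class "PER" for step two
    labels = []
    for _token, label in sentence:
        if label == "O":
            labels.append("O")
        elif step_one:
            labels.append(label.split("-")[0])
        elif step_two:
            labels.append(label.split("-")[1])
        else:
            labels.append(label)
    return labels

print(sentence_labels([("Alice", "B-PER"), ("runs", "O")], step_one=True))  # ['B', 'O']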
+{"seq_id":"133435454","text":"import serial\nimport json\nimport time\nimport queue\n\nclass Reciever:\n\n\tdef __init__(self):\n\t\tself.openMVIn = serial.Serial('/dev/ttyUSB0', baudrate = 115200, timeout = 0)\n\t\tself.message = \"\"\n\t\tself.messages = queue.Queue(10)\n\n\tdef in_loop(self):\n\t\twhile True:\n\t\t\tinput = self.openMVIn.read(1)\n\t\t\tif input == b'\\n':\n\t\t\t\t#try:\n\t\t\t\t\t#foo = json.loads(self.message)\n\t\t\t\t\t#print(\"{0}\".format(foo))\n\t\t\t\t#except ValueError:\n\t\t\t\t\t#pass\n\t\t\t\tif(self.messages.full()):\n\t\t\t\t\tself.messages.get() #waste the least recent data\n\t\t\t\t\tself.messages.put(self.message, 2)\n\t\t\t\telse:\n\t\t\t\t\tself.messages.put(self.message, 2)\n\t\t\t\tself.message = \"\"\n\t\t\telse:\n\t\t\t\tself.message += input.decode(\"utf-8\", \"ignore\")\n\n\tdef get_message(self):\n\t\tif(self.messages.empty()):\n\t\t\tpass\n\t\telse:\n\t\t\treturn self.messages.get(2)\n","sub_path":"RasPi/OpenMV_IN.py","file_name":"OpenMV_IN.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
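in_loop blocks forever, so one plausible way to drive the class (keeping the Reciever spelling as defined above) is a daemon thread plus polling; this sketch assumes the serial device at /dev/ttyUSB0 exists:

import threading
import time

r = Reciever()
threading.Thread(target=r.in_loop, daemon=True).start()
while True:
    msg = r.get_message()       # None until a full '\n'-terminated line arrived
    if msg is not None:
        print(msg)
    time.sleep(0.1)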
+{"seq_id":"3684901","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom ...symbols import POS, PUNCT, SYM, ADJ, CCONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB\nfrom ...symbols import NOUN, PROPN, PART, INTJ,SPACE,PRON\n\nTAG_MAP = {\n \"Adjective\": {POS:ADJ},\n \"Adposition\": {POS:ADP},\n \"Adverb\": {POS:ADV},\n \"Conjuction_Coordinating\": {POS:CCONJ},\n \"Conjuction_Subordinating\": {POS:SCONJ},\n \"Determiner\": {POS:DET},\n \"Interjection\": {POS:INTJ},\n \"Noun_Common\": {POS:NOUN},\n \"Noun_Proper\": {POS:PROPN},\n \"Numeral\": {POS:NUM},\n \"Other\": {POS:X},\n \"Particle\": {POS:PART},\n \"Pronoun\": {POS:PRON},\n \"Punctuation\": {POS:PUNCT},\n \"Symbol\": {POS:SYM},\n \"Verb\": {POS:VERB}\n}\n","sub_path":"spacy/lang/el/tag_map_general.py","file_name":"tag_map_general.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"89880763","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\nsmtp_host = 'smtp.163.com'  # the mail server host\nsender = 'sender@163.com'  # placeholder from-address\nreceiver = ['670482466@qq.com']\nmessage = MIMEText('Sending the test report!!!', 'plain', 'utf-8')\nmessage['From'] = Header('张伟', 'utf-8')\nmessage['To'] = Header('钮佳涛', 'utf-8')\n\nsubject = 'PYTHON email test'\nmessage['Subject'] = Header(subject, 'utf-8')\n\ntry:\n    smtpObj = smtplib.SMTP(smtp_host)  # connect before calling sendmail\n    smtpObj.sendmail(sender, receiver, message.as_string())\n    print('send success')\nexcept smtplib.SMTPException as e:\n    print(e)\n    print('error f**k!!!')\n","sub_path":"mail/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
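163.com, like most providers, also requires authentication before sendmail is accepted; a hedged variant using SMTP_SSL with placeholder credentials, reusing receiver and message from above:

import smtplib

with smtplib.SMTP_SSL('smtp.163.com', 465) as smtp:
    smtp.login('your_account@163.com', 'your_auth_code')  # placeholder credentials
    smtp.sendmail('your_account@163.com', receiver, message.as_string())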
+{"seq_id":"407663082","text":"import os, time\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.applications import inception_v3\n\nimport matplotlib.pyplot as plt\nfrom utils import preprocess_image, deprocess_image\nfrom model import get_feature_extractor\n\nimport argparse\n\nprint(\"Libraries Loaded!\")\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n# You can tweak these setting to obtain new visual effects.\nlayer_settings = {\n \"mixed3\": 0.5,\n \"mixed4\": 1.8,\n \"mixed5\": 1.5,\n \"mixed6\": 1.5,\n \"mixed7\": 1.9,\n}\n\n# Playing with these hyperparameters will also allow you to achieve new effects\nstep_size = 0.01 # Gradient ascent step size\nnum_octave = 3 # Number of scales at which to run gradient ascent\noctave_scale = 1.4 # Size ratio between scales\noptim_steps = 10 # Number of ascent steps per scale\nmax_loss = 1500.0\n\n#----------------------------------------------------------------------\n\ndef loss_fn(image, model):\n features = model(image)\n \n #print(features) \n loss = tf.zeros(shape=())\n for layer in features.keys():\n coeff = layer_settings[layer]\n activation = features[layer]\n scaling = tf.reduce_prod(tf.cast(tf.shape(activation), tf.float32))\n loss+=coeff*tf.reduce_sum(tf.square(activation[:, 2:-2, 2:-2, :]))/scaling\n \n return loss\n\n\n@tf.function\ndef _gradient_ascent(img, model:tf.keras.Model, step_size):\n with tf.GradientTape() as tape:\n tape.watch(img)\n loss = loss_fn(img, model)\n grads = tape.gradient(loss, img)\n \n #Normalizing gradients: Crucial\n grads /= tf.maximum(tf.math.reduce_mean(tf.abs(grads)), 1e-8)\n img += step_size * grads\n img = tf.clip_by_value(img, -1, 1)\n \n return loss, img\n\ndef gradient_ascent_loop(img, model, optim_steps, step_size, max_loss=None):\n for i in range(optim_steps):\n loss, img = _gradient_ascent(img, model, step_size)\n \n if max_loss and loss>max_loss:\n print(\"max loss reached\")\n break\n \n print(f\"loss value at step {i}: {loss}\")\n return img\n\n#----------------------------------------------------------------------\n\ndef dream_on(original_img, feature_extractor, output_dir, iterations=1000, save_every=10, downscale_factor=2):\n\n #processed_img = preprocess_image(original_img)\n processed_img = original_img\n processed_img = tf.image.resize(processed_img, \n (int(processed_img.shape[1]/downscale_factor), int(processed_img.shape[2]/downscale_factor))\n )\n img = processed_img\n\n x_size, y_size = int(processed_img.shape[1]), int(processed_img.shape[2])\n print(f\"x_size: {x_size}, y_size:{y_size}\")\n\n for i in range(iterations):\n \n\n files = os.listdir(f\"{output_dir}\")\n files = sorted(files, key=lambda x: int(x.split(\"_\")[3].split(\".\")[0]))\n print(f\"recent saves: {files[-2:]}\")\n \n if os.path.isfile(f\"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}\" + \".jpg\"):\n print(f\"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}\" + \".jpg Exist\")\n\n elif len(os.listdir(f\"{output_dir}\"))==0:\n img = processed_img\n #img = tf.keras.preprocessing.image.img_to_array(img)\n tf.keras.preprocessing.image.save_img(f\"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}\" + \".jpg\", deprocess_image(img.numpy()))\n else:\n lastfile = files[-1]\n \n img = tf.keras.preprocessing.image.load_img(f\"{output_dir}/{lastfile}\")\n img = tf.keras.preprocessing.image.img_to_array(img)\n \n x_trim = 2\n y_trim = 2\n\n print(img.shape)\n #img = 
img[0:x_size-x_trim, 0:y_size-y_trim]\n img = tf.image.central_crop(img, central_fraction=0.99)\n img = tf.image.resize(img, (x_size, y_size))\n print(img.shape)\n\n #kernel = np.ones((5,5),np.float32)/25\n #img = cv2.filter2D(np.array(img),-1,kernel)\n #img = cv2.GaussianBlur(np.array(img), (9, 9), 0)\n #img = cv2.resize(img, (y_size, x_size))\n\n print(img.shape)\n img = tf.expand_dims(img, axis=0)\n img = inception_v3.preprocess_input(img)\n print(i%save_every)\n\n img = gradient_ascent_loop(img, feature_extractor, optim_steps, step_size, max_loss=None)\n\n if save_every>0 and i%save_every==0:\n deproc_img = deprocess_image(img.numpy())\n\n deproc_img = cv2.GaussianBlur(deproc_img, (3, 3), 0)\n\n tf.keras.preprocessing.image.save_img(f\"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}\" + \".jpg\", deproc_img)\n print(f\"-------dream_{img.shape[1]}_{img.shape[2]}_{i}\" + \".jpg-------\")\n\n#----------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Deep Dream tutorial\")\n parser.add_argument(\"--src_img\", default=\"sky.jpg\", required=True, type=str, help=\"Source image to perform deep dram on\")\n parser.add_argument(\"--directory\", default=\"../dream_dir\", type=str, help=\"Result directory to save intermediate images\")\n parser.add_argument(\"--iterations\", default=\"1000\", type=int, help=\"How long to dream.\")\n parser.add_argument(\"--save_every\", default=\"1\", type=int, help=\"Saving image after every _ iterations\")\n parser.add_argument(\"--downscale_factor\", default=\"3\", type=int, help=\"Downscale factor for reducing image scale\")\n parser.add_argument(\"--overwrite_save_dir\", default=False, type=bool, help=\"Delete all files in selected directory\")\n\n args = parser.parse_args()\n\n proc = preprocess_image(args.src_img)\n print(proc.shape)\n\n model = get_feature_extractor(layer_settings)\n print(\"model loaded\\nDreaming\")\n\n if not os.path.isdir(args.directory):\n try:\n os.mkdir(args.directory)\n print(f\"created directory \\\"{args.directory}\\\"\")\n except:\n print(\"couldn't create directory\")\n\n if len(os.listdir(args.directory))>0 and args.overwrite_save_dir==True:\n for f in os.listdir(args.directory):\n os.remove(os.path.join(args.directory, f))\n print(\"Directory cleaned\")\n\n dream_on(proc, model, args.directory, iterations=args.iterations, save_every=args.save_every, downscale_factor=args.downscale_factor)\n\n\nif __name__ == \"__main__\":\n st = time.time()\n main()\n print(f\"Total time: {time.time()-st} s\")\n\n","sub_path":"src/keep_dreaming.py","file_name":"keep_dreaming.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
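num_octave and octave_scale are defined above but never used; a sketch of the classic coarse-to-fine octave schedule they usually drive, leaning on the script's gradient_ascent_loop, optim_steps, step_size and max_loss:

import tensorflow as tf

def dream_octaves(img, model, num_octave=3, octave_scale=1.4):
    base_shape = tf.cast(tf.shape(img)[1:3], tf.float32)
    for octave in range(num_octave):
        # smallest scale first, finishing at full resolution
        scale = octave_scale ** (num_octave - 1 - octave)
        new_shape = tf.cast(base_shape / scale, tf.int32)
        img = tf.image.resize(img, new_shape)
        img = gradient_ascent_loop(img, model, optim_steps, step_size, max_loss=max_loss)
    return img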
+{"seq_id":"245063302","text":"import os\nimport pandas as pd\nfrom simtools.Analysis.BaseAnalyzers import BaseAnalyzer\nimport matplotlib.pyplot as plt\n\nclass EradicationAnalyzer(BaseAnalyzer):\n def __init__(self):\n super().__init__(filenames=['output\\\\InsetChart.json'])\n\n self.ch_vec = ['Susceptible Population', 'Infected', 'Recovered Population', 'New Infections']\n\n def select_simulation_data(self, data, simulation):\n # Apply is called for every simulations included into the experiment\n # We are simply storing the population data in the pop_data dictionary\n header = data[self.filenames[0]][\"Header\"]\n time = [header[\"Start_Time\"] + dt * header[\"Simulation_Timestep\"] for dt in range(header[\"Timesteps\"])]\n\n ret = {\n 'sample_index': simulation.tags.get('__sample_index__'),\n 'Time': time\n }\n\n for ch in self.ch_vec:\n ret[ch] = data[self.filenames[0]][\"Channels\"][ch][\"Data\"]\n\n return ret\n\n\n def finalize(self, all_data):\n fig, ax_vec = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=(16,10))\n\n for key, data in all_data.items():\n d = pd.DataFrame(data)\n for ch, ax in zip(self.ch_vec, ax_vec.flatten()):\n ax.plot(d['Time'], d[ch])\n ax.set_xlabel('Time')\n ax.set_ylabel(ch)\n\n #ax.legend([s.id for s in all_data.keys()])\n fig.savefig(os.path.join(self.working_dir, \"EradicationAnalyzer.png\"))\n\n any_infected = []\n # Sort our data by sample_index\n # We need to preserve the order by sample_index\n for d in sorted(all_data.values(), key=lambda k: k['sample_index']):\n any_infected.append(2*(d['Infected'][-1] == 0.0)-1) # At final time\n\n return pd.Series(any_infected)\n\n def cache(self):\n # Somehow required function for calibtool?\n return None\n","sub_path":"examples/Separatrix_BHM/EradicationAnalyzer.py","file_name":"EradicationAnalyzer.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
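Two small standalone checks of the arithmetic above: the time axis rebuilt from illustrative header values, and the 2*(condition)-1 trick that maps the final eradication test onto {-1, +1} for the returned series.

header = {"Start_Time": 0, "Simulation_Timestep": 1, "Timesteps": 5}
time = [header["Start_Time"] + dt * header["Simulation_Timestep"]
        for dt in range(header["Timesteps"])]
print(time)                 # [0, 1, 2, 3, 4]

eradicated = True           # Infected channel hit 0.0 at the final timestep
print(2 * eradicated - 1)   # 1; the same expression yields -1 when False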
+{"seq_id":"35018228","text":"from datetime import datetime\n\n\ndef get_coa_data(radius_data):\n \"\"\"Extract CoA keys from radius data\n\n Args:\n radius_data (dict): Radius data dict.\n\n Returns:\n list: list of values\n extracted from radius_data.\n \"\"\"\n keys_to_extract = [\n 'User-Name',\n 'Reply-Message',\n 'code',\n ]\n values = [radius_data.get(key, None) for key in keys_to_extract]\n\n if radius_data.get('has_response'):\n values.append(True)\n else:\n values.append(False)\n\n if radius_data.get('code', None) == 44:\n values.append(True) # Acked\n elif radius_data.get('code', None) == 45:\n values.append(False) # Nacked\n\n return values\n\n\ndef get_pod_data(radius_data):\n \"\"\"Extract POD keys from radius data\n\n Args:\n radius_data (dict): Radius data dict.\n\n Returns:\n list: list of values\n extracted from radius_data.\n \"\"\"\n keys_to_extract = [\n 'Acct-Session-Id',\n 'Reply-Message',\n 'code',\n ]\n values = [radius_data.get(key, None) for key in keys_to_extract]\n\n if radius_data.get('has_response'):\n values.append(True)\n else:\n values.append(False)\n\n if radius_data.get('code', None) == 41:\n values.append(True) # Acked\n elif radius_data.get('code', None) == 42:\n values.append(False) # Nacked\n\n return values\n\n\ndef get_alert_data(radius_data):\n \"\"\"Extract alert keys from radius data\n\n Args:\n radius_data (dict): Radius data dict.\n\n Returns:\n list: list of values\n extracted from radius_data.\n \"\"\"\n keys_to_extract = [\n 'username',\n 'nas_ip_address',\n 'nas_identifier',\n 'nas_port_id',\n 'nas_port_type',\n 'nas_port',\n 'framed_ip_address',\n 'session_time',\n 'terminate_cause',\n ]\n\n values = [getattr(radius_data, key) for key in keys_to_extract]\n return values\n\n\ndef get_stats_data(radius_data):\n \"\"\"Extract stat keys from radius data\n\n Args:\n radius_data (dict): Radius data dict.\n\n Returns:\n list: list of values\n extracted from radius_data.\n \"\"\"\n keys_to_extract = [\n 'Acct-Input-Octets',\n 'Acct-Output-Octets',\n 'Acct-Input-Packets',\n 'Acct-Output-Packets',\n 'Acct-Session-Time',\n 'User-Name',\n 'NAS-Identifier',\n ]\n values = [radius_data.get(key) for key in keys_to_extract]\n return values\n\n\ndef get_auth_data(radius_data):\n \"\"\"Extract keys from radius data\n plus reply_code and reply_message\n\n Args:\n radius_data (dict): Radius data dict.\n\n Returns:\n list: a list of values\n extracted from radius_data.\n \"\"\"\n keys_to_extract = [\n 'username',\n 'nas_ip_address',\n 'nas_identifier',\n 'nas_port_id',\n 'nas_port_type',\n 'nas_port',\n 'called_station_id',\n 'calling_station_id'\n ]\n values = [getattr(radius_data, key) for key in keys_to_extract]\n\n reply_code = radius_data.reply_message\n reply_message = reply_message_converter(reply_code)\n values.append(reply_code)\n values.append(reply_message)\n return values\n\n\ndef append_monthly_data(values):\n \"\"\"\n Append event_date, event_timestamp\n\n Args:\n values (list): List of values.\n\n Returns:\n list: Values with other fields appended.\n\n \"\"\"\n now = datetime.utcnow()\n values.append(now.strftime('%Y-%m')) # Appending event_date e.g. 
2017-1\n values.append(now) # Appending event_timestamp\n return values\n\n\ndef append_weekly_data(values):\n \"\"\"\n Append year, week_number and day to values\n\n Args:\n values (list): List of values\n\n Returns:\n list: New list with appended values.\n\n \"\"\"\n now = datetime.utcnow()\n year, week_number, _ = now.isocalendar()\n values.append(str(year))\n values.append(str(week_number))\n values.append(now) # Appending event_timestamp\n return values\n\n\ndef append_daily_data(values):\n \"\"\"\n Append event_date and event_timestamp\n\n Args:\n values (list): List of values\n\n Returns:\n list: New list with appended values.\n\n \"\"\"\n now = datetime.utcnow()\n values.append(now.strftime('%Y-%m-%d'))\n values.append(now) # Appending event_timestamp\n return values\n\n\ndef reply_message_converter(reply_code):\n \"\"\" Convert reply_code to reply messages \"\"\"\n return REPLY_MESSAGES.get(reply_code, None)\n\n\nREPLY_MESSAGES = {\n 'E=900': 'NORMAL_USERNAME_DOESNT_EXISTS',\n 'E=901': 'USER_LOCKED',\n 'E=902': 'NO_CHARGE_DEFINED',\n 'E=903': 'NO_APPLICABLE_RULE',\n 'E=904': 'ABS_EXP_DATE_REACHED',\n 'E=905': 'REL_EXP_DATE_REACHED|EXP_FROM_CREATION_DATE_REACHED|EXP_FROM_FIRST_LOGIN_REACHED',\n 'E=906': 'CREDIT_FINISHED',\n 'E=907': 'WRONG_PASSWORD',\n 'E=908': 'MAX_CONCURRENT|RAS_DOESNT_ALLOW_MULTILOGIN',\n 'E=909': 'UNKNOWN_ERROR',\n 'E=910': 'LOGIN_FROM_THIS_MAC_DENIED',\n 'E=911': 'LOGIN_FROM_THIS_IP_DENIED',\n 'E=912': 'CANT_USE_MORE_THAN_ONE_SERVICE',\n 'E=913': 'LOGIN_FROM_THIS_CALLER_ID_DENIED',\n 'E=914': 'TIMELY_QUOTA_EXCEEDED',\n 'E=915': 'TRAFFIC_QUOTA_EXCEEDED',\n 'E=916': 'SYSTEM_SHUTTING_DOWN',\n 'E=917': 'LOGIN_NOT_ALLOWED',\n 'E=918': 'DAY_USAGE_FINISHED',\n 'E=919': 'LOGIN_FROM_THIS_PORT_DENIED',\n 'E=920': 'LATE_AUTH_REQUEST'\n}\n","sub_path":"src/bolts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
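A worked example of get_coa_data above, assuming the module's functions are importable; code 44 is CoA-ACK, so both trailing flags come out True:

sample = {'User-Name': 'alice', 'Reply-Message': 'ok', 'code': 44,
          'has_response': True}
print(get_coa_data(sample))  # ['alice', 'ok', 44, True, True]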
+{"seq_id":"1865556","text":"import csv\nimport os\nimport re\nimport shutil\n\n# Batch-renames the ebook collection \"[电子图书·学校专集][万册pdf图书集][全][逍遥昱昕收集整理制作]\"\n# Based on an index, adds a proper text filename to files whose names are in numeric format\n# e.g. renames the \"ts001001.pdf\" format to the \"ts001001_id_author_bookname.pdf\" format\n# At the same time, re-sorts all books into their own categories\n# For safety, every operation here copies instead of moving\n\n\n\n\n\n# Load the index\nbook_index_csv = 'C:\\\\Users\\\\****\\Desktop\\\\books.csv'\nwith open(book_index_csv, 'r', encoding='gb18030', newline='') as readcsv:\n\tcr1 = csv.reader(readcsv)\n\trows = [r for r in cr1]\n\trows[0] += ['bookid', 'original filename', 'new filename', 'rename status (success or not)']\n\n# New files will live under\nnew_root = 'd:\\\\new_root'\n\n# Walk the original numeric filenames and match them to index rows\nwd = os.path.abspath('C:\\\\Users\\\\****\\\\Desktop\\\\[电子图书·学校专集][万册pdf图书集][全][逍遥昱昕收集整理制作]')\nfor root, dir, files in os.walk(wd):\n\tfor f in files:\n\t\t# print(os.path.join(root,f))\n\t\t# Extract the numeric part of the filename and split it in two (by slicing: the first three digits number the sub-folders of 100 books each, the last three number the book inside its sub-folder)\n\t\tp = r'(?<=\\D)\\d+(?=\\D)'\n\t\tif re.search(p, f):\n\t\t\tdig = re.findall(r'(?<=\\D)\\d+(?=\\D)', f)[0]\n\t\t\tbookid = 100 * (int(dig[0:3]) - 1) + int(dig[3:6])\n\t\t\told_filename = os.path.splitext(f)\n\t\t\tnew_sub_folder = os.path.abspath(os.path.join(new_root, rows[bookid][1]))\n\t\t\tif not os.path.exists(new_sub_folder):\n\t\t\t\tos.makedirs(new_sub_folder)\n\t\t\tnew_file_name = 'bookid_' + str(\"%05d\" % bookid) + '_' + old_filename[0] + '_' + rows[bookid][2] + \\\n\t\t\t                old_filename[1]\n\n\t\t\trows[bookid] += [str(bookid), os.path.join(root, f), os.path.join(new_sub_folder, new_file_name),\n\t\t\t                 'successful!']\n\t\t\tif not os.path.exists(rows[bookid][6]):\n\t\t\t\tshutil.copy(rows[bookid][5], rows[bookid][6])\n\nprint('done!')\nprint('writing the CSV record back')\nwith open(os.path.join(new_root, 'new_files_utf-8.csv'), 'a', encoding='utf-8', newline='') as wrcsv:\n\tcsvwriter = csv.writer(wrcsv)\n\tcsvwriter.writerows(rows)\n\n\n\n\n\t# os.mkdir(new_sub_folder)\n\t# shutil.copy(rows[905][5],rows[905][6])\n\n\t# Before running, replace every illegal character in the CSV's filename column with underscores ahead of time, such as question marks (in Excel the replace target is the escaped \"~?\"), backslashes, slashes, spaces (to be safe), quotes, and so on.\n","sub_path":"daily_codes/batch_file_op/rename_by_csv_indexed/rename_by_csv_indexed.py","file_name":"rename_by_csv_indexed.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
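A worked example of the index math above: a file named 'ts012034.pdf' sits in sub-folder 012 as book 034, so it maps to CSV row 100*(12-1)+34 = 1134.

import re

f = 'ts012034.pdf'
dig = re.findall(r'(?<=\D)\d+(?=\D)', f)[0]   # '012034'
bookid = 100 * (int(dig[0:3]) - 1) + int(dig[3:6])
print(bookid)  # 1134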
+{"seq_id":"157251447","text":"# The board dimensions.\nWIDTH = 7\nHEIGHT = 6\n\n# The number of tokens that must be played in a row by a player in\n# order to win.\nWIN_LENGTH = 4\n\n# The token symbols for the different players.\n# There can be more than two players\nPLAYERS = \"XO\"\n\n\ndef get_piece(board, x, y):\n '''(str, int, int) -> str\n\n Return the piece at position x, y on the board or the empty string\n if the space is empty.\n\n >>> get_piece(' XOX XXXX ', 0, 2)\n ''\n >>> get_piece(' XOX XXXX ', 0, 3)\n 'X'\n >>> get_piece(' XOX XXXX ', 0, 4)\n 'O'\n '''\n \n position = (HEIGHT * x) + y #multiply the x by the height and add the y\n if board[position] == 'X':\n return 'X'\n elif board[position] == 'O':\n return 'O'\n else:\n return ''\n\n\ndef extract_run(board, x, y, delta_x, delta_y, length):\n '''(str, int, int, int, int, int) -> str\n\n Return all the non-empty pieces on the board in a line of slope\n delta_y/delta_x starting from position x, y.\n\n >>> extract_run(' X XO XOO XXOX ',\n 0, 5, 1, -1, 4)\n 'XXXX'\n >>> extract_run(' 123OXOXOX456 ',\n 0, 2, 0, 1, HEIGHT)\n ''\n '''\n\n # TODO (Optional): Complete this function.\n # You do not need to complete this function for full marks, but you may\n # find it useful.\n return ''\n\ndef print_board(board):\n '''(str) -> None\n\n Display the board on the screen.\n\n Note the blank line before and after the the board.\n\n >>> print_board('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef')\n\n Board:\n |1|7|C|I|O|U|a|\n |2|8|D|J|P|V|b|\n |3|9|E|K|Q|W|c|\n |4|0|F|L|R|X|d|\n |5|A|G|M|S|Y|e|\n |6|B|H|N|T|Z|f|\n ---------------\n\n '''\n \n print('')\n print('Board:')\n\n # first loop to create a new board with rearranged pieces.\n index = 0\n count = 0\n count2 = 1\n new_board = ''\n while count <= len(board):\n new_board = new_board + board[index] + '|'\n index = index + 6\n if index >= len(board):\n index = count2\n count2 = count2 + 1\n count = count + 1\n \n # second loop to print out the new rearranged board.\n i1 = 0\n i2 = 13\n n = 0\n while n < 6:\n print('|' + new_board[i1:i2] + '|')\n i1 = i2 + 1\n i2 = i2 + 14\n n = n + 1\n print('---------------')\n print('')\n\n\ndef has_won(board):\n '''(str ->) bool\n\n Return True if there are four consecutive peices on the board and a player has won.\n\n >>> has_won(' XOX XXXX ')\n 'True'\n >>> has_won(' XOX XXOX ')\n 'False'\n '''\n \n # loop to check vertical win via vertical indexes\n count = 0\n ver_index1 = 0\n ver_index2 = 6\n while count < WIDTH + 1:\n if 'X' * WIN_LENGTH in board[ver_index1:ver_index2] or 'O' * WIN_LENGTH in board[ver_index1:ver_index2]: \n return True\n else:\n ver_index1 = ver_index2\n ver_index2 = ver_index2 + 6\n count = count + 1\n\n # loop to check horizonal win via horizontal indexes\n count2 = 0\n horiz_index1 = 0\n horiz_index2 = 36\n while count2 < WIDTH + 1:\n if 'X' * WIN_LENGTH in board[horiz_index1:horiz_index2:6] or 'O' * WIN_LENGTH in board[horiz_index1:horiz_index2:6]:\n return True\n else:\n horiz_index1 = horiz_index1 + 1\n horiz_index2 = horiz_index2 + 1\n count2 = count2 + 1\n\n # loop to check diagonal win left to right via diagonal indexes\n count3 = 0\n diag_index1 = 0\n diag_index2 = 36\n while count3 < WIDTH + 1:\n if 'X' * WIN_LENGTH in board[diag_index1:diag_index2:7] or 'O' * WIN_LENGTH in board[diag_index1:diag_index2:7]:\n return True\n elif diag_index1 == 2:\n diag_index1 = diag_index1 + 4\n diag_index2 = diag_index2 + 18\n elif diag_index1 >= 6:\n diag_index1 = diag_index1 + 6\n diag_index2 = diag_index2 - 1\n else:\n 
diag_index1 = diag_index1 + 1\n diag_index2 = diag_index2 - 6\n count3 = count3 + 1\n # loop to check diagonal win right to left via diagonal indexes\n count4 = 0\n diag2_index1 = 3\n diag2_index2 = 19\n while count4 < WIDTH + 1:\n if 'X' * WIN_LENGTH in board[diag2_index1:diag2_index2:5] or 'O' * WIN_LENGTH in board[diag2_index1:diag2_index2:5]:\n return True\n elif diag2_index1 == 5:\n diag2_index1 = diag2_index1 + 6\n diag2_index2 = diag2_index2 + 6\n elif diag2_index1 >= 11:\n diag2_index1 = diag2_index1 + 6\n diag2_index2 = diag2_index2 + 1\n else:\n diag2_index1 = diag2_index1 + 1\n diag2_index2 = diag2_index2 + 6\n count4 = count4 + 1\n return False\n\n\ndef insert_piece(board, column, player):\n '''(str, int, str) -> str\n\n Return a string representing the board if player inserts a piece\n in column of board.\n\n board must have free space in column.\n\n >>> insert_piece('XXXXXX OOOO ', 1, 'X')\n 'XXXXXX XOOOO '\n '''\n \n col_pos = column * 6\n last_pos = col_pos + 5\n index = 0\n end = False\n new_board = ''\n # loop that creates a new board by adding each character\n # from the given board one by one.\n \n while index < len(board):\n if col_pos <= index <= col_pos + 5 and not end: # once the loop gets to the\n # given column it looks for the last white space \n if index == last_pos or (board[index] == \" \" and board[index + 1] != \" \"): \n new_board = new_board + player # insert the player character into the last space and end the loop\n index = index + 1\n end = True \n elif board[index] == \" \" and board[index + 1] == \" \":\n new_board = new_board + board[index]\n index = index + 1 \n else: # then continue copying the rest of the board to the new board\n new_board = new_board + board[index] \n index = index + 1\n return new_board\n \ndef next_player(cur_player):\n '''(str) -> str\n\n Return the name of the next player.\n\n >>> next_player('X')\n 'O' \n '''\n \n # loop to check the index of the current player\n index = 0\n while index < len(PLAYERS):\n if cur_player == PLAYERS[index]:\n if (index + 1) == len(PLAYERS): # if there is no next player, reset to first player.\n cur_player = PLAYERS[0]\n return cur_player\n else:\n cur_player = PLAYERS[index + 1] # otherwise switch to the next player\n return cur_player\n else:\n index = index + 1\n \ndef get_column(board, player):\n '''(str, str) -> int\n\n Return an integer representing the column number unless the specified\n column is full or an invalid number.\n\n >>> get_column('XXXXXX OOOO ', 'X')\n 'Player X, select a column from 1 to 7: '\n '''\n \n # first prompt for column\n usr_inp = input('Player ' + player + ', select a column from 1 to 7: ')\n \n # loop that keeps asking for different input unless a valid input is given.\n while usr_inp not in '1234567' or 0 > int(usr_inp) > 7 or len(usr_inp) > 1:\n usr_inp = input('Player ' + player + ', select a column from 1 to 7: ')\n usr_inp = int(usr_inp) - 1\n \n # second loop that checks whether the specified column is full.\n while \" \" not in board[usr_inp * 6:(usr_inp * 6) + 6]:\n print(\"Column is full.\")\n usr_inp = input('Player ' + player + ', select a column from 1 to 7: ')\n usr_inp = int(usr_inp) - 1\n\n return usr_inp\n\n##############################################################################\n##############################################################################\n############# ###################\n############# Do not change anything below this line ###################\n############# 
###################\n##############################################################################\n##############################################################################\n\n\ndef board_filled(board):\n '''(str) -> bool\n\n Return True iff board contains no empty spaces.\n\n >>> board_filled(' XOX XXXX ')\n False\n >>> board_filled('ABCDEFGXXXXXABCDFGHXXXXXXXXXXXXXXXXXXXXXXX')\n True\n '''\n\n while board != '':\n if board[0] == ' ':\n return False\n\n board = board[1:]\n\n return True\n\n\ndef congratulate_winner(board, player):\n '''(str, str) -> None\n\n Print a congratulatory message to the player.\n '''\n\n print(\"Congratulations! \" + player + \" wins!\")\n\n\ndef draw_message(board):\n '''(str) -> None\n\n Print a message in the event of a draw. board is ignored.\n\n >>> draw_message('')\n Game ended in a draw.\n '''\n\n print(\"Game ended in a draw.\")\n\n\n### Main program ###\n\nboard = \" \" * WIDTH * HEIGHT\nplayer = PLAYERS[-1]\n\nwhile not has_won(board) and not board_filled(board):\n player = next_player(player)\n print_board(board)\n column = get_column(board, player)\n board = insert_piece(board, column, player)\n\n\nprint_board(board)\n\nif has_won(board):\n congratulate_winner(board, player)\nelse:\n draw_message(board)\n\n","sub_path":"a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":9858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"549675399","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL (). All Rights Reserved\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nfrom openerp.osv import fields,osv\n\n\nclass material_request_barcode(osv.osv):\n _inherit = \"material.request\"\n _columns = {\n 'mr_emp_code' : fields.char('Employee Code', size=64),\n 'mr_emp_id': fields.many2one('hr.employee', 'Employee'),\n 'show_barcode_info': fields.boolean('Show?'), \n }\n def onchange_mr_emp_code(self, cr, uid, ids, mr_emp_code, context=None):\n \"\"\" On change of product barcode.\n @param bc_product_code: Changed Product code\n @return: Dictionary of values\n \"\"\"\n \n if not mr_emp_code:\n return {}\n emp_obj = self.pool.get('hr.employee')\n emp_ids = emp_obj.search(cr, uid, [('emp_code','=',mr_emp_code)],context)\n if not emp_ids or len(emp_ids) == 0:\n return {}\n dept_id = emp_obj.browse(cr, uid, emp_ids, context)[0].department_id.id\n result = {'mr_emp_id':emp_ids[0],'mr_dept_id':dept_id}\n return {'value': result}\n \n def onchange_mr_emp_id(self, cr, uid, ids, mr_emp_id, context=None):\n \"\"\" On change of product barcode.\n @param bc_product_code: Changed Product code\n @return: Dictionary of values\n \"\"\"\n \n if not mr_emp_id:\n return {}\n emp_obj = self.pool.get('hr.employee')\n emp = emp_obj.browse(cr, uid, mr_emp_id,context)\n result = {'mr_dept_id':emp.department_id.id,'mr_emp_code':emp.emp_code}\n return {'value': result}\n \n def create(self, cr, user, vals, context=None):\n vals.update({'show_barcode_info':False})\n return super(material_request_barcode,self).create(cr, user, vals, context)\n \n def write(self, cr, user, ids, vals, context=None):\n vals.update({'show_barcode_info':False})\n return super(material_request_barcode,self).write(cr, user, ids, vals, context)\n \n _defaults={'show_barcode_info':False}\n \nclass material_request_line_barcode(osv.osv):\n _inherit = \"material.request.line\"\n _columns = {\n 'bc_product_code' : fields.char('Product Code', size=64),\n }\n def onchange_bc_product_code(self, cr, uid, ids, bc_product_code, mr_emp_id, product_id, context=None):\n \"\"\" On change of product barcode.\n @param bc_product_code: Changed Product code\n @return: Dictionary of values\n \"\"\"\n \n if not bc_product_code:\n return {}\n prod_obj = self.pool.get('product.product')\n prod_ids = prod_obj.search(cr, uid, [('default_code','ilike',bc_product_code)],context)\n if not prod_ids or len(prod_ids) == 0:\n return {}\n prod_id = prod_ids[0]\n #bc_product_name = prod_obj.name_get(cr, uid, [prod_id], context)[0][1]\n result = {'product_id':prod_id,'product_qty':1,'mr_emp_id':mr_emp_id}\n if product_id and product_id == prod_id:\n id_change_resu = 
self.onchange_product_id(cr, uid, ids, product_id)\n result.update(id_change_resu['value'])\n return {'value': result}\n def default_get(self, cr, uid, fields_list, context=None):\n resu = super(material_request_line_barcode,self).default_get(cr, uid, fields_list, context)\n #material_request.type: mr or mrr\n if context.get('set_emp_id'):\n resu.update({'mr_emp_id':context.get('set_emp_id')})\n return resu \n","sub_path":"dmp_stock_mt_barcode/stock_barcode.py","file_name":"stock_barcode.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"528555837","text":"from django.db import models\nfrom django.utils import timezone\n\nclass Work(models.Model):\n\tcourse = models.ForeignKey(\"Prefect.Course\", null=True, on_delete=models.SET_NULL)\n\tnumber = models.IntegerField()\n\tschool_year = models.ForeignKey(\"Base.SchoolYear\", null=True, on_delete=models.SET_NULL)\n\tmaxima = models.IntegerField(null=True)\n\tdate = models.DateField(default=timezone.now)\n\twork_type = models.ForeignKey(\"WorkType\", null=True, on_delete=models.SET_NULL)\n\tis_valid = models.BooleanField(default=True, blank=True, null=True)\n\tcategory = models.ForeignKey(\"Category\", on_delete=models.CASCADE)\n\n\tdef __str__(self):\n\t\treturn f\"work {self.course.name} #{self.number}\"\n\n\tclass Meta:\n\t\tunique_together = ('course', 'number', 'school_year','category')","sub_path":"apps/TeachersAndTitulars/models/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"467271222","text":"import os\nimport multiprocessing\nfrom itertools import repeat\n\nimport numpy as np\nimport numpy.testing as npt\nimport nose.tools as nt\nfrom scipy.signal import fftconvolve\n\nimport popeye.utilities as utils\nfrom popeye import dog, og\nfrom popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus, resample_stimulus\n\ndef test_dog():\n \n # stimulus features\n pixels_across = 800\n pixels_down = 600\n viewing_distance = 38\n screen_width = 25\n thetas = np.arange(0,360,45)\n num_steps = 20\n ecc = 10\n tr_length = 1.0\n frames_per_tr = 1.0\n scale_factor = 0.10\n dtype = 'short'\n \n # create the sweeping bar stimulus in memory\n bar = simulate_bar_stimulus(pixels_across, pixels_down, viewing_distance, screen_width, thetas, num_steps, ecc)\n \n # resample the stimulus to 50% of original\n bar = resample_stimulus(bar, 0.50)\n \n # create an instance of the Stimulus class\n stimulus = VisualStimulus(bar, viewing_distance, screen_width, scale_factor, dtype)\n \n # initialize the gaussian model\n model = dog.DifferenceOfGaussiansModel(stimulus)\n \n # set the pRF params\n x = -5.2\n y = 2.5\n sigma_center = 1.2\n sigma_surround = 2.9\n beta_center = 2.5\n beta_surround = 1.6\n hrf_delay = -0.2\n \n # create \"data\"\n data = dog.compute_model_ts(x, y, sigma_center, sigma_surround, beta_center, beta_surround, hrf_delay,\n stimulus.deg_x, stimulus.deg_y, stimulus.stim_arr, tr_length)\n \n # first fit the one gaussian\n search_bounds = ((-10,10),(-10,10),(0.25,5.25),(0.1,1e2),(-5,5))\n fit_bounds = ((-12,12),(-12,12),(1/stimulus.ppd,12),(0.1,1e3),(-5,5))\n og_fit = og.GaussianFit(model, data, search_bounds, fit_bounds, tr_length, (1,2,3), False, False)\n \n # then fit the two gaussian\n fit_bounds = ((-12,12),(-12,12),(1/stimulus.ppd,12),(1/stimulus.ppd,12),(0.1,1e2),(0.1,1e2),(-5,5),)\n dog_fit = dog.DifferenceOfGaussiansFit(og_fit, fit_bounds, True, False)\n \n # assert equivalence\n nt.assert_almost_equal(dog_fit.x, x)\n nt.assert_almost_equal(dog_fit.y, y)\n nt.assert_almost_equal(dog_fit.sigma_center, sigma_center)\n nt.assert_almost_equal(dog_fit.sigma_surround, sigma_surround)\n nt.assert_almost_equal(dog_fit.beta_center, beta_center)\n nt.assert_almost_equal(dog_fit.beta_surround, beta_surround)\n nt.assert_almost_equal(dog_fit.hrf_delay, hrf_delay)\n\n","sub_path":"popeye/tests/test_dog.py","file_name":"test_dog.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"37031617","text":"from requests import get\nfrom bs4 import BeautifulSoup\nfrom PyMyAdmin import Database\n\ndef makeRequest(url):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Connection': 'Keep-alive',\n }\n req = get(url, headers=headers)\n return req.text\n except Exception as erro:\n return \"[!] Error request\" + str(erro)\n\n\ndef getPrice(html):\n # Scraping html sented via getPriceAssets function\n bs4 = BeautifulSoup(html,'html.parser')\n asset_code = bs4.find('div', attrs={'class':'fin_metadata b_demoteText'}).text.split('·')[0]\n asset_price = bs4.find('div', attrs={'class':'b_focusTextMedium'}).text\n \n print(asset_code+' R$'+asset_price)\n Database(asset_code,asset_price).insertData()\n\n\ndef main():\n with open('Tickerlinks.txt','r') as ticker:\n x= ticker.readlines()\n for i in x:\n if not '-' in i:\n url = 'https://www.bing.com/search?q='+i.replace('\\n','')+' cotação'\n html = makeRequest(url)\n getPrice(html)\n \nmain()","sub_path":"filesnotused/GetPrice.py","file_name":"GetPrice.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"366544965","text":"import tensorflow as tf\nimport larq as lq\nfrom larq_zoo import utils\nfrom zookeeper import registry, HParams\n\n\n@registry.register_model\ndef dorefa_net(hparams, input_shape, num_classes, input_tensor=None, include_top=True):\n def conv_block(x, filters, kernel_size, strides=1, pool=False, pool_padding=\"same\"):\n x = lq.layers.QuantConv2D(\n filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"same\",\n input_quantizer=hparams.input_quantizer,\n kernel_quantizer=hparams.kernel_quantizer,\n kernel_constraint=None,\n use_bias=False,\n )(x)\n x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9, epsilon=1e-4)(\n x\n )\n if pool:\n x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding=pool_padding)(\n x\n )\n return x\n\n def fully_connected_block(x, units):\n x = lq.layers.QuantDense(\n units,\n input_quantizer=hparams.input_quantizer,\n kernel_quantizer=hparams.kernel_quantizer,\n kernel_constraint=None,\n use_bias=False,\n )(x)\n x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9, epsilon=1e-4)(\n x\n )\n return x\n\n # get input\n img_input = utils.get_input_layer(input_shape, input_tensor)\n\n # feature extractor\n out = tf.keras.layers.Conv2D(\n 96, kernel_size=12, strides=4, padding=\"valid\", use_bias=True\n )(img_input)\n out = conv_block(out, filters=256, kernel_size=5, pool=True)\n out = conv_block(out, filters=384, kernel_size=3, pool=True)\n out = conv_block(out, filters=384, kernel_size=3)\n out = conv_block(out, filters=256, kernel_size=3, pool_padding=\"valid\", pool=True)\n\n # classifier\n if include_top:\n out = tf.keras.layers.Flatten()(out)\n out = fully_connected_block(out, units=4096)\n out = fully_connected_block(out, units=4096)\n out = tf.keras.layers.Activation(\"clip_by_value_activation\")(out)\n out = tf.keras.layers.Dense(num_classes, use_bias=True)(out)\n out = tf.keras.layers.Activation(\"softmax\")(out)\n\n return tf.keras.Model(inputs=img_input, outputs=out, name=\"dorefanet\")\n\n\n@lq.utils.register_keras_custom_object\n@lq.utils.set_precision(1)\ndef magnitude_aware_sign_unclipped(x):\n \"\"\"\n Scaled sign function with identity pseudo-gradient as used for the\n weights in the DoReFa paper. 
The Scale factor is calculated per layer.\n \"\"\"\n scale_factor = tf.stop_gradient(tf.reduce_mean(tf.abs(x)))\n\n @tf.custom_gradient\n def _magnitude_aware_sign(x):\n return lq.math.sign(x) * scale_factor, lambda dy: dy\n\n return _magnitude_aware_sign(x)\n\n\n@lq.utils.register_keras_custom_object\ndef clip_by_value_activation(x):\n return tf.clip_by_value(x, 0, 1)\n\n\n@registry.register_hparams(dorefa_net)\nclass default(HParams):\n epochs = 90\n batch_size = 256\n learning_rate = 0.0002\n decay_start = 60\n decay_step_2 = 75\n fast_decay_start = 82\n activations_k_bit = 2\n\n @property\n def input_quantizer(self):\n return lq.quantizers.DoReFaQuantizer(k_bit=self.activations_k_bit)\n\n @property\n def kernel_quantizer(self):\n return magnitude_aware_sign_unclipped\n\n def learning_rate_schedule(self, epoch):\n if epoch < self.decay_start:\n return self.learning_rate\n elif epoch < self.decay_step_2:\n return 4e-5\n elif epoch < self.fast_decay_start:\n return 8e-6\n else:\n return 8e-6 * 0.1 ** ((epoch - self.fast_decay_start) // 2 + 1)\n\n @property\n def optimizer(self):\n return tf.keras.optimizers.Adam(self.learning_rate, epsilon=1e-5)\n\n\ndef DoReFaNet(\n include_top=True,\n weights=\"imagenet\",\n input_tensor=None,\n input_shape=None,\n classes=1000,\n):\n \"\"\"Instantiates the DoReFa-net architecture.\n Optionally loads weights pre-trained on ImageNet.\n ```netron\n dorefanet-v0.1.0/dorefanet.json\n ```\n ```plot-altair\n /plots/dorefanet.vg.json\n ```\n # Arguments\n include_top: whether to include the fully-connected layer at the top of the network.\n weights: one of `None` (random initialization), \"imagenet\" (pre-training on\n ImageNet), or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as\n image input for the model.\n input_shape: optional shape tuple, only to be specified if `include_top` is False,\n otherwise the input shape has to be `(224, 224, 3)`.\n It should have exactly 3 inputs channels.\n classes: optional number of classes to classify images into, only to be specified\n if `include_top` is True, and if no `weights` argument is specified.\n\n # Returns\n A Keras model instance.\n\n # Raises\n ValueError: in case of invalid argument for `weights`, or invalid input shape.\n\n # References\n - [DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low\n Bitwidth Gradients](https://arxiv.org/abs/1606.06160)\n \"\"\"\n input_shape = utils.validate_input(input_shape, weights, include_top, classes)\n\n model = dorefa_net(\n default(),\n input_shape,\n classes,\n input_tensor=input_tensor,\n include_top=include_top,\n )\n\n # Load weights.\n if weights == \"imagenet\":\n # download appropriate file\n if include_top:\n weights_path = utils.download_pretrained_model(\n model=\"dorefanet\",\n version=\"v0.1.0\",\n file=\"dorefanet_weights.h5\",\n file_hash=\"645d7839d574faa3eeeca28f3115773d75da3ab67ff6876b4de12d10245ecf6a\",\n )\n else:\n weights_path = utils.download_pretrained_model(\n model=\"dorefanet\",\n version=\"v0.1.0\",\n file=\"dorefanet_weights_notop.h5\",\n file_hash=\"679368128e19a2a181bfe06ca3a3dec368b1fd8011d5f42647fbbf5a7f36d45f\",\n )\n model.load_weights(weights_path)\n elif weights is not None:\n model.load_weights(weights)\n return model\n","sub_path":"larq_zoo/dorefanet.py","file_name":"dorefanet.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"109936260","text":"import cv2\r\nimport numpy as np\r\n\r\n### 웹툰처럼 보이게 하기 ###\r\n\r\n#img = cv2.imread('./lena.jpg')\r\n#cv2.imshow('src', img)\r\n\r\ncap = cv2.VideoCapture('./vtest.avi')\r\nwhile True:\r\n ret, img = cap.read()\r\n if ret:\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 그레이로 색상 변경\r\n img_gray = cv2.medianBlur(img_gray, 7) # 잡음 제거\r\n\r\n edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize = 5)\r\n ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV) # 흰 바탕에 검은 줄\r\n #kernel = np.ones((6, 6), np.uint8) # cv2.MORPH_RECT : 정사각형 구조\r\n # 사각형 사이즈 커질수록 선 자체가 두꺼워 짐\r\n #mask = cv2.erode(mask, kernel, iterations= 5) # 선이 더 두껍게 나옴\r\n\r\n img_sketch = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\r\n\r\n ## 값 설정 ##\r\n sigma_color=5\r\n sigma_space=7\r\n size=5\r\n\r\n img_bi = cv2.bilateralFilter(img, size, sigma_color, sigma_space) # 경계 유지\r\n\r\n dst = cv2.bitwise_and(img_bi, img_bi, mask = mask)\r\n\r\n #cv2.imshow('Sketch', img_sketch)\r\n #cv2.imshow('Sketch', img_bi) # 뽀샵 효과\r\n cv2.imshow('Sketch', dst)\r\n key = cv2.waitKey(40) # 매개변수 안의 값은 영상 속도와 관련이 있음\r\n if key == 27: # esc\r\n break\r\n else:\r\n break\r\n\r\ncap.release()\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"형태학적 처리/cartoon.py","file_name":"cartoon.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"500265290","text":"import pandas as pd\nimport numpy as np\nimport talib as tb\nimport glob\nimport os\nimport time\nimport re\n\nfolder_path = 'C:\\\\Users\\\\amdge\\\\Desktop\\\\origcsvs'\nprint(\"Calculating...\")\n\n\n\nfor filename in glob.glob(os.path.join(folder_path, '*.csv')):\n start = time.time()\n x = re.match(r\"(C:\\\\Users\\\\amdge\\\\Desktop\\\\origcsvs)\\\\(.*).csv\", filename)\n datafile = filename\n data = pd.read_csv(datafile, index_col='Date')\n data.index = pd.to_datetime(data.index)\n\n #SMA, EMA\n\n smavol20 = data['Volume'].rolling(20).mean()\n ema20 = tb.EMA(np.asarray(data['Close']), timeperiod=20)\n ema50 = tb.EMA(np.asarray(data['Close']), timeperiod=50)\n ema100 = tb.EMA(np.asarray(data['Close']), timeperiod=100)\n ema200 = tb.EMA(np.asarray(data['Close']), timeperiod=200)\n\n # CCI\n\n cci10 = tb.CCI(np.asarray(data['High']), np.asarray(data['Low']), np.asarray(data['Close']), timeperiod=10)\n cci20 = tb.CCI(np.asarray(data['High']), np.asarray(data['Low']), np.asarray(data['Close']), timeperiod=20)\n cci30 = tb.CCI(np.asarray(data['High']), np.asarray(data['Low']), np.asarray(data['Close']), timeperiod=30)\n\n # STOCH\n\n slowk, slowd = tb.STOCH(np.asarray(data['High']), np.asarray(data['Low']), np.asarray(data['Close']), fastk_period=14,\n slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)\n\n #MEDPRICE\n\n medpr = tb.MEDPRICE(np.asarray(data['High']), np.asarray(data['Low']))\n\n #ADX\n\n adxpr = tb.ADX(np.asarray(data['High']), np.asarray(data['Low']), np.asarray(data['Close']), timeperiod=14)\n\n data['MEDPRICE'] = np.round(medpr, decimals=2)\n data['ADX'] = np.round(adxpr, decimals=2)\n data['CCI10'] = np.round(cci10, decimals=2)\n data['CCI20'] = np.round(cci20, decimals=2)\n data['CCI30'] = np.round(cci30, decimals=2)\n data['STOCHK'] = np.round(slowk, decimals=2)\n data['STOCHD'] = np.round(slowd, decimals=2)\n data['EMA20'] = np.round(ema20, decimals=2)\n data['EMA50'] = np.round(ema50, decimals=2)\n data['EMA100'] = np.round(ema100, decimals=2)\n data['EMA200'] = np.round(ema200, decimals=2)\n data['SMA20Vol'] = np.round(smavol20, decimals=2)\n data.to_csv(\"C:\\\\Users\\\\amdge\\\\Desktop\\\\CCIstockwise\\\\\" + x.group(2) + \".csv\")\n end = time.time()\n print(\"Calculated indicators for \" + x.group(2) + \" stock. Execution time: \" + str(round((end - start), 2)) + \" seconds\")\nprint(\"Task completed.\")\n","sub_path":"cci.py","file_name":"cci.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"79649074","text":"from flask_assets import Bundle, Environment\nfrom app import app\n\njs = Bundle(\n 'js/lib/bootstrap.js',\n 'js/lib/grayscale.js',\n 'js/lib/jquery.js',\n output='gen/js_bundle.js',\n filters='jsmin'),\n\ncss = Bundle(\n 'css/lib/bootsrap.css',\n 'css/lib/grayscale.css',\n output='gen/css_bundle.css',\n filters='cssmin')\n\nassets = Environment(app)\n\nassets.register('js_all', js)\nasssets.register('css_all', css)","sub_path":"app/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"471008316","text":"#!/usr/bin/python\n\na=['e',1,4,'r','t',3,1]\na.append(['r','r'])\nprint (a)\n'kavelacni verjic qarakusi pakagcerov' \n\nb=['e',1,4,'r','t',3,1]\nb.extend(['r','r'])\nprint(b) \n'kavelacni verjic aranc qarakusi pakagceri'\n\nc=[1,'k','3','tux']\nc.insert(2,'ITC')\nprint(c)\n'erkrord indexic heto kavelacni ITC'\n\nd=['true','false',543,]\nd.index( 543 )\nprint(d)\n'petq e tpi 543-i indexy bayc chi tpum... patchary chgitem'\n\n\ne=['for','while','for']\ne.remove('while')\nprint(e)\n'while clean'\n\n\nf=[67,32,2,54,1,766]\nf.sort()\nprint(f)\n'kdasavori achman kargov'\n\nm=[12,'polo','manko','gosh'] \nm.reverse()\nprint(m)\n'tars ktpi'\n\nx=[24,'polo','manko'] \nx.pop(2)\nprint(x)\n'mankon kjnji'\n\n","sub_path":"python/homework/Movses/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"217320627","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\n \ndef nwd_v3(a, b):\n if b == 0:\n return a\n return nwd_v3(b, a % b)\n\n\ndef main(args):\n a = int(input(\"podaj liczbę naturalą: \"))\n b = int(input(\"podaj drugą liczbę naturalną: \"))\n assert nwd_v3(5, 10) == 5\n assert nwd_v3(3, 9) == 3\n assert nwd_v3(11, 33) == 11\n print(\"NWD({:d}, {:d}) = {:d}\".format(a, b, nwd_v3(a, b)))\n return 0\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"rekurencja6_Suter.py","file_name":"rekurencja6_Suter.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"276282539","text":"import math\nimport matplotlib.pyplot as plt\nimport PIL.Image as pil\nfrom torch import nn\nfrom torch.autograd import Function\nfrom utils import *\nfrom numba import jit\nimport torch\n\nimport bnmorph_getcorpts\n\ntorch.manual_seed(42)\n\n@jit(nopython=True)\ndef sigmoid_function(x):\n return 1 / (1 + np.exp(-x))\n\n@jit(nopython=True)\ndef window_function(x, smooth_range):\n sigmoid_bd = 12 / smooth_range\n bias_pos = smooth_range / 2 + 1\n bias_neg = 0 - smooth_range / 2\n if x < 1 and x > 0:\n y = 1\n elif x <= 0:\n y = sigmoid_function(sigmoid_bd * (x - bias_neg))\n else:\n y = sigmoid_function(-sigmoid_bd * (x - bias_pos))\n return y\n\n@jit(nopython=True)\ndef distance_function(x, pixel_range):\n sigmoid_bd = 12 / pixel_range\n bias = pixel_range / 2\n y = sigmoid_function(-sigmoid_bd * (x - bias))\n return y\n@jit(nopython=True)\ndef morph_along_lines(height, width, morphed_x, morphed_y, recorder, srcx_set, srcy_set, dstx_set, dsty_set, fig = 10):\n ratio = 1\n smooth_range = 0.5\n pixel_range = 40\n alpha_padding = 0.1\n for dimy in range(height):\n for dimx in range(width):\n for k in range(srcy_set.shape[0]):\n srcx = srcx_set[k]\n srcy = srcy_set[k]\n dstx = dstx_set[k]\n dsty = dsty_set[k]\n\n dragged_srcx = dstx + (ratio+1) * (srcx - dstx)\n dragged_srcy = dsty + (ratio+1) * (srcy - dsty)\n\n alpha = ((dimx - dragged_srcx) * (dstx - dragged_srcx) + (dimy - dragged_srcy) * (dsty - dragged_srcy)) / ((dragged_srcx - dstx)*(dragged_srcx - dstx) + (dragged_srcy - dsty)*(dragged_srcy - dsty) + 1e-5)\n\n d2src = np.sqrt((dragged_srcx - dimx)**2 + (dragged_srcx - dimy)**2)\n d2dst = np.sqrt((dstx - dimx)**2 + (dsty - dimy)**2)\n d2line = np.abs((dstx - srcx) * (srcy - dimy) - (srcx - dimx) * (dsty - srcy)) / (np.sqrt( (srcx - dstx)*(srcx - dstx) + (srcy - dsty)*(srcy - dsty) ) + 1e-5)\n\n if alpha < 1 and alpha > 0:\n dgeneral = d2line\n else:\n if d2src > d2dst:\n dgeneral = d2dst\n else:\n dgeneral = d2src\n\n alpha_weight = window_function(alpha, smooth_range)\n pixel_range_weight = distance_function(dgeneral, pixel_range)\n recorder[k, 0] = distance_function(dgeneral, 20)\n\n recorder[k,1] = pixel_range_weight * alpha_weight * (dragged_srcx - dstx) * alpha * 0.8\n recorder[k,2] = pixel_range_weight * alpha_weight * (dragged_srcy - dsty) * alpha * 0.8\n\n totweights = 0\n avex = 0\n avey = 0\n for k in range(srcy_set.shape[0]):\n totweights = totweights + recorder[k][0]\n for k in range(srcy_set.shape[0]):\n avex = avex + recorder[k,0] / (totweights + 1e-4) * recorder[k,1]\n avey = avey + recorder[k,0] / (totweights + 1e-4) * recorder[k,2]\n morphed_x[dimy][dimx] = avex + dimx\n morphed_y[dimy][dimx] = avey + dimy\n\n \"\"\"\n ratio = 1\n smooth_range = 1\n pixel_range = 10\n # for k in range(srcy_set.shape[0]):\n k = 0\n srcx = srcx_set[k]\n srcy = srcy_set[k]\n dstx = dstx_set[k]\n dsty = dsty_set[k]\n\n dragged_srcx = dstx + (ratio + 1) * (srcx - dstx)\n dragged_srcy = dsty + (ratio + 1) * (srcy - dsty)\n for dimy in range(height):\n for dimx in range(width):\n\n\n alpha = ((dimx - dragged_srcx) * (dstx - dragged_srcx) + (dimy - dragged_srcy) * (dsty - dragged_srcy)) / ((dragged_srcx - dstx)*(dragged_srcx - dstx) + (dragged_srcy - dsty)*(dragged_srcy - dsty) + 1e-5)\n\n d2src = np.sqrt((dragged_srcx - dimx)**2 + (dragged_srcx - dimy)**2)\n d2dst = np.sqrt((dstx - dimx)**2 + (dsty - dimy)**2)\n d2line = np.abs((dstx - srcx) * (srcy - dimy) - (srcx - dimx) * (dsty - srcy)) / (np.sqrt( (srcx - dstx)*(srcx - dstx) + (srcy - dsty)*(srcy - dsty) ) + 
1e-5)\n\n if alpha < 1 and alpha > 0:\n dgeneral = d2line\n else:\n if d2src > d2dst:\n dgeneral = d2dst\n else:\n dgeneral = d2src\n\n\n alpha_weight = window_function(alpha, smooth_range)\n pixel_range_weight = distance_function(dgeneral, pixel_range)\n\n # recorder[k][0] = np.power(1.0 / (0.1 + dgeneral), 1)\n # recorder[k][1] = alpha_weight * alpha * (srcx - dstx) + dimx\n # recorder[k][2] = alpha_weight * alpha * (srcy - dsty) + dimy\n morphed_x[dimy][dimx] = pixel_range_weight * alpha_weight * alpha * (srcx - dstx)\n morphed_y[dimy][dimx] = pixel_range_weight * alpha_weight * alpha * (srcy - dsty)\n # totweights = 0\n # avex = 0\n # avey = 0\n # for k in range(srcy_set.shape[0]):\n # totweights = totweights + recorder[k][0]\n # for k in range(srcy_set.shape[0]):\n # avex = avex + recorder[k][0] / totweights * recorder[k][1]\n # avey = avey + recorder[k][0] / totweights * recorder[k][2]\n # morphed_x[dimy][dimx] = avex\n # morphed_y[dimy][dimx] = avey\n morphed_sum = np.abs(morphed_x) + np.abs(morphed_y)\n vmax = 0.978\n morphed_sum = morphed_sum / vmax\n cm = plt.get_cmap('magma')\n morphed_sum = (cm(morphed_sum) * 255).astype(np.uint8)\n morphed_sum = pil.fromarray(morphed_sum)\n plt.figure()\n plt.imshow(morphed_sum)\n plt.plot([srcx, dstx], [srcy, dsty])\n plt.plot([srcx, dragged_srcx], [srcy, dragged_srcy])\n plt.scatter([dstx], [dsty], c = 'r')\n plt.scatter([dragged_srcx], [dragged_srcy], c = 'g')\n plt.scatter([srcx], [srcy], c = 'b')\n\n \"\"\"\n return morphed_x, morphed_y\n\n\nclass BNMorphFunction(Function):\n @staticmethod\n def forward(ctx):\n return\n\n @staticmethod\n def backward(ctx):\n return\n\n @staticmethod\n def find_corresponding_pts(binMapsrc, binMapdst, xx, yy, sxx, syy, cxx, cyy, pixel_distance_weight, alpha_distance_weight, pixel_mulline_distance_weight, alpha_padding):\n binMapsrc = binMapsrc.float()\n binMapdst = binMapdst.float()\n pixel_distance_weight = float(pixel_distance_weight)\n alpha_distance_weight = float(alpha_distance_weight)\n alpha_padding = float(alpha_padding)\n pixel_mulline_distance_weight = float(pixel_mulline_distance_weight)\n orgpts_x, orgpts_y, correspts_x, correspts_y, morphedx, morphedy = bnmorph_getcorpts.find_corespond_pts(binMapsrc, binMapdst, xx, yy, sxx, syy, cxx, cyy, pixel_distance_weight, alpha_distance_weight, pixel_mulline_distance_weight, alpha_padding)\n ocoeff = dict()\n ocoeff['orgpts_x'] = orgpts_x\n ocoeff['orgpts_y'] = orgpts_y\n ocoeff['correspts_x'] = correspts_x\n ocoeff['correspts_y'] = correspts_y\n return morphedx, morphedy, ocoeff\n\n\n @staticmethod\n def find_corresponding_pts_debug(binMapsrc, binMapdst, disparityMap, xx, yy, sxx, syy, cxx, cyy, pixel_distance_weight, alpha_distance_weight, pixel_mulline_distance_weight, alpha_padding, semantic_figure):\n binMapsrc = binMapsrc.float()\n binMapdst = binMapdst.float()\n pixel_distance_weight = float(pixel_distance_weight)\n alpha_distance_weight = float(alpha_distance_weight)\n alpha_padding = float(alpha_padding)\n pixel_mulline_distance_weight = float(pixel_mulline_distance_weight)\n\n # edited at 2019 / 10/ 24\n alpha_padding = 3\n pixel_distance_weight = 20\n pixel_mulline_distance_weight = 1.3\n orgpts_x, orgpts_y, correspts_x, correspts_y, morphedx, morphedy = bnmorph_getcorpts.find_corespond_pts(binMapsrc, binMapdst, xx, yy, sxx, syy, cxx, cyy, pixel_distance_weight, alpha_distance_weight, pixel_mulline_distance_weight, alpha_padding)\n\n colsearchSpan = np.arange(0, binMapsrc.shape[2])\n rowsearchSpan = np.arange(0, binMapsrc.shape[3])\n xx_rec, 
yy_rec = np.meshgrid(rowsearchSpan, colsearchSpan)\n xx_rec = torch.from_numpy(xx_rec).unsqueeze(0).unsqueeze(0).cuda().float()\n yy_rec = torch.from_numpy(yy_rec).unsqueeze(0).unsqueeze(0).cuda().float()\n diff = torch.abs(xx_rec - morphedx) + torch.abs(yy_rec - morphedy)\n # tensor2disp(diff, vmax=6, ind=0).show()\n\n height = disparityMap.shape[2]\n width = disparityMap.shape[3]\n morphedxt = (morphedx / (width - 1) - 0.5) * 2\n morphedyt = (morphedy / (height - 1) - 0.5) * 2\n grid = torch.cat([morphedxt, morphedyt], dim=1).permute(0, 2, 3, 1)\n disparityMap_morphed = torch.nn.functional.grid_sample(disparityMap, grid, padding_mode=\"border\")\n tensor2disp(disparityMap_morphed, percentile=95, ind=0).show()\n tensor2disp(disparityMap, percentile=95, ind=0).show()\n\n figdisp = tensor2disp(disparityMap, vmax=0.1, ind=0)\n r = np.zeros([binMapsrc.shape[2], binMapsrc.shape[3]])\n g = np.ones([binMapsrc.shape[2], binMapsrc.shape[3]]) * 255\n b = np.ones([binMapsrc.shape[2], binMapsrc.shape[3]]) * 255\n\n b = b * binMapsrc.squeeze(0).squeeze(0).cpu().detach().numpy()\n g = g * binMapdst.squeeze(0).squeeze(0).cpu().detach().numpy()\n edgecombined = np.stack([r, g, b], axis=2).astype(np.uint8)\n edgecombined = pil.fromarray(edgecombined)\n\n combinedfig = combined_2_img(figdisp, semantic_figure, 0.7)\n figedge = tensor2disp(binMapsrc, vmax=1, ind=0)\n selector = orgpts_x[0, 0, :, :] > -1e-3\n srcx_set = orgpts_x[0, 0, :, :][selector].cpu().numpy()\n srcy_set = orgpts_y[0, 0, :, :][selector].cpu().numpy()\n dstx_set = correspts_x[0, 0, :, :][selector].cpu().numpy()\n dsty_set = correspts_y[0, 0, :, :][selector].cpu().numpy()\n\n morphedxset = morphedx[0,0,:,:].cpu().numpy()[dsty_set.astype(np.int), dstx_set.astype(np.int)]\n morphedyset = morphedy[0,0,:,:].cpu().numpy()[dsty_set.astype(np.int), dstx_set.astype(np.int)]\n plt.imshow(edgecombined)\n # for i in range(len(srcx_set)):\n # plt.plot([srcx_set[i], dstx_set[i]], [srcy_set[i], dsty_set[i]], c = 'r')\n for i in range(len(srcx_set)):\n # if np.mod(i, 1) == 0:\n # plt.plot([srcx_set[i], morphedxset[i]], [srcy_set[i], morphedyset[i]], c = 'r')\n # plt.plot([srcx_set[i], dstx_set[i]], [srcy_set[i], dsty_set[i]], c='g')\n plt.plot([dstx_set[i], morphedxset[i]], [dsty_set[i], morphedyset[i]], c='c')\n plt.plot([srcx_set[i], morphedxset[i]], [srcy_set[i], morphedyset[i]], c='r')\n plt.scatter(morphedxset, morphedyset, c = 'c', s = 1)\n # plt.scatter(srcx_set, srcy_set, c='r', s=1)\n # plt.scatter(dstx_set, dsty_set, c='r', s=1)\n\n\n\n # morphed_x = np.zeros([height, width])\n # morphed_y = np.zeros([height, width])\n # selector = orgpts_x[0, 0, :, :] > -1e-3\n # srcx_set = orgpts_x[0, 0, :, :][selector].cpu().numpy()\n # srcy_set = orgpts_y[0, 0, :, :][selector].cpu().numpy()\n # dstx_set = correspts_x[0, 0, :, :][selector].cpu().numpy()\n # dsty_set = correspts_y[0, 0, :, :][selector].cpu().numpy()\n # recorder = np.zeros([srcx_set.shape[0], 3])\n #\n # selector = srcx_set < -1e10\n # for i in range(srcx_set.shape[0]):\n # if np.sqrt((srcx_set[i] - 238)**2 + (srcy_set[i] - 141)**2) < 1e10:\n # selector[i] = 1\n # srcy_set = srcy_set[selector]\n # dstx_set = dstx_set[selector]\n # dsty_set = dsty_set[selector]\n # srcx_set = srcx_set[selector]\n # morphed_x, morphed_y = morph_along_lines(height, width, morphed_x, morphed_y, recorder, srcx_set, srcy_set, dstx_set, dsty_set)\n #\n # morphedx = torch.from_numpy(morphed_x).unsqueeze(0).unsqueeze(0).cuda().float()\n # morphedy = torch.from_numpy(morphed_y).unsqueeze(0).unsqueeze(0).cuda().float()\n 
# height = disparityMap.shape[2]\n # width = disparityMap.shape[3]\n # morphedx = (morphedx / (width - 1) - 0.5) * 2\n # morphedy = (morphedy / (height - 1) - 0.5) * 2\n # grid = torch.cat([morphedx, morphedy], dim=1).permute(0, 2, 3, 1)\n # disparityMap_morphed = torch.nn.functional.grid_sample(disparityMap, grid, padding_mode=\"border\")\n # if semantic_figure is not None:\n # fig_morphed = tensor2disp(disparityMap_morphed, vmax=0.08, ind=0)\n # fig_disp = tensor2disp(disparityMap, vmax=0.08, ind=0)\n # fig_morphed_overlayed = pil.fromarray((np.array(semantic_figure) * 0.5 + np.array(fig_morphed) * 0.5).astype(np.uint8))\n # fig_disp_overlayed = pil.fromarray((np.array(semantic_figure) * 0.5 + np.array(fig_disp) * 0.5).astype(np.uint8))\n # fig_combined = pil.fromarray(np.concatenate([np.array(fig_disp_overlayed), np.array(fig_morphed_overlayed), np.array(fig_disp), np.array(fig_morphed)], axis=0))\n # else:\n # fig_combined = None\n # return fig_combined, disparityMap_morphed\n return morphedx, morphedy\n\nclass BNMorph(nn.Module):\n # def __init__(self, height, width, serachWidth = 7, searchHeight = 3, sparsityRad = 2, senseRange = 20, pixel_distance_weight = 20, alpha_distance_weight = 0.7, pixel_mulline_distance_weight = 15, alpha_padding = 0.6):\n def __init__(self, height, width, serachWidth=7, searchHeight=3, sparsityRad=2, senseRange=20, pixel_distance_weight=24, alpha_distance_weight=0.7, pixel_mulline_distance_weight=1.9, alpha_padding=1.6):\n super(BNMorph, self).__init__()\n self.height = height\n self.width = width\n self.searchWidth = serachWidth\n self.searchHeight = searchHeight\n self.sparsityRad = sparsityRad\n self.senseRange = senseRange\n self.pixel_distance_weight = pixel_distance_weight\n self.alpha_distance_weight = alpha_distance_weight\n self.alpha_padding = alpha_padding\n self.pixel_mulline_distance_weight = pixel_mulline_distance_weight\n\n self.pixel_distance_weight_store = None\n self.alpha_distance_weight_store = None\n self.pixel_mulline_distance_weight_store = None\n self.alpha_padding_store = None\n\n colsearchSpan = np.arange(-self.searchHeight, self.searchHeight + 1)\n rowsearchSpan = np.arange(-self.searchWidth, self.searchWidth + 1)\n xx, yy = np.meshgrid(rowsearchSpan, colsearchSpan)\n xx = xx.flatten()\n yy = yy.flatten()\n dist = xx**2 + yy**2\n sortedInd = np.argsort(dist)\n self.xx = torch.nn.Parameter(torch.from_numpy(xx[sortedInd]).float(), requires_grad=False)\n self.yy = torch.nn.Parameter(torch.from_numpy(yy[sortedInd]).float(), requires_grad=False)\n\n sparsittSpan = np.arange(-self.sparsityRad, self.sparsityRad + 1)\n sxx, syy = np.meshgrid(sparsittSpan, sparsittSpan)\n self.sxx = torch.nn.Parameter(torch.from_numpy(sxx.flatten()).float(), requires_grad=False)\n self.syy = torch.nn.Parameter(torch.from_numpy(syy.flatten()).float(), requires_grad=False)\n\n\n senseSpan = np.arange(-self.senseRange, self.senseRange + 1)\n cxx, cyy = np.meshgrid(senseSpan, senseSpan)\n cxx = cxx.flatten()\n cyy = cyy.flatten()\n dist = cxx ** 2 + cyy ** 2\n sortedInd = np.argsort(dist)\n self.cxx = torch.nn.Parameter(torch.from_numpy(cxx[sortedInd]).float(), requires_grad=False)\n self.cyy = torch.nn.Parameter(torch.from_numpy(cyy[sortedInd]).float(), requires_grad=False)\n\n def find_corresponding_pts_debug(self, binMapsrc, binMapdst, disparityMap, semantic_figure):\n return BNMorphFunction.find_corresponding_pts_debug(binMapsrc, binMapdst, disparityMap, self.xx, self.yy, self.sxx, self.syy, self.cxx, self.cyy, self.pixel_distance_weight, 
self.alpha_distance_weight, self.pixel_mulline_distance_weight, self.alpha_padding, semantic_figure)\n\n def find_corresponding_pts(self, binMapsrc, binMapdst, pixel_distance_weight = None, alpha_distance_weight = None, pixel_mulline_distance_weight = None, alpha_padding = None):\n if pixel_distance_weight is None:\n pixel_distance_weight = self.pixel_distance_weight\n\n if alpha_distance_weight is None:\n alpha_distance_weight = self.alpha_distance_weight\n\n if pixel_mulline_distance_weight is None:\n pixel_mulline_distance_weight = self.pixel_mulline_distance_weight\n\n if alpha_padding is None:\n alpha_padding = self.alpha_padding\n\n self.pixel_distance_weight_store = pixel_distance_weight\n self.alpha_distance_weight_store = alpha_distance_weight\n self.pixel_mulline_distance_weight_store = pixel_mulline_distance_weight\n self.alpha_padding_store = alpha_padding\n\n # alpha_padding = 1.9\n # pixel_mulline_distance_weight = 2\n return BNMorphFunction.find_corresponding_pts(binMapsrc, binMapdst, self.xx, self.yy, self.sxx, self.syy, self.cxx, self.cyy, pixel_distance_weight, alpha_distance_weight, pixel_mulline_distance_weight, alpha_padding)\n def print_params(self):\n if self.pixel_distance_weight_store is None:\n self.pixel_distance_weight_store = self.pixel_distance_weight\n if self.alpha_distance_weight_store is None:\n self.alpha_distance_weight_store = self.alpha_distance_weight\n if self.pixel_mulline_distance_weight_store is None:\n self.pixel_mulline_distance_weight_store = self.pixel_mulline_distance_weight\n if self.alpha_padding_store is None:\n self.alpha_padding_store = self.alpha_padding\n print(\"Sparsity %f, pixel_distance_weight % f, alpha_distance_weight % f, pixel_mulline_distance_weight % f, alpha_padding % f\" % (self.sparsityRad, self.pixel_distance_weight_store, self.alpha_distance_weight_store, self.pixel_mulline_distance_weight_store, self.alpha_padding_store))\n\n","sub_path":"bnmorph/bnmorph.py","file_name":"bnmorph.py","file_ext":"py","file_size_in_byte":18037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"408708077","text":"import unittest\nimport json\nimport rest1\n\n\nclass FlaskTest(unittest.TestCase):\n def setUp(self):\n rest1.app.testing = True\n self.client = rest1.app.test_client()\n\n\n # /api/multiply?param1=3¶m2=4\n # -> ? 12\n def test_index(self):\n response = self.client.get('/')\n\n # response code: 200\n self.assertEqual(response.status_code, 200)\n # content type: text/html; charset=utf-8\n self.assertIn(\"text/html\", response.content_type)\n self.assertEqual(response.charset, 'utf-8')\n\n content = response.data\n # 반환 데이터 확인\n self.assertEqual(content.decode('utf-8'), 'Hello, Flask!')\n\n\n def test_multyfly(self):\n response = self.client.get('/api/multiply?param1=3¶m2=4')\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"application/json\", response.content_type)\n \n # TDD(Test Driven Development)\n json_result = json.loads(response.data)\n self.assertEqual(json_result.get('state'), 1)\n self.assertEqual(json_result.get('response'), 12)\n \n\nif __name__ == '__main__':\n unittest.main()","sub_path":"rest1_test.py","file_name":"rest1_test.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"504913691","text":"def contaletra(s):\n conta={} #dicionario vazio\n for caracter in s:\n if caracter in conta: #a letra ja apareceu antes!\n conta[caracter] = conta[caracter] + 1\n else: #eh a primeira vez que aparece\n conta[caracter] = 1\n print(conta.items())\n\ncontaletra(input(\"Digite uma palavra: \"))\n","sub_path":"code/aula09/a9-ex1.py","file_name":"a9-ex1.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"159380085","text":"#!/usr/bin/env python\nimport os\nhome = os.environ['HOME']\nimport sys\nimport syslog\nimport datetime\nimport time\nimport recog\nimport recorder\nimport anal\nimport blanket\nimport transServer\nimport argparse\n\ndefaultPort = 8085\n\n\nif __name__ == '__main__':\n pname = sys.argv[0]\n parser = argparse.ArgumentParser()\n parser.add_argument('-d','--displayEnable', action = 'store_true',help='set displayEnable')\n args = parser.parse_args()\n \n os.environ['DISPLAY']=\":0.0\"\n os.chdir(os.path.dirname(sys.argv[0]))\n syslog.syslog(pname+\" at \"+datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))\n\n it = recorder.inputThread()\n it.setDaemon(True)\n\n rt = recog.recogThread(it,args.displayEnable)\n rt.setDaemon(True)\n\n# analt = anal.analThread(rt)\n# analt.setDaemon(True)\n \n pst = blanket.phraseSender(rt,False) # ALWAYS OFF for now\n pst.setDaemon(True)\n\n it.start()\n rt.start()\n# analt.start()\n pst.start()\n transServer = transServer.transServerThread(defaultPort)\n transServer.setDaemon(True)\n transServer.start()\n while True:\n try:\n time.sleep(2)\n except KeyboardInterrupt:\n syslog.syslog(pname+\": keyboard interrupt\")\n it.close()\n break\n except Exception as e:\n syslog.syslog(pname+\":\"+str(e))\n it.close()\n break\n\n syslog.syslog(pname+\" exiting\")\n exit(0)\n\n","sub_path":"iParrot/iParrot.py","file_name":"iParrot.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"58950356","text":"# CSV ne bi trebao da se cita ovako sa file.read()\nwith open('reading/asd.csv') as file:\n\tprint(file.read())\n\nprint()\n# Dobar nacin kako da se cita CSV file\n# reader\n#\n# Citam kao LIST\nfrom csv import reader\n\nwith open(\"reading/fighters.csv\") as file:\n\t# reader vraca iterator\n\tcsv_reader = reader(file)\n\t# s ovim preskacem prvi red koji je u stvari naslov za fighters podatke\n\t# iterable onda mogu da koristim next i kursor prelazi dalje na sledeci red\n\tnext(csv_reader)\n\tfor row in csv_reader:\n\t\tprint('NAME:', row[0])\n\nprint(\"____________________________________________________________\")\n\nwith open(\"reading/fightersII.csv\") as file:\n\t# reader() - radice bez drugog parametra, ali drugi parametar je delimiter koji se koristi u fajlu\n\tcsv_reader = reader(file, delimiter=\"|\")\n\t# ako zelim podatke da ubacim u list-u\n\tdata = list(csv_reader)\n\tprint(data)\n\nprint(\"____________________________________________________________\")\n\n# Citam kao DICTIONARY\n# Kada koristim OrderedDict kao ovde, onda dobijem Dict koji ce prvi red iz text fajla ( headers red sa naslovima ), da ubaci u Dict\nfrom csv import DictReader\n\nwith open(\"reading/fighters.csv\") as file:\n\tcsv_dict = DictReader(file)\n\tfor row in csv_dict:\n\t\tprint(row)\n\t\tprint(row['Name'])\n","sub_path":"File_IO/CSV/reading/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"576460127","text":"#!/usr/bin/env python\n#!coding: utf-8\n\n\"\"\"\n把应用的相似列表存入Redis的Hash中\nhash name: sims\nkey: id #闪存删除就旧的的哈素hash比啊数据数据bcefghjlnoprsuvz\nvalue: \"id1:sim id1:sim ...\" [id为应用id, sim为相似度]\n\n\"\"\"\nimport redis\n\ndef init_sims(file_path, hash_name, rdb):\n fin = open(file_path)\n sim_dic = {}\n for line in fin:\n infos = line.strip().split(\"\\t\")\n id1 = infos[0].strip()\n id2 = infos[1].strip()\n sim = infos[2].strip()\n if id1 in sim_dic:\n sim_dic[id1].append(\"%s:%s\"%(id2, sim))\n else:\n sim_dic[id1] = [\"%s:%s\"%(id2, sim)]\n for app, sim_list in sim_dic:\n rdb.hset(hash_name, id1, \" \".join(sim_list))\n\nif __name__ == \"__main__\":\n file_dir = \"/home/storm/data/jiyaodian/appSimilarity/\"\n rdb = redis.Redis(unix_socket_path=\"/tmp/redis.sock\", db=0)\n rdb.hdel(\"sims\") #删除旧的hash数据\n file_name1 = \"app_part-m-0000\"\n file_name2 = \"game_part-m-0000\"\n for i in range(0, 4):\n file_path = file_dir+file_name1+str(i)\n init_sims(\"sims\", file_path, rdb)\n file_path = file_dir+file_name2+str(i)\n init_sims(\"sims\", file_path, rdb)\n\n","sub_path":"stormAppResy/script/init_sims.py","file_name":"init_sims.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"207381163","text":"from noilibrarian.audio import loadaudio\nfrom librosa import feature, display, dtw, resample, stft\nimport numpy as np\nimport noilibrarian.library\n\ndef loadreference(refname):\n return loadaudio('noilibrarian/reference/' + refname + '.wav')\n\ndef getscore(wp):\n distance = 0\n for row in wp:\n distance += abs(row[0] - row[1])\n return distance\n \n\ndef compareto(audio, reference):\n xy, xsr = audio\n yy, ysr = reference\n \n mfccX = feature.mfcc(y=xy, sr=xsr)\n mfccY = feature.mfcc(y=yy, sr=ysr) \n \n chromaX = feature.chroma_cqt(y=xy, sr=xsr)\n chromaY = feature.chroma_cqt(y=yy, sr=ysr) \n \n distances = []\n score = 0\n \n D, wp = dtw(mfccX[0], mfccY[0])\n score += getscore(wp) * 2\n \n D, wp = dtw(chromaX, chromaY)\n score += getscore(wp)\n \n distances.append(score / 3)\n \n return sum(distances) / len(distances)\n\ndef classify(audio): \n distances = [\n { 'category': 'kick', 'distance': compareto(audio, loadreference('kick')) },\n { 'category': 'snare', 'distance': compareto(audio, loadreference('snare')) },\n { 'category': 'hihat', 'distance': compareto(audio, loadreference('hihat')) },\n ]\n \n distances.sort(key=lambda x: x['distance'])\n \n print(distances)\n \n return (distances[0]['category'], distances[0]['distance'], distances[1]['category'])","sub_path":"noilibrarian/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"360015713","text":"\"\"\"\n83. Remove Duplicates from Sorted List\n\nGiven a sorted linked list, delete all duplicates such that each element appear only once.\n\nExample 1:\n\nInput: 1->1->2\nOutput: 1->2\nExample 2:\n\nInput: 1->1->2->3->3\nOutput: 1->2->3\n\nExpected Result:\n\n1. [1, 1, 2] -> [1, 2]\n2. [1, 1, 2, 3, 3] -> [1, 2, 3]\n\"\"\"\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def delete_duplicates(self, head):\n dummy = head\n while head and head.next:\n if head.val == head.next.val:\n temp = head.next\n head.next = head.next.next\n temp = None\n else:\n head = head.next\n return dummy.next\n \n\n def nums_to_listnode(self, nums):\n res = node = ListNode(None)\n for n in nums:\n node.next = ListNode(n)\n node = node.next \n return res\n\n\n def listnode_to_nums(self, node):\n res = []\n while node:\n res.append(node.val)\n node = node.next\n return res\n\n\n def test(self):\n test_cases = [\n [1,1,2],\n [1,1,2,3,3]\n ]\n for i, nums in enumerate(test_cases, 1):\n head = self.nums_to_listnode(nums)\n node = self.delete_duplicates(head)\n list = self.listnode_to_nums(node)\n print(f\"{i}. {nums} -> {list}\")\n\n\nif __name__ == '__main__':\n Solution().test()\n\n","sub_path":"easy/list/00083-remove-duplicates-from-sorted-list/00083-remove-duplicates-from-sorted-list.py","file_name":"00083-remove-duplicates-from-sorted-list.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"306846319","text":"import cv2\n\nimg = cv2.imread('pyimg.jpg')\n\nprint(type(img))\n\n# cv2.imshow('Original Image', img)\n# cv2.waitKey()\n\nprint(img.shape[0:2])\n\nwidth = img.shape[0]\nheight = img.shape[1]\n\n# (center point, angle, scale)\nrotationMatrix = cv2.getRotationMatrix2D((250, 650), 30, .5)\n\nrotatedImage = cv2.warpAffine(img, rotationMatrix, (width, height))\n\ncv2.imshow('Rotated Image', rotatedImage)\ncv2.waitKey()\n","sub_path":"my-opencv.py","file_name":"my-opencv.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"96639317","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author:Wen\r\n\"\"\"\r\nfrom get_train_data import dataloader_train\r\nimport get_train_data as g\r\nimport Net\r\nfrom torch import optim\r\nfrom torch import nn\r\nimport torch as t\r\nfrom main_val import start\r\nimport H_para as H_para\r\nprint(\"load for train ...\")\r\nd = 2\r\npub_net = Net.public_layers().cuda(d)\r\nseg_net = Net.seg_net(5).cuda(d)\r\nopt_pub = optim.SGD(params = pub_net.parameters() , lr = 0.001, momentum=0.9)\r\nopt_seg = optim.SGD(params = seg_net.parameters() , lr = 0.001, momentum = 0.9)\r\ncri = nn.CrossEntropyLoss(weight=t.tensor([1,4,4,4,4]).float()).cuda(d)\r\nopt_pub.zero_grad()\r\nopt_seg.zero_grad()\r\nloss_sum = 0\r\nsave_path_p = \"/home/fuyongkun/graduation_project/ALL_PUB.pth\"\r\nsave_path_s = \"/home/fuyongkun/graduation_project/ALL_SEG.pth\"\r\npub_net.load_state_dict(t.load( save_path_p))\r\nseg_net.load_state_dict(t.load( save_path_s))\r\nepoch = 2\r\nepoch = range(epoch)\r\n\r\nprint(\"All is OK ...\")\r\nfor epoch in epoch:\r\n for i,x in enumerate(dataloader_train):\r\n data ,label = x\r\n data = data.cuda(d)\r\n label = label.cuda(d)\r\n x1 = pub_net(data)\r\n x1 = seg_net(*x1)\r\n loss = cri(x1 , label )\r\n loss.backward()\r\n loss_sum = loss_sum + loss\r\n opt_pub.step()\r\n opt_seg.step()\r\n opt_pub.zero_grad()\r\n opt_seg.zero_grad()\r\n if i%10 == 0:\r\n print(loss )\r\n print(i )\r\n loss_sum = 0\r\n if i%1000 == 0:\r\n t.save(pub_net.state_dict(), save_path_p)\r\n t.save(seg_net.state_dict(), save_path_s)\r\n t.cuda.empty_cache()\r\n print(\"epoch finished \")\r\n t.save(pub_net.state_dict(), save_path_p)\r\n t.save(seg_net.state_dict(), save_path_s)\r\nprint(\"train finished \")\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"main_train.py","file_name":"main_train.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"109311457","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\ntorch.backends.cudnn.bencmark = True\n\nimport os,sys,cv2,random,datetime,time,math\nimport argparse\nimport numpy as np\n\nimport net_s3fd\nfrom bbox import *\n\ndef detect(net,img):\n img = img - np.array([104,117,123])\n img = img.transpose(2, 0, 1)\n img = img.reshape((1,)+img.shape)\n\n img = Variable(torch.from_numpy(img).float(),volatile=True).cuda()\n BB,CC,HH,WW = img.size()\n olist = net(img)\n\n bboxlist = []\n for i in range(len(olist)/2): olist[i*2] = F.softmax(olist[i*2])\n for i in range(len(olist)/2):\n ocls,oreg = olist[i*2].data.cpu(),olist[i*2+1].data.cpu()\n FB,FC,FH,FW = ocls.size() # feature map size\n stride = 2**(i+2) # 4,8,16,32,64,128\n anchor = stride*4\n for Findex in range(FH*FW):\n windex,hindex = Findex%FW,Findex//FW\n axc,ayc = stride/2+windex*stride,stride/2+hindex*stride\n score = ocls[0,1,hindex,windex]\n loc = oreg[0,:,hindex,windex].contiguous().view(1,4)\n if score<0.05: continue\n priors = torch.Tensor([[axc/1.0,ayc/1.0,stride*4/1.0,stride*4/1.0]])\n variances = [0.1,0.2]\n box = decode(loc,priors,variances)\n x1,y1,x2,y2 = box[0]*1.0\n # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1)\n bboxlist.append([x1,y1,x2,y2,score])\n bboxlist = np.array(bboxlist)\n if 0==len(bboxlist): bboxlist=np.zeros((1, 5))\n return bboxlist","sub_path":"SFD/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"451243446","text":"from telegram.ext import Updater,CommandHandler,MessageHandler,Filters\nfrom Adafruit_IO import Client, Data\nimport requests\nimport os\nx = os.getenv('x')\ny = os.getenv('y')\nz = os.getenv('z')\naio = Client(x,y)\n\n\ndef on(bot,update):\n chat_id=update.message.chat_id\n bot.send_photo(chat_id,photo='https://img.icons8.com/plasticine/2x/light-on.png')\n bot.send_message(chat_id,text='led is on')\n aio = Client(x,y)\n value=Data(value=1)\n value_send=aio.create_data('bot',value)\n\n\ndef off(bot,update):\n chat_id=update.message.chat_id\n bot.send_photo(chat_id,photo='https://pngimg.com/uploads/bulb/bulb_PNG1241.png')\n bot.send_message(chat_id,text='led is off')\n aio = Client(x,y)\n value=Data(value=0)\n value_send=aio.create_data('bot',value)\n\ndef inmes(bot,update):\n mess_text = update.message.text\n if mess_text == 'turn on':\n on(bot,update)\n elif mess_text == 'turn off':\n off(bot,update)\n\nu=Updater(z)\ndp=u.dispatcher\ndp.add_handler(CommandHandler('turnon',on))\ndp.add_handler(CommandHandler('turnoff',off))\ndp.add_handler(MessageHandler(Filters.text&(~Filters.command),inmes))\nu.start_polling()\nu.idle() \n\n\n\n\n\n","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"300145274","text":"from Acquisition import aq_inner\nfrom zope.component import getUtility\nfrom zope.component import getMultiAdapter\nfrom plone.app.layout.viewlets import ViewletBase\nfrom Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom collective.contentleadimage.config import IMAGE_FIELD_NAME\nfrom collective.contentleadimage.config import IMAGE_CAPTION_FIELD_NAME\nfrom collective.contentleadimage.leadimageprefs import ILeadImagePrefsForm\n\nclass LeadImageViewlet(ViewletBase):\n \"\"\" A simple viewlet which renders leadimage \"\"\"\n\n @property\n def prefs(self):\n portal = getUtility(IPloneSiteRoot)\n return ILeadImagePrefsForm(portal)\n\n def bodyTag(self, css_class='newsImage'):\n \"\"\" returns img tag \"\"\"\n context = aq_inner(self.context)\n field = context.getField(IMAGE_FIELD_NAME)\n if field is not None and \\\n field.get_size(context) != 0:\n scale = self.prefs.body_scale_name\n return field.tag(context, scale=scale, css_class=css_class)\n else:\n if field is not None:\n imageData = field.get(context)\n field.set(context, imageData)\n return ''\n\n def descTag(self, css_class='tileImage'):\n \"\"\" returns img tag \"\"\"\n context = aq_inner(self.context)\n field = context.getField(IMAGE_FIELD_NAME)\n if field is not None and \\\n field.get_size(context) != 0:\n scale = self.prefs.desc_scale_name\n return field.tag(context, scale=scale, css_class=css_class)\n return ''\n\n def caption(self):\n context = aq_inner(self.context)\n return context.widget(IMAGE_CAPTION_FIELD_NAME, mode='view')\n\n def render(self):\n context = aq_inner(self.context)\n portal_type = getattr(context, 'portal_type', None)\n if portal_type in self.prefs.allowed_types:\n return super(LeadImageViewlet, self).render()\n else:\n return ''\n","sub_path":"v2/theme/browser/viewlets.py","file_name":"viewlets.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"563239688","text":"import pygame\nfrom . import base\nfrom logging import getLogger\nfrom weakref import ref\nfrom subprocess import run, PIPE, DEVNULL\nfrom collections import OrderedDict\nimport re\nfrom pygame import gfxdraw\n\n\ndef get_screen_resolution(log):\n try:\n xrandr = run(['xrandr'], stdout=PIPE, stderr=DEVNULL,\n encoding='utf8').stdout\n res = re.search('\\s*(\\d+x\\d+).*\\*', xrandr).group(1).split('x')\n res = [int(i) for i in res]\n except BaseException as e:\n log.exception('Failed to get screen resolution, using 800x600: {e}')\n return [800, 400]\n log.info(f'Screen resolution: {res[0]}x{res[1]}')\n return res\n\n\ndef init(fullscreen=False, hide_cursor=False):\n log = getLogger('main.gui')\n pygame.init()\n log.debug('Pygame initialized')\n res = get_screen_resolution(log)\n flags = pygame.HWSURFACE | pygame.DOUBLEBUF\n if hide_cursor:\n # create an invisble cursor image\n pygame.mouse.set_cursor((8, 8), (0, 0),\n (0, 0, 0, 0, 0, 0, 0, 0),\n (0, 0, 0, 0, 0, 0, 0, 0))\n if fullscreen:\n return pygame.display.set_mode(res, flags | pygame.FULLSCREEN)\n#800, 400\n return pygame.display.set_mode((800, 400), flags)\n\n\ndef quit():\n pygame.quit()\n\n\nclass Group(OrderedDict):\n\n def __init__(self, *args, **kwargs):\n self.log = getLogger('main.group')\n OrderedDict.__init__(self, *args, **kwargs)\n\n @property\n def clickable_elements(self):\n def is_clickable(e):\n return isinstance(e, base.Clickable) or isinstance(e, Group)\n elements = [(k, v) for k, v in self.items() if is_clickable(v)]\n return OrderedDict(elements)\n\n @property\n def mouse_sentive_elements(self):\n def is_mouse_sensitive(e):\n mse = isinstance(e, base.MouseMotionSensitive)\n return mse or isinstance(e, Group)\n elements = [(k, v) for k, v in self.items() if is_mouse_sensitive(v)]\n return OrderedDict(elements)\n\n def draw(self):\n for element in self.values():\n try:\n element.draw()\n except BaseException as e:\n self.log.exception(f'Failed to draw element: {e}')\n\n def click_down(self, pos, catched):\n for e in reversed(self.clickable_elements.values()):\n try:\n catched = e.click_down(pos, catched) or catched\n except BaseException as e:\n self.log.exception(f'Failed to exec click down: {e}')\n return catched\n\n def click_up(self, pos, catched):\n for e in reversed(self.clickable_elements.values()):\n try:\n catched = e.click_up(pos, catched) or catched\n except BaseException as e:\n self.log.exception(f'Failed to exec click up: {e}')\n return catched\n\n def mouse_motion(self, pos, catched):\n for e in reversed(self.mouse_sentive_elements.values()):\n try:\n catched = e.mouse_motion(pos, catched) or catched\n except BaseException as e:\n self.log.exception(f'Failed to exec mouse motion: {e}')\n return catched\n\n\nclass Layer(Group):\n\n def __init__(self, app, bg_color=(200, 200, 255)):\n super().__init__()\n self._app = ref(app)\n self.bg_color = bg_color\n self.log = getLogger('main.layer')\n\n @property\n def app(self):\n return self._app()\n\n @property\n def screen(self):\n return self.app.screen\n\n def draw(self):\n self.screen.fill(self.bg_color)\n Group.draw(self)\n\n\nclass Text(base.Element):\n\n # TODO tune default font_size\n def __init__(self, layer, pos, text='blabla', font_size=18, color=(0, 0, 0),\n gray_color=(160, 160, 160), always_gray=False,\n font='fonts/texgyreheros-regular.otf'):\n super().__init__(layer, pos)\n self.fg_color = color\n self.gray_color = gray_color\n self.always_gray = always_gray\n self.font_size = font_size\n self.font = 
pygame.font.Font(font, self.font_size)\n self.text = text\n\n @property\n def text(self):\n return self._text\n\n @text.setter\n def text(self, t):\n self._text = t\n self.surf, self.rect = self.text_objects(t, self.fg_color)\n self.gray_surf, _ = self.text_objects(t, self.gray_color)\n self.rect.center = self.pos\n\n def text_objects(self, text, color):\n surf = self.font.render(text, True, color)\n return surf, surf.get_rect()\n\n def draw(self, gray=False):\n if gray or self.always_gray:\n self.screen.blit(self.gray_surf, self.rect)\n else:\n self.screen.blit(self.surf, self.rect)\n\n\nclass Image(base.Element):\n\n def __init__(self, layer, pos, path, w=None, h=None):\n super().__init__(layer, pos)\n self.img = pygame.image.load(path)\n iw, ih = self.img.get_width(), self.img.get_height()\n r = iw / ih\n if not w and not h:\n w, h = iw, ih\n elif not w:\n w, h = round(h * r), h\n elif not h:\n w, h = w, round(w / r)\n self.size = w, h\n self.img = pygame.transform.scale(self.img, self.size)\n\n def draw(self):\n p = self.pos[0] - self.size[0] / 2, self.pos[1] - self.size[1] / 2\n self.screen.blit(self.img, p)\n\n\nclass Rectangle(base.Element):\n\n def __init__(self, layer, pos, size, color=(255, 0, 0)):\n base.Element.__init__(self, layer, pos)\n self.size = size\n self.bg_color = color\n\n def draw(self, force_color=None):\n c, s = self.pos, self.size\n x, y = c[0] - s[0] / 2, c[1] - s[1] / 2\n color = force_color if force_color else self.bg_color\n pygame.draw.rect(self.screen, color, [x, y, *s])\n\n\nclass Button(Rectangle, Text, base.RectangleClickable):\n\n def __init__(self, layer, pos, size, text, action, disabled=False):\n Rectangle.__init__(self, layer, pos, size, (230, 220, 220))\n Text.__init__(self, layer, pos, text)\n base.RectangleClickable.__init__(self, pos, size)\n self.action = action\n self.disabled = disabled\n self.is_pressed = False\n\n def draw(self):\n if self.disabled:\n Rectangle.draw(self, (200, 200, 200))\n Text.draw(self, gray=True)\n elif self.is_pressed:\n Rectangle.draw(self, (210, 210, 210))\n Text.draw(self)\n else:\n Rectangle.draw(self)\n Text.draw(self)\n\n def on_click_down(self, inside, catched):\n if not catched and inside:\n self.is_pressed = True\n return True\n return False\n\n def on_click_up(self, inside, catched):\n if self.is_pressed and not self.disabled and inside:\n self.action()\n self.is_pressed = False\n return False\n\n\nclass Circle(base.Element):\n\n def __init__(self, layer, pos, radius, color=(255, 100, 100),\n thickness=4):\n base.Element.__init__(self, layer, pos)\n self.radius = radius\n self.color = color\n self.thickness = thickness\n\n def draw(self, force_color=None):\n color = force_color if force_color else self.color\n # draw multiple circles to thicken the line\n gfxdraw.aacircle(self.screen, *self.pos, self.radius-1, color)\n gfxdraw.aacircle(self.screen, *self.pos, self.radius, color)\n gfxdraw.aacircle(self.screen, *self.pos, self.radius+1, color)\n #pygame.draw.circle(self.screen, color, self.pos, self.radius,\n #self.thickness)\n\n\nclass DetectionCircle(Circle, base.Draggable, base.CircleClickable):\n\n def __init__(self, layer, pos, radius, color=(255, 100, 100)):\n base.Draggable.__init__(self, layer, pos)\n base.CircleClickable.__init__(self, pos, radius)\n Circle.__init__(self, layer, pos, radius, color)\n self.is_selected = False\n\n def draw(self):\n if self.is_selected or self.dragging:\n Circle.draw(self, (20, 200, 0))\n else:\n Circle.draw(self)\n\n def on_click_down(self, inside, catched):\n if 
catched or not inside:\n return False\n self.layer.select_circle(self)\n self.drag_start()\n return True\n\n def on_click_up(self, inside, catched):\n if inside and not catched and self.dragging:\n self.drag_stop()\n return True\n self.drag_stop()\n return False\n\n\nclass LoadingBar(base.Element):\n\n def __init__(self, layer, pos, size, bg_color=(42, 42, 42),\n fg_color=(200, 200, 200), padding=1, progress=0):\n super().__init__(layer, pos)\n self.bg_color = bg_color\n self.fg_color = fg_color\n self._progress = progress\n self.size = size\n self.padding = padding\n\n @property\n def progression(self):\n return self._progress\n\n @progression.setter\n def progression(self, v):\n self._progress = max(0, min(1, v))\n\n def draw(self):\n c, s, p, v = self.pos, self.size, self.padding, self.progression\n x, y = c[0] - s[0]/2, c[1] - s[1]/2\n xp, yp = x + p, y + p\n wp, hp = v * (s[0] - 2 * p), s[1] - 2 * p\n pygame.draw.rect(self.screen, self.bg_color, (x, y, *s))\n pygame.draw.rect(self.screen, self.fg_color, (xp, yp, wp, hp))\n\n\nclass Video(base.Element):\n\n def __init__(self, layer):\n super().__init__(layer, (0, 0))\n\n def draw(self):\n if self.app.live_image is None:\n return\n self.screen.blit(self.app.live_image, (0, 0))\n\n\nclass Slider(base.Draggable, base.RectangleClickable):\n\n def __init__(self, layer, pos, size, vmin, vmax, action,\n padding=20, line_width=4):\n base.Draggable.__init__(self, layer, pos)\n base.RectangleClickable.__init__(self, pos, size)\n self.padding = padding\n self.line_width = line_width\n self.vmin = vmin\n self.vmax = vmax\n self.action = action\n self._value = 0\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, v):\n self._value = v\n self.action(self.vmin + v * (self.vmax - self.vmin))\n\n def set(self, v):\n self.value = (v - self.vmin) / (self.vmax - self.vmin)\n\n def draw(self):\n self.draw_line()\n self.draw_dot()\n\n def draw_line(self):\n x, y = self.pos\n w, h = self.size[0] - 2 * self.padding, self.line_width\n x, y = x - w / 2, y - h / 2\n pygame.draw.rect(self.screen, (42, 42, 42), [x, y, w, h])\n\n def draw_dot(self):\n w = self.size[0] - 2 * self.padding\n x = round(self.pos[0] + (self.value - 0.5) * w)\n y = self.pos[1]\n gfxdraw.filled_circle(self.screen, x, y, 16,\n (100, 255, 100))\n # pygame.draw.circle(self.screen, (100, 255, 100), (x, y), 16)\n\n def mouse_motion(self, pos, catched):\n if self.dragging:\n x = pos[0] - (self.pos[0] - self.size[0] / 2)\n self.value = min(max(x / self.size[0], 0), 1)\n","sub_path":"application/gui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
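The `Slider` above stores a normalized value in [0, 1] and converts it to the `[vmin, vmax]` range in its `value` setter (with `set()` doing the inverse). The mapping in isolation, with no pygame dependency:

```python
# Standalone sketch of the Slider's normalized-value mapping.
def to_range(norm, vmin, vmax):
    """Map a normalized slider position in [0, 1] to [vmin, vmax]."""
    return vmin + norm * (vmax - vmin)

def to_norm(value, vmin, vmax):
    """Inverse mapping, as used by Slider.set()."""
    return (value - vmin) / (vmax - vmin)

assert to_range(0.5, 10, 30) == 20.0
assert to_norm(20, 10, 30) == 0.5
```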
+{"seq_id":"206360484","text":"# model settings\nmodel = dict(\n type='ImageClassifier',\n backbone=dict(\n type='PyramidVig',\n arch='base',\n k=9,\n act_cfg=dict(type='GELU'),\n norm_cfg=dict(type='BN'),\n graph_conv_type='mr',\n graph_conv_bias=True,\n epsilon=0.2,\n use_stochastic=False,\n drop_path=0.1,\n norm_eval=False,\n frozen_stages=0),\n neck=dict(type='GlobalAveragePooling'),\n head=dict(\n type='VigClsHead',\n num_classes=1000,\n in_channels=1024,\n hidden_dim=1024,\n act_cfg=dict(type='GELU'),\n dropout=0.,\n loss=dict(type='CrossEntropyLoss', loss_weight=1.0),\n topk=(1, 5),\n ),\n train_cfg=dict(augments=[\n dict(type='Mixup', alpha=0.8),\n dict(type='CutMix', alpha=1.0)\n ]),\n)\n","sub_path":"configs/_base_/models/vig/pyramid_vig_base.py","file_name":"pyramid_vig_base.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"271255672","text":"#!/usr/bin/env python\r\ns2={}\r\ns = {33,12,33,32121}\r\nfor i in s:\r\n print(i)\r\nprint(type(s))\r\nprint(type(s2))\r\ns1=set()\r\ns1.add(11)\r\ns1.add(22)\r\ns1.add(33)\r\nprint(s1)","sub_path":"PycharmProjects/learn/day4/aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"180686365","text":"from collections import defaultdict\nclass Solution:\n '''\n #brute-force solution\n def corpFlightBookings(self, bookings, n):\n map = defaultdict(int)\n for b in bookings:\n for i in range(b[0], b[1]+1):\n map[i] += b[2]\n return [map[i] for i in range(1, n+1)]\n '''\n def corpFlightBookings(self, bookings, n):\n ans = [0]*n\n for bk in bookings:\n ans[bk[0] - 1] += bk[2]\n if bk[1] < n: ans[bk[1]] -= bk[2]\n for i in range(1, n):\n ans[i] += ans[i-1]\n return ans \n \n\nif __name__ == '__main__':\n sol = Solution()\n \n bookings = [[1,2,10],[2,3,20],[2,5,25]]\n n = 5\n\n r = sol.corpFlightBookings(bookings, n)\n print(r)","sub_path":"lc_1109_corporate_flight_bookings.py","file_name":"lc_1109_corporate_flight_bookings.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"511424581","text":"# -- coding:utf-8 --\r\nfrom pylab import mpl\r\nimport random\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\nmpl.rcParams['font.sans-serif']=['FangSong'] # 指定默认字体\r\nmpl.rcParams['axes.unicode_minus']=False # 解决保存图像是负号'-'显示为方块的问题\r\ncoordination_source=\"\"\"\r\n{name:'兰州', geoCoord:[103.73, 36.03]},\r\n{name:'嘉峪关', geoCoord:[98.17, 39.47]},\r\n{name:'西宁', geoCoord:[101.74, 36.56]},\r\n{name:'成都', geoCoord:[104.06, 30.67]},\r\n{name:'石家庄', geoCoord:[114.48, 38.03]},\r\n{name:'拉萨', geoCoord:[102.73, 25.04]},\r\n{name:'贵阳', geoCoord:[106.71, 26.57]},\r\n{name:'武汉', geoCoord:[114.31, 30.52]},\r\n{name:'郑州', geoCoord:[113.65, 34.76]},\r\n{name:'济南', geoCoord:[117, 36.65]},\r\n{name:'南京', geoCoord:[118.78, 32.04]},\r\n{name:'合肥', geoCoord:[117.27, 31.86]},\r\n{name:'杭州', geoCoord:[120.19, 30.26]},\r\n{name:'南昌', geoCoord:[115.89, 28.68]},\r\n{name:'福州', geoCoord:[119.3, 26.08]},\r\n{name:'广州', geoCoord:[113.23, 23.16]},\r\n{name:'长沙', geoCoord:[113, 28.21]},\r\n{name:'海口', geoCoord:[110.35, 20.02]},\r\n{name:'沈阳', geoCoord:[123.38, 41.8]},\r\n{name:'长春', geoCoord:[125.35, 43.88]},\r\n{name:'哈尔滨', geoCoord:[126.63, 45.75]},\r\n{name:'太原', geoCoord:[112.53, 37.87]},\r\n{name:'西安', geoCoord:[108.95, 34.27]},\r\n{name:'台湾', geoCoord:[121.30, 25.03]},\r\n{name:'北京', geoCoord:[116.46, 39.92]},\r\n{name:'上海', geoCoord:[121.48, 31.22]},\r\n{name:'重庆', geoCoord:[106.54, 29.59]},\r\n{name:'天津', geoCoord:[117.2, 39.13]},\r\n{name:'呼和浩特', geoCoord:[111.65, 40.82]},\r\n{name:'南宁', geoCoord:[108.33, 22.84]},\r\n{name:'西藏', geoCoord:[91.11, 29.97]},\r\n{name:'银川', geoCoord:[106.27, 38.47]},\r\n{name:'乌鲁木齐', geoCoord:[87.68, 43.77]},\r\n{name:'香港', geoCoord:[114.17, 22.28]},\r\n{name:'澳门', geoCoord:[113.54, 22.19]}\r\n\"\"\"\r\n\r\n\r\ncity_location={}\r\n\r\ntest_string=\"{name:'兰州', geoCoord:[103.73, 36.03]},\"\r\nimport re\r\npattern=re.compile(r\"name:'(\\w+)',\\s+geoCoord:\\[(\\d+.\\d+),\\s(\\d+.\\d+)\\]\")\r\n\r\nfor line in coordination_source.split('\\n') :\r\n city_info=pattern.findall(line)\r\n if not city_info : continue\r\n city, long, lat=city_info[0]\r\n\r\n long, lat=float(long), float(lat)\r\n\r\n city_location[city]=(long, lat)\r\n\r\nimport math\r\n#球面距离\r\ndef geo_distance(origin, destination) :\r\n\r\n lon1, lat1=origin\r\n lon2, lat2=destination\r\n radius=6371 # km\r\n\r\n dlat=math.radians(lat2 - lat1)\r\n dlon=math.radians(lon2 - lon1)\r\n a=(math.sin(dlat / 2) * math.sin(dlat / 2) +\r\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\r\n math.sin(dlon / 2) * math.sin(dlon / 2))\r\n c=2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\r\n d=radius * c\r\n return d\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\ncity_graph=nx.Graph()\r\ncity_graph.add_nodes_from(list(city_location.keys()))\r\nnx.draw(city_graph, city_location, with_labels=True, node_size=30)\r\n\r\nXs = city_location.values()\r\nXs = np.array(list(Xs))\r\n\r\n#找到合理的能源中心\r\nall_x,all_y=[],[]\r\nfor _,location in city_location.items():\r\n x,y = location\r\n all_x.append(x)\r\n all_y.append(y)\r\n#设置5个中心\r\nk = 5\r\n\r\n#找到随机的中心城市\r\ndef get_random_center(all_x,all_y):\r\n return random.uniform(min(all_x),max(all_x)), random.uniform(min(all_y),max(all_y))\r\ncenters = {\"{}\".format(i+1):get_random_center(all_x,all_y) for i in range(k)}\r\nchanged = True\r\nwhile changed:\r\n closet_points = defaultdict(list)\r\n for x,y in zip(all_x,all_y):\r\n closet_c,closet_dis = 
min([(k,geo_distance((x,y),centers[k])) for k in centers],key=lambda t:t[1])\r\n closet_points[closet_c].append([x,y])\r\n\r\n for c in closet_points:\r\n former_center = centers[c]\r\n neigbors_belong_to_c = closet_points[c]\r\n neighbors_center = np.mean(neigbors_belong_to_c,axis=0)\r\n if geo_distance(neighbors_center,former_center)>3:\r\n centers[c] = neighbors_center #赋值新的中心店\r\n changed = True\r\ncity_location_with_station={\"能源站-{}\".format(int(i)+1):position for i,position in centers.items()}\r\nprint(city_location_with_station)\r\ndef draw_cities(cities,color=None):\r\n city_graph = nx.Graph()\r\n city_graph.add_nodes_from(list(cities.keys()))\r\n nx.draw(city_graph,cities,node_color=color,with_labels=True,node_size = 20)\r\nplt.figure(1,figsize=(15,15))\r\ndraw_cities(city_location_with_station,color=\"red\")\r\ndraw_cities(city_location,color=\"blue\")\r\nplt.show()\r\n\r\n\r\n","sub_path":"K-means.py","file_name":"K-means.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
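For a quick cross-check of the hand-rolled Lloyd iteration above, the same k=5 clustering can be run with scikit-learn (an extra dependency; note that `KMeans` minimizes Euclidean distance on raw lon/lat values, whereas the script uses great-circle distance, so the centers may differ):

```python
from sklearn.cluster import KMeans
import numpy as np

# Synthetic (lon, lat) points standing in for the 35 city coordinates above.
rng = np.random.default_rng(0)
coords = rng.uniform([87.0, 20.0], [127.0, 46.0], size=(35, 2))
km = KMeans(n_clusters=5, n_init=10, random_state=0).fit(coords)
print(km.cluster_centers_)   # Euclidean centroids of the 5 clusters
```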
+{"seq_id":"241897878","text":"import cv2 as cv\n\nfont = cv.FONT_HERSHEY_SIMPLEX\n\ndef draw_boxes(image, result):\n\tN = len(result['rois'])\n\tprint(\"In N\", N)\n\tif not N:\n\t\treturn\n\tprint(\"After N\", N)\n\tfor i in range(N):\n\t\tbox = result['rois'][i]\n\t\tscore = result['scores'][i]\n\t\tcv.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 0, 255), 3)\n\t\tcv.putText(image, '{0:.2f}'.format(score), (box[1], box[0] - 10), font, .7, (0,255,0), 3 ,cv.LINE_AA)\n\t\tprint(\"drawn rectangles\")\n","sub_path":"11. Capstone/src/website/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"257812758","text":"#!/usr/bin/python\n\n# phosphor_plot.py (c) 2015 Christian Vogel \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\nimport logging\nfrom logging import debug, info, warning, error, critical\n\nimport math\nimport numpy as np\nimport scipy.signal\nimport scipy.misc\n\nfrom functools import reduce\nimport operator\nimport itertools\n\n# read in a filename, or some \"magic\" sources\ndef get_input(fn) :\n # magic square wave for testing\n if fn == '@square' :\n info('Generating a simple square wave.')\n data = np.zeros(100)\n for i in range(10) :\n a = i*10\n b,c = a+5, a+10\n data[a:b] = -1\n data[b:c] = 1\n return data\n\n if fn == '@sine' :\n info('Generating a simple sine wave.')\n return np.sin(np.linspace(0, 31.41, 100))\n\n if fn.lower().endswith('.wav') :\n info('Reading %s as a wav file.', fn)\n import scipy.io.wavfile\n rate, data = scipy.io.wavfile.read(fn)\n # for stereo, only use left channel\n\n if len(data.shape) == 2 :\n warning('%s: only using first channel!'%(fn))\n data = data[:,0]\n\n info('%s: has a sample rate of %d Hz (ignored) and %d samples.',\n fn, rate, data.shape[0])\n return data\n\n if fn.lower().endswith('.txt') :\n return np.loadtxt(fn)\n\n raise RuntimeError('Don\\'t know how to read file %s.', fn)\n\ndef fract_to_int_neigh_with_weight(fract) :\n '''Convert a fractional number to their integer neighbors and a weight\n indicating the proximity. E.g. 
f(1.0) -> [ (1, 1.0) ],\n f(1.5) -> [ (1, 0.5), (2, 0.5)], f(1.2) = [(1, .8), (2, 0.2)].'''\n\n floor_idx = math.floor(fract)\n ceil_idx = math.ceil(fract)\n\n floor_val = ceil_idx - fract\n ceil_val = fract - floor_idx\n\n if floor_idx == ceil_idx :\n return [(floor_idx, 1.0)]\n else :\n return [(floor_idx, floor_val), (ceil_idx, ceil_val)]\n\ndef add_at_fract_idx_with_weight(arr, fract_idx, increment=1.0) :\n '''Increment data in an array at a \"fractional index\" by spreading\n the increment around the nearest neighbors weighted by (linear) proximity.'''\n\n # fractional indices and weights for each dimension, [ [(i,w),(i,w)], ..]\n neigh_idx_arr = [ fract_to_int_neigh_with_weight(fi) for fi in fract_idx ]\n for i_w_arr in itertools.product(*neigh_idx_arr) :\n # i_w_arr is list of [(index,per_coordinate_weight)] for each\n # dimension, convert to combined index, and list of weights\n idx, weights = zip(*i_w_arr) # [(a,1),(b,2),(c,3) -> (a,b,c),(1,2,3)\n weight = reduce(operator.mul, weights, 1.0)\n arr[idx] += weight*increment\n\ndef scale_and_offs(amin, amax, bmin, bmax, offs_is_on_a=False) :\n '''Calculate scale factor and offset to transfer a range\n [amin, amax] to [bmin, bmax]\n if offs_is_on_a is False (default) then\n b = a * scale + offs\n if offs_is_on_a is True then\n b = (a+offs) * scale'''\n\n scale = (float(bmax)-float(bmin))/(float(amax)-float(amin))\n offs = bmin-amin*scale\n if offs_is_on_a :\n offs /= scale\n# debug('scale_and_offs(%f, %f, %f, %f, %s) -> (%f, %f)',\n# amin, amax, bmin, bmax, offs_is_on_a, scale, offs)\n return scale, offs\n\ndef rescale_array(arr, new_min, new_max) :\n '''Rescale array so that its values cover the range new_min to new_max.'''\n curr_min = np.amin(arr)\n curr_max = np.amax(arr)\n scale, offs = scale_and_offs(curr_min, curr_max, new_min, new_max)\n return arr*scale+offs\n\ndef rescale_array_percentile_clip(arr, min_pct, max_pct, new_min, new_max) :\n '''Rescale array so that the min_pct and max_pct percentile will cover\n the range new_min, new_max. Over and underflows will be clipped.'''\n\n pct_vals = np.percentile(arr, [min_pct, max_pct] )\n scale, offs = scale_and_offs(pct_vals[0], pct_vals[1], new_min, new_max)\n return np.clip(arr*scale+offs, new_min, new_max)\n\ndef add_trace_to_img(img_arr, trace_arr, color) :\n '''Add trace_arr(grayscale) to RGB image img_arr, using specified color.'''\n for i, c in enumerate(color) :\n img_arr[:,:,i] += trace_arr * c\n\n# our favourite colours, traces cycle through them\nCOLORS = [ (000.0, 255.0, 255.0), # cyan\n (255.0, 255.0, 000.0), # yellow\n (255.0, 000.0, 000.0), # red\n (000.0, 255.0, 000.0), # green\n ]\n\ndef main() :\n logging.basicConfig(format='\\033[0;1m%(asctime)-15s\\033[0m %(message)s')\n\n import optparse\n parser = optparse.OptionParser(usage='%prog [options] INPUTS...')\n\n parser.add_option('-W', '--width', dest='width', type='int', default=800,\n metavar='N', help='Width of picture to generate. (def: 800)')\n parser.add_option('-H', '--height', dest='height', type='int', default=600,\n metavar='N', help='Height of picture to generate. (def: 600)')\n\n parser.add_option('-s', '--sigma', dest='sigma', type='float', default=0.0,\n metavar='PIXELS',\n help='Make trace unsharp by convolving with a gaussian of PIXELS width.'+\\\n '(def: off)')\n parser.add_option('-g', '--gamma', dest='gamma', type='float', default=0.3,\n metavar='GAMMA',\n help='Apply gamma to image, to intensify dark parts of the traces. 
(def: 0.3)')\n\n parser.add_option('-R', '--resample', dest='resample', type='int', default=100,\n metavar='N',\n help='Minimal resampling (factor times the horizontal resolution) (def:100)')\n\n parser.add_option('-v', '--verbose', dest='verbose', action='store_true',\n help='Be verbose. (def: not)')\n parser.add_option('-d', '--debug', dest='debug', action='store_true',\n help='Be even more verbose (for debugging) (def: not).')\n\n parser.add_option('-o', '--output', dest='output', default=None,\n metavar='FILENAME', help='Write out plot as image to FILENAME.')\n\n opts, args = parser.parse_args()\n\n logging.getLogger().setLevel(logging.DEBUG if opts.debug else (\n logging.INFO if opts.verbose else logging.WARNING ))\n\n if not args :\n parser.error('You have to specify at least one file to plot.')\n if not opts.output :\n parser.error('You have to specify the output file using -o / --output.')\n\n ##################################\n ### NOW FOR THE ACTUAL DRAWING\n ##################################\n\n # pixels to keep empty around image, to avoid under/overflows in\n # computation of neighbors, ...\n border = 5\n\n # generate array representing image\n img_data = np.zeros((opts.height, opts.width, 3))\n# dtype=[('r',np.float),('g',np.float),('b',np.float)])\n\n img_xmin, img_xmax = border, img_data.shape[1]-border\n img_ymin, img_ymax = border, img_data.shape[0]-border\n\n # minimum numbers of samples in X that we need, to decide on resampling\n min_xpts = (img_xmax - img_xmin)*opts.resample\n\n for i, arg in enumerate(args) :\n data = get_input(arg)\n\n info('%s: %d samples in original data source', arg, data.shape[0])\n\n if data.shape[0] < min_xpts :\n data = scipy.signal.resample(data, min_xpts)\n info('Not enough datapoints, resampled to %d.', min_xpts)\n\n # build x and y coordinate array\n x_coords = np.linspace(img_xmin, img_xmax, len(data))\n y_coords = rescale_array(data, img_ymin, img_ymax)\n\n trace_img = np.zeros(img_data.shape[0:2])\n\n # draw actual pixels\n for x, y in np.nditer((x_coords, y_coords)) :\n add_at_fract_idx_with_weight(trace_img, (y, x))\n\n if opts.sigma :\n info('Making the trace unsharp, using a gaussian of width %f.',opts.sigma)\n trace_img = scipy.ndimage.filters.gaussian_filter(trace_img, opts.sigma)\n\n info('Applying gamma function for gamma=%f', opts.gamma)\n trace_img = np.power(rescale_array(trace_img, 0.0, 1.0), opts.gamma)\n\n info('Using color %s for this trace.', COLORS[i % len(COLORS)])\n add_trace_to_img(img_data, trace_img, COLORS[i % len(COLORS)])\n\n info('Writing out plot to file %s.', opts.output)\n scipy.misc.toimage(img_data).save(opts.output)\n\n\nif __name__ == '__main__' :\n main()\n","sub_path":"phosphor_plot.py","file_name":"phosphor_plot.py","file_ext":"py","file_size_in_byte":8720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"504013089","text":"# count and display number of flushes in 10k tries\n# construct deck\n# shuffle by using random.shuffle\n# draw first five, [:4]\n\nimport random\n\n# create lists\nval = ['2','3','4','5','6','7','8','9','T','J','Q','K','A']\nface = ['C','S','D','H']\ncards = []\n\n# construct the cards list\nfor k in val:\n\tfor i in face:\n\t\tcards += [k + i]\n\n# declare variables to count flushes and hands dealt\t\ndeals_left = 10000\nflushes = 0\n\nwhile deals_left >= 0:\n\trandom.shuffle(cards)\n\t# variables of each card's suit in a hand\n\ts1 = cards[0][1]\n\ts2 = cards[1][1]\n\ts3 = cards[2][1]\n\ts4 = cards[3][1]\n\ts5 = cards[4][1]\n\t# determine if their hand is a flush\n\tif s1 == s2 and s2 == s3 and s3 == s4 and s4 == s5:\n\t\tflushes += 1\n\tdeals_left -= 1\n\n# display flush statistics\t\nprint(\"Total Flushes:\", flushes)\nprint((flushes / 10000) * 100, \"%\")\n","sub_path":"CSCI/hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"104894509","text":"import cx_Freeze\nimport os\nimport sys\n\nos.environ['TCL_LIBRARY'] = \"G:\\\\Python\\\\tcl\\\\tcl\\\\tcl8.6\"\nos.environ['TK_LIBRARY'] = \"G:\\\\Python\\\\tcl\\\\tcl\\\\tk8.6\"\n\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nexecutables = [cx_Freeze.Executable('digitRec.py', base=base)]\n\ncx_Freeze.setup(\n\n\tname=\"Digit Recognizer\",\n\tversion=\"1.0\",\n\toptions={\"build_exe\":{ \"packages\":[\"pygame\",\"numpy\",\"pandas\",\"operator\",\n\t\t\t\t\t\t\t\t\t\t\"matplotlib.pyplot\",\"pygame.locals\"],\n\t\t\t\t\t\t\t\"include_files\":[\"train.csv\"]}},\n\n\texecutables = executables\t\t\t\t\t\t\t\t\t\n\n\t)","sub_path":"demo/setup_cx_freeze.py","file_name":"setup_cx_freeze.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"151644131","text":"from sandbox.rocky.tf.algos.trpo import TRPO\nfrom sandbox.rocky.tf.algos.vpg import VPG\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.grid_world_env import GridWorldEnv\nfrom rllab.envs.normalized_env import normalize\nfrom sandbox.rocky.tf.envs.base import TfEnv\nfrom sandbox.rocky.tf.policies.categorical_mlp_policy import CategoricalMLPPolicy\n\n\nmap_desc = '4x4' # map description, see multi_agent_grid_world_env.py\nn_row = 4 # n_row and n_col need to be compatible with desc\nn_col = 4\n\nenv = TfEnv(normalize(GridWorldEnv(desc = map_desc)))\n\npolicy = CategoricalMLPPolicy(\n 'MAP',\n env_spec=env.spec,\n)\n\nbaseline = LinearFeatureBaseline(env_spec=env.spec)\n\nalgo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=3000,\n max_path_length=20,\n n_itr=40,\n discount=0.99,\n step_size=0.01,\n)\n\"\"\"\nalgo = VPG(\n env = env,\n policy = policy,\n baseline = baseline,\n batch_size = 3000,\n max_path_length=20,\n n_itr=40,\n discount=0.99,\n step_size=0.01,\n)\n\"\"\"\nalgo.train()\n","sub_path":"examples/trpo_base_grid.py","file_name":"trpo_base_grid.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"535236150","text":"\n\nfrom xai.brain.wordbase.nouns._disservice import _DISSERVICE\n\n#calss header\nclass _DISSERVICES(_DISSERVICE, ):\n\tdef __init__(self,): \n\t\t_DISSERVICE.__init__(self)\n\t\tself.name = \"DISSERVICES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"disservice\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_disservices.py","file_name":"_disservices.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"160201506","text":"from matplotlib import pyplot as plt\nfrom argparse import ArgumentParser\nfrom imutils import paths\nimport keras.backend as K\nimport tensorflow as tf\nfrom PIL import Image\nimport numpy as np\nimport math as mt\nimport datetime\nimport time\nimport cv2\nimport os\n\nap = ArgumentParser()\nap.add_argument('-d', '--dataset', required = True, help = 'path to input dataset')\nap.add_argument('-e', '--epoch', required = False, type = int, default = 100, help = 'how many iterate learning')\nap.add_argument('-b', '--batch_size', required = False, type = int, default = 1)\nap.add_argument('-r', '--resize', required = False, type = int, default=256)\nap.add_argument('-c', '--channels', required = False, type = int, default = 3)\nap.add_argument('-s', '--strides', required = False, type = int, default = 2)\nargs = vars(ap.parse_args())\n\nK.set_image_data_format('channels_last')\nPATH = args['dataset']+'/'\n\nBUFFER_SIZE, BATCH_SIZE, LAMBDA = 400, args['batch_size'], 100\nIMG_WIDTH, IMG_HEIGHT = args['resize'], args['resize']\nOUTPUT_CHANNELS = args['channels']\nEPOCHS = args['epoch']\nSTRIDE = args['strides']\n\ndef imgLoad(imgFile):\n image = tf.io.read_file(imgFile)\n image = tf.image.decode_jpeg(image)\n\n w = tf.shape(image)[1]\n w = w // 2\n\n realImg = image[:, :w, :]\n inputImg = image[:, w:, :]\n\n inputImg = tf.cast(inputImg, tf.float32)\n realImg = tf.cast(realImg, tf.float32)\n\n return inputImg, realImg\n\ndef resize(input_image, real_image, height, width):\n input_image = tf.image.resize(input_image, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n real_image = tf.image.resize(real_image, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n return input_image, real_image\n\ndef random_crop(input_image, real_image):\n stacked_image = tf.stack([input_image, real_image], axis=0)\n cropped_image = tf.image.random_crop(\n stacked_image, size=[2, IMG_HEIGHT, IMG_WIDTH, OUTPUT_CHANNELS])\n\n return cropped_image[0], cropped_image[1]\n\ndef normalize(input_image, real_image):\n input_image = (input_image / 127.5) - 1\n real_image = (real_image / 127.5) - 1\n\n return input_image, real_image\n\n@tf.function()\ndef random_jitter(input_image, real_image):\n # resizing to resize + 30, resize + 30, 3\n input_image, real_image = resize(input_image, real_image, IMG_WIDTH + 30, IMG_HEIGHT + 30)\n\n # randomly cropping to 256 x 256 x 3\n input_image, real_image = random_crop(input_image, real_image)\n\n if tf.random.uniform(()) > 0.5:\n # random mirroring\n input_image = tf.image.flip_left_right(input_image)\n real_image = tf.image.flip_left_right(real_image)\n\n return input_image, real_image\n\ndef load_image_train(image_file):\n input_image, real_image = imgLoad(image_file)\n input_image, real_image = random_jitter(input_image, real_image)\n input_image, real_image = normalize(input_image, real_image)\n\n return input_image, real_image\n\ndef load_image_test(image_file):\n input_image, real_image = imgLoad(image_file)\n input_image, real_image = resize(input_image, real_image, IMG_HEIGHT, IMG_WIDTH)\n input_image, real_image = normalize(input_image, real_image)\n\n return input_image, real_image\n\ndef downsample(filters, size, apply_batchnorm=True):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n result.add(\n tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',\n kernel_initializer=initializer, use_bias=False))\n\n if apply_batchnorm:\n 
result.add(tf.keras.layers.BatchNormalization())\n\n result.add(tf.keras.layers.LeakyReLU())\n\n return result\n \ndef upsample(filters, size, apply_dropout=False):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n result.add(\n tf.keras.layers.Conv2DTranspose(filters, size, strides=STRIDE, padding='same', kernel_initializer=initializer, use_bias=False))\n\n result.add(tf.keras.layers.BatchNormalization())\n\n if apply_dropout:\n result.add(tf.keras.layers.Dropout(0.5))\n\n result.add(tf.keras.layers.ReLU())\n\n return result\n\ndef Generator():\n inputs = tf.keras.layers.Input(shape=[IMG_WIDTH,IMG_HEIGHT, OUTPUT_CHANNELS])\n\n size = IMG_WIDTH\n iterate = int(mt.log(size) / mt.log(STRIDE))\n\n down_stack = [\n downsample(size // 4, 4, apply_batchnorm=False), # (bs, 128, 128, 64)\n downsample(size //2, 4), # (bs, 64, 64, 128)\n downsample(size, 4), # (bs, 32, 32, 256)\n downsample(size*2, 4), # (bs, 16, 16, 512)\n downsample(size*2, 4), # (bs, 8, 8, 512)\n downsample(size*2, 4), # (bs, 4, 4, 512)\n downsample(size*2, 4), # (bs, 2, 2, 512)\n downsample(size*2, 4), # (bs, 1, 1, 512)\n ]\n\n up_stack = [\n upsample(size*2, 4, apply_dropout=True), # (bs, 2, 2, 1024)\n upsample(size*2, 4, apply_dropout=True), # (bs, 4, 4, 1024)\n upsample(size*2, 4, apply_dropout=True), # (bs, 8, 8, 1024)\n upsample(size*2, 4), # (bs, 16, 16, 1024)\n upsample(size, 4), # (bs, 32, 32, 512)\n upsample(size // 2, 4), # (bs, 64, 64, 256)\n upsample(size //4, 4), # (bs, 128, 128, 128)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,\n strides=2,\n padding='same',\n kernel_initializer=initializer,\n activation='tanh') # (bs, 256, 256, 3)\n\n x = inputs\n\n # Downsampling through the model\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n # Upsampling and establishing the skip connections\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = tf.keras.layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return tf.keras.Model(inputs=inputs, outputs=x)\n\ndef generator_loss(disc_generated_output, gen_output, target):\n gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)\n\n # mean absolute error\n l1_loss = tf.reduce_mean(tf.abs(target - gen_output))\n\n total_gen_loss = gan_loss + (LAMBDA * l1_loss)\n\n return total_gen_loss, gan_loss, l1_loss\n\ngenerator = Generator()\ngenerator.summary()\n\n# tf.keras.utils.plot_model(generator, show_shapes=True)\n\ndef Discriminator():\n initializer = tf.random_normal_initializer(0., 0.02)\n\n inp = tf.keras.layers.Input(shape=[IMG_WIDTH, IMG_HEIGHT, OUTPUT_CHANNELS], name='input_image')\n tar = tf.keras.layers.Input(shape=[IMG_WIDTH, IMG_HEIGHT, OUTPUT_CHANNELS], name='target_image')\n\n x = tf.keras.layers.concatenate([inp, tar]) # (bs, 256, 256, channels*2)\n\n down1 = downsample(64, 4, False)(x) # (bs, 128, 128, 64)\n down2 = downsample(128, 4)(down1) # (bs, 64, 64, 128)\n down3 = downsample(256, 4)(down2) # (bs, 32, 32, 256)\n\n zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3) # (bs, 34, 34, 256)\n conv = tf.keras.layers.Conv2D(512, 4, strides=1, kernel_initializer=initializer, use_bias=False)(zero_pad1) # (bs, 31, 31, 512)\n\n batchnorm1 = tf.keras.layers.BatchNormalization()(conv)\n leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)\n zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu) # (bs, 33, 33, 512)\n last = tf.keras.layers.Conv2D(1, 4, strides=1, 
kernel_initializer=initializer)(zero_pad2) # (bs, 30, 30, 1)\n\n return tf.keras.Model(inputs=[inp, tar], outputs=last)\n\ndiscriminator = Discriminator()\n# tf.keras.utils.plot_model(discriminator, show_shapes=True)\n\nloss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)\ndef discriminator_loss(disc_real_output, disc_generated_output):\n real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)\n\n generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)\n\n total_disc_loss = real_loss + generated_loss\n\n return total_disc_loss\n\ngenerator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, discriminator_optimizer=discriminator_optimizer, generator=generator, discriminator=discriminator)\n\ndef generate_images(model, test_input, tar, epoch, cnt):\n prediction = model(test_input, training=True)\n display_list = [test_input[0], tar[0], prediction[0]]\n # title = ['Input Image', 'Ground Truth', 'Predicted Image']\n\n for idx in range(3):\n\n # plt.subplot(1, 3, i+1)\n # plt.title(title[i])\n # getting the pixel values between [0, 1] to plot it.\n # plt.imshow(display_list[i] * 0.5 + 0.5)\n\n display_list[idx] = np.array(display_list[idx]*0.5 + 0.5)\n \n # tf.keras.preprocessing.image.save_img(f'genImgs/{time.time()}.png', display_list[idx])\n display_list[idx] = tf.keras.preprocessing.image.array_to_img(display_list[idx])\n\n # title = 'input image' if idx == 0 else ('ground truth' if idx == 1 else 'predicted image')\n\n\n images = np.hstack([display_list[0], display_list[1], display_list[2]])\n # cv2.putText(images, 'input image', (10, 25), cv2.FONT_HERSHEY_SIMPLEX,\t0.3, (255, 255, 0), 1)\n # cv2.putText(images, 'ground truth', (10 + IMG_HEIGHT, 25), cv2.FONT_HERSHEY_SIMPLEX,\t0.3, (255, 255, 0), 1)\n # cv2.putText(images, 'predicted image', (10 + IMG_HEIGHT*2, 25), cv2.FONT_HERSHEY_SIMPLEX,\t0.3, (255, 255, 0), 1)\n\n if not os.path.isdir(f'genImgs/{epoch}_epoch'):\n os.makedirs(f'genImgs/{epoch}_epoch')\n\n\n cv2.imwrite(f'genImgs/{epoch}_epoch/genImg_{cnt}.jpg', images)\n # plt.axis('off')\n # plt.show() \n\ntrain_dataset = tf.data.Dataset.list_files(PATH+'train/*.jpg')\ntrain_dataset = train_dataset.map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ntrain_dataset = train_dataset.shuffle(BUFFER_SIZE)\ntrain_dataset = train_dataset.batch(BATCH_SIZE)\n\ntest_dataset = tf.data.Dataset.list_files(PATH+'test/*.jpg')\ntest_dataset = test_dataset.map(load_image_test)\ntest_dataset = test_dataset.batch(BATCH_SIZE)\n\nlog_dir=\"logs/\"\n\nsummary_writer = tf.summary.create_file_writer(\n log_dir + \"fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n\n\n@tf.function\ndef train_step(input_image, target, epoch):\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n gen_output = generator(input_image, training=True)\n\n disc_real_output = discriminator([input_image, target], training=True)\n disc_generated_output = discriminator([input_image, gen_output], training=True)\n\n gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target)\n disc_loss = discriminator_loss(disc_real_output, disc_generated_output)\n\n generator_gradients = gen_tape.gradient(gen_total_loss,\n 
generator.trainable_variables)\n discriminator_gradients = disc_tape.gradient(disc_loss,\n discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(generator_gradients,\n generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(discriminator_gradients,\n discriminator.trainable_variables))\n\n with summary_writer.as_default():\n tf.summary.scalar('gen_total_loss', gen_total_loss, step=epoch)\n tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=epoch)\n tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=epoch)\n tf.summary.scalar('disc_loss', disc_loss, step=epoch)\n\n return gen_total_loss, gen_gan_loss, gen_l1_loss, disc_loss\ndef fit(train_ds, epochs, test_ds):\n for epoch in range(epochs):\n start = time.time()\n\n # display.clear_output(wait=True)\n print(\"Epoch: \", epoch)\n if epoch % 50 == 0:\n cnt = 1\n for example_input, example_target in test_ds.take(5):\n generate_images(generator, example_input, example_target, epoch, cnt)\n cnt += 1\n \n # Train\n for n, (input_image, target) in train_ds.enumerate():\n print('.', end='')\n if (n+1) % 100 == 0:\n print()\n gen_total_loss, gen_gan_loss, gen_l1_loss, disc_loss = train_step(input_image, target, epoch)\n print()\n\n # saving (checkpoint) the model every 20 epochs\n if epoch % 20 == 0:\n checkpoint.save(file_prefix = checkpoint_prefix)\n\n if not os.path.isdir('models/genModels'):\n os.makedirs('models/genModels')\n\n if not os.path.isdir('models/disModels'):\n os.makedirs('models/disModels')\n \n generator.save(f'models/genModels/gen_{epoch}.hdf5')\n discriminator.save(f'models/disModels/dis_{epoch}.hdf5')\n\n spendTime = (time.time() - start)\n print('-'*16+'info'+'-'*16)\n print (f'Time taken for epoch {epoch + 1} is {spendTime:.2f} sec\\n')\n print(f'gen loss : {gen_total_loss:.2f}, adv loss : {gen_gan_loss:.2f}\\nl1 loss : {gen_l1_loss:.2f} disc loss : {disc_loss:.2f}')\n print('-'*36+'\\n')\n checkpoint.save(file_prefix = checkpoint_prefix)\n\nfit(train_dataset, EPOCHS, test_dataset) ","sub_path":"keras/케라스공부/5.GANs/kim_pix2pix.py","file_name":"kim_pix2pix.py","file_ext":"py","file_size_in_byte":13079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
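The pix2pix pipeline above maps pixel values into [-1, 1] for the tanh-output generator (`normalize`) and back to [0, 1] for display (`generate_images`); a round-trip check of those two affine maps:

```python
import numpy as np

# Forward and inverse pixel mappings used by the training script above.
pixels = np.array([0.0, 127.5, 255.0])
normed = pixels / 127.5 - 1      # [0, 255] -> [-1, 1], as in normalize()
restored = normed * 0.5 + 0.5    # [-1, 1]  -> [0, 1],  as in generate_images()
print(normed)      # [-1.  0.  1.]
print(restored)    # [0.  0.5 1. ]
```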
+{"seq_id":"590194762","text":"# Copyright (c) 2022 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n\nimport copy\nimport itertools\nimport os\n\nimport numpy as np\nfrom infinibatch import iterators\n\nfrom .basic_loader import BaseBatchGen\nfrom .utils import NativeCheckpointableIterator, WeightIterator\n\n\nclass MLMLoader(BaseBatchGen):\n def __init__(\n self,\n args,\n dataset,\n dictionary,\n tokenizer,\n max_tokens=None,\n max_sentences=None,\n max_positions=None,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n num_shards=1,\n shard_id=0,\n ):\n super().__init__()\n self.args = args\n self.data = dataset.data\n self.data_dir = dataset.data_dir\n self.shuffle = dataset.shuffle\n self.dictionary = dictionary\n self.tokenizer = tokenizer\n\n self.max_tokens = max_tokens\n self.max_sentences = max_sentences\n self.max_positions = max_positions\n self.tokens_per_sample = args.tokens_per_sample\n self.sample_break_mode = args.sample_break_mode\n self.ignore_invalid_inputs = ignore_invalid_inputs\n self.required_batch_size_multiple = required_batch_size_multiple\n self.seed = str(seed)\n self.num_shards = num_shards\n self.shard_id = shard_id\n\n self.batch_read_ahead = args.batch_read_ahead\n\n self._build_iter()\n\n def _build_iter(self):\n tokenized_lines = self._multilingual_tokenize()\n self.padded_batches = self._batchify(tokenized_lines)\n\n prefetch_batches = iterators.PrefetchIterator(\n self.padded_batches,\n buffer_size=10000,\n buffer_in_main_process=True,\n log_empty_buffer_warning=True and self.shard_id == 0,\n )\n\n prefetch_batches = iterators.MapIterator(prefetch_batches, self._move_to_tensor)\n\n self._iter = prefetch_batches\n\n def _multilingual_tokenize(self):\n multilingual_iters = []\n weights = []\n\n for data in self.data:\n multilingual_iters.append(self._tokenize(data))\n if \"weight\" in data:\n weights.append(float(data[\"weight\"]))\n else:\n weights.append(int(data[\"count\"]))\n\n if len(multilingual_iters) == 1:\n return multilingual_iters[0]\n\n sampling_iterator = WeightIterator(weights)\n control_iterator = NativeCheckpointableIterator(sampling_iterator)\n tokenized_lines = iterators.MultiplexIterator(\n control_iterator, multilingual_iters\n )\n\n return tokenized_lines\n\n def _tokenize(self, data):\n \"\"\"\n data:\n {\n 'source': list[Path],\n 'source_lang': str,\n 'count': int,\n 'weight': float,\n 'name': str,\n }\n \"\"\"\n dataset = list(\n zip(\n data[\"source\"],\n itertools.repeat(data[\"source_lang\"]),\n )\n )\n\n if self.shuffle:\n chunk_files = iterators.InfinitePermutationSourceIterator(\n dataset,\n seed=self.seed,\n shuffle=self.shuffle,\n num_instances=self.num_shards,\n instance_rank=self.shard_id,\n )\n else:\n chunk_files = iterators.ChunkedSourceIterator(\n dataset,\n num_instances=self.num_shards,\n instance_rank=self.shard_id,\n )\n\n tokenized_lines = iterators.SelectManyIterator(\n chunk_files, lambda files: self._read_from_files(*files)\n )\n tokenized_lines = iterators.SamplingRandomMapIterator(\n tokenized_lines, self._prepare, self.seed\n )\n\n return tokenized_lines\n\n def _batchify(self, lines):\n\n if self.max_sentences is not None:\n if self.batch_read_ahead > 0:\n lines = iterators.BlockwiseShuffleIterator(\n lines, self.batch_read_ahead, self.seed\n )\n batches = iterators.FixedBatchIterator(lines, self.max_sentences)\n else:\n\n def dynamic_batch_size(sample):\n lengths = [len(x) for x in sample]\n batch_size = self.max_tokens // max(lengths)\n batch_size = (\n 
batch_size\n // self.required_batch_size_multiple\n * self.required_batch_size_multiple\n )\n return max(1, batch_size)\n\n batches = iterators.BucketedReadaheadBatchIterator(\n lines,\n read_ahead=self.batch_read_ahead,\n key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None,\n batch_size=dynamic_batch_size,\n shuffle=self.shuffle,\n seed=self.seed,\n )\n\n def collate(batch):\n batch_size = len(batch)\n\n mlm_source_max_length = max([len(x[0]) for x in batch])\n mlm_target_max_length = max([len(x[1]) for x in batch])\n s2s_source_max_length = max([len(x[2]) for x in batch])\n s2s_target_max_length = max([len(x[3]) for x in batch])\n\n mlm_source_ids = np.full(\n shape=(batch_size, mlm_source_max_length),\n dtype=np.int32,\n fill_value=self.dictionary.pad(),\n )\n mlm_target_ids = np.full(\n shape=(batch_size, mlm_target_max_length),\n dtype=np.int32,\n fill_value=self.dictionary.pad(),\n )\n s2s_source_ids = np.full(\n shape=(batch_size, s2s_source_max_length),\n dtype=np.int32,\n fill_value=self.dictionary.pad(),\n )\n s2s_target_ids = np.full(\n shape=(batch_size, s2s_target_max_length - 1),\n dtype=np.int32,\n fill_value=self.dictionary.pad(),\n )\n s2s_prev_input_ids = np.full(\n shape=(batch_size, s2s_target_max_length - 1),\n dtype=np.int32,\n fill_value=self.dictionary.pad(),\n )\n\n for i, (\n mlm_input_ids,\n mlm_label_ids,\n s2s_input_ids,\n s2s_label_ids,\n ) in enumerate(batch):\n mlm_source_ids[i, : len(mlm_input_ids)] = mlm_input_ids\n mlm_target_ids[i, : len(mlm_label_ids)] = mlm_label_ids\n s2s_source_ids[i, : len(s2s_input_ids)] = s2s_input_ids\n s2s_target_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[1:]\n s2s_prev_input_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[:-1]\n\n ret_batch = {\n \"net_input\": {\n \"src_tokens\": mlm_source_ids.astype(np.int64),\n },\n \"target\": mlm_target_ids.astype(np.int64),\n \"nsentences\": batch_size,\n \"ntokens\": sum([len(x[0]) for x in batch]),\n }\n\n return ret_batch\n\n padded_batches = iterators.MapIterator(batches, collate)\n\n return padded_batches\n\n def _prepare(self, _random, doc):\n nonmasked_tokens, masked_tokens = self._mask_lm(_random, doc)\n nonnoise_spans, noise_spans = self._span_corruption(_random, doc)\n return nonmasked_tokens, masked_tokens, nonnoise_spans, noise_spans\n\n def _mask_lm(self, _random, doc):\n def mask_tokens():\n return \"\"\n\n length = len(doc)\n mask_tokens_num = int(length * self.args.mask_prob)\n mask_tokens_num = min(max(mask_tokens_num, 1), length - 1)\n possible_mask_positions = _random.sample(range(length), k=mask_tokens_num)\n possible_mask_positions = sorted(possible_mask_positions)\n\n nonmasked_tokens = copy.deepcopy(doc)\n masked_tokens = [self.dictionary.pad() for _ in range(len(doc))]\n\n for position in possible_mask_positions:\n # masked_tokens.append(nonmasked_tokens[position])\n masked_tokens[position] = nonmasked_tokens[position]\n nonmasked_tokens[position] = self.dictionary.indices[mask_tokens()]\n\n return nonmasked_tokens, masked_tokens\n\n def _span_corruption(self, _random, doc):\n def mask_tokens(i):\n return f\"\"\n\n length = len(doc)\n noise_tokens_num = int(length * self.args.mask_prob)\n noise_tokens_num = min(max(noise_tokens_num, 1), length - 1)\n noise_spans_num = int(noise_tokens_num / self.args.span_length)\n noise_spans_num = max(noise_spans_num, 1)\n nonnoise_tokens_num = length - noise_tokens_num\n\n if noise_spans_num == 1:\n noise_split_positions = [0, noise_tokens_num]\n else:\n possible_split_positions = list(range(1, 
noise_tokens_num))\n _random.shuffle(possible_split_positions)\n noise_split_positions = sorted(\n possible_split_positions[: noise_spans_num - 1]\n )\n noise_split_positions = [0] + noise_split_positions + [noise_tokens_num]\n\n possible_insert_positions = list(range(nonnoise_tokens_num))\n _random.shuffle(possible_insert_positions)\n noise_insert_positions = sorted(possible_insert_positions[:noise_spans_num])\n\n nonnoise_spans, noise_spans = [], []\n last_end = 0\n for i in range(noise_spans_num):\n start_pos = noise_insert_positions[i] + noise_split_positions[i]\n end_pos = noise_insert_positions[i] + noise_split_positions[i + 1]\n mask_id = self.dictionary.indices[mask_tokens(i)]\n\n if getattr(self.args, \"remove_target_sentinel\", False):\n noise_spans.append(doc[start_pos:end_pos])\n else:\n noise_spans.append([mask_id] + doc[start_pos:end_pos])\n\n if getattr(self.args, \"remove_source_sentinel\", False):\n nonnoise_spans.extend(doc[last_end:start_pos])\n else:\n nonnoise_spans.extend(doc[last_end:start_pos] + [mask_id])\n\n last_end = end_pos\n\n nonnoise_spans.extend(doc[last_end:])\n noise_spans = sum(noise_spans, [])\n\n return nonnoise_spans, noise_spans\n\n def _read_from_files(self, source_file, source_lang):\n # data = []\n file_path = os.path.join(self.data_dir, source_file)\n\n if not os.path.exists(file_path):\n print(\"| file {} not exists\".format(file_path), flush=True)\n return iter([]) # skip bad file\n\n with open(file_path, \"r\", encoding=\"utf8\") as f:\n lines = f.read().strip().split(\"\\n\")\n\n doc = [self.dictionary.bos()]\n for line in lines:\n if line == \"\":\n if self.sample_break_mode == \"complete_doc\":\n # data.append(doc)\n yield doc\n doc = [self.dictionary.bos()]\n continue\n\n tokenized_line = self.tokenizer.EncodeAsPieces(line)\n tokenized_id = [\n self.dictionary.index(token) for token in tokenized_line\n ] + [self.dictionary.eos_index]\n\n if len(tokenized_id) > self.tokens_per_sample:\n continue\n if len(doc) + len(tokenized_id) > self.tokens_per_sample:\n # data.append(doc)\n yield doc\n doc = [self.dictionary.bos()]\n doc.extend(tokenized_id)\n\n if len(doc) > 1 and len(doc) <= self.tokens_per_sample:\n # data.append(doc)\n yield doc\n\n # return data\n","sub_path":"kosmos-2/torchscale/examples/fairseq/tasks/data/mlm_loader.py","file_name":"mlm_loader.py","file_ext":"py","file_size_in_byte":11708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
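A toy, pure-Python illustration of the T5-style span corruption that `_span_corruption` implements: each noise span is cut out of the source and replaced by a sentinel, and the target pairs each sentinel with the tokens it hid. The `<mask_i>` strings here stand in for the loader's `self.dictionary` sentinel ids, and the spans are supplied directly rather than sampled:

```python
# Simplified span corruption: spans are (start, length) pairs, sorted and
# non-overlapping; the real loader samples them randomly.
def span_corrupt(tokens, spans):
    source, target, last = [], [], 0
    for i, (start, length) in enumerate(spans):
        sentinel = f"<mask_{i}>"
        source += tokens[last:start] + [sentinel]
        target += [sentinel] + tokens[start:start + length]
        last = start + length
    return source + tokens[last:], target

src, tgt = span_corrupt(list("abcdefgh"), [(1, 2), (5, 1)])
print(src)  # ['a', '<mask_0>', 'd', 'e', '<mask_1>', 'g', 'h']
print(tgt)  # ['<mask_0>', 'b', 'c', '<mask_1>', 'f']
```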
+{"seq_id":"349233242","text":"import rf_scripts.utils_rf as urf\nimport pickle\n\nfile = \"../tables/noc_answers.csv\"\nx, x_agg, y, y_agg, x_noclvl, y_noclvl = urf.data_proccess(file,discrete=True)\n\nmae_results = []\n\nfor i in range(20):\n sfs_mae = urf.run_sfs(x,y['increase'],'cat',True)\n\n mae_results.append(sfs_mae.get_metric_dict())\n\nwith open('mae_results_ws_cat2.pkl','wb') as f:\n pickle.dump(mae_results,f)\n","sub_path":"old files/rf_scripts/SFFS Files/many_SFFS_ws_cat.py","file_name":"many_SFFS_ws_cat.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"2385766","text":"\n\nfrom xai.brain.wordbase.nouns._neurotic import _NEUROTIC\n\n#calss header\nclass _NEUROTICS(_NEUROTIC, ):\n\tdef __init__(self,): \n\t\t_NEUROTIC.__init__(self)\n\t\tself.name = \"NEUROTICS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"neurotic\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_neurotics.py","file_name":"_neurotics.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"603958912","text":"#!/usr/bin/env python3\n\n# Modified MIT License\n\n# Software Copyright (c) 2019 OpenAI\n\n# We don’t claim ownership of the content you create with GPT-2, so it is yours to do with as you please.\n# We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n# The above copyright notice and this permission notice need not be included\n# with content created by the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fire\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.contrib.training import HParams\nimport sys\nsys.path.append(\"../sample\")\nimport pytorch.utils.gpt_token_encoder as encoder\nfrom utils.common import TransformerArgument\nfrom utils.common import DecodingGpt2Argument\nfrom utils.common import time_test\nfrom utils.encoder import build_sequence_mask\n\ndef sample_model(\n vocab_file=\"models/gpt2-vocab.json\",\n bpe_file=\"models/gpt2-merges.txt\",\n model_name='124M',\n nsamples=1,\n batch_size=1,\n length=12,\n temperature=1,\n top_k=4,\n top_p=0,\n models_dir='models',\n data_type='fp32'\n):\n \"\"\"Run the sample_model.\n\n :model_name=124M : String, which model to use\n :nsamples=0 : Number of samples to return, if 0, continues to\n generate samples indefinately.\n :batch_size=1 : Number of batches (only affects speed/memory).\n :length=None : Number of tokens in generated text, if None (default), is\n determined by model hyperparameters\n :temperature=1 : Float value controlling randomness in boltzmann\n distribution. Lower temperature results in less random completions. As the\n temperature approaches zero, the model will become deterministic and\n repetitive. Higher temperature results in more random completions.\n :top_k=4 : Integer value controlling diversity. 
1 means only 1 word is\n considered for each step (token), resulting in deterministic completions,\n while 40 means 40 words are considered at each step. 0 (default) is a\n special setting meaning no restrictions. 40 generally is a good value.\n :models_dir : path to parent folder containing model subfolders\n (i.e. contains the folder)\n \"\"\"\n np.random.seed(1)\n tf.set_random_seed(1)\n\n if data_type == 'fp32':\n tf_data_type = tf.float32\n elif data_type == 'fp16':\n tf_data_type = tf.float16\n else:\n assert(False)\n\n models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n vocab_file=os.path.join(models_dir, model_name, 'encoder.json')\n bpe_file=os.path.join(models_dir, model_name, 'vocab.bpe')\n enc = encoder.get_encoder(vocab_file, bpe_file)\n hparams = HParams(n_vocab=0,\n n_ctx=1024,\n n_embd=768,\n n_head=12,\n n_layer=12)\n \n with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if length is None:\n length = hparams.n_ctx\n elif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(graph=tf.Graph(), config=config) as sess:\n saver = tf.train.import_meta_graph(\"{}/{}/model.ckpt.meta\".format(models_dir, model_name))\n\n lengths = np.random.randint(low=1, high=8, size=batch_size)\n min_start_length = lengths.min()\n max_start_length = lengths.max()\n attention_mask = np.tile(np.tri(min_start_length), (batch_size, 1, 1))\n\n start_ids = np.ones([batch_size, max_start_length]) * enc.encoder['<|endoftext|>']\n for i in range(batch_size):\n start_ids[i][0:lengths[i]] = 198\n # User can put some real start ids here, we use '\\n' (198) here.\n\n sess.run(tf.global_variables_initializer())\n print(\"[INFO] restore the model {}/{}\".format(models_dir, model_name))\n saver.restore(sess, (\"{}/{}/model.ckpt\".format(models_dir, model_name)))\n \n decoder_args = TransformerArgument(beam_width=1,\n head_num=hparams.n_head,\n size_per_head=hparams.n_embd // hparams.n_head,\n num_layer=hparams.n_layer,\n dtype=tf_data_type,\n kernel_init_range=0.00,\n bias_init_range=0.00)\n\n decoding_args = DecodingGpt2Argument(hparams.n_vocab,\n enc.encoder['<|endoftext|>'],\n enc.encoder['<|endoftext|>'],\n length + 2,\n decoder_args,\n top_k,\n top_p,\n temperature)\n \n ckpt_dict = {}\n for var in tf.trainable_variables():\n ckpt_dict[var.name] = var\n decoding_vars = tf.trainable_variables()\n \n op_output = ft_gpt_op(decoding_vars,\n decoding_args,\n batch_size,\n start_ids,\n min_start_length,\n max_start_length,\n attention_mask)\n\n generated = 0\n \n while nsamples == 0 or generated < nsamples:\n op_out = sess.run(op_output)\n\n for i in range(batch_size):\n generated += 1\n \n text = enc.decode(op_out[i])\n print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n print(text)\n\ndef preprocess_decoder_var(decoding_vars,\n num_layer,\n using_model_var,\n checkpoint_filename,\n data_type,\n fuse_qkv=True):\n '''\n Args:\n decoding_vars: A list of tf.Tensor. The variables of decoding. \n num_layer: A int value. The number of transformer layer of decoder in decoding\n using_model_var: A bool value. Using the model variables of TensorFlow or not.\n If True, then putting the model variables of TensorFlow decoding model into decoding op directly. \n The data type is tensor of TensorFlow in this case. 
\n \n If False, then restoring the values of variables from the checkpoint_filename, and putting\n the values into decoding op.\n The data type is numpy is this case. \n checkpoint_file: A string. The checkpoint file name of storing the values of model. The checkpoint should be stored in \n pickle, and the name of checkpoint should be xxx.pkl.\n The model is saved by dict. \n The key of the dict is the name of variables\n The value of the dict is the values of variables\n For example, decoding_vars[0]=,\n then the key is 'transformer/decoder/layer_0/masked_multi_head/LayerNorm/beta:0'; the value is sess.run(decoding_vars[0])\n data_type: tf.float32 or tf.float16. \n Only used when using_model_var is False. Convert the numpy data to the data type of model.\n \n Outputs:\n vars_in_diff_layers_dict: A dict to store the variables by their name.\n \n For decoder variables, the key is like 'transformer/decoder/layer/masked_multi_head/LayerNorm/beta:0', \n which is similar to the name of variables, except we use 'layer' but not 'layer_x'. The value is a list, \n which contains 'transformer/decoder/layer_%d/masked_multi_head/LayerNorm/beta:0' % i for i in range(num_layer)\n \n For other variables, the key is the name of variable, and the value is the correspoding weight.\n \n Note that we return the concated weights. The concat operation would bring other overhead, and this should be optimized in \n the real application. The recommended method is pre-processing the weights as numpy format. Because TensorFlow do the operations\n for each inference if using the TensorFlow to pre-process the weights.\n '''\n \n var_dict = {}\n for var in decoding_vars:\n var_dict[var.name] = var\n \n vars_in_diff_layers_dict = {}\n vars_in_diff_layers_dict[\"transformer/decoder/LayerNorm/beta:0\"] = tf.cast(var_dict[\"model/ln_f/b:0\"], dtype=data_type)\n vars_in_diff_layers_dict[\"transformer/decoder/LayerNorm/gamma:0\"] = tf.cast(var_dict[\"model/ln_f/g:0\"], dtype=data_type)\n vars_in_diff_layers_dict[\"model/wpe:0\"] = tf.cast(var_dict[\"model/wpe:0\"], dtype=data_type)\n vars_in_diff_layers_dict[\"model/wte:0\"] = tf.cast(var_dict[\"model/wte:0\"], dtype=data_type)\n\n for i in range(num_layer):\n \"\"\"Handling the names of q, k, v kernel and bias because their names\n are different for fusing the qkv or not.\"\"\"\n \n layer_prefix_name = \"transformer/decoder/layer_%d/\" % i\n gpt2_layer_prefix_namx = \"model/h%d/\" % i\n\n var_dict[layer_prefix_name + 'masked_multi_head/query/kernel:0'], \\\n var_dict[layer_prefix_name + 'masked_multi_head/key/kernel:0'], \\\n var_dict[layer_prefix_name + 'masked_multi_head/value/kernel:0'] = tf.split(var_dict[gpt2_layer_prefix_namx + 'attn/c_attn/w:0'], 3, axis=-1)\n\n var_dict[layer_prefix_name + 'masked_multi_head/query/bias:0'], \\\n var_dict[layer_prefix_name + 'masked_multi_head/key/bias:0'], \\\n var_dict[layer_prefix_name + 'masked_multi_head/value/bias:0'] = tf.split(var_dict[gpt2_layer_prefix_namx + 'attn/c_attn/b:0'], 3, axis=-1)\n \n\n layer_prefix_name = 'transformer/decoder/layer'\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/LayerNorm/beta:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/ln_1/b:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/LayerNorm/gamma:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/ln_1/g:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n\n vars_in_diff_layers_dict[layer_prefix_name + 
'/masked_multi_head/conv1d/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/attn/c_attn/w:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/conv1d/bias:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/attn/c_attn/b:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/query/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict[layer_prefix_name + '_%d/masked_multi_head/query/kernel:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/query/bias:0'] = \\\n tf.cast(tf.concat([ var_dict[layer_prefix_name + '_%d/masked_multi_head/query/bias:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/key/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict[layer_prefix_name + '_%d/masked_multi_head/key/kernel:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/key/bias:0'] = \\\n tf.cast(tf.concat([ var_dict[layer_prefix_name + '_%d/masked_multi_head/key/bias:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/value/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict[layer_prefix_name + '_%d/masked_multi_head/value/kernel:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/value/bias:0'] = \\\n tf.cast(tf.concat([ var_dict[layer_prefix_name + '_%d/masked_multi_head/value/bias:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/conv1d_1/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/attn/c_proj/w:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/masked_multi_head/conv1d_1/bias:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/attn/c_proj/b:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n \n vars_in_diff_layers_dict[layer_prefix_name + '/ffn/LayerNorm/beta:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/ln_2/b:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/ffn/LayerNorm/gamma:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/ln_2/g:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n \n vars_in_diff_layers_dict[layer_prefix_name + '/ffn/conv1d/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/mlp/c_fc/w:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/ffn/conv1d/bias:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/mlp/c_fc/b:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/ffn/conv1d_1/kernel:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/mlp/c_proj/w:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n vars_in_diff_layers_dict[layer_prefix_name + '/ffn/conv1d_1/bias:0'] = \\\n tf.cast(tf.concat([ var_dict['model/h%d/mlp/c_proj/b:0' % i] for i in range(num_layer) ], axis=0), dtype=data_type)\n \n return vars_in_diff_layers_dict\n\ndef ft_gpt_op(decoding_vars,\n decoding_args,\n batch_size,\n start_ids,\n min_start_length,\n max_start_length,\n attention_mask):\n \"\"\"Run the 
decoding with sampling by FasterTransformer.\n\n Args:\n decoder_vars: A list of tf.Tensor. The variables for decoding. A list of model variables of TensorFlow model.\n decoder_args: The arguments for decoding. The details are in the class \"DecodingGpt2Argument\" of common.py\n Outputs:\n output_ids: A tf.Tensor with shape [batch_size, max(sequence_lengths)], with int type.\n The results of decoding. It contains the id of token of vocabulary.\n sequence_lengths: A tf.Tensor with shape [batch_size], with int type.\n \"\"\"\n decoder_args = decoding_args.decoder_args\n decoding_op_module = tf.load_op_library(os.path.join('./lib/libtf_gpt.so'))\n data_type = decoder_args.dtype\n\n vars_dict_in_differ_layers = preprocess_decoder_var(decoding_vars,\n decoder_args.num_layer,\n True,\n None,\n data_type,\n False)\n if decoder_args.fuse_qkv == True:\n masked_multi_head_first_kernel = vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/conv1d/kernel:0']\n masked_multi_head_first_bias = vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/conv1d/bias:0']\n else:\n masked_multi_head_first_kernel = vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/query/kernel:0'], # 4\n masked_multi_head_first_bias = vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/query/bias:0'], # 5\n\n output_ids = decoding_op_module.decoding_gpt(\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/LayerNorm/beta:0'], # 0\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/LayerNorm/gamma:0'], # 1\n masked_multi_head_first_kernel,\n masked_multi_head_first_bias,\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/key/kernel:0'], # 4\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/key/bias:0'], # 5\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/value/kernel:0'], # 6\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/value/bias:0'], # 7\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/conv1d_1/kernel:0'], # 8\n vars_dict_in_differ_layers['transformer/decoder/layer/masked_multi_head/conv1d_1/bias:0'], # 9\n vars_dict_in_differ_layers['transformer/decoder/layer/ffn/LayerNorm/beta:0'], # 10\n vars_dict_in_differ_layers['transformer/decoder/layer/ffn/LayerNorm/gamma:0'], # 11\n vars_dict_in_differ_layers['transformer/decoder/layer/ffn/conv1d/kernel:0'], # 12\n vars_dict_in_differ_layers['transformer/decoder/layer/ffn/conv1d/bias:0'], # 13\n vars_dict_in_differ_layers['transformer/decoder/layer/ffn/conv1d_1/kernel:0'], # 14\n vars_dict_in_differ_layers['transformer/decoder/layer/ffn/conv1d_1/bias:0'], # 15\n vars_dict_in_differ_layers['transformer/decoder/LayerNorm/beta:0'], # 16\n vars_dict_in_differ_layers['transformer/decoder/LayerNorm/gamma:0'], # 17\n vars_dict_in_differ_layers['model/wte:0'], # 18\n vars_dict_in_differ_layers['model/wte:0'], # 19\n vars_dict_in_differ_layers['model/wpe:0'], # 20\n attention_mask, # 21\n start_ids, # 22\n min_start_length, # 23\n max_start_length, # 24\n batch_size=batch_size,\n candidate_num=decoding_args.top_k,\n probability_threshold=decoding_args.top_p,\n max_seq_len=decoding_args.max_seq_len,\n head_num=decoder_args.head_num, \n size_per_head=decoder_args.size_per_head,\n num_layer=decoder_args.num_layer,\n start_id=decoding_args.start_id, \n end_id=decoding_args.end_id,\n temperature=decoding_args.temperature,\n 
is_fuse_qkv=decoder_args.fuse_qkv\n )\n \n output_ids = tf.transpose(output_ids, [1, 0])\n return output_ids\n\nif __name__ == '__main__':\n fire.Fire(sample_model)\n\n","sub_path":"developer/lab/tools/NVIDIA/FasterTransformer/sample/tensorflow/gpt_sample.py","file_name":"gpt_sample.py","file_ext":"py","file_size_in_byte":20550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
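The record above un-fuses GPT-2's combined attention weights: tf.split cuts the fused attn/c_attn kernel into query, key and value parts before handing them to the FasterTransformer op. A minimal NumPy sketch of the same split, assuming the fused kernel has shape [n_embd, 3 * n_embd] as in GPT-2 (the shapes here are illustrative, not taken from the record):

import numpy as np

n_embd = 8  # illustrative; GPT-2 124M uses 768
fused_kernel = np.random.randn(n_embd, 3 * n_embd)  # stand-in for attn/c_attn/w

# Split the last axis into three equal blocks: query, key, value.
q_w, k_w, v_w = np.split(fused_kernel, 3, axis=-1)

assert q_w.shape == k_w.shape == v_w.shape == (n_embd, n_embd)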
+{"seq_id":"328009298","text":"\"\"\"\nI came across a weird warning in pycharm about 'default argument\nvalue is mutable'. This is a test file to understand...\n\nClass variables and default arguments are created when the function is loaded\n(and only once), that means that any changes to a \"mutable default argument\"\nor \"mutable class variable\" are permanent\n\n\"\"\"\n\n\ndef func(ilist=[1, 2, 3]):\n\n print('number of element in the input list : ', len(ilist),\n '\\n', 'With values : ')\n\n for element in ilist:\n print(element)\n\n ilist.append(4)\n\n\ndef main():\n func()\n func()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/note1.py","file_name":"note1.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"304038558","text":"import random\nimport gym\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\n\n\n\nENV_NAME = \"CartPole-v0\"\n\nGAMMA = 0.95\nLEARNING_RATE = 0.001 #value of alpha\n\nMEMORY_SIZE = 1000000\nBATCH_SIZE = 20\n\nEXPLORATION_MAX = 1.0 #epsilon max\nEXPLORATION_MIN = 0.1 #epsilon min\nEXPLORATION_DECAY = 0.995 #epsilon decay\n\n\nclass DQNSolver:\n\n def __init__(self, observation_space, action_space):\n self.exploration_rate = EXPLORATION_MAX# start with max exporation\n\n self.action_space = action_space #save action space\n self.memory = deque(maxlen=MEMORY_SIZE) # memory is a double ended queue which will be used to store the experience\n # initialize the model\n self.model = Sequential()\n self.model.add(Dense(24, input_shape=(observation_space,), activation=\"relu\")) #observation space forms the first layer with RELU activation and outputs into24 layers\n self.model.add(Dense(24, activation=\"relu\")) #input will be the output of previous layer and output will be another 24 units layer (basically hidden layer) with activation function as relu\n self.model.add(Dense(self.action_space, activation=\"linear\")) # output will be the action space and input will be previous layer, activation is linear.\n self.model.compile(loss=\"mse\", optimizer=Adam(lr=LEARNING_RATE))# loss function is Mean square error and optimizer is stochastic gradient descent method with learning rate of given value\n\n\n def remember(self, state, action, reward, next_state, done): # appends the values of th s,a,r,s' for later training\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, state): #returns an action based for given state as per the value of epsilon\n if np.random.rand() < self.exploration_rate:\n return random.randrange(self.action_space)\n q_values = self.model.predict(state) # the zeroth element will be the array of size action space or basically the output of the neural network\n return np.argmax(q_values[0])\n\n def experience_replay(self):\n '''\n This method will feed the experience of the environment stored so far into the network. This method will only execute once the memory is full.\n done will be true for terminal or false for non terminal.?? 
\n        :return:\n        '''\n        if len(self.memory) < BATCH_SIZE:\n            return\n        batch = random.sample(self.memory, BATCH_SIZE)  # sample a random minibatch so learning is not purely sequential\n        for state, action, reward, state_next, terminal in batch:\n            q_update = reward\n            if not terminal:\n                q_update = (reward + GAMMA * np.amax(self.model.predict(state_next)[0]))  # one-step Q-learning update rule\n            q_values = self.model.predict(state)\n            q_values[0][action] = q_update\n            self.model.fit(state, q_values, verbose=0)  # fit runs one gradient step towards the updated Q-targets\n        # self.exploration_rate *= EXPLORATION_DECAY\n        self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)\n\n\ndef cartpole():\n    env = gym.make(ENV_NAME)\n    # score_logger = ScoreLogger(ENV_NAME)\n    observation_space = env.observation_space.shape[0]\n    action_space = env.action_space.n\n    dqn_solver = DQNSolver(observation_space, action_space)\n    run = 0\n    while True:\n        run += 1\n        state = env.reset()\n        state = np.reshape(state, [1, observation_space])\n        step = 0\n        dqn_solver.exploration_rate *= EXPLORATION_DECAY\n        while True:\n            step += 1\n            env.render()\n            action = dqn_solver.act(state)\n            state_next, reward, terminal, info = env.step(action)\n            # reward = reward if not terminal else -reward  # needs to be changed for MountainCar\n            state_next = np.reshape(state_next, [1, observation_space])  # reshape to (1, observation_space) so the network sees a batch of one\n            dqn_solver.remember(state, action, reward, state_next, terminal)\n            state = state_next\n            if terminal:\n                print(\"Run: \" + str(run) + \", exploration: \" + str(dqn_solver.exploration_rate) + \", score: \" + str(step))\n                # score_logger.add_score(step, run)\n                break\n            dqn_solver.experience_replay()  # a no-op until the memory holds BATCH_SIZE samples\n\n\nif __name__ == \"__main__\":\n    cartpole()","sub_path":"RL Project CCE/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
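The heart of experience_replay above is the one-step Q-learning target, reward + GAMMA * max_a' Q(s', a'). The same target computed in isolation with NumPy (the values below are illustrative):

import numpy as np

GAMMA = 0.95

def q_target(reward, q_next, terminal):
    # Bootstrap from the best next action unless the transition ended the episode.
    return reward if terminal else reward + GAMMA * np.max(q_next)

print(q_target(1.0, np.array([0.2, 0.7]), terminal=False))  # 1.0 + 0.95 * 0.7 = 1.665
print(q_target(1.0, np.array([0.2, 0.7]), terminal=True))   # 1.0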
+{"seq_id":"3854853","text":"def some_function():\n print(\"1~10사���의 수를 입력하세요:\")\n num = int(input())\n if num<1 or num>10:\n raise Exception(\"유효하지 않은 숫자입니다/\")\n else:\n print(num)\ntry:\n some_function()\nexcept Exception as err:\n print(err)\n","sub_path":"python/raise_in_function.py","file_name":"raise_in_function.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"56743857","text":"#!/usr/bin/env python\n\n\"\"\"Helper functions for dbprocessing unit tests\n\nSimply importing this will redirect the logs and change pathing, so import\nbefore importing any dbprocessing modules.\n\"\"\"\n\nimport datetime\nimport json\nimport os\nimport os.path\nimport shutil\nimport sys\nimport sysconfig\nimport tempfile\n\nimport sqlalchemy\nimport sqlalchemy.engine\nimport sqlalchemy.schema\n\n\n#The log is opened on import, so need to quarantine the log directory\n#right away (before other dbp imports)\nos.environ['DBPROCESSING_LOG_DIR'] = os.path.join(os.path.dirname(__file__),\n 'unittestlogs')\n\n\ntestsdir = os.path.dirname(__file__) # Used by add_build_to_path\n\n\n# Define this before using it for importing dbprocessing modules...\ndef add_build_to_path():\n \"\"\"Adds the python build directory to the search path.\n\n Locates the build directory in the same repository as this test module\n and adds the (version-specific) library directories to the Python\n module search path, so the unit tests can be run against the built\n instead of installed version.\n\n This is run on import of this module.\n \"\"\"\n # Prioritize version-specific path; py2 tends to be version-specific\n # and py3 tends to use just \"lib\". But only use first-matching.\n for pth in ('lib', # Prepending, so add low-priority paths first.\n 'lib.{0}-{1}.{2}'.format(sysconfig.get_platform(),\n *sys.version_info[:2]),\n ):\n buildpath = os.path.abspath(os.path.join(testsdir, '..', 'build', pth))\n if os.path.isdir(buildpath):\n if not buildpath in sys.path:\n sys.path.insert(0, buildpath)\n break\n\n\n# Get the \"build\" version of dbp for definitions in this module.\nadd_build_to_path()\nimport dbprocessing.DButils\nimport dbprocessing.tables\nimport dbprocessing.Version\n\n\n__all__ = ['AddtoDBMixin', 'add_build_to_path', 'add_scripts_to_path',\n 'driveroot', 'testsdir']\n\n\ndriveroot = os.path.join(os.path.splitdrive(os.getcwd())[0], os.path.sep)\\\n if sys.platform == 'win32' else os.path.sep\n\"\"\"Root of the current drive (or filesystem)\"\"\"\n\nclass AddtoDBMixin(object):\n \"\"\"Mixin class providing helper functions for adding to database\n\n Useful for testing when db tables need to be populated with some\n simplified parameters and/or defaults.\n\n Assumes the existence of a ``dbu`` data member, which is a DBUtils\n instance to be used for adding to a database.\n \"\"\"\n\n def addProduct(self, product_name, instrument_id=None, level=0,\n format=None):\n \"\"\"Add a product to database (incl. 
inspector)\n\n Won't actually work, just getting the record in\n \"\"\"\n if instrument_id is None:\n ids = self.dbu.session.query(self.dbu.Instrument).all()\n instrument_id = ids[0].instrument_id\n if format is None:\n format = product_name.replace(' ', '_') + '_{Y}{m}{d}_v{VERSION}'\n pid = self.dbu.addProduct(\n product_name=product_name,\n instrument_id=instrument_id,\n relative_path='junk',\n format=format,\n level=level,\n product_description='Test product {}'.format(product_name)\n )\n self.dbu.addInstrumentproductlink(instrument_id, pid)\n self.dbu.addInspector(\n filename='fake.py',\n relative_path='inspectors',\n description='{} inspector'.format(product_name),\n version=dbprocessing.Version.Version(1, 0, 0),\n active_code=True,\n date_written='2010-01-01',\n output_interface_version=1,\n newest_version=True,\n product=pid,\n arguments=\"foo=bar\")\n return pid\n\n def addProcess(self, process_name, output_product_id,\n output_timebase='DAILY'):\n \"\"\"Add a process + code record to the database\n\n Again, just the minimum to get the records in\n \"\"\"\n process_id = self.dbu.addProcess(\n process_name,\n output_product=output_product_id,\n output_timebase=output_timebase)\n code_id = self.dbu.addCode(\n filename='junk.py',\n relative_path='scripts',\n code_start_date='2010-01-01',\n code_stop_date='2099-01-01',\n code_description='{} code'.format(process_name),\n process_id=process_id,\n version='1.0.0',\n active_code=1,\n date_written='2010-01-01',\n output_interface_version=1,\n newest_version=1,\n arguments=process_name.replace(' ', '_') + '_args')\n return process_id, code_id\n\n def addProductProcessLink(self, product_id, process_id, optional=False,\n yesterday=0, tomorrow=0):\n \"\"\"Minimum record for product-process link in db\"\"\"\n self.dbu.addproductprocesslink(product_id, process_id, optional,\n yesterday, tomorrow)\n\n def addFile(self, filename, product_id, utc_date=None, version=None,\n utc_start=None, utc_stop=None, exists=True):\n \"\"\"Add a file to the database\"\"\"\n if utc_date is None:\n utc_date = datetime.datetime.strptime(\n filename.split('_')[-2], '%Y%m%d')\n if version is None:\n version = filename.split('_v')[-1]\n while version.count('.') > 2:\n version = version[:version.rfind('.')]\n level = self.dbu.getEntry('Product', product_id).level\n if utc_start is None:\n utc_start = utc_date.replace(\n hour=0, minute=0, second=0, microsecond=0)\n if utc_stop is None:\n utc_stop = utc_date.replace(\n hour=23, minute=59, second=59, microsecond=999999)\n fid = self.dbu.addFile(\n filename=filename,\n data_level=level,\n version=dbprocessing.Version.Version.fromString(version),\n product_id=product_id,\n utc_file_date=utc_date,\n utc_start_time=utc_start,\n utc_stop_time=utc_stop,\n file_create_date=datetime.datetime.now(),\n exists_on_disk=exists,\n )\n return fid\n\n def addSkeletonMission(self):\n \"\"\"Starting with empty database, add a skeleton mission\n\n Should be called before opening a DButils instance so that\n the mission, etc. 
tables can be created.\n\n Makes one mission, one satellite, and two instruments\n\n Assumes self.td has the test/temp directory path (str),\n and self.dbname has the name of the database (including full\n path if sqlite).\n Will populate self.instrument_ids for a list of instruments.\n \"\"\"\n dbu = dbprocessing.DButils.DButils(self.dbname)\n if dbu.session.query(sqlalchemy.func.count(\n dbu.Mission.mission_id)).scalar():\n raise RuntimeError('Unit test database is not empty!')\n mission_id = dbu.addMission(\n 'Test mission',\n os.path.join(self.td, 'data'),\n os.path.join(self.td, 'incoming'),\n os.path.join(self.td, 'codes'),\n os.path.join(self.td, 'inspectors'),\n os.path.join(self.td, 'errors'))\n satellite_id = dbu.addSatellite('Satellite', mission_id)\n # Make two instruments (so can test interactions between them)\n self.instrument_ids = [\n dbu.addInstrument(instrument_name='Instrument {}'.format(i),\n satellite_id=satellite_id)\n for i in range(1, 3)]\n del dbu\n\n def makeTestDB(self):\n \"\"\"Create a test database and working directory\n\n Creates three attributes:\n * self.td: a temporary directory\n * self.pg: is the database postgres (if False, sqlite)\n * self.dbname: name of database (sqlite path or postgresql db)\n\n Does not open the database\n \"\"\"\n self.td = tempfile.mkdtemp()\n self.pg = 'PGDATABASE' in os.environ\n self.dbname = os.environ['PGDATABASE'] if self.pg\\\n else os.path.join(self.td, 'testDB.sqlite')\n dbprocessing.DButils.create_tables(\n self.dbname, dialect = 'postgresql' if self.pg else 'sqlite')\n\n def removeTestDB(self):\n \"\"\"Remove test database and working directory\n\n Assumes has a working (open) DBUtils instance in self.dbu, which\n will be closed.\n \"\"\"\n if self.pg:\n self.dbu.session.close()\n self.dbu.metadata.drop_all()\n self.dbu.closeDB() # Before the database is removed...\n del self.dbu\n shutil.rmtree(self.td)\n\n def loadData(self, filename):\n \"\"\"Load data into db from a JSON file\n\n Assumes existence of:\n * self.dbname: name of database (sqlite path or postgresql db)\n Must exist, with tables.\n * self.pg: if database is postgresql\n\n Creates:\n * self.dbu: open DButils instance\n\n Parameters\n ----------\n filename : :class:`str`\n Full path to the JSON file\n \"\"\"\n with open(filename, 'rt') as f:\n data = json.load(f)\n self.dbu = dbprocessing.DButils.DButils(self.dbname)\n for k, v in data.items():\n for row in v:\n for column in row:\n if column in ('code_start_date',\n 'code_stop_date',\n 'date_written',\n 'utc_file_date',\n ):\n row[column] \\\n = None if row[column] is None\\\n else datetime.datetime.strptime(\n row[column], '%Y-%m-%dT%H:%M:%S.%f').date()\n elif column in ('utc_start_time',\n 'utc_stop_time',\n 'check_date',\n 'file_create_date',\n 'processing_start_time',\n 'processing_end_time',\n ):\n row[column] \\\n = None if row[column] is None\\\n else datetime.datetime.strptime(\n row[column], '%Y-%m-%dT%H:%M:%S.%f')\n if 'unixtime' not in data:\n # Dump from old database w/o the Unixtime table\n insp = sqlalchemy.inspect(self.dbu.Unixtime)\n # persist_selectable added 1.3 (mapped_table deprecated)\n tbl = insp.persist_selectable\\\n if hasattr(insp, 'persist_selectable') else insp.mapped_table\n tbl.drop()\n self.dbu.metadata.remove(tbl)\n del self.dbu.Unixtime\n if data['productprocesslink']\\\n and 'yesterday' not in data['productprocesslink'][0]:\n # Dump from old database w/o yesterday/tomorrow,\n # set defaults.\n for row in data['productprocesslink']:\n row['yesterday'] = row['tomorrow'] 
= 0\n for t in dbprocessing.tables.names:\n if t not in data or not data[t]:\n # Data not in dump, nothing to insert\n continue\n insp = sqlalchemy.inspect(getattr(self.dbu, t.title()))\n table = insp.persist_selectable\\\n if hasattr(insp, 'persist_selectable') else insp.mapped_table\n ins = table.insert()\n self.dbu.session.execute(ins, data[t])\n idcolumn = '{}_id'.format(t)\n if self.pg and idcolumn in data[t][0]:\n maxid = max(row[idcolumn] for row in data[t])\n sel = \"SELECT pg_catalog.setval(pg_get_serial_sequence(\"\\\n \"'{table}', '{column}'), {maxid})\".format(\n table=t, column=idcolumn, maxid=maxid)\n self.dbu.session.execute(sel)\n self.dbu.commitDB()\n # Re-reference directories since new data loaded\n self.dbu.MissionDirectory = self.dbu.getMissionDirectory()\n self.dbu.CodeDirectory = self.dbu.getCodeDirectory()\n self.dbu.InspectorDirectory = self.dbu.getInspectorDirectory()\n\n\ndef add_scripts_to_path():\n \"\"\"Add the script build directory to Python path\n\n This allows unit testing of scripts.\n \"\"\"\n scriptpath = os.path.abspath(os.path.join(\n testsdir, '..', 'build', 'scripts-{}.{}'.format(*sys.version_info[:2])))\n if not scriptpath in sys.path and os.path.isdir(scriptpath):\n sys.path.insert(0, scriptpath)\n","sub_path":"unit_tests/dbp_testing.py","file_name":"dbp_testing.py","file_ext":"py","file_size_in_byte":12749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
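loadData above bulk-inserts rows that carry explicit primary keys, then calls pg_catalog.setval so PostgreSQL's serial sequences continue past the loaded ids. That repair step in isolation, as a hedged sketch (the table and column names are placeholders, and any open SQLAlchemy session would do):

def reset_serial(session, table, idcolumn, maxid):
    # After inserting explicit ids, move the serial sequence past them so the
    # next auto-generated id does not collide with a loaded row.
    session.execute(
        "SELECT pg_catalog.setval(pg_get_serial_sequence("
        "'{table}', '{column}'), {maxid})".format(
            table=table, column=idcolumn, maxid=maxid))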
+{"seq_id":"72717189","text":"def invers(lista):\r\n cont=len(lista)\r\n indice=-1\r\n invertida=''\r\n while cont>0:\r\n invertida+=lista[indice]\r\n indice-=1\r\n cont-=1\r\n print(invertida)\r\ndef palindromo(lista):\r\n inversa=invers(lista)\r\n indice=0\r\n cont=0\r\n for i in range(len(lista)):\r\n if (inversa[indice]==lista[indice]):\r\n indice+=1\r\n cont+=1\r\n else:\r\n print(\"No es palindromo.\")\r\n break\r\n if cont==len(lista):\r\n print (\"Es palindromo.\")\r\nlista=['o','s','o']\r\ninvers(lista)\r\npalindromo(lista)\r\n","sub_path":"inversa.py","file_name":"inversa.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"88668134","text":"__author__ = 'michal'\n\nimport numpy\nimport cv2\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom CameraWidget import CameraWidget\n\nclass IdentificationCameraWidget(CameraWidget):\n def __init__(self, device, recognizer, parent = None):\n super(IdentificationCameraWidget, self).__init__(device, parent)\n self.recognizer = recognizer\n\n def paintRectangles(self, faces):\n painter = QPainter(self)\n frame = QImage(self._frame.tostring(), self._frame.width, self._frame.height, QImage.Format_RGB888).rgbSwapped()\n painter.drawImage(QPoint(0, 0), frame)\n painter.setPen(Qt.red)\n for (x, y, w, h) in faces:\n face = numpy.asarray(self._frame[y:y+w,x:x+h])\n grayface = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n grayresizedface = cv2.resize(grayface, (100, 100))\n [p_class, p_confidence] = self.recognizer.recognizePerson(grayresizedface)\n if p_class != -1:\n text = self.recognizer.labels[p_class] + \"\\n\" + str(p_confidence)\n else:\n text = \"Nie rozpoznano\"\n painter.drawText(x,y,w,h, Qt.AlignLeft, text)\n painter.drawRect(x, y, w, h)\n","sub_path":"IdentificationCameraWidget.py","file_name":"IdentificationCameraWidget.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"294991348","text":"__author__ = 'ogoldberg'\n\n\nfrom collections import defaultdict\n\ndef extract_error_response(errors):\n errors_dict = defaultdict(list);\n unified_error_response = []\n for key, value in errors.iteritems():\n if key == 'productTitlesScoreRequest' and isinstance(value, dict):\n for key, value in value.iteritems():\n if key == 'wildSourcesList':\n for key, value in value.iteritems():\n for key, value in value.iteritems():\n if key == 'vendorName':\n errors_dict['vendorName'].append(value[0])\n if key == 'sourceKey':\n errors_dict['sourceKey'].append(value[0])\n if key == 'productRefId':\n errors_dict['productRefId'].append(value[0])\n if key == 'invocationId':\n errors_dict['invocationId'].append(value[0])\n\n for key in errors_dict.iterkeys():\n unified_error_response.append(''.join(key)+\": \")\n for error_msg in errors_dict.get(key):\n unified_error_response.append(error_msg)\n\n return unified_error_response","sub_path":"functions/errorResponseExtractor.py","file_name":"errorResponseExtractor.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"402420719","text":"style.use(\"ggplot\") \r\nfigure = plt.figure()\r\nax = figure.add_subplot(1,1,1)\r\n\r\ndef animate(x):\r\n\tdata = pd.read_csv('data2.csv')\r\n\tsentiment = data.Sentiment_value\r\n\txar = []\r\n\tx = 0 \r\n\tyar = []\r\n\ty = 0 \r\n\r\n\tfor line in sentiment: \r\n\t\tx += 1\r\n\t\tif \"positive\" in line:\r\n\t\t\ty += 1\r\n\t\telse:\r\n\t\t\ty -= 1 \r\n\r\n\t\txar.append(x)\r\n\t\tyar.append(y)\r\n\r\n\tax.clear()\r\n\tax.plot(xar,yar)\r\n\r\nanimatron = animation.FuncAnimation(figure,animate, interval = 10000)\r\nplt.show()\r\n","sub_path":"Andrei2.py","file_name":"Andrei2.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"152616451","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport MySQLdb as mysql\nfrom DBUtils.PooledDB import PooledDB\nfrom app import app\nfrom utils import log\nimport traceback\n\nclass DB:\n def __init__(self):\n self.host = app.config.get(\"DB_HOST\")\n self.user = app.config.get(\"DB_USER\")\n self.passwd = app.config.get(\"DB_PASSWD\")\n self.dbname = app.config.get(\"DB_NAME\")\n self.max_pool = app.config.get(\"DB_POOL_MAX\")\n self.min_pool = app.config.get(\"DB_POOL_MIN\")\n self.pool = PooledDB(\n mysql,\n maxcached = self.max_pool,\n mincached = self.min_pool,\n host = self.host,\n user = self.user,\n passwd = self.passwd,\n db = self.dbname,\n setsession = ['SET AUTOCOMMIT = 1'],\n charset = \"utf8\"\n )\n\n def connect_db(self):\n self.db = self.pool.connection()\n self.cur = self.db.cursor()\n\n def close_db(self):\n self.cur.close()\n self.db.close()\n\n def execute(self,sql):\n self.connect_db()\n self.cur.execute(sql)\n\n def get_list(self,table,fields):\n sql = \"select %s from %s\"%(\",\".join(fields),table)\n try:\n self.execute(sql)\n log.WriteLog(\"db\").info(\"sql:'%s'\" % sql)\n result = self.cur.fetchall()\n if result:\n result = [dict((k,row[i]) for i,k in enumerate(fields)) for row in result]\n else:\n result = {}\n return result\n except:\n log.WriteLog(\"db\").error(\"Execute: '%s' error: %s\"%(sql,traceback.format_exc()))\n finally:\n self.close_db()\n\n def get_one(self,table,fields,where):\n if isinstance(where,dict) and where:\n conditions = [\"%s='%s'\"%(k,v) for k,v in where.items()]\n sql = \"select %s from %s where %s\"%(\",\".join(fields),table,\" and \".join(conditions))\n try:\n log.WriteLog(\"db\").info(\"sql:'%s'\"%sql)\n self.execute(sql)\n result = self.cur.fetchone()\n if result:\n result = dict((v,result[i]) for i,v in enumerate(fields))\n else:\n result = {}\n return result\n except:\n log.WriteLog(\"db\").error(\"Execute: '%s' error: %s\"%(sql,traceback.format_exc()))\n finally:\n self.close_db()\n\n def update(self,table,fields):\n data = \",\".join([\"%s='%s'\"%(k,v) for k,v in fields.items()])\n sql = \"update %s set %s where id=%s\"%(table,data,fields[\"id\"])\n try:\n log.WriteLog(\"db\").info(\"sql:'%s'\" % sql)\n return self.execute(sql)\n except:\n log.WriteLog(\"db\").error(\"Execute: '%s' error: %s\" % (sql, traceback.format_exc()))\n finally:\n self.close_db()\n\n def create(self,table,fields):\n sql = \"insert into %s(%s)values('%s')\"%(table,\",\".join(fields.keys()),\"','\".join(fields.values()))\n try:\n log.WriteLog(\"db\").info(\"sql:'%s'\" % sql)\n return self.execute(sql)\n except:\n log.WriteLog(\"db\").error(\"Execute: '%s' error: %s\" % (sql, traceback.format_exc()))\n finally:\n self.close_db()\n\n def delete(self,table,where):\n if isinstance(where,dict) and where:\n conditions = [\"%s='%s'\"%(k,v) for k,v in where.items()]\n sql = \"delete from %s where %s\"%(table,\" and \".join(conditions))\n try:\n log.WriteLog(\"db\").info(\"sql:'%s'\" % sql)\n return self.execute(sql)\n except:\n log.WriteLog(\"db\").error(\"Execute: '%s' error: %s\" % (sql, traceback.format_exc()))\n finally:\n self.close_db()\n\nif __name__ == \"__main__\":\n db = DB()\n #db.get_list(\"users\",[\"id\",\"name\"])\n #db.get_one(\"users\",[\"id\",\"name\",\"name_cn\"],{\"name\":\"admin\",\"id\":2})\n #db.update(\"users\",{\"name\":\"jack\",\"id\":1})\n #db.create(\"users\",{\"id\":\"1\",\"name\":\"jack\"})\n 
db.delete(\"users\",{\"id\":1,\"name\":\"jack\"})\n\n\n","sub_path":"DButil.py","file_name":"DButil.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"191951636","text":"from nltk import sent_tokenize\nimport tokenizer\nimport pickle\nimport operator\n# import dictionary\n# import word2vec\n\n\nclass nGram:\n \"\"\"\n This is a class which represent n-gram and helps to\n calculate probability too.\n \"\"\"\n\n def __init__(self, filename=None, N=4):\n \"\"\"\n filename : open nGram data from file.\n N : value of n in nGram, it's default value\n is 4, so it's four gram and stores bigram, trigram and 4 gram.\n \"\"\"\n self.filename = filename\n self.N = N\n self.words = []\n self.gram = [{} for i in range(0, N)]\n\n def get_grams(self, tokens, n):\n # word_vec_er = word2vec.WordVectorRep(dict=self.dict)\n # tokens_vec = tokens\n tokens_vec = []\n for t in tokens:\n tokens_vec.append(self.words.index(t))\n return [tuple(tokens_vec[i:i + n]) for i in\n range(0, len(tokens_vec) - n + 1)]\n\n def add_tokens(self, tokens):\n \"\"\"\n Update nGram data with given token list.\n \"\"\"\n for t in tokens:\n if t not in self.words:\n self.words.append(t)\n\n for i in range(1, self.N + 1):\n ngram_tup = self.gram[i - 1].keys()\n n_gram = self.get_grams(tokens, i)\n # print(n_gram)\n for g in n_gram:\n if g in ngram_tup:\n self.gram[i - 1][g] += 1 # increase count by one\n else:\n self.gram[i - 1][g] = 1 # if first entry then count is 1\n\n def write(self, filename):\n \"\"\"\n We are going to use pickle to write data object to file.\n \"\"\"\n print(\"Writing file.\")\n try:\n file = open(filename, 'wb')\n file.write(pickle.dumps(self.__dict__))\n print(\"[ done ]\")\n except FileNotFoundError:\n print(\"[ error ]\")\n\n def open_from_file(self, filename):\n print(\"Opening NGram database from file \" + filename + ' ', end='')\n try:\n file = open(filename, 'rb')\n datapickle = file.read()\n file.close()\n self.__dict__ = pickle.loads(datapickle)\n print(\"[ done ]\")\n except FileNotFoundError:\n print(\"[ error ]\")\n\n def trainFromFile(self, filename):\n self.filename = filename\n print(\"Training NGram from file \" + filename, end=' .')\n try:\n file = open(filename, 'r', encoding='ascii',\n errors='surrogateescape')\n text_data = file.read().lower()\n # let's replace newline char with white space\n text_data = text_data.replace('\\n', ' ')\n # let's tokenize sentences from text_data.\n # I use sent_tokenize nltk function to tokenize the sentences.\n sents = sent_tokenize(text_data)\n # print('sent:', sents)\n # let's iterate over sentences and tokenize words and update\n # n-gram data\n tok = tokenizer.Tokenizer()\n for s in sents:\n # tokens = nltk.word_tokenize(s)\n tokens = tok.word_tokenize(s)\n # print(tokens, 'added!')\n self.add_tokens(tokens)\n print(' [ done ]')\n except FileNotFoundError:\n print(' [ error ]')\n\n def get_nw_ngram(self, pw, n):\n \"\"\"\n It returns list of next words having high probabilities by\n using last (n-1) words of pw(previous words) using n gram.\n \"\"\"\n next_words = []\n # if pw(previous words) count is lesser than n-1 then we\n # cannot use ngram(markov model) to find next word.\n if len(pw) < n - 1:\n return []\n # add these count to the next word's probability\n # increase probability if next word is found by higher grams.\n pro_dist = [0, 100, 200]\n\n previous_words = pw[-n + 1:]\n # print('previous words:', [self.words[x] for x in previous_words])\n for (wt, c) in self.gram[n - 1].items():\n words_list = list(wt)\n if previous_words == words_list[:-1]:\n # save next word with probability as tuple\n n_w = words_list[-1]\n # if n_w in self.gram[0].keys():\n # probab = float(c) / 
float(self.gram[0][(n_w,)])\n probab = c + pro_dist[n - 2]\n # print('type prob:', type(probab))\n # print('probab : ', probab)\n next_words.append((n_w, probab))\n next_words = list(set(next_words))\n next_words = sorted(next_words, key=operator.itemgetter(1),\n reverse=True)\n return next_words[:] # return list of (word,prob) tuple\n\n def prob(self, word_list):\n \"\"\"\n It returns unique next word from word_list list of tuples\n it adds probability if words are appearing more than once.\n \"\"\"\n words = list(set([w[0] for w in word_list]))\n words_with_probs = []\n for w in words:\n prob = 0\n for wl in word_list:\n if w == wl[0]:\n prob += wl[1]\n words_with_probs.append((w, prob))\n return words_with_probs\n\n def get_next_word(self, till):\n # get list of tupels (word_id, count)\n from_bigram = self.get_nw_ngram(till, 2)\n from_trigram = self.get_nw_ngram(till, 3)\n from_fourgram = self.get_nw_ngram(till, 4)\n\n word_list = from_bigram + from_trigram + from_fourgram\n word_list = self.prob(word_list)\n word_list = sorted(word_list, key=operator.itemgetter(1),\n reverse=True)\n return word_list[:]\n\n def get_count(self, sents, conts):\n count = 0\n for c in conts:\n if c in sents:\n count += 1\n return count\n\n def get_word_id(self, token):\n \"\"\"\n Return word id from ngram database.\n If word doesn't exist, then return -1.\n \"\"\"\n try:\n return self.words.index(token.lower())\n except:\n return -1\n\n def get_sent_from_ids(self, sent):\n re_sent = []\n for i in sent:\n if i == 0 or i == self.words.index(tokenizer.END_TOKEN):\n continue\n re_sent.append(self.words[i])\n return re_sent\n\n def print_grams(self):\n for i in range(2, self.N):\n print(i, 'GRAM===========')\n for k in self.gram[i]:\n for j in range(len(k)):\n print(self.words[j], end=',')\n print(':', self.gram[i][k])\n\n def sent_generate(self, out_sents, done_sents, till, count, contain):\n \"\"\"\n contain = ['president', 'nepal']\n it returns list of sentences that is constructed using this ngram model\n start : starting word for sentence\n contain : list object which contains words that should be contained in\n constructed sentence.\n \"\"\"\n\n if till not in done_sents:\n done_sents.append(till)\n n_words = self.get_next_word(till[:])\n # print('next words: ', [self.words[i[0]] for i in n_words])\n # print('till:', [self.words[i] for i in till])\n # print('## root_word', self.words[till[-1]])\n for w in n_words:\n # till_tmp = till_2[:]\n if w[0] == self.words.index(tokenizer.END_TOKEN) or \\\n count > 10 or len(out_sents) > 500:\n # print('_END_TOKEN_')\n # print('till:', till)\n contain_count = self.get_count(till[:], contain)\n if contain_count > 0:\n # print('sent_made :', [self.words[i] for i in till])\n # if w is tokenizer.END_TOKEN:\n if till[:] not in [x[0] for x in out_sents] and\\\n w[0] == self.words.index(tokenizer.END_TOKEN):\n # print('sent:', self.get_sent_from_ids(\n # till[:]), ' added -----------> !')\n out_sents.append((till[:], contain_count))\n print('sent:', self.get_sent_from_ids(\n till[:]), 'count:', contain_count)\n # return\n else:\n continue\n # print('no contain')\n else:\n # till_tmp.append(w[0])\n # print('till_tmp:', [self.words[i] for i in till_tmp])\n self.sent_generate(out_sents, done_sents,\n till[:] + [w[0]], count + 1, contain)\n else:\n pass\n # contain_count = self.get_count(till, contain)\n # out_sents.append((till, contain_count))\n # print(\"I don't know what you are talking 
about\")\n","sub_path":"languagemodel/ngram.py","file_name":"ngram.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"227255274","text":"from scipy.sparse import csr_matrix\nfrom scipy.sparse import spdiags\nfrom scipy.stats import multivariate_normal\nimport turicreate\nimport numpy as np\nimport sys\nimport time\nfrom copy import deepcopy\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.preprocessing import normalize\n\ndef sframe_to_scipy(x, column_name):\n \"\"\"\n Convert a dictionary column of an SFrame into a sparse matrix format where\n each (row_id, column_id, value) triple corresponds to the value of\n x[row_id][column_id], where column_id is a key in the dictionary.\n\n Example\n >>> sparse_matrix, map_key_to_index = sframe_to_scipy(sframe, column_name)\n \"\"\"\n assert type(x[column_name][0]) == dict, 'The chosen column must be dict type, representing sparse data.'\n\n ## Stack will transform x to have a row for each unique (row, key) pair.\n x = x.stack(column_name, ['feature', 'value'])\n\n ## Map feature words to integers and conversely (rev_mapping)\n mapping = {word: i for (i, word) in enumerate(sorted(x['feature'].unique()))}\n rev_mapping = {i: word for (i, word) in enumerate(sorted(x['feature'].unique()))}\n x['feature_id'] = x['feature'].apply(lambda x: mapping[x])\n\n ## Create numpy arrays that contain the data for the sparse matrix.\n row_id = np.array(x['id'])\n col_id = np.array(x['feature_id'])\n data = np.array(x['value'])\n\n width = x['id'].max() + 1\n height = x['feature_id'].max() + 1\n\n ## Create a sparse matrix.\n mat = csr_matrix((data, (row_id, col_id)), shape=(width, height))\n return (mat, mapping, rev_mapping)\n\ndef diag(array):\n n = len(array)\n return spdiags(array, 0, n, n)\n\ndef logpdf_diagonal_gaussian(x, mean, cov):\n \"\"\"\n Compute logpdf of a multivariate Gaussian distribution with diagonal covariance at a given point x.\n A multivariate Gaussian distribution with a diagonal covariance is equivalent\n to a collection of independent Gaussian random variables.\n\n x should be a sparse matrix. The logpdf will be computed for each row of x.\n mean and cov should be given as 1D numpy arrays\n mean[i] : mean of i-th variable\n cov[i] : variance of i-th variable\n \"\"\"\n\n n = x.shape[0]\n dim = x.shape[1]\n assert(dim == len(mean) and dim == len(cov))\n\n\n two_sigma = 2 * np.sqrt(cov)\n\n ## multiply each i-th column of x by (1 / (2 * sigma_i)), where sigma_i is sqrt of variance of i-th variable.\n scaled_x = x.dot(diag(1. / two_sigma))\n\n ## multiply each i-th entry of mean by (1 / (2 * sigma_i))\n scaled_mean = mean / two_sigma\n\n ## sum of pairwise squared Eulidean distances gives SUM[(x_i - mean_i)^2/(2*sigma_i^2)]\n return -np.sum(np.log(np.sqrt(2. 
* np.pi * cov))) - pairwise_distances(scaled_x, [scaled_mean], 'euclidean').flatten() ** 2\n\ndef log_sum_exp(x, axis):\n    \"\"\"\n    Compute the log of a sum of exponentials\n    \"\"\"\n    x_max = np.max(x, axis=axis)\n    if axis == 1:\n        return x_max + np.log(np.sum(np.exp(x - x_max[:, np.newaxis]), axis=1))\n    else:\n        return x_max + np.log(np.sum(np.exp(x - x_max), axis=0))\n\ndef EM_for_high_dimension(data, means, covs, weights, cov_smoothing=1e-5, maxiter=int(1e3), thresh=1e-4, verbose=False):\n    # cov_smoothing: specifies the default variance assigned to absent features in a cluster.\n    # If we were to assign zero variances to absent features, we would be overconfident,\n    # as we would hastily conclude that those features would NEVER appear in the cluster.\n    # We'd like to leave a little bit of possibility for absent features to show up later.\n    n, dim = data.shape[0], data.shape[1]\n    mu, Sigma = deepcopy(means), deepcopy(covs)\n    K = len(mu)\n    weights = np.array(weights)\n    ll = None\n    ll_trace = []\n\n    for i in range(maxiter):\n        ## E-step: compute responsibilities\n        logresp = np.zeros((n, K))\n        for k in range(K):\n            logresp[:, k] = np.log(weights[k]) + logpdf_diagonal_gaussian(data, mu[k], Sigma[k])\n\n        ll_new = np.sum(log_sum_exp(logresp, axis=1))\n        if verbose: print(ll_new)\n        sys.stdout.flush()\n\n        logresp -= np.vstack(log_sum_exp(logresp, axis=1))\n        resp = np.exp(logresp)\n        counts = np.sum(resp, axis=0)\n\n        ## M-step: update weights, means, covariances\n        weights = counts / np.sum(counts)\n        for k in range(K):\n            mu[k] = (diag(resp[:, k]).dot(data)).sum(axis=0) / counts[k]\n            mu[k] = mu[k].A1\n            Sigma[k] = diag(resp[:, k]).dot(data.multiply(data) - 2. * data.dot(diag(mu[k]))).sum(axis=0) + (mu[k] ** 2) * counts[k]\n            Sigma[k] = Sigma[k].A1 / counts[k] + cov_smoothing * np.ones(dim)\n\n        ## check for convergence in log-likelihood\n        ll_trace.append(ll_new)\n        if ll is not None and (ll_new - ll) < thresh and ll_new > -np.inf:\n            ll = ll_new\n            break\n        ll = ll_new\n    ##\n    return {'weights':weights, 'means':mu, 'covs':Sigma, 'loglik':ll_trace, 'resp':resp}\n","sub_path":"C04/common/em_utilities.py","file_name":"em_utilities.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
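log_sum_exp above subtracts the per-row maximum before exponentiating so that exp never overflows; SciPy ships the same routine, which makes a convenient cross-check. A small sketch:

import numpy as np
from scipy.special import logsumexp

x = np.array([[1000.0, 1000.5], [2.0, 3.0]])

# A naive log(sum(exp(x))) overflows at 1000; subtracting the row max first
# keeps every intermediate value finite.
x_max = np.max(x, axis=1)
stable = x_max + np.log(np.sum(np.exp(x - x_max[:, np.newaxis]), axis=1))

print(np.allclose(stable, logsumexp(x, axis=1)))  # True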
+{"seq_id":"621396739","text":"import math\n\nSTOPS = [\n ( 0, 5, \"record_number\", int), \n ( 5, 5, \"inclinometer_1_raw\", int), \n (10, 5, \"inclinometer_2_raw\", int), \n (15, 5, \"fluxgate_1_raw\", int), \n (20, 5, \"fluxgate_2_raw\", int), \n (25, 5, \"thermistor_high_raw\", int), \n #(30, 5, \"thermistor_difference_raw\", int), \n (35, 5, \"lower_diameter_max_raw\", int),\n (40, 5, \"lower_diameter_min_raw\", int),\n (45, 5, \"pressure_raw\", int), # renamed from lower_pressure_max_raw\n #(50, 5, \"lower_pressure_min_raw\", int), # not used at all\n (55, 5, \"upper_diameter_max_raw\", int),\n #(60, 5, \"upper_pressure_max_raw\", int), # not used at all\n #(65, 5, \"upper_pressure_min_raw\", int), # not used at all\n #(70, 5, \"thermistor_low_raw\", int),\n #(75, 1, \"analog_channel\", lambda x: int(x, 16)),\n (76, 1, \"temperature_pressure_transducer\", lambda x: ord(x)-92),\n (77, 2, \"upper_diameter_min_raw\", lambda x: 64*(ord(x[0])-32) + ord(x[1])-2080), \n (79, 1, \"bottom_sensor\", str),\n (80, 6, \"depth\", int)\n]\n\nclass ParseException(Exception):\n pass\n\n\ndef parseRecord(line, offsets):\n\n line = str(line)\n\n if len(line) != 86:\n raise ParseException(line)\n\n x = {} # the record\n \n # divide by and convert the values in the stops above\n for idx, length, label, fn in STOPS:\n value = fn(line[idx:idx+length])\n x[label] = value\n\n # fixing offsets (before adjustment)\n for offset in offsets:\n if offset.endswith(\"_raw\"):\n x[offset] += offsets[offset]\n\n # wrapping depth scale\n x[\"depth\"] /= 100.00 # scale from cm to meter\n\n if x[\"depth\"] > 5000:\n x[\"depth\"] -= 10000\n\n # bottom sensor switch\n # set to 0 (disengaged) if value is \"F\", else set to 1 (engaged)\n x[\"bottom_sensor\"] = 0 if x[\"bottom_sensor\"] == \"F\" else 1 \n\n # adjust pressure with temperature compensation\n T = x[\"temperature_pressure_transducer\"]\n v = 25 + x[\"pressure_raw\"]/1e4\n pt0 = 25.5841 - 1.3293E-04 * T + 9.6979E-07 * pow(T, 2)\n pa = 4145.54 - 0.0434 * T \n pb = 2481.98 - 0.1805 * T \n x[\"pressure\"] = 0.325 + pa * (1 - pt0 / v) - pb * (1 - pt0 / v) * (1 - pt0 / v)\n\n # calculate TVH\n # B = x[\"pressure\"]\n # x[\"total_vertical_height\"] = 74.2641 + 10.828804602 * B + 8.896475080677E-05 * pow(B, 2) - 3.688903345E-06 * pow(B, 3) + 1.2268161442E-08 * pow(B, 4)\n \n # convert calipers\n # calibrated DDJ, CP 13.05.2015\n v = (x[\"lower_diameter_max_raw\"] + x[\"lower_diameter_min_raw\"])/2.0\n v = max(v, 190.0)\n\n # first order\n # x[\"lower_diameter\"] = 0.07025316 * v + 87.8770776\n # second order regression\n x[\"lower_diameter\"] = -0.000128519*pow(v,2) + 0.218129743*v + 45.5505040392\n\n v = (x[\"upper_diameter_max_raw\"] + x[\"upper_diameter_min_raw\"])/2.0\n v = max(v, 130.0)\n # first order\n # x[\"upper_diameter\"] = 0.07382636 * v + 91.8389944\n # second order regression\n x[\"upper_diameter\"] = -0.0002558835*pow(v,2) + 0.3266519317*v + 29.7664153322\n\n\n # convert thermistor temperature\n v = x[\"thermistor_high_raw\"] / 1e3\n\n try:\n \n R0 = 20e3\n R1 = 1e3\n R2 = 680.0\n #RF = 90e3\n VT = 1.895\n \n Rn = R2 * .5 * (R0 + 20000) / (R2 + .5 * (R0 + 20000))\n Vn = VT * Rn / (R1 + Rn)\n R = 20000 * (90 * Vn + 20 * v) / (110 * Vn - 20 * v)\n\n Rn = R2 * .5 * (R0 + R) / (R2 + .5 * (R0 + R))\n Vn = VT * Rn / (R1 + Rn)\n R = 20000 * (90 * Vn + 20 * v) / (110 * Vn - 20 * v)\n\n # convert measured resistance into temperature\n # Steinhart-Hart equation\n temperature = (1 / (.0014726583 + .00023759863 * math.log(R) + .00000010218834 * pow(math.log(R),3)))\n 
temperature += -273.15  # convert from kelvin to celsius\n\n        # calibration 13.04.2015 DDJ/DTU Risoe\n        temperature = 0.9984*temperature-0.0184496\n\n    except ValueError:  # math domain error in logarithm\n        temperature = float('nan')\n\n    x[\"thermistor_high\"] = temperature\n\n\n    # calculate inclination and azimuth\n    # calibrated 25/3/15 ddj, cp\n    inclination_1_raw = x[\"inclinometer_1_raw\"]\n    inclination_2_raw = x[\"inclinometer_2_raw\"]\n\n    # calculate each component angle, calibrated in degrees\n    # we found each component by watching the raw inclination values settle close to\n    # the negative offsets: -16 and -11 respectively\n    inclination_1 = math.radians(0.01449 * (inclination_1_raw + 16))\n    inclination_2 = math.radians(0.01445 * (inclination_2_raw + 11))\n\n    # calculate the distance of each vector component\n    inclination_1x = math.tan(inclination_1)\n    inclination_2x = math.tan(inclination_2)\n\n    # calculate the distance of the inclination vector\n    inclination_x = math.sqrt(pow(inclination_1x, 2) + pow(inclination_2x, 2))\n\n    # calculate the angle formed by the pendulum\n    inclination = math.degrees(math.atan(inclination_x))\n\n    # magnetometers\n    fluxgate_1_raw = -x[\"fluxgate_1_raw\"]\n    fluxgate_2_raw = x[\"fluxgate_2_raw\"]\n\n    # the tube can rotate; what angle does it form with the ground?\n    rotation_tube_angle = math.degrees(math.atan2(inclination_2x, inclination_1x))\n\n    # the orientation of the magnetic field, relative to the tube\n    magnetic_vector_angle = math.degrees(math.atan2(fluxgate_2_raw, fluxgate_1_raw + 0.01))  # no div by 0\n\n    # azimuth is the combined angles, wrapped to [0, 360)\n    azimuth = (rotation_tube_angle + magnetic_vector_angle) % 360\n\n    x[\"inclination\"] = inclination\n    x[\"azimuth\"] = azimuth\n\n    # fixing offsets (after adjustment)\n    for offset in offsets:\n        if not offset.endswith(\"_raw\"):\n            x[offset] += offsets[offset]\n\n    return x","sub_path":"corrections.py","file_name":"corrections.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
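The thermistor conversion in parseRecord is the Steinhart-Hart equation, 1/T = A + B*ln(R) + C*ln(R)^3 with T in kelvin. Isolated with the record's coefficients (R is the thermistor resistance in ohms):

import math

# Steinhart-Hart coefficients as used in parseRecord above.
A, B, C = 0.0014726583, 0.00023759863, 0.00000010218834

def thermistor_celsius(R):
    lnR = math.log(R)
    kelvin = 1.0 / (A + B * lnR + C * lnR ** 3)
    return kelvin - 273.15  # kelvin to celsius

print(round(thermistor_celsius(20000.0), 2))  # temperature at R = 20 kOhm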
+{"seq_id":"196176044","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QByteArray, pyqtSignal, pyqtSlot\nfrom ui_mainwindow import Ui_MainWindow\nfrom setting import appsetting\nfrom listenpipe import ListenPipe\nfrom transpipe import TransPipe\n\n\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\n data_arrived = pyqtSignal(QByteArray)\n data_replied = pyqtSignal(QByteArray)\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent=parent)\n self.setupUi(self)\n self.edtIPListen.setText(appsetting.listen_ip)\n self.edtPortListen.setText(appsetting.listen_port)\n self.btnTCPListen.setChecked(True)\n self.btnUDPListen.setEnabled(False)\n\n self.edtLocalIPTrans1.setText(appsetting.trans_localip(1))\n self.edtIPTrans1.setText(appsetting.trans_ip(1))\n self.edtPortTrans1.setText(appsetting.trans_port(1))\n if appsetting.trans_protocol(1) == 'tcp':\n self.btnTCPTrans1.setChecked(True)\n else:\n self.btnUDPTrans1.setChecked(True)\n\n self.edtLocalIPTrans2.setText(appsetting.trans_localip(2))\n self.edtIPTrans2.setText(appsetting.trans_ip(2))\n self.edtPortTrans2.setText(appsetting.trans_port(2))\n if appsetting.trans_protocol(2) == 'tcp':\n self.btnTCPTrans2.setChecked(True)\n else:\n self.btnUDPTrans2.setChecked(True)\n\n self.listenpipe = ListenPipe()\n self.listenpipe.sig_data_arrived.connect(self.on_data_arrived)\n self.listenpipe.sig_listening_state.connect(self.on_listening_state)\n self.data_replied.connect(self.listenpipe.on_data_arrived)\n appsetting.sig_listen_changed.connect(self.listenpipe.start_listen)\n\n self.transpipes = {}\n self.transpipes[1] = TransPipe(1)\n self.data_arrived.connect(self.transpipes[1].on_data_arrived)\n self.transpipes[1].sig_Transing_state.connect(self.on_transing_state)\n self.transpipes[1].sig_data_arrived.connect(self.on_data_replied)\n appsetting.sig_trans_changed.connect(self.transpipes[1].start_trans)\n\n self.transpipes[2] = TransPipe(2)\n self.data_arrived.connect(self.transpipes[2].on_data_arrived)\n self.transpipes[2].sig_Transing_state.connect(self.on_transing_state)\n self.transpipes[2].sig_data_arrived.connect(self.on_data_replied)\n appsetting.sig_trans_changed.connect(self.transpipes[2].start_trans)\n\n appsetting.sig_listen_changed.emit()\n appsetting.sig_trans_changed.emit(1)\n appsetting.sig_trans_changed.emit(2)\n\n\n @pyqtSlot()\n def on_btnListenStart_clicked(self):\n print(\"on_btnListenStart_clicked\")\n appsetting.listen(self.edtIPListen.text()\n , self.edtPortListen.text()\n , 'tcp' if self.btnTCPListen.isChecked() else 'udp'\n , '1')\n\n @pyqtSlot()\n def on_btnListenStop_clicked(self):\n print(\"on_btnListenStop_clicked\")\n appsetting.listen(self.edtIPListen.text()\n , self.edtPortListen.text()\n , 'tcp' if self.btnTCPListen.isChecked() else 'udp'\n , '0')\n\n @pyqtSlot()\n def on_btnTrans1Start_clicked(self):\n print(\"on_btnTrans1Start_clicked\")\n appsetting.trans(1\n , self.edtLocalIPTrans1.text()\n , self.edtIPTrans1.text()\n , self.edtPortTrans1.text()\n , 'tcp' if self.btnTCPTrans1.isChecked() else 'udp'\n , '1')\n\n @pyqtSlot()\n def on_btnTrans1Stop_clicked(self):\n print(\"on_btnTrans1Stop_clicked\")\n appsetting.trans(1\n , self.edtLocalIPTrans1.text()\n , self.edtIPTrans1.text()\n , self.edtPortTrans1.text()\n , 'tcp' if self.btnTCPTrans1.isChecked() else 'udp'\n , '0')\n\n @pyqtSlot()\n def on_btnTrans2Start_clicked(self):\n print(\"on_btnTrans2Start_clicked\")\n appsetting.trans(2\n , self.edtLocalIPTrans2.text()\n , self.edtIPTrans2.text()\n , self.edtPortTrans2.text()\n , 
'tcp' if self.btnTCPTrans2.isChecked() else 'udp'\n , '1')\n\n @pyqtSlot()\n def on_btnTrans2Stop_clicked(self):\n print(\"on_btnTrans2Stop_clicked\")\n appsetting.trans(2\n , self.edtLocalIPTrans2.text()\n , self.edtIPTrans2.text()\n , self.edtPortTrans2.text()\n , 'tcp' if self.btnTCPTrans2.isChecked() else 'udp'\n , '0')\n\n @pyqtSlot(QByteArray)\n def on_data_arrived(self, data):\n print(\"%s on_data_arrived\" % self.__class__.__name__)\n self.data_arrived.emit(data)\n\n @pyqtSlot(QByteArray)\n def on_data_replied(self, data):\n print(\"%s on_data_replied\" % self.__class__.__name__)\n self.data_replied.emit(data)\n\n @pyqtSlot(bool)\n def on_listening_state(self, listening):\n print(\"on_listening_state\")\n self.btnListenStart.setEnabled(not listening)\n self.btnListenStop.setEnabled(listening)\n\n @pyqtSlot(int, bool)\n def on_transing_state(self, pipe, transing):\n print(\"on_transing_state %d\" % pipe)\n if pipe == 1:\n self.btnTrans1Start.setEnabled(not transing)\n self.btnTrans1Stop.setEnabled(transing)\n elif pipe == 2:\n print(transing)\n self.btnTrans2Start.setEnabled(not transing)\n self.btnTrans2Stop.setEnabled(transing)\n\n","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
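The mainwindow.py record above relies on one-to-many signal fan-out: the single data_arrived signal is connected to both TransPipe slots, so every payload reaches every pipe. A stripped-down sketch of that pattern (class names here are illustrative, not from the record):

from PyQt5.QtCore import QObject, pyqtSignal

class Source(QObject):
    data_arrived = pyqtSignal(object)  # payload type kept generic for the sketch

class Pipe(QObject):
    def __init__(self, pipe_id):
        super().__init__()
        self.pipe_id = pipe_id

    def on_data_arrived(self, data):
        print("pipe %d got %r" % (self.pipe_id, data))

source = Source()
pipes = [Pipe(1), Pipe(2)]
for pipe in pipes:
    source.data_arrived.connect(pipe.on_data_arrived)  # one signal, many slots
source.data_arrived.emit(b"payload")  # every connected pipe receives the same bytes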
+{"seq_id":"237842459","text":"# Copyright (c) 2019. Partners HealthCare and other members of\n# Forome Association\n#\n# Developed by Sergey Trifonov based on contributions by Joel Krier,\n# Michael Bouzinier, Shamil Sunyaev and other members of Division of\n# Genetics, Brigham and Women's Hospital\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#import sys\nimport abc\nfrom array import array\nfrom bitarray import bitarray\n\nfrom forome_tools.variants import VariantSet\nfrom app.eval.var_unit import (VarUnit, NumUnitSupport, EnumUnitSupport,\n ReservedNumUnit)\nfrom .val_stat import NumDiapStat, EnumStat\n#===============================================\nclass WS_Unit(VarUnit):\n def __init__(self, eval_space, unit_data,\n unit_kind = None, sub_kind = None):\n VarUnit.__init__(self, eval_space, unit_data, unit_kind, sub_kind)\n self.mExtractor = None\n\n @abc.abstractmethod\n def getRecVal(self, rec_no):\n return None\n\n @abc.abstractmethod\n def makeStat(self, condition, eval_h):\n return None\n\n @abc.abstractmethod\n def fillRecord(self, obj, rec_no):\n assert False\n\n#===============================================\nclass WS_NumericValueUnit(WS_Unit, NumUnitSupport):\n def __init__(self, eval_space, unit_data):\n WS_Unit.__init__(self, eval_space, unit_data, \"numeric\")\n assert self.getSubKind() in {\"float\", \"int\"}\n self._setScreened(self.getDescr()[\"min\"] is None)\n self.mArray = array(\"d\" if self.getSubKind() == \"float\" else \"q\")\n\n def getRecVal(self, rec_no):\n return self.mArray[rec_no]\n\n def makeStat(self, condition, eval_h):\n ret_handle = self.prepareStat()\n num_stat = NumDiapStat()\n for rec_no, _ in condition.iterSelection():\n num_stat.regValue(self.mArray[rec_no])\n num_stat.reportResult(ret_handle)\n return ret_handle\n\n def fillRecord(self, inp_data, rec_no):\n assert len(self.mArray) == rec_no\n self.mArray.append(inp_data.get(self.getInternalName()))\n\n#===============================================\nclass WS_EnumUnit(WS_Unit, EnumUnitSupport):\n def __init__(self, eval_space, unit_data, sub_kind = None):\n WS_Unit.__init__(self, eval_space, unit_data, \"enum\", sub_kind)\n variants_info = self.getDescr().get(\"variants\")\n if variants_info is None:\n self._setScreened()\n self.mVariantSet = None\n else:\n self.mVariantSet = VariantSet(\n [info[0] for info in variants_info])\n self._setScreened(\n sum(info[1] for info in variants_info) == 0)\n\n def getVariantSet(self):\n return self.mVariantSet\n\n def getVariantList(self):\n return list(iter(self.mVariantSet))\n\n def makeStat(self, condition, eval_h):\n ret_handle = self.prepareStat()\n enum_stat = EnumStat(self.mVariantSet)\n for rec_no, _ in condition.iterSelection():\n enum_stat.regValues(self.getRecVal((rec_no)))\n enum_stat.reportResult(ret_handle)\n return ret_handle\n\n#===============================================\nclass WS_StatusUnit(WS_EnumUnit):\n def __init__(self, eval_space, unit_data):\n WS_EnumUnit.__init__(self, eval_space, unit_data, \"status\")\n self.mArray = 
array('L')\n\n def getRecVal(self, rec_no):\n return {self.mArray[rec_no]}\n\n def fillRecord(self, inp_data, rec_no):\n assert len(self.mArray) == rec_no\n value = inp_data[self.getInternalName()]\n self.mArray.append(self.mVariantSet.indexOf(value))\n\n#===============================================\nclass WS_MultiSetUnit(WS_EnumUnit):\n def __init__(self, eval_space, unit_data):\n WS_EnumUnit.__init__(self, eval_space, unit_data)\n self.mArraySeq = [bitarray()\n for var in iter(self.mVariantSet)]\n\n def getRecVal(self, rec_no):\n ret = set()\n for var_no in range(len(self.mArraySeq)):\n if self.mArraySeq[var_no][rec_no]:\n ret.add(var_no)\n return ret\n\n def _setRecBit(self, rec_no, idx, value):\n self.mArraySeq[idx][rec_no] = value\n\n def fillRecord(self, inp_data, rec_no):\n values = inp_data.get(self.getInternalName())\n if values:\n idx_set = self.mVariantSet.makeIdxSet(values)\n else:\n idx_set = set()\n for var_no in range(len(self.mArraySeq)):\n self.mArraySeq[var_no].append(var_no in idx_set)\n\n#===============================================\nclass WS_MultiCompactUnit(WS_EnumUnit):\n def __init__(self, eval_space, unit_data):\n WS_EnumUnit.__init__(self, eval_space, unit_data)\n self.mArray = array('L')\n self.mPackSetDict = dict()\n self.mPackSetSeq = [set()]\n\n def getRecVal(self, rec_no):\n return self.mPackSetSeq[self.mArray[rec_no]]\n\n @staticmethod\n def makePackKey(idx_set):\n return '#'.join(map(str, sorted(idx_set)))\n\n def fillRecord(self, inp_data, rec_no):\n values = inp_data.get(self.getInternalName())\n if values:\n idx_set = self.mVariantSet.makeIdxSet(values)\n key = self.makePackKey(idx_set)\n idx = self.mPackSetDict.get(key)\n if idx is None:\n idx = len(self.mPackSetSeq)\n self.mPackSetDict[key] = idx\n self.mPackSetSeq.append(set(idx_set))\n else:\n idx = 0\n assert len(self.mArray) == rec_no\n self.mArray.append(idx)\n\n#===============================================\nclass WS_TranscriptNumericValueUnit(WS_Unit, NumUnitSupport):\n def __init__(self, eval_space, unit_data):\n WS_Unit.__init__(self, eval_space, unit_data, \"numeric\")\n assert self.getSubKind() in {\"transcript-float\", \"transcript-int\"}\n self._setScreened(self.getDescr()[\"min\"] is None)\n # sub-kind carries the transcript- prefix here, so compare against it;\n # comparing with plain \"float\" would always pick the integer array type\n self.mArray = array(\"d\" if self.getSubKind() == \"transcript-float\" else \"q\")\n self.mDefaultValue = self.getDescr()[\"default\"]\n\n def isDetailed(self):\n return True\n\n def getItemVal(self, item_idx):\n return self.mArray[item_idx]\n\n def makeStat(self, condition, eval_h):\n ret_handle = self.prepareStat()\n num_stat = NumDiapStat(True)\n for group_no, it_idx in condition.iterItemIdx():\n num_stat.regValue([self.mArray[it_idx]], group_no)\n num_stat.reportResult(ret_handle)\n ret_handle[\"detailed\"] = True\n return ret_handle\n\n def fillRecord(self, inp_data, rec_no):\n values = inp_data.get(self.getInternalName())\n if values:\n self.mArray.extend(values)\n else:\n self.mArray.append(self.mDefaultValue)\n\n#===============================================\nclass WS_TranscriptStatusUnit(WS_Unit, EnumUnitSupport):\n def __init__(self, eval_space, unit_data):\n WS_Unit.__init__(self, eval_space, unit_data,\n \"enum\", \"transcript-status\")\n variants_info = self.getDescr().get(\"variants\")\n self.mVariantSet = VariantSet(\n [info[0] for info in variants_info])\n self.mDefaultValue = self.mVariantSet.indexOf(\n self.getDescr()[\"default\"])\n assert self.mDefaultValue is not None\n self._setScreened(\n sum(info[1] for info in variants_info) == 0)\n self.mArray = array('L')\n\n def 
isDetailed(self):\n return True\n\n def getVariantSet(self):\n return self.mVariantSet\n\n def getItemVal(self, item_idx):\n return {self.mArray[item_idx]}\n\n def fillRecord(self, inp_data, rec_no):\n values = inp_data.get(self.getInternalName())\n if not values:\n self.mArray.append(self.mDefaultValue)\n else:\n self.mArray.extend([self.mVariantSet.indexOf(value)\n for value in values])\n\n def makeStat(self, condition, eval_h):\n ret_handle = self.prepareStat()\n enum_stat = EnumStat(self.mVariantSet, detailed = True)\n for group_no, it_idx in condition.iterItemIdx():\n enum_stat.regValues([self.mArray[it_idx]], group_no = group_no)\n enum_stat.reportResult(ret_handle)\n ret_handle[\"detailed\"] = True\n return ret_handle\n\n#===============================================\nclass WS_TranscriptMultisetUnit(WS_Unit, EnumUnitSupport):\n def __init__(self, eval_space, unit_data):\n WS_Unit.__init__(self, eval_space, unit_data,\n \"enum\", unit_data[\"sub-kind\"])\n variants_info = self.getDescr().get(\"variants\")\n self.mVariantSet = VariantSet(\n [info[0] for info in variants_info])\n self._setScreened(\n sum(info[1] for info in variants_info) == 0)\n self.mArray = array('L')\n self.mPackSetDict = dict()\n self.mPackSetSeq = [set()]\n\n def isDetailed(self):\n return True\n\n def getVariantSet(self):\n return self.mVariantSet\n\n def getItemVal(self, item_idx):\n return self.mPackSetSeq[self.mArray[item_idx]]\n\n def _fillOne(self, values):\n if values:\n idx_set = self.mVariantSet.makeIdxSet(values)\n key = WS_MultiCompactUnit.makePackKey(idx_set)\n idx = self.mPackSetDict.get(key)\n if idx is None:\n idx = len(self.mPackSetSeq)\n self.mPackSetDict[key] = idx\n self.mPackSetSeq.append(set(idx_set))\n else:\n idx = 0\n self.mArray.append(idx)\n\n def fillRecord(self, inp_data, rec_no):\n seq = inp_data.get(self.getInternalName())\n if not seq:\n self.mArray.append(0)\n else:\n for values in seq:\n self._fillOne(values)\n\n def makeStat(self, condition, eval_h):\n ret_handle = self.prepareStat()\n enum_stat = EnumStat(self.mVariantSet, detailed = True)\n for group_no, it_idx in condition.iterItemIdx():\n enum_stat.regValues(self.mPackSetSeq[self.mArray[it_idx]],\n group_no = group_no)\n enum_stat.reportResult(ret_handle)\n ret_handle[\"detailed\"] = True\n return ret_handle\n\n#===============================================\ndef loadWS_Unit(eval_space, unit_data):\n kind = unit_data[\"kind\"]\n if kind == \"numeric\":\n if unit_data[\"sub-kind\"].startswith(\"transcript-\"):\n return WS_TranscriptNumericValueUnit(eval_space, unit_data)\n return WS_NumericValueUnit(eval_space, unit_data)\n assert kind == \"enum\", \"Bad kind: \" + kind\n if unit_data[\"sub-kind\"] == \"transcript-status\":\n return WS_TranscriptStatusUnit(eval_space, unit_data)\n if unit_data[\"sub-kind\"] == \"transcript-multiset\":\n return WS_TranscriptMultisetUnit(eval_space, unit_data)\n if unit_data[\"sub-kind\"] == \"transcript-panels\":\n return WS_TranscriptMultisetUnit(eval_space, unit_data)\n if unit_data[\"sub-kind\"] == \"status\":\n return WS_StatusUnit(eval_space, unit_data)\n if kind == \"enum\" and unit_data.get(\"compact\"):\n return WS_MultiCompactUnit(eval_space, unit_data)\n return WS_MultiSetUnit(eval_space, unit_data)\n\n#===============================================\nclass WS_ReservedNumUnit(ReservedNumUnit):\n def __init__(self, eval_space, name, rec_func, sub_kind = \"int\"):\n ReservedNumUnit.__init__(self, eval_space, name, sub_kind)\n self.mRecFunc = rec_func\n\n def getRecVal(self, 
rec_no):\n return self.mRecFunc(rec_no)\n\n def isDetailed(self):\n return False\n","sub_path":"app/ws/ws_unit.py","file_name":"ws_unit.py","file_ext":"py","file_size_in_byte":11936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
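WS_MultiCompactUnit in the ws_unit.py record above saves memory by interning value sets: each distinct set is stored once in a pool, keyed by a canonical string, and every record keeps only a small integer index into that pool. A standalone sketch of the interning idea:

from array import array

class CompactSets:
    """Intern duplicate sets: store one small index per record."""
    def __init__(self):
        self.index = array('L')  # per-record position into the pool
        self.pool = [set()]      # position 0 is reserved for the empty set
        self.lookup = {}         # canonical key -> pool position

    @staticmethod
    def key(idx_set):
        return '#'.join(map(str, sorted(idx_set)))

    def add(self, idx_set):
        if not idx_set:
            self.index.append(0)
            return
        k = self.key(idx_set)
        pos = self.lookup.get(k)
        if pos is None:
            pos = len(self.pool)
            self.lookup[k] = pos
            self.pool.append(set(idx_set))
        self.index.append(pos)

    def get(self, rec_no):
        return self.pool[self.index[rec_no]]

cs = CompactSets()
cs.add({1, 3}); cs.add(set()); cs.add({3, 1})
assert cs.get(0) is cs.get(2)  # duplicate sets share one stored object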
+{"seq_id":"589936192","text":"import matplotlib.pyplot as plt\n\nxValues = [x for x in range(1, 1001)]\nyValues = [x**2 for x in xValues]\n\n# cmap: colormap 是一系列颜色映射,从起始颜色渐变到结束颜色\nplt.scatter(xValues, yValues, s=10, c=yValues, cmap=plt.cm.PuBu)\nplt.title(\"Square Numbers\", fontsize=24)\nplt.xlabel(\"Value\", fontsize=14)\nplt.ylabel(\"Square of Value\", fontsize=14)\nplt.tick_params(axis='both', labelsize=14, which='major')\nplt.axis([0,1050, 0, 1010000])\n\n# plt.show()\nplt.savefig('Square Fig.png')\n","sub_path":"scatter_squares.py","file_name":"scatter_squares.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"197122453","text":"import datetime\n\nfrom corehq.form_processor.interfaces.dbaccessors import FormAccessors\nfrom corehq.form_processor.interfaces.processor import FormProcessorInterface\nfrom corehq.form_processor.models import Attachment\nfrom corehq.form_processor.utils import convert_xform_to_json, adjust_datetimes\nfrom couchforms import XMLSyntaxError\nfrom couchforms.exceptions import DuplicateError, MissingXMLNSError\nfrom dimagi.utils.couch import LockManager, ReleaseOnError\n\n\nclass MultiLockManager(list):\n\n def __enter__(self):\n return [lock_manager.__enter__() for lock_manager in self]\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n for lock_manager in self:\n lock_manager.__exit__(exc_type, exc_val, exc_tb)\n\n\nclass FormProcessingResult(object):\n\n def __init__(self, submitted_form, existing_duplicate=None):\n self.submitted_form = submitted_form\n self.existing_duplicate = existing_duplicate\n\n if submitted_form.is_duplicate:\n assert existing_duplicate is None\n\n if existing_duplicate:\n assert existing_duplicate.is_deprecated\n\n self.interface = FormProcessorInterface(self.submitted_form.domain)\n\n def _get_form_lock(self, form_id):\n return self.interface.acquire_lock_for_xform(form_id)\n\n def get_locked_forms(self):\n if self.existing_duplicate:\n # Lock docs with their original ID's (before they got switched during deprecation)\n new_id = self.existing_duplicate.form_id\n old_id = self.existing_duplicate.orig_id\n return MultiLockManager([\n LockManager(self.submitted_form, self._get_form_lock(new_id)),\n LockManager(self.existing_duplicate, self._get_form_lock(old_id)),\n ])\n else:\n return MultiLockManager([\n LockManager(self.submitted_form, self._get_form_lock(self.submitted_form.form_id))\n ])\n\n\nclass LockedFormProcessingResult(FormProcessingResult):\n\n def __init__(self, submitted_form):\n super(LockedFormProcessingResult, self).__init__(submitted_form)\n assert submitted_form.is_normal\n self.lock = self._get_form_lock(submitted_form.form_id)\n\n def get_locked_forms(self):\n return MultiLockManager([LockManager(self.submitted_form, self.lock)])\n\n\ndef process_xform_xml(domain, instance, attachments=None):\n \"\"\"\n Create a new xform to ready to be saved to a database in a thread-safe manner\n Returns a LockManager containing the new XFormInstance(SQL) and its lock,\n or raises an exception if anything goes wrong.\n\n attachments is a dictionary of the request.FILES that are not the xform;\n key is parameter name, value is django MemoryFile object stream\n \"\"\"\n attachments = attachments or {}\n\n try:\n return _create_new_xform(domain, instance, attachments=attachments)\n except (MissingXMLNSError, XMLSyntaxError) as e:\n return _get_submission_error(domain, instance, e)\n except DuplicateError as e:\n return _handle_id_conflict(e.xform, domain)\n\n\ndef _create_new_xform(domain, instance_xml, attachments=None):\n \"\"\"\n create but do not save an XFormInstance from an xform payload (xml_string)\n optionally set the doc _id to a predefined value (_id)\n return doc _id of the created doc\n\n `process` is transformation to apply to the form right before saving\n This is to avoid having to save multiple times\n\n If xml_string is bad xml\n - raise couchforms.XMLSyntaxError\n :param domain:\n\n \"\"\"\n from corehq.form_processor.interfaces.processor import FormProcessorInterface\n interface = FormProcessorInterface(domain)\n\n assert attachments is not None\n form_data = convert_xform_to_json(instance_xml)\n if not 
form_data.get('@xmlns'):\n raise MissingXMLNSError(\"Form is missing a required field: XMLNS\")\n\n adjust_datetimes(form_data)\n\n xform = interface.new_xform(form_data)\n xform.domain = domain\n\n # Maps all attachments to uniform format and adds form.xml to list before storing\n attachments = map(\n lambda a: Attachment(name=a[0], raw_content=a[1], content_type=a[1].content_type),\n attachments.items()\n )\n attachments.append(Attachment(name='form.xml', raw_content=instance_xml, content_type='text/xml'))\n interface.store_attachments(xform, attachments)\n\n result = LockedFormProcessingResult(xform)\n with ReleaseOnError(result.lock):\n if interface.is_duplicate(xform.form_id):\n raise DuplicateError(xform)\n\n return result\n\n\ndef _get_submission_error(domain, instance, error):\n \"\"\"\n Handle's a hard failure from posting a form to couch.\n :returns: xform error instance with raw xml as attachment\n \"\"\"\n try:\n message = unicode(error)\n except UnicodeDecodeError:\n message = unicode(str(error), encoding='utf-8')\n\n xform = FormProcessorInterface(domain).submission_error_form_instance(instance, message)\n return FormProcessingResult(xform)\n\n\ndef _handle_id_conflict(xform, domain):\n \"\"\"\n For id conflicts, we check if the files contain exactly the same content,\n If they do, we just log this as a dupe. If they don't, we deprecate the\n previous form and overwrite it with the new form's contents.\n \"\"\"\n\n assert domain\n conflict_id = xform.form_id\n\n interface = FormProcessorInterface(domain)\n if interface.is_duplicate(conflict_id, domain):\n # It looks like a duplicate/edit in the same domain so pursue that workflow.\n return _handle_duplicate(xform)\n else:\n # the same form was submitted to two domains, or a form was submitted with\n # an ID that belonged to a different doc type. these are likely developers\n # manually testing or broken API users. 
just resubmit with a generated ID.\n xform = interface.assign_new_id(xform)\n return FormProcessingResult(xform)\n\n\ndef _handle_duplicate(new_doc):\n \"\"\"\n Handle duplicate xforms and xform editing ('deprecation')\n\n existing doc *must* be validated as an XFormInstance in the right domain\n and *must* include inline attachments\n\n \"\"\"\n interface = FormProcessorInterface(new_doc.domain)\n conflict_id = new_doc.form_id\n existing_doc = FormAccessors(new_doc.domain).get_with_attachments(conflict_id)\n\n existing_md5 = existing_doc.xml_md5()\n new_md5 = new_doc.xml_md5()\n\n if existing_md5 != new_md5:\n # if the form contents are not the same:\n # - \"Deprecate\" the old form by making a new document with the same contents\n # but a different ID and a doc_type of XFormDeprecated\n # - Save the new instance to the previous document to preserve the ID\n existing_doc, new_doc = apply_deprecation(existing_doc, new_doc, interface)\n\n return FormProcessingResult(new_doc, existing_doc)\n else:\n # follow standard dupe handling, which simply saves a copy of the form\n # but a new doc_id, and a doc_type of XFormDuplicate\n duplicate = interface.deduplicate_xform(new_doc)\n return FormProcessingResult(duplicate)\n\n\ndef apply_deprecation(existing_xform, new_xform, interface=None):\n # if the form contents are not the same:\n # - \"Deprecate\" the old form by making a new document with the same contents\n # but a different ID and a doc_type of XFormDeprecated\n # - Save the new instance to the previous document to preserve the ID\n\n interface = interface or FormProcessorInterface(existing_xform.domain)\n\n if existing_xform.persistent_blobs:\n for name, meta in existing_xform.persistent_blobs.items():\n with existing_xform.fetch_attachment(name, stream=True) as content:\n existing_xform.deferred_put_attachment(\n content,\n name=name,\n content_type=meta.content_type,\n content_length=meta.content_length,\n )\n new_xform.form_id = existing_xform.form_id\n existing_xform = interface.assign_new_id(existing_xform)\n existing_xform.orig_id = new_xform.form_id\n\n # and give the new doc server data of the old one and some metadata\n new_xform.received_on = existing_xform.received_on\n new_xform.deprecated_form_id = existing_xform.form_id\n new_xform.edited_on = datetime.datetime.utcnow()\n existing_xform.edited_on = new_xform.edited_on\n\n return interface.apply_deprecation(existing_xform, new_xform)\n","sub_path":"corehq/form_processor/parsers/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
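The MultiLockManager in the form.py record above treats a list of context managers as a single with-target. A minimal sketch of the same composition; note that, unlike contextlib.ExitStack, this simple form does not unwind already-entered managers if a later __enter__ raises:

import tempfile

class MultiCM(list):
    """Enter and exit a list of context managers as one with-target."""
    def __enter__(self):
        return [cm.__enter__() for cm in self]

    def __exit__(self, exc_type, exc_val, exc_tb):
        for cm in self:
            cm.__exit__(exc_type, exc_val, exc_tb)

with MultiCM([tempfile.TemporaryFile(), tempfile.TemporaryFile()]) as files:
    for f in files:
        f.write(b"held together")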
+{"seq_id":"106798229","text":"# encoding: utf-8\n\"\"\"\nAPI Interface Module\n\"\"\"\n\nimport json\nimport time\nfrom calendar import timegm\nimport requests\nfrom FlickAuth import FlickAuth\nimport util\nimport definitions\n\n\nclass FlickPriceApi(object):\n \"\"\" Flick Electric API Interface \"\"\"\n\n def __init__(self, username, password, client_id, client_secret):\n self.data = None\n self.had_expired = False\n auth_instance = FlickAuth(username, password, client_id, client_secret)\n self.session = auth_instance.get_token()\n self.get_raw_data(True)\n\n def update(self, write_to_file=False):\n \"\"\" Pull Updates From Flick Servers\"\"\"\n self.had_expired = True\n print(\"getting the latest price\")\n headers = {\n \"Authorization\": \"Bearer %s\" % self.session[\"id_token\"]\n }\n req = requests.get(definitions.FLICK_PRICE_ENDPOINT, headers=headers)\n if req.status_code is not 200:\n # If we don't get a success response, we raise an exception.\n raise Exception({\n \"status\": req.status_code,\n \"message\": req.text\n })\n # A 200OK response will contain the JSON payload.\n # TODO: Create Exception Handler to catch failed json.load.\n response = json.loads(req.text)\n if write_to_file is True:\n util.save_json_file(definitions.FLICK_PRICE_DATA_STORE, response)\n self.data = response\n return response\n\n def price_expired(self):\n \"\"\" Checks if spot price has expired \"\"\"\n now_epoch = int(time.time())\n next_epoch = self.get_next_update_time(True)\n # print \"%d\" % nextEpoch\n # print \"%d\" % nowEpoch\n return next_epoch < now_epoch\n\n def price_had_expired(self):\n return self.had_expired\n\n @staticmethod\n def get_update_time(update, is_epoch):\n \"\"\" Gets the prev/next update time \"\"\"\n if is_epoch is True:\n if 'Z' not in update:\n update = update.replace(\".000+\", \"+\")\n update = update.replace(\"+00:00\", \"\")\n utc_time = time.strptime(update, \"%Y-%m-%dT%H:%M:%S\")\n else:\n utc_time = time.strptime(update, \"%Y-%m-%dT%H:%M:%SZ\")\n epoch = timegm(utc_time)\n return epoch\n return update\n\n def get_raw_data(self, write_to_file=False):\n \"\"\" Public method to get pricing data \"\"\"\n self.data = util.get_json_file(definitions.FLICK_PRICE_DATA_STORE)\n if not self.data:\n self.data = self.update(write_to_file)\n else:\n self.had_expired = self.price_expired()\n if self.had_expired is True:\n self.data = self.update(True)\n return self.data\n\n def get_price_per_kwh(self):\n \"\"\" Get's the pure price per kwh as a number\"\"\"\n return self.data[\"needle\"][\"price\"]\n\n def get_price_breakdown(self):\n \"\"\" Get the price, broken down into it's constituent parts\"\"\"\n charges = {}\n if \"needle\" in self.data and \"components\" in self.data[\"needle\"]:\n for item in self.data[\"needle\"][\"components\"]:\n value = float(item[\"value\"])\n if item[\"charge_method\"] == \"kwh\":\n if value != 0:\n charges[item[\"charge_setter\"]] = value\n elif item[\"charge_method\"] == \"spot_price\":\n charges[\"spot_price\"] = value\n\n return charges\n\n def get_last_update_time(self, is_epoch=False):\n return self.get_update_time(self.data[\"needle\"][\"start_at\"], is_epoch)\n\n def get_next_update_time(self, is_epoch=False):\n return self.get_update_time(self.data[\"needle\"][\"end_at\"], is_epoch)\n","sub_path":"src/FlickPriceApi.py","file_name":"FlickPriceApi.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"431328153","text":"#! /usr/bin/python\n# vim:fileencoding=utf-8\n\n# Bibliotecas\nimport os,sys,datetime,re,MySQLdb\nfrom pprint import pprint # para debug...\n\nclass LogFileProcess:\n \"\"\"A simple example class\"\"\"\n i = 12345\n def f(self):\n return 'hello world'\n\n\nclass SimpleDataBase:\n \"\"\" Deals with the connection and transferences with the \"spam_data\" mysql\n database.\n \"\"\"\n\n def __init__(self, DB_HOST,DB_USER,DB_PASSWD,DB_SCHEMA):\n\n # Make the connection.\n\n try:\n self.conn = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWD, DB_SCHEMA)\n except:\n erro = \"Erro: SimpleDataBase: Não foi possivel conectar ao banco.\"\n print >>sys.stderr, erro\n print >>sys.stdout, erro\n raise\n\n # Important Queries:\n self.QUERY_MAIN = \"\"\"INSERT INTO `spam_data`.`fatorial_dados_gerais`\n (`id`, `dia`, `mes`, `ano`, `targ`, `ip`, `num_men`, `num_con`,\n `n_rcpt`, `n_dom`, `acc_size`)\n VALUES ( NULL , %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n\n self.QUERY_GET_ID_GENERIC = [\n \"SELECT `id` FROM `fatorial_dados_gerais_\",\n \"` WHERE `\",\n \"` = %s\"\n ]\n\n self.QUERY_INSERT_GENERIC = [\n \"INSERT INTO `spam_data`.`fatorial_dados_gerais_\",\n \"` (`id`, `\",\n \"`) VALUES (NULL, %s)\"\n ]\n\n self.QUERY_INSERT_LIG_GENERIC = [\n \"INSERT INTO `spam_data`.`fatorial_dados_gerais_lig_\",\n \"` (`id`, `id_\",\n \"`) VALUES (%s, %s);\"\n ]\n\n self.QUERY_GET_ID_DOMAIN = 'domain'.join(QUERY_GET_ID_GENERIC)\n self.QUERY_INSERT_DOMAIN = 'domain'.join(QUERY_INSERT_GENERIC)\n self.QUERY_INSERT_LIG_DOMAIN = 'domain'.join(QUERY_INSERT_LIG_GENERIC)\n\n self.QUERY_GET_ID_PROTOCOL = 'protocol'.join(QUERY_GET_ID_GENERIC)\n self.QUERY_INSERT_PROTOCOL = 'protocol'.join(QUERY_INSERT_GENERIC)\n self.QUERY_INSERT_LIG_PROTOCOL = 'protocol'.join(\n QUERY_INSERT_LIG_GENERIC)\n self.QUERY_INSERT_RCPT = ''' INSERT INTO\n `spam_data`.`fatorial_dados_gerais_rcpt`\n (`id`, `mail`, `domain`) VALUES (%s, %s, %s); '''\n\n \n\n\n\n\nif __name__ == '__main__':\n function()\n","sub_path":"spamprocess/supertobyprocess.py","file_name":"supertobyprocess.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"626611531","text":"import datetime\nimport logging\nimport re\n\nimport pendulum\nimport pytest\nfrom click.testing import CliRunner\nfrom dagster.core.test_utils import instance_for_test\nfrom dagster.daemon.cli import run_command\nfrom dagster.daemon.controller import DagsterDaemonController\nfrom dagster.daemon.daemon import SchedulerDaemon\nfrom dagster.daemon.run_coordinator.queued_run_coordinator_daemon import QueuedRunCoordinatorDaemon\nfrom dagster.daemon.types import DaemonType\n\n\ndef test_scheduler_instance():\n with instance_for_test(\n overrides={\n \"scheduler\": {\"module\": \"dagster.core.scheduler\", \"class\": \"DagsterDaemonScheduler\",},\n }\n ) as instance:\n controller = DagsterDaemonController(instance)\n\n daemons = controller.daemons\n\n assert len(daemons) == 2\n assert any(isinstance(daemon, SchedulerDaemon) for daemon in daemons)\n\n\ndef test_run_coordinator_instance():\n with instance_for_test(\n overrides={\n \"run_coordinator\": {\n \"module\": \"dagster.core.run_coordinator.queued_run_coordinator\",\n \"class\": \"QueuedRunCoordinator\",\n },\n }\n ) as instance:\n controller = DagsterDaemonController(instance)\n\n daemons = controller.daemons\n\n assert len(daemons) == 3\n assert any(isinstance(daemon, QueuedRunCoordinatorDaemon) for daemon in daemons)\n\n\ndef _scheduler_ran(caplog):\n for log_tuple in caplog.record_tuples:\n logger_name, _level, text = log_tuple\n\n if (\n logger_name == \"SchedulerDaemon\"\n and \"Not checking for any runs since no schedules have been started.\" in text\n ):\n return True\n\n return False\n\n\ndef _run_coordinator_ran(caplog):\n for log_tuple in caplog.record_tuples:\n logger_name, _level, text = log_tuple\n\n if logger_name == \"QueuedRunCoordinatorDaemon\" and \"Poll returned no queued runs.\" in text:\n return True\n\n return False\n\n\ndef test_ephemeral_instance():\n runner = CliRunner()\n with pytest.raises(\n Exception,\n match=re.escape(\n \"dagster-daemon can't run using an in-memory instance. 
Make sure the DAGSTER_HOME environment variable has been set correctly and that you have created a dagster.yaml file there.\"\n ),\n ):\n runner.invoke(run_command, env={\"DAGSTER_HOME\": \"\"}, catch_exceptions=False)\n\n\ndef test_different_intervals(caplog):\n with instance_for_test(\n overrides={\n \"scheduler\": {\"module\": \"dagster.core.scheduler\", \"class\": \"DagsterDaemonScheduler\",},\n \"run_coordinator\": {\n \"module\": \"dagster.core.run_coordinator.queued_run_coordinator\",\n \"class\": \"QueuedRunCoordinator\",\n \"config\": {\"dequeue_interval_seconds\": 5},\n },\n }\n ) as instance:\n init_time = pendulum.now(\"UTC\")\n controller = DagsterDaemonController(instance)\n\n assert caplog.record_tuples == [\n (\n \"dagster-daemon\",\n logging.INFO,\n \"instance is configured with the following daemons: ['QueuedRunCoordinatorDaemon', 'SchedulerDaemon', 'SensorDaemon']\",\n )\n ]\n\n controller.run_iteration(init_time)\n\n scheduler_daemon = controller.get_daemon(DaemonType.SCHEDULER)\n run_daemon = controller.get_daemon(DaemonType.QUEUED_RUN_COORDINATOR)\n\n assert scheduler_daemon\n assert (\n controller.get_daemon_last_iteration_time(scheduler_daemon.daemon_type()) == init_time\n )\n assert _scheduler_ran(caplog)\n\n assert run_daemon\n assert controller.get_daemon_last_iteration_time(run_daemon.daemon_type()) == init_time\n assert _run_coordinator_ran(caplog)\n caplog.clear()\n\n next_time = init_time + datetime.timedelta(seconds=5)\n controller.run_iteration(next_time)\n\n # Run coordinator does another iteration, scheduler does not\n assert (\n controller.get_daemon_last_iteration_time(scheduler_daemon.daemon_type()) == init_time\n )\n assert not _scheduler_ran(caplog)\n\n assert controller.get_daemon_last_iteration_time(run_daemon.daemon_type()) == next_time\n assert _run_coordinator_ran(caplog)\n caplog.clear()\n\n next_time = init_time + datetime.timedelta(seconds=30)\n controller.run_iteration(next_time)\n\n # 30 seconds later both daemons do another iteration\n assert (\n controller.get_daemon_last_iteration_time(scheduler_daemon.daemon_type()) == next_time\n )\n assert _scheduler_ran(caplog)\n\n assert controller.get_daemon_last_iteration_time(run_daemon.daemon_type()) == next_time\n assert _run_coordinator_ran(caplog)\n","sub_path":"python_modules/dagster/dagster_tests/daemon_tests/test_dagster_daemon.py","file_name":"test_dagster_daemon.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"509350673","text":"from decimal import Decimal\nimport random\n\nimport numpy as np\n\n\n# Detective, except:\n# - use nprtt instead of tit-for-tat for the forgiveness heuristic\n# - detect ftft and spam DCDCDCDCDC to take advantage of it\n# - detect alwaysCooperate and spam DDDDD to take advantage of it, at the cost of the\n# grimTrigger\ndef strategy(history, memory):\n \"\"\"\n :history: 2d numpy array of our and opponent past moves\n :memory: mode string, which may be None, 'tit-for-tat', 'alternate', or 'defect'\n \"\"\"\n num_rounds = history.shape[1]\n testing_schedule = [1, 0, 0, 1, 1]\n max_defection_threshold = Decimal(1) / Decimal(2) # do not forgive high defections\n\n if num_rounds < len(testing_schedule): # intitial testing phase\n choice = testing_schedule[num_rounds]\n elif num_rounds == len(testing_schedule): # time to transition to our modes\n opponent_moves = history[1]\n opponent_stats = dict(zip(*np.unique(opponent_moves, return_counts=True)))\n if opponent_stats.get(0, 0) < 1: # they never defected, take advantage of them\n choice = 0\n memory = \"defect\"\n elif opponent_stats.get(0, 0) == len(testing_schedule): # they always defect\n choice = 0\n memory = \"defect\"\n elif opponent_moves[2] == 1 and opponent_moves[3] == 0: # ftft detected\n choice = 0\n memory = \"alternate\"\n else:\n choice = 1\n memory = \"tit-for-tat\"\n else: # num_rounds > len(testing_schedule)\n if memory == \"defect\":\n choice = 0\n memory = \"defect\"\n elif memory == \"alternate\":\n our_last_move = history[0, -1] if num_rounds > 0 else 1\n choice = 0 if our_last_move else 1\n memory = \"alternate\"\n else: # tit-for-tat or None\n opponents_last_move = history[1, -1] if num_rounds >= 1 else 1\n our_second_last_move = history[0, -2] if num_rounds >= 2 else 1\n opponent_history = history[1, 0:num_rounds]\n opponent_stats = dict(zip(*np.unique(opponent_history, return_counts=True)))\n opponent_defection_rate = Decimal(int(opponent_stats.get(0, 0))) / Decimal(\n num_rounds\n )\n\n be_patient = opponent_defection_rate <= max_defection_threshold\n\n choice = (\n 1\n if (\n opponents_last_move == 1\n or (be_patient and our_second_last_move == 0)\n )\n else 0\n )\n memory = \"tit-for-tat\"\n\n return choice, memory\n","sub_path":"code/strats/ultimateDetective.py","file_name":"ultimateDetective.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"274371740","text":"# Tic-tac-toe\n# A very simple implementation\n\nboard = [[\"7\", \"8\", \"9\"], [\"4\", \"5\", \"6\"], [\"1\", \"2\", \"3\"]]\nplayer = \"X\"\nwon = False\nwinner = \"\"\n\nclass bcolors:\n HEADER = '\\033[95m'\n Cyan = '\\033[94m'\n O = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n X = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef switchplayer(player):\n if player == \"X\":\n return \"O\"\n return \"X\"\n\ndef getmove(player, board):\n '''Gets and checks for valid moves, returns x and y coordinates'''\n x, y = -1, -1\n print(\"Player: \" + player)\n while x == -1:\n y, x = findpos(input(\"Enter your move:\"), board)\n if x == -1:\n print(\"Position is not open.\")\n print(\"X:\" + str(x) + \" Y:\" + str(y))\n board[y][x] = player\n return board\n\ndef findpos(move, board):\n '''\n Given a move and the board, returns row and column of the position\n If none is found (spot is taken), then return -1, -1\n '''\n foundRow, foundCol = -1, -1\n for y in range(len(board)):\n for x in range(len(board[y])):\n if board[y][x] == move:\n foundRow = y\n foundCol = x\n return foundRow, foundCol\n\ndef printboard(board):\n print(\" _____________\")\n output = \"\"\n for y in board:\n pline = \" |\"\n for x in y:\n if x == \"X\":\n output = bcolors.X + x + bcolors.ENDC\n elif x == \"O\":\n output = bcolors.O + x + bcolors.ENDC\n else:\n output = x\n pline += \" \" + output + \" |\"\n print(pline)\n print(\" _____________\")\n\ndef checkwin(board):\n # Return t/f if game is won and player letter if so\n # Check horizontal\n for row in board:\n if \"\".join(row) == \"XXX\":\n return True, \"X\"\n if \"\".join(row) == \"OOO\":\n return True, \"O\"\n # Check vertical\n for x in range(3):\n check = \"\"\n for y in range(len(board)):\n check += board[y][x]\n if check == \"XXX\":\n return True, \"X\"\n if check == \"OOO\":\n return True, \"O\"\n # Check diagonal\n lr = \"\" # left to right\n rl = \"\" # right to left\n for i in range(3):\n lr += board[i][i]\n rl += board[2-i][2-i]\n if lr == \"XXX\" or rl == \"XXX\":\n return True, \"X\"\n if lr == \"OOO\" or lr == \"OOO\":\n return True, \"O\"\n\n return False,\"\"\n\ndef movesleft(board):\n # True or false if any moves left\n for row in board:\n for col in row:\n if col != \"X\" and col != \"O\":\n return True\n return False\n\n# Game loop\nwhile not won and movesleft(board):\n player = switchplayer(player)\n printboard(board)\n board = getmove(player, board)\n print(board)\n won, winner = checkwin(board)\n\nif won:\n printboard(board)\n print(bcolors.WARNING + \"Won by: \"+ winner + bcolors.ENDC)\nif not movesleft(board):\n print(\"No winner!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"603912393","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\nimport datetime\nimport PyRSS2Gen\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\n\ntitles = []\nlinks = []\ndescriptions = []\npubDates = []\nprefix = 'http://me.zju.edu.cn'\n\n\ndef get_article(link):\n try:\n r = requests.get(link)\n bsObj = BeautifulSoup(r.content.decode('utf-8', 'ignore'))\n article = bsObj.find('div', {'class':'wp_articlecontent'}).text\n except:\n article = ''\n article = re.sub('\\xa0|\\u2003', ' ', article)\n return article\n\n\ndef rssgen():\n items = []\n for i in range(len(titles)):\n items.append(PyRSS2Gen.RSSItem(\n title = titles[i],\n link = links[i],\n description = descriptions[i],\n pubDate = pubDates[i])\n )\n rss = PyRSS2Gen.RSS2(title = \"浙江大学机械工程学院-通知公告\",\n link = \"http://me.zju.edu.cn/meoffice/tzgg/list.htm\",\n description = \"浙江大学机械工程学院-通知公告\",\n lastBuildDate = datetime.datetime.now(),\n items = items\n )\n rss.write_xml(open(\"news_me_tzgg.xml\", \"w\"), 'gbk')\n\n\ndef me_tzgg():\n r = requests.get('http://me.zju.edu.cn/meoffice/tzgg/list.htm')\n bsObj = BeautifulSoup(r.content.decode('utf-8', 'ignore'))\n newsBunch = bsObj.find('div', {'id':'wp_news_w9'}).ul.findAll('li')\n for news in newsBunch:\n child1, child2 = news.children\n titles.append(child1.text)\n if child1.a.attrs['href'][0] == '/':\n fullLink = prefix + child1.a.attrs['href']\n else:\n fullLink = child1.a.attrs['href']\n links.append(fullLink)\n descriptions.append(get_article(fullLink))\n year, month, day = [int(a) for a in child2.text.split('-')]\n pubDates.append(datetime.datetime(year, month, day))\n rssgen()\n\n\nif __name__ == '__main__':\n me_tzgg()","sub_path":"rss/news_me_tzgg.py","file_name":"news_me_tzgg.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"337140875","text":"\"\"\"\nBasic model example using a 2D CNN with images as inputs and metadata\n\"\"\"\nimport tensorflow as tf\n\n\nclass CNN2D(tf.keras.Model):\n \"\"\"\n Simple 2D CNN\n \"\"\"\n def __init__(self):\n \"\"\"\n Define model layers\n \"\"\"\n super(CNN2D, self).__init__()\n self.conv_1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')\n self.pool_1 = tf.keras.layers.MaxPooling2D((2, 2))\n self.conv_2 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')\n self.pool_2 = tf.keras.layers.MaxPooling2D((2, 2))\n self.conv_3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')\n self.flatten = tf.keras.layers.Flatten()\n self.FC1 = tf.keras.layers.Dense(128, activation='relu')\n self.FC2 = tf.keras.layers.Dense(64, activation='relu')\n\n self.t0 = tf.keras.layers.Dense(4, name='t0')\n\n def call(self, inputs):\n \"\"\"\n Perform forward on inputs and returns predicted GHI at the\n desired times\n :param inputs: input images and metadata\n :return: a list of four floats for GHI at each desired time\n \"\"\"\n img, past_metadata, future_metadata = inputs # split images and metadatas\n # Remove timesteps dimensions from sequences of size 1\n patch_size = img.shape[-2]\n n_channels = img.shape[-1]\n img = tf.reshape(img, (-1, patch_size, patch_size, n_channels))\n past_metadata = tf.reshape(past_metadata, (-1, past_metadata.shape[-1]))\n x = self.conv_1(img)\n x = self.pool_1(x)\n x = self.conv_2(x)\n x = self.pool_2(x)\n x = self.conv_3(x)\n x = self.flatten(x)\n # concatenate encoded image and metadata\n x = tf.keras.layers.concatenate([x, past_metadata, future_metadata], 1)\n x = self.FC1(x)\n x = self.FC2(x)\n # Create 4 outputs for t0, t0+1, t0+3 and t0+6\n t0 = self.t0(x)\n return t0\n","sub_path":"models/CNN2D.py","file_name":"CNN2D.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"325025238","text":"from django.urls import path, re_path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index_view, name='index'),\n path('detail/', views.detail_view, name='detail'),\n path('scan', views.scan_view, name='scan'),\n path('results', views.results_view, name='results'),\n path('deleteScan', views.delete_scans, name='delete_scans')\n]","sub_path":"URLscanner/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"330882453","text":"\nimport anonfile\n# create anonfile object\n# if an API-Key is provided it uploads directly in your anonfile account. (provide with: api_key=)\naf = anonfile.Anonfile()\n# upload a file\nresponse = af.upload('test.txt')\n# access specific member of response\nprint(response.url_short)\nprint(response.file_id)\n# get full response as dictionary\nres_dict = response.getfullres()\nfor k, v in res_dict.items():\n print(k, v)","sub_path":"src/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"465715984","text":"'''\nthree way partition\nprocedure three-way-partition(A : array of values, mid : value):\n i = 0\n j = 0\n n = len(A)-1\n\n while j <= n:\n\n if A[j] < mid:\n swap A[i] and A[j]\n i +=1\n j +=1\n elif A[j] > mid:\n swap A[j] and A[n]\n n -=1\n else:\n j +=1\n\n[1,1,1,4,5,6]\nnums[::2] = [1,1,5]\nnums[1::2] = [1,4,6]\nnums[med::-1] = [1,1,1]\nnums[:med:-1] = 6,5,4\n'''\n#Time O(1)\nclass Solution(Object):\n def wiggleSort():\n nums.sort()\n med = (len(nums) - 1) / 2\n #even index, odd index\n nums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]\n \n\n","sub_path":"Sort/wiggle_sort_ii.py","file_name":"wiggle_sort_ii.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"271053354","text":"# 2011.py\n# 2018.05.29\n\nimport sys\n\ndef decode(p):\n\tdp = [1]\n\tp_len = len(p)\n\tfor n in range(1, p_len):\n\t\tn1 = dp[n-1] if p[n] != 0 else 0\n\t\tn2 = dp[n-2] if 10 <= (p[n-1]*10) + p[n] <= 26 else 0\n\t\tdp.append(n1+n2)\n\treturn dp[p_len-1]\n\np = list(map(int, sys.stdin.readline()))\nresult = decode(p) % 1000000 if p[0] else 0\nprint(result)\n\n# DP[n] : n 번째 요소까지의 암호코드의 총 개수\n# DP[n] = DP[n-1] + DP[n-2]\n# (단, DP[n-1]이려면 p[n]이 0이 아니여야하고 DP[n-2]를 하려면 p[n-1]+p[n]이 10과 26 사이여야한다.)\n","sub_path":"2000/2011.py","file_name":"2011.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"446677201","text":"\"\"\"\nThis is an easy binning solution for credit scorecard build.\n\nAuthor : Tri Le \n\nFeature Selection for Credit Scoring\n\nThis is an easy feature selection procedure in which many methods are run in order to \ndetermine which subset of variables are best predicting the outcome.\n\n\"\"\"\nimport time\nfrom sklearn.feature_selection import mutual_info_classif\nfrom sklearn.feature_selection import chi2\nfrom scipy.stats import mannwhitneyu\nfrom scipy.stats import spearmanr\nfrom scipy.stats import pearsonr\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.metrics import auc, roc_curve\nimport optuna\nfrom joblib import Parallel, delayed\nfrom joblib import parallel_backend\n#from sklearn.utils import check_array\nimport numpy as np\nimport pandas as pd\n\nclass scFeatureSelection:\n \"\"\"\n \"\"\"\n def __init__(self, methods = ['median', 'chi2', 'mutual_info', 'p_cor', 's_cor', \n 'logreg', 'l1logreg', 'svm', 'gini_rf'], \n n_jobs = None, threshold_cor = 0,\n random_state = None):\n self.methods = methods\n self.n_jobs = n_jobs\n self.threshold_cor = threshold_cor\n self.random_state = random_state\n\n def median(self, X, y):\n classes = np.unique(y)\n p_value = np.ones(X.shape[1])\n for i in range(X.shape[1]):\n try:\n p_val = mannwhitneyu(X[(y == classes[0]).reshape(1, -1)[0], i],\n X[(y == classes[1]).reshape(1, -1)[0], i],\n alternative = 'two-sided')[1]\n except ValueError:\n p_val = 1.0\n p_value[i] = p_val\n p_value[p_value > 5e-2] = 1.0\n p_value = np.abs(-np.log10(p_value))\n return p_value/p_value.max()\n \n def mutl_info(self, X, y):\n dp_coef = np.zeros(X.shape[1])\n for i in range(X.shape[1]):\n coef = mutual_info_classif(X[:, i].reshape(-1,1), y,\n random_state = self.random_state)[0]\n dp_coef[i] = coef\n return dp_coef/dp_coef.max()\n \n def chi2(self, X, y):\n p_value = np.ones(X.shape[1])\n for i in range(X.shape[1]):\n p_val = chi2(X[:, i].reshape(-1,1), y)[1][0]\n p_value[i] = p_val\n p_value[p_value > 5e-2] = 1.0\n p_value[p_value == .0] = 1.0\n p_value = np.abs(-np.log10(p_value))\n return p_value/p_value.max()\n \n def cor_func(self, X, y, type = 'p_cor', threshold = 0):\n if type == 'p_cor':\n rho = np.array([pearsonr(X[:, i], y)[0] for i in range(X.shape[1])])\n elif type == 's_cor':\n rho = np.array([spearmanr(X[:, i], y)[0] for i in range(X.shape[1])])\n \n rho = np.abs(rho)\n pos = np.arange(X.shape[1])\n\n rho_f = rho.copy()\n pos_f = pos.copy()\n\n pos = pos[rho > threshold]\n rho = rho[rho > threshold]\n\n pos = pos[rho.argsort()]\n rho = rho[rho.argsort()]\n\n del_list = np.array([])\n for i in np.arange(len(pos)-1):\n for j in np.arange(i + 1, len(pos)):\n if type == 'p_cor':\n rho_i_j = pearsonr(X[:, pos[-(i + 1)]], X[:, pos[-(j + 1)]])[0]\n elif type == 's_cor':\n rho_i_j = spearmanr(X[:, pos[-(i + 1)]], X[:, pos[-(j + 1)]])[0]\n if rho_i_j >= rho[-(i + 1)]:\n del_list = np.append(del_list, pos[-(j + 1)])\n\n pos_r = pos[np.isin(pos, del_list, invert = True)]\n rho_f[np.isin(pos_f, pos_r, invert = True)] = .0\n return rho_f/rho_f.max()\n \n def logreg_svm(self, X, y, l1 = False, svm = False):\n X = RobustScaler().fit_transform(X)\n def objective(trial):\n c = trial.suggest_uniform('c', .0, 1.0)\n if svm:\n #https://link.springer.com/content/pdf/10.1023/A:1012487302797.pdf\n clf = LinearSVC(C = c, penalty = 'l1', dual = False, \n class_weight = 'balanced', \n 
max_iter = 10000, random_state = self.random_state)\n else:\n clf = LogisticRegression(C = c, penalty = 'l1' if l1 else 'l2',\n solver = 'saga' if l1 else 'lbfgs',\n max_iter = 10000, random_state = self.random_state)\n clf.fit(X, y)\n if svm:\n score = clf.score(X, y)\n else:\n fpr, tpr, thres = roc_curve(y, clf.predict_proba(X)[:, 0],\n pos_label = 0)\n return -score if svm else -(2 * auc(fpr, tpr) - 1)\n\n study = optuna.create_study()\n study.optimize(objective, n_trials = 50)\n if svm:\n clf = LinearSVC(C = study.best_params['c'], penalty = 'l1',\n dual = False, class_weight = 'balanced',\n max_iter = 10000, random_state = self.random_state)\n else:\n clf = LogisticRegression(C = study.best_params['c'],\n penalty = 'l1' if l1 else 'l2',\n solver = 'saga' if l1 else 'lbfgs',\n max_iter = 10000, random_state = self.random_state)\n clf.fit(X, y)\n coef = np.square(clf.coef_[0])\n return coef/coef.max()\n \n def rf(self, X, y):\n forest = ExtraTreesClassifier(n_estimators = max(100, 2 * X.shape[1]),\n class_weight = 'balanced',\n n_jobs = self.n_jobs, random_state = self.random_state) \n forest.fit(X, y)\n imp = forest.feature_importances_\n return imp/imp.max()\n\n def fit(self, X, y):\n start_time = time.perf_counter()\n optuna.logging.disable_default_handler()\n self.result = {}\n\n if not isinstance(X, (pd.core.frame.DataFrame,\n pd.core.series.Series, np.ndarray)):\n raise ValueError('Invalid data object')\n \n if isinstance(y, (pd.core.frame.DataFrame, pd.core.series.Series)):\n y = y.values\n if isinstance(X, pd.core.frame.DataFrame):\n self.columns_ = X.columns.values\n X = X.values\n elif isinstance(X, np.ndarray):\n self.columns_ = np.arange(X.shape[0])\n if 'median' in self.methods:\n m = self.median(X, y)\n self.result['median'] = {}\n for i, na in enumerate(self.columns_):\n self.result['median'][na] = m[i]\n if 'chi2' in self.methods:\n if (X < 0).sum().sum() == 0:\n f = self.chi2(X, y)\n self.result['chi2'] = {}\n for i, na in enumerate(self.columns_):\n self.result['chi2'][na] = f[i]\n else:\n print('Input X must be non-negative. \"chi2\" wont be run.\\n')\n if 'mutual_info' in self.methods:\n if (X < 0).sum().sum() == 0:\n f = self.mutl_info(X, y)\n self.result['mutual_info'] = {}\n for i, na in enumerate(self.columns_):\n self.result['mutual_info'][na] = f[i]\n else:\n print('Input X must be non-negative. 
\"mutual_info\" wont be run.\\n') \n if 'p_cor' in self.methods:\n p = self.cor_func(X, y, type = 'p_cor', threshold = self.threshold_cor)\n self.result['p_cor'] = {}\n for i, na in enumerate(self.columns_):\n self.result['p_cor'][na] = p[i]\n if 's_cor' in self.methods:\n s = self.cor_func(X, y, type = 's_cor', threshold = self.threshold_cor)\n self.result['s_cor'] = {}\n for i, na in enumerate(self.columns_):\n self.result['s_cor'][na] = s[i]\n if 'gini_rf' in self.methods:\n rf = self.rf(X, y)\n self.result['gini_rf'] = {}\n for i, na in enumerate(self.columns_):\n self.result['gini_rf'][na] = rf[i]\n \n def jobs(method):\n if method == 'logreg':\n l = self.logreg_svm(X, y)\n self.result['logreg'] = {}\n for i, na in enumerate(self.columns_):\n self.result['logreg'][na] = l[i]\n if method == 'l1logreg':\n l_1 = self.logreg_svm(X, y, l1 = True)\n self.result['l1logreg'] = {}\n for i, na in enumerate(self.columns_):\n self.result['l1logreg'][na] = l_1[i]\n if method == 'svm':\n sv = self.logreg_svm(X, y, svm = True)\n self.result['svm'] = {}\n for i, na in enumerate(self.columns_):\n self.result['svm'][na] = sv[i]\n return None\n \n #run jobs in parallel\n with parallel_backend('threading', n_jobs = self.n_jobs):\n Parallel()(delayed(jobs)(method) for method in [i for i in self.methods \n if i not in ['median', 'chi2', 'mutual_info', \n 'p_cor', 's_cor', 'gini_rf']])\n \n print('Elapsed time: {:.1f} seconds'.format(time.perf_counter() - start_time))\n return self","sub_path":"utils/_scFeatureSelection.py","file_name":"_scFeatureSelection.py","file_ext":"py","file_size_in_byte":9569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"489266538","text":"import numpy as np\nimport pandas as pd\nfrom scipy.integrate import solve_ivp\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta, datetime\n\n\n\nclass Epidemic_SIR(object):\n\n def __init__(self, data=None, params=None, start_pop=200000):\n self.data = data.get_data()\n self.pred = pd.DataFrame()\n self.y_meas = data.time_series()\n self.N = start_pop\n self.timespan = data.timespan()\n self.iv = data.initial_value()\n self.iv = np.hstack([self.N-np.sum(self.iv), self.iv])\n self.time = data.time()\n\n self.mdl = self.model()\n self.loss = self.loss()\n self.params = params\n\n self.rms = 0.\n\n def model(self):\n # Vectorial ODE\n def sir_mdl(theta, t, y, N) :\n\n # Unmarshall state\n (S, I, R) = y\n\n # Unmarshall parameters\n (l, mu, b, g) = theta\n\n # Compute derivatives\n dS = l - mu * S - b * I * S / N\n dI = b * I * S / N - (g + mu) * I\n dR = g * I - mu * R\n\n return [dS, dI, dR]\n\n mdl = lambda theta, t, y, N : sir_mdl(theta, t, y, N)\n return mdl\n\n def observables(self, y):\n return y[:,1:3]\n\n def loss(self):\n # l2 loss\n\n def l2_loss(theta):\n\n x = lambda t, y: self.mdl(theta, t, y, self.N)\n\n # model canot be used directly, I should use a lambda f\n # solve initial value problem\n solution = solve_ivp(x, self.timespan, self.iv,\n t_eval=self.time, vectorized=True)\n\n # compute l2 loss\n # TODO: use libary function to compute l2\n l2_loss = np.sum(\n (self.observables(solution.y.T) - self.y_meas) ** 2)\n\n return l2_loss\n\n # useless\n my_loss = lambda theta: l2_loss(theta)\n\n return my_loss\n\n def predict(self, n_range):\n\n self.pred['time'] = np.arange(n_range).astype(np.int)\n\n self.pred.set_index(self.data.index[0] +\n self.data.index.freq * self.pred['time'], inplace=True)\n\n mdl = lambda t, y: self.mdl(self.params, t, y, self.N)\n prediction = solve_ivp(mdl, [0, n_range], self.iv,\n t_eval=self.pred['time'].values, vectorized=True)\n self.pred['susceptible'] = prediction.y[0]\n self.pred['infectious'] = prediction.y[1]\n self.pred['resolved'] = prediction.y[2]\n\n print(self.pred)\n\n def view (self):\n plt.figure()\n axes = plt.gca()\n self.data.plot(style=\".\", y=['confirmed', 'resolved'],\n color=['red', 'magenta'], ax=axes)\n self.pred.plot(kind=\"line\", y=['infectious', 'resolved'],\n color=['yellow', 'blue'], ax=axes)\n plt.title('Cumulated Cases. rms=' + str(self.rms) +\n \". 
Max=\" + str(self.pred.index[self.pred['infectious'].argmax()]))\n #x=[]\n #self.pred.plot.bar( style='o', color='k', y=['infectious'])\n\n plt.figure()\n axes = plt.gca()\n self.data.diff().plot(style=\".\", y=['confirmed', 'resolved'],\n color=['red', 'magenta'], ax=axes)\n self.pred.diff().plot(kind=\"line\", y=['infectious', 'resolved'],\n color=['yellow', 'blue'], ax=axes)\n plt.title('Daily Increments')\n\n plt.figure()\n axes = plt.gca()\n self.data['p'] = self.data['resolved'] / (self.data['confirmed'] +\n self.data['resolved'])\n self.pred['p'] = self.pred['resolved'] / (self.pred['infectious'] +\n self.pred['resolved'])\n self.data.plot(style=\".\", y=['p'],\n color=\"red\", ax=axes)\n self.pred.plot(kind=\"line\", y=['p'],\n color=\"yellow\", ax=axes)\n plt.title('Resolved Probability')\n\n plt.show()\n\n def estimate(self):\n\n optimal = minimize(\n self.loss,\n [0.001, 1, 0.001, 0.001],\n method='L-BFGS-B',\n bounds=[(1e-8, 5e-1), (1e-8, 2e-1), (1e-8, 5e-1),\n (1e-8, 5e-1)],\n options={'gtol': 1e-9, 'disp': True}\n )\n self.params = optimal.x\n self.rms = np.sqrt(optimal.fun)\n print(self.params)\n print(self.rms)\n","sub_path":"src/epidemic_sir.py","file_name":"epidemic_sir.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"324963568","text":"\"\"\"\nTask2\nWritten by Xiang Gao, last modified: 17/10/2018, date of create:13/10/2018\n\"\"\"\n\nfrom task1 import *\nimport timeit\n\n# data\ntableSize = [250727, 402221, 1000081]\nb = [1, 27813, 250726]\n\n\ndef read_file(name, b, tablesize):\n \"\"\"\n read the file and hash all word in the file using specific base and table size\n best/worst complexity:O(n^2)/O(n^2)\n post-condition:None\n pre-condition:None\n :param name:name of the file\n :param b:base in the hash function\n :param tablesize:the initial size of the table\n :return: the table and time taken for setting the data into the table\n \"\"\"\n table = hash_table(tablesize, b)\n file = open(name, encoding='utf-8')\n\n Start = timeit.default_timer()\n for line in file:\n line_stripped = line.rstrip('\\n')\n words = line_stripped.split(\" \")\n for word in words:\n table.__setitem__(word, 1) # associated data 1\n t_taken = (timeit.default_timer() - Start)\n file.close()\n\n return [table, t_taken]\n\n\ndef print_result():\n \"\"\"\n print the result\n best/worst complexity:O(n^2)/O(n^2)\n post-condition:None\n pre-condition:None\n :return:None\n \"\"\"\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\"b\", \"TABLESIZE\", \"english_large.txt\", \"english_small.txt\", \"french.txt\"))\n for size in tableSize:\n for base in b:\n if base == 1 or size-base == 1:\n print(\"{}\\t{}\\t{}\\t\\t{}\\t\\t{}\".format(base, size, \"Takes too long time\", \"Takes too long time\",\n \"Takes too long time\"))\n else:\n engl = read_file(\"english_large.txt\", base, size)[1] # returning time taken\n engs = read_file(\"english_small.txt\", base, size)[1]\n fren = read_file(\"french.txt\", base, size)[1]\n print(\"{}\\t{}\\t{}s\\t{}s\\t{}s\".format(base, size, engl, engs, fren))\n\n\nif __name__ == \"__main__\":\n print_result()\n","sub_path":"A3/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"385700915","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nfrom options.test_options import TestOptions\r\nfrom data.data_loader import CreateDataLoader\r\nfrom models.classifier import Classifier\r\n\r\n\r\nopt = TestOptions().parse()\r\nopt.no_shuffle = True\r\n\r\n### set dataloader ###\r\nprint('### prepare DataLoader ###')\r\ndata_loader = CreateDataLoader(opt)\r\ntest_loader = data_loader.load_data()\r\nprint('numof test images : {}'.format(len(data_loader)))\r\nprint('numof iteration : {}'.format(len(test_loader)))\r\n\r\n### define model ###\r\nmodel = Classifier()\r\nmodel.initialize(opt)\r\nmodel.setup()\r\nmodel.network.eval()\r\nprint('model [{}] was created'.format(opt.model))\r\n\r\n### test loop ###\r\nprint('### start inference ! ###')\r\nlogits = np.empty((0, opt.n_class), float)\r\nlabels = np.empty(0, int)\r\nnumof_correct = 0\r\nfor iter, data in enumerate(test_loader):\r\n\r\n model.set_variables(data)\r\n logit, pred = model.inference()\r\n\r\n logits = np.append(logits, logit.cpu().numpy(), axis=0)\r\n labels = np.append(labels, data['label'].numpy(), axis=0)\r\n\r\n numof_correct += torch.sum(pred==data['label']).item()\r\n\r\ntest_acc = numof_correct / (iter+1)\r\nprint('test accuracy: {:.3f}'.format(test_acc))\r\n\r\nplt.scatter(logits[:, 0], logits[:, 1], c=labels)\r\nplt.colorbar()\r\nplt.show()\r\n","sub_path":"classifier/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"53224406","text":"\"\"\"\nBrute force approach made easy with itertools.\n\"\"\"\nfrom itertools import combinations_with_replacement as combr\nfrom collections import Counter\n\ndef parse_input():\n with open(\"input.txt\", 'r') as f:\n ingredients = {}\n\n for line in f:\n line = line.split()\n line[0] = line[0][:-1]\n ingredients[line[0]] = {}\n\n for idx in range(1, len(line) - 2, 2):\n ingredients[line[0]][line[idx]] = int(line[idx + 1][:-1])\n\n ingredients[line[0]][line[-2]] = line[-1] \n\n return ingredients\n\n\"\"\"\nIgnores calories for slight optimization.\n\"\"\"\ndef best_cookie():\n ingredients = parse_input()\n left = ingredients.keys()\n best = None\n\n for cookie in combr(left, 100):\n score = 1\n props = {}\n\n cookie = Counter(cookie)\n\n for ingr in iter(cookie):\n for prop in iter(ingredients[ingr]):\n if prop == \"calories\":\n continue\n\n if prop not in props:\n props[prop] = 0\n\n props[prop] += ingredients[ingr][prop] * cookie[ingr]\n\n for prop in iter(props):\n if prop != \"calories\":\n score *= max(0, props[prop])\n\n if best == None or score > best:\n best = score\n\n print(best)\n\nbest_cookie()\n","sub_path":"day15/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"506669551","text":"import sys\n\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QWidget, QApplication, QDesktopWidget\n\nfrom ImageButton import register_button\nfrom controllers.game_controller import GameController\n\n\nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n self.mouse_events = set()\n\n def init_ui(self):\n self.setWindowTitle('Kings vs Zombies')\n self.set_window_background(\"assets/KingsVsZombies.png\")\n self.setFixedSize(800, 500)\n self.init_start_buttons()\n self.center()\n self.show()\n\n def init_start_buttons(self):\n self.btn_start_game = register_button(\n (267, 370),\n [\n \"assets/start_game.png\",\n \"assets/start_on.png\",\n \"assets/start_over.png\"\n ],\n self,\n self.start_game\n )\n\n def start_game(self):\n self.btn_start_game.deleteLater()\n self.controller = GameController(self, \"1000\")\n self.set_pause = False\n self.init_game_timer(self.controller.on_tick)\n\n def init_game_timer(self, target_method):\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(target_method)\n self.timer.start(15)\n\n def set_window_background(self, image):\n pal = self.palette()\n pal.setBrush(QtGui.QPalette.Normal, QtGui.QPalette.Window,\n QtGui.QBrush(QtGui.QPixmap(image)))\n self.setPalette(pal)\n self.autoFillBackground()\n\n def center(self):\n screen = QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move((screen.width() - size.width()) / 2,\n (screen.height() - size.height()) / 2)\n\n def mousePressEvent(self, e):\n x, y = e.pos().x(), e.pos().y()\n for func in list(self.mouse_events):\n func((x, y))\n\n\nif __name__ == '__main__':\n\n application = QApplication(sys.argv)\n application.setWindowIcon(QtGui.QIcon(\"assets/icon.png\"))\n start = MainWindow()\n sys.exit(application.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"586295713","text":"from .common import WpsTestClient\n\n\ndef test_caps():\n wps = WpsTestClient()\n resp = wps.get(service='wps', request='getcapabilities')\n names = resp.xpath_text('/wps:Capabilities'\n '/wps:ProcessOfferings'\n '/wps:Process'\n '/ows:Identifier')\n sorted_names = sorted(names.split())\n\n expected_names = [\n 'analogs_compare',\n 'analogs_detection',\n 'analogs_model',\n 'analogs_viewer',\n 'climatefactsheet',\n 'fetch',\n 'indices_percentile',\n 'indices_simple',\n 'landseamask',\n 'plot_timeseries',\n 'robustness',\n 'sdm_allinone',\n 'sdm_csv',\n 'sdm_csvindices',\n 'sdm_gbiffetch',\n 'sdm_getindices',\n 'segetalflora',\n 'subset_continents',\n 'subset_countries',\n 'subset_points',\n 'subset_regionseurope',\n 'weatherregimes_model',\n 'weatherregimes_projection',\n 'weatherregimes_reanalyse',\n 'wps_c4i_simple_indice'\n ]\n assert sorted_names == expected_names\n","sub_path":"flyingpigeon/tests/test_wps_caps.py","file_name":"test_wps_caps.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"648038286","text":"# load modules\n# for legacy python compatibility\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# # TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# disable GPU and only CPU and Warning etc\nimport os, sys\n\nimport datetime\nfrom SALib.analyze import sobol\n\n# from tensorflow import feature_column\n# from sklearn.model_selection import train_test_split\n# import pathlib\n# import seaborn as sns\n\nimport ddmms.help.ml_help as ml_help\n\nimport ddmms.preprocess.ml_preprocess as ml_preprocess\n\nimport ddmms.parameters.ml_parameters as ml_parameters\nimport ddmms.parameters.ml_parameters_cnn as ml_parameters_cnn\n\nimport ddmms.models.ml_models as ml_models\nimport ddmms.models.ml_optimizer as ml_optimizer\nimport ddmms.models.ml_loss as ml_loss\n\nimport ddmms.postprocess.ml_postprocess as ml_postprocess\nimport ddmms.math.ml_math as ml_math\nimport ddmms.specials.ml_specials as ml_specials\n\nimport ddmms.misc.ml_misc as ml_misc\nimport ddmms.misc.ml_callbacks as ml_callbacks\n\n\nimport ddmms.train.ml_kfold as ml_kfold\n\n\n\n\nprint(\"TensorFlow version: \", tf.__version__)\nprint(os.getcwd())\nargs = ml_help.sys_args()\n\nargs.configfile = 'kbnn-load-cnn-1-frame.config'\n\nargs.platform = 'gpu'\nargs.inspect = 0\nargs.debug = False\nargs.verbose = 1\nargs.show = 0\n\nml_help.notebook_args(args)\nconfig = ml_preprocess.read_config_file(args.configfile, args.debug)\n# train_dataset, train_labels, val_dataset, val_labels, test_dataset, test_labels, test_derivative, train_stats = ml_preprocess.load_and_inspect_data(\n# config, args)\ndataset, labels, derivative, train_stats = ml_preprocess.load_all_data(config, args)\n\nstr_form = config['FORMAT']['PrintStringForm']\nepochs = int(config['MODEL']['Epochs'])\nbatch_size = int(config['MODEL']['BatchSize'])\nverbose = int(config['MODEL']['Verbose'])\nn_splits = int(config['MODEL']['KFoldTrain'])\n\nthe_kfolds = ml_kfold.MLKFold(n_splits, dataset)\ntrain_dataset, train_labels, val_dataset, val_labels, test_dataset, test_labels, test_derivative = the_kfolds.get_next_fold(\n dataset, labels, derivative, final_data=True)\n# train_derivative, val_derivative, test_derivative = the_kfolds.get_current_fold_derivative_data()\n\n#total_model_numbers = parameter.get_model_numbers()\nprint(\"...done with parameters\")\nmodel_summary_list = []\n\nconfig['RESTART']['CheckPointDir'] = './saved_weight'\nconfig['MODEL']['ParameterID'] = ''\ncheckpoint_dir = config['RESTART']['CheckPointDir'] + config['MODEL']['ParameterID']\nmodel_path = checkpoint_dir + '/' + 'model.h5'\n\nmodel = ml_models.build_model(config, train_dataset, train_labels, train_stats=train_stats)\n# https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model\n\nif (config['RESTART']['RestartWeight'].lower() == 'y'):\n print('checkpoint_dir for restart: ', checkpoint_dir)\n latest = tf.train.latest_checkpoint(checkpoint_dir)\n print(\"latest checkpoint: \", latest)\n # latest=\"/opt/scratch/ml/cnn-hyperelasticity-bvp-2d-conv/restart/cp-2000.ckpt\"\n if (latest != None):\n model.load_weights(latest)\n print(\"Successfully load weight: \", latest)\n else:\n print(\"No saved weights, start to train the model from the beginning!\")\n pass\n\nmetrics = ml_misc.getlist_str(config['MODEL']['Metrics'])\noptimizer = 
ml_optimizer.build_optimizer(config)\n# loss = ml_loss.build_loss(config)\nloss = ml_loss.my_mse_loss_with_grad(BetaP=0.0)\nmodel.compile(loss=loss, optimizer=optimizer, metrics=metrics)\nlabel_scale = float(config['TEST']['LabelScale'])\n\ncallbacks = ml_callbacks.build_callbacks(config)\ntrain_dataset = train_dataset.to_numpy()\ntrain_labels = train_labels.to_numpy()\n# print('before: ', train_labels, train_stats)\nval_dataset = val_dataset.to_numpy()\nval_labels = val_labels.to_numpy()\ntest_dataset = test_dataset.to_numpy()\ntest_labels = test_labels.to_numpy()\n\n# make sure that the derivative data is scaled correctly\n\n# The NN/DNS scaled derivative data should be: * label_scale * train_stats['std'] (has already multiplied by label_scale )\n\n# Since the feature is scaled, and label psi is scaled, the S_NN will be scaled to: label_scale * train_stats['std']\n# the model will scale S_NN back to no-scaled status.\n# here we scale F, and P to no-scaled status\nmodified_label_scale = np.array([1.0, 1.0/label_scale, 1.0/label_scale, 1.0/label_scale, 1.0/label_scale, 1.0/label_scale, 1.0/label_scale, 1.0/label_scale, 1.0/label_scale])\ntrain_labels = train_labels*modified_label_scale\nval_labels = val_labels*modified_label_scale\ntest_labels = test_labels*modified_label_scale\nprint('after: ', train_labels)\n# print(type(train_dataset))\nhistory = model.fit(\n train_dataset,\n train_labels,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(val_dataset, val_labels), # or validation_split= 0.1,\n verbose=verbose,\n callbacks=callbacks)\n\nmodel.summary()\n# print(\"history: \" , history.history['loss'], history.history['val_loss'], history.history)\n\nall_data = {'test_label': [], 'test_nn': [], 'val_label': [], 'val_nn': [], 'train_label': [], 'train_nn': []}\n\ntest_nn = model.predict(test_dataset, verbose=0, batch_size=batch_size)\nval_nn = model.predict(val_dataset, verbose=0, batch_size=batch_size)\ntrain_nn = model.predict(train_dataset, verbose=0, batch_size=batch_size)\n\nfor i in np.squeeze(test_nn):\n # print('test_nn:', i)\n all_data['test_nn'].append(i[0] / label_scale)\nfor i in np.squeeze(val_nn):\n all_data['val_nn'].append(i[0] / label_scale)\nfor i in np.squeeze(train_nn):\n all_data['train_nn'].append(i[0] / label_scale)\n\nfor i in test_labels:\n all_data['test_label'].append(i[0] / label_scale)\n # print('test_label: ', i)\nfor i in val_labels:\n all_data['val_label'].append(i[0] / label_scale)\nfor i in train_labels:\n all_data['train_label'].append(i[0] / label_scale)\n# print('all_data: ', all_data)\nprint('test_nn shape: ', np.shape(np.squeeze(test_nn)))\nprint('test_labels shape: ', np.shape(test_labels))\n\nimport pickle\nimport time\nnow = time.strftime(\"%Y%m%d%H%M%S\")\npickle_out = open('all_data_' + now + '.pickle', \"wb\")\npickle.dump(all_data, pickle_out)\npickle_out.close()\n\npickle_out = open('history_' + now + '.pickle', \"wb\")\npickle.dump(history.history, pickle_out)\npickle_out.close()\n\n# all_data['P_DNS']= test_labels[:,1:4]/label_scale/train_stats['std'].to_numpy()[0:3]\n# all_data['P_NN'] = test_nn[:,1:4]/label_scale/train_stats['std'].to_numpy()[0:3]\nall_data['P_DNS']= test_labels[:,1:5]\nall_data['P_NN'] = test_nn[:,1:5]\n\npickle_out = open('all_P_' + now + '.pickle', \"wb\")\npickle.dump(all_data, pickle_out)\npickle_out.close()\n\nprint('save to: ', 'all_data_' + now + '.pickle', 'history_' + now + '.pickle', 'all_P_' + now + '.pickle')\nprint('the prediction of P and delta Psi_me is not the best model fit with lowest 
loss!')\n\n","sub_path":"ddmms/paper_results/1-rve-paper/3-cnn-kbnn-mechanical-behavior-1dns/1-microstructure-no-penalize-P/final-kbnn-1-frame/kbnn_1_frame_cnn.py","file_name":"kbnn_1_frame_cnn.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"63892946","text":"'''----------------------------------------------------------------------\nHistory:\nDate CR Number Who What\n2019-03-22 CHG1001539200 Tibor Reiss Update and refactor to enable PS aggregation\n2019-04-12 CHG1001622197 Tibor Reiss Fix bonds which have aggregated payment types\n (e.g. Aggregated Accrued): retain every payment\n type by using generalised function for determining\n if it is part of cash or not\n----------------------------------------------------------------------'''\nfrom collections import defaultdict\n\nimport acm\nfrom AGGREGATION_PARAMETERS import PARAMETERS\n\n\nclass CALC_SPACE():\n def __init__(self):\n self.__context = acm.GetDefaultContext()\n self.__sheetType = 'FTradeSheet'\n self.__virtualPortfolio = acm.FAdhocPortfolio()\n self.__columnId = 'Portfolio Cash Vector'\n self.__traderPerGrouper = None\n self.__presentValueColumnId = 'Portfolio Present Value'\n self.__grouper = None\n self.__queryFolder = None\n self.__generateCalcSpace()\n self.__columnConfigCurrency = self.__createColumnConfigForCurrency()\n self.__setQueryFolder()\n self.__setGrouper()\n \n def __createColumnConfigForCurrency(self, currName=None):\n vector = acm.FArray()\n if currName:\n currencies = acm.FCurrency.Select(\"name={}\".format(currName))\n else:\n currencies = acm.FCurrency.Select('')\n for currency in currencies:\n param = acm.FNamedParameters()\n param.AddParameter('currency', acm.FCurrency[currency.Name()])\n vector.Add(param)\n return acm.Sheet.Column().ConfigurationFromVector(vector)\n\n def calculateCashPerCurrency(self):\n return PARAMETERS.calcSpace.CreateCalculation(\n self.__virtualPortfolio,\n self.__columnId,\n self.__columnConfigCurrency)\n\n def __generateCalcSpace(self):\n PARAMETERS.calcSpace = acm.Calculations().CreateCalculationSpace(self.__context, self.__sheetType)\n \n def addTradesToVirtualPortfolio(self, fTrades):\n self.__virtualPortfolio = acm.FAdhocPortfolio()\n for trade in fTrades:\n self.__virtualPortfolio.Add(trade)\n \n def ApplyGlobalSimulation(self, columnId, value):\n PARAMETERS.calcSpace.SimulateGlobalValue(columnId, value)\n\n def RemoveGlobalSimulation(self, columnId):\n PARAMETERS.calcSpace.RemoveGlobalSimulation(columnId)\n\n def RemoveGlobalDateSimulations(self):\n self.RemoveGlobalSimulation('Portfolio Profit Loss End Date')\n self.RemoveGlobalSimulation('Portfolio Profit Loss End Date Custom')\n self.RemoveGlobalSimulation('Portfolio Profit Loss Start Date')\n self.RemoveGlobalSimulation('Portfolio Profit Loss Start Date Custom')\n\n def ApplyGlobalDateSimulations(self, start_date=None, end_date=None):\n self.RemoveGlobalDateSimulations()\n if start_date:\n PARAMETERS.calcSpaceClass.ApplyGlobalSimulation('Portfolio Profit Loss Start Date', 'Custom Date')\n PARAMETERS.calcSpaceClass.ApplyGlobalSimulation('Portfolio Profit Loss Start Date Custom', start_date)\n if end_date:\n PARAMETERS.calcSpaceClass.ApplyGlobalSimulation('Portfolio Profit Loss End Date', 'Custom Date')\n PARAMETERS.calcSpaceClass.ApplyGlobalSimulation('Portfolio Profit Loss End Date Custom', end_date)\n \n def __setQueryFolder(self):\n self.__queryFolder = PARAMETERS.queryFolder.Query()\n \n def __setGrouper(self):\n self.__grouper = PARAMETERS.grouper.Grouper()\n \n def __walkingTheTree(self, fTreeIterator):\n dict = defaultdict(list)\n masterList = []\n queueDepth = fTreeIterator.Tree().Depth() + 1\n while fTreeIterator.NextUsingDepthFirst():\n idx = fTreeIterator.Tree().Depth() - queueDepth\n if len(masterList) >= idx + 1:\n masterList[idx] = 
fTreeIterator.Tree().Item().StringKey()\n else:\n masterList.append(fTreeIterator.Tree().Item().StringKey())\n \n if fTreeIterator.Tree().Item().IsKindOf(acm.FTradeRow):\n key = tuple(masterList[ : idx])\n dict[key].append(fTreeIterator.Tree().Item().Trade())\n \n return dict\n\n def setTradesPerGrouper(self):\n PARAMETERS.calcSpace.Clear()\n topNode = PARAMETERS.calcSpace.InsertItem(self.__queryFolder)\n topNode.ApplyGrouper(self.__grouper) \n PARAMETERS.calcSpace.Refresh()\n self.__traderPerGrouper = self.__walkingTheTree(topNode.Iterator())\n return self.__traderPerGrouper\n\n def setupCalculation(self, trades):\n PARAMETERS.calcSpace.Clear()\n self.RemoveGlobalDateSimulations()\n self.addTradesToVirtualPortfolio(trades)\n\n def setupCalculationWithGrouper(self, trades, grouper):\n self.setupCalculation(trades)\n topNode = PARAMETERS.calcSpace.InsertItem(self.__virtualPortfolio)\n topNode.ApplyGrouper(grouper)\n PARAMETERS.calcSpace.Refresh()\n\n def calculateValue(self, calcObject, columnId=None, start_date=None, end_date=None):\n if columnId is None:\n columnId = self.__presentValueColumnId\n self.ApplyGlobalDateSimulations(start_date, end_date)\n return PARAMETERS.calcSpace.CreateCalculation(calcObject, columnId)\n\n def getPresentValue(self, trades):\n self.setupCalculation(trades)\n return self.calculateValue(self.__virtualPortfolio, self.__presentValueColumnId)\n","sub_path":"Extensions/ABSA_AGGREGATION/FPythonCode/AGGREGATION_CALC_SPACE.py","file_name":"AGGREGATION_CALC_SPACE.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"477399994","text":"import itertools\n\n\ndef primes():\n a, b = 1, []\n while True:\n a += 1\n b.clear()\n for i in range(1, a + 1):\n if a % i == 0:\n b += [i]\n if len(b) == 2:\n yield a\n\n\nprint(list(itertools.takewhile(lambda x: x <= 31, primes())))\n","sub_path":"19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"538960800","text":"\"\"\"\n=====================\nGet manga related stuff.\n=====================\n\"\"\"\n\nimport aiohttp, asyncio\nfrom bs4 import BeautifulSoup\nfrom AniManga.helpers.MangaHelpers import check_if_exists, format\n\nasync def req(link):\n async with aiohttp.ClientSession() as session:\n async with session.get(link) as resp:\n return await resp.content.read()\n\nclass Manga:\n \"\"\"\n Manga class.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n __init__\n \"\"\"\n self.base_manga_url = \"https://www.anime-planet.com/manga/\"\n self.base_manga_reviews = \"https://www.anime-planet.com/manga/{}/reviews\"\n self.base_manga_tags = \"https://www.anime-planet.com/manga/\"\n\n async def get_manga_json(self, manga: str) -> dict:\n \"\"\"\n Get information on a manga.\n \"\"\"\n manga = format(manga)\n r = await req(\"https://www.anime-planet.com/manga/\" + f\"{manga}\")\n soup = BeautifulSoup(r, \"html5lib\")\n tags = soup.find_all(\"div\", {\"class\": \"tags\"})\n rr = await req(\"https://www.anime-planet.com/manga/{}/reviews\".format(manga))\n rsoup = BeautifulSoup(rr, \"html5lib\")\n\n if check_if_exists(manga):\n\n rank = soup.find_all(\"div\", {\"class\":\"pure-1 md-1-5\"})\n for x in rank:\n if x.text.startswith(\"\\nRank\"):\n rank = x.text.replace(\"\\n\", \"\")\n\n tags_list = []\n\n for x in tags:\n x = x.find_all(\"li\")\n for z in x:\n z = z.text.replace(\"\\n\", \"\")\n tags_list.append(z)\n\n characters = []\n for x in soup.find_all(\"strong\",{\"class\":\"CharacterCard__title rounded-card__title\"}):\n characters.append(x.text)\n\n characters = characters[:-1]\n\n warning_list = []\n\n content_warning = soup.find_all(\"div\",{\"class\":\"tags tags--plain\"})\n\n for x in content_warning:\n x = x.text.replace(\"\\n\",\"\").replace(\"Content Warning\",\"\")\n warning_list.append(x)\n\n reviews = rsoup.find_all(\"div\", {\"class\": \"pure-1 userContent readMore\"})\n review_list = []\n\n for x in reviews:\n review_list.append(x)\n\n reviews = []\n\n for x in review_list:\n string = \"\"\n while True:\n try:\n x = x.find(\"p\")\n x = x.getText()\n string += f\"{x}\\n\"\n except:\n break\n\n reviews.append(string)\n\n dict = {}\n dict[\"title\"] = soup.find(\"meta\", property=\"og:title\")[\"content\"]\n dict[\"description\"] = soup.find(\"meta\", property=\"og:description\")[\n \"content\"\n ]\n dict[\"url\"] = soup.find(\"meta\", property=\"og:url\")[\"content\"]\n dict[\"type\"] = soup.find(\"meta\", property=\"og:type\")[\"content\"]\n dict[\"size\"] = soup.find(\"div\", {\"class\":\"pure-1 md-1-5\"}).text.replace(\"\\n\", \"\")\n dict[\"year\"] = soup.find(\"span\", {\"class\":\"iconYear\"}).text.replace(\"\\n\", \"\")\n dict[\"rating\"] = soup.find(\"div\", {\"class\":\"avgRating\"}).text.replace(\"\\n\",\"\")\n dict[\"rank\"] = rank\n dict[\"author\"] = soup.find(\"meta\", property=\"book:author\")[\"content\"]\n dict[\"author\"] = dict[\"author\"].replace(\n \"https://www.anime-planet.com/people/\", \"\"\n )\n dict[\"cover\"] = soup.find(\"meta\", property=\"og:image\")[\"content\"]\n dict[\"tags\"] = tags_list\n dict[\"content warning\"] = warning_list\n dict[\"characters\"] = characters\n dict[\"reviews\"] = reviews\n return dict\n else:\n return \"We could not find that.\"\n\n async def get_manga_description(self, manga: str) -> str:\n \"\"\"\n Get manga description.\n \"\"\"\n x = await self.get_manga_json(Manga, manga)\n try:\n return x[\"description\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_url(self, manga: str) -> 
str:\n \"\"\"\n Get Anime-Planet link of a manga.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"url\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_size(self, manga: str) -> str:\n \"\"\"\n Get size of a manga.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"size\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_year(self, manga: str) -> str:\n \"\"\"\n Get the years the manga ran.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"year\"]\n except:\n return \"[year] We could not find that\"\n\n async def get_manga_rating(self, manga: str) -> str:\n \"\"\"\n Get rating of a manga according to Anime-Planet.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"rating\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_rank(self, manga: str) -> str:\n \"\"\"\n Get rank of the manga according to Anime-Planet.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"rank\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_cover(self, manga: str) -> str:\n \"\"\"\n Get cover image of manga.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"cover\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_author(self, manga: str) -> str:\n \"\"\"\n Get author of a manga.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"author\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_tags(self, manga: str) -> list:\n \"\"\"\n Get the tags of a manga.\n \"\"\"\n\n x = await self.get_manga_json(manga)\n try:\n return x[\"tags\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_content_warning(self, manga: str) -> list:\n \"\"\"\n Get content warning of a manga.\n \"\"\"\n x = await self.get_manga_json(manga)\n try:\n return x[\"content warning\"]\n except:\n return \"We could not find that\"\n\n async def get_manga_reviews(self, manga: str) -> list:\n \"\"\"\n Get the reviews of a manga.\n \"\"\"\n\n x = await self.get_manga_json(manga)\n\n try:\n return x[\"reviews\"]\n except:\n return \"We could not find that.\"\n\n async def get_manga_characters(self, manga: str) -> list:\n \"\"\"\n Get the characters of a manga.\n \"\"\"\n manga = format(manga)\n r = await req(\"https://www.anime-planet.com/manga/{}/characters\".format(manga))\n soup = BeautifulSoup(r, \"html5lib\")\n\n character_list = []\n\n characters = soup.find_all(\"a\", {\"class\":\"name\"})\n\n for i in characters:\n character_list.append(i.text)\n \n try:\n return character_list\n except:\n return \"We could not find that.\"\n\n async def get_popular_manga(self) -> list:\n \"\"\"\n Gets current popular manga according to Anime-Planet.\n \"\"\"\n\n r = await req(\"https://www.anime-planet.com/manga/all\")\n soup = BeautifulSoup(r, \"html5lib\")\n\n try:\n x = soup.find_all(\"ul\", {\"class\": \"cardDeck cardGrid\"})\n\n list = []\n\n for ultag in x:\n for y in ultag.find_all(\"li\"):\n y = y.text.replace(\"Add to list \", \"\").replace(\"\\n\", \"\")\n list.append(y)\n\n return list\n except:\n return \"We could not find that\"\n\n#Made async/aiohttp friendly by TheOnlyWayUp, originally made by centipede000.\n","sub_path":"AniManga/Manga.py","file_name":"Manga.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"652345738","text":"bigwigs = open('/oak/stanford/groups/akundaje/projects/alzheimers_parkinsons/merged_tagAligns_outputs/fc.signal.unique.bigwig','r').read().strip().split('\\n') \nidr_peaks = open('/oak/stanford/groups/akundaje/projects/alzheimers_parkinsons/merged_tagAligns_outputs/idr.optimal.narrowPeaks.txt','r').read().strip().split('\\n') \nambig_peaks = open('/oak/stanford/groups/akundaje/projects/alzheimers_parkinsons/merged_tagAligns_outputs/ambig.optimal.narrowPeaks.2.txt','r').read().strip().split('\\n') \nsamples=open('/oak/stanford/groups/akundaje/projects/alzheimers_parkinsons/merged_tagAligns_outputs/samples.txt','r').read().strip().split('\\n')\nadpd_tasks = 'adpd.tasks.tsv'\nsample_dict=dict()\n#build the tasks file \nfor sample in samples:\n sample_dict[sample]=[]\n for idr_peaks_file in idr_peaks:\n if idr_peaks_file.__contains__(sample):\n sample_dict[sample].append(idr_peaks_file)\n break\n for bigwig_file in bigwigs:\n if bigwig_file.__contains__(sample):\n sample_dict[sample].append(bigwig_file)\n break \n for ambig_peaks_file in ambig_peaks:\n if ambig_peaks_file.__contains__(sample):\n sample_dict[sample].append(ambig_peaks_file)\n break\noutf=open(adpd_tasks,'w')\nfor sample in sample_dict:\n outf.write(sample+'\\t'+'\\t'.join(sample_dict[sample])+'\\n')\n \n","sub_path":"nn_training/model_inputs/generate_tasks_file.py","file_name":"generate_tasks_file.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"380122299","text":"import functions\nimport numpy as np\n\n\"\"\"\nlocate the Numpy array inputs\n\"\"\"\n\n#training datasets\n#class\ntrain09 = np.load('Samples/SLC off/C_train09.npy')\n#surface reflectance value\ntrain_array_09 = np.load('Samples/SLC off/C_train_array_sf_09.npy')\n\n#class\ntrain11 = np.load('Samples/SLC off/C_train11.npy')\n#surface reflectance value\ntrain_array_11 = np.load('Samples/SLC off/C_train_array_sf_11.npy')\n\n#ground truth datasets for validation\ntest09 = np.load('Samples/SLC off/C_test09.npy')\ntest11 = np.load('Samples/SLC off/C_test11.npy')\n\n\"\"\"\nlocate the satellite images\n\"\"\"\n#sat image 2009\nb1_09 = 'L2 imagery/2009/clip_b1r.tif'\nb2_09 = 'L2 imagery/2009/clip_b2r.tif'\nb3_09 = 'L2 imagery/2009/clip_b3r.tif'\nb4_09 = 'L2 imagery/2009/clip_b4r.tif'\nb5_09 = 'L2 imagery/2009/clip_b5r.tif'\nb7_09 = 'L2 imagery/2009/clip_b7r.tif'\nndvi_09 = 'L2 imagery/2009/ndvi.tif'\nndwi_09 = 'L2 imagery/2009/ndwi.tif'\nmndwi1_09 = 'L2 imagery/2009/mndwi1.tif'\nmndwi2_09 = 'L2 imagery/2009/mndwi2.tif'\nndbi_09 = 'L2 imagery/2009/ndbi.tif'\nmndbi_09 = 'L2 imagery/2009/mndbi.tif'\n\n#sat image 2011\nb1_11 = 'L2 imagery/2011/clip_b1r.tif'\nb2_11 = 'L2 imagery/2011/clip_b2r.tif'\nb3_11 = 'L2 imagery/2011/clip_b3r.tif'\nb4_11 = 'L2 imagery/2011/clip_b4r.tif'\nb5_11 = 'L2 imagery/2011/clip_b5r.tif'\nb7_11 = 'L2 imagery/2011/clip_b7r.tif'\nndvi_11 = 'L2 imagery/2011/ndvi.tif'\nndwi_11 = 'L2 imagery/2011/ndwi.tif'\nmndwi1_11 = 'L2 imagery/2011/mndwi1.tif'\nmndwi2_11 = 'L2 imagery/2011/mndwi2.tif'\nndbi_11 = 'L2 imagery/2011/ndbi.tif'\nmndbi_11 = 'L2 imagery/2011/mndbi.tif'\n\n\"\"\"\nlocate the test shapefiles\n\"\"\"\nfile_train09_shp = 'Samples/SLC off/2percent/s09train.shp'\nfile_train11_shp = 'Samples/SLC off/2percent/s11train.shp'\nfile_test09_shp = 'Samples/SLC off/2percent/s09test.shp'\nfile_test11_shp = 'Samples/SLC off/2percent/s11test.shp'\n\n#-----------------------------------------------------------------------------------------------\n#read raster values and combine them into train_array (test datasets)\n\n#2009\nb1_09_t = functions.extract_values(shp = file_test09_shp, raster = b1_09)\nb2_09_t = functions.extract_values(shp = file_test09_shp, raster = b2_09)\nb3_09_t = functions.extract_values(shp = file_test09_shp, raster = b3_09)\nb4_09_t = functions.extract_values(shp = file_test09_shp, raster = b4_09)\nb5_09_t = functions.extract_values(shp = file_test09_shp, raster = b5_09)\nb7_09_t = functions.extract_values(shp = file_test09_shp, raster = b7_09)\nndvi_09_t = functions.extract_values(shp = file_test09_shp, raster = ndvi_09)\nndwi_09_t = functions.extract_values(shp = file_test09_shp, raster = ndwi_09)\nmndwi1_09_t = functions.extract_values(shp = file_test09_shp, raster = mndwi1_09)\nmndwi2_09_t = functions.extract_values(shp = file_test09_shp, raster = mndwi2_09)\nndbi_09_t = functions.extract_values(shp = file_test09_shp, raster = ndbi_09)\nmndbi_09_t = functions.extract_values(shp = file_test09_shp, raster = mndbi_09)\n\ntest_09 = functions.combine_bands_sf(b1 = b1_09_t, b2 = b2_09_t, b3 = b3_09_t, b4 = b4_09_t, \n\tb5 = b5_09_t, b7 = b7_09_t, ndvi = ndvi_09_t, ndwi = ndwi_09_t, mndwi1 = mndwi1_09_t, \n\tmndwi2 = mndwi2_09_t, ndbi = ndbi_09_t, mndbi = mndbi_09_t,\n\tmultiband_array_file = 'Samples/SLC off/C_test_array_sf_09.npy')\n\n#2011\nb1_11_t = functions.extract_values(shp = file_test11_shp, raster = b1_11)\nb2_11_t = functions.extract_values(shp = file_test11_shp, raster = b2_11)\nb3_11_t = functions.extract_values(shp = 
file_test11_shp, raster = b3_11)\nb4_11_t = functions.extract_values(shp = file_test11_shp, raster = b4_11)\nb5_11_t = functions.extract_values(shp = file_test11_shp, raster = b5_11)\nb7_11_t = functions.extract_values(shp = file_test11_shp, raster = b7_11)\nndvi_11_t = functions.extract_values(shp = file_test11_shp, raster = ndvi_11)\nndwi_11_t = functions.extract_values(shp = file_test11_shp, raster = ndwi_11)\nmndwi1_11_t = functions.extract_values(shp = file_test11_shp, raster = mndwi1_11)\nmndwi2_11_t = functions.extract_values(shp = file_test11_shp, raster = mndwi2_11)\nndbi_11_t = functions.extract_values(shp = file_test11_shp, raster = ndbi_11)\nmndbi_11_t = functions.extract_values(shp = file_test11_shp, raster = mndbi_11)\n\ntest_11 = functions.combine_bands_sf(b1 = b1_11_t, b2 = b2_11_t, b3 = b3_11_t, b4 = b4_11_t, \n\tb5 = b5_11_t, b7 = b7_11_t, ndvi = ndvi_11_t, ndwi = ndwi_11_t, mndwi1 = mndwi1_11_t, \n\tmndwi2 = mndwi2_11_t, ndbi = ndbi_11_t, mndbi = mndbi_11_t,\n\tmultiband_array_file = 'Samples/SLC off/C_test_array_sf_11.npy')\n\n#-----------------------------------------------------------------------------------------------\n#train and predict with RF\n\n#2009\n#100 trees\ntest_result_09_100 = functions.train_rf(trees = 100, maxfeatures = None, \n\ttrain_array = train_array_09, gt_array = train09, \n\tmodel_sav = 'Models/C_rf_sf_09_100trees.sav', \n\timg = test_09,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_09_100.npy')\n# None == n_features\n\n#200 trees\ntest_result_09_200 = functions.train_rf(trees = 200, maxfeatures = None, \n\ttrain_array = train_array_09, gt_array = train09, \n\tmodel_sav = 'Models/C_rf_sf_09_200trees.sav', \n\timg = test_09,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_09_200.npy')\n# None == n_features\n\n#300 trees\ntest_result_09_300 = functions.train_rf(trees = 300, maxfeatures = None, \n\ttrain_array = train_array_09, gt_array = train09, \n\tmodel_sav = 'Models/C_rf_sf_09_300trees.sav', \n\timg = test_09,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_09_300.npy')\n# None == n_features\n\n#400 trees\ntest_result_09_400 = functions.train_rf(trees = 400, maxfeatures = None, \n\ttrain_array = train_array_09, gt_array = train09, \n\tmodel_sav = 'Models/C_rf_sf_09_400trees.sav', \n\timg = test_09,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_09_400.npy')\n# None == n_features\n\n#500 trees\ntest_result_09_500 = functions.train_rf(trees = 500, maxfeatures = None, \n\ttrain_array = train_array_09, gt_array = train09, \n\tmodel_sav = 'Models/C_rf_sf_09_500trees.sav', \n\timg = test_09,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_09_500.npy')\n# None == n_features\n\n#2011\n#100 trees\ntest_result_11_100 = functions.train_rf(trees = 100, maxfeatures = None, \n\ttrain_array = train_array_11, gt_array = train11, \n\tmodel_sav = 'Models/C_rf_sf_11_100trees.sav', \n\timg = test_11,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_11_100.npy')\n# None == n_features\n\n#200 trees\ntest_result_11_200 = functions.train_rf(trees = 200, maxfeatures = None, \n\ttrain_array = train_array_11, gt_array = train11, \n\tmodel_sav = 'Models/C_rf_sf_11_200trees.sav', \n\timg = test_11,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_11_200.npy')\n# None == n_features\n\n#300 trees\ntest_result_11_300 = functions.train_rf(trees = 300, maxfeatures = None, \n\ttrain_array = train_array_11, gt_array = train11, \n\tmodel_sav = 'Models/C_rf_sf_11_300trees.sav', \n\timg = 
test_11,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_11_300.npy')\n# None == n_features\n\n#400 trees\ntest_result_11_400 = functions.train_rf(trees = 400, maxfeatures = None, \n\ttrain_array = train_array_11, gt_array = train11, \n\tmodel_sav = 'Models/C_rf_sf_11_400trees.sav', \n\timg = test_11,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_11_400.npy')\n# None == n_features\n\n#500 trees\ntest_result_11_500 = functions.train_rf(trees = 500, maxfeatures = None, \n\ttrain_array = train_array_11, gt_array = train11, \n\tmodel_sav = 'Models/C_rf_sf_11_500trees.sav', \n\timg = test_11,\n\tresult_array_file = 'L2 imagery/Results/C_test_result_sf_11_500.npy')\n# None == n_features\n\n\n#-----------------------------------------------------------------------------------------------\n# calculate accuracy\n\nprint(\"Case C (n(samples) = 2% n(raster pixels)\")\nprint(\" \")\nfunctions.test_accuracy(year = 2009, trees = 100, test_array = test_result_09_100, gt_test_array = test09)\nfunctions.test_accuracy(year = 2009, trees = 200, test_array = test_result_09_200, gt_test_array = test09)\nfunctions.test_accuracy(year = 2009, trees = 300, test_array = test_result_09_300, gt_test_array = test09)\nfunctions.test_accuracy(year = 2009, trees = 400, test_array = test_result_09_400, gt_test_array = test09)\nfunctions.test_accuracy(year = 2009, trees = 500, test_array = test_result_09_500, gt_test_array = test09)\n\nfunctions.test_accuracy(year = 2011, trees = 100, test_array = test_result_11_100, gt_test_array = test11)\nfunctions.test_accuracy(year = 2011, trees = 200, test_array = test_result_11_200, gt_test_array = test11)\nfunctions.test_accuracy(year = 2011, trees = 300, test_array = test_result_11_300, gt_test_array = test11)\nfunctions.test_accuracy(year = 2011, trees = 400, test_array = test_result_11_400, gt_test_array = test11)\nfunctions.test_accuracy(year = 2011, trees = 500, test_array = test_result_11_500, gt_test_array = test11)\n","sub_path":"process_case_C_sf_testonly.py","file_name":"process_case_C_sf_testonly.py","file_ext":"py","file_size_in_byte":8785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"256462672","text":"import os\nimport numpy as np\nimport keras.layers as L\nimport keras.models as M\nfrom keras.layers.core import RepeatVector\nfrom keras.layers.wrappers import TimeDistributed\nimport h5py\nimport time\nfrom make_songlist import songlist2vec\nfrom make_songlist import make_songlist\nfrom chord2vec import chord2vec\n\n\nmaxlen = 136\nn_hidden = 25\nn_in = 10\nc_max = 5\nn_max = 19\nc_BAR = chord2vec('./models/chord2vec.model', 'bar')\ndouble_bar = c_max * 2\n\n\n\nstime = time.time()\n\nmodel = M.Sequential()\n\n#encoder\nmodel.add(L.LSTM(n_hidden, input_shape=(n_in, 5)))\n\n#decoder\nmodel.add(RepeatVector(n_max))\nmodel.add(L.LSTM(n_hidden, return_sequences=True))\n\n\nmodel.add(TimeDistributed(L.Dense(maxlen)))\nmodel.add(L.Activation('sigmoid'))\noptimizers = M.optimizers.adam(lr=0.001)\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers, metrics=['accuracy'])\n\n\n\n#train\nfor i, file in enumerate(os.listdir(\"./MusicXml\")):\n print(str(i) + \" file : \" + file)\n\n songlist = make_songlist(\"./MusicXml/\" + file)\n x_vec_list, y_vec_list = songlist2vec(songlist)\n\n data_len = len(x_vec_list)\n\n bar_num = int(len(y_vec_list) / n_max)\n\n InputData = []\n\n for i, n in enumerate(range(0, data_len, c_max)):\n tmp = x_vec_list[n:n + c_max]\n\n if i < 2:\n InputData.extend(tmp)\n\n else:\n InputData.extend(InputData[- c_max:])\n InputData.extend(tmp)\n\n X = np.array(InputData).reshape(bar_num, double_bar, 5)\n\n Y = np.array(y_vec_list).reshape(bar_num, n_max, maxlen)\n\n model.fit(X, Y, epochs=300, batch_size=20)\n\njson_string = model.to_json()\n\nopen('./models/sample_f50_e300.json', 'w').write(json_string)\nmodel.save_weights('./models/sample_f50_e300.hdf5')\n\netime = time.time()\ntotal_time = etime - stime\nprint('total time is ' + str(total_time))","sub_path":"trane.py","file_name":"trane.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"267900136","text":"import sys\nclass Solution:\n def longestIncreasingPath(self, matrix: List[List[int]]) -> int:\n m = len(matrix)\n if m == 0:\n return 0\n n = len(matrix[0])\n distances = [[-1 for i in range(n)] for j in range(m)]\n def helper(x, y, last):\n if x < 0 or y < 0 or x >= m or y >= n or last >= matrix[x][y]:\n return 0\n if distances[x][y] != -1:\n return distances[x][y]\n \n left = helper(x - 1, y, matrix[x][y])\n right = helper(x + 1, y, matrix[x][y])\n up = helper(x, y - 1, matrix[x][y])\n down = helper(x, y + 1, matrix[x][y])\n distances[x][y] = 1 + max(left, right, up, down)\n return distances[x][y]\n \n res = 0\n for i in range(m):\n for j in range(n):\n tmp = helper(i, j, -sys.maxsize)\n res = max(res, tmp)\n return res\n","sub_path":"leetcode/329. Longest Increasing Path in a Matrix.py","file_name":"329. Longest Increasing Path in a Matrix.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"248397737","text":"from ngram import Ngram\n\n\nclass TrainNgram:\n\n def __init__(self, word, ngram_size):\n self.word = word\n self.ngram_size = ngram_size\n self.word_dict = {}\n self.context_dict = {}\n self.lambda_word_dict = {}\n\n def train(self):\n bi_diff_word_dict = {}\n u_count_dict_key = {}\n count_dict_key = {}\n for word_line in self.word:\n count = 1\n while len(word_line) > count:\n ngram_count = 2\n word_dict = Ngram(self.word_dict, word_line[count])\n self.word_dict = word_dict.count_dict_key()\n\n count_dict_key = Ngram(count_dict_key, word_line[count - 1])\n count_dict_key = count_dict_key.count_dict_key()\n\n context_dict = Ngram(self.context_dict, '')\n self.context_dict = context_dict.count_dict_key()\n\n while self.ngram_size >= ngram_count:\n n_word = []\n for counter in range(0, ngram_count):\n unit_number = count - 1 + counter\n if len(word_line) <= unit_number:\n break\n n_word.append(word_line[unit_number])\n join_nword = ' '.join(n_word)\n word_dict = Ngram(self.word_dict, join_nword)\n self.word_dict = word_dict.count_dict_key()\n context_dict = Ngram(self.context_dict, join_nword)\n self.context_dict = context_dict.count_dict_key()\n ngram_count = ngram_count + 1\n bi_word = word_line[count - 1] + ' ' + word_line[count]\n bi_diff_word_dict.update({bi_word: word_line[count - 1]})\n context_dict = Ngram(self.context_dict, word_line[count - 1])\n self.context_dict = context_dict.count_dict_key()\n\n count = count + 1\n \"\"\" Bell Smoothing \"\"\"\n for k, v in bi_diff_word_dict.items():\n words = k.split(' ')\n if v in u_count_dict_key:\n bi_value = u_count_dict_key[v]\n u_count_dict_key.update({v: bi_value + 1})\n else:\n u_count_dict_key[v] = 1\n\n for k in count_dict_key.keys():\n lambda_w = 1 - (1.0 * u_count_dict_key[k] / (u_count_dict_key[k] + count_dict_key[k]))\n self.lambda_word_dict.update({k: lambda_w})\n\n print({k: v for k, v in self.word_dict.items()})\n for ngram, count in self.word_dict.items():\n words = ngram.split(' ')\n words.pop()\n if len(words) < 2:\n context = ''.join(words)\n else:\n context = ' '.join(words)\n prob = 1.0 * self.word_dict[ngram] / self.context_dict[context]\n self.word_dict.update({ngram: prob})\n","sub_path":"NLP_programing/chapter2/train_ngram.py","file_name":"train_ngram.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"610998595","text":"import os\nimport json\nfrom glob import glob\n\nimport geodaisy.converters as convert\nimport visvalingamwyatt as vw\n\nfor json_file in glob('*.json'):\n print(json_file)\n\n with open(json_file, 'r') as fh_in:\n j = json.load(fh_in)\n shape = j['View'][0]['Result'][0]['Location']['Shape']['Value']\n\n try:\n shape = json.loads(convert.wkt_to_geojson(shape))\n shape = vw.simplify_geometry(shape, ratio=0.2)\n j['View'][0]['Result'][0]['Location']['Shape']['Value'] = convert.geojson_to_wkt(shape)\n except (ValueError, TypeError):\n print('>>>>')\n\n with open('{}-simplified{}'.format(*os.path.splitext(json_file)), 'w') as fh_out:\n json.dump(j, fh_out)\n\n","sub_path":"public/data/geo_countries/simplify.py","file_name":"simplify.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"246828350","text":"import socket\nimport threading\nfrom pathlib import Path\n\nclass FtpServer:\n\n def __init__(self, ip='127.0.0.1', port=9999, ftpdirm ='C:/Ftp'):#初始化\n self.sock = socket.socket()\n self.addr = (ip, port)\n self.ftpdirm = ftpdirm\n self.ftppathdir = Path(ftpdirm)\n\n def start(self):#启动监听\n self.sock.bind(self.addr)#绑定\n self.sock.listen()#监听\n #accept会阻塞主进程,所以开个新线程\n threading.Thread(target=self.accept).start()\n\n def accept(self):#连接到Ftp\n while True:\n sock, client = self.sock.accept()\n #开新线程\n threading.Thread(target=self.recv, args=(sock,)).start()\n\n\n def recv(self, sock):\n while True:\n #获取命令\n cmd = sock.recv(1024).decode()\n if cmd == 'put':\n #获取文件名\n filename = sock.recv(1024).decode()\n #获取文件大小\n filesize = sock.recv(1024).decode()\n filesize = int(filesize)\n sendsize = 0\n #文件路径\n ftpdir = Path.joinpath(self.ftppathdir, filename)\n #上传文件\n with open(ftpdir, 'wb') as f:\n while sendsize != filesize:\n data = sock.recv(1024)\n f.write(data)\n sendsize += len(data)\n print('{}已接收'.format(filename))\n\n if cmd == 'get':\n #获取文件名\n filename = sock.recv(1024).decode()\n threading.Thread(target=self.retuen, args=(filename, sock)).start()\n\n if cmd == 'ls':\n threading.Thread(target=self.ls, args=(sock,)).start()\n\n\n def retuen(self, filename, sock):\n #判断文件是否存在\n if filename in (i.name for i in self.ftppathdir.iterdir()):\n #文件路径\n filepath = self.ftpdirm + '/' + filename\n #文件大小\n ftpdir = Path(filepath)\n filesize = Path.stat(ftpdir).st_size\n # 发送文件的大小\n sock.send(str(filesize).encode())\n #用来判断\n getedsize = 0\n #下载\n with open(ftpdir, 'rb') as f:\n while getedsize != filesize:\n data = f.read(1024)\n sock.send(data)\n getedsize += len(data)\n print('{}已被下载'.format(filename))\n\n else:\n self.sock.send('file is not exist'.encode())\n\n def ls(self, sock):\n #文件数量\n filenum = len(list(self.ftppathdir.iterdir()))\n #发送文件数量\n sock.send(str(filenum).encode())\n sendnum = 0\n #发送目录\n while sendnum < filenum:\n for i in self.ftppathdir.iterdir():\n sock.send(str(i.name).encode())\n sendnum += 1\n\n def stop(self):#停止服务\n self.sock.close()\n\ncs = FtpServer()\ncs.start()\n\n\n# 逻辑上没有什么问题,有些细节的地方可以参看下学号 p17027小伙伴的:像进度条,断点续传之类,认证之类的\n","sub_path":"P17054-顾园凯/homework/sixth/ftpsever.py","file_name":"ftpsever.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"353335169","text":"#!/usr/bin/env python\n\n\nimport sys\nfrom zipfile import ZipFile\nfrom argparse import ArgumentParser\nfrom sploitego.xmltools.objectify import objectify\nfrom sploitego.commands.common import cmd_name\n\n\n__author__ = 'Nadeem Douba'\n__copyright__ = 'Copyright 2012, Sploitego Project'\n__credits__ = ['Nadeem Douba']\n\n__license__ = 'GPL'\n__version__ = '0.1'\n__maintainer__ = 'Nadeem Douba'\n__email__ = 'ndouba@gmail.com'\n__status__ = 'Development'\n\n\nparser = ArgumentParser(\n description='Convert Maltego graph files (*.mtgx) to comma-separated values (CSV) file.',\n usage='sploitego %s ' % cmd_name(__name__)\n)\n\nparser.add_argument(\n 'graph',\n metavar='',\n help='The name of the graph file you wish to convert to CSV.',\n)\n\n\ndef parse_args(args):\n return parser.parse_args(args)\n\n\ndef help():\n parser.print_help()\n\n\ndef description():\n return parser.description\n\n\ndef run(args):\n\n opts = parse_args(args)\n\n zip = ZipFile(opts.graph)\n graphs = filter(lambda x: x.endswith('.graphml'), zip.namelist())\n\n for f in graphs:\n csv = open(f.split('/')[1].split('.')[0] + '.csv', 'w')\n xml = zip.open(f).read()\n o = objectify(xml)\n for node in o.graph.node:\n for d in node.data:\n if 'MaltegoEntity' in d:\n csv.write(('\"Entity Type=%s\",' % d.MaltegoEntity.type).strip())\n for prop in d.MaltegoEntity.Properties.Property:\n if '\"' in prop.Value:\n prop.Value.replace('\"', '\"\"')\n csv.write(('\"%s=%s\",' % (prop.displayName, prop.Value)).strip())\n csv.write('\\n')","sub_path":"src/sploitego/commands/mtgx2csv.py","file_name":"mtgx2csv.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"239111606","text":"class Wapen:\n def prick(self, heat):\n heat -= 500\n print(heat)\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.wapen = Wapen()\n\np1 = Person(\"日泰\",\"78\")\nheat = 1000\np1.wapen.prick(heat)\n","sub_path":"python_test/people_wapon.py","file_name":"people_wapon.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"644243850","text":"# 주어진 수 N개 중에서 소수가 몇 개인지 찾아서 출력하는 프로그램을 작성하시오.\n# 첫 줄에 수의 개수 N이 주어진다. N은 100이하이다. 다음으로 N개의 수가 주어지는데 수는 1,000 이하의 자연수이다.\n\nN = int(input())\nnumbers = map(int, input().split())\np = 0 # 소수의 개수\n\n\ndef is_prime_number(x):\n if x == 1:\n return False\n for i in range(2, x):\n if x % i == 0:\n return False\n return True\n\n\nfor i in numbers:\n if is_prime_number(i):\n p += 1\n\nprint(p)\n","sub_path":"python_/baekjoon/기본수학2/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"85102332","text":"a = 1\nb = a * a\n\nprint(a, b)\n\n\nclass ClassVar:\n\ta = 2\n\tb = a * a\n\nprint(ClassVar.a, ClassVar.b)\n\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass Instance:\n\ta = 3\n\tb = a * a\n\ninst = Instance()\nprint(inst.a, inst.b)\n","sub_path":"docs/public-field-syntax/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"526689043","text":"import hlt\nfrom hlt import constants\nfrom hlt.positionals import Direction\nimport random\nimport logging\nfrom hlt.positionals import Position\nimport math\n\n# As soon as you call \"ready\" function below, the 2 second per turn timer will start.\ngame = hlt.Game()\ngame.ready(\"old convexbot\")\nlogging.info(\"Successfully created bot! My Player ID is {}.\".format(game.my_id))\n\nship_states = {}\ndropoffs = 0\nbuildShips = True\nbuildDropoffs = False\n\n#Magic Numbers\nmaxDropoffs = 3\nmaxShipBuildingTurn = 250\nminHaliteNeededForShipBuilding = 1500\npercentOfMaxHaliteToTriggerDeposit = .70\nminDropoffBuildingTurn = 50\nminDropoffBuildingHalite = 6500\nminDistancefromShipyard = game.game_map.height / 6\nratioThreshold = 10\n\ndef calcDistance(theShip, theDropoff):\n # Distance between two points\n # Example: calcDistance(ship.position.x, ship.position.y, me.shipyard.position.x, me.shipyard.position.y)\n return math.hypot(theDropoff.x - theShip.x, theDropoff.y - theShip.y)\n\n\"\"\" <<>> \"\"\"\n\nwhile True:\n game.update_frame()\n\n me = game.me\n game_map = game.game_map\n command_queue = []\n direction_order = [Direction.North, Direction.South, Direction.East, Direction.West, Direction.Still]\n position_choices = []\n\n for ship in me.get_ships():\n if ship.id not in ship_states:\n ship_states[ship.id] = \"collect\"\n\n for ship in me.get_ships():\n\n #Get the ship to dropoff ratio then use that to determine if we need to build ships or drop offs\n shipToDropoffRatio = len(me.get_ships()) / (len(me.get_dropoffs())+1)\n if shipToDropoffRatio < ratioThreshold:\n #we need to build ships\n buildShips = True\n buildDropoffs = False\n else:\n # we need to build dropoffs\n buildShips = False\n buildDropoffs = True\n\n #Find the closest drop off point\n returnPoint = me.shipyard.position\n returnDistance = calcDistance(ship.position, me.shipyard.position)\n for dropoff in me.get_dropoffs():\n # logging.info(f\"{dropoff.id} - {dropoff.position} is {calcDistance(ship.position, dropoff.position)} away from {ship}.\")\n if returnDistance > calcDistance(ship.position, dropoff.position):\n returnPoint = dropoff.position\n returnDistance = calcDistance(ship.position, dropoff.position)\n # logging.info(f\"Destination is {returnPoint}\")\n\n # Spits out map cords for N,S,E,W, and ship position\n # Example: [Position(19, 9), Position(19, 11), Position(20, 10), Position(18, 10), Position(19, 10)]\n position_options = ship.position.get_surrounding_cardinals() + [ship.position]\n\n # Stores the movement options mapped to actual map cord\n # Example:{(0, -1): Position(29, 6), (0, 1): Position(29, 8), (1, 0): Position(30, 7), (-1, 0): Position(28, 7), (0, 0): Position(29, 7)} \n position_dict = {}\n\n for n, direction in enumerate(direction_order):\n position_dict[direction] = position_options[n]\n\n # Stores amount of halite from the surrounding movement options\n # Example: {(0, -1): 206, (0, 1): 90, (1, 0): 173, (-1, 0): 144, (0, 0): 222}\n # An important part of this is that it checks position_choices. \n # If it's found to already be in there (becuase another ship has \"claimed\" this position choice already), it's not added. 
\n # This prevents collisions.\n halite_dict = {}\n \n for direction in position_dict:\n position = position_dict[direction]\n halite_amount = game_map[position].halite_amount\n if position_dict[direction] not in position_choices:\n if direction == Direction.Still:\n halite_dict[direction] = halite_amount * 3\n else:\n halite_dict[direction] = halite_amount\n\n # Ship is enroute to deposit. naive navigate to the shipyard. \n if ship_states[ship.id] == \"deposit\":\n move = game_map.naive_navigate(ship, returnPoint)\n position_choices.append(position_dict[move])\n\n #Do we need to make a dropoff?\n if (game.turn_number > minDropoffBuildingTurn\n and me.halite_amount > minDropoffBuildingHalite\n and calcDistance(ship.position, me.shipyard.position) > minDistancefromShipyard\n and dropoffs < maxDropoffs\n and not game_map[ship.position].has_structure\n and buildDropoffs == True):\n command_queue.append(ship.make_dropoff())\n dropoffs += 1\n buildDropoffs = False\n # logging.info(f\"Dropoffs:{dropoffs}\")\n else:\n command_queue.append(ship.move(move))\n\n # If the ship is still, then it must have reached the shipyard. Set it back to collecting.\n if move == Direction.Still:\n ship_states[ship.id] = \"collect\"\n \n # Ship is set to collect. Move to the adjacent position with the most halite and collect.\n elif ship_states[ship.id] == \"collect\":\n directional_choice = max(halite_dict, key=halite_dict.get)\n position_choices.append(position_dict[directional_choice])\n command_queue.append(ship.move(game_map.naive_navigate(ship, position_dict[directional_choice])))\n\n if ship.halite_amount > constants.MAX_HALITE * percentOfMaxHaliteToTriggerDeposit:\n ship_states[ship.id] = \"deposit\"\n\n #Logging Stuff\n #ship to drop off ratio\n logging.info(\"Ship to dropoff ratio\")\n logging.info(len(me.get_ships()) / (len(me.get_dropoffs())+1))\n logging.info(f\"Build ships: {buildShips}\")\n logging.info(f\"Build Dropoffs: {buildDropoffs}\")\n\n # If the game is in the first 200 turns and you have enough halite, spawn a ship.\n # Only spawn a ship if you have over 2000 halite 11/17 0405\n # Don't spawn a ship if you currently have a ship at port, though - the ships will collide.\n if (game.turn_number <= maxShipBuildingTurn \n and me.halite_amount >= constants.SHIP_COST \n and me.halite_amount > minHaliteNeededForShipBuilding\n and buildShips == True \n and not game_map[me.shipyard].is_occupied):\n command_queue.append(me.shipyard.spawn())\n\n # Send your moves back to the game environment, ending this turn.\n game.end_turn(command_queue)","sub_path":"old-versions/ConvexBot2.py","file_name":"ConvexBot2.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"412096320","text":"# Copyright (c) 2017, Daniele Venzano\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains the entrypoint for the commandline Zoe client\n\"\"\"\n\nfrom datetime import datetime, timezone\nimport json\nimport logging\nimport os\nimport sys\nfrom argparse import ArgumentParser, Namespace, FileType, RawDescriptionHelpFormatter\nfrom typing import Tuple\n\nfrom tabulate import tabulate\n\nfrom zoe_cmd import utils\nfrom zoe_lib.info import ZoeInfoAPI\nfrom zoe_lib.exceptions import ZoeAPIException, InvalidApplicationDescription\nfrom zoe_lib.executions import ZoeExecutionsAPI\nfrom zoe_lib.services import ZoeServiceAPI\nfrom zoe_lib.applications import app_validate\nfrom zoe_lib.version import ZOE_API_VERSION\n\n\ndef _check_api_version(auth):\n \"\"\"Checks if there is a version mismatch between server and client.\"\"\"\n info_api = ZoeInfoAPI(auth['url'], auth['user'], auth['pass'])\n info = info_api.info()\n if info['api_version'] != ZOE_API_VERSION:\n print('Warning: this client understands ZOE API v. {}, but server talks v. {}'.format(ZOE_API_VERSION, info['api_version']))\n print('Warning: certain commands may not work correctly')\n print('Warning: please upgrade or downgrade your client to match the server version')\n\n\ndef app_validate_cmd(auth_, args):\n \"\"\"Validate an application description.\"\"\"\n app_descr = json.load(args.jsonfile)\n try:\n app_validate(app_descr)\n except InvalidApplicationDescription as e:\n print(e)\n else:\n print(\"Static validation OK\")\n\n\ndef exec_list_cmd(auth, args):\n \"\"\"List executions\"\"\"\n exec_api = ZoeExecutionsAPI(auth['url'], auth['user'], auth['pass'])\n filter_names = [\n 'status',\n 'name',\n 'user_id',\n 'limit',\n 'earlier_than_submit',\n 'earlier_than_start',\n 'earlier_than_end',\n 'later_than_submit',\n 'later_than_start',\n 'later_than_end'\n ]\n filters = {}\n for key, value in vars(args).items():\n if key in filter_names:\n filters[key] = value\n data = exec_api.list(**filters)\n if len(data) == 0:\n return\n tabular_data = [[e['id'], e['name'], e['user_id'], e['status']] for e in sorted(data.values(), key=lambda x: x['id'])]\n headers = ['ID', 'Name', 'User ID', 'Status']\n print(tabulate(tabular_data, headers))\n\n\ndef exec_get_cmd(auth, args):\n \"\"\"Gather information about an execution.\"\"\"\n exec_api = ZoeExecutionsAPI(auth['url'], auth['user'], auth['pass'])\n cont_api = ZoeServiceAPI(auth['url'], auth['user'], auth['pass'])\n execution = exec_api.get(args.id)\n if execution is None:\n print('Execution not found')\n else:\n print('Execution {} (ID: {})'.format(execution['name'], execution['id']))\n print('Application name: {}'.format(execution['description']['name']))\n print('Status: {}'.format(execution['status']))\n if execution['status'] == 'error':\n print('Last error: {}'.format(execution['error_message']))\n print()\n print('Time submit: {}'.format(datetime.fromtimestamp(execution['time_submit'], timezone.utc).astimezone()))\n\n if 
execution['time_start'] is None:\n print('Time start: {}'.format('not yet'))\n else:\n print('Time start: {}'.format(datetime.fromtimestamp(execution['time_start'], timezone.utc).astimezone()))\n\n if execution['time_end'] is None:\n print('Time end: {}'.format('not yet'))\n else:\n print('Time end: {}'.format(datetime.fromtimestamp(execution['time_end'], timezone.utc).astimezone()))\n print()\n\n endpoints = exec_api.endpoints(execution['id'])\n if endpoints is not None and len(endpoints) > 0:\n print('Exposed endpoints:')\n for endpoint in endpoints:\n print(' - {}: {}'.format(endpoint[0], endpoint[1]))\n else:\n print('This ZApp does not expose any endpoint')\n\n print()\n tabular_data = []\n for c_id in execution['services']:\n service = cont_api.get(c_id)\n service_data = [service['id'], service['name'], 'true' if service['essential'] else 'false', service['status'], service['backend_status'], service['backend_host'], service['error_message'] if service['error_message'] is not None else '']\n tabular_data.append(service_data)\n headers = ['ID', 'Name', 'Essential', 'Zoe status', 'Backend status', 'Host', 'Error message']\n print(tabulate(tabular_data, headers))\n\n\ndef exec_rm_cmd(auth, args):\n \"\"\"Delete an execution and kill it if necessary.\"\"\"\n exec_api = ZoeExecutionsAPI(auth['url'], auth['user'], auth['pass'])\n exec_api.delete(args.id)\n\n\ndef exec_kill_user_cmd(auth, args):\n \"\"\"Terminates all executions for the given user.\"\"\"\n exec_api = ZoeExecutionsAPI(auth['url'], auth['user'], auth['pass'])\n filters = {\n 'status': 'running',\n 'user_id': args.user_id\n }\n data = exec_api.list(**filters)\n print('Terminating {} executions belonging to user {}'.format(len(data), args.user_id))\n for execution in data:\n exec_api.terminate(execution)\n print('Execution {} terminated'.format(execution))\n\n\nENV_HELP_TEXT = '''To authenticate with Zoe you need to define three environment variables:\nZOE_URL: point to the URL of the Zoe Scheduler (ex.: http://localhost:5000/\nZOE_USER: the username used for authentication\nZOE_PASS: the password used for authentication\n\nor create a ~/.zoerc file (another location can be specified with --auth-file) like this:\nurl = xxx\nuser = yyy\npass = zzz\n\nEnvironment variable will override the values specified in the configuration file.\n'''\n\n\ndef process_arguments() -> Tuple[ArgumentParser, Namespace]:\n \"\"\"Parse command line arguments.\"\"\"\n parser = ArgumentParser(description=\"Zoe command-line administration client\", epilog=ENV_HELP_TEXT, formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('--debug', action='store_true', help='Enable debug output')\n parser.add_argument('--auth-file', type=str, help='Enable debug output', default=os.path.join(os.getenv('HOME', ''), '.zoerc'))\n\n subparser = parser.add_subparsers()\n\n # zapps\n argparser_zapp_validate = subparser.add_parser('zapp-validate', help='Validate an application description')\n argparser_zapp_validate.add_argument('jsonfile', type=FileType(\"r\"), help='Application description')\n argparser_zapp_validate.set_defaults(func=app_validate_cmd)\n\n # executions\n argparser_app_list = subparser.add_parser('exec-ls', help=\"List all executions for the calling user\")\n argparser_app_list.add_argument('--limit', type=int, help='Limit the number of executions')\n argparser_app_list.add_argument('--name', help='Show only executions with this name')\n argparser_app_list.add_argument('--user_id', help='Show only executions belonging to this user')\n 
argparser_app_list.add_argument('--status', choices=[\"submitted\", \"scheduled\", \"starting\", \"error\", \"running\", \"cleaning up\", \"terminated\"], help='Show only executions with this status')\n argparser_app_list.add_argument('--earlier-than-submit', help='Show only executions submitted earlier than this timestamp (seconds since UTC epoch)')\n argparser_app_list.add_argument('--earlier-than-start', help='Show only executions started earlier than this timestamp (seconds since UTC epoch)')\n argparser_app_list.add_argument('--earlier-than-end', help='Show only executions ended earlier than this timestamp (seconds since UTC epoch)')\n argparser_app_list.add_argument('--later-than-submit', help='Show only executions submitted later than this timestamp (seconds since UTC epoch)')\n argparser_app_list.add_argument('--later-than-start', help='Show only executions started later than this timestamp (seconds since UTC epoch)')\n argparser_app_list.add_argument('--later-than-end', help='Show only executions ended later than this timestamp (seconds since UTC epoch)')\n argparser_app_list.set_defaults(func=exec_list_cmd)\n\n argparser_execution_get = subparser.add_parser('exec-get', help=\"Get execution status\")\n argparser_execution_get.add_argument('id', type=int, help=\"Execution id\")\n argparser_execution_get.set_defaults(func=exec_get_cmd)\n\n argparser_execution_rm = subparser.add_parser('exec-rm', help=\"Deletes an execution\")\n argparser_execution_rm.add_argument('id', type=int, help=\"Execution id\")\n argparser_execution_rm.set_defaults(func=exec_rm_cmd)\n\n argparser_execution_kill_user = subparser.add_parser('user-terminate', help=\"Terminate all executions of a user\")\n argparser_execution_kill_user.add_argument('user_id', help=\"User name\")\n argparser_execution_kill_user.set_defaults(func=exec_kill_user_cmd)\n\n return parser, parser.parse_args()\n\n\ndef zoe():\n \"\"\"Main entrypoint.\"\"\"\n parser, args = process_arguments()\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n if not hasattr(args, \"func\"):\n parser.print_help()\n return\n\n auth = utils.read_auth(args)\n if auth is None:\n sys.exit(1)\n\n try:\n _check_api_version(auth)\n args.func(auth, args)\n except ZoeAPIException as e:\n print(e.message)\n except KeyboardInterrupt:\n print('CTRL-C pressed, exiting...')\n sys.exit(0)\n","sub_path":"zoe_cmd/entrypoint_admin.py","file_name":"entrypoint_admin.py","file_ext":"py","file_size_in_byte":9948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"438509300","text":"import matplotlib.pyplot as plt\nfrom matplotlib import ticker\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nimport cv2\nimport numpy as np\nfrom scipy import signal\n\n\n#Ouverture du flux video\ncap = cv2.VideoCapture('../TP2_Videos/Extrait4-Entracte-Poursuite_Corbillard(358p).m4v')\nret, frame1 = cap.read() # Passe a l'image suivante\nret, frame2 = cap.read()\n\nx,y,z=frame1.shape\n\nnextimYuv = cv2.cvtColor(frame2,cv2.COLOR_BGR2YUV) \nindex=1;\nimYuv = cv2.cvtColor(frame1,cv2.COLOR_BGR2YUV) # Passage en niveaux de yuv\ny=[]\nwhile(ret):\n\n\t#Calculs histogrammes noir blanc\n\thist= cv2.calcHist([imYuv], [0], None, [32], [0,255])\n\thist=cv2.normalize(hist, hist)\n\thist1= cv2.calcHist([nextimYuv], [0], None, [32], [0,255])\n\thist1=cv2.normalize(hist1, hist1)\n\t\n\t#Correlation entre histogrammes\n\ty.append(cv2.compareHist(hist, hist1,cv2.HISTCMP_CORREL))\n\n\t#Plot Correlation\n\tfig2, ax2 =plt.subplots()\t\n\tax2 = plt.gca()\n\tax2.plot(y)\n\tplt.xlabel(\"Index\")\n\tplt.ylabel(\"Corrélation entre frames\")\n\tplt.draw()\n\tfig2.canvas.draw()\n\timgt = np.fromstring(fig2.canvas.tostring_rgb(), dtype=np.uint8,\n sep='')\n\timgt = imgt.reshape(fig2.canvas.get_width_height()[::-1] + (3,))\n\timgt = cv2.cvtColor(imgt,cv2.COLOR_RGB2BGR)\n\tcv2.imshow(\"Correlation entre frames\",imgt)\n\n\t#Plot Histogramme\n\tfig, ax =plt.subplots()\n\tax.set_xlim([0, 32 - 1])\n\tax.set_ylim([0, 32 - 1])\n\tim = ax.imshow(hist)\n\tfig.colorbar(im)\n\tfig.canvas.draw()\n\timg = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8,\n sep='')\n\timg = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\timg = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n\t# display image with opencv or any operation you like\n\tcv2.imshow(\"plot\",img)\n\tcv2.imshow('Image et Champ de vitesses (Farnebäck)',frame2)\n\n\n\tk = cv2.waitKey(15) & 0xff\n\tif k == 27:\n\t\tbreak\n\telif k == ord('s'):\n\t\tcv2.imwrite('Frame_%04d.png'%index,frame2)\n\t\tcv2.imwrite('OF_hsv_%04d.png'%index,bgr)\n\timYuv = nextimYuv\n\tret, frame2 = cap.read()\n\tif (ret):\n\t\tnextimYuv = cv2.cvtColor(frame2,cv2.COLOR_BGR2YUV) \n\t\n \ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Detection-Changement-Plan-(Corelation-NoirBlanc).py","file_name":"Detection-Changement-Plan-(Corelation-NoirBlanc).py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"484633589","text":"# Author: Dan Ortiz\n# This file implements the Rail Fence cipher.\n\nfrom CipherInterface import CipherInterface\n\n\nclass RailFence(CipherInterface):\n key = 0\n file = \"\"\n\n def __init__(self):\n print(\"Child class RailFence created.\")\n\n def set_key(self, __key):\n self.key = int(__key)\n\n def encrypt(self, __input_file, __output_file):\n self.file = __input_file\n message_stream = []\n\n # Reads message from file\n read_file = open(self.file, 'r')\n for message in read_file:\n for character in message:\n message_stream.append(character)\n read_file.close()\n\n # Encrypts message\n encrypted_message = []\n for row in range(0, self.key):\n for position_in_message in range(0, len(message_stream), self.key):\n if position_in_message + row < len(message_stream):\n encrypted_message.append(message_stream[row + position_in_message])\n\n # Writes encrypted message to file\n cipher_file = open(__output_file, \"w\")\n cipher_file.write(\"\".join(encrypted_message))\n cipher_file.close()\n\n def decrypt(self, __input_file, __output_file):\n self.file = __input_file\n cipher_stream = []\n\n # Reads cipher from file\n read_file = open(self.file, 'r')\n for message in read_file:\n for character in message:\n cipher_stream.append(character)\n read_file.close()\n\n # Amount of letters that don't divide evenly with depth\n extra = len(cipher_stream) % self.key\n row_amount = int(len(cipher_stream)/self.key)\n\n # Put message into into matrix\n message = []\n longer_rows = 0\n position_in_cipher = 0\n for row in range(self.key):\n if longer_rows < extra:\n message.append(cipher_stream[position_in_cipher:position_in_cipher + row_amount + 1])\n longer_rows = longer_rows + 1\n position_in_cipher = position_in_cipher + row_amount + 1\n else:\n message.append(cipher_stream[position_in_cipher:position_in_cipher + row_amount])\n position_in_cipher = position_in_cipher + row_amount\n\n # Decrypts message\n decrypted_message = []\n row = 0\n column = 0\n position_in_message = 0\n while position_in_message < len(cipher_stream):\n if row == self.key:\n row = 0\n column = column + 1\n decrypted_message.append(message[row][column])\n row = row + 1\n position_in_message = position_in_message + 1\n\n # Writes decrypted message to output file\n decrypted_file = open(__output_file, \"w\")\n decrypted_file.write(\"\".join(decrypted_message))\n decrypted_file.close()\n\n\n# Test\n\"\"\"\ncipher = RailFence()\ncipher.set_key(3)\ncipher.encrypt(\"message.txt\", \"encrypted_file.txt\")\ncipher.decrypt(\"encrypted_file.txt\", \"decrypted_file.txt\")\n\"\"\"\n","sub_path":"RailFence.py","file_name":"RailFence.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"616766101","text":"from urllib.request import urlopen\nimport os\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nfrom albumentations import (\n BboxParams,\n HorizontalFlip,\n VerticalFlip,\n Resize,\n CenterCrop,\n RandomCrop,\n Crop,\n ShiftScaleRotate,\n HueSaturationValue,\n RandomContrast,\n RandomBrightness,\n Compose\n)\n# from modules.datasets import Car_dataset, batch_idx_fn\n\n\nBOX_COLOR = (255, 0, 0)\nTEXT_COLOR = (255, 255, 255)\n\n\ndef visualize_bbox(img, bbox, class_id, class_idx_to_name, color=BOX_COLOR, thickness=2):\n # x_min, y_min, x_max, _ = bbox\n # w, h =\n x_min, y_min, x_max, y_max = [int(i) for i in bbox]\n cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)\n class_name = class_idx_to_name[class_id]\n ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)\n cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)\n cv2.putText(img, class_name, (x_min, y_min - int(0.3 * text_height)), cv2.FONT_HERSHEY_SIMPLEX, 0.35,TEXT_COLOR, lineType=cv2.LINE_AA)\n return img\n\n\ndef visualize(annotations, category_id_to_name):\n img = annotations['image'].copy()\n for idx, bbox in enumerate(annotations['bboxes']):\n img = visualize_bbox(img, bbox, annotations['category_id'][idx], category_id_to_name)\n plt.figure(figsize=(12, 12))\n plt.imshow(img)\n plt.show()\n\ndef img_path2anno(image_path, transform):\n image = cv2.imread(image_path)\n base_path = image_path.split(\"Images\")[0]\n bbox_dir_path = base_path + \"Labels\"\n car_index = image_path.split(\"/\")[-2] # 000 or 001 or...\n text_file = image_path.split(\"/\")[-1].split(\".\")[0] + \".txt\"\n bbox_text_path = os.path.join(bbox_dir_path, car_index, text_file)\n category_id = []\n bboxes = []\n with open(bbox_text_path) as f:\n lines = f.readlines()\n for i in range(len(lines)):\n if i == 0:\n continue\n line = lines[i]\n label_bbox = [round(float(i), 5) for i in line.split(\"\\n\")[0].split(\" \")]\n label = label_bbox[4]\n bbox = label_bbox[:4]\n print(bbox)\n category_id.append(label)\n bboxes.append(bbox)\n\n annotations = {'image':image, 'bboxes':bboxes, 'category_id':category_id}\n augmented = transform(**annotations)\n return augmented\n\ndef img_bbox_show(image_path, transform, label_txt_path):\n annotations = img_path2anno(image_path, transform)\n with open(label_txt_path) as f:\n category_id_to_name = {i:j.strip() for i,j in enumerate(f.readlines())}\n visualize(annotations, category_id_to_name)\n\nif __name__ ==\"__main__\":\n image_path = \"/home/tomp11/ML/datasets/car/Images/000/005238.jpg\"\n label_txt_path = \"/home/tomp11/ML/datasets/car/label.txt\"\n transform = Compose([\n # HorizontalFlip(p=0.5),\n # ShiftScaleRotate(rotate_limit=(-30,30), p=0.5),\n # HueSaturationValue(p=0.5),\n # RandomContrast(p=0.5),\n # RandomBrightness(p=0.5),\n Resize(416, 416),\n # ToTensor(),\n ], bbox_params=BboxParams(format='pascal_voc', label_fields=['category_id']))\n img_bbox_show(image_path, transform, label_txt_path)\n # pass\n","sub_path":"src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"437020716","text":"import time\nimport winsound\nimport ctypes\n\ndef pomo(t, m):\n while t:\n mins, secs = divmod(t, 60)\n timeformat = '{:02d}:{:02d}'.format(mins, secs)\n ctypes.windll.kernel32.SetConsoleTitleA(timeformat.encode())\n print(timeformat, end=\"\\r\")\n time.sleep(1)\n t -= 1\n if m == 't':\n soundfile = \"j:/Users/jbauman/Documents/git/personal/pomo-timer/complete.wav\"\n if m == 'b':\n soundfile = \"j:/Users/jbauman/Documents/git/personal/pomo-timer/breakover.wav\"\n winsound.PlaySound(soundfile, winsound.SND_FILENAME|winsound.SND_ASYNC)\n\nif __name__ == '__main__':\n t = ''\n while(t != 'e'):\n t = input(\"Enter 't' for timer or 'b' for break or 'e' for exit: \\n\")\n if(t == 't'):\n pomo(1500, 't')\n print(\"Finished PomoTimer\")\n elif(t == 'b'):\n pomo(300, 'b')\n print(\"Finished Break\")\n","sub_path":"pomo-timer.py","file_name":"pomo-timer.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"314807693","text":"def num_flips(chars): \r\n\tcount = 0 \r\n\tis_pos = all(c == '+' for c in chars)\r\n\r\n\tif not chars or is_pos: \r\n\t\treturn count \r\n\r\n\ti = 0 \r\n\r\n\twhile not is_pos: \r\n\t\twhile i < len(chars) and chars[i] != '+': \r\n\t\t\ti += 1\r\n\r\n\t\tif i != 0: \r\n\t\t\tfor j in range(i): \r\n\t\t\t\tif chars[j] == '-': \r\n\t\t\t\t\tchars[j] = '+'\r\n\t\t\t\telse: \r\n\t\t\t\t\tchars[j] = '-'\r\n\t\t\t\r\n\t\t\tcount += 1\r\n\r\n\t\tis_pos = all(c == '+' for c in chars)\r\n\r\n\t\tif is_pos: \r\n\t\t\tbreak\r\n\r\n\t\twhile i < len(chars) and chars[i] == '+': \r\n\t\t\ti += 1\t\t\r\n\r\n\t\tif i != 0: \r\n\t\t\tfor j in range(i): \r\n\t\t\t\tif chars[j] == '-': \r\n\t\t\t\t\tchars[j] = '+'\r\n\t\t\t\telse: \r\n\t\t\t\t\tchars[j] = '-'\r\n\t\t\t\r\n\t\t\tcount += 1\t\t\t\r\n\r\n\t\tis_pos = all(c == '+' for c in chars)\r\n\r\n\treturn count \r\n\r\nif __name__ == '__main__': \r\n\tf2 = open('output2.txt', 'w')\r\n\r\n\twith open('B-large.in', 'r') as f: \r\n\t\tcount = 0 \r\n\r\n\t\tfor line in f: \r\n\t\t\tif count == 0: \r\n\t\t\t\tpass \r\n\t\t\telse: \r\n\t\t\t\tchars = list(line.strip())\r\n\t\t\t\tf2.write('Case #{}: {}\\n'.format(count, num_flips(chars)))\r\n\r\n\t\t\tcount += 1","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_hackr_prob2.py","file_name":"16_0_2_hackr_prob2.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"631741618","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('packages', '0013_auto_20160615_1131'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='package',\n name='volume',\n field=models.CharField(max_length=9, choices=[(b'0.005', b'0.005GB'), (b'1', b'1GB'), (b'3', b'3GB'), (b'5', b'5GB'), (b'8', b'8GB'), (b'10', b'10GB'), (b'12', b'12GB'), (b'15', b'15GB'), (b'20', b'20GB'), (b'25', b'25GB'), (b'Unlimited', b'Unlimited')]),\n ),\n ]\n","sub_path":"packages/migrations/0014_auto_20160629_1437.py","file_name":"0014_auto_20160629_1437.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"16473869","text":"import pyautogui, subprocess, time, sys, json\r\n\r\n# Checks if there is any accounts in login.data\r\n# If exists, continue, else terminate with 'No active accounts found'\r\ntry:\r\n with open('res/login.data', 'r') as d:\r\n credentials = json.load(d)\r\n d.close()\r\n\r\n pyautogui.FAILSAFE = True\r\n # Get steam.exe path and launch it\r\n print('Launching Steam.exe...')\r\n path = ['C:\\Program Files (x86)\\Steam\\Steam.exe']\r\n run_prog = subprocess.Popen(path)\r\n\r\n while True:\r\n steam = pyautogui.locateOnScreen('res/steam.png')\r\n if steam is not None:\r\n # Automation\r\n print(\"1 active account found. Getting it's credentials..\")\r\n time.sleep(1)\r\n print(\"Uid of account founded is, \"+credentials[0])\r\n time.sleep(4)\r\n\r\n x, y = 960, 455 # Get coordintates of username input\r\n pyautogui.doubleClick(x, y, button='left')\r\n pyautogui.typewrite(credentials[1])\r\n print('Entering username... '+str(len(credentials[1]))+\" length\")\r\n\r\n x, y = 960, 489 # Get coordinates of password input\r\n pyautogui.click(x, y, clicks=1, button='left')\r\n pyautogui.typewrite(credentials[2])\r\n print('Entering password...'+str(len(credentials[2]))+\" length\")\r\n pyautogui.typewrite(['enter'])\r\n time.sleep(2)\r\n\r\n # If steam guard detected on the account\r\n sguard_found = pyautogui.locateOnScreen('res/sguard.png')\r\n if sguard_found is not None:\r\n print('This account seems to be running on an authorization.')\r\n time.sleep(0.5)\r\n print('Searching for any available backup codes in PC..')\r\n time.sleep(0.5)\r\n # We used 'READ FILE' method to retrieve our steam codes\r\n # If exists, continue, else terminate with 'No backup codes found'\r\n try:\r\n\r\n x, y = 1068, 588 # Get coordinates of steam guard input\r\n code_path = 'C:\\\\Users\\\\admin\\\\Desktop\\\\codes.txt'\r\n\r\n with open(code_path, 'r') as f:\r\n print(code_path.split(\"\\\\\")[-1] + \" containing backup codes was found!\")\r\n codelist = []\r\n for line in f:\r\n codes = line.split('\\n')\r\n codelist.append(codes[0])\r\n print(\"File is holding \" + str(len(codelist)) + \" backup codes.\")\r\n getCode = codelist[0] # Get the first code of the list\r\n pyautogui.click(x, y, clicks=1, button='left')\r\n pyautogui.typewrite(getCode)\r\n pyautogui.typewrite(['enter'])\r\n codelist.pop(0) # Removing the code since it's gonna be used\r\n\r\n # Updating codelist by re-writing the file\r\n codes = \"\"\r\n for code in codelist:\r\n codes += code + \"\\n\"\r\n with open(code_path, 'w') as d:\r\n d.write(codes)\r\n time.sleep(0.5)\r\n print('Done..! Logging-in.')\r\n\r\n except FileNotFoundError:\r\n print('Could not find any files with steam guard codes.')\r\n\r\n else:\r\n print('Account does not require any subsequent authentication.')\r\n time.sleep(0.5)\r\n print('Done..! Logging-in.')\r\n\r\n # End\r\n break\r\n\r\nexcept FileNotFoundError:\r\n credentials = [\"\",\"\",\"\"]\r\n print(\"No active accounts found. Make sure you have at least 1 account setup as active.\")\r\n\r\n# End\r\nsys.exit(0)\r\n","sub_path":"Steam Automation/steam_automation.py","file_name":"steam_automation.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"353122269","text":"'''K-Messed Array Sort\nGiven an array of integers arr where each element is at most k places away from its sorted position, code an efficient function sortKMessedArray that sorts arr. For instance, for an input array of size 10 and k = 2, an element belonging to index 6 in the sorted array will be located at either index 4, 5, 6, 7 or 8 in the input array.\n\nAnalyze the time and space complexities of your solution.\n\nExample:\n\ninput: arr = [1, 4, 5, 2, 3, 7, 8, 6, 10, 9], k = 2\n\noutput: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nConstraints:\n\n[time limit] 5000ms\n\n[input] array.integer arr\n\n1 ≤ arr.length ≤ 100\n[input] integer k\n\n1 ≤ k ≤ 20\n[output] array.integer'''\n\nimport heapq\n\ndef sort_k_messed_array(arr, k):\n heap = []\n for i in range(len(arr)):\n if len(heap) < k + 1:\n heapq.heappush(heap, arr[i])\n if len(heap) == k + 1:\n arr[i - k] = heapq.heappop(heap)\n\n for i in range(len(arr) - k, len(arr)):\n arr[i] = heapq.heappop(heap)\n\n return arr","sub_path":"Pramp/sort_k_messed_array.py","file_name":"sort_k_messed_array.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"519807211","text":"from flask import Blueprint, render_template,request,redirect,url_for,flash\nfrom models.user import User\nfrom models.image import Image\nfrom flask_login import current_user, login_required\nfrom instagram_web.util.helpers import upload_file_to_s3,allowed_file\nfrom werkzeug.utils import secure_filename\nimport datetime\n\nimages_blueprint = Blueprint('images',\n __name__,\n template_folder='templates')\n\n\n# @images_blueprint.route('/new', methods=['GET'])\n# def new():\n# return render_template('images/new.html')\n\n\n@images_blueprint.route('/', methods=['POST'])\ndef create():\n \n if 'image_file' not in request.files:\n flash('No image_file key in request.files')\n return render_template(\"home.html\")\n\n file = request.files['image_file']\n\n if file.filename == '':\n flash('No selected file')\n return render_template(\"home.html\")\n\n if file and allowed_file(file.filename):\n file.filename = secure_filename(f\"{str(datetime.datetime.now())}{file.filename}\")\n output = upload_file_to_s3(file) \n if output:\n image = Image(user=current_user.id,image_path=file.filename,caption=request.form.get(\"caption\"))\n image.save()\n flash(\"Image successfully uploaded\",\"success\")\n return redirect(url_for('users.show',username=current_user.username))\n else:\n flash(output,\"danger\")\n return render_template(\"home.html\")\n\n else:\n flash(\"File type not accepted,please try again.\")\n return render_template(\"home.html\")\n","sub_path":"instagram_web/blueprints/images/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"547880265","text":"#!/usr/bin/python\n#\n# Copyright (C) 2014 eNovance SAS \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport unittest\nfrom mock import patch\nimport export_issues\nimport redmine\n\n\nclass TestIssueImporter(unittest.TestCase):\n\n def setup(self):\n pass\n\n def test_get_config_value(self):\n pid = export_issues.get_config_value('REDMINE', 'name')\n self.assertIsNotNone(pid)\n\n def test_get_config_value_wrong_option(self):\n pid = export_issues.get_config_value('REDMINE', 'abc')\n self.assertIsNone(pid)\n\n def test_get_config_value_wrong_section(self):\n pid = export_issues.get_config_value('xyz', 'id')\n self.assertIsNone(pid)\n\n @patch.object(redmine.managers.ResourceManager, 'create')\n def test_main(self, mock_create):\n assert redmine.managers.ResourceManager.create is mock_create\n mock_create.return_value = None\n try:\n export_issues.main()\n except:\n self.fail(\"Exception thrown\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tools/issues_export_tool/test_export_issues.py","file_name":"test_export_issues.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"312633901","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# coding=utf-8 \n\n\"\"\"\n@author: Li Tian\n@contact: 694317828@qq.com\n@software: pycharm\n@file: autoencoder_11.py\n@time: 2019/7/2 10:02\n@desc: 去噪自动编码器:使用高斯噪音\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport sys\n\n\nn_inputs = 28 * 28\nn_hidden1 = 300\nn_hidden2 = 150 # codings\nn_hidden3 = n_hidden1\nn_outputs = n_inputs\n\nlearning_rate = 0.01\n\nnoise_level = 1.0\n\nX = tf.placeholder(tf.float32, shape=[None, n_inputs])\nX_noisy = X + noise_level * tf.random_normal(tf.shape(X))\n\nhidden1 = tf.layers.dense(X_noisy, n_hidden1, activation=tf.nn.relu,\n name=\"hidden1\")\nhidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, # not shown in the book\n name=\"hidden2\") # not shown\nhidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, # not shown\n name=\"hidden3\") # not shown\noutputs = tf.layers.dense(hidden3, n_outputs, name=\"outputs\") # not shown\n\nreconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE\n\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntraining_op = optimizer.minimize(reconstruction_loss)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nn_epochs = 10\nbatch_size = 150\nmnist = input_data.read_data_sets('D:/Python3Space/BookStudy/book2/MNIST_data/')\n\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n n_batches = mnist.train.num_examples // batch_size\n for iteration in range(n_batches):\n print(\"\\r{}%\".format(100 * iteration // n_batches), end=\"\")\n sys.stdout.flush()\n X_batch, y_batch = mnist.train.next_batch(batch_size)\n sess.run(training_op, feed_dict={X: X_batch})\n loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})\n print(\"\\r{}\".format(epoch), \"Train MSE:\", loss_train)\n saver.save(sess, \"D:/Python3Space/BookStudy/book4/model/my_model_stacked_denoising_gaussian.ckpt\")\n\n","sub_path":"1-books/book4_机器学习实战——基于Scikit-Learn和TensorFlow/c15/autoencoder_11.py","file_name":"autoencoder_11.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"114959087","text":"# 使用 pySerial 套件\n# 需 pip install pyserial\n\n'''\nimport serial\n\nser = serial.Serial()\nser.baudrate = 115200 #波特率\nser.port = 'COM1' #com口\nser.stopbits=1 #停止位 1 1.5 2\nser.bytesize=8 #資料位\nser.parity='N' #奇偶位 N沒有 E偶校驗 O奇校驗\nser.timeout=5 #超時時間\n\nser.open() #連線失敗會丟擲錯誤\n\nser.write('s\\n'.encode()) #傳送資訊\n\nresult=ser.readline().decode() #接收資訊\n\nprint(result)\n\n\n\n#使用 pySerial 套件\nimport serial\nser = serial.Serial('COM6', baudrate=115200, bytesize=8, parity='N', stopbits=1)\n\n#ser.write(xxxxxx)\n\nresp = ser.readline()\n'''\n\nimport serial # 引用pySerial模組\n\nCOM_PORT = 'COM4' # 指定通訊埠名稱\nBAUD_RATES = 115200 # 設定傳輸速率\nser = serial.Serial(COM_PORT, BAUD_RATES) # 初始化序列通訊埠\n\ntry:\n while True:\n while ser.in_waiting: # 若收到序列資料…\n data_raw = ser.readline() # 讀取一行\n #data = data_raw.decode() # 用預設的UTF-8解碼\n print('接收到的原始資料:', data_raw)\n #print('接收到的資料:', data)\n\nexcept KeyboardInterrupt:\n ser.close() # 清除序列通訊物件\n print('再見!')\n\n#按 ctrl + C 離開\n","sub_path":"_4.python/import_module/module_PySerial.py","file_name":"module_PySerial.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"319782416","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/insights/parsers/tests/test_grubby.py\n# Compiled at: 2019-05-16 13:41:33\nimport pytest, doctest\nfrom insights.parsers import grubby\nfrom insights.parsers.grubby import GrubbyDefaultIndex, GrubbyDefaultKernel\nfrom insights.tests import context_wrap\nfrom insights.parsers import SkipException, ParseException\nDEFAULT_INDEX_1 = '0'\nDEFAULT_INDEX_2 = '1'\nABDEFAULT_INDEX_EMPTY = ''\nDEFAULT_INDEX_AB = '-2'\nDEFAULT_KERNEL = '/boot/vmlinuz-2.6.32-573.el6.x86_64'\nDEFAULT_KERNEL_EMPTY = ''\nDEFAULT_KERNEL_AB = ('\\n/boot/vmlinuz-2.6.32-573.el6.x86_64\"\\n/boot/vmlinuz-2.6.32-573.el6.x86_64\"\\n').strip()\n\ndef test_grubby_default_index():\n res = GrubbyDefaultIndex(context_wrap(DEFAULT_INDEX_1))\n assert res.default_index == 0\n res = GrubbyDefaultIndex(context_wrap(DEFAULT_INDEX_2))\n assert res.default_index == 1\n\n\ndef test_grubby_default_index_ab():\n with pytest.raises(SkipException) as (excinfo):\n GrubbyDefaultIndex(context_wrap(ABDEFAULT_INDEX_EMPTY))\n assert 'Empty output' in str(excinfo.value)\n with pytest.raises(ParseException) as (excinfo):\n GrubbyDefaultIndex(context_wrap(DEFAULT_INDEX_AB))\n assert 'Invalid output:' in str(excinfo.value)\n\n\ndef test_grubby_default_kernel_ab():\n with pytest.raises(SkipException) as (excinfo):\n GrubbyDefaultKernel(context_wrap(DEFAULT_KERNEL_EMPTY))\n assert 'Empty output' in str(excinfo.value)\n with pytest.raises(ParseException) as (excinfo):\n GrubbyDefaultKernel(context_wrap(DEFAULT_KERNEL_AB))\n assert 'Invalid output:' in str(excinfo.value)\n\n\ndef test_grubby_default_kernel():\n res = GrubbyDefaultKernel(context_wrap(DEFAULT_KERNEL))\n assert res.default_kernel == DEFAULT_KERNEL\n\n\ndef test_doc_examples():\n env = {'grubby_default_index': GrubbyDefaultIndex(context_wrap(DEFAULT_INDEX_1)), \n 'grubby_default_kernel': GrubbyDefaultKernel(context_wrap(DEFAULT_KERNEL))}\n failed, total = doctest.testmod(grubby, globs=env)\n assert failed == 0","sub_path":"pycfiles/insights_core-3.0.163-py2.7/test_grubby.py","file_name":"test_grubby.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"437642673","text":"from renderers.games import GameRenderer\nfrom rgbmatrix import RGBMatrix, RGBMatrixOptions\nfrom utils import args, led_matrix_options\nfrom data.scoreboard_config import ScoreboardConfig\nimport renderers.standings\nimport renderers.offday\nimport datetime\nimport mlbgame\nimport debug\n\n# Get supplied command line arguments\nargs = args()\n\n# Check for led configuration arguments\nmatrixOptions = led_matrix_options(args)\n\n# Initialize the matrix\nmatrix = RGBMatrix(options = matrixOptions)\ncanvas = matrix.CreateFrameCanvas()\n\n# Read scoreboard options from config.json if it exists\nconfig = ScoreboardConfig(\"config.json\")\ndebug.set_debug_status(config)\n\n# Render the current standings or today's games depending on\n# the provided arguments\nnow = datetime.datetime.now()\nyear = now.year\nmonth = now.month\nday = now.day\n\nif config.display_standings:\n standings = mlbgame.standings(datetime.datetime(year, month, day))\n division = next(division for division in standings.divisions if division.name == config.preferred_division)\n renderers.standings.render(matrix, matrix.CreateFrameCanvas(), division)\nelse:\n while True:\n games = mlbgame.day(year, month, day)\n if not len(games):\n renderers.offday.render(matrix, matrix.CreateFrameCanvas())\n else:\n GameRenderer(matrix, matrix.CreateFrameCanvas(), games, config).render()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"79665558","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n # solution 1\n # nums = []\n # while head:\n # nums.append(head.val)\n # head = head.next\n #\n # if len(nums) < 2:\n # return True\n #\n # i, j = 0, len(nums) - 1\n # while i < j:\n # if nums[i] != nums[j]:\n # return False\n # i, j = i + 1, j - 1\n # return True\n\n # solution 2\n def reverse(h: ListNode) -> ListNode:\n prev = None\n while h:\n next = h.next\n h.next = prev\n prev, h = h, next\n return prev\n\n fast, slow = head, head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n slow = slow.next if fast else slow\n\n p, q = head, reverse(slow)\n while q:\n if p.val != q.val:\n return False\n p, q = p.next, q.next\n\n return True\n\n\ndef test_solution():\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(1)\n assert Solution().isPalindrome(head)\n\n head.next.next = ListNode(3)\n assert not Solution().isPalindrome(head)\n\n head.next.next = ListNode(2)\n head.next.next.next = ListNode(1)\n assert Solution().isPalindrome(head)\n\n\n\n","sub_path":"primary/link_list/234_palindrome_linked_list.py","file_name":"234_palindrome_linked_list.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"366588520","text":"import numpy as np\n\ndef naive(limit, mults):\n\t\"\"\"Sum of all numbers less than limit that are multiples of [mults]\"\"\"\n\ttotal = 0\n\tfor i in range(2, limit):\n\t\tfor mult in mults:\n\t\t\tif i % mult == 0:\n\t\t\t\ttotal += i\n\t\t\t\tbreak\n\treturn total\n\n\ndef faster(limit, mults):\n # Use the fact that sum of multiples of x below n can be written as\n # x * (1 + 2 + ... + floor(n/x)). With more than one multiple, though, we\n # need to remove cross products for double counting (i.e. 3 * 5 if using 3 and 5),\n # and with more than two multiples we need to add back 3-way multiples, etc.\n # The below uses binary representation to get all cross-products of multiples\n # then uses the sequential sum shortcut to add/subtract from the total.\n \n num_mults = len(mults)\n \n def get_combos(mults):\n # Get binary numbers to represent all combos of elements in mults\n bins = ['{:0{}b}'.format(i, num_mults) for i in range(1, (2 ** num_mults))]\n \n # Create lists for all combos. Also keep track of whether we are using\n # the combo to add or subtract from our total. Combos with odd number\n # of base multipliers add ot the total, even subtracts.\n combos = []\n signs = []\n for b in bins:\n use = [mults[i] for i in range(len(b)) if b[i] == '1']\n combos.append(np.product(use))\n signs.append(((-1)** (b.count('1') + 1)))\n return combos, signs\n \n combos, signs = get_combos(mults)\n total = 0\n \n for i in range(len(combos)):\n upper = np.floor(float(limit - 1) / combos[i])\n seq_sum = (upper * (upper + 1)) / 2\n total += signs[i] * (combos[i] * seq_sum)\n \n return total\n \n \n \n\nif __name__ == \"__main__\":\n\tprint(faster(1000, [3,5]))\n ","sub_path":"P001.py","file_name":"P001.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"465809239","text":"from django.conf.urls import patterns, url\n\n\nurlpatterns = patterns(\n 'apps.mails.dashboard.views',\n url(r'^$', 'mailbox', name='dashboard_mailbox'),\n url(r'^/(?P[0-9]+)$', 'mail_view', name='dashboard_mail_view'),\n url(r'^/(?P[0-9]+)/reply$', 'mail_reply', name='dashboard_mail_reply'),\n url(r'^/new_mail_lady/(?P[0-9]+)$', 'new_mail_lady', name='dashboard_new_mail_lady'),\n url(r'^/mails_loader$', 'mails_loader', name='dashboard_mails_loader'),\n url(r'^/mails_lady_loader$', 'mails_lady_loader', name='dashboard_mails_lady_loader'),\n url(r'^/mails_last_loader$', 'mails_last_loader', name='dashboard_mails_last_loader'),\n url(r'^/history$', 'mails_history', name='dashboard_mails_history'),\n)\n","sub_path":"apps/mails/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"147379348","text":"import pandas as pd\nimport sys, json, numpy as np\nimport csv\nimport math\n\n\ndef main():\n lines = sys.stdin.readlines()\n lines = json.loads(lines[0])\n\n shape = \"empty\"\n null_val = \"empty\"\n colName = \"empty\"\n dtypes = \"empty\"\n count = \"empty\"\n categorical = \"empty\"\n numerical = \"empty\"\n mean = \"empty\"\n median = \"empty\"\n minimum = \"empty\"\n maximum = \"empty\"\n std = \"empty\"\n quant25 = \"empty\"\n quant50 = \"empty\"\n quant75 = \"empty\"\n skewness = \"empty\"\n unique = \"empty\"\n uniqueValues = \"empty\"\n top = \"empty\"\n freq = \"empty\"\n\n try:\n try:\n df = pd.read_csv(lines)\n df = df.dropna(how=\"all\", axis=\"columns\")\n df.to_csv(lines, index=False)\n df = pd.read_csv(lines)\n\n colName = []\n dtypes = []\n shape = df.shape\n\n for x in df.dtypes.iteritems():\n colName.append(x[0])\n dtypes.append(str(x[1]))\n\n count = []\n for x in df.count():\n count.append(x)\n\n categorical = []\n numerical = []\n\n for i in range(df.shape[1]):\n if dtypes[i] != \"object\":\n numerical.append(colName[i])\n else:\n categorical.append(colName[i])\n\n if len(numerical) == 0:\n numerical = \"empty\"\n else:\n mean = []\n for x in df.mean():\n if math.isnan(x):\n mean.append(0.0)\n else:\n mean.append(round(x, 4))\n\n median = []\n for x in df.median():\n if math.isnan(x):\n median.append(0.0)\n else:\n median.append(round(x, 4))\n\n minimum = []\n for x in df[numerical].min():\n if math.isnan(x):\n minimum.append(0.0)\n else:\n minimum.append(round(x, 4))\n\n maximum = []\n for x in df[numerical].max():\n if math.isnan(x):\n maximum.append(0.0)\n else:\n maximum.append(round(x, 4))\n\n std = []\n for x in df.std():\n if math.isnan(x):\n std.append(0.0)\n else:\n std.append(round(x, 4))\n\n df.quantile(q=0.25)\n quant25 = []\n for x in df.quantile(q=0.25):\n if math.isnan(x):\n quant25.append(0.0)\n else:\n quant25.append(round(x, 4))\n\n quant50 = []\n for x in df.quantile(q=0.5):\n if math.isnan(x):\n quant50.append(0.0)\n else:\n quant50.append(round(x, 4))\n\n quant75 = []\n for x in df.quantile(q=0.75):\n if math.isnan(x):\n quant75.append(0.0)\n else:\n quant75.append(round(x, 4))\n skewness = []\n for i in numerical:\n skewness.append(round(df[i].skew(), 3))\n\n if len(categorical) == 0:\n categorical = \"empty\"\n else:\n cat = df.describe(include=[object]).values\n unique = []\n uniqueValues = []\n\n for x in cat[1, :]:\n unique.append(x)\n\n for i in categorical:\n unlist = []\n for j in df[i].unique().tolist():\n if pd.isna(j):\n unlist.append(\"NAN\")\n else:\n unlist.append(j)\n\n uniqueValues.append(unlist)\n\n top = []\n for x in cat[2, :]:\n top.append(x)\n\n freq = []\n for x in cat[3, :]:\n freq.append(x.item())\n\n df = df.replace(r\"^\\s*$\", np.NaN, regex=True)\n df = df.replace(r\"NA\", np.NaN, regex=True)\n null_val = []\n for x in df.isnull().sum():\n null_val.append(x)\n\n if len(null_val) == 0:\n null_val = \"empty\"\n\n except:\n x = 1\n\n output = {\n \"shape\": shape,\n \"columns\": colName,\n \"dtype\": dtypes,\n \"count\": count,\n \"null_val\": null_val,\n \"numerical\": numerical,\n \"mean\": mean,\n \"median\": median,\n \"minimum\": minimum,\n \"maximum\": maximum,\n \"std\": std,\n \"quant25\": quant25,\n \"quant50\": quant50,\n \"quant75\": quant75,\n \"skewness\": skewness,\n \"categorical\": categorical,\n \"unique\": unique,\n \"unique_val\": uniqueValues,\n \"top\": top,\n \"freq\": freq,\n }\n\n except:\n output = {\"error\": \"No Data Parsed\"}\n\n output = json.dumps(output)\n 
print(output)\n\nif __name__ == \"__main__\":\n main()","sub_path":"pyScript/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"136086794","text":"import flask_login\nfrom pprint import pprint\n\nfrom flask import Flask, render_template, session\ndef replace_table_sql(sql,new_name_table,table,sql_table):\n sql = sql.replace(table,new_name_table)\n sql = sql.replace(\"FROM \"+new_name_table,\"FROM ({0}) as {1}\".format(sql_table,new_name_table))\n return sql\n\ndef SQL_QUERY_MUTATOR(sql, username, security_manager,database):\n if str(database) == \"Agenda\":\n if not security_manager.current_user:\n raise EnvironmentError(\"User is required to be authenticated\")\n sql = replace_table_sql(sql,\"agenda_bi_temp\",\"public.agenda_bi\",\"select * from public.agenda_bi where emp_id in (select emp_id from usuario_empresa where usu_id = (select usuario.id from usuario where email ilike '{0}'))\".format(security_manager.current_user.email))\n print(database)\n print(\"MUTATE\",sql)\n return sql","sub_path":"local_app/mutator.py","file_name":"mutator.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"499117379","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 29 10:48:19 2018\r\n\r\n@author: cindy\r\n\"\"\"\r\nimport names as names\r\nFOV=['2.8','5.0']\r\nOCCULTER=['On','Off']\r\nND_FILTER=['Open','2ND','4ND']\r\nMOD_TYPE=[names.DISCRETE,names.CONTINIOUS]\r\nMOD_POS=['0','1','45','90','180','225','270']\r\nMOD_STATE=[names.ON,names.OFF,names.OUT]\r\nSP_SLIT=['120','233(Single)']\r\n\r\nOCCULTER_FOV_28 = [1.054,1.064]\r\nOCCULTER_FOV_5=[1.080,1.091]\r\nDEFAULT_R_28=1.059\r\nDEFAULT_R_5=1.086\r\n\r\nDEFAULT_R=1.5","sub_path":"limits.py","file_name":"limits.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"477175151","text":"from pytac.exceptions import PvException\nimport pytac.device\nimport pytest\nimport mock\n\n\nSP_PV = 'SR01A-PC-SQUAD-01:SETI'\nRB_PV = 'SR01A-PC-SQUAD-01:I'\nENABLE_PV = 'SR01C-DI-EBPM-01:CF:ENABLED_S'\nENABLED_VALUE = '1.0'\n\n@pytest.fixture\ndef create_device(readback=RB_PV, setpoint=SP_PV, _enable_pv=ENABLE_PV, _enabled_value=ENABLED_VALUE):\n _rb = readback\n _sp = setpoint\n mock_cs = mock.MagicMock()\n mock_cs.get.return_value = '1.0'\n if _enable_pv and _enabled_value:\n pve = pytac.device.PvEnabler(_enable_pv, _enabled_value, mock_cs)\n device = pytac.device.Device(cs=mock.MagicMock(), enabled=pve, rb_pv=_rb, sp_pv=_sp)\n else:\n device = pytac.device.Device(cs=mock.MagicMock(), enabled=True, rb_pv=_rb, sp_pv=_sp)\n return device\n\n\ndef test_set_device_value(create_device):\n create_device.put_value(40)\n create_device._cs.put.assert_called_with(SP_PV, 40)\n\n\ndef test_device_invalid_sp_raise_exception():\n device2 = create_device(RB_PV, None)\n with pytest.raises(PvException):\n device2.put_value(40)\n with pytest.raises(PvException):\n create_device(None, None)\n\n\ndef test_get_device_value(create_device):\n with pytest.raises(PvException):\n create_device.get_value('non_existent')\n\n\ndef test_is_enabled_empty_string():\n device = create_device(_enabled_value='')\n assert device.is_enabled()\n\ndef test_is_enabled(create_device):\n assert create_device.is_enabled()\n\n\ndef test_is_disabled():\n device = create_device(_enabled_value='3')\n assert not device.is_enabled()\n\n\ndef test_PvEnabler():\n mock_cs = mock.MagicMock()\n mock_cs.get.return_value = '40'\n pve = pytac.device.PvEnabler('enable-pv', '40', mock_cs)\n assert pve\n\n mock_cs.get.return_value = 50\n assert not pve\n","sub_path":"test/test_device.py","file_name":"test_device.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"50209413","text":"import tensorflow as tf\r\nimport matplotlib as plt\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\n\r\nx = tf.placeholder(tf.float32, [None, 784])\r\n\r\nW0 = tf.Variable(tf.random_normal([784, 200]))\r\nb0 = tf.Variable(tf.random_normal([200]))\r\nhidden0 = tf.nn.relu(tf.matmul(x, W0) + b0)\r\n\r\nW1 = tf.Variable(tf.random_normal([200, 200]))\r\nb1 = tf.Variable(tf.random_normal([200]))\r\nhidden1 = tf.nn.relu(tf.matmul(hidden0, W1) + b1)\r\n\r\nW2 = tf.Variable(tf.random_normal([200, 200]))\r\nb2 = tf.Variable(tf.random_normal([200]))\r\nhidden2 = tf.nn.relu(tf.matmul(hidden1, W2) + b2)\r\n\r\nW3 = tf.Variable(tf.random_normal([200, 10]))\r\nb3 = tf.Variable(tf.random_normal([10]))\r\ny = tf.matmul(hidden2, W3) + b3\r\n\r\ny_ = tf.placeholder(tf.float32, [None, 10])\r\n\r\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\r\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)\r\nwith tf.Session() as session:\r\n session.run(tf.global_variables_initializer())\r\n for _ in range(10000):\r\n batch_xs, batch_ys = mnist.train.next_batch(128)\r\n session.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\r\n if _ % 1000 == 0:\r\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n print(_, session.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\r\n\r\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n print('final:', session.run(accuracy, feed_dict={x: mnist.test.images,\r\n y_: mnist.test.labels}))\r\n\r\n'''graf = plt.figure()\r\nplt.plot(x, y)\r\nplt.title('График функции')\r\nplt.ylabel('Ось Y')\r\nplt.xlabel('Ось X')\r\nplt.grid(True)\r\nplt.show()'''","sub_path":"шлак/FFNN_hidden.py","file_name":"FFNN_hidden.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"574871190","text":"\narrayLength = int(input(\"Enter the length of the array: \"))\n\nnumbers = [0] * arrayLength\n\nfor i in range(arrayLength):\n numbers[i] = 1\n\nfor i in range(arrayLength):\n print(\"numbers[\" + str(i) + \"] = \" + str(numbers[i]))\n","sub_path":"lab6/lab6-assets/fill_array.py","file_name":"fill_array.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"499352750","text":"\"\"\" rpi-2.2TFT-kbrd.py by ukonline2000 2015.12.08\nGPIO Keyboard driver for Raspberry Pi 2.2TFT for use with 5 Buttons\nrequires uinput kernel module (sudo modprobe uinput)\nrequires python-uinput (git clone https://github.com/tuomasjjrasanen/python-uinput)\nrequires python RPi.GPIO (from http://pypi.python.org/pypi/RPi.GPIO/)\n\nSteps:\n\n1.Install the python lib\n$sudo apt-get update\n$sudo apt-get install libudev-dev\n$sudo apt-get install python-pip\n$sudo pip install rpi.gpio\n$sudo pip install python-uinput\n\n2. Perform the command\n$sudo modprobe uinput\n\n3. Perform the demo python program\n$sudo python rpi-2.2TFT-kbrd.py\n\n\"\"\"\n\n\n\nimport uinput\nimport time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n# Up, Down, left, right, fire\nGPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP) #Trigon Button for GPIO24\nGPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP) #X Button for GPIO5\nGPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP) #Circle Button for GPIO23\nGPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP) #Square Button for GPIO22\nGPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP) #R Button for GPIO4\n#L Button for GPIO17\n\nevents = (uinput.KEY_UP, uinput.KEY_DOWN, uinput.KEY_LEFT, uinput.KEY_RIGHT, uinput.KEY_LEFTCTRL)\n\ndevice = uinput.Device(events)\n\nfire = False\nup = False\ndown = False\nleft = False\nright = False\n\nwhile True:\n if (not fire) and (not GPIO.input(4)): # Fire button pressed\n fire = True\n device.emit(uinput.KEY_LEFTCTRL, 1) # Press Left Ctrl key\n if fire and GPIO.input(4): # Fire button released\n fire = False\n device.emit(uinput.KEY_LEFTCTRL, 0) # Release Left Ctrl key\n if (not up) and (not GPIO.input(24)): # Up button pressed\n up = True\n device.emit(uinput.KEY_UP, 1) # Press Up key\n if up and GPIO.input(24): # Up button released\n up = False\n device.emit(uinput.KEY_UP, 0) # Release Up key\n if (not down) and (not GPIO.input(5)): # Down button pressed\n down = True\n device.emit(uinput.KEY_DOWN, 1) # Press Down key\n if down and GPIO.input(5): # Down button released\n down = False\n device.emit(uinput.KEY_DOWN, 0) # Release Down key\n if (not left) and (not GPIO.input(23)): # Left button pressed\n left = True\n device.emit(uinput.KEY_LEFT, 1) # Press Left key\n if left and GPIO.input(23): # Left button released\n left = False\n device.emit(uinput.KEY_LEFT, 0) # Release Left key\n if (not right) and (not GPIO.input(22)): # Right button pressed\n right = True\n device.emit(uinput.KEY_RIGHT, 1) # Press Right key\n if right and GPIO.input(22): # Right button released\n right = False\n device.emit(uinput.KEY_RIGHT, 0) # Release Right key\n time.sleep(.04)\n","sub_path":"src_files_py/rpi-2.2TFT-kbrd.py","file_name":"rpi-2.2TFT-kbrd.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"587745686","text":"t = int(input())\nfor i in range(t):\n\tc = 0\n\t(n, a, b, k) = map(int, input().split())\n\tfor j in range(1, n+1):\n\t\tif (j%a == 0 and j%b != 0) or (j%a != 0 and j%b == 0):\n\t\t\tc += 1\n\tif c >= k:\n\t\tprint(\"Win\")\n\telse:\n\t\tprint(\"Lose\")","sub_path":"cc_FebChallenge_1.py","file_name":"cc_FebChallenge_1.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"229168860","text":"f = open(\"tundra.urdf\", \"w\")\n\ndef write(str):\n f.write(str)\n f.write('\\n')\n\n\nwrite('')\nfor i in range(0, 8):\n write(' ')\nwrite('')\nfor i in range(0, 7):\n write(' ')\n write(' ')\n write(' ')\n write(' ')\n write(' ')\n write('')\nwrite(' ')\n\nf.close()\n \n\n\n","sub_path":"links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"443050706","text":"from interface import common_interface,teacher_interface\nfrom lib import common\n\nteacher_info = {'user':None}\n\ndef teacher_login():\n while True:\n username = input('请输入用户名>>>').strip()\n password = input('请输入密码>>>').strip()\n flag, msg = common_interface.login_interface(username, password,user_type='teacher')\n print(msg)\n if flag:\n teacher_info['user'] = username\n break\n\n@common.login_auth('teacher')\ndef check_course():\n course_list = teacher_interface.check_course_interface(teacher_info.get('user'))\n print(course_list)\n\n@common.login_auth('teacher')\ndef choose_course():\n while True:\n course_list = common_interface.get_course_interface()\n\n if not course_list:\n print('暂无课程')\n break\n\n for course in course_list:\n print(course)\n\n choice = input('请选择教授课程>>>').strip()\n if choice in course_list:\n course_name = choice\n flag,msg = teacher_interface.choose_course_interface(teacher_info.get('user'),course_name)\n print(msg)\n if flag:\n break\n else:\n print('请选择存在的课程')\n\n@common.login_auth('teacher')\ndef check_student():\n while True:\n course_list = teacher_interface.check_course_interface(teacher_info.get('user'))\n if not course_list:\n print('暂无课程')\n break\n for course in course_list:\n print(course)\n\n choice = input('请选择你要查看的课程>>>').strip()\n if choice in course_list:\n course_name = choice\n flag,msg = teacher_interface.check_student_interface(teacher_info.get('user'),course_name)\n print(msg)\n if flag:\n break\n break\n else:\n print('请选择存在的课程')\n\n@common.login_auth('teacher')\ndef modify_score():\n while True:\n course_list = teacher_interface.check_course_interface(teacher_info.get('user'))\n if not course_list:\n print('暂无课程')\n break\n for course in course_list:\n print(course)\n course_choice = input('请选择课程>>>').strip()\n if course_choice in course_list:\n course_name = course_choice\n course = teacher_interface.get_course_interface(course_name)\n student_list = course.student_list\n if not student_list:\n print('暂无学生')\n break\n for student in student_list:\n print(student)\n student_choice = input('请选择学生>>>').strip()\n if student_choice in student_list:\n student_name = student_choice\n score = input('请输入分数>>>').strip()\n flag,msg = teacher_interface.change_score_interface(teacher_info.get('user'),course_name,student_name,score)\n print(msg)\n if flag:\n break\n\nfunc_dic = {\n '1':teacher_login,\n '2':check_course,\n '3':choose_course,\n '4':check_student,\n '5':modify_score,\n}\n\ndef teacher_view():\n while True:\n print('''\n 1、登录\n 2、查看教授课程\n 3、选择教授课程\n 4、查看课程下学生\n 5、修改学生成绩 \n ''')\n choice = input('请输入你想要的功能,按q退出>>>').strip()\n if choice == 'q':\n break\n if choice not in func_dic:\n continue\n func_dic[choice]()","sub_path":"core/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"387860339","text":"import sys\nimport os\nimport cv2\n\nfolders = sys.argv[1:]\n\nfor folder in folders:\n output_directory = folder + '_cent'\n if not os.path.isdir(output_directory+'/'):\n os.mkdir(output_directory)\n for image in os.listdir(folder):\n img = cv2.imread(folder + '/' + image, cv2.IMREAD_UNCHANGED)\n cent_x = len(img)//2\n cent_y = len(img[0])//2\n cent_cut = img[cent_x-128:cent_x+128,cent_y-128:cent_y+128]\n\n cv2.imwrite(output_directory + '/' + image, cent_cut)\n","sub_path":"extract_center.py","file_name":"extract_center.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"56698823","text":"from maperipy import App\n\nMinLon=34\nMaxLon=36\n\nMinLat=29\nMaxLat=34\n\nApp.run_command('set-dem-source ASTER')\n\nfor Lon in range (MinLon, MaxLon, 1) :\n for Lat in range (MinLat, MaxLat, 1) :\n App.run_command('generate-contours interval=10 bounds='+str(Lon)+','+str(Lat)+ ',' +str(Lon+1)+','+str(Lat+1))\n App.run_command('save-source ASTER-'+str(Lon)+'-'+str(Lat)+'.contours')\n\nApp.run_command('save-map-script file=Scripts\\Contours.mscript')\n","sub_path":"Scripts/Maperipy/GenASTERContours.py","file_name":"GenASTERContours.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"419688280","text":"# -*- coding:utf-8 -*-\nimport websocket\nimport threading\nimport time\n\n# 動作検証用の乱数生成にのみ使用\nimport random\n\ndef on_message(ws, message):\n # 変数への変更をスレッド間で共有したいのでglobal化\n # Read Onlyで良い場合には不要\n global msg\n with msgLock:\n msg = message\n\n with printLock:\n print(\"on_message: \" + message)\n\ndef on_error(ws, error):\n with printLock:\n print(\"ERROR: \" + error)\n\ndef on_close(ws):\n with printLock:\n print(\"CONNECTION CLOSED\")\n\ndef on_open(ws):\n thread = threading.Thread(target=sendLoop, args=(ws,))\n thread.setDaemon(True)\n thread.start()\n\ndef sendLoop(ws):\n # 関数ローカルの変数はlocalStorage以外はスレッド間で共有される\n localStorage.counter = 0\n while True:\n # このあたりで適当にセンサ情報を取得するなど\n\n with lock:\n ws.send(\"From Python {}\".format(localStorage.counter))\n\n # global化した変数の挙動確認のためにランダム時間でsleep\n time.sleep(random.random())\n\n # on_message()で受け取ったデータが共有されていることを確認\n with printLock:\n print(\"sendLoop: \" + msg)\n\n localStorage.counter += 1\n\n\nlock = threading.Lock()\nprintLock = threading.Lock()\nmsgLock = threading.Lock()\n\n# sendLoop()を呼び出すスレッドがひとつのみの場合は不要\nlocalStorage = threading.local()\n\nmsg = \"\"\n\n# 通信の様子を詳しく見たい場合には利用\n#websocket.enableTrace(True)\n\nws = websocket.WebSocketApp(\"ws://localhost:5001\", on_open=on_open, on_message=on_message, on_error=on_error, on_close=on_close)\nws.run_forever()\n\n\n","sub_path":"echo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"395440761","text":"class Move:\n\n def __init__(self, piece, newPos, pieceToCapture=None):\n self.check = False\n self.checkMate = False\n self.staleMate = False\n self.kingSideCastle = False\n self.queenSideCastle = False\n self.castle = False\n self.suicide = False\n self.cripple = False\n self.northPiece = None\n self.southPiece = None\n self.eastPiece = None\n self.westPiece = None\n self.whip = False\n self.waitTimeDecrease = False\n self.piece = piece\n self.oldPos = piece.position\n self.newPos = newPos\n self.specialPos = None\n self.pieceToCapture = pieceToCapture\n self.specialMovePiece = None\n self.rookMove = None\n\n self.notation = self.getNotation()\n\n def getNotation(self):\n notation = ''\n\n if self.queenSideCastle:\n return '0-0-0'\n\n if self.kingSideCastle:\n return '0-0'\n\n newPosNotation = self.positionToHumanCoord(self.newPos if not \\\n self.specialPos else self.specialPos)\n oldPosNotation = self.positionToHumanCoord(self.oldPos)\n captureNotation = 'x' if self.pieceToCapture else ''\n promotionNotation = '={}'.format(self.specialMovePiece.stringRep) \\\n if self.specialMovePiece else ''\n notation += oldPosNotation + captureNotation + newPosNotation + promotionNotation\n\n return notation\n\n def positionToHumanCoord(self, pos):\n transTable = str.maketrans('01234567', 'abcdefgh')\n notation = str(pos[0]).translate(transTable) + str(pos[1] + 1)\n return notation\n\n def __str__(self):\n self.notation = self.getNotation()\n displayString = 'Old Pos: {}'.format(self.oldPos) + \\\n 'New Pos: {}'.format(self.newPos)\n if self.notation:\n displayString += 'Notation: {}'.format(self.notation)\n return displayString\n\n def __eq__(self, other):\n if self.oldPos == other.oldPos and \\\n self.newPos == other.newPos and \\\n self.specialMovePiece == other.specialMovePiece:\n if not self.specialMovePiece:\n return True\n if self.specialMovePiece and \\\n self.specialMovepiece == other.specialMovePiece:\n return True\n else:\n return False\n else:\n return False\n\n def __hash__(self):\n return hash((self.oldPos, self.newPos))\n\n def reverse(self):\n return Move(self.piece, self.piece.position,\n pieceToCapture=self.pieceToCapture)\n","sub_path":"OddMove.py","file_name":"OddMove.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"220934451","text":"# -*- coding: utf-8 -*-\n\n'''去空格'''\n# 对字符串进行循环处理,去除\\n\\r\\t标签,以及行首行尾空格\ndef clear_space_str(str):\n str = str.replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace('\\t', \"\").replace(\" \", \"\").strip()\n return str\n\n# 对列表进行循环处理,去除\\n\\r\\t标签,以及行首行尾空格\ndef clear_space(templist):\n for i in range(len(templist)):\n templist[i] = clear_space_str(templist[i])\n\n# 去除列表中字符串连续的空格\ndef clear_lianxu_space(templist):\n clear_space(templist)\n while '' in templist:\n templist.remove('')\n templist1Str = '\\n'.join(templist).strip()\n return templist1Str\n\n\ndef clear_space_list(templist):\n for i in range(len(templist)):\n templist[i] = templist[i].replace('\\n', \" \")\n templist[i] = templist[i].strip(\" \")\n templist[i] = templist[i].replace('\\r', \" \")\n templist[i] = templist[i].replace('\\t', \" \")\n return templist\n\n\n'''获取雅思/托福'''\nimport re\n# 仅限于两个分数的,一个总分,一个统一的小分\n# 从文本中正则匹配雅思分数进行拆分, 返回一个雅思字典\ndef get_ielts(ieltsStr):\n ieltDict = {}\n ieltlsrw = re.findall(r\"\\d\\.\\d\", ieltsStr)\n ieltlsrw = re.findall(r\"[\\d\\.]{1,4}\", ieltsStr)\n if len(ieltlsrw) >= 2:\n ieltDict['IELTS'] = ieltlsrw[0]\n ieltDict['IELTS_L'] = ieltlsrw[1]\n ieltDict['IELTS_S'] = ieltlsrw[1]\n ieltDict['IELTS_R'] = ieltlsrw[1]\n ieltDict['IELTS_W'] = ieltlsrw[1]\n elif len(ieltlsrw) == 1:\n ieltDict['IELTS'] = ieltlsrw[0]\n ieltDict['IELTS_L'] = ieltlsrw[0]\n ieltDict['IELTS_S'] = ieltlsrw[0]\n ieltDict['IELTS_R'] = ieltlsrw[0]\n ieltDict['IELTS_W'] = ieltlsrw[0]\n return ieltDict\n\n# 从文本中正则匹配托福分数进行拆分, 返回一个托福字典\ndef get_toefl(toeflStr):\n toeflDict = {}\n toefllsrw = re.findall(r\"\\d+\", toeflStr)\n # print(toefllsrw)\n if len(toefllsrw) >= 2:\n toeflDict['TOEFL'] = toefllsrw[0]\n toeflDict['TOEFL_L'] = toefllsrw[1]\n toeflDict['TOEFL_S'] = toefllsrw[1]\n toeflDict['TOEFL_R'] = toefllsrw[1]\n toeflDict['TOEFL_W'] = toefllsrw[1]\n elif len(toefllsrw) == 1:\n toeflDict['TOEFL'] = toefllsrw[0]\n toeflDict['TOEFL_L'] = toefllsrw[0]\n toeflDict['TOEFL_S'] = toefllsrw[0]\n toeflDict['TOEFL_R'] = toefllsrw[0]\n toeflDict['TOEFL_W'] = toefllsrw[0]\n return toeflDict\n\n'''获取学费'''\n# 获取学费当中的最大值\ndef getTuition_fee(str):\n allfee = re.findall(r'\\d+,\\d+', str)\n # print(allfee)\n for index in range(len(allfee)):\n fee = allfee[index].split(\",\")\n allfee[index] = ''.join(fee)\n # print(allfee[index])\n # print(allfee)\n maxfee = 0\n for fee in allfee:\n if int(fee) >= maxfee:\n maxfee = int(fee)\n return maxfee\n\ndef getT_fee(str):\n allfee = re.findall(r'\\d{5}', str)\n maxfee = 0\n for fee in allfee:\n if int(fee) >= maxfee:\n maxfee = int(fee)\n return maxfee\n\n'''去除标签'''\nimport re\n# 去除标签中的属性和a标签\ndef remove_class(var):\n clear_class=re.findall('[a-zA-Z\\-]*=\".+?\"', var)\n for i in clear_class:\n var=var.replace(' ' + i, '')\n var = var.replace('', '').replace(' ', '')\n return var","sub_path":"whb_crawler/scrapySchool_Canada_Ben/scrapySchool_Canada_Ben/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"541922786","text":"import structlog\nfrom strategies.strategy_utils import Utils\nimport pandas as pd\n\nclass MovingAvgConvDiv():\n def __init__(self):\n self.logger = structlog.get_logger()\n self.utils = Utils()\n\n def get_12_day_EMA(self, frame):\n twelve_day_EMA = frame.ewm(span=12)\n return list(twelve_day_EMA.mean()[\"Prices\"])\n\n def get_26_day_EMA(self, frame):\n twenty_six_day_EMA = frame.ewm(span=26)\n return list(twenty_six_day_EMA.mean()[\"Prices\"])\n\n def calculate_MACD_line(self, historical_data):\n closing_prices = self.utils.get_closing_prices(historical_data)\n emadict = {\"Prices\": closing_prices}\n frame = pd.DataFrame(emadict)\n twelve_day_EMA = self.get_12_day_EMA(frame)\n twenty_six_day_EMA = self.get_26_day_EMA(frame)\n macd = []\n for i in range(len(twelve_day_EMA)):\n macd.append(twelve_day_EMA[i] - twenty_six_day_EMA[i])\n return macd\n\n def calculate_signal_line(self, macd):\n signaldata = []\n for i in macd:\n signaldata.append(i)\n signal_dict = {\"Prices\": signaldata}\n signal = pd.DataFrame(signal_dict)\n signal = signal.ewm(span=9)\n signal = list(signal.mean()[\"Prices\"])\n return signal\n\n def calculate_MACD_delta(self, historical_data):\n MACD_line = self.calculate_MACD_line(historical_data)\n signal_line = self.calculate_signal_line(MACD_line)\n length = len(MACD_line)\n # Returns the difference between the last items in MACD and signal line\n return MACD_line[length-1] - signal_line[length-1]\n","sub_path":"app/strategies/moving_avg_convergence_divergence.py","file_name":"moving_avg_convergence_divergence.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"603030913","text":"import pytest\nfrom synapseclient.core.exceptions import SynapseHTTPError\nfrom src.syn_reports.commands.benefactor_permissions_report import BenefactorView\nfrom src.syn_reports.core.synapse_proxy import SynapseProxy\nimport synapseclient as syn\n\n\n@pytest.fixture()\ndef benefactor_view(synapse_test_helper):\n bv = BenefactorView()\n yield bv\n if bv.view_project:\n synapse_test_helper.dispose(bv.view_project)\n\n\n@pytest.fixture(scope='session')\ndef grant_access(syn_client):\n def _grant(syn_id, principal_id, access_type=SynapseProxy.Permissions.ADMIN):\n syn_client.setPermissions(syn_id, principalId=principal_id, accessType=access_type, warn_if_inherits=False)\n\n yield _grant\n\n\n@pytest.fixture(scope='session')\ndef test_data(synapse_test_helper, syn_client, grant_access):\n # Project\n # file0\n # folder1/\n # file1\n # folder2/\n # file2\n # folder3/\n # file3\n user_id = syn_client.getUserProfile()['ownerId']\n\n project = synapse_test_helper.create_project(prefix='project_')\n file0 = synapse_test_helper.create_file(parent=project, path=synapse_test_helper.create_temp_file(),\n prefix='file0_')\n grant_access(file0.id, user_id)\n\n folder1 = synapse_test_helper.create_folder(parent=project, prefix='folder1_')\n grant_access(folder1.id, user_id)\n file1 = synapse_test_helper.create_file(parent=folder1, path=synapse_test_helper.create_temp_file(),\n prefix='file1_')\n grant_access(file1.id, user_id)\n\n folder2 = synapse_test_helper.create_folder(parent=folder1, prefix='folder2_')\n grant_access(folder2.id, user_id)\n file2 = synapse_test_helper.create_file(parent=folder2, path=synapse_test_helper.create_temp_file(),\n prefix='file2_')\n grant_access(file2.id, user_id)\n\n folder3 = synapse_test_helper.create_folder(parent=folder2, prefix='folder3_')\n grant_access(folder3, user_id)\n file3 = synapse_test_helper.create_file(parent=folder3, path=synapse_test_helper.create_temp_file(),\n prefix='file3_')\n grant_access(file3, user_id)\n\n return {\n 'project': project,\n 'file0': file0,\n\n 'folder1': folder1,\n 'file1': file1,\n\n 'folder2': folder2,\n 'file2': file2,\n\n 'folder3': folder3,\n 'file3': file3,\n\n 'all_entities': [project, file0, folder1, file1, folder2, file2, folder3, file3]\n }\n\n\ndef test_it_loads_all_the_benefactors_for_a_project(benefactor_view, test_data):\n project = test_data['project']\n benefactor_view.set_scope(project)\n\n expected_entities = test_data['all_entities']\n assert len(benefactor_view) == len(expected_entities)\n for entity in expected_entities:\n assert {'benefactor_id': entity.id, 'project_id': project.id} in benefactor_view\n\n\ndef test_it_loads_all_the_benefactors_for_a_folder(benefactor_view, test_data):\n project = test_data['project']\n folder1 = test_data['folder1']\n benefactor_view.set_scope(folder1)\n\n expected_entities = [e for e in test_data['all_entities'] if\n e not in [test_data['project'], test_data['file0']]]\n assert len(benefactor_view) == len(expected_entities)\n for entity in expected_entities:\n assert {'benefactor_id': entity.id, 'project_id': project.id} in benefactor_view\n\n\ndef test_it_loads_all_the_benefactors_for_a_file(benefactor_view, test_data):\n project = test_data['project']\n file0 = test_data['file0']\n benefactor_view.set_scope(file0)\n\n expected_entities = [file0]\n assert len(benefactor_view) == len(expected_entities)\n for entity in expected_entities:\n assert {'benefactor_id': entity.id, 'project_id': project.id} in benefactor_view\n\n\ndef 
test_it_falls_back_to_individual_loading(benefactor_view, test_data, mocker):\n project = test_data['project']\n folder3 = test_data['folder3']\n\n orig__create_view = benefactor_view._create_view\n\n def mock__create_view(entity_types):\n # Allow the project view and folder3 view to be created, all others should fail and use the fallback.\n if entity_types == [syn.EntityViewType.PROJECT] or benefactor_view.scope == folder3:\n return orig__create_view(entity_types)\n else:\n raise SynapseHTTPError('scope exceeds the maximum number')\n\n mocker.patch.object(benefactor_view, '_create_view', new=mock__create_view)\n benefactor_view.set_scope(project)\n\n expected_entities = test_data['all_entities']\n assert len(benefactor_view) == len(expected_entities)\n for entity in expected_entities:\n assert {'benefactor_id': entity.id, 'project_id': project.id} in benefactor_view\n\n\ndef test_it_does_not_add_duplicate_items(benefactor_view):\n benefactor_view._add_item('1', '2')\n benefactor_view._add_item('1', '2')\n benefactor_view._add_item('1', '2')\n assert len(benefactor_view) == 1\n","sub_path":"tests/syn_reports/commands/benefactor_permissions_report/test_benefactor_view.py","file_name":"test_benefactor_view.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"527789500","text":"'''\nAGE SEX SPLITTING\n'''\n\nimport argparse\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\nfrom os.path import join\nfrom db_queries import get_location_metadata, get_population, get_cause_metadata\nfrom db_tools.ezfuncs import query\nimport getpass\nCOD_DIR = 'FILEPATH'.format(getpass.getuser())\nsys.path.append(COD_DIR)\nfrom cod_prep.claude.age_sex_split import AgeSexSplitter\nfrom cod_prep.downloaders import get_current_location_hierarchy\nfrom db_queries import (get_demographics,\n get_demographics_template as get_template,\n get_location_metadata as lm,\n get_population)\n\ndef run_cod_age_sex_splitting(db):\n # CHECK COMPLETENESS\n cause_set_version = 269 \n cm = get_cause_metadata(cause_set_version_id=cause_set_version)\n possible_causes = cm['cause_id'].unique().tolist()\n for cause_id in db['cause_id'].unique().tolist():\n assert cause_id in possible_causes, \"Cause ID {} not in hierarchy\".format(cause_id)\n loc_meta = get_location_metadata(gbd_round_id=5, location_set_id=21)\n possible_locs = loc_meta['location_id'].tolist()\n db = db.loc[db['location_id'].isin(possible_locs),:]\n db = db.loc[db['best'] > 0,:]\n db['hi_best_ratio'] = db['high'] / db['best']\n db['lo_best_ratio'] = db['low'] / db['best']\n db = db.reset_index(drop=True)\n db['unique_join'] = db.index\n db_merge_later = db.loc[:,['unique_join','hi_best_ratio','lo_best_ratio']]\n db = db.drop(labels=['high','low','hi_best_ratio','lo_best_ratio'],axis=1)\n id_cols = [i for i in db.columns if i not in ['best','age_group_id','sex_id']]\n cause_set_version_id = query(\"\"\"SELECT cause_set_version_id\n FROM ADDRESS\n WHERE gbd_round_id=5 AND cause_set_id=4;\"\"\",\n conn_def='epi').iloc[0,0]\n pop_run_id = get_population(gbd_round_id=5, status=\"recent\")['run_id'].iloc[0]\n splitter = AgeSexSplitter(\n cause_set_version_id=cause_set_version,\n pop_run_id=104,\n distribution_set_version_id=29,\n id_cols=['unique_join'],\n value_column='best')\n split_db = splitter.get_computed_dataframe(\n df=db,\n location_meta_df=loc_meta)\n split_db = pd.merge(left=split_db,\n right=db_merge_later,\n on=['unique_join'],\n how='left')\n split_db['low'] = split_db['best'] * split_db['lo_best_ratio']\n split_db['high'] = split_db['best'] * split_db['hi_best_ratio']\n split_db = split_db.drop(labels=['unique_join','lo_best_ratio',\n 'hi_best_ratio'],axis=1)\n return split_db\n\ndef get_korean_war_locations():\n side_a = [\"China\",\"Russian Federation\"]\n locs = get_current_location_hierarchy()\n side_a = pd.DataFrame(data = side_a, columns = {\"location_name\"})\n side_a = pd.merge(side_a, locs[['location_name','location_id']],how='left')\n side_a = list(locs[locs['parent_id'].isin(side_a['location_id'])]['location_id'])\n\n side_b_us = ['United States']\n side_b_us = pd.DataFrame(data = side_b_us, columns = {\"location_name\"})\n side_b_us = pd.merge(side_b_us, locs[['location_name','location_id']],how='left')\n side_b_us = list(locs[locs['parent_id'].isin(side_b_us['location_id'])]['location_id'])\n \n side_b_uk = ['United Kingdom']\n side_b_uk = pd.DataFrame(data = side_b_uk, columns = {\"location_name\"})\n side_b_uk = pd.merge(side_b_uk, locs[['location_name','location_id']],how='left')\n side_b_uk = list(locs[locs['parent_id'].isin(side_b_uk['location_id'])]['location_id'])\n side_b_uk = list(locs[locs['parent_id'].isin(side_b_uk)]['location_id'])\n side_b_uk = list(locs[locs['parent_id'].isin(side_b_uk)]['location_id'])\n location_id = 
[16,76,18,179,125,82,80,89,101,71,155,44850,44851] + side_a + side_b_uk + side_b_us\n return location_id\n\n\ndef war_age_override(df, age_groups, location_id, year_id):\n cause_id = 855\n war_df = df[(df['cause_id'] == cause_id) & \n (df['location_id'].isin(location_id)) & \n (df['year_id'].isin(year_id))]\n final_df = df[~((df['cause_id'] == cause_id) & \n (df['location_id'].isin(location_id)) & \n (df['year_id'].isin(year_id)))]\n\n assert round(war_df['best'].sum() + final_df['best'].sum()) == round(df['best'].sum())\n \n war_df = war_df.groupby(['year_id','location_id','cause_id','dataset'], as_index=False)['best','low','high'].sum()\n to_add = pd.DataFrame()\n for age_group in age_groups['age_group_id'].unique():\n percentage = age_groups[age_groups['age_group_id'] == age_group]['death_percentage'].iloc[0]\n war_df['age_group_id'] = age_group\n war_df['best_split'] = war_df['best'] * percentage\n war_df['high_split'] = war_df['high'] * percentage\n war_df['low_split'] = war_df['low'] * percentage\n to_add = to_add.append(war_df)\n to_add.drop(['best','low','high'],axis=1, inplace =True)\n to_add.rename(columns = {'best_split':'best',\n 'high_split':\"high\",\n 'low_split':\"low\"}, inplace=True)\n to_add['sex_id'] = 1\n final_df = final_df.append(to_add)\n assert round(final_df['best'].sum()) == round(df['best'].sum())\n return final_df\n\ndef split_by_pop(full_df, cause_id):\n total_b = round(full_df['best'].sum())\n total_h = round(full_df['high'].sum())\n total_l = round(full_df['low'].sum())\n \n final = full_df[full_df['cause_id'] != cause_id]\n df = full_df[full_df['cause_id'] == cause_id]\n\n if cause_id == 387:\n final.append(df.query(\"age_group_id == 2 | age_group_id == 3\"))\n df = df.query(\"age_group_id != 2 & age_group_id != 3\")\n\n locations = df.location_id.unique()\n years = df.year_id.unique()\n ages = df.age_group_id.unique()\n\n pop = get_population(age_group_id = list(ages),\n location_id = list(locations),\n year_id = list(years),\n sex_id = [1,2],\n run_id = 104)\n\n df = pd.merge(df,pop,how='left',on=['age_group_id', 'location_id','year_id','sex_id'])\n df['tpop']= df.groupby(['location_id','year_id'])['population'].transform(sum)\n df['tbest'] = df.groupby(['location_id','year_id'])['best'].transform(sum)\n df['thigh'] = df.groupby(['location_id','year_id'])['high'].transform(sum)\n df['tlow'] = df.groupby(['location_id','year_id'])['low'].transform(sum)\n df['rate'] = df['population'] / df['tpop']\n df['best'] = df['rate'] * df['tbest']\n df['high'] = df['rate'] * df['thigh']\n df['low'] = df['rate'] * df['tlow']\n df.drop(['population','run_id','tpop','rate',\"tbest\"],axis=1,inplace=True)\n\n final = final.append(df)\n assert round(final['best'].sum()) == total_b\n assert round(final['high'].sum()) == total_h\n assert round(final['low'].sum()) == total_l\n \n return final\n\n\ndef catch_over_100_pop(df):\n total = round(df['best'].sum())\n pop = get_population(age_group_id = -1,\n location_id = -1,\n year_id = range(1950,2018),\n sex_id = [1,2],\n run_id = 104)\n\n df_over = pd.merge(df,pop, how='left', on = ['location_id','year_id','age_group_id','sex_id'])\n df_over['percent'] = df_over['best'] / df_over['population']\n df_over = df_over.query(\"percent >= 1\")\n df_over['over'] = 1\n df = pd.merge(df,df_over[['location_id','year_id','over','sex_id','age_group_id','cause_id']], how='left', on =['cause_id','age_group_id','location_id','year_id','sex_id'])\n country_years = df.query(\"over == 1\")\n country_years = 
country_years.groupby(['location_id','year_id','cause_id'], as_index=False)['over'].mean()\n    df.drop(['over'],axis=1,inplace=True)\n    df = pd.merge(df,country_years,how='left',on=['location_id','year_id','cause_id'])\n    final = df.query(\"over != 1\")\n    df = df.query(\"over == 1\")\n\n\n    df_over_50 = df.query(\"age_group_id >= 15\")\n    df_under_5 = df.query(\"age_group_id <= 5\")\n    df_ok = df.query(\"age_group_id > 5 & age_group_id < 15\")\n    final = final.append(df_ok)\n\n    df_over_50 = pd.merge(df_over_50,pop,how='left', on =['age_group_id','location_id','year_id','sex_id'])\n    df_over_50['total_pop'] = df_over_50.groupby(['location_id','year_id','cause_id','sex_id'])['population'].transform(sum)\n    df_over_50['total_best'] = df_over_50.groupby(['location_id','year_id','cause_id','sex_id'])['best'].transform(sum)\n    df_over_50['rate'] = df_over_50['population'] / df_over_50['total_pop']\n    df_over_50['best'] = df_over_50['total_best'] * df_over_50['rate']\n    df_over_50.drop(['over','population','run_id','total_pop','total_best','rate'],axis=1, inplace=True)\n    final = final.append(df_over_50)\n\n    df_under_5 = pd.merge(df_under_5,pop,how='left', on =['age_group_id','location_id','year_id','sex_id'])\n    df_under_5['total_pop'] = df_under_5.groupby(['location_id','year_id','cause_id','sex_id'])['population'].transform(sum)\n    df_under_5['total_best'] = df_under_5.groupby(['location_id','year_id','cause_id','sex_id'])['best'].transform(sum)\n    df_under_5['rate'] = df_under_5['population'] / df_under_5['total_pop']\n    df_under_5['best'] = df_under_5['total_best'] * df_under_5['rate']\n    df_under_5.drop(['over','population','run_id','total_pop','total_best','rate'],axis=1, inplace=True)\n    final = final.append(df_under_5)\n    final.drop('over',axis=1,inplace=True)\n    assert round(final['best'].sum()) == total\n    return final\n\nif __name__==\"__main__\":\n    # Read input arguments\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-i\",\"--infile\",type=str,\n                        help=\"The CSV file that needs to be split\")\n    parser.add_argument(\"-o\",\"--outfile\",type=str,\n                        help=\"The CSV file where age-sex split data will be \"\n                             \"saved.\")\n    parser.add_argument(\"-n\",\"--encoding\",type=str,\n                        help=\"Encoding for all CSV files\")\n    cmd_args = parser.parse_args()\n    db = pd.read_csv(cmd_args.infile, encoding=cmd_args.encoding)\n    db.loc[~db['age_group_id'].isin(list(range(1,22)) + [30,31,32,235]),\n           'age_group_id']=22\n    split_db = run_cod_age_sex_splitting(db)\n\n    \n    vietnam_war_age_group_distribution = pd.read_csv(\"\")\n    korean_war_locations = get_korean_war_locations()\n    korean_war_years = list(range(1950,1954))\n    split_db = war_age_override(split_db,vietnam_war_age_group_distribution,korean_war_locations,korean_war_years)\n\n    split_db = catch_over_100_pop(split_db)\n\n    split_db.to_csv(cmd_args.outfile,index=False,encoding=cmd_args.encoding)\n","sub_path":"gbd_2017/mortality_code/fataldiscontinuities/age_sex_splitting/age_sex_split.py","file_name":"age_sex_split.py","file_ext":"py","file_size_in_byte":10839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"336553055","text":"import os\r\nimport glob\r\nfrom fileio import load_core\r\nimport numpy as np\r\n\r\ndef sub(fn, app_nap, mm):\r\n data, names = load_core(fn)\r\n if mm == 'min':\r\n j = data[app_nap].argmin()\r\n else:\r\n j = data[app_nap].argmax()\r\n r = data[j]\r\n d1 = r[0].strftime('%Y-%m-%d')\r\n print('%-12s %2d %s %5.1f %5.1f %6.0f' % (\r\n os.path.basename(fn), j, d1, r[2], r[3], r[4]))\r\n \r\ndef proc(app_nap, mm):\r\n ff = glob.glob('f:/github/sensfre/CabinetApproval/data/*.txt')\r\n print('%s %s' % (app_nap, mm))\r\n for f in ff:\r\n if os.path.basename(f)[:6] == 'sample':\r\n pass\r\n else:\r\n sub(f, app_nap, mm)\r\n print()\r\n \r\ndef main():\r\n proc('NAP_RATE', 'min')\r\n proc('NAP_RATE', 'max')\r\n proc('APP_RATE', 'max')\r\n proc('APP_RATE', 'min')\r\n \r\nmain()\r\n","sub_path":"script/p_minmax.py","file_name":"p_minmax.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"94437456","text":"import json\nfrom collections import namedtuple\n\n\nRole = namedtuple(\n \"Role\", [\"name\", \"held_in_conjunction\", \"number_of_positions\"]\n)\n\n\ndef decode_hook(o):\n if \"name\" in o:\n return Role(\n o[\"name\"],\n o[\"held_in_conjunction\"],\n o[\"number_of_positions\"],\n )\n return o\n\n\ndef load_roles(roles_fp):\n with open(roles_fp) as infile:\n return json.load(infile, object_hook=decode_hook)\n","sub_path":"roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"398883877","text":"'''\nCreated on Jul 7, 2011\n\n@author: qingyang\n'''\nimport dataPreparation\nimport csv\nimport sys\n\nRUBBOS_RESULTS_DIR_CONFIG_FILE = ''\n\ndef main():\n # process options\n if len(sys.argv) == 2:\n RUBBOS_RESULTS_DIR_CONFIG_FILE = sys.argv[1]\n else: \n # Windows environment\n RUBBOS_RESULTS_DIR_CONFIG_FILE = 'D:\\workspace\\SysVizResultAnalysis\\data/ana3/set_elba_env.sh'\n #sys.exit(1)\n \n\n rubbosAnalyzer = dataPreparation.analyzer('D:\\workspace\\SysVizResultAnalysis\\data\\ana3')\n \n output_dir = 'D:\\workspace\\SysVizResultAnalysis\\data/ana3' \n filepath = 'D:\\workspace\\SysVizResultAnalysis\\data/ana3' \n fileList = [\"wl3400\"]\n MultiMetricsList = [\"total:HTTP\", \"total:AJP\", \"total:DBmsg\", \"total:DBmsg_over10ms\",\"total:DBmsg_over100ms\",\"total:DBmsg_over1s\", \n \"1:HTTP\", \"1:AJP\", \"1:DBmsg\", \"1:DBmsg_over10ms\",\"1:DBmsg_over100ms\",\"1:DBmsg_over1s\",\n \"4:HTTP\", \"4:AJP\", \"4:DBmsg\", \"4:DBmsg_over10ms\",\"4:DBmsg_over100ms\",\"4:DBmsg_over1s\",\n \"11:HTTP\", \"11:AJP\", \"11:DBmsg\", \"11:DBmsg_over10ms\",\"11:DBmsg_over100ms\",\"11:DBmsg_over1s\"\n ]\n \n# MultiMetricsList = [\"total:HTTP\", \"total:AJP\", \"total:DBmsg\", \"total:DBmsg_over10ms\",\"total:DBmsg_over100ms\",\"total:DBmsg_over1s\", \n# \"1:HTTP\", \"1:AJP\", \"1:DBmsg\", \"1:DBmsg_over10ms\",\"1:DBmsg_over100ms\",\"1:DBmsg_over1s\",\n# \"4:HTTP\", \"4:AJP\", \"4:DBmsg\", \"4:DBmsg_over10ms\",\"4:DBmsg_over100ms\",\"4:DBmsg_over1s\",\n# \"11:HTTP\", \"11:AJP\", \"11:DBmsg\", \"11:DBmsg_over10ms\",\"11:DBmsg_over100ms\",\"11:DBmsg_over1s\", \n# ]\n \n# MultiMetricsList = [\"total:HTTP\", \"total:AJP\", \"total:DBmsg\", \"total:DBmsg_over10ms\",\"total:DBmsg_over100ms\",\"total:DBmsg_over1s\", \n# \"1:HTTP\", \"1:AJP\", \"1:DBmsg\", \"1:DBmsg_over10ms\",\"1:DBmsg_over100ms\",\"1:DBmsg_over1s\",\n# \"4:HTTP\", \"4:AJP\", \"4:DBmsg\", \"4:DBmsg_over10ms\",\"4:DBmsg_over100ms\",\"4:DBmsg_over1s\",\n# \"11:HTTP\", \"11:AJP\", \"11:DBmsg\", \"11:DBmsg_over10ms\",\"11:DBmsg_over100ms\",\"11:DBmsg_over1s\", \n# ]\n \n InOutPutMetricsList = [\"total:DBmsg_start\", \"total:DBmsg_start_over10ms\", \"total:DBmsg_start_over100ms\",\"total:DBmsg_start_over1s\",\"total:DBmsg_end\",\"total:DBmsg_end_over10ms\", \"total:DBmsg_end_over100ms\", \"total:DBmsg_end_over1s\"]\n# RSMetricsList = [\"total:HTTP_start_aveRS\", \"total:AJP_start_aveRS\", \"total:DBmsg_start_aveRS\", \"total:DBmsg_end_aveRS\", \n# \"1:HTTP_start_aveRS\", \"1:AJP_start_aveRS\", \"1:DBmsg_start_aveRS\", \"1:DBmsg_end_aveRS\", \n# \"4:HTTP_start_aveRS\", \"4:AJP_start_aveRS\", \"4:DBmsg_start_aveRS\", \"4:DBmsg_end_aveRS\", \n# \"11:HTTP_start_aveRS\", \"11:AJP_start_aveRS\", \"11:DBmsg_start_aveRS\", \"11:DBmsg_end_aveRS\"\n# ] \n \n RSMetricsList = [\"total:DBmsg_start_aveRS\", \"total:DBmsg_start_over10ms\", \"total:DBmsg_start_over100ms\", \"total:DBmsg_start_over1s\", \n \"1:DBmsg_start_aveRS\", \"1:DBmsg_start_over10ms\", \"1:DBmsg_start_over100ms\", \"1:DBmsg_start_over1s\", \n \"4:DBmsg_start_aveRS\", \"4:DBmsg_start_over10ms\", \"4:DBmsg_start_over100ms\", \"4:DBmsg_start_over1s\", \n \"11:DBmsg_start_aveRS\", \"11:DBmsg_start_over10ms\", \"11:DBmsg_start_over100ms\", \"11:DBmsg_start_over1s\"\n ] \n \n# RSMetricsList = [\"total:AJP_start_aveRS\", \"total:AJP_start_over10ms\", \"total:AJP_start_over100ms\", \"total:AJP_start_over1s\", \n# \"1:AJP_start_aveRS\", \"1:AJP_start_over10ms\", \"1:AJP_start_over100ms\", \"1:AJP_start_over1s\", \n# \"4:AJP_start_aveRS\", \"4:AJP_start_over10ms\", 
\"4:AJP_start_over100ms\", \"4:AJP_start_over1s\", \n# \"11:AJP_start_aveRS\", \"11:AJP_start_over10ms\", \"11:AJP_start_over100ms\", \"11:AJP_start_over1s\"\n# ] \n\n epochTime = True\n \n \n \n InOutPutMetricsList = [\"total:DBmsg_start\", \"total:DBmsg_start_over10ms\", \"total:DBmsg_start_over100ms\",\"total:DBmsg_start_over1s\",\"total:DBmsg_end\",\"total:DBmsg_end_over10ms\", \"total:DBmsg_end_over100ms\",\"total:DBmsg_end_over1s\",\n \"1:DBmsg_start\", \"1:DBmsg_start_over10ms\", \"1:DBmsg_start_over100ms\", \"1:DBmsg_start_over1s\",\n \"4:DBmsg_start\", \"4:DBmsg_start_over10ms\", \"4:DBmsg_start_over100ms\", \"4:DBmsg_start_over1s\",\n \"11:DBmsg_start\", \"11:DBmsg_start_over10ms\", \"11:DBmsg_start_over100ms\", \"11:DBmsg_start_over1s\"]\n# InOutPutMetricsList = [\"total:AJP_start\", \"total:AJP_start_over10ms\", \"total:AJP_start_over100ms\",\"total:AJP_start_over1s\",\n# \"1:AJP_start\", \"1:AJP_start_over10ms\", \"1:AJP_start_over100ms\", \"1:AJP_start_over1s\",\n# \"4:AJP_start\", \"4:AJP_start_over10ms\", \"4:AJP_start_over100ms\", \"4:AJP_start_over1s\",\n# \"11:AJP_start\", \"11:AJP_start_over10ms\", \"11:AJP_start_over100ms\", \"11:AJP_start_over1s\"]\n \n rubbosAnalyzer.generateCustmizedMultiplicity(filepath, output_dir, fileList, MultiMetricsList, epochTime)\n #rubbosAnalyzer.generateCustmizedResponsetime(filepath, output_dir, fileList, RSMetricsList, epochTime)\n rubbosAnalyzer.generateCustmizedInOutput(filepath, output_dir, fileList, InOutPutMetricsList, epochTime)\n \n MultiMetricsList = [\"total:DBmsg\"]\n CPUMetricsList = [\"[CPU]Totl%\", \"[CPU]User%\", \"[CPU]Sys%\", \"[CPU]Wait%\"] \n #rubbosAnalyzer.generateCustmizedFineGrainedMonitor(filepath, output_dir, fileList, CPUMetricsList, MultiMetricsList, epochTime)\n \n rubbosAnalyzer.generateSaturationDurationAna(filepath, output_dir, fileList, MultiMetricsList, epochTime)\n\n#\n# H = {}\n# for row in csv.reader(open(RUBBOS_RESULTS_DIR_CONFIG_FILE, \"r\"), delimiter=\"=\" ):\n# if len(row) == 2:\n# H[row[0]] = row[1]\n# \n# bonnServer_out_dir = H[\"BONN_RUBBOS_RESULTS_DIR_BASE\"]\n# sysVizServer_output_dir = H[\"SYSVIZ_RUBBOS_RESULTS_DIR_BASE\"]\n# rubbosData_output_dir = H[\"RUBBOS_RESULTS_DIR_NAME\"]\n# \n## bonnServer_out_dir = '/mnt/sdc/qywang/rubbosResult/2011-06-28T103236-0400-QYW-121-oneCore-DBconn12'\n## sysVizServer_output_dir = '/home/qywang/AnaResult'\n# tiers = int(H[\"EXPERIMENT_CONFIG_TIERS\"])\n# \n# if tiers ==3:\n# importModule = \"analyzer6_linux_middleTwoTier\" \n# rubbosAnalyzer.generateSysVizProcessingScripts(bonnServer_out_dir, sysVizServer_output_dir, rubbosData_output_dir, tiers, importModule)\n# elif tiers == 4:\n# importModule = \"analyzer6_linux_4tier_middleTwoTier\" \n# rubbosAnalyzer.generateSysVizProcessingScripts(bonnServer_out_dir, sysVizServer_output_dir, rubbosData_output_dir, tiers, importModule)\n \nif __name__ == '__main__':\n main()\n \n\n","sub_path":"rubbos_vm/elba_script/dataPreparationControlWindows.py","file_name":"dataPreparationControlWindows.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"588470130","text":"import numpy, random, math, sys\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\nimport test_data, kernel_functions as kf\n\nkfun = 'linear'\nopt = 2\nslack = None\nplotId = 0\nprint(sys.argv)\nfor i, arg in enumerate(sys.argv):\n if sys.argv[i] == '-kf':\n kfun = sys.argv[i+1]\n opt = int(sys.argv[i+2])\n if sys.argv[i] == '-slack':\n slack = float(sys.argv[i+1])\n if sys.argv[i] == '-pid':\n plotId = int(sys.argv[i+1])\n \n# What kernel function to use\nkernel_function = lambda x,y : kf.functions[kfun](x, y, opt)\n# Number of training samples\nN = 200\n# Generate our test data\ndata = test_data.TestData(N, True)\ndata.generate_data()\n# Initial guess of the alpha vector\nstart = numpy.zeros(N, dtype='float64')\nupper_bound = slack\n# Lower and upper bounds for each value in alpha vector\nB = [(0, upper_bound) for b in range(N)]\n# Global variable for alpha, targets and support vectors\nnonZeroAlpha = []\ntarget_values = []\nsupport_vectors = []\nbValue = 0\n\n# Pre-compute the matrix P by multiplying the every combination of target values, t, and kernel K.\npreComputedMatrix = numpy.empty([N,N])\nfor i in range(N):\n for j in range(N):\n preComputedMatrix[i][j] = data.targets[i] * data.targets[j] * kernel_function(data.inputs[i], data.inputs[j])\n\ndef printInputData():\n print(\"Input data\")\n for p in data.classA:\n print(f\"({p[0]}, {p[1]})\")\n for p in data.classB:\n print(f\"({p[0]}, {p[1]})\")\n print(\"\")\n\ndef zerofun(vec):\n scalar = numpy.dot(vec, data.targets)\n return scalar\n\nXC = constraint = {'type':'eq', 'fun':zerofun}\n\n# Take the alpha vector and return a scalar value by implementing the expression that should be minimized.\ndef objective(alpha_vector):\n alpha_sum = numpy.sum(alpha_vector)\n\n matmul = numpy.dot(alpha_vector, preComputedMatrix)\n vecmul = numpy.dot(alpha_vector, matmul)\n return 0.5 * vecmul - alpha_sum\n\ndef indicator(point):\n kernelMat = [kernel_function(point, x) for x in support_vectors]\n #val = numpy.sum(numpy.dot(kernelMat, numpy.dot(target_values, nonZeroAlpha))) - bValue\n val = 0\n for i,p in enumerate(kernelMat):\n val += nonZeroAlpha[i] * target_values[i] * kernelMat[i]\n #print(f\"{point} has indicator value {val}\")\n return val - bValue\n\ndef plot():\n plt.clf()\n # Plot input data\n plt.plot([p[0] for p in data.classA], [p[1] for p in data.classA], 'b. ')\n plt.plot([p[0] for p in data.classB], [p[1] for p in data.classB], 'r. 
')\n plt.axis('equal') # Force same scale on both axes\n \n # Plot decision boundary\n xgrid = numpy.linspace(-5, 5)\n ygrid = numpy.linspace(-4, 4)\n\n grid = numpy.array([[indicator([x,y]) for x in xgrid] for y in ygrid])\n #print(grid)\n plt.contour(xgrid, ygrid, grid, (-1.0, 0.0, 1.0), colors=('red', 'black', 'blue'), linewidths=(1,3,1))\n\n # Plot support vector points in green\n for i, point in enumerate(support_vectors):\n ind = indicator(point)\n plt.plot(point[0], point[1], 'k.')\n\n path = 'C:/Users/Adrian/Pictures/ML-DD2421/svm/slack C 1/svmplot{:d}.png'.format(plotId)\n plt.savefig(path) # Save a copy in a file\n #plt.show() # Show the plot on the screen\n\ndef main():\n #printInputData()\n global nonZeroAlpha, support_vectors, target_values, bValue\n ret = minimize(objective, start, bounds = B, constraints = XC)\n success = ret['success']\n alpha = ret['x']\n if success:\n #print(\"Alpha vector\\n\", alpha)\n # Find all alpha values above a certain threshhold and get the\n # corresponding inputs and target values\n nonZeroAlpha = alpha[alpha > 10 ** -5]\n indices = numpy.nonzero(alpha > 10 ** -5)\n sWithCorrValues = [(alpha[x], data.inputs[x], data.targets[x]) for x in indices[0]]\n # Unzip to get our target values in a list\n _, support_vectors, target_values = zip(*sWithCorrValues)\n\n # Calculate b value\n tmp = [kernel_function(support_vectors[0], x) for x in support_vectors]\n #bValue = numpy.sum(numpy.dot(numpy.dot(nonZeroAlpha, target_values), tmp)) - target_values[0]\n for i,p in enumerate(tmp):\n bValue += nonZeroAlpha[i] * target_values[i] * tmp[i]\n bValue -= target_values[0] \n #print(bValue)\n\n #print(\"\\nSupport vectors\")\n # Indicator function\n \"\"\"\n for i, point in enumerate(support_vectors):\n ind = indicator(point)\n print(f\"({point[0]}, {point[1]}) classified as {ind}\")\n print(f\"alpha = {nonZeroAlpha[i]}\")\n print(f\"target = {target_values[i]}\")\n \"\"\"\n plot()\n #plotDecisionBoundary()\n else:\n print(\"No solution found\")\n\nmain()\n","sub_path":"svm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"390235571","text":"# First we'll import the os module\n# This will allow us to create file paths across operating systems\nimport os\n\n# Module for reading CSV files\nimport csv\n\n#filepath = '..\\Resources\\Accounting.csv'\ncsvpath = os.path.join(\"..\", \"Resources\", 'election_data.csv')\nprint(\"csvpath.. \" + csvpath)\n\n\n\nwith open(csvpath) as csvfile:\n\n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n\n # Read the header row first (skip this step if there is now header)\n csv_header = next(csvreader)\n print(f\"CSV Header: {csv_header}\")\n\n\n print(csvreader)\n total_number_of_votes_cast = 0\n Khan_count = 0\n Correy_count = 0\n Li_count = 0\n Other_count = 0\n OTooley_count = 0\n \n \n # Read each row of data after the header\n for row in csvreader:\n voterID = str(row[0])\n county = str(row[1])\n candidate = str(row[2])\n total_number_of_votes_cast += 1\n if candidate == 'Khan':\n Khan_count += 1\n elif (candidate == 'Correy'):\n Correy_count += 1\n elif (candidate == 'Li'):\n Li_count += 1\n elif (candidate == \"O'Tooley\"):\n OTooley_count += 1\n else:\n Other_count += 1\n print(\"Other Candidate: \" + row[2])\n \n \n print(\"Election Results\")\n print(\"-------------------------\")\n print(\"Total Votes: \" + str(total_number_of_votes_cast))\n print(\"-------------------------\")\n Khan_percentage = (Khan_count/total_number_of_votes_cast) * 100\n Li_percentage = (Li_count/total_number_of_votes_cast) * 100\n OTooley_percentage = (OTooley_count/total_number_of_votes_cast) * 100\n Correy_percentage = (Correy_count/total_number_of_votes_cast) * 100\n if (Khan_count > Li_count) and (Khan_count > OTooley_count) and (Khan_count > Correy_count):\n winner = 'Khan'\n elif (Li_count > OTooley_count) and (Li_count > Khan_count) and (Li_count > Correy_count):\n winner = 'Li'\n elif (Correy_count > OTooley_count) and (Correy_count > Khan_count) and (Correy_count > Li_count):\n winner = 'Correy'\n elif (OTooley_count > Li_count) and (OTooley_count > Khan_count) and (OTooley_count > Correy_count):\n winner = \"O'Tooley\"\n \n print(\"Khan: \" + str(Khan_percentage) + \"%\" + \" \" + \"(\" + str(Khan_count) + \")\")\n print(\"Correy: \" + str(Correy_percentage) + \"%\" + \" \" + \"(\" + str(Correy_count) + \")\")\n print(\"Li: \" + str(Li_percentage) + \"%\" + \" \" + \"(\" + str(Li_count) + \")\")\n print(\"O'Tooley: \" + str(OTooley_percentage) + \"%\" + \" \" + \"(\" + str(OTooley_count) + \")\")\n print(\"-------------------------\")\n print(\"Winner: \" + str(winner))\n print(\"-------------------------\")\n\n\n","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"57831115","text":"#\n# @lc app=leetcode.cn id=264 lang=python3\n#\n# [264] 丑数 II\n#\n\n# @lc code=start\nimport heapq\nclass Ugly:\n def __init__(self):\n seen={1,}\n self.nums=nums=[]\n heap=[]\n heapq.heappush(heap,1)\n\n for i in range(1690):\n curr_ugly=heapq.heappop(heap)\n nums.append(curr_ugly)\n for j in [2,3,5]:\n new_ugly=curr_ugly*j\n if new_ugly not in seen:\n seen.add(new_ugly)\n heapq.heappush(heap,new_ugly)\nclass Solution:\n u=Ugly()\n def nthUglyNumber(self, n: int) -> int:\n return self.u.nums[n-1]\n \n# @lc code=end\n\n","sub_path":"Week_03/G20200343040276/264.丑数-ii.py","file_name":"264.丑数-ii.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"441068176","text":"import random\nimport time\nfrom functools import reduce\n\n#游戏\ndef game():\n print('------------------------------------------')\n print('歡迎光臨21點!遊戲開始!')\n user = [randcard(),randcard()]\n ai = [randcard(),randcard()]\n ai_hide = [ai[0],'暗牌']\n print('您的牌組:',user)\n print('莊家牌組:',ai_hide)\n if sum(user) > 21:\n print('遊戲結束!您的牌組是',user,',點數是',sum(user),',超過了21點。')\n elif sum(user) == 21:\n print('哇哦!剛好21點!')\n elif sum(ai) > 21:\n print('恭喜!您獲得了勝利!莊家的點數為',sum(ai),',超過了21點。')\n elif sum(user) > 21 and sum(ai) > 21:\n print('平局!您的點數是',sum(user),',莊家的點數是',sum(ai),'。')\n else:\n decide(user,ai,ai_hide)\n # compare(user,ai)\n check = input(\"你要繼續玩嗎(Y/N)? :\")\n while True:\n if check == 'Y' or check == 'y':\n print(\"\\n\\n\")\n return play()\n elif check == 'N' or check == 'n':\n return\n else:\n check = input(\"格式錯誤,請輸入Y/N : \")\n\n#抉择\ndef decide(user,ai,ai_hide):\n decision = str(input('請輸入您的決定,“h”是繼續發牌,“s”是停止發牌(h/s):')).lower()\n if decision == 'h':\n user.append(randcard())\n ai.append(randcard())\n ai_hide.append('暗牌')\n if sum(user) > 21:\n print('遊戲結束!您的牌組是',user,',點數是',sum(user),',超過了21點。')\n elif sum(ai) > 21:\n print('恭喜!您獲得了勝利!莊家的點數為',sum(ai),',超過了21點。')\n elif sum(user) > 21 and sum(ai) > 21:\n print('平局!您的點數是',sum(user),',莊家的點數是',sum(ai),',你們的點數都超過了21點。')\n else:\n print('您的牌组:',user)\n print('莊家牌組:',ai_hide)\n return decide(user,ai,ai_hide)\n elif decision == 's':\n compare(user,ai)\n\n#牌组\ncards = [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13]\n\n#发牌\ndef randcard():\n index = random.randint(0,len(cards)-1)\n randcard = cards[index]\n del cards[index]\n if len(cards) == 0:\n print('遊戲結束!沒牌發啦!')\n else:\n return randcard\n\n#求和\ndef sum(user):\n return reduce(lambda x,y: x+y, user)\n\n#判断\ndef compare(user,ai):\n minus_user = 21 - sum(user)\n minus_ai = 21 - sum(ai)\n if minus_user < minus_ai:\n print('恭喜!您獲得了勝利!您的點數是',sum(user),',莊家的點數是',sum(ai),'。')\n elif minus_user > minus_ai:\n print('非常遺憾!您輸掉了本場對局,您的點數是',sum(user),',莊家的點數是',sum(ai))\n elif minus_user == minus_ai:\n print('平局!您的點數是',sum(user),',莊家的點數是',sum(ai),'。')\n \ndef play():\n game()\n\nif __name__ == \"__main__\":\n play()","sub_path":"BlackJack.py","file_name":"BlackJack.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"76085463","text":"import requests\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nimport random\nfrom concurrent.futures import ThreadPoolExecutor\nimport time\nimport json\nimport os\nimport csv\n\ndef write_success_csv(info,file='success.csv'):\n with open(file, 'a', encoding='utf-8',newline='') as f:\n csv_f=csv.writer(f)\n csv_f.writerow(info)\n\n\ndef read_urls(file='urls.txt'):\n with open(file,'r',encoding='utf-8') as f:\n return [x.strip() for x in f.readlines() if x.strip()!='']\n\ndef read_log(file='log.txt'):\n if os.path.exists(file):\n with open(file,'r',encoding='utf-8') as f:\n return [x.strip() for x in f.readlines() if x.strip()!='']\n else:\n return []\n\ndef write_log(info,file='log.txt'):\n with open(file, 'a', encoding='utf-8') as f:\n f.write(info+'\\n')\n\ndef write_success(info,file='success.txt'):\n with open(file, 'a', encoding='utf-8') as f:\n f.write(info+'\\n')\n\ndef get_visiter_id(sess,domain,business_id, service_id):\n url=f'{domain}/index/index/home?visiter_id=&visiter_name=&avatar=&business_id={business_id}&groupid=0&special={service_id}'\n # print(url)\n headers = {\n \"User-Agent\": UA,\n \"Referer\": \"https:///www.google.com\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n }\n response=sess.get(url,headers=headers,verify=False,allow_redirects=True,timeout=timeout)\n # print(response.text)\n if response.status_code==500:\n # print(500)\n return False\n elif response.status_code==200:\n # print(200)\n visiter_id=response.cookies['visiter_id']\n chat_url=response.url\n return chat_url,visiter_id,url\n else:\n # print(\"???\",response.status_code)\n return False\n\ndef send_notice(sess,domain,visiter_id,business_id, service_id,chat_url):\n url=f'{domain}/admin/event/notice'\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Referer': chat_url, # 'Cookie':f'''PHPSESSID={sess.cookies['PHPSESSID']};visiter_id={visiter_id}''',\n 'User-Agent': UA,\n 'X-Requested-With': 'XMLHttpRequest',\n }\n data={\n 'visiter_id': visiter_id,\n 'visiter_name':'游客'+visiter_id,\n 'business_id': business_id,\n 'from_url':'',\n 'avatar': '/assets/images/index/avatar-red2.png',\n 'groupid': '0',\n 'special': service_id\n }\n\n response=sess.post(url,data=data,headers=headers,verify=False,timeout=timeout)\n if response.status_code!=200:\n return False\n try:\n # print(response.text)\n response=json.loads(response.text)\n if response['code'] == 0:\n return True\n except:\n print(response.status_code)\n return False\n\ndef send_message(sess,domain,visiter_id,business_id, service_id,chat_url):\n url=f'{domain}/admin/event/chat'\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Cache-Control': 'no-cache',\n 'Pragma': 'no-cache',\n 'Host':domain.replace('https://','').replace('http://',''),\n 'Origin':domain,\n 'Referer': chat_url,\n 'Cookie':f'''PHPSESSID={sess.cookies['PHPSESSID']};visiter_id={visiter_id}''',\n 'User-Agent': UA,\n 'X-Requested-With': 'XMLHttpRequest',\n # 'sec-ch-ua': '\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", 
\"Not:A-Brand\";v=\"99\"',\n # 'sec-ch-ua-mobile': '?0''',\n # 'sec-ch-ua-platform': \"Windows\",\n # 'Sec-Fetch-Dest': 'empty',\n # 'Sec-Fetch-Mode': 'cors',\n # 'Sec-Fetch-Site': 'same-origin'\n }\n\n data={\n 'visiter_id': visiter_id,\n 'content': \"test`}\",\n 'business_id': business_id,\n 'avatar': '/assets/images/index/avatar-red2.png',\n 'record': \"\",\n 'service_id': service_id\n }\n response=sess.post(url,data=data,verify=False,headers=headers,timeout=timeout)\n print(url,business_id,service_id,response.text)\n print(headers)\n response=json.loads(response.text)\n if response['code'] in ['0',0] :\n return True\n else:\n return False\n\ndef check_domain_isalive(domain):\n try:\n domain = domain.strip()\n domain = domain.rstrip('/')\n url = domain + '/admin/event/chat'\n response = requests.get(url, verify=False)\n print(response.status_code)\n if response.status_code in [500]:\n return True\n else:\n return False\n except requests.exceptions.RequestException as e:\n return False\n\n\ndef check_id(domain,business_id):\n\n print(f'开始检测{domain},business_id:{business_id}')\n for service_id in range(1,100):\n sess = requests.Session()\n business_id = str(business_id)\n service_id=str(service_id)\n\n\n result= get_visiter_id(sess,domain,business_id, service_id)\n if result:\n chat_url, visiter_id, url=result\n else:\n break\n\n result=send_notice(sess=sess,domain=domain,visiter_id=visiter_id, business_id=business_id, service_id=service_id, chat_url=chat_url)\n if not result:\n continue\n\n result=send_message(sess=sess,domain=domain,visiter_id=visiter_id, business_id=business_id, service_id=service_id, chat_url=chat_url)\n\n if result:\n write_success(url)\n write_success_csv([domain,business_id,service_id,url,chat_url])\n print(f'{url} 检测成功,写入记录')\n break\n\ndef check_domain(domain):\n\n private_pool=ThreadPoolExecutor(max_workers=thread_of_pre_domain)\n\n for business_id in range(1,100):\n private_pool.submit(check_id,domain,business_id)\n\n private_pool.shutdown(wait=True)\n\n write_log(info=domain)\n\n has_done.append(domain)\n\nt=time.time()\n\ndomains=read_urls()\n\nhas_done=read_log()\n\ntimeout=(10,10)\n\nUA='''Mozilla/5.0 (iPhone; CPU OS 16_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Mobile/14E304 Safari/605.1.15'''\n\nthread_of_domain=10\nthread_of_pre_domain=15\n\ndomains_pool=ThreadPoolExecutor(max_workers=thread_of_domain)\n\nfor domain in domains:\n\n domain = domain.strip()\n domain = domain.rstrip('/')\n\n if not check_domain_isalive(domain):\n print(f'{domain} 检测不符合要求,跳过')\n continue\n\n if domain in has_done:\n print(f'{domain} 已检测,跳过')\n continue\n\n domains_pool.submit(check_domain,domain)\n\ndomains_pool.shutdown(wait=True)\n\nprint(f\"所有请求已完成,用时{time.time()-t}s\")\n\n# check_domain('http://114.132.153.58:8089')\n#\n\n\n\n","sub_path":"爬虫/周杰伦Ovo-访问网址,发送信息/遍历url.py","file_name":"遍历url.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"181099118","text":"# -*- coding: utf-8 -*-\n#\n# Authors:\n# Pavel Březina \n#\n# Copyright (C) 2019 Red Hat\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\nimport argparse\nimport colorama\nimport copy\nimport inspect\nimport sys\nimport textwrap\nimport traceback\n\nfrom .actions import SubparsersAction\nfrom .colors import format_colors\nfrom .decorators import TimeoutError\nfrom .shell import Shell, ShellScriptError, ShellScriptTimeoutError\nfrom .task import TaskList\n\n\ndef check_instance(item, allowed_classes):\n if isinstance(item, allowed_classes):\n return\n\n raise ValueError('Expected instance of {}, got {}'.format(\n ', '.join([cls.__name__ for cls in allowed_classes]),\n item.__class__\n ))\n\n\nclass Actor(object):\n def __init__(self):\n self.shell = Shell()\n self.parser = None\n self.runner = None\n\n def _setup_parser(self, parser):\n self.parser = parser\n self.setup_parser(parser)\n\n def _set_runner(self, runner):\n self.runner = runner\n\n def setup_parser(self, parser):\n # There are not any options by default.\n return\n\n def message(self, message, *args, **kwargs):\n self.runner.message(message, *args, **kwargs)\n\n def error(self, message, *args, **kwargs):\n self.runner.error(message, *args, **kwargs)\n\n def call(self, command, *args, **kwargs):\n self.runner.call(command, *args, **kwargs)\n\n def tasklist(self, name=None, tasks=None):\n return TaskList(self.runner, name, tasks)\n\n def run(self):\n raise NotImplementedError(\"run() method is not implemented\")\n\n\nclass Command(object):\n def __init__(self, name, help, handler, **kwargs):\n self.handler = handler() if inspect.isclass(handler) else handler\n self.name = name\n self.help = help\n self.kwargs = kwargs\n\n def setup_parser(self, parent_parser):\n check_instance(self.handler, (Actor, CommandParser))\n\n parser = parent_parser.add_parser(\n self.name, help=self.help,\n formatter_class=argparse.RawTextHelpFormatter,\n **self.kwargs\n )\n\n if isinstance(self.handler, Actor):\n parser.set_defaults(func=self.handler)\n self.handler._setup_parser(parser)\n return\n\n # CommandParser\n parser.set_defaults(func=parser)\n self.handler.setup_parser(parser)\n\n\nclass CommandParser(object):\n def __init__(self, items=None, title=None, metavar='COMMANDS', **kwargs):\n self.items = items if items is not None else []\n self.title = title\n self.metavar = metavar\n self.kwargs = kwargs\n\n def add(self, item):\n self.items.append(item)\n return self\n\n def add_list(self, items):\n self.items += items\n return self\n\n def setup_parser(self, parent_parser):\n subparser = parent_parser.add_subparsers(\n action=SubparsersAction,\n title=self.title,\n metavar=self.metavar,\n **self.kwargs\n )\n\n for item in self.items:\n check_instance(item, (Command, CommandList, CommandGroup))\n item.setup_parser(subparser)\n\n return subparser\n\n def __iter__(self):\n return self.items.__iter__()\n\n def __next__(self):\n return 
self.items.__next__()\n\n\nclass CommandGroup(CommandParser):\n def __init__(self, title, items=None, **kwargs):\n super().__init__(items, title=title, **kwargs)\n\n def setup_parser(self, parent_parser):\n group = parent_parser.add_parser_group(self.title)\n\n for item in self.items:\n check_instance(item, (Command, CommandList, CommandGroup))\n item.setup_parser(group)\n\n return group\n\n\nclass CommandList(CommandParser):\n def __init__(self, items=None):\n super().__init__(items)\n\n def setup_parser(self, parent_parser):\n for item in self.items:\n check_instance(item, (Command, CommandGroup))\n item.setup_parser(parent_parser)\n\n\nclass Runner:\n def __init__(self, name):\n self.name = name\n self._print_shell = False\n self._dry_run = False\n\n def execute(self, parser, argv):\n split_argv = []\n if '--' in argv:\n split_argv = argv[argv.index('--') + 1:]\n argv = argv[:argv.index('--')]\n\n parser.add_argument(\n '--print-shell', action='store_true', dest='_runner_print_shell',\n help='Print shell commands that are being executed.'\n )\n\n parser.add_argument(\n '--dry-run', action='store_true', dest='_runner_dry_run',\n help='Print commands that are being executed without '\n 'actually running them.'\n )\n\n args = parser.parse_args(argv)\n self._print_shell = args._runner_print_shell\n self._dry_run = args._runner_dry_run\n\n Shell.PrintCommand = self._print_shell or self._dry_run\n Shell.DryRun = self._dry_run\n\n if not hasattr(args, 'func'):\n parser.print_help()\n return 1\n\n try:\n self._run_handler(args.func, args, split_argv)\n except ShellScriptTimeoutError as e:\n self._handle_shell_timeout_error(e, *sys.exc_info())\n return 255\n except ShellScriptError as e:\n self._handle_shell_error(e, *sys.exc_info())\n return e.returncode\n except TimeoutError as e:\n self._handle_timeout_error(e, *sys.exc_info())\n return 255\n except Exception:\n self._handle_exception(*sys.exc_info())\n return 1\n\n return 0\n\n def call(self, command, args=None, argv=None, **kwargs):\n handler = command() if inspect.isclass(command) else command\n argv = argv if argv is not None else []\n\n args = copy.copy(args) if args is not None else argparse.Namespace()\n for name in kwargs:\n setattr(args, name, kwargs[name])\n\n setattr(args, '_runner_print_shell', self._print_shell)\n setattr(args, '_runner_dry_run', self._dry_run)\n setattr(args, 'func', handler)\n\n self._run_handler(handler, args, argv)\n\n def message(self, message, color=colorama.Fore.BLUE,\n style=colorama.Style.BRIGHT, file=sys.stdout, without_prefix=False):\n if without_prefix:\n print(textwrap.indent(\n textwrap.dedent(message).strip(),\n ' ' * len('[{runner}] '.format(runner=self.name))\n ), file=file, flush=True)\n return\n\n print(textwrap.indent(\n textwrap.dedent(message).strip().format(**format_colors()),\n '{s-r}{style}{color}[{runner}]{s-r} '.format(\n color=color,\n style=style,\n runner=self.name,\n **format_colors()\n )\n ), file=file, flush=True)\n\n def error(self, message, color=colorama.Fore.RED,\n style=colorama.Style.BRIGHT, file=sys.stderr, without_prefix=False):\n self.message(message, color, style, file, without_prefix)\n\n def _run_handler(self, handler, args, argv):\n check_instance(handler, (Actor, argparse.ArgumentParser))\n\n # Handler is ArgumentParser, print help.\n if isinstance(handler, argparse.ArgumentParser):\n handler.print_help()\n return\n\n # Handler is Actor, finalize it and execute.\n handler._set_runner(self)\n numargs = len(inspect.signature(handler.run).parameters)\n if numargs == 
0:\n handler.run()\n elif numargs == 1:\n handler.run(args)\n elif numargs == 2:\n handler.run(args, argv)\n else:\n raise TypeError('Unexpected number of arguments for command handler.')\n\n def _handle_exception(self, type, value, tb):\n self.error('''\n {s-b}Exception {c-b}{type}{s-r}{s-b}: {value}{s-r}\n {s-b}Traceback (most recent call last):{s-r}\n '''.format(\n type=type.__name__,\n value=value,\n **format_colors()\n ))\n\n traceback.print_tb(tb)\n\n def _handle_shell_error(self, err, type, value, tb):\n env = '(empty)' if not err.env else err.flat_env()\n\n self.error('''\n {s-b}The following command exited with {code}:\n {s-b}Working directory: {s-r}{c-b}{cwd}{s-r}\n {s-b}Environment: {s-r}{c-b}{env}{s-r}\n {s-b}Command: {s-r}{cmd}\n '''.format(\n code=err.returncode,\n env=env,\n cwd=err.cwd,\n cmd=err.flat_cmd(),\n **format_colors()\n ))\n\n if err.script:\n self.error(err.script, without_prefix=True)\n\n if err.output:\n self.error('{s-b}Command standard output:{s-r}')\n self.error(err.output.decode('utf-8'), without_prefix=True)\n\n if err.stderr:\n self.error('{s-b}Command error output:{s-r}')\n self.error(err.stderr.decode('utf-8'), without_prefix=True)\n\n def _handle_shell_timeout_error(self, err, type, value, tb):\n env = '(empty)' if not err.env else err.flat_env()\n\n self.error('''\n {s-b}The following command did not finished in {timeout} seconds:\n {s-b}Working directory: {s-r}{c-b}{cwd}{s-r}\n {s-b}Environment: {s-r}{c-b}{env}{s-r}\n {s-b}Command: {s-r}{cmd}\n '''.format(\n timeout=err.timeout,\n env=env,\n cwd=err.cwd,\n cmd=err.flat_cmd(),\n **format_colors()\n ))\n\n if err.script:\n self.error(err.script, without_prefix=True)\n\n if err.output:\n self.error('{s-b}Command standard output:{s-r}')\n self.error(err.output.decode('utf-8'), without_prefix=True)\n\n if err.stderr:\n self.error('{s-b}Command error output:{s-r}')\n self.error(err.stderr.decode('utf-8'), without_prefix=True)\n\n def _handle_timeout_error(self, err, type, value, tb):\n self.error('''\n {s-b}Exception {c-b}{type}{s-r}{s-b}{s-r}\n {s-b}Operation did not finished in {timeout} seconds.\n {s-b}{message}{s-r}\n '''.format(\n type=type.__name__,\n timeout=err.timeout,\n message=str(err),\n **format_colors()\n ))\n","sub_path":"cli/lib/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":11006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"231141902","text":"\"\"\"add super user\n\nRevision ID: 9d6648e02bfb\nRevises: 03838d5ae179\nCreate Date: 2017-06-09 14:40:56.125696\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom config.database_config import SUPER_USER_FIRST_NAME, SUPER_USER_LAST_NAME, SUPER_USER_EMAIL\n\n\n# revision identifiers, used by Alembic.\nrevision = '9d6648e02bfb'\ndown_revision = '03838d5ae179'\nbranch_labels = None\ndepends_on = None\n\n# Create ad-hoc tables to use for the insert and delete statements.\nmember_table = sa.table('members',\n sa.Column('id', sa.Integer),\n sa.Column('first_name', sa.String),\n sa.Column('last_name', sa.String),\n sa.Column('email_address', sa.String)\n )\n\nrole_table = sa.table('roles',\n sa.Column('member_id', sa.Integer),\n sa.Column('role', sa.String))\n\n\ndef upgrade():\n\n op.bulk_insert(member_table, [{'id': 1, 'first_name': SUPER_USER_FIRST_NAME,\n 'last_name': SUPER_USER_LAST_NAME, 'email_address': SUPER_USER_EMAIL}])\n\n\n op.bulk_insert(role_table, [{'member_id': 1, 'role': 'admin'}])\n\n\ndef downgrade():\n op.execute(\n role_table.delete().where(role_table.c.member_id == 1)\n )\n op.execute(\n member_table.delete().where(member_table.c.id == 1)\n )\n","sub_path":"alembic/versions/9d6648e02bfb_add_super_user.py","file_name":"9d6648e02bfb_add_super_user.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"79906635","text":"__all__ = []\n\n\n# standard library\nfrom logging import getLogger\nfrom pathlib import Path\nfrom subprocess import PIPE\nfrom subprocess import run as sprun\nfrom subprocess import CalledProcessError, TimeoutExpired\n\n\n# dependencies\nimport ndradex\n\n\n# constants\nN_VARS = 10\nERROR_OUTPUT = (\"NaN\",) * N_VARS\n\n\nlogger = getLogger(__name__)\n\n\n# main function\ndef run(\n input,\n radex=None,\n timeout=None,\n cleanup=True,\n logfile=\"radex.log\",\n encoding=\"utf-8\",\n):\n \"\"\"Run RADEX and get result as tuple of string.\n\n Note that this function only reads the last line of RADEX outfile.\n This means that only the values of the transition at the highest\n frequency specified in the RADEX input will be returned.\n\n Args:\n input (str or sequence): RADEX input. See examples below.\n radex (str or path, optional): RADEX path. If not specified,\n then the builtin RADEX with uniform geometry will be used.\n timeout (int, optional): Timeout of a RADEX run in units of second.\n Default is None (unlimited run time is permitted).\n cleanup (bool, optional): If True (default), then the RADEX outfile\n (e.g. radex.out) and logfile (e.g., radex.log) will be deleted.\n logfile (str or path, optional): Path of logfile. This is only used\n for identifying the path of logfile in the cleanup method.\n encoding (str, optional): File encoding. Default is utf-8.\n\n Returns:\n output (tuple of str): RADEX output values.\n\n Examples:\n To get the values of CO(1-0) @ T_kin = 100 K, n_H2 = 1e3 cm^-3,\n N_CO = 1e15 cm^-2, T_bg = 2.73 K, and dv = 1.0 km s^-1:\n\n >>> input = ['co.dat', 'radex.out', '110 120', '100',\n '1', 'H2', '1e3', '2.73', '1e15', '1.0', '0']\n >>> output = run(input)\n\n \"\"\"\n if radex is None:\n radex = ndradex.RADEX_BINPATH / \"radex-uni\"\n\n try:\n input, outfile = ensure_input(input, encoding)\n except (AttributeError, IndexError, TypeError):\n logger.warning(\"RADEX did not run due to invalid input\")\n return ERROR_OUTPUT\n\n try:\n cp = sprun(\n [radex],\n input=input,\n timeout=timeout,\n stdout=PIPE,\n stderr=PIPE,\n check=True,\n )\n return ensure_output(cp, outfile, encoding)\n except FileNotFoundError:\n logger.warning(\"RADEX path or moldata does not exist\")\n return ERROR_OUTPUT\n except CalledProcessError:\n logger.warning(\"RADEX failed due to invalid input\")\n return ERROR_OUTPUT\n except TimeoutExpired:\n logger.warning(\"RADEX interrupted due to timeout\")\n return ERROR_OUTPUT\n except RuntimeError:\n logger.warning(\"RADEX version is not valid\")\n return ERROR_OUTPUT\n finally:\n if cleanup:\n remove_file(logfile)\n remove_file(outfile)\n\n\n# utility functions\ndef ensure_input(input, encoding=\"utf-8\"):\n \"\"\"Ensure the type of input and the path of outfile.\"\"\"\n if isinstance(input, (list, tuple)):\n outfile = input[1]\n input = \"\\n\".join(input).encode(encoding)\n else:\n outfile = input.split(\"\\n\")[1]\n input = input.encode(encoding)\n\n return input, outfile\n\n\ndef ensure_output(cp, outfile, encoding=\"utf-8\"):\n \"\"\"Ensure that the RADEX output is valid.\"\"\"\n if ndradex.RADEX_VERSION not in cp.stdout.decode(encoding):\n raise RuntimeError(\"RADEX version is not valid\")\n\n with open(outfile, encoding=encoding) as f:\n return f.readlines()[-1].split()[-N_VARS:]\n\n\ndef remove_file(path):\n \"\"\"Remove file forcibly (i.e., rm -f <path>).\"\"\"\n try:\n Path(path).unlink()\n except FileNotFoundError:\n pass\n","sub_path":"ndradex/radex.py","file_name":"radex.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"609454931","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom articles.models import Article, Category\nfrom articles.forms import ArticleForm, CategoryForm\nfrom django.urls import reverse\nfrom django.db.models import Q\nimport random\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef search_articles(request):\n articles = Article.objects.all().order_by('-published_on')\n query = request.GET.get('q')\n if query:\n queryset_list = articles.filter(\n Q(title__icontains=query)|\n Q(body__icontains=query)|\n Q(tags__name__in=[query])\n ).distinct()\n return render(request, 'articles/search_result.html', {\"articles\":queryset_list, 'query':query})\n return render(request, 'articles/search_result.html', {})\n\ndef article_list(request):\n articles = Article.objects.all().order_by('-published_on')\n front_featured_article = []\n featured_articles = articles.filter(is_featured=True)\n if featured_articles:\n front_featured_article = featured_articles[random.randint(0, len(featured_articles) - 1)]\n template_name = 'articles/list.html'\n context = {\n \"articles\": articles,\n \"featured_articles\":featured_articles,\n \"front_featured_article\": front_featured_article\n }\n return render(request, template_name, context)\n\n\ndef article_detail(request, slug=None):\n articles = Article.objects.all().order_by('-published_on')[:5]\n article = get_object_or_404(Article, slug=slug)\n template_name = 'articles/detail.html'\n context = {\n 'article': article,\n 'articles': articles\n }\n return render(request, template_name, context)\n\n@login_required\ndef add_article(request):\n if request.method == 'POST':\n form = ArticleForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('ArticleList', args=[]))\n else:\n form = ArticleForm()\n\n template_name = 'articles/new.html'\n context = {\n 'form':form,\n }\n return render(request, template_name, context)\n\n@login_required\ndef update_article(request, slug=None):\n article = get_object_or_404(Article, slug=slug)\n if request.method == 'POST':\n form = ArticleForm(request.POST, request.FILES, instance=article)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('ArticleDetail', kwargs={'slug':slug,}))\n else:\n form = ArticleForm(instance=article)\n\n template_name = 'articles/update.html'\n context = {\n 'form':form,\n }\n return render(request, template_name, context)\n\n@login_required\ndef add_category(request):\n if request.method == 'POST':\n form = CategoryForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('ArticleList', args=[]))\n else:\n form = CategoryForm()\n template_name = 'articles/new_category.html'\n context = {\n 'form':form,\n }\n return render(request, template_name, context)\n\n\ndef handler404(request):\n data = {}\n return render(request,'404.html', data)\n\ndef handler500(request):\n data = {}\n return render(request,'500.html', data)","sub_path":"PersonalWebsite/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"600510936","text":"from reporting import Reporter\nfrom tmp36sensing import Sensors\nimport time\nimport os\n\n# use the Redis host from the environment, falling back to the default service name\nhost = os.getenv(\"REDIS_HOST\")\nif host is None:\n    host = \"redis\"\n\nsensors = Sensors()\nreporter = Reporter(host, 6379)\nreporter.announce()\n\n# read the sensor and publish a report every 3 seconds\nwhile True:\n    output = sensors.read()\n    print(output)\n    reporter.set(output)\n    reporter.publish()\n    time.sleep(3)\n","sub_path":"non_demo/tmp36_sensor/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"73631120","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2013 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public\n# License as published by the Free Software Foundation; either version\n# 2 of the License (GPLv2) or (at your option) any later version.\n# There is NO WARRANTY for this software, express or implied,\n# including the implied warranties of MERCHANTABILITY,\n# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should\n# have received a copy of GPLv2 along with this software; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\n\nimport copy\nimport logging\nimport os\nimport shutil\n\nfrom pulp.server.db.model.criteria import UnitAssociationCriteria\n\nfrom pulp_rpm.common import models, constants\nfrom pulp_rpm.plugins.importers.yum import depsolve\nfrom pulp_rpm.plugins.importers.yum import existing\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef associate(source_repo, dest_repo, import_conduit, config, units=None):\n \"\"\"\n This is the primary method to call when a copy operation is desired. This\n gets called directly by the Importer\n\n Certain variables are set to \"None\" as the method progresses so that they\n may be garbage collected.\n\n :param source_repo: source repo\n :type source_repo: pulp.plugins.model.Repository\n :param dest_repo: destination repo\n :type dest_repo: pulp.plugins.model.Repository\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n :param config: config object for the distributor\n :type config: pulp.plugins.config.PluginCallConfiguration\n :param units: iterable of Unit objects to copy\n :type units: iterable\n :return:\n \"\"\"\n if units is None:\n # this might use a lot of RAM since RPMs tend to have lots of metadata\n # TODO: so we should probably do something about that\n units = import_conduit.get_source_units()\n\n # get config items that we care about\n recursive = config.get(constants.CONFIG_RECURSIVE)\n if recursive is None:\n recursive = False\n\n associated_units = set([_associate_unit(dest_repo, import_conduit, unit) for unit in units])\n # allow garbage collection\n units = None\n\n associated_units |= copy_rpms((unit for unit in associated_units if unit.type_id == models.RPM.TYPE),\n import_conduit, recursive)\n\n # return here if we shouldn't get child units\n if not recursive:\n return list(associated_units)\n\n group_ids, rpm_names, rpm_search_dicts = identify_children_to_copy(associated_units)\n\n # ------ get group children of the categories ------\n group_criteria = UnitAssociationCriteria([models.PackageGroup.TYPE],\n unit_filters={'id': {'$in': list(group_ids)}})\n group_units = list(import_conduit.get_source_units(group_criteria))\n if group_units:\n associated_units |= set(associate(source_repo, dest_repo, import_conduit, config, group_units))\n\n # ------ get RPM children of errata ------\n wanted_rpms = get_rpms_to_copy_by_key(rpm_search_dicts, import_conduit)\n rpm_search_dicts = None\n rpms_to_copy = filter_available_rpms(wanted_rpms, import_conduit)\n associated_units |= copy_rpms(rpms_to_copy, import_conduit, recursive)\n rpms_to_copy = None\n\n # ------ get RPM children of groups ------\n names_to_copy = get_rpms_to_copy_by_name(rpm_names, import_conduit)\n associated_units |= copy_rpms_by_name(names_to_copy, import_conduit, recursive)\n\n return list(associated_units)\n\n\ndef get_rpms_to_copy_by_key(rpm_search_dicts, import_conduit):\n \"\"\"\n Errata 
specify NEVRA for the RPMs they reference. This method is useful for\n taking those specifications and finding actual units available in the source\n repository.\n\n :param rpm_search_dicts: iterable of dicts that include a subset of rpm\n unit key parameters\n :type rpm_search_dicts: iterable\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n\n :return: set of namedtuples needed by the dest repo\n \"\"\"\n # identify which RPMs are desired and store as named tuples\n named_tuples = set()\n for key in rpm_search_dicts:\n # ignore checksum from updateinfo.xml\n key['checksum'] = None\n key['checksumtype'] = None\n named_tuples.add(models.RPM.NAMEDTUPLE(**key))\n\n # identify which of those RPMs already exist\n existing_units = existing.get_existing_units(rpm_search_dicts, models.RPM.UNIT_KEY_NAMES,\n models.RPM.TYPE, import_conduit.get_destination_units)\n # remove units that already exist in the destination from the set of units\n # we want to copy\n for unit in existing_units:\n unit_key = unit.unit_key.copy()\n # ignore checksum from updateinfo.xml\n unit_key['checksum'] = None\n unit_key['checksumtype'] = None\n named_tuples.discard(models.RPM.NAMEDTUPLE(**unit_key))\n return named_tuples\n\n\ndef get_rpms_to_copy_by_name(rpm_names, import_conduit):\n \"\"\"\n Groups reference names of RPMs. This method is useful for taking those names\n and removing ones that already exist in the destination repository.\n\n :param rpm_names: iterable of RPM names\n :type rpm_names: iterable\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n\n :return: set of names that don't already exist in the destination repo\n :rtype: set\n \"\"\"\n search_dicts = ({'name': name} for name in rpm_names)\n units = existing.get_existing_units(search_dicts, models.RPM.UNIT_KEY_NAMES,\n models.RPM.TYPE, import_conduit.get_destination_units)\n names = set(rpm_names)\n for unit in units:\n names.discard(unit.unit_key['name'])\n return names\n\n\ndef filter_available_rpms(rpms, import_conduit):\n \"\"\"\n Given a series of RPM named tuples, return an iterable of those which are\n available in the source repository\n\n :param rpms: iterable of RPMs that are desired to be copied\n :type rpms: iterable of pulp_rpm.common.models.RPM.NAMEDTUPLE\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n :return: iterable of Units that should be copied\n :rtype: iterable of pulp.plugins.models.Unit\n \"\"\"\n return existing.get_existing_units((_no_checksum_clean_unit_key(unit) for unit in rpms),\n models.RPM.UNIT_KEY_NAMES, models.RPM.TYPE,\n import_conduit.get_source_units)\n\n\ndef copy_rpms(units, import_conduit, copy_deps):\n \"\"\"\n Copy RPMs from the source repo to the destination repo, and optionally copy\n dependencies as well. Dependencies are resolved recursively.\n\n :param units: iterable of Units\n :type units: iterable of pulp.plugins.models.Unit\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n :param copy_deps: if True, copies dependencies as specified in \"Requires\"\n lines in the RPM metadata. Matches against NEVRAs\n and Provides declarations that are found in the\n source repository. Silently skips any dependencies\n that cannot be resolved within the source repo.\n\n :return: set of pulp.plugins.models.Unit that were copied\n :rtype: set\n \"\"\"\n unit_set = set()\n\n for unit in units:\n import_conduit.associate_unit(unit)\n unit_set.add(unit)\n\n if copy_deps and unit_set:\n deps = depsolve.find_dependent_rpms(unit_set, import_conduit.get_source_units)\n # only consider deps that exist in the source repo\n available_deps = set(filter_available_rpms(deps, import_conduit))\n # remove rpms already in the destination repo\n existing_units = set(existing.get_existing_units([dep.unit_key for dep in available_deps],\n models.RPM.UNIT_KEY_NAMES, models.RPM.TYPE,\n import_conduit.get_destination_units))\n to_copy = available_deps - existing_units\n _LOGGER.debug('Copying deps: %s' % str(sorted([x.unit_key['name'] for x in to_copy])))\n if to_copy:\n unit_set |= copy_rpms(to_copy, import_conduit, copy_deps)\n\n return unit_set\n\n\ndef _no_checksum_clean_unit_key(unit_tuple):\n \"\"\"\n Return a unit key that does not include the checksum or checksumtype. This\n is useful when resolving dependencies, because those unit specifications\n (on \"Requires\" lines in spec files) do not specify particular checksum info.\n\n This also removes any key-value pairs where the value is None, which is\n particularly useful for repos where the errata do not specify epochs\n\n :param unit_tuple: unit to convert\n :type unit_tuple: pulp_rpm.common.models.RPM.NAMEDTUPLE\n\n :return: unit key without checksum data\n :rtype: dict\n \"\"\"\n ret = unit_tuple._asdict()\n # ignore checksum from updateinfo.xml\n del ret['checksum']\n del ret['checksumtype']\n for key, value in ret.items():\n if value is None:\n del ret[key]\n return ret\n\n\ndef copy_rpms_by_name(names, import_conduit, copy_deps):\n \"\"\"\n Copy RPMs from source repo to destination repo by name\n\n :param names: iterable of RPM names\n :type names: iterable of basestring\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n\n :return: set of pulp.plugins.model.Unit that were copied\n :rtype: set\n \"\"\"\n to_copy = {}\n\n search_dicts = ({'name': name} for name in names)\n units = existing.get_existing_units(search_dicts, models.RPM.UNIT_KEY_NAMES,\n models.RPM.TYPE,\n import_conduit.get_source_units)\n for unit in units:\n model = models.RPM.from_package_info(unit.unit_key)\n previous = to_copy.get(model.key_string_without_version)\n if previous is None:\n to_copy[model.key_string_without_version] = (model.complete_version_serialized, unit)\n else:\n to_copy[model.key_string_without_version] = max(((model.complete_version_serialized, unit), previous))\n\n return copy_rpms((unit for v, unit in to_copy.itervalues()), import_conduit, copy_deps)\n\n\ndef identify_children_to_copy(units):\n \"\"\"\n Takes an iterable of Unit instances, and for each that is of a child-bearing\n type (Group, Category, Errata), collects the child definitions.\n\n :param units: iterable of Units\n :type units: iterable of pulp.plugins.models.Unit\n\n :return: set(group names), set(rpm names), list(rpm search dicts)\n \"\"\"\n groups = set()\n rpm_names = set()\n rpm_search_dicts = []\n for unit in units:\n # TODO: won't work for distribution, but we probably don't care.\n # we should handle that somehow though\n model = models.TYPE_MAP[unit.type_id](metadata=unit.metadata, **unit.unit_key)\n if model.TYPE == models.PackageCategory.TYPE:\n groups.update(model.group_names)\n elif model.TYPE == models.PackageGroup.TYPE:\n rpm_names.update(model.all_package_names)\n elif model.TYPE == models.PackageEnvironment.TYPE:\n groups.update(model.group_ids)\n groups.update(model.optional_group_ids)\n elif model.TYPE == models.Errata.TYPE:\n rpm_search_dicts.extend(model.rpm_search_dicts)\n return groups, rpm_names, rpm_search_dicts\n\n\ndef _associate_unit(dest_repo, import_conduit, unit):\n \"\"\"\n Associate one particular unit with the destination repository. There are\n behavioral exceptions based on type:\n\n Group, Category, Environment and Yum Metadata File units need to have their \"repo_id\"\n attribute set.\n\n RPMs are convenient to do all as one block, for the purpose of dependency\n resolution. So this method skips RPMs and lets them be done together by\n other means\n\n :param dest_repo: destination repo\n :type dest_repo: pulp.plugins.model.Repository\n :param import_conduit: import conduit passed to the Importer\n :type import_conduit: pulp.plugins.conduits.unit_import.ImportUnitConduit\n :param unit: Unit to be copied\n :type unit: pulp.plugins.model.Unit\n\n :return: copied unit\n :rtype: pulp.plugins.model.Unit\n \"\"\"\n if unit.type_id in (models.PackageGroup.TYPE,\n models.PackageCategory.TYPE,\n models.PackageEnvironment.TYPE):\n new_unit = _safe_copy_unit_without_file(unit)\n new_unit.unit_key['repo_id'] = dest_repo.id\n saved_unit = import_conduit.save_unit(new_unit)\n return saved_unit\n elif unit.type_id == models.RPM.TYPE:\n # copy will happen in one batch\n return unit\n elif unit.type_id == models.YumMetadataFile.TYPE:\n model = models.YumMetadataFile(unit.unit_key['data_type'], dest_repo.id, unit.metadata)\n model.clean_metadata()\n relative_path = os.path.join(model.relative_dir, os.path.basename(unit.storage_path))\n new_unit = import_conduit.init_unit(model.TYPE, model.unit_key, model.metadata, relative_path)\n shutil.copyfile(unit.storage_path, new_unit.storage_path)\n import_conduit.save_unit(new_unit)\n return new_unit\n else:\n import_conduit.associate_unit(unit)\n return unit\n\n\ndef _safe_copy_unit_without_file(unit):\n \"\"\"\n Makes a deep copy of the unit, removes its \"id\", and removes anything in\n \"metadata\" whose key starts with a \"_\".\n\n :param unit: unit to be copied\n :type unit: pulp.plugins.model.Unit\n\n :return: copy of the unit\n :rtype: pulp.plugins.model.Unit\n \"\"\"\n new_unit = copy.deepcopy(unit)\n new_unit.id = None\n for key in new_unit.metadata.keys():\n if key.startswith('_'):\n del new_unit.metadata[key]\n return new_unit\n","sub_path":"plugins/pulp_rpm/plugins/importers/yum/associate.py","file_name":"associate.py","file_ext":"py","file_size_in_byte":14614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"214997929","text":"import random\n\n# build a list of 10 random digits, then keep only the first occurrence of each\nl = []\nm = []\nfor i in range(10):\n    l.append(random.randint(0, 9))\nprint(l)\n\nfor j in l:\n    if j not in m:\n        m.append(j)\nprint(m)\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"298994906","text":"'''Extracts frames from a recording with the .bk2 extension.\nCrops and reduces the size of each frame\nOutput: database in a HDF5 file\n'''\nimport h5py\nimport numpy as np\n\ndatabase_names = ['concatTrain.hdf5', 'concatValid.hdf5', 'concatTest.hdf5']\noutput_database_names = ['colorTrain.hdf5', 'colorValid.hdf5', 'colorTest.hdf5']\n\n# database_names = ['concatNeighbourTrain.hdf5', 'concatNeighbourValid.hdf5', 'concatNeighbourTest.hdf5']\n# output_database_names = ['colorNeighbourTrain.hdf5', 'colorNeighbourValid.hdf5', 'colorNeighbourTest.hdf5']\nrepertory = './data/'\n#data = h5py.File('./data/data_All_Stars.hdf5', 'r')\n#list_nbMovies = [0, 0, 4, 1, 4, 4]\n\ndef getIndexPix(pixel):\n    return (pixel[0], pixel[1], pixel[2])\n\ndef buildPixDictionary(database):\n    pixDictionary = {}\n    batch, height, width, channel = database.shape\n    print(batch)\n    for img in range(100):\n        for i in range(height):\n            for j in range(width):\n                key = getIndexPix(database[img,i,j])\n                if key in pixDictionary:\n                    pixDictionary[key] += 1\n                else:\n                    pixDictionary[key] = 1\n        print(\"Img number \",img,\" len Dictionary\",len(pixDictionary))\n    return pixDictionary\n\ndef convertToFreq(database, pixDictionary, output_database_name):\n    sizePixDict = len(pixDictionary)\n    batch, height, width, channel = database.shape\n    const_coef = height*width*batch / sizePixDict\n    freqDatabase = []\n    for img in range(batch):\n        freqImg = []\n        for i in range(height):\n            freqRow = []\n            for j in range(width):\n                key = getIndexPix(database[img,i,j])\n                if key in pixDictionary:\n                    apparitions = pixDictionary[key]\n                else:\n                    apparitions = 1\n                #color_coef = height*width*batch / (sizePixDict*apparitions)\n                freq_coef = const_coef / apparitions\n                freqRow.append(freq_coef)\n            freqImg.append(freqRow)\n        freqDatabase.append(freqImg)\n    # convert the nested list to an array: a plain list has no .shape attribute\n    freqDatabase = np.array(freqDatabase)\n    print('Freq Database shape: ', freqDatabase.shape)\n    my_file = h5py.File(output_database_name, 'a')\n    try:\n        dataset = my_file.create_dataset(name='freqColor', data=freqDatabase, dtype=\"f8\")\n    except RuntimeError:\n        pass\n    my_file.close()\n\n\ndef main(database_names, output_database_names, repertory):\n    trainData_name = repertory + database_names[0]\n    trainData = h5py.File(trainData_name, 'r')\n    pixDictionary = buildPixDictionary(trainData['/']['runs'])\n    print(\"DONE Building Dictionary of length\", len(pixDictionary))\n    for nBase in range(len(database_names)):\n        database_name = repertory + database_names[nBase]\n        output_database_name = repertory + output_database_names[nBase]\n        database = h5py.File(database_name, 'r')\n        convertToFreq(database['/']['runs'], pixDictionary,\n                      output_database_name)\n\nif __name__ == '__main__':\n    main(database_names, output_database_names, repertory)\n","sub_path":"colorfrequency.py","file_name":"colorfrequency.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"246016829","text":"class fcfs1():\r\n    def __init__(self):\r\n        # attributes must be set on self so each instance gets its own copy\r\n        self.processname = \" \"\r\n        self.arrival_t = 0\r\n        self.burst_t = 0\r\n        self.start_t = 0\r\n        self.run_t = 0\r\n\r\n\r\nnop = int(input('enter no of processes'))\r\nfcfs = [fcfs1() for i in range(nop)]\r\nfor i in range(nop):\r\n\r\n    fcfs[i].processname = input('enter name of process p' + str(i))\r\n    fcfs[i].arrival_t = int(input('enter arrival time for process p' + str(i)))\r\n    fcfs[i].burst_t = int(input('enter burst time for process p' + str(i)))\r\n    fcfs[i].start_t = 0\r\n    fcfs[i].run_t = 0\r\n\r\n# serve processes in order of arrival (first come, first served)\r\nfcfs.sort(key=lambda x: x.arrival_t)\r\ntotal = 0\r\nfor i in range(nop):\r\n    chk = False\r\n    while fcfs[i].run_t != fcfs[i].burst_t:\r\n        if fcfs[i].arrival_t > total:\r\n            # CPU idles until the process arrives\r\n            while True:\r\n                total += 1\r\n                if fcfs[i].arrival_t == total:\r\n                    break\r\n        else:\r\n            if fcfs[i].start_t == 0 and chk == False:\r\n                fcfs[i].start_t = total\r\n                chk = True\r\n            total += 1\r\n            fcfs[i].run_t += 1\r\nfor i in range(nop):\r\n    print('process name', fcfs[i].processname, ' ', 'arrival time', fcfs[i].arrival_t, \" \", 'burst time', fcfs[i].burst_t, \" \", 'termination time', fcfs[i].start_t + fcfs[i].burst_t, \" \", 'turnaround time', (fcfs[i].start_t + fcfs[i].burst_t) - fcfs[i].arrival_t, \" \", 'waiting time', fcfs[i].start_t - fcfs[i].arrival_t)\r\n","sub_path":"FCFS.py","file_name":"FCFS.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"353431789","text":"'''\nFor this challenge, create a bank account class that has two attributes:\nowner\nbalance\nand two methods:\ndeposit\nwithdraw\nAs an added requirement, withdrawals may not exceed the available balance.\nInstantiate your class, make several deposits and withdrawals, and test to make sure the account can't be overdrawn.\n'''\n\n\nclass Account:\n    def __init__(self, owner, balance=0):\n        self.owner = owner\n        self.balance = balance\n\n    def __str__(self):\n        return f'Account owner: {self.owner}\\nAccount balance: ${self.balance}'\n\n    def deposit(self, dep_amount):\n        if dep_amount > 0:\n            self.balance += dep_amount\n            print('Deposit Accepted')\n\n    def withdraw(self, wd_amount):\n        if self.balance < wd_amount:\n            print('Funds Unavailable!')\n        else:\n            self.balance -= wd_amount\n            print('Withdrawal Accepted')\n\n\n# 1. Instantiate the class\nacct1 = Account('Jose', 100)\n\n# 2. Print the object\nprint(acct1)\n\n# 3. Show the account owner attribute\nprint(acct1.owner)\n\n# 4. Show the account balance attribute\nprint(acct1.balance)\n\n# 5. Make a series of deposits and withdrawals\nacct1.deposit(50)\nprint(acct1.balance)\n\nacct1.withdraw(75)\nprint(acct1.balance)\n\n# 6. Make a withdrawal that exceeds the available balance\nacct1.withdraw(500)\nprint(acct1.balance)\n","sub_path":"oop/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"149030852","text":"#!/usr/bin/env python\nimport os\nimport time\nimport re\nfrom functools import reduce  # reduce is not a builtin on Python 3\nimport numpy\nfrom pyscf import lib\nfrom pyscf import gto, scf, dft, mcscf, mp, cc, lo\n\ndef sort_mo(casscf, idx, mo_coeff):\n    mol = casscf.mol\n    corth = lo.orth.orth_ao(mol)\n    casorb = corth[:,idx]\n\n    nmo = mo_coeff.shape[1]\n    ncore = casscf.ncore\n    ncas = casscf.ncas\n    nelecas = casscf.nelecas\n    assert(ncas == casorb.shape[1])\n\n    mo1 = reduce(numpy.dot, (casorb.T, casscf._scf.get_ovlp(), mo_coeff))\n    sdiag = numpy.einsum('pi,pi->i', mo1, mo1)\n\n    nocc = ncore + nelecas[0]\n    casidx = numpy.hstack((numpy.argsort(sdiag[:nocc])[ncore:],\n                           nocc+numpy.argsort(-sdiag[nocc:])[:ncas-nelecas[0]]))\n    notcas = [i for i in range(nmo) if i not in casidx]\n    mo = numpy.hstack((mo_coeff[:,notcas[:ncore]],\n                       mo_coeff[:,casidx],\n                       mo_coeff[:,notcas[ncore:]]))\n    return mo\n\nmol = gto.Mole()\nmol.verbose = 0\nlog = lib.logger.Logger(mol.stdout, 5)\nwith open('/proc/cpuinfo') as f:\n    for line in f:\n        if 'model name' in line:\n            log.note(line[:-1])\n            break\nwith open('/proc/meminfo') as f:\n    log.note(f.readline()[:-1])\nlog.note('OMP_NUM_THREADS=%s\\n', os.environ.get('OMP_NUM_THREADS', None))\n\nfor bas in ('3-21g', '6-31g*', 'cc-pVTZ', 'ANO-Roos-TZ'):\n    mol.atom = 'N 0 0 0; N 0 0 1.1'\n    mol.basis = bas\n    mol.build(0, 0)\n    cpu0 = time.clock(), time.time()\n\n    mf = scf.RHF(mol)\n    mf.kernel()\n    cpu0 = log.timer('N2 %s RHF'%bas, *cpu0)\n\n    mymp2 = mp.MP2(mf)\n    mymp2.kernel()\n    cpu0 = log.timer('N2 %s MP2'%bas, *cpu0)\n\n    mymc = mcscf.CASSCF(mf, 4, 4)\n    idx = mol.search_ao_label('2p[xy]')\n    mo = sort_mo(mymc, idx, mf.mo_coeff)\n    mymc.kernel(mo)\n    cpu0 = log.timer('N2 %s CASSCF'%bas, *cpu0)\n\n    mycc = cc.CCSD(mf)\n    mycc.kernel()\n    cpu0 = log.timer('N2 %s CCSD'%bas, *cpu0)\n\n    mf = dft.RKS(mol)\n    mf.xc = 'b3lyp'\n    mf.kernel()\n    cpu0 = log.timer('N2 %s B3LYP'%bas, *cpu0)\n\n    mf = scf.density_fit(mf)\n    mf.kernel()\n    cpu0 = log.timer('N2 %s density-fit RHF'%bas, *cpu0)\n","sub_path":"examples/2-benchmark/n2.py","file_name":"n2.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"437813494","text":"import requests\nimport pickle\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\nfrom ipwhois import IPWhois\nfrom ipwhois.utils import get_countries\nimport time\n\nclass Bay:\n def __init__(self,initial_proxy):\n self.proxy = initial_proxy\n self.proxies = []\n\n def getfile(self,url,proxy,ext=\"\"):\n proxies = {\n #\"http\": \"http://211.137.39.61:8080\",\n \"http\": proxy,\n }\n headers = {'user-agent': '5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'}\n try:\n r = requests.get(url,headers=headers,timeout=10, proxies=proxies)\n print(r.status_code)\n if r.status_code == requests.codes.ok:\n pf = open(\"data\" + ext,\"wb\")\n pickle.dump(r.content,pf)\n pf.close()\n pcf = open(\"cookie\",\"wb\")\n pickle.dump(r.cookies,pcf)\n pcf.close()\n else:\n return \"error\"\n except:\n return \"error\"\n\n\n def gethtml(self,name):\n opf = open(name, \"rb\")\n html = pickle.load(opf,encoding=\"UTF-8\")\n html = html.decode(\"UTF-8\")\n opf.close()\n return html\n\n\n def get_tables(self,htmldoc):\n soup = BeautifulSoup(htmldoc,'html.parser')\n tables = soup.findAll('table')\n return tables[0].findAll('tr')\n\n\n def getips(self):\n #ipurl = 'http://www.samair.ru/proxy-by-country/China-01.htm'\n ipurl = 'https://www.us-proxy.org/'\n self.getfile(ipurl,self.proxy)\n t = self.get_tables(self.gethtml(\"data\"))\n #print(t)\n tbl = BeautifulSoup(str(t),'html.parser')\n #print(tbl.findAll(\"tr\"))\n arr = []\n for p in tbl.findAll(\"tr\"):\n try:\n if p.td.next_sibling.next_sibling.next_sibling.next_sibling.get_text() == \"elite proxy\":\n arr.append(p.td.get_text() + \":\" + p.td.next_sibling.get_text())\n except:\n pass\n self.proxies = arr\n return arr\n\n\n def set(self,pagenum,kw):\n try:\n url = 'http://www.ebay.com/sch/i.html?_from=R40&_sacat=0&LH_Complete=1&LH_Sold=1&LH_ItemCondition=3&_nkw=' + kw + '&_pgn=' + str(pagenum) + '&_ipg=200&rt=nc'\n if len(self.proxies) > 0:\n countries = get_countries()\n obj = IPWhois(self.proxies[0].split(':')[0])\n results = obj.lookup(False)\n\n if countries[results['nets'][0]['country']] == \"United States\":\n\n if self.getfile(url,self.proxies[0],\".ht\") == \"error\":\n print(\"Switching Proxy\")\n self.proxies.pop(0)\n self.set(pagenum,kw)\n\n else:\n print(countries[results['nets'][0]['country']])\n print(\"Non-US IP \" + self.proxies[0].split(':')[0] + \": Switching Proxy\")\n self.proxies.pop(0)\n self.set(pagenum,kw)\n\n else:\n print(\"No Proxies in Queue\")\n\n\n except Exception as e:\n print(str(e))\n\n\n\n def get(self,n,fn):\n html_doc = self.gethtml(\"data.ht\")\n html_doc = html_doc.replace(\"\\n\",\"\")\n html_doc = html_doc.replace(\"\\r\",\"\")\n html_doc = html_doc.replace(\"\\t\",\"\")\n soup = BeautifulSoup(html_doc, 'html.parser')\n listings = soup.find_all(class_=\"lvresult\")\n #titles = soup.find_all(class_=\"lvtitle\")\n #print(listings)\n titlearray = []\n for title in listings:\n titlearray.append(title.find(class_=\"lvtitle\").a.get_text())\n\n linkarray = []\n for link in listings:\n linkarray.append(link.find(class_=\"lvtitle\").a.get('href'))\n #print(linkarray)\n\n\n dates = soup.find_all(class_=\"tme\")\n datearray = []\n for date in dates:\n datearray.append(date.span.string)\n\n\n images = soup.find_all(class_=\"lvpicinner\")\n imagearray = []\n for image in images:\n try:\n imagearray.append(image.a.img['imgurl'])\n except:\n try:\n imagearray.append(image.a.img['src'])\n except:\n imagearray.append(\"\")\n\n\n pricearray = 
[]\n for price in listings:\n pr = price.find(class_=\"bold bidsold\").string\n try:\n p = re.search('[\\d.,]+', pr)\n prc = p.group(0)\n except:\n prc = \"NaN\"\n pricearray.append(prc)\n\n\n shippingarray = []\n for shipping in listings:\n try:\n shippingarray.append(shipping.find(class_=\"bfsp\").string)\n except:\n shippingarray.append(\"None\")\n\n\n fromarray = []\n for fr in listings:\n try:\n fromarray.append(fr.find(class_=\"lvdetails\").findAll(\"li\")[1].get_text())\n except:\n fromarray.append(\"\")\n #print(fromarray)\n\n auctionarray = []\n for auction in listings:\n try:\n atext = auction.find(class_=\"lvformat\").get_text()\n if atext == \"Buy It Now\":\n auctionarray.append((atext,atext,\"\"))\n else:\n auctionarray.append((atext[-4:],\"Bids\",re.search('[\\d]+', atext).group(0)))\n except:\n auctionarray.append(\"\")\n #print(auctionarray)\n\n\n\n df1 = pd.DataFrame(data = titlearray, columns=['Titles'])\n df2 = pd.DataFrame(data = pricearray, columns=['Price'])\n df3 = pd.DataFrame(data = datearray, columns=['Date'])\n df4 = pd.DataFrame(data = imagearray, columns=['Image'])\n df5 = pd.DataFrame(data = shippingarray, columns=['Shipping'])\n df6 = pd.DataFrame(data = fromarray, columns=['From'])\n df7 = pd.DataFrame(data = auctionarray, columns=['Auction','Bids','Bids Text'])\n df8 = pd.DataFrame(data = linkarray, columns=['Link'])\n\n df1['Prices'] = df2\n df1['Dates'] = df3\n df1['Image'] = df4\n df1['Shipping'] = df5\n df1['From'] = df6\n df1[['Auction','Bids','Bids Text']] = df7\n df1['Link'] = df8\n\n df1.to_excel(fn + str(n) + '.xlsx', index=False)\n print('Created: ' + fn + str(n) + '.xlsx')\n\n\n\n# Set Initial Proxy to Get New Proxies\nme = Bay(\"111.14.40.155:8081\")\n\n# Get New Proxies\nme.getips()\nprint(me.proxies)\n\n# Or Use Your Own Proxies\n# me.proxies = ['52.27.149.22:80','161.68.250.139:80','161.68.250.181:8080']\n\n\nme.set(3,'sexy+one+size')\ntime.sleep(3)\nme.get(3,'tst')\n","sub_path":"bay-proxy.py","file_name":"bay-proxy.py","file_ext":"py","file_size_in_byte":6692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"80245603","text":"import pytest\nfrom pg2avro import get_avro_schema, ColumnMapping\n\n\ndef test_get_avro_schema_custom_mapping():\n \"\"\"\n Test using dictionary with custom mapping.\n \"\"\"\n\n columns = [{\"c1\": \"smallint\", \"c2\": \"smallint\", \"c3\": \"int2\", \"c4\": False}]\n\n table_name = \"test_table\"\n namespace = \"test_namespace\"\n\n expected = {\n \"name\": table_name,\n \"namespace\": namespace,\n \"type\": \"record\",\n \"fields\": [{\"name\": \"smallint\", \"type\": \"int\"}],\n }\n\n actual = get_avro_schema(\n table_name,\n namespace,\n columns,\n ColumnMapping(\n name=\"c1\",\n type=\"c2\",\n nullable=\"c4\",\n numeric_precision=\"c5\",\n numeric_scale=\"c6\",\n ),\n )\n\n assert expected == actual\n\n\ndef test_get_avro_schema_assumed_column_interface():\n \"\"\"\n Test using dictionary with custom mapping.\n \"\"\"\n columns = [\n {\n \"name\": \"smallint\",\n \"type\": \"smallint\",\n \"secondary_type\": \"int2\",\n \"nullable\": False,\n }\n ]\n\n table_name = \"test_table\"\n namespace = \"test_namespace\"\n\n expected = {\n \"name\": table_name,\n \"namespace\": namespace,\n \"type\": \"record\",\n \"fields\": [{\"name\": \"smallint\", \"type\": \"int\"}],\n }\n\n actual = get_avro_schema(table_name, namespace, columns)\n\n assert expected == actual\n\n\ndef test_get_avro_schema_invalid_column_interface():\n \"\"\"\n Test incompatible dict with no mapping, this shall result in exception.\n \"\"\"\n columns = [{\"incompatible\": \"smallint\", \"type\": \"smallint\", \"nullable\": False}]\n\n table_name = \"test_table\"\n namespace = \"test_namespace\"\n\n # Not passing column mapping, this should raise an exception.\n with pytest.raises(Exception, match=\"Assuming pg2avro compatible column interface\"):\n get_avro_schema(table_name, namespace, columns)\n","sub_path":"tests/test_schema_dict.py","file_name":"test_schema_dict.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"652913228","text":"# -*- coding: utf-8 -*-\n\"\"\"\n proxy.py\n ~~~~~~~~\n ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on\n Network monitoring, controls & Application development, testing, debugging.\n\n :copyright: (c) 2013-present by Abhinav Singh and contributors.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport logging\nimport multiprocessing\nimport socket\nimport threading\n# import time\nfrom multiprocessing import connection\nfrom multiprocessing.reduction import send_handle\nfrom typing import List, Optional, Type\n\nfrom .acceptor import Acceptor\nfrom ..threadless import ThreadlessWork\nfrom ..event import EventQueue, EventDispatcher\nfrom ...common.flags import Flags\n\nlogger = logging.getLogger(__name__)\n\nLOCK = multiprocessing.Lock()\n\nproxy_id_glob = multiprocessing.Value('i', 0)\n\n\nclass AcceptorPool:\n \"\"\"AcceptorPool.\n\n Pre-spawns worker processes to utilize all cores available on the system. Server socket connection is\n dispatched over a pipe to workers. Each worker accepts incoming client request and spawns a\n separate thread to handle the client request.\n \"\"\"\n\n def __init__(self, flags: Flags, work_klass: Type[ThreadlessWork]) -> None:\n self.flags = flags\n self.socket: Optional[socket.socket] = None\n self.acceptors: List[Acceptor] = []\n self.work_queues: List[connection.Connection] = []\n self.work_klass = work_klass\n\n self.event_queue: Optional[EventQueue] = None\n self.event_dispatcher: Optional[EventDispatcher] = None\n self.event_dispatcher_thread: Optional[threading.Thread] = None\n self.event_dispatcher_shutdown: Optional[threading.Event] = None\n self.manager: Optional[multiprocessing.managers.SyncManager] = None\n\n if self.flags.enable_events:\n self.manager = multiprocessing.Manager()\n self.event_queue = EventQueue(self.manager.Queue())\n\n def listen(self) -> None:\n self.socket = socket.socket(self.flags.family, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind((str(self.flags.hostname), self.flags.port))\n self.socket.listen(self.flags.backlog)\n self.socket.setblocking(False)\n logger.info(\n 'Listening on %s:%d' %\n (self.flags.hostname, self.flags.port))\n\n def start_workers(self) -> None:\n \"\"\"Start worker processes.\"\"\"\n for acceptor_id in range(self.flags.num_workers):\n work_queue = multiprocessing.Pipe()\n acceptor = Acceptor(\n idd=acceptor_id,\n work_queue=work_queue[1],\n flags=self.flags,\n work_klass=self.work_klass,\n lock=LOCK,\n event_queue=self.event_queue,\n )\n acceptor.start()\n logger.debug(\n 'Started acceptor#%d process %d',\n acceptor_id,\n acceptor.pid)\n self.acceptors.append(acceptor)\n self.work_queues.append(work_queue[0])\n logger.info('Started %d workers' % self.flags.num_workers)\n\n def start_event_dispatcher(self) -> None:\n self.event_dispatcher_shutdown = threading.Event()\n assert self.event_dispatcher_shutdown\n assert self.event_queue\n self.event_dispatcher = EventDispatcher(\n shutdown=self.event_dispatcher_shutdown,\n event_queue=self.event_queue\n )\n self.event_dispatcher_thread = threading.Thread(\n target=self.event_dispatcher.run\n )\n self.event_dispatcher_thread.start()\n logger.debug('Thread ID: %d', self.event_dispatcher_thread.ident)\n\n def shutdown(self) -> None:\n logger.info('Shutting down %d workers' % self.flags.num_workers)\n for acceptor in self.acceptors:\n acceptor.running.set()\n if self.flags.enable_events:\n assert 
self.event_dispatcher_shutdown\n assert self.event_dispatcher_thread\n self.event_dispatcher_shutdown.set()\n self.event_dispatcher_thread.join()\n logger.debug(\n 'Shutdown of global event dispatcher thread %d successful',\n self.event_dispatcher_thread.ident)\n for acceptor in self.acceptors:\n acceptor.join()\n logger.debug('Acceptors shutdown')\n\n def setup(self) -> None:\n \"\"\"Listen on port, setup workers and pass server socket to workers.\"\"\"\n self.listen()\n if self.flags.enable_events:\n logger.info('Core Event enabled')\n self.start_event_dispatcher()\n self.start_workers()\n\n # Send server socket to all acceptor processes.\n assert self.socket is not None\n for index in range(self.flags.num_workers):\n send_handle(\n self.work_queues[index],\n self.socket.fileno(),\n self.acceptors[index].pid\n )\n self.work_queues[index].close()\n self.socket.close()\n","sub_path":"proxy/core/acceptor/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"54754231","text":"import requests\r\nimport json\r\nimport re\r\n\r\nheaders = {\r\n    'User-Agent': 'BiLiBiLi/1.0.0 (1244382469@qq.com)'\r\n}\r\n\r\n# videos: keep the Chinese keywords as search terms\r\nfilter_list = ['新型肺炎', '新型冠状病毒', '传染']\r\nurl = 'https://api.bilibili.com/x/web-interface/view?aid='\r\n\r\nfor av in range(84726389, 84738342):\r\n    txt = requests.get(url + str(av), headers=headers).text\r\n    txt = json.loads(txt)\r\n    if txt['message'] == '0':\r\n        flag = False\r\n        for i in filter_list:\r\n            if i in txt['data']['dynamic']:\r\n                flag = True\r\n\r\n        for i in filter_list:\r\n            if i in txt['data']['title']:\r\n                flag = True\r\n\r\n        print(av)\r\n        if flag:\r\n            av = txt['data']['aid']  # AV number\r\n            partition = txt['data']['tname']  # category\r\n            up_name = txt['data']['owner']['name']  # uploader name\r\n            title = txt['data']['title']  # title\r\n            description = txt['data']['dynamic']\r\n            # up_pic = txt['data']['owner']['face']  # uploader avatar\r\n            # pic = txt['data']['pic']  # cover image\r\n\r\n            with open('data1.doc', 'a+') as f:\r\n                f.write(\"{} \\n\".format(title))\r\n                f.write(description + '\\n')\r\n                f.write('up:{} category:{}\\n'.format(up_name, partition))\r\n                f.write('link:www.bilibili.com/video/av{} \\n \\n'.format(av))\r\n\r\n","sub_path":"弹幕/弹幕.py","file_name":"弹幕.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"375728046","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Watch out for names of the form __xxx__, like __slots__: they have special uses in Python.\n# We already know how to use __slots__, and that the __len__() method lets a class work with the len() function.\n# Beyond these, a Python class has many more special-purpose functions that help us customize the class.\n\n# __str__\nclass Student(object):\n\tdef __init__(self, name):\n\t\tself.name = name\n\t# customize __str__()\n\tdef __str__(self):\n\t\treturn 'Student object (name:%s)' % self.name\n\t__repr__ = __str__\nprint(Student('Micheal'))\n\n# __iter__ + __next__()\n# If a class wants to be usable in a for ... in loop, like a list or tuple, it must\n# implement an __iter__() method that returns an iterable object. Python's for loop\n# then keeps calling that object's __next__() method to get the next value,\n# and exits the loop when a StopIteration error is raised.\nclass Fib(object):\n\tdef __init__(self):\n\t\tself.a, self.b = 0, 1\n\n\tdef __iter__(self):\n\t\treturn self # the instance itself is the iterator\n\n\tdef __next__(self):\n\t\tself.a, self.b = self.b, self.a + self.b\n\t\tif self.a > 10000: # condition for ending the loop\n\t\t\traise StopIteration()\n\t\treturn self.a # return the next value\n\t\n\t# support slicing\n\tdef __getitem__(self, n):\n\t\tif isinstance(n, int): # n is an index\n\t\t\ta, b = 1, 1\n\t\t\tfor x in range(n):\n\t\t\t\ta, b = b, a + b\n\t\t\treturn a\n\t\tif isinstance(n, slice): # n is a slice\n\t\t\tstart = n.start\n\t\t\tstop = n.stop\n\t\t\tif start is None:\n\t\t\t\tstart = 0\n\t\t\ta, b = 1, 1\n\t\t\tL = []\n\t\t\tfor x in range(stop):\n\t\t\t\tif x >= start:\n\t\t\t\t\tL.append(a)\n\t\t\t\ta, b = b, a + b\n\t\t\treturn L\n\nfor n in Fib():\n\tprint(n)\n\n# __getitem__\n# Although a Fib instance works in a for loop and looks a bit like a list,\n# it still cannot be used as a list. To behave like a list and fetch elements\n# by index, it needs to implement the __getitem__() method (defined above).\nf = Fib()\nprint(f[0], f[1], f[100])\nprint(f[0:5])\nprint(f[:10])\n","sub_path":"class/customize_class.py","file_name":"customize_class.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"120926479","text":"import numpy as np\nimport pdb\nimport torch\ndef iou_eval(pred,label,num_class=None):\n    assert pred.shape==label.shape\n    metric=SegmentationMetric(num_class)\n    pred = pred.flatten()\n    label = label.flatten()\n    metric.addBatch(pred, label)\n    acc = metric.pixelAccuracy()\n    mIoU = metric.meanIntersectionOverUnion()\n    return acc,mIoU\n\nclass SegmentationMetric(object):\n    def __init__(self, numClass):\n        self.numClass = numClass\n        self.confusionMatrix = np.zeros((self.numClass,)*2)\n    def meanIntersectionOverUnion(self):\n        # Intersection = TP  Union = TP + FP + FN\n        # IoU = TP / (TP + FP + FN)\n        intersection = np.diag(self.confusionMatrix)\n        union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n        IoU = intersection / union\n        mIoU = np.nanmean(IoU)\n        return mIoU\n\n    def pixelAccuracy(self):\n        # return all class overall pixel accuracy\n        # acc = (TP + TN) / (TP + TN + FP + FN)\n        acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()\n        return acc\n\n    def genConfusionMatrix(self, imgPredict, imgLabel):\n        #def genConfusionMatrix(self, pred_label,gt_label):\n        # remove classes from unlabeled pixels in gt image and predict\n        imgLabel=imgLabel.flatten()\n        imgPredict=imgPredict.flatten()\n        mask = (imgLabel >= 0) & (imgLabel < self.numClass)\n        label = self.numClass * imgLabel[mask] + imgPredict[mask]\n        count = np.bincount(label, minlength=self.numClass**2)\n        confusionMatrix = count.reshape(self.numClass, self.numClass)\n        '''class_num=20\n        index = (gt_label * class_num + pred_label).astype('int32')\n        index=index.flatten()\n        label_count = np.bincount(index)\n        confusion_matrix = np.zeros((class_num, class_num))\n        for i_label in range(class_num):\n            for i_pred_label in range(class_num):\n                cur_index = i_label * class_num + i_pred_label\n                pdb.set_trace()\n                if cur_index < len(label_count):\n                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n        return confusion_matrix'''\n        return confusionMatrix\n\n    def addBatch(self, imgPredict, imgLabel):\n        assert imgPredict.shape == imgLabel.shape\n        self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)","sub_path":"add_sharefeature_20_32/mmdet/core/evaluation/iou.py","file_name":"iou.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"406783340","text":"# -*- coding: utf-8 -*-\nimport libs.lif as lf\n\nkey = \"question\"\nvalue = self.context['question']\n\ntheta = self.get_theta(all_float=False, key=key, value=value)\nLif = lf.Lif(theta, x0=1.0, A=1.4 , T=100, gamma=.004, omega=.8, lifversion=2)\nLif.update(self.action[\"t\"],self.action[\"x\"], self.reward)\nself.set_theta(Lif, key=key, value=value)\n\nimport time\n\nself.log_data({\n \"type\" : \"setreward\",\n \"t\" : self.action[\"t\"],\n \"x\" : self.action[\"x\"],\n \"y\" : self.reward,\n \"x0\" : theta['x0'],\n \"time\" : int(time.time()),\n \"context\" : self.context,\n \"q\" : self.context['question']\n})\n\n# Example URL\n# /2/setReward.json?key=24ff7bb26&action={\"x\":7.8,\"t\":2.0}&reward=6.8&context={\"question\":2}","sub_path":"app/libs/defaults/lif_setreward.py","file_name":"lif_setreward.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"496720175","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ApplicationTypeInfo(Model):\n \"\"\"Information about an application type.\n\n :param name:\n :type name: str\n :param version:\n :type version: str\n :param default_parameter_list:\n :type default_parameter_list: list of :class:`ApplicationParameter\n `\n :param status: Possible values include: 'Invalid', 'Provisioning',\n 'Available', 'Unprovisioning', 'Failed'\n :type status: str or :class:`enum `\n :param status_details:\n :type status_details: str\n :param application_type_definition_kind: Possible values include:\n 'Invalid', 'ServiceFabricApplicationPackage', 'Compose'\n :type application_type_definition_kind: str or :class:`enum\n `\n \"\"\"\n\n _attribute_map = {\n 'name': {'key': 'Name', 'type': 'str'},\n 'version': {'key': 'Version', 'type': 'str'},\n 'default_parameter_list': {'key': 'DefaultParameterList', 'type': '[ApplicationParameter]'},\n 'status': {'key': 'Status', 'type': 'str'},\n 'status_details': {'key': 'StatusDetails', 'type': 'str'},\n 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'},\n }\n\n def __init__(self, name=None, version=None, default_parameter_list=None, status=None, status_details=None, application_type_definition_kind=None):\n self.name = name\n self.version = version\n self.default_parameter_list = default_parameter_list\n self.status = status\n self.status_details = status_details\n self.application_type_definition_kind = application_type_definition_kind\n","sub_path":"azure-servicefabric/azure/servicefabric/models/application_type_info.py","file_name":"application_type_info.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"332775480","text":"import os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '2llhl+m&5y%47+3e+rmvfo7r!!jid@396i+-a66@qu&v4p(y13'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nALLOWED_HOSTS = []\n\n# Application definition\nINSTALLED_APPS = (\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'django.contrib.humanize',\n    \"ecommerce\",\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'Epsilon.urls'\nWSGI_APPLICATION = 'Epsilon.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n    }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nLOGIN_URL = 'login'\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\nWWW_PATH = os.path.join(BASE_DIR, 'www')\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(WWW_PATH, 'media')\nTEMPLATE_DIRS = (os.path.join(WWW_PATH, 'templates'),)\nSTATICFILES_DIRS = (os.path.join(WWW_PATH, 'static'),)\n\n# set cookie lifetime to 1 day\nSESSION_COOKIE_AGE = 24 * 60 * 60\n","sub_path":"Epsilon/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"51984745","text":"from utils import *\nfrom unsupervised_models import get_lda_model,get_svd_model,get_nmf_model\nfrom get_features import get_tfidf_features,get_countvec_features\nfrom data_process import load_preprocess,pre_process_df\nfrom post_process import get_post_processed_df\nfrom process_supervised_data import get_x_y\nfrom tokenize_n_padding import get_tokeniser_paddings\nfrom supervised_models import create_model,train_model,create_model_with_embeddings\nfrom prepare_embed_matrix import prepare_embeddings\nfrom save_n_load_state import save_model_state,load_model_state\nfrom inference import get_inference_from_supervised,get_inference_from_unsupervised_model\n\n\ndata_path='.\\datasets\\dataset.csv'\ndocuments=load_preprocess(data_path)\nprint(\"Data loaded..!\")\n\n\ntfidf_vectorizer,tfidf_mat,tfidf_feature_names=get_tfidf_features(documents)\ntf_vectorizer,tf_mat,tf_feature_names=get_countvec_features(documents)\nprint(\"Features Extracted..!\")\n\nno_topics = 12\nno_top_words = 15\n\nprint(\"Now Running Models.. \")\nnmf_model=get_nmf_model(tfidf_mat)\nsvd_model=get_svd_model(tfidf_mat)\nlda_model=get_lda_model(tf_mat)\n\nprint(\"Extracting Topic Clusters.. \")\ntopics_nmf=display_topics(nmf_model, tfidf_feature_names, no_top_words)\ntopics_svd=display_topics(svd_model, tfidf_feature_names, no_top_words)\ntopics_lda=display_topics(lda_model, tf_feature_names, no_top_words)\n\nprint(\"Topics Obtained : \\n\")\nprint(topics_lda)\n\nprint(\"Now building post processed dataframe for supervised task.. \")\ndf=get_post_processed_df()\n\ndf_text=pre_process_df(df)\n\ntext = df_text.values\nlabels = df['target_label'].values\n\n\nX_train, y_train, X_test, y_test,max_len=get_x_y(text,labels)\n\n\n\nword_index,encoder,tokenizer,train_padded,validation_padded,training_labels,validation_labels=get_tokeniser_paddings(X_train, y_train, X_test, y_test,max_len)\n\nembedding_matrix=prepare_embeddings(word_index)\n\n\n\"\"\"\nPlease Note - > You can select any one of the following - > \n'model' - to train on latent tensorflow embeddings\n'model2' - to train model with glove-300-D Embeddings\n\"\"\"\nmodel=create_model(train_padded,validation_padded)\n\nmodel2=create_model_with_embeddings(train_padded,validation_padded,embedding_matrix)\n\nhistory,model=train_model(model2,train_padded,validation_padded,training_labels,validation_labels)\n\nsave_model_state(model,tokenizer)\n\nloaded_model, loaded_tokenizer=load_model_state()\n\n\n\n\"\"\"\nPlease Note - > You can select any one of the following - > \n'get_inference_from_supervised' - to get inference from supervised classifier\n'get_inference_from_unsupervised_model' - to get inference from unsupervised model\n\"\"\"\nsample_text=\"Delivered what I ordered and had in stock, excellent fitting service, price was decent and on time.\"\nget_inference_from_supervised(sample_text,loaded_model,max_len,loaded_tokenizer,encoder)\nget_inference_from_unsupervised_model(lda_model,tf_vectorizer,sample_text,threshold=0)\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"388051096","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.views.generic import ListView, DetailView\n\nfrom .forms import DeliveryTypeForm, DeliveryAddressForm, ContactInfoForm\nfrom .models import Advert, Category, Order, DeliveryType, ContactInfo, AdvertStatus, OrderStatus\nfrom .services import PlaceOrderService, OrderService, CategoryService, AdvertService\n\n\nclass LoginRequiredMixin(object):\n\n @classmethod\n def as_view(cls):\n return login_required(super(LoginRequiredMixin, cls).as_view(), login_url='/login')\n\n\nclass HeaderAwareListView(ListView):\n extra_context = {\"category_root\": Category.objects.get(url=\"root\")}\n\n\nclass HeaderAwareDetailView(DetailView):\n extra_context = {\"category_root\": Category.objects.get(url=\"root\")}\n\n\nclass HomePageView(HeaderAwareListView):\n model = Advert\n template_name = \"website/home.html\"\n\n\nclass AdvertDetailsView(HeaderAwareDetailView):\n model = Advert\n\n def get(self, request, *args, **kwargs):\n advert = self.get_object()\n advert.increase_view_counter()\n return super().get(request, args, kwargs)\n\n\nclass CategoryAdvertListView(HeaderAwareListView):\n model = Advert\n slug_field = \"url\"\n template_name = \"website/category.html\"\n\n def get_context_data(self, *, object_list=None, **kwargs):\n # get default impl\n context_data = super(CategoryAdvertListView, self).get_context_data(object_list=None, kwargs=kwargs)\n # extend with category info\n context_data['category'] = Category.objects.get(url=self.kwargs[\"url\"])\n return context_data\n\n def get_queryset(self):\n query_url = self.kwargs[\"url\"]\n if \"root\" in query_url:\n return AdvertService.get_active_adverts()\n else:\n self_and_descendants = CategoryService.get_category_and_descendants(query_url)\n return AdvertService.get_active_adverts_for_categories(category_list=self_and_descendants)\n\n\nclass PaymentDeliveryView(LoginRequiredMixin, DetailView):\n model = Advert\n template_name = 'website/payment-delivery.html'\n extra_context = {\n \"delivery_type_form\": DeliveryTypeForm(),\n \"delivery_address_form\": DeliveryAddressForm(),\n \"contact_info_form\": ContactInfoForm()\n }\n\n\n@login_required(login_url='/login')\ndef payment_delivery_submit(request, pk):\n if request.method == 'POST':\n delivery_type = DeliveryTypeForm(request.POST)\n contact_info = ContactInfoForm(request.POST)\n if delivery_type.data['delivery_type'] == 'self':\n delivery_type = DeliveryType.for_name('self')\n contact_info = \\\n ContactInfo.objects.get_or_create(email=contact_info.data['email'], phone=contact_info.data['phone'])[0]\n advert = Advert.objects.get(pk=pk)\n order = PlaceOrderService(delivery_type, None, contact_info, advert).place_order()\n return redirect(to='order-confirmation', pk=order.pk)\n\n\nclass OrderConfirmationView(HeaderAwareDetailView):\n\n model = Order\n template_name = \"website/order_confirmation.html\"\n\n\nclass OrderHistoryView(HeaderAwareListView):\n\n model = Order\n template_name = \"website/order_history.html\"\n\n def get_queryset(self):\n return OrderService.get_all_orders()\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"485043233","text":"from django.conf.urls import url, include\nfrom . import views\n\napp_name = 'rentacar'\n\nurlpatterns = [\n url(r'^clientelist/$', views.RClienteList),\n url(r'^$', views.IndexView.as_view(), name='index'),\n\n url(r'^veiculos/$', views.VeiculosView.as_view(), name=\"veiculos\"),\n url(r'^veiculo/(?P[0-9]+)/$', views.VeiculoView.as_view(), name=\"veiculo\"),\n url(r'^veiculo/add/$', views.VeiculoCreate.as_view(), name='veiculo_add'),\n url(r'^veiculo/delete/(?P[0-9]+)/$', views.VeiculoDelete.as_view(), name='veiculo_delete'),\n url(r'^veiculo/edit/(?P[0-9]+)/$', views.VeiculoEdit.as_view(), name='veiculo_edit'),\n\n url(r'^clientes/$', views.ClientesView.as_view(), name=\"clientes\"),\n url(r'^cliente/(?P[0-9]+)/$', views.ClienteView.as_view(), name=\"cliente\"),\n url(r'^cliente/add/$', views.ClienteCreate.as_view(), name='cliente_add'),\n url(r'^cliente/delete/(?P[0-9]+)/$', views.ClienteDelete.as_view(), name='cliente_delete'),\n url(r'^cliente/edit/(?P[0-9]+)/$', views.ClienteEdit.as_view(), name='cliente_edit'),\n \n url(r'^alugueis/$', views.AlugueisView.as_view(), name=\"alugueis\"),\n url(r'^aluguel/(?P[0-9]+)/$', views.AluguelView.as_view(), name=\"aluguel\"),\n url(r'^aluguel/add/$', views.AluguelCreate.as_view(), name='aluguel_add'),\n url(r'^aluguel/devolucao/(?P[0-9]+)/$', views.AluguelDevolucao.as_view(), name='aluguel_devolucao'),\n\n url(r'^reservas/$', views.IndexView.as_view(), name=\"reservas\")\n]","sub_path":"rentacar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"216363419","text":"import requests\r\nimport datetime\r\nimport os\r\nimport winsound\r\nimport bs4 as bs\r\nimport webbrowser\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nfrom urllib.request import Request, urlopen\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\n\r\ndef link_parser(image):\r\n image = image.replace('thumbnails', '')\r\n link = image.replace('/th', '/i').replace('small', 'big').replace('//t', '//i').replace('/t/', '/i/').replace('_t', '')\r\n if 'imgcandy' in link:\r\n link = link.replace('imgc', 'i.imgc').replace('/upload', '')\r\n if 'pixhost' in link:\r\n link = link.replace('//i', '//img').replace('iumbs', 'images')\r\n if 'img.yt' in link:\r\n link = link.replace('img.yt', 's.img.yt').replace('/upload', '')\r\n\r\n return link\r\n\r\ndef link_opener(url, jump):\r\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\r\n webpage = urlopen(req).read()\r\n soup = bs.BeautifulSoup(webpage, 'lxml')\r\n images = []\r\n \r\n for img in soup.findAll('img'):\r\n image = str(img.get('src'))\r\n if image[:4]=='http' and '' in image:\r\n images.append(link_parser(image)) \r\n i=0\r\n k=3\r\n for img in images:\r\n i+=1\r\n url = img\r\n if i == k:\r\n k +=int(len(images)/jump)\r\n webbrowser.open_new(url)\r\n \r\n print(str(i)+'. '+img)\r\n\r\ndef multiple_threads(urls, names):\r\n start_all = datetime.datetime.now()\r\n\r\n for url, name in zip(urls, names):\r\n url_link = url\r\n req = Request(url_link, headers={'User-Agent': 'Mozilla/5.0'})\r\n webpage = urlopen(req).read()\r\n soup = bs.BeautifulSoup(webpage, 'lxml')\r\n images = []\r\n \r\n #List with urls\r\n for img in soup.findAll('img'):\r\n image = str(img.get('src'))\r\n if image[:4]=='http' and '' in image:\r\n images.append(link_parser(image)) \r\n\r\n #numbers for naming files \r\n limit = len(images)\r\n i = [x for x in range(1, limit+1)]\r\n \r\n folder_name = f'{name}'\r\n folder_path = 'F:\\Pyk\\Photos\\\\'+folder_name+'\\\\'\r\n print(folder_name)\r\n\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n print(f'Created folder: {folder_path}')\r\n \r\n stuff = os.listdir(folder_path)\r\n stuff = [x for x in stuff if 't_' not in x]\r\n stuff = [x.replace('.jpg', '') for x in stuff]\r\n buuu = []\r\n \r\n def everything(images, i):\r\n now = str(datetime.datetime.now())\r\n out_of = limit\r\n \r\n if str(i).zfill(3) not in stuff: \r\n if i%10 == 0:\r\n print('Processing... 
'+str(i)+'/'+str(out_of)+'\\t'+'\\t'+now)\r\n new = 'F:\\Pyk\\Photos\\\\'+folder_name+'\\\\'+''+str(i).zfill(3)+'.jpg'\r\n \r\n try:\r\n img = Image.open(BytesIO(requests.get(images).content))\r\n img.save(new)\r\n \r\n except:\r\n buuu.append(i)\r\n pass\r\n \r\n start = datetime.datetime.now()\r\n \r\n pool = ThreadPool(16) \r\n pool.starmap(everything, zip(images, i))\r\n pool.close() \r\n pool.join() \r\n \r\n print('Following {} went to shit:'.format(len(buuu)))\r\n print(set(buuu))\r\n \r\n end = datetime.datetime.now()\r\n winsound.PlaySound('SystemExit', winsound.SND_ALIAS)\r\n print('Done in '+str(end-start))\r\n print()\r\n \r\n end_all = datetime.datetime.now()\r\n print('Done \\'em all in '+str(end_all-start_all))\r\n\r\ndef from_post(page, url, postcount, postnames):\r\n proper_start = datetime.datetime.now()\r\n\r\n req = Request(url+page, headers={'User-Agent': 'Mozilla/5.0'})\r\n webpage = urlopen(req).read()\r\n soup = bs.BeautifulSoup(webpage, 'lxml')\r\n posts = soup.find_all('li')\r\n \r\n k = 0\r\n lel = len(postcount)\r\n \r\n for postname, req_post in zip(postnames, postcount):\r\n k += 1\r\n images = []\r\n for post in posts:\r\n names = post.findAll('a')\r\n for name in names:\r\n if '#'+str(req_post) in name: \r\n interesting = post.find_all('img')\r\n for img in interesting:\r\n check = str(img.get('src'))\r\n if check[:4]=='http':\r\n images.append(link_parser(check)) \r\n\r\n #numbers for naming files \r\n limit = len(images)\r\n i = [x for x in range(1, limit+1)]\r\n \r\n folder_name = postname\r\n folder_path = 'F:\\Pyk\\Photos\\\\'+folder_name+'\\\\'\r\n \r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n\r\n def everything(images, i):\r\n now = str(datetime.datetime.now())\r\n out_of = limit\r\n if i%20 == 0:\r\n print('Processing... '+str(i)+'/'+str(out_of)+'\\t'+'\\t'+now)\r\n img = Image.open(BytesIO(requests.get(images).content))\r\n new = 'F:\\Pyk\\Photos\\\\'+folder_name+'\\\\'+''+str(i).zfill(3)+'.jpg'\r\n img.save(new)\r\n \r\n start = datetime.datetime.now()\r\n \r\n pool = ThreadPool(16) \r\n pool.starmap(everything, zip(images, i))\r\n pool.close() \r\n pool.join() \r\n \r\n end = datetime.datetime.now()\r\n \r\n print(f'{k}/{lel} Done in {end-start}')\r\n\r\n proper_end = datetime.datetime.now()\r\n winsound.PlaySound('SystemExit', winsound.SND_ALIAS)\r\n print('Done \\'em all in '+str(proper_end-proper_start))\r\n\r\n\r\npostcount = [61,62,66,67,69,72,73]\r\npostnames = ['Anita C - Lodels', 'Anita C - Bringing', 'Anita C - Ivimas', \r\n 'Anita C - Totally', 'Anita C - To The Top', 'Anita C - Sensix', \r\n 'Anita C - Velian']\r\npage = '5'\r\nurl = 'https://vipergirls.to/threads/1586427-Anita-Anita-C-Anita-Silver-Arina-Danica-Danita-Luisa-Mocca-Vasilisa-Mudraja/page'\r\n\r\n#from_post(page, url, postcount, postnames)\r\n\r\nurls = ['https://vipergirls.to/threads/1280907-Engelie-Extreme-Perspective-(X45)-10000px?highlight=Engelie',\r\n 'https://vipergirls.to/threads/639846-Engelie-Tropical-Garden-x59?highlight=Engelie']\r\nnames = ['Engelie - Extreme Perspective', 'Engelie - Tropical Garden']\r\n\r\n#multiple_threads(urls, names)\r\n\r\n\r\nurl = 'https://vipergirls.to/threads/639846-Engelie-Tropical-Garden-x59?highlight=Engelie'\r\n#link_opener(url, 3)","sub_path":"download_pic.py","file_name":"download_pic.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"362473281","text":"\n\n#calss header\nclass _CHAPEL():\n\tdef __init__(self,): \n\t\tself.name = \"CHAPEL\"\n\t\tself.definitions = [u'a room that is part of a larger building and is used for Christian worship: ', u'a building used for Christian worship by Christians who do not belong to the Church of England or the Roman Catholic Church']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_chapel.py","file_name":"_chapel.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"117951350","text":"import collections\nN = int(input())\nA = list(map(int, input().split()))\nB = [A[i] - (i+1) for i in range(N)]\n\nB.sort()\n\nans = 0\nt = B[len(B)//2]\n\nfor x in B:\n ans += abs(x - t)\n\nprint(ans)\n","sub_path":"Python_codes/p03311/s742518098.py","file_name":"s742518098.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"9485916","text":"# pylint: disable=invalid-name,duplicate-code\nimport pytest\nimport django\nfrom django.test import TestCase, override_settings\nfrom django.test import Client\nfrom django.core.urlresolvers import reverse\nfrom django.conf import global_settings\nfrom django.contrib.auth import get_user_model\n\n\nUser = get_user_model()\n\n\n@override_settings(\n STATICFILES_STORAGE=global_settings.STATICFILES_STORAGE)\nclass SmokeTest(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.user = User.objects.create(username=\"chipy\",)\n\n def test__profile_list_url__GET(self):\n # SETUP\n\n # TEST\n response = self.client.get(reverse('profiles:list'), follow=True)\n\n # CHECK\n self.assertEqual(response.status_code, 200)\n\n def test__profile_edit_url__GET_annon(self):\n # SETUP\n\n # TEST\n response = self.client.get(reverse('profiles:edit'), follow=True)\n\n # CHECK\n self.assertEqual(response.status_code, 200)\n\n @pytest.mark.skipif(\n django.VERSION < (1, 9, 0),\n reason=\"Django 1.9 introduces force_login\")\n def test__profile_edit_url__GET_auth(self):\n # SETUP\n self.client.force_login(self.user)\n\n # TEST\n response = self.client.get(reverse('profiles:edit'), follow=True)\n\n # CHECK\n self.assertEqual(response.status_code, 200)\n\n @pytest.mark.skipif(\n django.VERSION < (1, 9, 0),\n reason=\"Django 1.9 introduces force_login\")\n def test__profile_edit_url__POST_auth(self):\n # SETUP\n display_name = \"ChiPy\"\n self.client.force_login(self.user)\n\n # TEST\n response = self.client.post(\n reverse('profiles:edit'),\n {'display_name': display_name, 'show': True}, follow=True)\n\n # CHECK\n self.user.profile.refresh_from_db()\n self.assertEqual(response.status_code, 302)\n self.assertEqual(\n self.user.profile.display_name,\n display_name)\n self.assertTrue(self.user.profile.show)\n","sub_path":"chipy_org/apps/profiles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"574777116","text":"from graphel import Graphel\n\n\ndef main():\n initial_airport = \"WAW\"\n airports_to_visit = (\"ATH\", \"VIE\", \"BRU\", \"SOF\", \"LCA\", \"PGR\", \"ORY\", \"SXF\")\n date_range = (\"2020-02-01\", \"2020-03-01\")\n day_range = (2, 4)\n total_cost = 200.0\n\n graphel = Graphel(date_range, (initial_airport, ) + airports_to_visit)\n graphel.insert_chains(initial_airport, day_range, total_cost)\n\n Graphel.show_best_results(date_range, initial_airport, airports_to_visit, )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"353687752","text":"import json\nimport time\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\n\ndef lambda_handler(event, context):\n\n #get current time minuc 30 minutes\n timeCheck = (int(time.time())-1800)\n #print(timeCheck)\n\n # dynamodb client\n client = boto3.resource('dynamodb')\n dynamodb_client = boto3.client('dynamodb')\n\n table = client.Table('twitter_sentiment')\n\n response = table.scan(\n FilterExpression= Attr('sentiment_score').lt(1) & Attr('is_spam').lt(1) & Attr('created_at').gt(timeCheck)\n )\n\n data = response['Items']\n while 'LastEvaluatedKey' in response:\n response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'], FilterExpression= Attr('sentiment_score').lt(1) & Attr('is_spam').lt(1) & Attr('created_at').gt(timeCheck))\n data.extend(response['Items'])\n avgSentiment = 0\n count = 0\n\n for tweet in data:\n count = count +1\n avgSentiment = avgSentiment + (float(tweet.get('sentiment_score')))\n # print(tweet.get('sentiment_score'))\n\n print(count)\n if(count != 0):\n avgSentiment = avgSentiment/count\n avgSentiment = str(avgSentiment)\n else:\n avgSentiment = \"0\"\n\n timeNew = int(time.time())\n timeNew = str(timeNew)\n\n sentiment_id = \"TSLA\" + timeNew\n #print(avgSentiment)\n\n table_name = 'sentiment_stock'\n newSentiment = {\n 'sentiment_id': {'S': sentiment_id},\n 'timestamp': {'N': timeNew},\n 'sentiment' : {'N' : avgSentiment},\n 'ticker' : {'S': 'TSLA'}\n }\n\n dynamodb_client.put_item(TableName = table_name, Item= newSentiment)\n\n\n return None\n","sub_path":"CORE/lambda/sentimentUpdateCycle.py","file_name":"sentimentUpdateCycle.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"623665899","text":"import typing\nfrom collections import OrderedDict\n\nimport pytest\n\nfrom flytekit.common.exceptions.user import FlyteAssertion\nfrom flytekit.common.translator import get_serializable\nfrom flytekit.core import context_manager\nfrom flytekit.core.context_manager import Image, ImageConfig\nfrom flytekit.core.dynamic_workflow_task import dynamic\nfrom flytekit.core.node_creation import create_node\nfrom flytekit.core.task import task\nfrom flytekit.core.workflow import workflow\n\n\ndef test_normal_task():\n @task\n def t1(a: str) -> str:\n return a + \" world\"\n\n @dynamic\n def my_subwf(a: int) -> typing.List[str]:\n s = []\n for i in range(a):\n s.append(t1(a=str(i)))\n return s\n\n @workflow\n def my_wf(a: str) -> (str, typing.List[str]):\n t1_node = create_node(t1, a=a)\n dyn_node = create_node(my_subwf, a=3)\n return t1_node.o0, dyn_node.o0\n\n r, x = my_wf(a=\"hello\")\n assert r == \"hello world\"\n assert x == [\"0 world\", \"1 world\", \"2 world\"]\n\n serialization_settings = context_manager.SerializationSettings(\n project=\"test_proj\",\n domain=\"test_domain\",\n version=\"abc\",\n image_config=ImageConfig(Image(name=\"name\", fqn=\"image\", tag=\"name\")),\n env={},\n )\n wf_spec = get_serializable(OrderedDict(), serialization_settings, my_wf)\n assert len(wf_spec.template.nodes) == 2\n assert len(wf_spec.template.outputs) == 2\n\n @task\n def t2():\n ...\n\n @task\n def t3():\n ...\n\n @workflow\n def empty_wf():\n t2_node = create_node(t2)\n t3_node = create_node(t3)\n t3_node.runs_before(t2_node)\n\n # Test that VoidPromises can handle runs_before\n empty_wf()\n\n @workflow\n def empty_wf2():\n t2_node = create_node(t2)\n t3_node = create_node(t3)\n t3_node >> t2_node\n\n serialization_settings = context_manager.SerializationSettings(\n project=\"test_proj\",\n domain=\"test_domain\",\n version=\"abc\",\n image_config=ImageConfig(Image(name=\"name\", fqn=\"image\", tag=\"name\")),\n env={},\n )\n wf_spec = get_serializable(OrderedDict(), serialization_settings, empty_wf)\n assert wf_spec.template.nodes[0].upstream_node_ids[0] == \"n1\"\n assert wf_spec.template.nodes[0].id == \"n0\"\n\n wf_spec = get_serializable(OrderedDict(), serialization_settings, empty_wf2)\n assert wf_spec.template.nodes[0].upstream_node_ids[0] == \"n1\"\n assert wf_spec.template.nodes[0].id == \"n0\"\n\n with pytest.raises(FlyteAssertion):\n\n @workflow\n def empty_wf2():\n create_node(t2, \"foo\")\n\n\ndef test_more_normal_task():\n nt = typing.NamedTuple(\"OneOutput\", t1_str_output=str)\n\n @task\n def t1(a: int) -> nt:\n # This one returns a regular tuple\n return (f\"{a + 2}\",)\n\n @task\n def t1_nt(a: int) -> nt:\n # This one returns an instance of the named tuple.\n return nt(f\"{a + 2}\")\n\n @task\n def t2(a: typing.List[str]) -> str:\n return \" \".join(a)\n\n @workflow\n def my_wf(a: int, b: str) -> (str, str):\n t1_node = create_node(t1, a=a).with_overrides(aliases={\"t1_str_output\": \"foo\"})\n t1_nt_node = create_node(t1_nt, a=a)\n t2_node = create_node(t2, a=[t1_node.t1_str_output, t1_nt_node.t1_str_output, b])\n return t1_node.t1_str_output, t2_node.o0\n\n x = my_wf(a=5, b=\"hello\")\n assert x == (\"7\", \"7 7 hello\")\n\n\ndef test_reserved_keyword():\n nt = typing.NamedTuple(\"OneOutput\", outputs=str)\n\n @task\n def t1(a: int) -> nt:\n # This one returns a regular tuple\n return (f\"{a + 2}\",)\n\n # Test that you can't name an output \"outputs\"\n with pytest.raises(FlyteAssertion):\n\n @workflow\n def my_wf(a: int) -> str:\n t1_node = 
create_node(t1, a=a)\n return t1_node.outputs\n\n\ndef test_runs_before():\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @task()\n def sleep_task(a: int) -> str:\n a = a + 2\n return \"world-\" + str(a)\n\n @dynamic\n def my_subwf(a: int) -> (typing.List[str], int):\n s = []\n for i in range(a):\n s.append(sleep_task(a=i))\n return s, 5\n\n @workflow\n def my_wf(a: int, b: str) -> (str, typing.List[str], int):\n subwf_node = create_node(my_subwf, a=a)\n t2_node = create_node(t2, a=b, b=b)\n subwf_node.runs_before(t2_node)\n subwf_node >> t2_node\n return t2_node.o0, subwf_node.o0, subwf_node.o1\n\n my_wf(a=5, b=\"hello\")\n","sub_path":"tests/flytekit/unit/core/test_node_creation.py","file_name":"test_node_creation.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"627436642","text":"\"\"\"\n#!-*- coding=utf-8 -*-\n@author: BADBADBADBADBOY\n@contact: 2441124901@qq.com\n@software: PyCharm Community Edition\n@file: prune.py\n@time: 2020/6/27 10:23\n\n\"\"\"\nimport sys\nsys.path.append('/home/aistudio/external-libraries')\nfrom models.DBNet import DBNet\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport collections\nimport torchvision.transforms as transforms\nimport cv2\nimport os\nimport argparse\nimport math\nfrom PIL import Image\nfrom torch.autograd import Variable\n\ndef resize_image(img,short_side=736):\n height, width, _ = img.shape\n if height < width:\n new_height = short_side\n new_width = int(math.ceil(new_height / height * width / 32) * 32)\n else:\n new_width = short_side\n new_height = int(math.ceil(new_width / width * height / 32) * 32)\n resized_img = cv2.resize(img, (new_width, new_height))\n return resized_img\n\ndef prune(args):\n\n\n img = cv2.imread(args.img_file)\n img = resize_image(img)\n img = Image.fromarray(img)\n img = img.convert('RGB')\n img = transforms.ToTensor()(img)\n img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)\n img = Variable(img.cuda()).unsqueeze(0)\n\n model = DBNet(args.backbone, adaptive=False).cuda()\n model_dict = torch.load(args.checkpoint)['state_dict']\n state = model.state_dict()\n for key in state.keys():\n if key in model_dict.keys():\n state[key] = model_dict[key]\n model.load_state_dict(state)\n model.eval()\n with torch.no_grad():\n out = model(img)\n cv2.imwrite('re.jpg',out[0,0].cpu().numpy()*255)\n\n\n bn_weights = []\n for m in model.modules():\n if (isinstance(m, nn.BatchNorm2d)):\n bn_weights.append(m.weight.data.abs().clone())\n bn_weights = torch.cat(bn_weights, 0)\n\n sort_result, sort_index = torch.sort(bn_weights)\n\n thresh_index = int(args.cut_percent * bn_weights.shape[0])\n\n if (thresh_index == bn_weights.shape[0]):\n thresh_index = bn_weights.shape[0] - 1\n\n prued = 0\n prued_mask = []\n bn_index = []\n conv_index = []\n remain_channel_nums = []\n for k, m in enumerate(model.modules()):\n if (isinstance(m, nn.BatchNorm2d)):\n bn_weight = m.weight.data.clone()\n mask = bn_weight.abs().gt(sort_result[thresh_index])\n remain_channel = mask.sum()\n\n if (remain_channel == 0):\n remain_channel = 1\n mask[int(torch.argmax(bn_weight))] = 1\n\n v = 0\n n = 1\n if (remain_channel % args.base_num != 0):\n if (remain_channel > args.base_num):\n while (v < remain_channel):\n n += 1\n v = args.base_num * n\n if (remain_channel - (v - args.base_num) < v - remain_channel):\n remain_channel = v - args.base_num\n else:\n remain_channel = v\n if (remain_channel > bn_weight.size()[0]):\n remain_channel = bn_weight.size()[0]\n remain_channel = torch.tensor(remain_channel)\n result, index = torch.sort(bn_weight)\n mask = bn_weight.abs().ge(result[-remain_channel])\n\n remain_channel_nums.append(int(mask.sum()))\n prued_mask.append(mask)\n bn_index.append(k)\n prued += mask.shape[0] - mask.sum()\n elif (isinstance(m, nn.Conv2d)):\n conv_index.append(k)\n print(remain_channel_nums)\n print('total_prune_ratio:', float(prued) / bn_weights.shape[0])\n print(bn_index)\n\n new_model = DBNet(args.backbone, adaptive=False).cuda()\n\n merge1_index = [13, 17, 24, 32]\n merge2_index = [41, 45, 52, 60, 68]\n merge3_index = [77, 81, 88, 96, 104, 112, 120]\n merge4_index = [129, 133, 140, 148]\n\n index_0 = []\n for item in merge1_index:\n index_0.append(bn_index.index(item))\n mask1 = prued_mask[index_0[0]] | prued_mask[index_0[1]] | 
prued_mask[index_0[2]] | prued_mask[index_0[3]]\n\n index_1 = []\n for item in merge2_index:\n index_1.append(bn_index.index(item))\n mask2 = prued_mask[index_1[0]] | prued_mask[index_1[1]] | prued_mask[index_1[2]] | prued_mask[index_1[3]] | prued_mask[\n index_1[4]]\n\n index_2 = []\n for item in merge3_index:\n index_2.append(bn_index.index(item))\n mask3 = prued_mask[index_2[0]] | prued_mask[index_2[1]] | prued_mask[index_2[2]] | prued_mask[index_2[3]] | prued_mask[\n index_2[4]] | prued_mask[index_2[5]] | prued_mask[index_2[6]]\n\n index_3 = []\n for item in merge4_index:\n index_3.append(bn_index.index(item))\n mask4 = prued_mask[index_3[0]] | prued_mask[index_3[1]] | prued_mask[index_3[2]] | prued_mask[index_3[3]]\n\n\n for index in index_0:\n prued_mask[index] = mask1\n\n for index in index_1:\n prued_mask[index] = mask2\n\n for index in index_2:\n prued_mask[index] = mask3\n\n for index in index_3:\n prued_mask[index] = mask4\n\n print(new_model)\n##############################################################\n index_bn = 0\n index_conv = 0\n\n bn_mask = []\n conv_in_mask = []\n conv_out_mask = []\n\n for m in new_model.modules():\n if (isinstance(m, nn.BatchNorm2d)):\n m.num_features = prued_mask[index_bn].sum()\n bn_mask.append(prued_mask[index_bn])\n index_bn += 1\n elif (isinstance(m, nn.Conv2d)):\n if(index_conv == 0):\n m.in_channels = 3\n conv_in_mask.append(torch.ones(3))\n else:\n m.in_channels = prued_mask[index_conv - 1].sum()\n conv_in_mask.append(prued_mask[index_conv - 1])\n m.out_channels = prued_mask[index_conv].sum()\n conv_out_mask.append(prued_mask[index_conv])\n index_conv += 1\n if (index_bn > len(bn_index) - 3):\n break\n\n conv_change_index = [16,44,80,132] # \n change_conv_bn_index = [3,32,68,120] # \n tag = 0\n for m in new_model.modules():\n if (isinstance(m, nn.Conv2d)):\n if(tag in conv_change_index):\n index = conv_change_index.index(tag)\n index = change_conv_bn_index[index]\n index =bn_index.index(index)\n mask = prued_mask[index]\n conv_in_mask[index+4] = mask\n m.in_channels = mask.sum()\n tag+=1\n\n \n\n bn_i = 0\n conv_i = 0\n scale_i = 0 \n scale_mask = [mask4,mask3,mask2,mask1]\n for [m0, m1] in zip(model.modules(), new_model.modules()):\n if (bn_i > len(bn_mask)-1):\n if isinstance(m0, nn.Conv2d):\n # import pdb\n # pdb.set_trace()\n if(scale_i<4):\n m1.in_channels = scale_mask[scale_i].sum()\n idx0 = np.squeeze(np.argwhere(np.asarray(scale_mask[scale_i].cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(torch.ones(256).cpu().numpy())))\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w = m0.weight.data[:, idx0, :, :].clone()\n m1.weight.data = w[idx1, :, :, :].clone()\n if m1.bias is not None:\n m1.bias.data = m0.bias.data[idx1].clone()\n \n else:\n m1.weight.data = m0.weight.data.clone()\n if m1.bias is not None:\n m1.bias.data = m0.bias.data.clone()\n scale_i+=1\n\n else:\n if isinstance(m0, nn.BatchNorm2d):\n idx1 = np.squeeze(np.argwhere(np.asarray(bn_mask[bn_i].cpu().numpy())))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n m1.weight.data = m0.weight.data[idx1].clone()\n if m1.bias is not None:\n m1.bias.data = m0.bias.data[idx1].clone()\n m1.running_mean = m0.running_mean[idx1].clone()\n m1.running_var = m0.running_var[idx1].clone()\n bn_i += 1\n elif isinstance(m0, nn.Conv2d):\n if (isinstance(conv_in_mask[conv_i], list)):\n idx0 = np.squeeze(np.argwhere(np.asarray(torch.cat(conv_in_mask[conv_i], 0).cpu().numpy())))\n else:\n idx0 = 
np.squeeze(np.argwhere(np.asarray(conv_in_mask[conv_i].cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(conv_out_mask[conv_i].cpu().numpy())))\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w = m0.weight.data[:, idx0, :, :].clone()\n m1.weight.data = w[idx1, :, :, :].clone()\n if m1.bias is not None:\n m1.bias.data = m0.bias.data[idx1].clone()\n conv_i += 1\n\n print(new_model)\n new_model.eval()\n with torch.no_grad():\n out = new_model(img)\n print(out.shape)\n cv2.imwrite('re1.jpg',out[0,0].cpu().numpy()*255)\n\n save_obj = {'prued_mask': prued_mask, 'bn_index': bn_index, 'state_dict': new_model.state_dict()}\n torch.save(save_obj, os.path.join(args.save_prune_model_path, 'pruned_dict.pth.tar'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Hyperparams')\n parser.add_argument('--backbone', nargs='?', type=str, default='resnet50')\n\n parser.add_argument('--num_workers', nargs='?', type=int, default=0,\n help='num workers to train')\n parser.add_argument('--base_num', nargs='?', type=int, default=8,\n help='Base after Model Channel Clipping')\n parser.add_argument('--cut_percent', nargs='?', type=float, default=0.9,\n help='Model channel clipping scale')\n parser.add_argument('--checkpoint', default='./checkpoints/DB_resnet50_bs_16_ep_1200/DB.pth.tar',\n type=str, metavar='PATH',\n help='ori model path')\n parser.add_argument('--save_prune_model_path', default='./pruned/checkpoints/', type=str, metavar='PATH',\n help='pruned model path')\n parser.add_argument('--img_file',\n default='/home/aistudio/work/data/icdar/test_img/img_10.jpg',\n type=str,\n help='')\n args = parser.parse_args()\n\n prune(args)\n","sub_path":"pruned/prune.py","file_name":"prune.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"276652104","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\n__author__ = \"Jon-Mikkel Korsvik & Petter Bøe Hørtvedt\"\n__email__ = \"jonkors@nmbu.no & petterho@nmbu.no\"\n\nfrom src.biosim.simulation import BioSim\nfrom src.biosim.animals import Carnivore, Herbivore, BaseAnimal\n\ndefault_population = [\n {\n \"loc\": (3, 3),\n \"pop\": [\n {\"species\": \"Herbivore\", \"age\": 5, \"weight\": 20}\n for _ in range(2000)\n ],\n },\n]\nsim = BioSim(island_map='OOOOOOO\\n'\n 'ODDDDDO\\n'\n 'ODDDDDO\\n'\n 'ODDDDDO\\n'\n 'ODDDDDO\\n'\n 'ODDDDDO\\n'\n 'OOOOOOO', ini_pop=default_population,\n img_base='migrate_checker', ymax_animals=3000)\nsim.set_animal_parameters('Herbivore', {'mu': 1e10, 'omega': 1e-10, 'eta': 1e-10})\nprint(Herbivore.mu, Herbivore.omega, Herbivore.eta, Herbivore.w_birth, Herbivore.sigma_birth)\nsim.make_movie()","sub_path":"examples/migrate_test.py","file_name":"migrate_test.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"557625508","text":"\"\"\"\nタスクを一括で受け入れ完了にする\n\"\"\"\n\nimport argparse\nimport json\nimport logging\nimport time\nfrom typing import Any, Callable, Dict, List, Optional # pylint: disable=unused-import\n\nimport requests\n\nimport annofabapi\nimport annofabcli\nfrom annofabapi.typing import Inspection, Task\nfrom annofabcli import AnnofabApiFacade\n\nlogger = logging.getLogger(__name__)\n\nTaskId = str\nInputDataId = str\nInspectionJson = Dict[TaskId, Dict[InputDataId, List[Inspection]]]\n\n\nclass ComleteTasks:\n \"\"\"\n タスクを受け入れ完了にする\n \"\"\"\n\n def __init__(self, service: annofabapi.Resource, facade: AnnofabApiFacade):\n self.service = service\n self.facade = facade\n\n def complete_tasks_with_changing_inspection_status(self, project_id: str, task_id_list: List[str],\n inspection_status: str, inspection_json: InspectionJson):\n \"\"\"\n 検査コメントのstatusを変更(対応完了 or 対応不要)にした上で、タスクを受け入れ完了状態にする\n Args:\n project_id: 対象のproject_id\n task_id_list: 受け入れ完了にするタスクのtask_idのList\n inspection_status: 変更後の検査コメントの状態\n inspection_json: 変更対象の検査コメントのJSON情報\n\n \"\"\"\n\n account_id = self.facade.get_my_account_id()\n\n for task_id in task_id_list:\n task, _ = self.service.api.get_task(project_id, task_id)\n if task[\"phase\"] != \"acceptance\":\n logger.warning(f\"task_id: {task_id}, phase: {task['phase']} \")\n continue\n\n # 担当者変更\n try:\n self.facade.change_operator_of_task(project_id, task_id, account_id)\n self.facade.change_to_working_phase(project_id, task_id, account_id)\n logger.debug(f\"{task_id}: 担当者を変更した\")\n\n except requests.HTTPError as e:\n logger.warning(e)\n logger.warning(f\"{task_id} の担当者変更に失敗\")\n\n # 担当者変更してから数秒待たないと、検査コメントの付与に失敗する(「検査コメントの作成日時が不正です」と言われる)\n time.sleep(3)\n\n # 検査コメントを付与して、タスクを受け入れ完了にする\n try:\n self.complete_acceptance_task(project_id, task, inspection_status, inspection_json, account_id)\n except requests.HTTPError as e:\n logger.warning(e)\n logger.warning(f\"{task_id} の受入完了に失敗\")\n\n self.facade.change_to_break_phase(project_id, task_id, account_id)\n continue\n\n def update_status_of_inspections(self, project_id: str, task_id: str, input_data_id: str,\n inspection_json: InspectionJson, inspection_status: str):\n target_insepctions = inspection_json.get(task_id, {}).get(input_data_id)\n\n if target_insepctions is None or len(target_insepctions) == 0:\n logger.warning(f\"変更対象の検査コメントはなかった。task_id = {task_id}, input_data_id = {input_data_id}\")\n return\n\n target_inspection_id_list = [inspection[\"inspection_id\"] for inspection in target_insepctions]\n\n def filter_inspection(arg_inspection: Inspection) -> bool:\n \"\"\"\n statusを変更する検査コメントの条件。\n \"\"\"\n\n return arg_inspection[\"inspection_id\"] in target_inspection_id_list\n\n self.service.wrapper.update_status_of_inspections(project_id, task_id, input_data_id, filter_inspection,\n inspection_status)\n logger.debug(f\"{task_id}, {input_data_id}, {len(target_insepctions)}件 検査コメントの状態を変更\")\n\n def complete_acceptance_task(self, project_id: str, task: Task, inspection_status: str,\n inspection_json: InspectionJson, account_id: str):\n \"\"\"\n 検査コメントのstatusを変更(対応完了 or 対応不要)にした上で、タスクを受け入れ完了状態にする\n \"\"\"\n\n task_id = task[\"task_id\"]\n\n # 検査コメントの状態を変更する\n for input_data_id in task[\"input_data_id_list\"]:\n self.update_status_of_inspections(project_id, task_id, input_data_id, inspection_json, inspection_status)\n\n # タスクの状態を検査する\n if self.validate_task(project_id, task_id):\n self.facade.complete_task(project_id, task_id, account_id)\n logger.info(f\"{task_id}: タスクを受入完了にした\")\n else:\n 
logger.warning(f\"{task_id}, タスク検査で警告/エラーがあったので、タスクを受入完了できなかった\")\n self.facade.change_to_break_phase(project_id, task_id, account_id)\n\n def validate_task(self, project_id: str, task_id: str) -> bool:\n # Validation\n validation, _ = self.service.api.get_task_validation(project_id, task_id)\n validation_inputs = validation[\"inputs\"]\n is_valid = True\n for validation in validation_inputs:\n input_data_id = validation[\"input_data_id\"]\n inspection_summary = validation[\"inspection_summary\"]\n if inspection_summary in [\"unprocessed\", \"new_unprocessed_inspection\"]:\n logger.warning(f\"{task_id}, {input_data_id}, {inspection_summary}, 未処置の検査コメントがある。\")\n is_valid = False\n\n annotation_summaries = validation[\"annotation_summaries\"]\n if len(annotation_summaries) > 0:\n logger.warning(\n f\"{task_id}, {input_data_id}, {inspection_summary}, アノテーションにエラーがある。{annotation_summaries}\")\n is_valid = False\n\n return is_valid\n\n def main(self, args):\n annofabcli.utils.load_logging_config_from_args(args, __file__)\n logger.info(f\"args: {args}\")\n\n task_id_list = annofabcli.utils.read_lines_except_blank_line(args.task_id_file)\n\n with open(args.inspection_json) as f:\n inspection_json = json.load(f)\n\n self.complete_tasks_with_changing_inspection_status(args.project_id, task_id_list, args.inspection_status,\n inspection_json)\n\n\ndef parse_args(parser: argparse.ArgumentParser):\n parser.add_argument('--project_id', type=str, required=True, help='対象のプロジェクトのproject_id')\n\n parser.add_argument('--task_id_file', type=str, required=True, help='受入を完了するタスクのtask_idの一覧が記載されたファイル')\n\n parser.add_argument(\n '--inspection_json', type=str, required=True, help='未処置の検査コメントの一覧。このファイルに記載された検査コメントの状態を変更する。'\n 'jsonの構成は`Dict[TaskId, Dict[InputDatId, List[Inspection]]]。'\n '`print_unprocessed_inspections`ツールの出力結果である。')\n\n parser.add_argument('--inspection_status', type=str, required=True,\n choices=[\"error_corrected\", \"no_correction_required\"], help='未処置の検査コメントをどの状態に変更するか。'\n 'error_corrected: 対応完了,'\n 'no_correction_required: 対応不要')\n\n parser.set_defaults(subcommand_func=main)\n\n\ndef main(args):\n service = annofabapi.build_from_netrc()\n facade = AnnofabApiFacade(service)\n ComleteTasks(service, facade).main(args)\n\n\ndef add_parser(subparsers: argparse._SubParsersAction):\n subcommand_name = \"complete_tasks\"\n subcommand_help = \"未処置の検査コメントを適切な状態に変更して、タスクを受け入れ完了にする。\"\n description = (\"未処置の検査コメントを適切な状態に変更して、タスクを受け入れ完了にする。\" \"オーナ権限を持つユーザで実行すること。\")\n\n parser = annofabcli.utils.add_parser(subparsers, subcommand_name, subcommand_help, description)\n parse_args(parser)\n","sub_path":"examples/annofabcli/complete_tasks.py","file_name":"complete_tasks.py","file_ext":"py","file_size_in_byte":8331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"371123576","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# 业务包:通用函数\n\nimport json\nimport xmltodict\n\nimport core.tesExcel as excel\nimport core.tesLog as log\nimport core.tesMysql as mysql\nimport core.tesReport as report\nimport core.tesRequest as request\nimport gl\nimport randomUtil\n\n\n# filename = gl.FILE_NAME\nlogging = log.getLogger()\ncachedResult = {}\nglobal null , false , true\nnull = None\nfalse = False\ntrue = True\n\n\ndef prepare_data(host, user, password, db, sql):\n \"\"\"数据准备,添加测试数据\"\"\"\n if len(sql) != 0:\n logging.info(\"RunSql: %s\", sql)\n db = mysql.create_engine(user, password, db, host)\n res = 0\n if sql.find(';') > 0:\n sqllist = sql.split(';')\n for sqll in sqllist:\n if len(sqll) > 1:\n res += mysql.exc_sql(db, sqll)\n else:\n res = mysql.exc_sql(db, sql)\n logging.info(\"Run sql: the row number affected is %s\", res)\n mysql.db_close(db)\n return res\n else:\n logging.info(\"No sql need to execute!\")\n return\n\n\ndef get_excel_sheet(path, module):\n \"\"\"依据模块名获取sheet\"\"\"\n excel.open_excel(path)\n return excel.get_sheet(module)\n\n\ndef replace_holder(value):\n \"\"\"遍历字典替换占位符\"\"\"\n for holder in gl.PLACE_HOLDER:\n value = value.replace(holder, gl.PLACE_HOLDER[holder])\n return value\n\n\ndef get_prepare_sql(sheet):\n \"\"\"获取预执行SQL\"\"\"\n return replace_holder(excel.get_content(sheet, gl.SQL_ROW, gl.SQL_COL))\n\n\ndef get_prepare_del_sql(sheet):\n \"\"\"获取清理数据的SQL\"\"\"\n return replace_holder(excel.get_content(sheet, gl.DEL_ROW, gl.DEL_COL))\n\n\ndef pre_deal_testdata(testdata):\n \"\"\"处理请求报文,替换请求报文中被参数化掉的参数\"\"\"\n data = str(testdata)\n if '$RANDOM' in data:\n data = data.replace(\"$RANDOMUID\", randomUtil.randomUid()).replace(\"$RANDOMEMAIL\", randomUtil.randomEmail())\n if '$TIME' in data:\n data = data.replace(\"$TIME_NOW\", randomUtil.time_now()).replace(\"$TIME_TODAY\", randomUtil.time_today()).replace(\"$TIME_FUTURE\", randomUtil.time_future()).replace(\"$TIME_NEXT_MONTH\", randomUtil.time_next_month())\n if '$DATE' in data:\n data = data.replace(\"$DATE_TODAY\", randomUtil.date_today()).replace(\"$DATE_NEXT_MONTH\", randomUtil.date_next_month()).replace(\"$DATE_MONTH_LATER\", randomUtil.date_month_later())\n return data\n\n\ndef update_testdata_by_depend_case(testCase, resp, depend_detail):\n \"\"\"根据依赖用例的返回结果,更新测试用例数据\"\"\"\n pass\n\n\ndef get_test_cases(sheet):\n \"\"\"从excel获取用例,可以获取指定row的用例;默认获取所有需要执行的用例,返回testCase的dict\"\"\"\n testCases = []\n\n rows = excel.get_rows(sheet)\n for i in range(2, rows):\n \"\"\"判断is_run_switch为N时,不需执行用例\"\"\"\n if str(excel.get_content(sheet, i, gl.IS_RUN_SWITCH)) == 'N':\n continue\n \"\"\"获取所有需要执行的用例数据\"\"\"\n testCase = {}\n testCase[\"testNumber\"] = str(excel.get_content(sheet, i, gl.CASE_NUMBER))\n if \"\" in str(excel.get_content(sheet, i, gl.CASE_DATA)):\n tempData = json.dumps(dict(xmltodict.parse(str(excel.get_content(sheet, i, gl.CASE_DATA)))))\n testCase[\"testData\"] = json.loads(tempData)\n testCase[\"isDataXml\"] = 1\n else:\n testCase[\"testData\"] = eval(pre_deal_testdata(excel.get_content(sheet, i, gl.CASE_DATA)))\n testCase[\"isDataXml\"] = 0\n testCase[\"testName\"] = excel.get_content(sheet, i, gl.CASE_NAME)\n testCase[\"testUrl\"] = excel.get_content(sheet, i, gl.CASE_URL)\n testCase[\"testMethod\"] = excel.get_content(sheet, i, gl.CASE_METHOD)\n testCase[\"testDigestInfo\"] = excel.get_content(sheet, i, gl.CASE_Digest)\n testCase[\"testHeaders\"] = str(excel.get_content(sheet, i, gl.CASE_HEADERS))\n testCase[\"testHeaders\"] = 
eval(replace_holder(testCase[\"testHeaders\"]))\n testCase[\"testAssertKey\"] = excel.get_content(sheet, i, gl.CASE_KEY)\n testCase[\"testHopeCode\"] = excel.get_content(sheet, i, gl.CASE_CODE)\n testCase[\"store\"] = excel.get_content(sheet, i, gl.DATA_TO_STORE)\n testCase[\"replace\"] = excel.get_content(sheet, i, gl.DATA_TO_REPLACE)\n testCase[\"pre_sql\"] = excel.get_content(sheet, i, gl.PRE_SQL)\n testCases.append(testCase)\n\n return testCases\n\n\ndef excute_case(testCase, url):\n if testCase[\"isDataXml\"] == 1:\n testCase[\"testData\"] = xmltodict.unparse(testCase[\"testData\"])\n else:\n testCase[\"testData\"] = (json.dumps(testCase[\"testData\"]))\n # 执行接口调用\n digestusername = None\n digestpassword = None\n if testCase[\"testDigestInfo\"] is not None and testCase[\"testDigestInfo\"] != \"\":\n if testCase[\"testDigestInfo\"].find('|') > 0:\n \"\"\"\n 1、获取digest账号密码\n \"\"\"\n digestinfo = testCase[\"testDigestInfo\"].split('|')\n digestusername = digestinfo[0]\n digestpassword = digestinfo[1]\n testResponse, actualCode = request.api_test(testCase[\"testMethod\"], url + testCase[\"testUrl\"],\n testCase[\"testData\"], testCase[\"testHeaders\"],\n testCase[\"testAssertKey\"],\n digestusername=digestusername,\n digestpassword=digestpassword)\n return testResponse, actualCode\n else:\n testResponse, actualCode = request.api_test(testCase[\"testMethod\"], url + testCase[\"testUrl\"],\n testCase[\"testData\"], testCase[\"testHeaders\"],\n testCase[\"testAssertKey\"])\n return testResponse, actualCode\n\n\ndef store_data(keys, response, testCaseNumber):\n result = {}\n for k in keys.split(\"|\"):\n temp = eval(response)\n for key in k.split(\".\"):\n if key.find(\"#\") > 0:\n s = key.split(\"#\")\n temp = temp[s[0]]\n for l in range(1,len(s)):\n temp = temp[int(s[l])]\n else:\n temp = temp[key]\n result[k] = temp\n cachedResult[str(testCaseNumber)] = result\n return cachedResult\n\n\ndef replace_data(testCase):\n condition_list = testCase[\"replace\"].split(\"|\")\n for con in condition_list:\n num = con.split(\":\")[0]\n key_list = con.split(\":\")[1].split(\">\")\n source_key = key_list[0]\n source = cachedResult[str(num)]\n dest_key = key_list[1]\n dest = testCase[\"testData\"]\n for k in dest_key.split(\".\"):\n if k.find(\"#\") > 0:\n s = k.split(\"#\")\n dest = dest[s[0]]\n for l in range(1,len(s)):\n dest = dest[int(s[l])]\n else:\n dest = dest[k]\n temp = json.dumps(testCase[\"testData\"])\n temp1 = temp.replace(str(dest), str(source[source_key]))\n testCase[\"testData\"] = json.loads(temp1)\n\n\ndef run_test(sheet, module, url):\n \"\"\"执行测试用例\"\"\"\n total_fail = 0\n testDetailList = []\n testCases = get_test_cases(sheet)\n tsum = len(testCases)\n\n for testCase in testCases:\n logging.info(\"Number %s\", testCase[\"testNumber\"])\n logging.info(\"CaseNmae %s\", testCase[\"testName\"])\n fail = 0\n #判断是否有sql需要先执行\n if testCase[\"pre_sql\"] is not None and testCase[\"pre_sql\"] != \"\":\n prepare_data(gl.get_value(\"dbhost\"), gl.get_value(\"user\"), gl.get_value(\"password\"), gl.get_value(\"dbname\"), str(testCase[\"pre_sql\"]))\n # 判断是否需要替换参数化的testData\n if testCase[\"replace\"] is not None and testCase[\"replace\"] != \"\":\n replace_data(testCase)\n # 2.执行原用例\n testResponse, actualCode = excute_case(testCase, url)\n\n if testResponse is None:\n testResponse = 'response is null'\n\n # print str ( actualCode ), str ( expectCode )\n if actualCode is None or actualCode == 'RequestIsERROR':\n logging.error(\"Request is ERROR! 
%s\", testCase[\"testNumber\"])\n logging.info(\"----------Next Case----------\")\n fail += 1\n total_fail += 1\n elif str(actualCode) != str(testCase[\"testHopeCode\"]):\n logging.info(\"Fail: %s\", testCase[\"testNumber\"])\n logging.info(\"----------Next Case----------\")\n fail += 1\n total_fail += 1\n else:\n # 用例执行成功后,判断是否需要存储response值\n if testCase[\"store\"] is not None and testCase[\"store\"] != \"\":\n store_data(testCase[\"store\"], testResponse, testCase[\"testNumber\"])\n\n if fail > 0:\n result = False\n else:\n logging.info(\"Pass: %s\", testCase[\"testNumber\"])\n logging.info(\"----------Next Case----------\")\n result = True\n\n # 获取每次执行用例结果\n testDetail = report.get_test_detail(t_id=testCase[\"testNumber\"], t_name=testCase[\"testName\"],\n t_method=testCase[\"testMethod\"], t_url=testCase[\"testUrl\"],\n t_param=json.dumps(testCase[\"testData\"]), t_response=testResponse,\n t_hope=testCase[\"testHopeCode\"],\n t_actual=actualCode,\n t_result=result)\n testDetailList.append(testDetail)\n\n # 获取测试结果\n testSumInfo = report.get_test_suminfo(test_sum=tsum, test_success=tsum - total_fail, test_failed=total_fail)\n testDetailInfo = report.get_all_test_detail(testDetailList)\n\n return testSumInfo, testDetailInfo\n","sub_path":"oprsrc/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":9945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"302460981","text":"from equadratures import *\nimport numpy as np\n\ndef main():\n \n def fun(x):\n return 1.0/(1 + 50*(x[0]- 0.9)**2 + 50*(x[1] + 0.9)**2 )\n\n \n xvec = np.linspace(-1.,1.,40) \n x,y = np.meshgrid(xvec, xvec)\n z = 1.0/(1 + 50*(x - 0.9)**2 + 50*(y + 0.9)**2 ) \n stackOfPoints, x1, x2 = meshgrid(-1.0, 1.0, 40, 40)\n\n value_large = 10\n x1 = Parameter(param_type=\"Uniform\", lower=-1, upper=1, points=value_large)\n x2 = Parameter(param_type=\"Uniform\", lower=-1, upper=1, points=value_large)\n uq = Polyint([x1,x2])\n p, w = uq.getPointsAndWeights()\n tapprox = uq.getPolynomialApproximation(fun, stackOfPoints)\n tapprox = tapprox.reshape(40,40)\n tapprox = tapprox.T\n\n no_of_subsamples = 10\n x1 = Parameter(param_type=\"Uniform\", lower=-1, upper=1, points=no_of_subsamples)\n x2 = Parameter(param_type=\"Uniform\", lower=-1, upper=1, points=no_of_subsamples)\n parameters = [x1, x2]\n Hyperbolic = IndexSet(\"Hyperbolic basis\", orders=[no_of_subsamples-1,no_of_subsamples-1], q=1.0)\n e = Polylsq(parameters, Hyperbolic)\n minimum_subsamples = e.least_no_of_subsamples_reqd() \n e.set_no_of_evals(minimum_subsamples)\n p, w = uq.getPointsAndWeights()\n psmall = e.subsampled_quadrature_points\n\n\n \n zapprox = e.getPolynomialApproximation(stackOfPoints, fun) \n zapprox = zapprox.reshape(40,40)\n zapprox = zapprox.T\n plotting.contour_plot(x, y, zapprox, 'EQ_approx_Hyperbolic_q_1.0.eps', pts=p, other_pts=psmall)\n plotting.contour_plot(x, y, tapprox, 'Tensor_approx.eps', pts=p, other_pts=psmall)\n plotting.contour_plot(x, y , z, 'Real.eps')\n Hyperbolic.plot(filename='Hyperbolic_q_1.0.eps')\n\n\nmain()","sub_path":"siamuq2016/Figure8_9.py","file_name":"Figure8_9.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"40420520","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 3 16:37:44 2020\n\n@author: kerui\n\"\"\"\n\n'''\n删除训练集中多余的图片\n'''\nimport os\n\n# RGB图像路径\ndepth_root = r'E:\\Study\\THpractice\\code\\data\\training\\train_velodyne_reflectance'\nRGB = os.listdir(r'E:\\Study\\THpractice\\code\\data\\training\\train_image_2_lane')\n# 深度图路径\ndepth = os.listdir(r'E:\\Study\\THpractice\\code\\data\\training\\train_velodyne_reflectance')\n\n# 两个文件夹中共同含有的文件\ncommon_file = [file for file in depth if file not in RGB]\n\n\n\n# 删除depth中多余的文件\n\nfor file in common_file:\n common_file_path = os.path.join(depth_root, file)\n if os.path.exists(common_file_path):\n os.remove(common_file_path)\n print('delete file: %s' % common_file_path)\n else:\n print('no such file: %s' % common_file_path)","sub_path":"v4/delete_redundant_train.py","file_name":"delete_redundant_train.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"631411957","text":"# turtle을 이용한 sin 그래프 그리기\n\nimport math\nimport turtle\n\nt = turtle.Turtle()\n\n#x축 그리기\nt.pendown()\nfor x in range(360): #360까지 그림\n t.goto(x, 0)\nt.penup()\n\n#y축 그리기\nt.pendown()\nfor y in range((int)(math.sin(math.radians(90)) * 100)): #sin 그래프는 90도에서 최댓값\n t.goto(0, y)\nt.penup()\n\nt.pendown() #터틀 객체의 펜을 내림\nfor angle in range(360): # sin 그래프는 각도에 따라 변함\n y = math.sin(math.radians(angle)) # sin 값을 계산\n\n scaledX = angle # x축의 좌표값을 각도로 함\n scaledY = y * 100 #y축의 좌표값을 sin 값으로 함 (단, 표현하기에 작으��로 100을 곱함)\n t.goto(scaledX, scaledY) #터틀 객체를 좌표로 이동함\n\nt.penup() #터틀 객체의 펜을 올림\n\nturtle.mainloop()","sub_path":"chap04_loop/chap04_turtle_sin.py","file_name":"chap04_turtle_sin.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"186053843","text":"\"\"\"\nCalculate the decrease in lifetime for IEC caused by temporal coherence\n\"\"\"\nimport sys\nlibpath = 'C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\dissertation'\nif (libpath not in sys.path): sys.path.append(libpath)\n \nimport JR_Library.main as jr\nimport os, pickle, json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as scio\n\n# plot style\nplt.style.use(jr.stylepath('duke_paper'))\n\n# wind datasets\ndatasets = ['NREL','fluela','PM06','texastech']\n#datasets = ['fluela']\n\n# define turbine name and run name\nTurbName = 'WP5.0A04V00'\n\nzRef = 90.\nshear = 0.2\nUref_lo,Uref_hi = 5,22\n\nnbins = 50\nNumAvg = 20\n\ncov = 0.1\n\nNumSamps = 20*365*24*6 # number of 10-minute samples\n\nparameters = [['DEL-h','RootMFlp1','MN-m',1000.,12],\n ['DEL-h','HSShftTq','kN-m',1,5],\n ['DEL-h','TwrBsMyt','MN-m',1000,5]]\nWindParmSamp = ['Mean_Wind_Speed','Sigma_u','Tau_u','Concentration_u']\n\nDmgDictDir = 'C:\\\\Users\\\\jrinker\\\\Dropbox\\\\research\\\\' + \\\n 'processed_data'\n\n# base directory where the stats are stored\nBaseDir = 'C:\\\\Users\\\\jrinker\\\\Dropbox\\\\research\\\\' + \\\n 'processed_data'\nBaseStatDir = os.path.join(BaseDir,'proc_stats')\nSaveDir = 'C:\\\\Users\\\\jrinker\\\\Dropbox\\\\my_publications\\\\' + \\\n '2016-02-15_dissertation\\\\figures'\nBaseTurbDir = 'C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n 'dissertation\\\\FAST_models\\\\FAST7'\nRSMDir = 'C:\\\\Users\\\\jrinker\\\\Documents\\\\GitHub\\\\' + \\\n 'dissertation\\\\fast_analysis\\\\fitting_metamodels\\\\RSMs'\n\n# -----------------------------------------------------------------------------\n\n# load RSM dictionary for that turbine\nTurbRSMDictName = '{:s}_RSM.bdat'.format(TurbName)\nTurbRSMDictPath = os.path.join(RSMDir,TurbRSMDictName)\nwith open(TurbRSMDictPath,'rb') as f:\n TurbRSMDict = pickle.load(f)\n \n# load turbine dictionary to get hub height\nTurbDictPath = os.path.join(BaseTurbDir,TurbName,'parameters',\n '{:s}_Dict.dat'.format(TurbName))\nwith open(TurbDictPath,'r') as DictFile:\n zHub = json.load(DictFile)['HH']\n \nfor dataset in datasets:\n print('Processing dataset \\\"{:s}\\\"'.format(dataset))\n \n DmgDictName = 'UltIncr_{:s}.txt'.format(dataset)\n \n # load composite distribution information\n dist_fname = '{:s}_6dist_comp_parms.txt'.format(dataset)\n dist_fpath = os.path.join(BaseDir,dist_fname)\n with open(dist_fpath,'r') as f:\n dist_dict = json.load(f)\n p_parms_opt = dist_dict['p_parms_opt']\n parms = dist_dict['parms']\n parms[2] = 'Tau_u'\n \n # get height index closest to hub height\n heights = jr.datasetSpecs(dataset)['IDs']\n iH = np.abs(heights - zHub).argmin()\n zSamp = heights[iH]\n if dataset == 'PM06': zSamp = 1.5 # measurement height for Plaine Morte\n \n # calculate wind speed range, sample from empirical distribution\n Usamp_lo,Usamp_hi = Uref_lo*(zSamp/zRef)**shear, \\\n Uref_hi*(zSamp/zRef)**shear\n WindParms = jr.SampleWindParameters(NumSamps,dataset,BaseDir,WindParmSamp,iH,\n URange=[Usamp_lo,Usamp_hi])\n \n # create wind parameter array\n x = np.empty((NumSamps,4))\n x[:,0],x[:,1],x[:,2],x[:,3] = WindParms[:,0]/(zSamp/zRef)**shear,\\\n WindParms[:,1]/(WindParms[:,0]/(zSamp/zRef)**shear),\\\n np.log10(WindParms[:,0]*WindParms[:,2]),\\\n WindParms[:,3]\n \n del WindParms\n \n # loop through statistics\n iPlot = 0\n \n Fs = np.empty((len(parameters),2))\n ns = np.empty((len(parameters),2,nbins))\n bs = np.empty((len(parameters),2,nbins+1))\n for istat in range(len(parameters)):\n 
stat,parm,units,scale,m = parameters[istat]\n \n print(' {:s} {:s}'.format(stat,parm))\n \n # load the RSM data for that statistic\n DictKey = '{:s}_{:s}'.format(parm,stat)\n RSMDict = TurbRSMDict[DictKey]\n ps, cs = RSMDict['ps_red'], RSMDict['cs_red']\n \n # -------------- with temporal coherence ----------------------------------\n \n # calculate mean loads\n Xv = jr.myvander(x,ps)\n mean_loads = np.dot(Xv,cs)\n \n # add randomness\n randn = np.random.normal(size=NumSamps)\n loads = mean_loads + mean_loads*cov*randn\n \n # calculate and save lifetime metric\n Fs[istat,1] = np.mean(np.sort(loads)[-NumAvg:])\n \n # calculate and save histogram\n n, b = np.histogram(loads,bins=nbins,normed=True)\n ns[istat,1] = n\n bs[istat,1] = b\n \n # -------------- without temporal coherence -------------------------------\n \n x[:,3] = 0.\n \n # calculate mean loads\n Xv = jr.myvander(x,ps)\n mean_loads = np.dot(Xv,cs)\n \n # add randomness\n randn = np.random.normal(size=NumSamps)\n loads = mean_loads + mean_loads*cov*randn\n \n # calculate and save lifetime metric\n Fs[istat,0] = np.mean(np.sort(loads)[-NumAvg:])\n \n # calculate and save histogram\n n, b = np.histogram(loads,bins=nbins,normed=True)\n ns[istat,0] = n\n bs[istat,0] = b\n \n del(randn)\n \n # save data dictionary using pickle\n OutDict = {}\n OutDict['TurbName'] = TurbName\n OutDict['parameters'] = parameters\n OutDict['dataset'] = dataset\n OutDict['Fs'] = Fs\n OutDict['ns'] = ns\n OutDict['bs'] = bs\n DmgDictPath = os.path.join(DmgDictDir,DmgDictName)\n with open(DmgDictPath,'wb') as DictFile:\n pickle.dump(OutDict,DictFile)\n print('\\nDictionary {:s} saved.'.format(DmgDictName))\n\n\n","sub_path":"fast_analysis/monte_carlo/calc-TC_ultimate_increase_data.py","file_name":"calc-TC_ultimate_increase_data.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"97986481","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile 1:\n\tflag, img = cap.read()\n\tif flag:\n\t\timgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t\tret,thresh = cv2.threshold(imgray,127,255,0)\n\t\tcontours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\t\timg = cv2.drawContours(img, contours, -1, (0,255,0), 3)\n\n\t\tcv2.imshow(\"Img\", img)\n\t\tif cv2.waitKey(25) > 0:\n\t\t\tbreak\n\n","sub_path":"opencv/trackColor/trackColor2.py","file_name":"trackColor2.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"560005057","text":"\"\"\" identityAPI.shared.lib.apiHelper\n\n This module contains various helper functions for the Identity API.\n\"\"\"\nimport datetime\n\nfrom django.conf import settings\nfrom django.http import HttpResponseNotAllowed, HttpResponseBadRequest\nfrom django.utils import timezone\n\nfrom identityAPI.shared.models import *\n\n#############################################################################\n\ndef process_params(request, method=\"GET_OR_POST\",\n required_params=[], optional_params=[]):\n \"\"\" Extract the parameters from an HTTP \"POST\" request.\n\n The parameters are as follows:\n\n 'request'\n\n The HttpRequest object passed to our Django view function.\n\n 'method'\n\n The HTTP method which we should accept. One of:\n\n \"GET\"\n \"POST\"\n \"GET_OR_POST\"\n\n 'required_params'\n\n A list of parameters which are required by this view.\n\n 'optional_params'\n\n A list of parameters which the view can optionally accept.\n\n Upon completion, we return a (success, response) tuple, where 'success'\n is True if and only if the parameters supplied to the view were\n acceptable. If 'success' is True, 'response' will be a dictionary\n mapping parameter names to their value; otherwise, 'response' will be a\n subclass of HttpResponse which should be returned back to the caller to\n tell them what was wrong with the request.\n \"\"\"\n # Check that the HTTP method is correct, and extract our CGI parameters.\n\n if method == \"GET\":\n if request.method != \"GET\":\n return (False, HttpResponseNotAllowed([\"GET\"]))\n else:\n raw_params = request.GET\n elif method == \"POST\":\n if request.method != \"POST\":\n return (False, HttpResponseNotAllowed([\"POST\"]))\n else:\n raw_params = request.POST\n elif method == \"GET_OR_POST\":\n if request.method not in [\"GET\", \"POST\"]:\n return (False, HttpResponseNotAllowed([\"GET\", \"POST\"]))\n else:\n if request.method == \"GET\":\n raw_params = request.GET\n else:\n raw_params = request.POST\n\n # Process the parameters.\n\n params = {}\n for param in raw_params.keys():\n params[param] = raw_params[param]\n for param in request.FILES.keys():\n params[param] = request.FILES[param]\n\n for param in required_params:\n if param not in params:\n return (False, HttpResponseBadRequest(\"missing required '\" +\n param + \"' parameter\"))\n\n for param in params.keys():\n if param not in required_params and param not in optional_params:\n return (False, HttpResponseBadRequest(\"unexpected parameter '\" +\n param + \"'\"))\n\n return (True, params)\n\n#############################################################################\n\ndef create_session_for(user):\n \"\"\" Create and return a new Session object for the given user.\n \"\"\"\n session = Session()\n session.token = uuid.uuid4().hex\n session.user = user\n session.created_at = timezone.now()\n session.session_length = settings.SESSION_LENGTH # Hardwired for now.\n session.save()\n\n return session\n\n#############################################################################\n\ndef is_session_valid(session_token):\n \"\"\" Return True if the session with the given token is still valid.\n \"\"\"\n session = get_session(session_token)\n if session == None:\n return False\n\n expires = (session.created_at +\n datetime.timedelta(minutes=session.session_length))\n if expires < timezone.now():\n return False\n\n return True\n\n#############################################################################\n\ndef get_session(session_token):\n \"\"\" Return the 
Session object for the given session token.\n\n If there is no session with the given token, we return None.\n \"\"\"\n try:\n return Session.objects.get(token=session_token)\n except Session.DoesNotExist:\n return None\n\n#############################################################################\n\ndef get_remote_ip(request):\n \"\"\" Return the remote IP address associated with the given HTTPRequest.\n\n When running behind an nginx server, the server should have the\n following option set:\n\n proxy_set_header X-Real-IP $remote_addr;\n\n This ensures that the caller's real IP address will be available via\n the HTTP_X_REAL_IP header. If this header exists, we return that\n value. Otherwise, we return the value of the REMOTE_ADDR header.\n \"\"\"\n if \"HTTP_X_REAL_IP\" in request.META:\n return request.META['HTTP_X_REAL_IP']\n else:\n return request.META['REMOTE_ADDR']\n\n\n","sub_path":"identityAPI/shared/lib/apiHelper.py","file_name":"apiHelper.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
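The process_params record above doubles as a small protocol: a view unpacks a (success, response) pair, returns the error response on failure, and otherwise reads the params dict. A minimal, hypothetical caller sketch (the view name, parameter names, and authenticate_user helper are illustrative assumptions, not part of the record):

from django.http import JsonResponse

def login_view(request):
    # Unpack the (success, response) contract described in the docstring.
    success, response = process_params(request, method="POST",
                                       required_params=["username", "password"])
    if not success:
        return response  # HttpResponseNotAllowed or HttpResponseBadRequest
    user = authenticate_user(response["username"], response["password"])  # assumed helper
    return JsonResponse({"token": create_session_for(user).token})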
+{"seq_id":"179300343","text":"import tensorflow as tf\n\nfrom utils.sparse_molecular_dataset import SparseMolecularDataset\nfrom utils.trainer import Trainer\nfrom utils.utils import *\n\nfrom models.gan import GraphGANModel\nfrom models import encoder_rgcn, decoder_adj, decoder_dot, decoder_rnn\n\nfrom optimizers.gan import GraphGANOptimizer\nimport argparse\nimport os\nfrom rdkit import rdBase\n\nrdBase.DisableLog('rdApp.error')\nrdBase.DisableLog('rdApp.warning')\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-b\", \"--batch_size\", type=int, default=32, required=False)\nparser.add_argument(\"-d\", \"--dropout\", type=float, default=0.0, required=False)\nparser.add_argument(\"--n_critic\", type=int, default=5, required=False)\nparser.add_argument(\"--metrics\", type=str, default=\"logp,sas,qed\", required=False)\nparser.add_argument(\"--n_samples\", type=int, default=5000, required=False)\nparser.add_argument(\"-z\", \"--z_dim\", type=int, default=32, required=False)\nparser.add_argument(\"-l\", \"--lambd\", type=float, default=1.0, required=False)\nparser.add_argument(\"--lambd_SL\", type=float, default=1.0, required=False)\nparser.add_argument(\"-e\", \"--epochs\", type=int, default=300, required=False)\nparser.add_argument(\"-a\", \"--activation_epoch\", type=int, default=300, required=False)\nparser.add_argument(\"--activation_epoch_SL\", type=int, default=300, required=False)\nparser.add_argument(\"-s\", \"--save_every\", type=int, default=10, required=False)\nparser.add_argument(\"--lr\", type=float, default=1e-3, required=False)\nparser.add_argument(\"--batch_discriminator\", type=bool, default=True, required=False)\nparser.add_argument(\"--name\", type=str, default=\"./output\", required=True)\nparser.add_argument(\"--sl_use_sigmoid\", type=bool, default=False, required=False)\nparser.add_argument(\"--discrete_z\", type=int, default=0, required=False)\nparser = parser.parse_args()\n\nbatch_dim = parser.batch_size\ndropout = parser.dropout\nn_critic = parser.n_critic\n\n\"\"\"\nQED = druglikeness\nlogp = solubility\nsas = synthetizability\n\"\"\"\nmetric = parser.metrics\n# metric = 'validity'\nn_samples = parser.n_samples\nz_dim = parser.z_dim\n\nla = parser.lambd\nla_SL = parser.lambd_SL\nepochs = parser.epochs\npast_epoch = parser.activation_epoch\npast_epoch_SL = parser.activation_epoch_SL\nsave_every = parser.save_every\nlr = parser.lr\nbatch_discriminator = parser.batch_discriminator\nsl_use_sigmoid = parser.sl_use_sigmoid\nname = parser.name\n\ndata = SparseMolecularDataset()\n# data.load('data/gdb9_9nodes.sparsedataset')\ndata.load('data/qm9_5k.sparsedataset')\n\nsteps = (len(data) // batch_dim)\n\nif not os.path.exists(name):\n os.makedirs(name)\n\nwith open(\"%s/parameters.txt\" % name, \"w\") as file:\n for arg in vars(parser):\n print(arg, getattr(parser, arg))\n file.write(\"%s = %s\\n\" % (arg, getattr(parser, arg)))\n\n\ndef train_fetch_dict(i, steps, epoch, epochs, min_epochs, model, optimizer):\n a = [optimizer.train_step_G] if i % n_critic == 0 else [optimizer.train_step_D]\n b = [optimizer.train_step_V] if i % n_critic == 0 and la < 1 else []\n return a + b\n\n\ndef train_feed_dict(i, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim):\n mols, _, _, a, x, _, _, _, _ = data.next_train_batch(batch_dim)\n embeddings = model.sample_z(batch_dim)\n\n if la < 1 or la_SL < 1:\n\n if i % n_critic == 0:\n rewardR = reward(mols)\n\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax],\n feed_dict={model.training: False, 
model.embeddings: embeddings})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n mols = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]\n\n rewardF = reward(mols)\n\n feed_dict = {model.edges_labels: a,\n model.nodes_labels: x,\n model.embeddings: embeddings,\n model.rewardR: rewardR,\n model.rewardF: rewardF,\n model.training: True,\n model.dropout_rate: dropout,\n optimizer.la: la if epoch > past_epoch else 1.0,\n optimizer.la_SL: la_SL if epoch > past_epoch_SL else 1.0}\n\n else:\n feed_dict = {model.edges_labels: a,\n model.nodes_labels: x,\n model.embeddings: embeddings,\n model.training: True,\n model.dropout_rate: dropout,\n optimizer.la: la if epoch > past_epoch else 1.0,\n optimizer.la_SL: la_SL if epoch > past_epoch_SL else 1.0}\n else:\n feed_dict = {model.edges_labels: a,\n model.nodes_labels: x,\n model.embeddings: embeddings,\n model.training: True,\n model.dropout_rate: dropout,\n optimizer.la: 1.0,\n optimizer.la_SL: 1.0}\n\n return feed_dict\n\n\ndef eval_fetch_dict(i, epochs, min_epochs, model, optimizer):\n dict = {'loss D': optimizer.loss_D, 'loss G': optimizer.loss_G,\n 'loss RL': optimizer.loss_RL, 'loss V': optimizer.loss_V,\n 'la': optimizer.la, 'loss SL': optimizer.loss_SL}\n for name, wmc in optimizer.SL_log_dict.items():\n dict[name] = wmc\n return dict\n\n\ndef eval_feed_dict(i, epochs, min_epochs, model, optimizer, batch_dim):\n mols, _, _, a, x, _, _, _, _ = data.next_validation_batch()\n embeddings = model.sample_z(a.shape[0])\n\n rewardR = reward(mols)\n\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax],\n feed_dict={model.training: False, model.embeddings: embeddings})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n mols = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]\n\n rewardF = reward(mols)\n\n feed_dict = {model.edges_labels: a,\n model.nodes_labels: x,\n model.embeddings: embeddings,\n model.rewardR: rewardR,\n model.rewardF: rewardF,\n model.training: False}\n return feed_dict\n\n\ndef test_fetch_dict(model, optimizer):\n return {'loss D': optimizer.loss_D, 'loss G': optimizer.loss_G,\n 'loss RL': optimizer.loss_RL, 'loss V': optimizer.loss_V,\n 'la': optimizer.la}\n\n\ndef test_feed_dict(model, optimizer, batch_dim):\n mols, _, _, a, x, _, _, _, _ = data.next_test_batch()\n embeddings = model.sample_z(a.shape[0])\n\n rewardR = reward(mols)\n\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax],\n feed_dict={model.training: False, model.embeddings: embeddings})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n mols = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]\n\n rewardF = reward(mols)\n\n feed_dict = {model.edges_labels: a,\n model.nodes_labels: x,\n model.embeddings: embeddings,\n model.rewardR: rewardR,\n model.rewardF: rewardF,\n model.training: False}\n return feed_dict\n\n\ndef reward(mols):\n rr = 1.\n for m in ('logp,sas,qed,unique' if metric == 'all' else metric).split(','):\n\n if m == 'np':\n rr *= MolecularMetrics.natural_product_scores(mols, norm=True)\n elif m == 'logp':\n rr *= MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=True)\n elif m == 'sas':\n rr *= MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=True)\n elif m == 'qed':\n rr *= MolecularMetrics.quantitative_estimation_druglikeness_scores(mols, norm=True)\n elif m == 'novelty':\n rr *= MolecularMetrics.novel_scores(mols, data)\n elif m == 'dc':\n rr *= MolecularMetrics.drugcandidate_scores(mols, data)\n elif m == 
'unique':\n rr *= MolecularMetrics.unique_scores(mols)\n elif m == 'diversity':\n rr *= MolecularMetrics.diversity_scores(mols, data)\n elif m == 'validity':\n rr *= MolecularMetrics.valid_scores(mols)\n else:\n raise RuntimeError('{} is not defined as a metric'.format(m))\n\n return rr.reshape(-1, 1)\n\n\ndef _eval_update(i, epochs, min_epochs, model, optimizer, batch_dim, eval_batch):\n mols = samples(data, model, session, model.sample_z(n_samples), sample=True)\n m0, m1 = all_scores(mols, data, norm=True)\n m0 = {k: np.array(v)[np.nonzero(v)].mean() for k, v in m0.items()}\n m0.update(m1)\n return m0\n\n\ndef _test_update(model, optimizer, batch_dim, test_batch):\n mols = samples(data, model, session, model.sample_z(n_samples), sample=True)\n m0, m1 = all_scores(mols, data, norm=True)\n m0 = {k: np.array(v)[np.nonzero(v)].mean() for k, v in m0.items()}\n m0.update(m1)\n return m0\n\n\n# model\nmodel = GraphGANModel(data.vertexes,\n data.bond_num_types,\n data.atom_num_types,\n z_dim,\n decoder_units=(128, 256, 512),\n discriminator_units=((128, 64), 128, (128, 64)),\n decoder=decoder_adj,\n discriminator=encoder_rgcn,\n soft_gumbel_softmax=True,\n hard_gumbel_softmax=False,\n batch_discriminator=batch_discriminator,\n discrete_z=parser.discrete_z)\n\n# optimizer\noptimizer = GraphGANOptimizer(model, learning_rate=lr, feature_matching=False, sl_use_sigmoid=sl_use_sigmoid)\n\n# session\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\n# trainer\ntrainer = Trainer(model, optimizer, session)\n\nprint('Parameters: {}'.format(np.sum([np.prod(e.shape) for e in session.run(tf.trainable_variables())])))\n\ntrainer.train(batch_dim=batch_dim,\n epochs=epochs,\n steps=steps,\n train_fetch_dict=train_fetch_dict,\n train_feed_dict=train_feed_dict,\n eval_fetch_dict=eval_fetch_dict,\n eval_feed_dict=eval_feed_dict,\n test_fetch_dict=test_fetch_dict,\n test_feed_dict=test_feed_dict,\n save_every=save_every,\n directory=name,\n _eval_update=_eval_update,\n _test_update=_test_update)\n","sub_path":"src/thirdparties/MolGAN/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":10323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"223728034","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport comparator.tools\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Pack',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=256)),\n ],\n ),\n migrations.CreateModel(\n name='Result',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('result', comparator.tools.RangedFloatField()),\n ],\n ),\n migrations.CreateModel(\n name='Song',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=256)),\n ('difficulty', models.IntegerField()),\n ('pack', models.ForeignKey(to='comparator.Pack')),\n ],\n ),\n migrations.AddField(\n model_name='result',\n name='song',\n field=models.ForeignKey(to='comparator.Song'),\n ),\n migrations.AddField(\n model_name='result',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"comparator/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"24690973","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator\nfrom models import Question, Answer\nfrom forms import AskForm, AnswerForm, SignupForm, LoginForm\nfrom django.contrib.auth import authenticate, login as dlogin, logout as dlogout\n\n\ndef home(request):\n template = 'home.html'\n limit = 10\n \n try:\n page = int(request.GET.get('page', 1))\n except:\n page = 1\n\n paginator = Paginator(Question.objects.new(), limit)\n questions = paginator.page(page)\n context = {\n 'questions': questions,\n }\n return render(request, template, context)\n\n\ndef popular(request):\n template = 'popular.html'\n limit = 10\n \n try:\n page = int(request.GET.get('page', 1))\n except:\n page = 1\n\n paginator = Paginator(Question.objects.popular(), limit)\n questions = paginator.page(page)\n context = {\n 'questions': questions,\n }\n return render(request, template, context)\n\n\ndef question(request, question_pk):\n template = 'question.html'\n question = get_object_or_404(Question, pk=question_pk)\n user = request.user\n\n if request.method == 'POST':\n answer_form = AnswerForm(request.POST)\n if answer_form.is_valid():\n user = request.user\n if not user.is_authenticated():\n return redirect('login')\n answer = answer_form.save(user=user)\n #return redirect('question', question.pk)\n else:\n answer_form = AnswerForm(initial={'question': question.pk})\n\n context = {\n 'user': user,\n 'question': question,\n 'answer_form': answer_form,\n }\n\n return render(request, template, context)\n\n\ndef ask(request):\n template = 'ask.html'\n\n if request.method == 'POST':\n ask_form = AskForm(request.POST)\n\n if ask_form.is_valid():\n user = request.user\n if not user.is_authenticated():\n return redirect('login')\n question = ask_form.save(user=user)\n return redirect('question', question.pk)\n else:\n ask_form = AskForm()\n\n context = {\n 'ask_form': ask_form,\n }\n\n return render(request, template, context)\n\n\ndef signup(request):\n template = 'signup.html'\n if request.method == 'POST':\n signup_form = SignupForm(request.POST)\n if signup_form.is_valid():\n signup_form.save()\n data = signup_form.cleaned_data\n user = authenticate(username=data['username'], password=data['password'])\n if user is not None:\n dlogin(request, user)\n return redirect('home')\n else:\n # the authentication system was unable to verify the username and password\n print(\"The username and password were incorrect.\")\n else:\n signup_form = SignupForm()\n\n context = {\n 'signup_form': signup_form,\n }\n return render(request, template, context)\n\n\ndef login(request):\n template = 'login.html'\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n data = login_form.cleaned_data\n user = authenticate(username=data['username'], password=data['password'])\n if user is not None:\n dlogin(request, user)\n return redirect('home')\n else:\n # the authentication system was unable to verify the username and password\n print(\"The username and password were incorrect.\")\n else:\n login_form = LoginForm()\n\n context = {\n 'login_form': login_form,\n }\n return render(request, template, context)\n\ndef logout(request):\n dlogout(request)\n return redirect('home')\n\ndef test(request, *args, **kwargs):\n return 
HttpResponse('OK')\n","sub_path":"ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"402781355","text":"# standard library\nfrom os.path import isfile\n\n# fabric\nfrom fabric.api import cd\nfrom fabric.api import env\nfrom fabric.api import local\nfrom fabric.api import prefix\nfrom fabric.api import put\nfrom fabric.api import run\nfrom fabric.api import task\nfrom fabric.colors import green\nfrom fabric.colors import red\nfrom fabric.contrib import files\n\n# local tasks\nfrom . import deb_handler\nfrom . import gunicorn\nfrom . import memcached\nfrom . import nginx\nfrom .db import backup_db\nfrom .db import migrate\nfrom .utils import confirm_target\nfrom .utils import git_checkout\nfrom .utils import git_clone\n\n\n@task\ndef update():\n \"\"\" Updates server repository. \"\"\"\n branch = local('git rev-parse --abbrev-ref HEAD', capture=True)\n\n if env.branch != branch:\n msg = 'Wrong branch. You need to be on branch \"{}\" to deploy'.format(\n env.branch\n )\n print(red(msg))\n exit()\n\n # validate_deployment()\n update_server()\n\n\ndef update_helper(root_dir):\n with cd(root_dir):\n run('git pull')\n\n\n@task\ndef validate_deployment():\n local('python manage.py test --failfast')\n\n\n@task\ndef update_server():\n \"\"\" Updates server repository. \"\"\"\n print(green('backup database before updating'))\n backup_db()\n\n update_helper(env.server_root_dir)\n\n with cd(env.server_root_dir):\n print(green('installing pipenv requirements'))\n run('pipenv sync')\n\n print(green('installing npm packages'))\n run('npm ci')\n\n print(green('compiling webpack packages'))\n run('npm run build')\n\n print(green('collecting static files'))\n run('pipenv run python manage.py collectstatic --noinput')\n\n print(green('compiling translations'))\n run('pipenv run python manage.py compilemessages')\n\n print(green('Migrate database'))\n migrate()\n\n\n@task\ndef restart():\n \"\"\" Restarts gunicorn and nginx. \"\"\"\n gunicorn.restart()\n nginx.restart()\n\n\n@task\ndef update_restart():\n \"\"\" Updates server repository and restarts gunicorn and nginx \"\"\"\n update()\n restart()\n\n\n@task\ndef stop():\n \"\"\" Stops gunicorn and nginx. \"\"\"\n gunicorn.stop()\n nginx.stop()\n\n\n@task\ndef start():\n \"\"\" Starts gunicorn and nginx. \"\"\"\n gunicorn.start()\n nginx.start()\n\n\n@task\ndef db_reset():\n \"\"\" Resets database. \"\"\"\n\n confirm_target('Are you sure you want to reset the database?')\n\n # backup database before resetting\n backup_db()\n with cd(env.server_root_dir):\n with prefix('pipenv shell'):\n run('./reset.sh')\n\n\n@task\ndef set_deploy_key():\n # check if the ssh key is already present\n if files.exists('.ssh/id_rsa'):\n # key already deployed\n return\n\n # put ssh key\n ssh_key = '%s/fabfile/templates/ssh_key'\n ssh_key %= env.local_root_dir\n\n if not isfile(ssh_key):\n local('ssh-keygen -t rsa -f %s' % ssh_key)\n\n run('mkdir -p -m 0700 .ssh')\n put(ssh_key, '.ssh/id_rsa', mode=0o600)\n pub_key = '{}.pub'.format(ssh_key)\n put(pub_key, '.ssh/id_rsa.pub', mode=0o644)\n\n\n@task\ndef initial_deploy():\n \"\"\" Performs a complete deploy of the project. 
\"\"\"\n\n # put ssh key\n set_deploy_key()\n\n # github host handshake\n run('ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts')\n # bitbucket host handshake\n run('ssh-keyscan -t rsa bitbucket.org >> ~/.ssh/known_hosts')\n\n # install necessary dependencies to handle the project\n install_project_handling_dependencies()\n\n # clone repository\n git_clone(env.server_git_url, env.server_root_dir)\n\n # checkout branch\n with cd(env.server_root_dir):\n git_checkout(env.branch)\n\n # dependencies installation (quickstart)\n with cd(env.server_root_dir):\n run('./quickstart.sh')\n\n # gunicorn installation and configuration\n gunicorn.install()\n gunicorn.add_gunicorn_service()\n gunicorn.start()\n\n # nginx installation and configuration\n nginx.install()\n nginx.add_django_site()\n nginx.start()\n\n # memcached installation and configuration\n memcached.install()\n memcached.restart()\n\n\n@task\ndef install_project_handling_dependencies():\n # install zip dependencies\n deb_handler.install('zip')\n deb_handler.install('unzip')\n\n\n@task\ndef run_django_command(command):\n with cd(env.server_root_dir):\n run('pipenv run python manage.py {}'.format(command))\n","sub_path":"fabfile/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"199617697","text":"from .algoritmos import distancia_euclidiana\n\nclass Particula:\n def __init__(self,id=0,origen_x=0,origen_y=0,destino_x=0,destino_y=0,\n velocidad=0,red=0,green=0,blue=0,distancia=0):\n self.__id = id\n self.__origen_x = origen_x\n self.__origen_y = origen_y\n self.__destino_x = destino_x\n self.__destino_y = destino_y\n self.__velocidad = velocidad\n self.__red = red\n self.__green = green\n self.__blue = blue\n self.__distancia = distancia_euclidiana(origen_x, origen_y, destino_x, destino_y)\n\n def __str__(self):\n return (\n 'ID: ' + str(self.__id) + '\\n' +\n 'Origen en x: ' + str(self.__origen_x) + '\\n' +\n 'Origen en y: ' + str(self.__origen_y) + '\\n' +\n 'Destino en x: ' + str(self.__destino_x) + '\\n' +\n 'Destino en y: ' + str(self.__destino_y) + '\\n' +\n 'Velocidad: ' + str(self.__velocidad) + ' m/s \\n' +\n 'Rojo: ' + str(self.__red) + '\\n' +\n 'Verde: ' + str(self.__green) + '\\n' +\n 'Azul: ' + str(self.__blue) + '\\n' +\n 'Distancia: ' + str(self.__distancia) + '\\n' \n )\n\n# l01 = Particula(id=100,origen_x=56,origen_y=34,destino_x=345,destino_y=400,\n# velocidad=34,red=23,green=123,blue=200,distancia=0)\n# print(l01)\n# l02 = Particula(id=100,origen_x=156,origen_y=134,destino_x=45,destino_y=409,\n# velocidad=34,red=23,green=123,blue=200,distancia=0)\n# print(l02)","sub_path":"particula.py","file_name":"particula.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"562353888","text":"#!/usr/bin/python3\n\n\nfrom src.excel.finder.ExcelReader import ExcelUtils\nfrom src.util.db.DBExecutor import DbUtils\n\n\n# 比对mysql苹果订单id和excel导出的某一列,\n# 寻找excel中比mysql中多了哪些订单\ndef take_lost_id_list():\n exceltool = ExcelUtils('/home/caikun/.deepinwine/Deepin-WXWork/drive_c/users/caikun/Downloads/55024-20190725.xlsx')\n dbut = DbUtils(\"192.168.?.?\", \"user\", \"pwd\", \"gamesdk_ios\", 14051)\n\n print(\"It's a rational plan \")\n lost_app_id = []\n for i in exceltool.get_rows():\n # 筛掉4列为0的数据\n if i[4].value != 0:\n # print(i[4])\n tar_id = str(i[3].value).replace(\"\\n\", \"\")\n result_one = dbut.fetch_one('SELECT 1 FROM standalone_orders WHERE transaction_id = \"%s\" ' % (\n tar_id))\n if result_one is None:\n print(tar_id)\n lost_app_id.append(tar_id)\n return lost_app_id\n\n\nif __name__ == '__main__':\n take_lost_id_list()\n print(\", therefore bound to success\")\n\n\n\n","sub_path":"src/excel/finder/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"98946424","text":"import requests\nfrom typing import List\n\n\nclass MozillaIoTClient:\n def __init__(self, host: str, token: str):\n \"\"\"\n Client for interacting with the Mozilla IoT API\n \"\"\"\n self.host = host\n self.headers = {\n \"Authorization\": \"Bearer {}\".format(token),\n \"Content-Type\": \"application/json\",\n }\n self.things = self.get_things()\n self.entity_names: List[str] = [\n thing[\"title\"] for thing in self.things if \"title\" in thing\n ]\n\n def _request(self, method: str, endpoint: str, data: dict = None):\n\n url = self.host + endpoint\n\n response = requests.request(method, url, json=data, headers=self.headers)\n\n response.raise_for_status()\n\n return response\n\n def get_things(self):\n if self.host:\n return self._request(\"GET\", \"/things/\")\n return []\n","sub_path":"mozilla_client.py","file_name":"mozilla_client.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"111815116","text":"class Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n #since all the number are selected from 0 to n inclusivly\n #totally there are n+1 numbers there, and a len of n array is given\n # if we use the XOR, then if all if we XOR 2n+1 number rature than 2(n+1)\n #then the missing number is prepresent\n #first of all, all the index from 0 ~ n-1 will be XOR\n #all the values, from 0 ~ n will be selected n numbers, then either one of the 0 - n-1 or the n is missed\n #if we intialized the origianl ans to n, then 0-n are garenteed to be XOR once, and only the missing number is XOR only once.\n #all other selected numbers are XORed twice.\n # a number XOR itself will be zero\n # zero XOR any number will become itself\n ans = len(nums)\n for i in range(ans):\n ans ^= i\n ans ^= nums[i]\n return ans","sub_path":"268.py","file_name":"268.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"629839915","text":"from selenium import webdriver\nimport pandas as pd\nimport datetime\n\ndriver = webdriver.Chrome(executable_path=\"C:\\\\chrme\\\\chromedriver\")\ndriver.set_window_size(1024, 768) # optional\ndriver.get('http://www.payscale.com/college-salary-report')\ndriver.find_element_by_xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[2]/div/div/a/button').click()\ndriver.find_element_by_xpath('//*[@id=\"collegeSalaryReportContent\"]/div/div/div[2]/div/div/div/div[2]/a').click()\n\n# for title\nth = driver.find_elements_by_xpath('//*[@id=\"collegeSalaryReportContent\"]/div/div/div[2]/div/div/table/thead/tr/th')\nhdata = [header.text for header in th]\nlist1 = list(filter(None, hdata))\nheader = [x.strip() for x in list1]\ndel header[1]\nheader.insert(1, 'Name')\nheader.insert(7, 'Ranking Name')\nheader.insert(8, 'Scope')\nheader.insert(9, 'Ranking Year')\nheader.insert(10, 'Publication Date')\nprint(header)\ndata1 =[]\ncdata = ()\nnow = datetime.datetime.now()\n\n#for data\nfor tr in driver.find_elements_by_xpath('//*[@id=\"collegeSalaryReportContent\"]/div/div/div[2]/div/div/table/tbody/tr'):\n tds = tr.find_elements_by_tag_name('td')\n data = (td.text for td in tds)\n cdata = (list(filter(None, data)))\n del cdata[1]\n cdata.insert(7, 'Payscale_College_Salary_Report')\n cdata.insert(8, 'National')\n cdata.insert(9, now.year)\n cdata.insert(10, pd.Timestamp(str(int(now.year) - 1) + '-09-20'))\n print(cdata)\n data1.insert(len(data1),cdata)\n\n\nprint(data1)\ndf = pd.DataFrame(data1, columns=header)\ndf['Publication Date'] = df['Publication Date'].dt.strftime('%m/%d/%Y')\ndf.to_csv('C:\\\\Users\\\\Saurabh Pore\\\\Desktop\\\\NJIT\\\\payscale_college_salary.csv', index=False)\ndriver.quit()\n","sub_path":"Payscale_College_Salary_Report.py","file_name":"Payscale_College_Salary_Report.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"571272352","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Created on 2017-03-07 19:05:22\n# Project: meiju\n\nfrom pyspider.libs.base_handler import *\nfrom pymongo import MongoClient\nimport random\nimport os\n\nUSER_AGENT_LIST = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\n]\n\nDIR_PATH = 'g:/img'\n\nclient = MongoClient(\"mongodb://112.74.44.140:27017\")\ndb = client['drama']\n\nclass Handler(BaseHandler):\n crawl_config = {\n }\n \n headers = {\n \"Accept-Encoding\":\"gzip, deflate, sdch\",\n \"Accept-Language\":\"zh-CN,zh;q=0.8\",\n \"Cache-Control\":\"no-cache\",\n \"Host\":\"www.meijutt.com\",\n \"Pragma\":\"no-cache\",\n \"Referer\":\"http://www.meijutt.com\",\n \"Upgrade-Insecure-Requests\":\"1\"\n }\n\n def __init__(self):\n self.base_url = 'http://www.meijutt.com/file/list1.html'\n self.io_util = IOUtil()\n \n # @every(minutes=24 * 60)\n def on_start(self):\n self.crawl('http://www.meijutt.com/file/list1.html', callback = self.index_page, headers = self.headers)\n\n # 10 days\n @config(age=10 * 24 * 60 * 60)\n def index_page(self, response):\n for each in response.doc('.cn_box2 .bor_img3_right a[href^=\"http\"]').items():\n self.headers['User-Agent'] = random.choice(USER_AGENT_LIST)\n self.crawl(each.attr.href, callback = self.detail_page, fetch_type = 'js', headers = self.headers)\n \n self.crawl(response.doc('div.page a:nth-last-child(2)').attr.href, callback = self.index_page, headers = self.headers)\n \n @config(priority=2)\n def detail_page(self, response):\n profile = response.doc('div.o_r_contact ul')\n profile.remove('em')\n \n img_src = 
response.doc('div.o_big_img_bg_b img').attr.src\n file_src_parts = img_src.split('/')\n file_parent_root = file_src_parts[-2]\n file_name = file_src_parts[-1]\n\t\t\n self.headers['User-Agent'] = random.choice(USER_AGENT_LIST)\n self.crawl(img_src, callback = self.save_img, save={'file_parent_root': file_parent_root, 'file_name': file_name}, headers = {\n 'Accept':\"image/webp,image/*,*/*;q=0.8\",\n \"Accept-Encoding\":\"gzip, deflate, sdch\",\n \"Cache-Control\":\"no-cache\",\n \"Host\":\"img.kukan5.com:808\",\n \"Pragma\":\"no-cache\",\n \"Proxy-Connection\":\"keep-alive\",\n \"Referer\":response.url\n })\n \n drama = {\n \"img_src\" : file_parent_root + '/' + file_name,\n \"title_en\" : profile.find('li:nth-child(2)').text(),\n \"title_cn\" : profile.find('li:nth-child(3)').text(),\n \"debut_date\" : profile.find('li:nth-child(7)').text(),\n \"plot\" : profile.find('li:nth-child(9)').text(),\n \"nation\" : profile.find('li:nth-child(10) label:nth-child(1)').text(),\n \"tv\" : profile.find('li:nth-child(10) label:nth-child(2)').text(),\n \"hot\" : profile.find('li:nth-child(11) label:nth-child(1)').text(),\n \"length\" : profile.find('li:nth-child(11) label:nth-child(2)').text(),\n \"category\" : profile.find('li:nth-child(12) label:nth-child(2)').text(),\n\n \"script_writers\" : [x.text() for x in profile.find('li:nth-child(4) a').items()],\n \"directors\" : [x.text() for x in profile.find('li:nth-child(5) a').items()],\n \"actors\" : [x.text() for x in profile.find('li:nth-child(6) a').items()],\n\n \"average_score\" : response.doc('div#average-score').text(),\n \"star5_num\" : response.doc('span#small-total-star5').text(),\n \"star4_num\" : response.doc('span#small-total-star4').text(),\n \"star3_num\" : response.doc('span#small-total-star3').text(),\n \"star2_num\" : response.doc('span#small-total-star2').text(),\n \"star1_num\" : response.doc('span#small-total-star1').text()\n \n }\n \n self.store(drama)\n\n \n def store(self, drama):\n db.dramas.insert_one(drama)\n \n def save_img(self, response):\n file_parent_root = response.save['file_parent_root']\n self.io_util.mkDir(file_parent_root)\n file_path = file_parent_root + '/' + response.save['file_name']\n # print(file_path)\n self.io_util.save(response.content, file_path)\n\t\t\n \n \nclass IOUtil(object):\n def __init__(self):\n self.path = DIR_PATH\n if not self.path.endswith('/'):\n self.path = self.path + '/'\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n def mkDir(self, path):\n path = path.strip()\n dir_path = self.path + path\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n def save(self, content, path):\n absolute_path = self.path + path\n f = open(absolute_path, 'wb')\n f.write(content)\n f.close()\n\n def getExtension(self, url):\n extension = url.split('.')[-1]\n return extension \n","sub_path":"tutorial/pyspider_script/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"354083076","text":"from database import Database\nfrom models.base import Base\n\n\nclass Token(Base):\n def __init__(self, user_id, token, date):\n self.set_user_id(user_id)\n self.set_token(token)\n self.set_date(date)\n\n def __repr__(self):\n return ''.format(self.user_id, self.token, self.date)\n\n async def to_json(self):\n try:\n async with Database.pool.acquire() as connection:\n token = await connection.fetch('''SELECT user_id, token, date FROM public.token WHERE user_id = $1''', self.user_id)\n return {\"user_id\": token[0][0], \"token\": token[0][1], \"date\": token[0][2]}\n except Exception as error:\n print(error)\n\n def set_date(self, date):\n if self.validate_date(date):\n self.date = date\n else:\n raise Exception('Date must be string')\n\n def validate_date(self, date):\n if type(date) == str:\n return True\n else:\n return False\n\n def set_token(self, token):\n if token:\n self.token = token\n else:\n raise Exception('Bad token')\n\n def set_user_id(self, user_id):\n if type(user_id) == int:\n self.user_id = user_id\n else:\n raise TypeError(\"user id is not integer\")\n\n @staticmethod\n async def load_from_db(user_id):\n try:\n async with Database.pool.acquire() as connection:\n token = await connection.fetch('''SELECT user_id, token, date FROM public.token WHERE user_id=$1''', int(user_id))\n return token\n except Exception as error:\n print(error)\n \n async def save_to_db(self):\n try:\n async with Database.pool.acquire() as connection:\n await connection.execute('''INSERT INTO public.token (token, date, user_id) VALUES ($1, $2, $3)''', self.token, self.date, self.user_id)\n except Exception as error:\n print(error)\n\n @staticmethod\n async def delete(id):\n try:\n async with Database.pool.acquire() as connection:\n await connection.execute('''DELETE FROM public.token WHERE user_id = $1''', int(id))\n except Exception as error:\n pass\n\n# token = \"\"\"CREATE TABLE IF NOT EXISTS token(\n# user_id INTEGER PRIMARY KEY NOT NULL REFERENCES users(id),\n# token varchar(512) UNIQUE NOT NULL DEFAULT NULL,\n# date varchar(200) NOT NULL\n# );\"\"\"","sub_path":"app/models/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"525721976","text":"import requests\nfrom bs4 import BeautifulSoup as bs \nimport csv \n\n#url = input('enter url: ')\nurl = 'https://hh.ru/search/vacancy?clusters=true&area=1&enable_snippets=true&salary=&st=searchVacancy&text=Python+junior&from=suggest_post'\nheaders = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15',\n'accept': '*/*'}\npure_html = 'https://hh.ru' \nFILE = 'jobs.csv'\n\n\n\ndef get_html(url, params=None):\n\tr = requests.get(url, headers = headers, params = params)\n\treturn r\n\n\ndef pag_count(html):\n\t#r1 = get_html(url)\n\tsoup = bs(html, 'html.parser')\n\tpagination = soup.find_all('a', class_ = 'bloko-button HH-Pager-Control')\n\tif pagination:\n\t\treturn int(pagination[-1].getText())\n\telse:\n\t\treturn 0\n\n\ndef get_content(html):\n\tsoup = bs(html, 'html.parser')\n\titems = soup.find_all('div', class_ = 'vacancy-serp-item')\n\tjobs = []\n\tfor item in items:\n\t\tsalary = item.find('span', attrs = {'class': 'bloko-section-header-3 bloko-section-header-3_lite' , 'data-qa': \"vacancy-serp__vacancy-compensation\"})\n\t\tif salary:\n\t\t\tsalary = salary.getText()\n\t\telse:\n\t\t\tsalary = 'Ask for salary'\n\n\n\t\tjobs.append({\n\t\t\t'Name': item.find('a', attrs={'class':'bloko-link HH-LinkModifier'}).getText(),\n\t\t\t'Link': item.find('a', attrs={'class':'bloko-link HH-LinkModifier'})['href'],\n\t\t\t'Salary': salary\n\t\t\t})\n\t\t#print(link.getText() + ' | ' + link['href'] + '\\n')\n\treturn jobs\n\n\ndef save_file(items, path):\n\twith open(path, 'w', newline = '') as file:\n\t\twriter = csv.writer(file, delimiter = ';')\n\t\twriter.writerow(['Name', 'Link', 'Salary'])\n\t\tfor item in items:\n\t\t\twriter.writerow([item['Name'], item['Link'], item['Salary']])\n\n\n\ndef parse():\n\turl = input('Link from hh.ru: ')\n\thtml = get_html(url)\n\tpages_count = pag_count(html.text)\n\tjobs = []\n\tfor page in range(0, pages_count + 1):\n\t\tprint(f'Парсинг страницы {page} из {pages_count}...')\n\t\t#href = get_html(pure_html + link['href'])\n\t\thtml = get_html(url, params={'page': page})\n\t\tjobs.extend(get_content(html.text))\n\tsave_file(jobs, FILE)\n\tprint(f'Получено {len(jobs)} предложений')\n\n\nparse()\n\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"113089324","text":"import PySimpleGUI as sg\n\n\nclass GUI:\n\n def gui_greeeting(self):\n \"\"\"\n\n :return:\n \"\"\"\n layout = [[sg.Text(\"Do you wish to search for case or death data?\")],\n [sg.Listbox(enable_events=True, values=[\"cases\", 'deaths'], size=(30, 6))]]\n window = sg.Window('Covid19 Data Analyser', layout)\n while True:\n event, values = window.read()\n if event in (None, 'Cancel'):\n # User closed the Window or hit the Cancel button\n break\n\n data_searched = values.get(0)\n window.close()\n\n return data_searched\n\n def gui_cases_or_deaths(self, data_searched):\n \"\"\"\n\n :param data_type:\n :return:\n \"\"\"\n death_data_types = [\"newDeaths28DaysByDeathDate\", \"cumDeaths28DaysByDeathDate\",\n \"cumDeaths28DaysByDeathDateRate\"]\n cases_data_types = [\"newCasesBySpecimenDate\", \"cumCasesBySpecimenDateRate\", \"newPillarOneTestsByPublishDate\",\n \"newPillarTwoTestsByPublishDate\", \"newPillarTwoTestsByPublishDate\",\n \"newPillarFourTestsByPublishDate\"]\n if data_searched == \"cases\":\n\n layout = [[sg.Text(f\"Please select type of {data_searched} data\")],\n [sg.Listbox(enable_events=True, values=cases_data_types, size=(30, 6))]]\n\n window = sg.Window('Covid19 Data Analyser', layout)\n\n while True:\n\n event, values = window.read()\n\n if event in (None, 'Cancel'):\n\n break\n\n data_type = values.get(0)\n\n window.close()\n\n return data_type[0]\n\n elif data_searched == \"deaths\":\n\n layout = [[sg.Text(f\"Please select type of {data_searched} data\")],\n [sg.Listbox(enable_events=True, values=death_data_types, size=(30, 6))]]\n\n window = sg.Window('Covid19 Data Analyser', layout)\n\n while True:\n\n event, values = window.read()\n\n if event in (None, 'Cancel'):\n break\n\n data_type = values.get(0)\n\n window.close()\n\n return data_type[0]\n\n def gui_duration(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n possible_timeframes = [\"fortnight\", \"month\", \"allTime\", \"custom\"]\n layout = [[sg.Text(f\"Please select time period for data\")],\n [sg.Listbox(enable_events=True, values=possible_timeframes, size=(30, 6))]]\n\n window = sg.Window('Covid19 Data Analyser', layout)\n\n while True:\n\n event, values = window.read()\n\n if event in (None, 'Cancel'):\n break\n\n duration = values.get(0)\n\n window.close()\n\n preset_times = [\"fortnight\", \"month\", \"allTime\"]\n if duration[0] in preset_times:\n\n return duration[0]\n\n elif duration[0] == \"custom\":\n\n layout = [[sg.Text(f\"Please input desired number of days\"), sg.InputText()],\n [sg.Button('OK'), sg.Button('Cancel')]]\n\n window = sg.Window('Covid19 Data Analyser', layout)\n\n while True:\n\n event, values = window.read()\n\n if event in (None, 'Cancel'):\n break\n duration = values.get(0)\n\n window.close()\n\n return duration\n print(\"Failure in setting duration\")\n\n def gui_area_type(self):\n\n area_type = [\"ltla\", \"utla\", \"nation\", \"region\"]\n layout = [[sg.Text(f\"Please select level: \")],\n [sg.Listbox(enable_events=True, values=area_type, size=(30, 6))]]\n\n window = sg.Window('Covid19 Data Analyser', layout)\n\n while True:\n\n event, values = window.read()\n\n if event in (None, 'Cancel'):\n break\n\n area_type = values.get(0)\n\n window.close()\n\n return area_type[0]\n\n def gui_area_name(self, area_type):\n\n layout = [[sg.Text(f\"Please input desired location name for {area_type}\"), sg.InputText()],\n [sg.Button('OK'), sg.Button('Cancel')]]\n\n window = sg.Window('Covid19 Data Analyser', layout)\n\n while True:\n\n event, values = window.read()\n\n if 
event in (None, 'Cancel'):\n                break\n            area_name = values.get(0)\n\n        window.close()\n\n        return area_name\n\n    def gui_check_if_more_data_required(self):\n        layout = [[sg.Text(f\"Would you like to access more data?\")],\n                  [sg.Listbox(enable_events=True, values=[\"Yes\", \"No\"], size=(30, 6))]]\n\n        window = sg.Window('Covid19 Data Analyser', layout)\n\n        while True:\n\n            event, values = window.read()\n\n            if event in (None, 'Cancel'):\n                break\n\n            # a Listbox selection comes back as a list, e.g. [\"Yes\"]\n            proceed_or_kill = values.get(0)\n\n            if proceed_or_kill and proceed_or_kill[0] == \"Yes\":\n                window.close()\n                return True\n\n            elif proceed_or_kill and proceed_or_kill[0] == \"No\":\n                window.close()\n                return False\n\n        window.close()\n\n        return True\n\n\n    def gui_runner(self):\n        \"\"\"\n\n        :return:\n        \"\"\"\n\n        params = {}\n\n        data_searched = self.gui_greeeting()\n\n        params[\"data_type\"] = self.gui_cases_or_deaths(data_searched[0])\n\n        params[\"duration\"] = self.gui_duration()\n\n        params[\"area_type\"] = self.gui_area_type()\n\n        params[\"area_name\"] = self.gui_area_name(params[\"area_type\"])\n\n        return params","sub_path":"scripts/gui/covid19visualisationlibrary/modules/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"124365574","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 14 14:01:46 2013\n\n@authors: David Schoorisse & Mustafa Karaalioglu\n\"\"\"\nfrom sqlalchemy import Column, Integer, Sequence, String, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom dbconnection import engine, session, Base\nfrom models.answer import AnswerModel\nfrom basemodel import BaseEntity\n\n\nclass Tag(Base, BaseEntity):\n __tablename__ = 'Tags'\n\n name = Column(String(32), unique=True)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n if self.id is None:\n return \"\" % (self.name)\n else:\n return \"\" % (self.id, self.name)\n\n @staticmethod\n def add_tag(name):\n if session.query(Tag.name).filter(Tag.name == name).first() is None:\n session.add(Tag(name))\n session.commit()\n\n @staticmethod\n def remove_tag(tag_id):\n for tag in session.query(Tag).filter(Tag.id == tag_id):\n session.delete(tag)\n\n\nclass AnswerTag(Base):\n __tablename__ = 'AnswerTags'\n\n answer_id = Column(Integer, ForeignKey('answer.id'), primary_key=True)\n tag_id = Column(Integer, ForeignKey('Tags.id'), primary_key=True)\n\n def __init__(self, answer_id, tag_id):\n self.answer_id = answer_id\n self.tag_id = tag_id\n\n def __repr__(self):\n return \"\" % (self.answer_id, self.tag_id)\n\nBase.metadata.create_all(engine)\n","sub_path":"models/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"545992865","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: longshuicui\n@date : 2021/1/29\n@function:\n547. Number of Provinces (Medium)\nhttps://leetcode.com/problems/number-of-provinces/\n题目描述\n 给定一个二维的 0-1 矩阵,如果第 (i, j) 位置是 1,则表示第 i 个人和第 j 个人是朋友。已知\n 朋友关系是可以传递的,即如果 a 是 b 的朋友, b 是 c 的朋友,那么 a 和 c 也是朋友,换言之这\n 三个人处于同一个朋友圈之内。求一共有多少个朋友圈。\n输入输出样例\n 输入是一个二维数组,输出是一个整数,表示朋友圈数量。因为朋友关系具有对称性,该二\n 维数组为对称矩阵。同时,因为自己是自己的朋友,对角线上的值全部为 1。\n Input:\n [[1,1,0],\n [1,1,0],\n [0,0,1]]\n Output: 2\n 在这个样例中, [1,2] 处于一个朋友圈, [3] 处于一个朋友圈。\n\n题解\n 该题与695题不同,每行代表一个节点,列代表是否存在相邻节点。\n 695题有m*n个节点,该题有N个节点。\n\"\"\"\n\n\ndef findCircleNum(isConnected):\n connected = {i:False for i in range(len(isConnected))}\n count=0\n def dfs(i):\n connected[i]=True\n for k in range(len(isConnected)):\n if isConnected[i][k]==1 and not connected[k]:\n dfs(k)\n for i in range(len(isConnected)):\n if not connected[i]:\n dfs(i)\n count+=1\n return count\n\n\nisConnected = [[1,1,0],[1,1,0],[0,0,1]]\ncount=findCircleNum(isConnected)\nprint(count)","sub_path":"05.搜索方式/深度优先搜索/547.Number of Provinces (Medium).py","file_name":"547.Number of Provinces (Medium).py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"653501731","text":"import random\r\nimport os\r\nimport time\r\nr1=[]\r\nr2=[]\r\nitem=[]\r\nd1=[]\r\nd2=[]\r\nd3=[]\r\na1=0\r\nclass lottery():\r\n\tdef _init_(self,input1,Guarantee):\r\n\t\tself.input1=input1\r\n\t\tself.Guarantee=Guarantee\r\n\tdef input_item(self,check=0,r3=[]):\r\n\t\twhile check == 0:\r\n\t\t\tinput1=input()\r\n\t\t\tif input1==\"no\":\r\n\t\t\t\tr1.extend(r3)\r\n\t\t\t\tbreak\r\n\t\t\tr3.append(input1)\r\n\t\t\tprint(\"您增加了項目:\"+input1)\r\n\t\t\tprint(\"不增加新的項目請輸入no\")\r\n\t\t\tprint(\"您以輸入了以下元素\")\r\n\t\t\tprint(r3)\r\n\tdef method(self,item1=[],count=0,count1=0,Guarantee=a1):\r\n\t\tfor line in item:\r\n\t\t\tcheck=0\r\n\t\t\tcount=0\r\n\t\t\tnumber=0\r\n\t\t\twhile check==0:\r\n\t\t\t\tif count1==-1:\r\n\t\t\t\t\tprint(\"保底抽到:\"+r1[r2.index(min(r2))])\r\n\t\t\t\t\td1.append(r1[r2.index(min(r2))])\r\n\t\t\t\t\tcount1=0\r\n\t\t\t\t\tcheck=1\r\n\t\t\t\telse:\r\n\t\t\t\t\tif line<=number+r2[count]:\r\n\t\t\t\t\t\tprint(\"抽選到:\"+r1[count])\r\n\t\t\t\t\t\tif r1[count]==r1[r2.index(min(r2))]:\r\n\t\t\t\t\t\t\tcount1=0\r\n\t\t\t\t\t\t\tprint(\"因為抽到大獎:\"+\"-\"+r1[r2.index(min(r2))]+\"-\"+\"保底重製\")\r\n\t\t\t\t\t\td1.append(r1[count])\r\n\t\t\t\t\t\tcheck=1\r\n\t\t\t\t\t\tcount1+=1\r\n\t\t\t\t\t\tif count1==Guarantee:\r\n\t\t\t\t\t\t\tcount1=-1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tnumber+=r2[count]\r\n\t\t\t\t\t\tcount+=1\r\n\t\tfor line in r1:\r\n\t\t\ta=d1.count(line)\r\n\t\t\ta=str(a)\r\n\t\t\tprint(\"總共抽到:\"+\"-\"+line+\"-\"+\"共\"+a+\"次\")\r\n\t\td1.clear()\r\n\tdef set_probability(self,check=100):\r\n\t\tfor set in r1:\r\n\t\t\tcheck=str(check)\r\n\t\t\tprint(\"剩餘可配置機率\"+check)\r\n\t\t\tprint(\"設定\"+set+\"機率為:\")\r\n\t\t\tcheck=int(check)\r\n\t\t\tinput1=input()\r\n\t\t\tinput1=int(input1)\r\n\t\t\tcheck-=input1\r\n\t\t\tr2.append(input1)\r\n\tdef lottery_item(self,count=0,r4=0):\r\n\t\tprint(\"設定抽籤次數:\")\r\n\t\tinput1=input()\r\n\t\tinput1=int(input1)\r\n\t\twhile count 0, -ious, dists))\n\n anchor_ids = tf.zeros(tf.shape(candidates)[0], dtype=tf.int32)\n for i in range(tf.shape(candidates)[0]):\n available = tf.math.reduce_all(\n candidates[i][..., tf.newaxis] !=\n anchor_ids[:i][tf.newaxis], axis=-1)\n\n next_index = candidates[i][available][0]\n\n anchor_ids = tf.tensor_scatter_nd_add(\n anchor_ids, [[i]], [next_index])\n\n deltas = tf.concat([\n (bboxes[:, :2] - tf.gather(anchor_boxes, anchor_ids)[:, :2]) /\n tf.gather(anchor_boxes, anchor_ids)[:, 2:],\n tf.math.log(bboxes[:, 2:] /\n tf.gather(anchor_boxes, anchor_ids)[:, 2:])\n ], axis=1)\n\n return {\n 'image': image,\n 'anchor_ids': anchor_ids\n }, {\n 'labels': features['labels'],\n 'bboxes': bboxes,\n 'deltas': deltas\n }\n\n return data.map(transform)\n\n\ndef padded_batch(data, batch_size):\n return data.padded_batch(batch_size, padding_values=({\n 'image': 0.,\n 'anchor_ids': -1,\n }, {\n 'labels': 0.,\n 'bboxes': 0.,\n 'deltas': 0.\n }), padded_shapes=({\n 'image': [None, None, 3],\n 'anchor_ids': [None]\n }, {\n 'labels': [None, None],\n 'bboxes': [None, 4],\n 'deltas': [None, 4]\n }))\n","sub_path":"squeezedet/legacy/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"309013877","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/01/08\n# @Company : INVINCIBLE STUDIO\n# @Author : Mo Wenlong\n# @Email : invincible0918@126.com\n# @File : ragdollToolView.py\n\n\nimport os\nfrom Core.MayaGUI.LitFrame.view import View\nfrom Core.MayaGUI.widgets import DoubleValidator\nfrom functools import partial\n\n\nclass RagdollToolView(View):\n @property\n def _uiFile(self):\n return os.path.join(os.path.split(__file__)[0], '../GUI/dialog.ui')\n\n def _initGUI(self):\n self._parameter_control_dict = {self._ui.massLE: 'mass',\n self._ui.frictionLE: 'friction',\n self._ui.restitutionLE: 'restitution',\n self._ui.maxFrictionTorqueLE: 'maxFrictionTorque'}\n\n for ctl in self._parameter_control_dict.keys():\n ctl.setValidator(DoubleValidator(0, 1000, 2, ctl))\n\n def _bindMethod(self):\n def modify_ragdoll(widget, msg):\n self._sendMessage('ragdoll_parameters', msg, float(widget.text()))\n\n for k, v in self._parameter_control_dict.items():\n k.returnPressed.connect(partial(modify_ragdoll, k, v))\n\n\n","sub_path":"Projects/Maya/Tools/RagdollTool/Package/Scripts/ragdollToolView.py","file_name":"ragdollToolView.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"135974671","text":"import re\n__author__ = 'yonny'\n\nfrom nameparser import HumanName\nfrom unidecode import unidecode\n\nfrom nameparser.config import CONSTANTS\n\nclass TextProcessingPipeline(object):\n professional_suffixes = [\"CFA\"]\n\n def __init__(self):\n CONSTANTS.titles.remove('Wing')\n\n def process_item(self, item):#removed spider parameter\n if not 'name_full' in item:\n print(\"name_full key not found in item from url %s\" % item['url'])\n return item\n\n full_name = item['name_full']\n\n prepared_fullname = self.prepare_name_for_parsing(full_name)\n prepared_fullname = self.format_name_dots(prepared_fullname) # example : J. P. Grownder =>\n # JP Grownder\n name = HumanName(prepared_fullname)\n\n item['split_title'] = name.title\n item['split_first_name'] = name.first\n item['split_mid_name'] = name.middle\n item['split_last_name'] = name.last\n item['split_suffix'] = name.suffix\n\n if len(name.first) == 1 and len(name.middle) == 1:\n item['split_first_name'] = ''.join([name.first, name.middle])\n item['split_mid_name'] = ''\n if len(name.last) == 1 and len(name.middle) == 1:\n item['split_last_name'] = ''.join([name.first, name.last])\n item['split_mid_name'] = ''\n\n item[\"username\"] = self.generate_username(name.first, name.middle, name.last)\n item[\"simplified_joined_name\"] = self.generate_simplified_joined_name(name.first, name.middle, name.last)\n\n return item\n\n def generate_username(self, first_name, middle_name, last_name):\n username = \"%s%s%s\" % (first_name, middle_name, last_name)\n username = re.sub(r'[^\\w]', '', username)\n return username\n\n def generate_simplified_joined_name(self, first_name, middle_name, last_name):\n return \"%s %s\" % (first_name, last_name)\n\n def prepare_name_for_parsing(self, name):\n # convert foreign characters\n name = unidecode(name)\n\n # remove suffixes, example : Milena Muller, CFA => Milena Muller\n for suffix in self.professional_suffixes:\n name = name.replace(suffix, \"\")\n\n name = name.strip()\n name = name.rstrip(',').lstrip(',')\n name = name.strip()\n\n return name\n def format_name_dots(self, name):\n \"\"\"\n name: J. P. Gownder | Julie A. Ask\n :return: J P Grownder | Julie A Ask\n \"\"\"\n stripped_dots_name = name.replace('.', '')\n return stripped_dots_name\n","sub_path":"nameparse.py","file_name":"nameparse.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"84000501","text":"import qt\nimport numpy as np\nexecfile(qt.reload_current_setup)\nimport measurement.lib.measurement2.pq.pq_measurement as pq\n\ndebug=False\nm=pq.PQMeasurement('test')\nm.params['MAX_DATA_LEN'] = int(100e6)\nm.params['BINSIZE'] = 1 #2**BINSIZE*(BASERESOLUTION = 1 ps for HH)\nm.params['MIN_SYNC_BIN'] = 0 #5 us \nm.params['MAX_SYNC_BIN'] = 3000 #1 us per opt pi pulse\nm.params['measurement_time'] = 24*60*60 #sec = 24H\nm.params['measurement_abort_check_interval'] = 1 #sec\nm.run(debug=debug)\nm.finish()","sub_path":"scripts/lt3_scripts/testing/T2_measure.py","file_name":"T2_measure.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"480570259","text":"import sys\nreadline = sys.stdin.readline\n\ndef main():\n N, A = map(int, readline().rstrip().split())\n X = list(map(int, readline().rstrip().split()))\n lim = max(X) * N\n X = [x-A for x in X]\n dp = [[0] * (2*lim) for _ in range(N+1)]\n dp[0][lim] = 1\n for i in range(1, N+1):\n x = X[i-1]\n for j in range(2*lim):\n if 0 <= j - x < 2 * lim:\n dp[i][j] = dp[i-1][j] + dp[i-1][j-x]\n else:\n dp[i][j] = dp[i-1][j]\n \n print(dp[N][lim] - 1)\n\n\nif __name__ == '__main__':\n main()","sub_path":"ABC044/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"22103893","text":"import urllib.request as http\nimport re\nclass BaiduSource:\n def __init__(self,video):\n self.video = video\n\n INTERFACE = \"http://huhupan.com/e/extend/down/?id=\"\n\n __list_express = \"(.*?)
\"\n __list_pattern = re.compile(__list_express,re.S)\n\n __t1_express = \"
(.*?)\" % (req.get_app_static(), j))\n\n for c in css:\n r.append(\"
\" % (req.get_app_static(), c))\n\n return \"\\n\".join(r)\n","sub_path":"tachyonic/ui/html_assets.py","file_name":"html_assets.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"550387493","text":"import math\n\nclass Search(object):\n\t\"\"\"docstring for Search\"\"\"\n\n\tdef __init__(self, arg):\n\t\tsuper(Search, self).__init__()\n\t\tself.arg = arg\n\n\ndef linearSearch (L, elem):\n\n\tindex = 0\n\n\twhile(index < len(L)):\n\n\t\tif(L[index] == elem):\n\t\t\treturn index\n\n\t\tindex += 1\n\t\t\n\treturn -1\n\ndef binarySearch(L, elem):\n\t\n\tstart = 0\n\tend = len(L) - 1\n\n\twhile(start <= end):\n\n\t\tmid = (start + end)//2\n\n\t\tif(L[mid] == elem):\n\t\t\treturn mid\n\n\t\telif(L[mid] > elem):\n\t\t\tend = mid - 1\n\n\t\telse:\n\t\t\tstart = mid + 1\n\n\treturn -1\n\ndef jumpSearch (L, elem):\n\n\tjump_size = int(math.sqrt(len(L)))\n\tindex = 0\n\trange_start = 0\n\trange_end = 0\n\n\twhile(index < len(L)):\n\n\t\tif(L[index] == elem):\n\t\t\treturn index\n\n\t\telif(L[index] > elem):\n\t\t\trange_start = index - jump_size + 1\n\t\t\trange_end = index - 1\n\t\t\tbreak\n\n\t\tindex += jump_size\n\n\tif(range_start < 0):\n\t\treturn -1\n\n\telse:\n\n\t\tif(index >= len(L)):\n\t\t\trange_start = index - jump_size + 1\n\t\t\trange_end = len(L) - 1\n\n\t\tindex = range_start\n\n\t\twhile(index <= range_end):\n\n\t\t\tif(L[index] == elem):\n\t\t\t\treturn index\n\n\t\t\tindex += 1\n\n\t\treturn -1\t\nprint(jumpSearch([0,3,4,5,6], 3))\n\n","sub_path":"searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"231162699","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom multiprocessing import Pool\nimport os\nimport pymongo\nimport requests\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\nfrom requests.exceptions import *\nfrom hashlib import md5\n\nbase_url = \"httP://www.mmjpg.com\"\n\nheaders = {\n 'UserAgent': UserAgent().random\n}\n\nmongo_cli = pymongo.MongoClient(host='localhost')\nmongo_db = mongo_cli['mmjpg']\n\ndef get_html(url=None):\n \"\"\"发送请求并取得response\"\"\"\n url = url if url else base_url\n try:\n response = requests.get(url, headers=headers, timeout=5)\n if response.status_code == 200:\n # print('*****获取页面成功*****')\n response.encoding = response.apparent_encoding\n return response.text\n else:\n return None\n except (HTTPError, Timeout, ConnectionError, Exception) as e:\n print('错误消息',e)\n\ndef parse_html(html):\n \"\"\"获取所有页面的html\"\"\"\n soup = BeautifulSoup(html, 'lxml')\n pages = int(soup.select('em.info')[0].get_text()[1:3]) # 获取总共的页数信息\n # print(pages)\n for page in range(1,pages+1):\n url = base_url + '/home/' + repr(page) # 组合URL链接\n yield get_html(url)\n\ndef parse_detail(html):\n \"\"\"获取每个页面中图集的链接\"\"\"\n soup = BeautifulSoup(html, 'lxml')\n elements = soup.select('div.pic ul li a')\n # print(elements)\n for element in elements:\n yield element.get('href')\n\ndef send_args(func):\n from functools import wraps\n @wraps(func)\n def wrapper(args):\n if isinstance(args, dict):\n return func(**args)\n else:\n return func(*args)\n return wrapper\n\n@send_args\ndef parse_single_page(url, link, images=None, referer=None):\n \"\"\"解析单个图集\"\"\"\n if images is None:\n images = []\n if referer is None:\n referer = []\n referer.append(url)\n html = get_html(url)\n # print(html) # 取得单个图集的html\n soup = BeautifulSoup(html, 'lxml')\n title = soup.select('div.article h2')[0].get_text()\n image_url = soup.select('div.content a img')[0].get('src')\n print(title, image_url)\n images.append(image_url)\n if soup.select('div.page a')[-1].get_text() == '下一张':\n next_page = soup.select('div.page a')[-1].get('href')\n if next_page:\n pattern = re.search(r'/.*?/.*?(/\\d+$)', next_page).group(1)\n next_url = link + pattern\n print('正在前往页面', next_url)\n # 跳转下一页\n parse_single_page_diff(next_url, link, images, title, referer)\n # print('1')\n url_referer = []\n for url, referer in zip(images, referer):\n url_referer.append([url, referer])\n date = {\n 'title': title,\n 'url_referer': url_referer\n }\n save_image(date)\n\ndef parse_single_page_diff(url, link, images, title,referer):\n html = get_html(url)\n # print(html) # 取得单个图集的html\n soup = BeautifulSoup(html, 'lxml')\n image_url = soup.select('div.content a img')[0].get('src')\n # print(title, image_url)\n referer.append(url)\n images.append(image_url)\n if soup.select('div.page a')[-1].get_text() == '下一张':\n next_page = soup.select('div.page a')[-1].get('href')\n if next_page:\n pattern = re.search(r'/.*?/.*?(/\\d+$)', next_page).group(1)\n next_url = link + pattern\n print('正在前往页面', next_url)\n # 跳转下一页\n parse_single_page_diff(next_url, link, images, title, referer)\n\ndef save_image(date):\n if mongo_db['items'].update({'title': date['title']}, {'$set': date}, True):\n print('存入mongodb成功')\n flode_name = date.get('title')\n docs = date.get('url_referer')\n file_path = '/media/gbc/Download/mmjpg/' + flode_name + '/'\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n for doc in docs:\n try:\n response = requests.get(doc[0], headers={'Referer': doc[1]})\n if response.status_code == 200:\n content = 
response.content\n file_name = md5(content).hexdigest() + '.jpg'\n if not os.path.exists(file_name):\n with open(file_path + file_name, 'wb') as f:\n print('正在保存图片中 ', file_name)\n f.write(content)\n f.close()\n else:\n print('图片已经存在')\n except:\n pass\n\ndef spider():\n try:\n html = get_html()\n if html:\n htmls = parse_html(html) # 所有页面的html\n for html in htmls:\n urls = parse_detail(html) # 网站上所有的图集链接\n if urls:\n urls = list(urls)[::2]\n loop = Pool(6)\n loop.map(parse_single_page, zip(urls, urls))\n loop.close()\n # loop.join()\n finally:\n mongo_cli.close()\n # for url in list(urls)[::2]:\n # result = parse_single_page(url, link=url)\n # print(result)\n # if result:\n # save_to_mongo(result)\n\n\n\n\n\nif __name__ == '__main__':\n spider()","sub_path":"mmjpg.py","file_name":"mmjpg.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"276753010","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nassignment 18.\n\nCopyright 2016 Mukesh Maharjan. All rights reserved.\n\"\"\"\nimport os\nimport argparse\nfrom Bio.Blast import NCBIWWW\nimport time\nfrom Bio import Entrez\nEntrez.email = \"mmahar4@lsu.edu\"\n\n\ndef get_blast_result(organism, direcory_name):\n esearch_query = Entrez.esearch(db=\"nucleotide\",\n term=organism, retmode=\"xml\")\n esearch_result = Entrez.read(esearch_query)\n bb_seq_id = esearch_result['IdList']\n\n for gi in bb_seq_id:\n filename = direcory_name+\"/\"\n\n try:\n result_handle = NCBIWWW.qblast(\"blastn\", \"nt\", gi,\n format_type='Text')\n output = result_handle.read()\n except ValueError:\n output = ''\n filename = filename+'gi_'+gi+'.txt'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n save_file = open(filename, \"w\")\n save_file.write(output)\n time.sleep(1)\n\n save_file.close()\n result_handle.close()\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument(\"organism_name\", help=\"Give the name of the organism\" +\n \" strictly inside \\\" \\\" \")\n parse.add_argument(\"output_directory\",\n help=\"Give the name of the output file.\")\n\n file = parse.parse_args()\n org_name = file.organism_name\n out_file = file.output_directory\n\n get_blast_result(org_name, out_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"answers/mukkuchagit/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"521895259","text":"# -*- coding: utf-8 -*-\n\nclass YeelinkAPIError(Exception):\n def __init__(self, result):\n self.result = result\n try:\n self.type = result.code()\n except:\n self.type = \"\"\n\n # OAuth 2.0 Draft 10\n try:\n self.message = result.read()\n except:\n self.message = result\n\n Exception.__init__(self, self.message)\n","sub_path":"yeelink/api/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"236288950","text":"p = 'b'\n\n\ndef solve(case):\n # print('case', case)\n count = len(case)\n if count == 1:\n if case == '-':\n return 1\n else:\n return 0\n else:\n flips = 0\n prev = case[0]\n for i in range(1, count):\n if prev != case[i]:\n flips += 1\n if i+1 == count and case[i] == '-':\n flips += 1\n prev = case[i]\n return flips\n\nwith open('%s.in' % p) as fin:\n with open('%s.out' % p, 'w+') as fout:\n cases = int(next(fin))\n for i in range(1, cases+1):\n case = next(fin)\n solution = 'Case #%s: %s\\n' % (i, solve(case.strip()))\n # print('solution', solution)\n fout.write(solution)\n","sub_path":"codes/CodeJamCrawler/16_0_2/pr0filer/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"494740812","text":"import socket\nimport time\nimport cv2\nimport numpy as np\n\nhost = '192.168.0.142'\nport = 9725\n\ndef setupSocket(): \n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n print(\"Connection established.\")\n return s\n\ndef recv_all(s, count):\n buf = bytes()\n while count:\n newbuf = s.recv(count)\n if not newbuf:\n return 'rewait'\n buf += newbuf\n count -= len(newbuf)\n # Check if the header is corrupted.\n if len(newbuf) == 16:\n try:\n _ = int(newbuf)\n except:\n return 'rewait'\n return buf\n\n# Switch on the Extra Delay Mode.\nwhile True:\n sli = input(\"Extra Delay Mode: \")\n if sli == 'no':\n slip = 0\n break\n elif sli == 'yes':\n slip = 0.01\n break\n\n# Switch on the Extra Packet Mode.\nwhile True:\n hd_input = input(\"Extra Packet Mode: \")\n if hd_input == 'yes':\n hd_on = True\n packet_on = True\n redun = bytes(0)\n redun_size = 4096\n break\n elif hd_input == 'no':\n hd_on = False\n packet_on = False\n break\n\ns = setupSocket()\n\n# Enter the password.\nwhile True:\n pword = input(\"Enter the word: \")\n s.sendall(pword.encode('utf-8'))\n aword = s.recv(1024)\n aword = aword.decode('utf-8')\n if aword == 'Login Successful':\n time.sleep(2)\n print(aword)\n break\n else:\n time.sleep(1)\n print(aword)\n time.sleep(1)\n continue\n \n# # Set the full screen window.\n# cv2.namedWindow(\"Video Streaming\", cv2.WINDOW_NORMAL)\n# cv2.setWindowProperty(\"Video Streaming\",\\\n# cv2.WND_PROP_FULLSCREEN,\\\n# cv2.WINDOW_FULLSCREEN)\n\nwhile True:\n # Receive the redundant data.\n if hd_on and packet_on:\n while True:\n redun_rec = s.recv(1024)\n redun = redun + redun_rec\n s.sendall(bytes(1))\n if len(redun) == redun_size:\n redun = bytes(0)\n packet_on = False\n break\n data_len = recv_all(s, 16)\n # MJPEG video streaming.\n if data_len == 'rewait':\n continue\n if len(data_len) == 16:\n time.sleep(float(slip))\n img_data = recv_all(s, int(data_len))\n packet_on = True\n # Check if the JPEG data is corrupted.\n if img_data[0] != 255 or img_data[1] != 216\\\n or img_data[-2] != 255 or img_data[-1] != 217:\n continue\n # Convert string back to array.\n img_array = np.frombuffer(img_data, dtype='uint8')\n # JPEG decoding.\n frame = cv2.imdecode(img_array, 1)\n # Specify the size.\n frame = cv2.resize(frame, (800, 480))\n # Play the video.\n cv2.imshow(\"Video Streaming\", frame)\n cv2.waitKey(1)\n\n","sub_path":"Project Source Code/Control Terminal/con_vid.py","file_name":"con_vid.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"213758525","text":"from app.api import bp, schemas, auth, errors\nfrom app import db\nfrom app.models import Survey, Student, Team, Assessment, AssessmentResult\nfrom flask import jsonify, request\nfrom jsonschema import validate, ValidationError, FormatChecker\nimport datetime\nimport secrets\nimport json\n\n#/api/assessments/submit?survey_id=133&assessor_id=123891272\n\n@bp.route('/assessments/submit', methods = ['POST'])\n@auth.verification_required\ndef submit_assessment():\n #check presence in the db\n a = Assessment.query.\\\n join(Assessment.survey).\\\n join(Assessment.assessor).\\\n filter(Survey.xorro_survey_id==request.args.get('survey_id')).\\\n filter(Student.student_id==request.args.get('assessor_id')).\\\n first()\n if not a:\n return errors.not_found(\"Assessment not found\")\n \n #start handling the json body \n body = request.json\n try:\n validate(body, schemas.assessment_submission, format_checker=FormatChecker())\n except ValidationError as e:\n return errors.bad_request(f'Structutral validation error: {e.message}')\n\n team = Team.query.\\\n join(Team.students).\\\n filter(Student.id == a.assessor.id).\\\n filter(Team.survey_id==a.survey_id).\\\n first()\n \n ids_r = list(map(lambda x: str(x['assessee_id']), body['peer_assessments']))\n if not team.is_matching_student_list(ids_r):\n return errors.bad_request(f'Logical validation error: Incorrect list of assessments')\n\n #start of transaction\n try:\n for a_r in body['peer_assessments']:\n assessee = Student.query.filter_by(student_id = a_r['assessee_id']).first()\n ad = AssessmentResult.query\\\n .filter_by(assessment_id = a.id, assessee_id = assessee.id).first()\n\n if not ad:\n ad = AssessmentResult.from_json(a_r)\n ad.assessee = assessee\n ad.assessment = a\n else:\n ad.update_from_json(a_r)\n db.session.add(ad)\n\n a.t_teacheradv = body['teacher_advice']\n a.complete()\n db.session.add(a)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return errors.server_error(json.dumps(e.args))\n\n #end of transaction\n\n return ('', 204)","sub_path":"app/api/assessments.py","file_name":"assessments.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"533850772","text":"############################################################\n# Motif Search in DNA \n# \n# This Python 3 application searches all potential sequence \n# motifs in a DNA or locate a specific motif in it. \n# \n# Author: Sophie R. Liu \n# University of Texas at Austin \n#\n# Last update: October 28, 2019 \n#\n# Copyright (c) 2019 Sophie R. Liu\n#\n# This application is licensed under The MIT License.\n# (See this application's dirver files for license text,\n# or https://opensource.org/licenses/MIT.)\n#\n# ##########################################################\n#\n# Class MotifLocate\n#\n# Function: Algorithm to locate a specific motif\n#\n# ##########################################################\n\nclass MotifLocate:\n # Constructor Variables\n sequences = {} # Holds info of clones (keys) - sequences (values)\n keys = {}\n itKeys = iter(keys)\n motif = \"\"\n seqSize = 0 # Following three variables all info regarding the motif that is being searched for\n primerSize = 0\n motSize = 0\n # Method Variables\n dat = {} # Return data; keys are motifs, values are lists of sequences the motif is found in\n refKey = \"\" # Following three variables all info regarding the tested motif\n refSeq = \"\"\n refMotif = \"\"\n\n # Constructor\n def __init__(self, seq, m, size, pSize):\n self.sequences = seq\n self.keys = seq.keys()\n self.itKeys = iter(self.keys)\n self.motif = m\n self.motSize = len(self.motif)\n self.seqSize = size\n self.primerSize = pSize\n\n # Locate Methods\n def locateMotif(self):\n while True: # Iterates through each clone\n try:\n refKey = self.itKeys.__next__()\n refSeq = self.sequences.get(refKey)\n n = 0 # n = frame-shift\n locations = []\n\n # Iterates through sequence of a clone, n shifts reading frame\n while (self.motSize + n) <= self.seqSize:\n refMotif = refSeq[0+n : self.motSize+n]\n if refMotif.__eq__(self.motif):\n locations.append(self.primerSize + n + 1)\n n += 1\n if locations: # Checks to see if 'locations' has data inside\n self.dat[refKey] = locations\n except StopIteration:\n break\n return self.dat\n\n #Get Methods\n def getResults(self):\n return self.dat\n","sub_path":"MotifLocate.py","file_name":"MotifLocate.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"638991820","text":"from .GlobalMenu import GlobalMenu\nfrom .MayaContext import MayaContext\nfrom basetools.App import Hook\nimport maya.mel\n\nclass MayaHook(Hook):\n \"\"\"\n Hook implementation for maya.\n \"\"\"\n\n def startup(self):\n \"\"\"\n Perform startup routines.\n \"\"\"\n super(MayaHook, self).startup()\n\n self.__buildMenus()\n\n def __buildMenus(self):\n \"\"\"\n Create the default menus.\n \"\"\"\n # returning when application is under batch mode\n if not self.context().hasGUI():\n return\n\n umediaMenu = GlobalMenu(\"UMedia\")\n\n # items avaialble under umedia menu\n umediaMenu.addItem(\n 'Rendering/Send to the farm...',\n lambda: maya.mel.eval(\"SubmitJobToDeadline()\")\n )\n\n\n# registering hook\nHook.register(\n 'maya',\n MayaHook,\n MayaContext\n)\n","sub_path":"src/lib/mayatools/App/MayaHook.py","file_name":"MayaHook.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"355950426","text":"import time\n\nfrom captcha import *\nfrom utils import *\nfrom zhihu import ZhihuClient\n\nlogger = get_common_logger(__file__, \"mark_captcha.log\")\n\n\ndef mark_captcha_for_training():\n client = ZhihuClient()\n total = 0\n hits = 0\n while True:\n logger.warning(\"Killing captcha...\")\n total += 1\n success, resp = client.get_web_captcha()\n if success:\n image = resp[\"img_base64\"].replace(\"\\n\", \"\")\n captcha = predict_captcha(image)\n # 验证码提交过快接口会直接拒绝\n time.sleep(1)\n killed = client.verify_web_captcha(captcha)\n if killed:\n hits += 1\n hits_logger.info(\"%s:%s\", captcha, image)\n logger.warning(\n \"Killed captcha! Total:{} Hits:{} Accuracy:{}\".format(total, hits, hits / total))\n\n\nif __name__ == '__main__':\n mark_captcha_for_training()\n","sub_path":"spiders/mark_captcha.py","file_name":"mark_captcha.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"624924366","text":"\"\"\"\nGiven a date string in the form Day Month Year, where:\n\nDay is in the set {\"1st\", \"2nd\", \"3rd\", \"4th\", ..., \"30th\", \"31st\"}.\nMonth is in the set {\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"}.\nYear is in the range [1900, 2100].\nConvert the date string to the format YYYY-MM-DD, where:\n\nYYYY denotes the 4 digit year.\nMM denotes the 2 digit month.\nDD denotes the 2 digit day.\n\n\nExample 1:\n Input: date = \"20th Oct 2052\"\n Output: \"2052-10-20\"\n\nExample 2:\n Input: date = \"6th Jun 1933\"\n Output: \"1933-06-06\"\n\nExample 3:\n Input: date = \"26th May 1960\"\n Output: \"1960-05-26\"\n\nConstraints:\n The given dates are guaranteed to be valid, so no error handling is necessary.\n\"\"\"\n\n\ndef reformatDate(date):\n day, month, year = date.split(\" \")\n day = day[: -2]\n if len(day) < 2:\n day = \"0\" + day\n m = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n num_month = dict()\n for i in range(12):\n s = str(i + 1)\n if len(s) < 2:\n s = \"0\" + s\n num_month[m[i]] = s\n month = num_month[month]\n return ('-').join([year, month, day])\n\n\ndate = \"20th Oct 2052\"\nprint(reformatDate(date))\n\ndate = \"6th Jun 1933\"\nprint(reformatDate(date))\n\ndate = \"26th May 1960\"\nprint(reformatDate(date))\n","sub_path":"LeetCode-Python/1507 Reformat Date.py","file_name":"1507 Reformat Date.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"458126122","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib.auth import get_user_model\nfrom django.views.generic import DetailView, View, CreateView\nfrom django.shortcuts import render, redirect, reverse\nfrom django.http import HttpResponseRedirect\nfrom django.db.models import Q\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import datetime\nfrom django.contrib.auth import get_user_model\nfrom .models import Profile, Group, GroupInvite\nfrom healthclub.models import HealthClub, HealthDiary\nfrom .forms import RegisterNormalForm, RegisterMasterForm\n\nUser = get_user_model()\n\n@login_required(login_url = \"/login\")\ndef group(request):\n user = request.user\n groups = user.is_group.all()\n group_all = Group.objects.all()\n groupinvites = GroupInvite.objects.filter(Q(new_member = user) & Q(confirmed = False)).all()\n \n notgroups = []\n for group in group_all:\n if user not in group.members.all() and group.public:\n notgroups.append(group)\n context = {'groups' : groups, 'notgroups' : notgroups, 'groupinvites' : groupinvites}\n return render(request, 'group.html', context)\n\ndef group_invite_accept(request, pk):\n group_invite = GroupInvite.objects.get(id = pk)\n group_invite.confirmed = True\n group_invite.save()\n group_invite.delete()\n group = group_invite.group\n group.members.add(request.user)\n group.save()\n \n return HttpResponseRedirect(reverse('profiles:group'))\n\ndef group_invite_decline(request, pk):\n group_invite = GroupInvite.objects.get(id = pk)\n group_invite.confirmed = True\n group_invite.save()\n group_invite.delete()\n \n return HttpResponseRedirect(reverse('profiles:group'))\n \ndef group_detail(request, pk):\n group = Group.objects.get(id = pk)\n groupname = group.name\n groupid = group.id\n group_masters = group.group_masters.all()\n members = group.members.all().order_by('-profile__exercised')\n context = {'groupname' : groupname, 'groupid' : groupid, 'members' : members, 'group_masters' : group_masters}\n return render(request, 'group_detail.html', context)\n\ndef add_group_master(request, groupid, userid):\n new_master = User.objects.get(id = userid)\n group = Group.objects.get(id = groupid)\n group.group_masters.add(new_master)\n group.save()\n return HttpResponseRedirect(reverse('profiles:group_detail', kwargs={'pk' : groupid}))\n\ndef group_update(request, pk):\n group = Group.objects.get(id=pk)\n users = Profile.objects.filter(is_health_master=False).exclude(user__is_group=group)\n healthclub = request.user.profile.healthclub\n same_healthclub_users = users.filter(healthclub = request.user.profile.healthclub)\n groupname = group.name\n groupid = pk\n print(same_healthclub_users)\n\n members=[]\n for user in users:\n if len(user.user.is_group.all().filter(id=pk)) == 0:\n members.append(user)\n context = {'same_healthclub_users':same_healthclub_users, 'healthclub':healthclub, 'users' : users, 'groupname' : groupname, 'groupid' : groupid, 'members' : members}\n return render(request, 'group_update.html', context)\n\ndef group_register(request, pk):\n group = Group.objects.get(id = pk)\n if request.user not in group.members.all():\n group.members.add(request.user)\n return HttpResponseRedirect('/profiles/group/detail/{}/'.format(pk))\n\ndef group_update_confirm(request, pk):\n if request.method=='POST':\n group = Group.objects.get(id=pk)\n groupname = request.POST.get(\"groupname\")\n username = request.POST.getlist(\"username\")\n search_ids = 
request.POST.getlist(\"search_ids\")\n public = str(request.POST.get(\"public\"))\n \n for search_id in search_ids:\n username.append(search_id)\n \n if public==\"private\":\n group.public = False\n\n group.name = groupname\n for user in username:\n new_user = User.objects.get(username = user)\n \n check = GroupInvite.objects.filter(\n inviter = request.user,\n new_member = new_user,\n group = group,\n confirmed = False\n )\n if len(check) == 0:\n group_invite = GroupInvite.objects.create(\n inviter = request.user,\n new_member = new_user,\n group = group,\n confirmed = False\n )\n group_invite.save()\n group.save()\n return HttpResponseRedirect('/profiles/group/detail/{}/'.format(pk))\n\ndef group_exit(request, pk):\n group = Group.objects.get(id=pk)\n user_id = request.user.id\n group.members.set(group.members.all().exclude(id=user_id))\n group.group_masters.set(group.group_masters.all().exclude(id=user_id))\n group.save()\n if len(group.members.all()) == 0:\n group.delete()\n return HttpResponseRedirect('/profiles/group/')\n\ndef group_create(request):\n users = Profile.objects.filter(is_health_master=False).exclude(user=request.user).all()\n same_healthclub_users = users.filter(healthclub = request.user.profile.healthclub)\n healthclub = request.user.profile.healthclub\n context = {'users' : users, 'same_healthclub_users' : same_healthclub_users, 'healthclub' : healthclub}\n\n return render(request, 'group_create.html', context)\n\ndef group_create_confirm(request):\n context = {}\n \n if request.method==\"POST\":\n name = request.POST.get(\"groupname\")\n username = request.POST.getlist(\"username\")\n search_ids = request.POST.getlist(\"search_ids\")\n public = request.POST.get(\"public\")\n group = Group.objects.create(name = name)\n \n if public == \"private\":\n group.public = False\n \n for search_id in search_ids:\n new_user = User.objects.get(username = search_id)\n \n groupinvite = GroupInvite.objects.create(\n inviter = request.user,\n new_member = new_user,\n confirmed = False,\n group = group\n )\n groupinvite.save()\n #group.members.add(new_user)\n \n for user in username:\n new_user = User.objects.get(username = user)\n \n groupinvite = GroupInvite.objects.create(\n inviter = request.user,\n new_member = new_user,\n confirmed = False,\n group = group\n )\n groupinvite.save()\n #group.members.add(new_user)\n group.members.add(request.user)\n group.group_masters.add(request.user)\n group.save()\n\n return HttpResponseRedirect(reverse('profiles:group'))\n \ndef mypage(request):\n user = request.user\n profile = Profile.objects.get(user=user)\n last_diary = HealthDiary.objects.filter(user = user)\n \n expire_date = profile.expire_date #When expire_date Expires\n if expire_date != None:\n if expire_date < datetime.now():\n profile.healthclub = None\n profile.expire_date = None\n profile.start_date = None\n profile.save()\n if(len(last_diary)==0):\n pass\n else:\n last_diary = last_diary.last().timestamp\n if last_diary.month < datetime.now().month:\n profile.exercised = 0\n profile = Profile.objects.get(user=user)\n record = HealthDiary.objects.filter(user=user)\n context = {'profile' : profile, 'username' : user.username, 'record' : record, 'real_name' : user.profile.real_name}\n return render(request, 'mypage.html', context)\n\n# Create your views here.\nclass RegisterViewNormal(CreateView):\n form_class = RegisterNormalForm\n template_name = 'registration/register_normal.html'\n success_url = '/login/'\n \nclass RegisterViewMaster(CreateView):\n form_class = 
RegisterMasterForm\n template_name = 'registration/register_master.html'\n success_url = '/login/'\n","sub_path":"profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"224529404","text":"import numpy as np\nimport pandas as pd\nimport os\nfrom konlpy.tag import Okt\nfrom collections import Counter\nimport warnings\n\nfileSet = [['TommyJeans_2021_Test_data_2.xlsx',1117],['TommyJeans_2021_Test_data_3.xlsx',536],['RA_2021_Test_data_4.xlsx',1217]]\n\nfor i in range(3):\n#엑셀 불러오기\n # base_dir = 'VitaminPlusChat/static/excel'\n excel_file = fileSet[i][0]\n print(excel_file)\n # excel_dir = os.path.join(base_dir, excel_file)\n\n with warnings.catch_warnings(record=True): \n warnings.simplefilter(\"always\")\n df_from_excel = pd.read_excel(excel_file,\n sheet_name = 'chatting list',\n header = 0,\n dtype = {'index':str,\n 'liveTime':str,\n 'time':str,\n 'memberDiff':str,\n 'chat':str,\n 'userCode':str,\n 'userGroup':str,\n 'userId':str,\n 'userName':str,\n 'userNick':str,\n 'phone':str},\n index_col = 'index',\n na_values = 'NaN',\n thousands='',\n nrows = fileSet[i][1],\n comment = '#',\n engine=\"openpyxl\")\n\n\n m_list = list(df_from_excel['chat'])\n text = \" \".join(m_list)\n print(text)\n\n #명사 추출\n okt = Okt()\n noun = okt.nouns(text)\n\n for i,v in enumerate(noun):\n if len(v) < 2:\n noun.pop(i)\n\n count = Counter(noun)\n noun_list = count.most_common(100)\n\n f = open('Okt'+str(i)+'.txt', 'w',encoding='UTF-8')\n f.write(noun_list)\n f.close()","sub_path":"VitaminPlusChat/WEB/templates/keyword/okttest.py","file_name":"okttest.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"166316849","text":"#仮にsample.txtを使用\nfile = \"../data/sample.txt\"\n\nwith open(file) as fileobj:\n text = fileobj.read()\n wordlist = text.split(\" \") #区切り文字を基準に全単語を求める\n worddict = dict.fromkeys(wordlist, 0) #全単語を辞書化し、値の初期値を0とする\n \n #全単語を確認し、辞書のキーごとにカウントアップする\n for word in wordlist:\n worddict[word] += 1\n \n for key, count in worddict.items():\n print(f\"{key}は{count}回使われました。\")","sub_path":"Python_Course/Assignment/Section13_reading-text-file.py","file_name":"Section13_reading-text-file.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"645344124","text":"import logging.config\nimport datetime\nfrom settings import LOGGING\nfrom orm_mapping import *\nfrom base import session\nfrom messaging_utils import MessagingUtils\nfrom instrument_variable_utils import InstrumentVariablesUtils\nfrom variable_utils import VariableUtils\nfrom status_utils import StatusUtils\n\n# Set up logging and attach the logging to the right part of the config.\nlogging.config.dictConfig(LOGGING)\nlogger = logging.getLogger(\"queue_processor\")\n\n\nclass ReductionRunUtils(object):\n @staticmethod\n def cancel_run(reduction_run):\n \"\"\"\n Try to cancel the run given, or the run that was scheduled as the next retry of the run.\n When we cancel, we send a message to the backend queue processor, telling it to ignore this run if it arrives.\n This is most likely through a delayed message through ActiveMQ's scheduler.\n We also set statuses and error messages. If we can't do any of the above, we set the variable (retry_run.cancel)\n that tells the frontend to not schedule another retry if the next run fails.\n \"\"\"\n\n def set_cancelled(run):\n run.message = \"Run cancelled by user\"\n run.status = StatusUtils().get_error()\n run.finished = datetime.datetime.now()\n run.retry_when = None\n run.save()\n\n # This is the queued run, send the message to queueProcessor to cancel it\n if reduction_run.status == StatusUtils().get_queued():\n MessagingUtils().send_cancel(reduction_run)\n set_cancelled(reduction_run)\n\n # Otherwise this run has already failed, and we're looking at a scheduled rerun of it\n # We don't actually have a rerun, so just ensure the retry time is set to \"Never\" (None)\n elif not reduction_run.retry_run:\n reduction_run.retry_when = None\n\n # This run is being queued to retry, so send the message to queueProcessor to cancel it, and set it as cancelled\n elif reduction_run.retry_run.status == StatusUtils().get_queued():\n MessagingUtils().send_cancel(reduction_run.retry_run)\n set_cancelled(reduction_run.retry_run)\n\n # We have a run that's retrying, so just make sure it doesn't retry next time\n elif reduction_run.retry_run.status == StatusUtils().get_processing():\n reduction_run.cancel = True\n reduction_run.retry_run.cancel = True\n\n # The retry run already completed, so do nothing\n else:\n pass\n\n # save the run states we modified\n reduction_run.save()\n if reduction_run.retry_run:\n reduction_run.retry_run.save()\n\n @staticmethod\n def create_retry_run(reduction_run, script=None, variables=None, delay=0, username=None):\n \"\"\"\n Create a run ready for re-running based on the run provided. 
If variables (RunVariable) are provided, copy them\n and associate them with the new one, otherwise use the previous run's.\n If a script (as a string) is supplied then use it, otherwise use the previous run's.\n \"\"\"\n # find the previous run version, so we don't create a duplicate\n last_version = -1\n for run in session.query(ReductionRun).filter_by(experiment=reduction_run.experiment,\n run_number=reduction_run.run_number).all():\n last_version = max(last_version, run.run_version)\n\n # get the script to use:\n script_text = script if script is not None else reduction_run.script\n\n # create the run object and save it\n new_job = ReductionRun(run_number=reduction_run.run_number,\n run_version=last_version + 1,\n run_name=\"\",\n experiment=reduction_run.experiment,\n instrument=reduction_run.instrument,\n script=script_text,\n status=StatusUtils().get_queued(),\n created=datetime.datetime.now(),\n last_updated=datetime.datetime.now(),\n message=\"\",\n started_by=username,\n cancel=0,\n hidden_in_failviewer=0,\n admin_log=\"\",\n reduction_log=\"\"\n )\n\n try:\n session.add(new_job)\n session.commit()\n\n reduction_run.retry_run = new_job\n reduction_run.retry_when = datetime.datetime.now() + datetime.timedelta(seconds=delay if delay else 0)\n session.add(new_job)\n session.commit()\n\n data_locations = session.query(DataLocation).filter_by(reduction_run_id=reduction_run.id).all()\n\n # copy the previous data locations\n for data_location in data_locations:\n new_data_location = DataLocation(file_path=data_location.file_path, reduction_run=new_job)\n session.add(new_data_location)\n session.commit()\n\n if variables is not None:\n # associate the variables with the new run\n for var in variables:\n var.reduction_run = new_job\n session.add(var)\n session.commit()\n else:\n # provide variables if they aren't already\n InstrumentVariablesUtils().create_variables_for_run(new_job)\n\n return new_job\n\n except:\n session.delete(new_job)\n session.commit()\n raise\n\n @staticmethod\n def get_script_and_arguments(reduction_run):\n \"\"\"\n Fetch the reduction script from the given run and return it as a string, along with a dictionary of arguments.\n \"\"\"\n script = reduction_run.script\n run_variables = (session.query(RunJoin).filter_by(reduction_run=reduction_run)).all()\n\n standard_vars, advanced_vars = {}, {}\n for variables in run_variables:\n value = VariableUtils().convert_variable_to_type(variables.value, variables.type)\n if variables.is_advanced:\n advanced_vars[variables.name] = value\n else:\n standard_vars[variables.name] = value\n\n arguments = {'standard_vars': standard_vars, 'advanced_vars': advanced_vars}\n\n return script, arguments\n","sub_path":"QueueProcessors/QueueProcessor/utils/reduction_run_utils.py","file_name":"reduction_run_utils.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"119237648","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: fei.wang\n@license: (C) Copyright 2017-2020, United Imaging Intelligence Company (UII)\n@contact: fei.wang@united-imaing.com\n@software: UII\n@file: xml_psudo_parser.py\n@time: 2018/10/03 15:22\n@desc:\n\"\"\"\n\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\n\n\ndef psudo_parser(filename):\n \"\"\"\n trans xml to a dict\n :param filename:xml path\n :return:dict\n \"\"\"\n DOMTree = xml.dom.minidom.parse(filename)\n root = DOMTree.documentElement\n dic = dict()\n rgb_content = []\n opac_content = []\n\n color_list = root.getElementsByTagName(\"LUT\")\n lut_name = root.getAttribute(\"Type\")\n color_level = root.getAttribute(\"ColorLevel\")\n\n color_items = root.getElementsByTagName(\"Color\")\n point_value = 0\n for item in color_items:\n if 256 > point_value >= 0:\n # print(item.getAttribute(\"Point\"))\n rgb_content.append(point_value)\n opac_content.append(point_value)\n opac_content.append(1.0)\n if item.hasAttribute(\"Red\"):\n rgb_content.append(int(item.getAttribute(\"Red\"))/255.0)\n if item.hasAttribute(\"Green\"):\n rgb_content.append(int(item.getAttribute(\"Green\"))/255.0)\n if item.hasAttribute(\"Blue\"):\n rgb_content.append(int(item.getAttribute(\"Blue\"))/255.0)\n point_value += 1\n\n dic[\"name\"] = lut_name\n dic[\"color_level\"] = color_level\n dic[\"rgb\"] = rgb_content\n dic[\"opacity\"] = opac_content\n\n return dic\n","sub_path":"visualization/lut_psudo_parser.py","file_name":"lut_psudo_parser.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"73642429","text":"import random\n\nN = 100\narr = [x for x in range(0, N)]\n\ndef gen_random_array(M):\n global arr, N\n if M > N:\n print('M is too big')\n return\n\n for i in range(0, M):\n x = random.randint(i, N)\n tmp = arr[i]\n arr[i] = arr[x]\n arr[x] = tmp\n\n return arr[0:M]\n\nprint(gen_random_array(10))\n","sub_path":"18/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"96308886","text":"class FCGrammarChecker:\n # not using currently because of recursion; use SQLChecker.py instead\n Grammar_Definition_Values = {}\n optional_Keys_Position = []\n optional_Definition_Places_Values = {}\n compulsary_Keys_Position = []\n compulsary_Definition_Places_Values = {}\n Total_Grammar_keys_Position = {}\n Position = [0]\n grammarVals = []\n\n def Definition(self, name, values):\n self.Grammar_Definition_Values[name] = values\n self.grammarVals.append(values)\n\n def grammar_input_Parser(self, requirement, name):\n if requirement == 'O':\n self.optional_Keys_Position.append(self.Position[0])\n self.optional_Definition_Places_Values[self.Position[0]] = name\n else:\n self.compulsary_Keys_Position.append(self.Position[0])\n self.compulsary_Definition_Places_Values[self.Position[0]] = name\n self.Total_Grammar_keys_Position[self.Position[0]] = name\n self.Position[0] = self.Position[0] + 1\n\n def getInput(self):\n self.Definition(\"First_Name\",\n [\"Jaya Sriram\", \"Sri Krishna Kireeti\", \"Parinitha\", \"Satya Krishna\", \"Mokshitha\", \"Mohaneesh\"])\n self.Definition(\"Last_Name\", [\".Ganeshna\", \".Kotari\", \".Achanta\"])\n self.Definition(\"@\", [\"@\"])\n self.Definition(\"Mail\", [\"gmail\", \"outlook\", \"hotmail\"])\n self.Definition(\"ending\", [\".com\", \".in\", \".ca\"])\n self.Definition(\"Enter\", [\"--\", \"#\", \"/*\"])\n self.grammar_input_Parser('C', \"First_Name\")\n self.grammar_input_Parser('O', \"Last_Name\")\n self.grammar_input_Parser('C', \"@\")\n self.grammar_input_Parser('C', \"Mail\")\n self.grammar_input_Parser('C', \"ending\")\n self.grammar_input_Parser('O', \"Enter\")\n\n def grammarChecker(self, stringAsList):\n self.getInput()\n grammarvalcount = 0\n temp = 0\n if len(self.compulsary_Keys_Position) <= len(stringAsList):\n for i in range(len(stringAsList)):\n if stringAsList[i] in self.grammarVals[grammarvalcount]:\n grammarvalcount = grammarvalcount + 1\n elif stringAsList[i] not in self.grammarVals[grammarvalcount]:\n if grammarvalcount in self.optional_Keys_Position:\n continue\n else:\n temp = 1\n break\n else:\n temp = 1\n if temp == 0:\n print(\"Grammar Parsed\")\n else:\n print(\"Grammar not parsed\")\n\n\nif __name__ == \"__main__\":\n k = FCGrammarChecker()\n k.grammarChecker([\"Jaya Sriram\", \".Ganeshna\", \"@\", \"gmail\", \".com\"])\n","sub_path":"python/grammar_lib/FuzzyCrawler_Grammar_Checker.py","file_name":"FuzzyCrawler_Grammar_Checker.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"504122248","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 25 15:00:35 2017\n\n@author: jerome\n\"\"\"\nimport cv2\nimport numpy as np\nimport os, sys, getopt\nimport undistort\nimport warper\nimport preprocess\nimport find_lanes\n\nfrom moviepy.editor import VideoFileClip\n\nleft_fit = ''\nright_fit = ''\nmask = ''\nmtx = ''\ndist = ''\n\ndef pipeline(img):\n global left_fit, right_fit, mask, mtx, dist\n #src = np.float32([[316,650],[999,650],[537,490],[751,490]])\n #dst = np.float32([[316,650],[999,650],[316,490],[999,490]])\n src = np.float32(\n [[(img.shape[1] / 2) - 55, img.shape[0] / 2 + 100],\n [((img.shape[1] / 6) - 10), img.shape[0]],\n [(img.shape[1] * 5 / 6) + 60, img.shape[0]],\n [(img.shape[1] / 2 + 55), img.shape[0] / 2 + 100]])\n dst = np.float32(\n [[(img.shape[1] / 4), 0],\n [(img.shape[1] / 4), img.shape[0]],\n [(img.shape[1] * 3 / 4), img.shape[0]],\n [(img.shape[1] * 3 / 4), 0]])\n\n und_img = undistort.undistort(img, mtx, dist)\n war_img = warper.warper(und_img,(und_img.shape[1], und_img.shape[0]),src,dst)\n binary_warped = preprocess.process_image(war_img)\n outimage, left_fit, right_fit, mask,curverad,locx = find_lanes.find_lane(binary_warped,left_fit,right_fit,mask)\n cv2.putText(img,\"Radius of Curvature = \"+ str(curverad)+\"(m)\",(30,30),cv2.FONT_HERSHEY_SIMPLEX,1,(200,255,155))\n if locx < 0 :\n cv2.putText(img,\"Vehicle is \"+ '{:.2f}'.format(abs(locx))+\"m left of center\",(30,60),cv2.FONT_HERSHEY_SIMPLEX,1,(200,255,155))\n else :\n cv2.putText(img,\"Vehicle is \"+ '{:.2f}'.format(abs(locx))+\"m right of center\",(30,60),cv2.FONT_HERSHEY_SIMPLEX,1,(200,255,155))\n return cv2.addWeighted(img, 1,warper.warper(outimage,(outimage.shape[1], outimage.shape[0]),dst,src), 0.5, 0)\n\ndef main(argv):\n global mtx,dist\n ifile = ''\n ofile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print ('project4.py -i
-o ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('project4.py -i -o ')\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n ifile = arg\n elif opt in (\"-o\", \"--ofile\"):\n ofile = arg\n if ifile == '' or ifile == '' :\n print ('project4.py -i -o ')\n sys.exit()\n mtx,dist = undistort.load_calib_param()\n if ifile.split(\".\")[-1] == \"mp4\":\n white_output = os.path.abspath(os.path.curdir)+ofile\n clip = VideoFileClip(os.path.abspath(os.path.curdir)+ifile)\n white_clip = clip.fl_image(pipeline) #NOTE: this function expects color images!!\n #white_clip = clip.fx(pipeline, mtx, dist)\n white_clip.write_videofile(white_output, audio=False)\n else :\n img = cv2.imread(os.path.abspath(os.path.curdir)+ifile)\n cv2.imwrite(os.path.abspath(os.path.curdir)+ofile,pipeline(img))\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"CarND-Project4_Advanced-Lane-Lines/project4.py","file_name":"project4.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"53701540","text":"import pytest\nfrom model_mommy import mommy\n\nfrom reqs.management.commands.sync_agencies import Command\nfrom reqs.models import Agency, AgencyGroup\n\n\n@pytest.mark.django_db\ndef test_create_system_groups():\n AgencyGroup.objects.create(slug='cfo-act', name='Alt CFO')\n cmd = Command()\n cmd.create_system_groups()\n\n assert AgencyGroup.objects.count() == 3\n assert AgencyGroup.objects.get(slug='executive').name == 'Executive'\n # name not changed\n assert AgencyGroup.objects.get(slug='cfo-act').name == 'Alt CFO'\n assert AgencyGroup.objects.get(slug='cio-council').name == 'CIO Council'\n\n\n@pytest.mark.django_db\ndef test_sync_row_new():\n cmd = Command()\n cmd.create_system_groups()\n cmd.sync_row({\n 'agencyAbbreviation': None,\n 'agencyCode': '123',\n 'agencyName': 'Aquarius',\n 'agencyType': '5-Other Branches',\n 'CFO_Act': '1',\n 'CIO_Council': None,\n })\n\n assert Agency.objects.count() == 1\n agency = Agency.objects.get()\n assert agency.name == 'Aquarius'\n assert agency.abbr == ''\n assert agency.omb_agency_code == '123'\n assert not agency.nonpublic\n group_slugs = {g.slug for g in agency.groups.all()}\n assert group_slugs == {'cfo-act'}\n\n\n@pytest.mark.django_db\ndef test_sync_row_existing():\n mommy.make(Agency, omb_agency_code='90210')\n assert Agency.objects.count() == 1\n\n cmd = Command()\n cmd.create_system_groups()\n cmd.sync_row({\n 'agencyAbbreviation': 'BH',\n 'agencyCode': '90210',\n 'agencyName': 'New Name Here',\n 'agencyType': '1-CFO Act', # this will be ignored\n 'CFO_Act': '',\n 'CIO_Council': '1',\n })\n\n assert Agency.objects.count() == 1\n agency = Agency.objects.get()\n assert agency.name == 'New Name Here'\n assert agency.abbr == 'BH'\n assert agency.omb_agency_code == '90210'\n assert not agency.nonpublic\n group_slugs = {g.slug for g in agency.groups.all()}\n assert group_slugs == {'cio-council', 'executive'}\n","sub_path":"reqs/tests/sync_agencies_tests.py","file_name":"sync_agencies_tests.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"631797212","text":"#\n# Identify the 10 resources that consume the most bandwidth on the site\n# Part 1: resource_count() calculate each resource consumption (i.e., frequency * bytes)\n# Part 2: top10_resource() get the top 10 resources and consumption\n# #\n\nimport traceback\nimport operator\n\nfrom utility import write_to_file\n\n\n#\n# count resource\n#\ndef resource_count(resource_table, resource, resource_bytes):\n resource = resource.replace(\" HTTP/1.0\", \"\")\n if resource not in resource_table:\n resource_table[resource] = resource_bytes\n else:\n resource_table[resource] += resource_bytes\n return resource_table\n\n\n#\n# write top 10 resource to file\n#\ndef top10_resource(resource_table):\n try:\n for line in resource_table.most_common(10):\n write_to_file('log_output/resources.txt', line[0])\n except ValueError:\n traceback.print_exc()\n\n","sub_path":"src/Feature2.py","file_name":"Feature2.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"481381947","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom airflow.settings import STATE_COLORS\n\n\nclass State:\n \"\"\"\n Static class with task instance states constants and color method to\n avoid hardcoding.\n \"\"\"\n\n # scheduler\n NONE = None # type: None\n REMOVED = \"removed\"\n SCHEDULED = \"scheduled\"\n\n # set by the executor (t.b.d.)\n # LAUNCHED = \"launched\"\n\n # set by a task\n QUEUED = \"queued\"\n RUNNING = \"running\"\n SUCCESS = \"success\"\n SHUTDOWN = \"shutdown\" # External request to shut down\n FAILED = \"failed\"\n UP_FOR_RETRY = \"up_for_retry\"\n UP_FOR_RESCHEDULE = \"up_for_reschedule\"\n UPSTREAM_FAILED = \"upstream_failed\"\n SKIPPED = \"skipped\"\n SENSING = \"sensing\"\n\n task_states = (\n SUCCESS,\n RUNNING,\n FAILED,\n UPSTREAM_FAILED,\n SKIPPED,\n UP_FOR_RETRY,\n UP_FOR_RESCHEDULE,\n QUEUED,\n NONE,\n SCHEDULED,\n SENSING,\n REMOVED,\n )\n\n dag_states = (\n SUCCESS,\n RUNNING,\n FAILED,\n )\n\n state_color = {\n QUEUED: 'gray',\n RUNNING: 'lime',\n SUCCESS: 'green',\n SHUTDOWN: 'blue',\n FAILED: 'red',\n UP_FOR_RETRY: 'gold',\n UP_FOR_RESCHEDULE: 'turquoise',\n UPSTREAM_FAILED: 'orange',\n SKIPPED: 'pink',\n REMOVED: 'lightgrey',\n SCHEDULED: 'tan',\n NONE: 'lightblue',\n SENSING: 'lightseagreen',\n }\n state_color.update(STATE_COLORS) # type: ignore\n\n @classmethod\n def color(cls, state):\n \"\"\"Returns color for a state.\"\"\"\n return cls.state_color.get(state, 'white')\n\n @classmethod\n def color_fg(cls, state):\n \"\"\"Black&white colors for a state.\"\"\"\n color = cls.color(state)\n if color in ['green', 'red']:\n return 'white'\n return 'black'\n\n running = frozenset([RUNNING, SENSING])\n \"\"\"\n A list of states indicating that a task is being executed.\n \"\"\"\n\n finished = frozenset(\n [\n SUCCESS,\n FAILED,\n SKIPPED,\n UPSTREAM_FAILED,\n ]\n )\n \"\"\"\n A list of states indicating a task has reached a terminal state (i.e. 
it has \"finished\") and needs no\n further action.\n\n Note that the attempt could have resulted in failure or have been\n interrupted; or perhaps never run at all (skip, or upstream_failed) in any\n case, it is no longer running.\n \"\"\"\n\n unfinished = frozenset(\n [\n NONE,\n SCHEDULED,\n QUEUED,\n RUNNING,\n SENSING,\n SHUTDOWN,\n UP_FOR_RETRY,\n UP_FOR_RESCHEDULE,\n ]\n )\n \"\"\"\n A list of states indicating that a task either has not completed\n a run or has not even started.\n \"\"\"\n\n failed_states = frozenset([FAILED, UPSTREAM_FAILED])\n \"\"\"\n A list of states indicating that a task or dag is a failed state.\n \"\"\"\n\n success_states = frozenset([SUCCESS, SKIPPED])\n \"\"\"\n A list of states indicating that a task or dag is a success state.\n \"\"\"\n\n\nclass PokeState:\n \"\"\"Static class with poke states constants used in smart operator.\"\"\"\n\n LANDED = 'landed'\n NOT_LANDED = 'not_landed'\n POKE_EXCEPTION = 'poke_exception'\n","sub_path":"airflow/utils/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"313477475","text":"from django.shortcuts import render_to_response\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.http import Http404\nfrom django.template import RequestContext\nfrom consultorio.models import Consultorio\nfrom consultorio.forms import ConsultorioForm\n\n\ndef index(request):\n consultorios = Consultorio.objects.all()\n return render_to_response(\"consultorio/index.html\", {'consultorios': consultorios})\n\n\ndef consultorio_agregar(request):\n if request.method == 'POST':\n f = ConsultorioForm(request.POST)\n if f.is_valid():\n f.save()\n return HttpResponseRedirect('/consultorio/')\n else:\n return render_to_response(\"consultorio/agregar.html\", {'form': f},context_instance=RequestContext(request))\n else:\n f = ConsultorioForm()\n return render_to_response(\"consultorio/agregar.html\", {'form': f},context_instance=RequestContext(request))\n\ndef consultorio_modificar(request, consultorio_id):\n try:\n c = Consultorio.objects.get(id=consultorio_id)\n if request.method == 'POST':\n f = ConsultorioForm(request.POST)\n if f.is_valid():\n c.numero = f.cleaned_data['numero']\n c.ubicacion = f.cleaned_data['ubicacion']\n c.hora_inicio = f.cleaned_data['hora_inicio']\n c.hora_cierre = f.cleaned_data['hora_cierre']\n c.save()\n return HttpResponseRedirect('/consultorio/') \n else:\n return render_to_response(\"consultorio/agregar.html\", {'form': f},context_instance=RequestContext(request))\n else:\n f = ConsultorioForm(initial = {\n 'numero' : c.numero,\n 'ubicacion' : c.ubicacion,\n 'hora_inicio' : c.hora_inicio,\n 'hora_cierre' : c.hora_cierre\n })\n return render_to_response(\"consultorio/modificar.html\", {'form': f},context_instance=RequestContext(request))\n \n except Consultorio.DoesNotExist:\n return HttpResponseRedirect('/consultorio/')\n \ndef consultorio_eliminar(request, consultorio_id):\n try:\n c = Consultorio.objects.get(id=consultorio_id)\n c.delete()\n return HttpResponseRedirect('/consultorio/')\n except Consultorio.DoesNotExist:\n return HttpResponseRedirect('/consultorio/')\n\n","sub_path":"consultorio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"514849273","text":"from unittest.mock import MagicMock\n\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom coin_system.factories import CountryDefaultConfigFactory\nfrom coin_user.factories import ContactFactory\nfrom coin_user.views import VerifyEmailView\nfrom common.constants import COUNTRY, FIAT_CURRENCY, LANGUAGE\nfrom common.tests.utils import AuthenticationUtils\n\n\nclass ProfileTests(APITestCase):\n def setUp(self):\n self.auth_utils = AuthenticationUtils(self.client)\n self.user = self.auth_utils.create_exchange_user()\n self.auth_utils.login()\n\n def test_profile(self):\n url = reverse('user:profile')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_update_profile(self):\n url = reverse('user:profile')\n updated_name = 'FirstName'\n response = self.client.patch(url, {\n 'phone_number': '1234567890',\n 'first_name': 'FirstName',\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n user = User.objects.get(username=self.user.user.username)\n self.assertEqual(user.first_name, updated_name)\n\n\nclass SignUpTests(APITestCase):\n def setUp(self):\n CountryDefaultConfigFactory(country=COUNTRY.PH, currency=FIAT_CURRENCY.PHP, language=LANGUAGE.en)\n VerifyEmailView.send_verification_email = MagicMock(return_value=None)\n\n def test_sign_up(self):\n url = reverse('user:sign-up')\n\n response = self.client.post(url, data={\n 'username': 'dev@exchange.com',\n 'password': '12345678',\n 'name': 'Username',\n 'country': COUNTRY.PH,\n 'referral': ''\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_sign_up_referral(self):\n url = reverse('user:sign-up')\n\n self.auth_utils = AuthenticationUtils(self.client)\n self.user = self.auth_utils.create_exchange_user()\n\n response = self.client.post(url, data={\n 'username': 'dev@exchange.com',\n 'password': '12345678',\n 'name': 'Username',\n 'country': COUNTRY.PH,\n 'referral': self.user.name\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n\nclass VerificationTests(APITestCase):\n pass\n\n\nclass ContactTests(APITestCase):\n def setUp(self):\n self.auth_utils = AuthenticationUtils(self.client)\n self.user = self.auth_utils.create_exchange_user()\n self.auth_utils.login()\n\n def test_add_contact(self):\n url = reverse('user:contact-list')\n\n response = self.client.post(url, data={\n 'name': 'Contact',\n 'email': 'contact@contact.com',\n 'phone_number': '1234567890',\n 'description': 'Some description',\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_update_contact(self):\n contact = ContactFactory(user=self.user)\n url = reverse('user:contact-detail', kwargs={'pk': contact.id})\n\n response = self.client.put(url, data={\n 'name': 'Contact',\n 'email': 'contact@contact.com',\n 'phone_number': '1234567890',\n 'description': 'Some description',\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass ExchangeUserLogTests(APITestCase):\n def setUp(self):\n self.auth_utils = AuthenticationUtils(self.client)\n self.user = self.auth_utils.create_exchange_user()\n self.auth_utils.login()\n\n def test_add_contact(self):\n url = reverse('user:exchangeuserlog-list')\n\n response = self.client.post(url, data={\n 'name': 'Log',\n }, format='json')\n\n 
self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n","sub_path":"src/coin_user/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"342004120","text":"# This establishes Redis connection\nfrom redis import Redis\nfrom rq import Queue\nimport settings\nfrom tasks import add_article\n\nredis = Redis(host=settings.REDIS_CONFIG[\"host\"], port=settings.REDIS_CONFIG[\"port\"], db=settings.REDIS_CONFIG[\"db\"])\n\n\ndef scrape_news(existing_ids=None):\n from scrapers import scrapers\n queue = Queue('sources', connection=redis)\n for scraper in scrapers:\n queue.enqueue(parse_source, scraper, existing_ids)\n\n\ndef parse_source(scraper, existing_ids):\n articles = scraper.parse_source(existing_ids)\n queue = Queue('articles', connection=redis)\n if articles:\n for article in articles:\n queue.enqueue(parse_article, scraper, article)\n\n\ndef parse_article(scraper, article_url):\n article = scraper.parse_article(article_url)\n if article:\n add_article.add_article(article)\n","sub_path":"news_buddy/tasks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"425107643","text":"\"\"\"\nProgram: Caesar Cipher Decrypter\nAuthor: Kevin Tran\n\nThe purpose of this program is to decrypt a message\n\n1. Get message and distance inputs\n2. Loop to convert all characters according to ASCII values plus distance\n3. Print the resulting message\n\"\"\"\n\nmessage = input(\"Enter the message you would like to decrypt: \")\ndistance = int(input(\"Enter a distance value: \"))\ndMessage = \"\"\n\nfor ch in message:\n ordValue = ord(ch)\n eValue = ordValue - distance\n dMessage += chr(eValue)\nprint(dMessage)\n","sub_path":"decryption.py","file_name":"decryption.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"510044407","text":"from multiprocessing import Process, Queue\nfrom time import sleep\nimport tensorflow as tf\nimport random\nimport sys\nimport math\nimport numpy as np\nimport scipy\nimport scipy.misc\nfrom PIL import Image\nfrom db.data_handler import DataHandler\n\nclass BezierDataHandler(DataHandler):\n def __init__(self, batch_size, target_size): # Not use datafiles\n super(BezierDataHandler, self).__init__(batch_size, target_size)\n self.queue = Queue(40)\n self.msg_queue = Queue(4)\n self.procs = []\n self.start_threads()\n\n def getPt(self, n1, n2, perc):\n diff = n2 - n1\n return n1 + diff*perc\n\n def drawLine(self, points, canvas):\n h, w, _ = canvas.shape\n x1, y1, x2, y2, x3, y3, x4, y4 = points\n #brush_r = np.random.randint(0, 3)\n brush_r = 0\n for i in range(0, 1000):\n perc = i / 1000.0\n xa = self.getPt(x1, x2, perc)\n ya = self.getPt(y1, y2, perc)\n xb = self.getPt(x2, x3, perc)\n yb = self.getPt(y2, y3, perc)\n xc = self.getPt(x3, x4, perc)\n yc = self.getPt(y3, y4, perc)\n\n xm = self.getPt(xa, xb, perc)\n ym = self.getPt(ya, yb, perc)\n xn = self.getPt(xb, xc, perc)\n yn = self.getPt(yb, yc, perc)\n\n x = int(self.getPt(xm, xn, perc))\n y = int(self.getPt(ym, yn, perc))\n\n if brush_r == 0:\n canvas[y, x] = 1\n elif brush_r == 1:\n for x_ in range(max(0, x-1), min(w, x+1)):\n for y_ in range(max(0, y-1), min(h, y+1)):\n canvas[y_, x_] = 1\n else:\n for x_ in range(max(0, x-2), min(w, x+2)):\n for y_ in range(max(0, y-2), min(h, y+2)):\n canvas[y_, x_] = 1\n\n\n def drawCircle(self, canvas):\n h, w, _ = canvas.shape\n r = np.random.randint(5, 50, 1)\n a, b = np.random.randint(r, 256-r, 2)\n #brush_r = np.random.randint(0, 3)\n brush_r = 0\n for i in range(0, 1000):\n t = math.pi * 2.0 * i / 1000.0\n x = int(a + r * math.cos(t))\n y = int(b + r * math.sin(t))\n\n if brush_r == 0:\n canvas[y, x] = 1\n elif brush_r == 1:\n for x_ in range(max(0, x-1), min(w, x+1)):\n for y_ in range(max(0, y-1), min(h, y+1)):\n canvas[y_, x_] = 1\n else:\n for x_ in range(max(0, x-2), min(w, x+2)):\n for y_ in range(max(0, y-2), min(h, y+2)):\n canvas[y_, x_] = 1\n\n def _draw_canvas(self):\n canvas_size = int(self.target_size * 1.5)\n canvas = np.zeros([canvas_size, canvas_size, 1])\n # first, randomly select three points\n points = np.random.randint(0, canvas_size, 8)\n x1, y1, x2, y2, x3, y3, x4, y4 = points\n\n self.drawLine(points, canvas)\n \n n_line = np.random.randint(3, 7)\n for i in range(n_line):\n points = np.random.randint(0, canvas_size, 8)\n points[0] = x4\n points[1] = y4\n x1, y1, x2, y2, x3, y3, x4, y4 = points\n\n self.drawLine(points, canvas)\n\n n_circle = np.random.randint(5, 10)\n for i in range(n_circle):\n self.drawCircle(canvas)\n\n\n topleft_y, topleft_x = \\\n np.random.randint(0, canvas_size - self.target_size, 2)\n\n cropped_canvas = canvas[topleft_y:topleft_y+self.target_size, \n topleft_x:topleft_x+self.target_size]\n # invert color and zero centering\n cropped_canvas = 1.0 - cropped_canvas\n cropped_canvas = cropped_canvas*2.0 - 1.0\n return cropped_canvas\n\n def next(self):\n output = self.queue.get()\n return output\n\n def _enqueue_op(self, queue, msg_queue):\n while msg_queue.qsize() == 0:\n sz = self.target_size\n output = np.zeros([self.batch_size, sz, sz, 1])\n for i in range(self.batch_size):\n output[i] = self._draw_canvas()\n queue.put(output)\n\n def start_threads(self):\n print(\"start threads called\")\n for i in range(2):\n proc = Process(target=self._enqueue_op, args=(self.queue, self.msg_queue))\n self.procs.append(proc)\n 
proc.start()\n\n print(\"enqueue thread started!\")\n\n\n def get_batch_shape(self):\n return (self.batch_size, self.target_size, self.target_size, 1)\n\n def kill(self):\n self.msg_queue.put(\"illkillyou\")\n for proc in self.procs:\n proc.terminate()\n proc.join()\n print(\"bezier data killed\")\n \nif __name__ == '__main__':\n test = BezierDataHandler(10, 256)\n canvases = test.next()\n print(canvases[0])\n","sub_path":"srcs/db/bezier_data_handler.py","file_name":"bezier_data_handler.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"361218195","text":"import random\n\n\ndef get_histogram(word_list):\n # Output: { fish: { blue: 1, red: 1, two: 1 }, blue: {'fish': 1} }\n histogram = {}\n\n for index, word in enumerate(word_list):\n\n if index == len(word_list) - 1:\n break\n\n next_word = word_list[index + 1]\n if word not in histogram:\n histogram[word] = {next_word: 1}\n else:\n if next_word not in histogram[word]:\n histogram[word][next_word] = 1\n else:\n histogram[word][next_word] += 1\n\n return histogram\n\ndef generate_sentence(sentence_length, histogram):\n pass\n\nword_list = ['one','fish', 'two', 'fish', 'blue', 'fish', 'red', 'fish', 'house','fish', 'house', 'strawberry']\n\nresult = get_histogram(word_list)\nprint(result)\n","sub_path":"coursework/classwork/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"309739752","text":"#!/usr/bin/env python\n#\n# igcollect - Redis\n#\n# Copyright (c) 2016 InnoGames GmbH\n#\n\nfrom argparse import ArgumentParser\nfrom subprocess import check_output\nfrom time import time\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('--prefix', default='redis')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n redis_info = check_output(['redis-cli', '-a', redis_pwd(), 'info'])\n\n redis_stats = {}\n for x in redis_info.splitlines():\n if x.find(b':') != -1:\n key, value = x.split(b':')\n redis_stats[key.decode('utf-8')] = value.decode('utf-8')\n\n template = args.prefix + '.{} {} ' + str(int(time()))\n headers = (\n 'total_connections_received',\n 'total_commands_processed',\n 'keyspace_hits',\n 'keyspace_misses',\n 'used_memory',\n 'used_cpu_sys',\n 'used_cpu_user',\n 'used_cpu_sys_children',\n 'used_cpu_user_children',\n )\n for metric in headers:\n print(template.format(metric, redis_stats[metric]))\n\n\ndef redis_pwd():\n \"\"\"Get the Redis password from the configuration\"\"\"\n with open(\"/etc/redis/redis.conf\") as fd:\n secret_cfg = fd.read().splitlines()\n\n for line in secret_cfg:\n line = line.strip()\n if line.startswith(\"requirepass\"):\n return line.split(\" \")[1].strip()\n return ''\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"563996188","text":"#!/usr/bin/env python\n#\n# Copyright Codeplay Software Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use these files except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n# Automatically generate the batchnorm test cases using TensorFlow to provide\n# the expected values.\n\nfrom __future__ import print_function\n\ntry:\n # With python3 `zip` returns an iterator, however with python2, use\n # `itertools.izip` instead\n import itertools.izip as zip\nexcept ImportError:\n pass\n\nimport itertools\nimport os\nfrom collections import namedtuple\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python.framework.ops import get_gradient_function\nimport numpy as np\n\nimport helpers\n\nBATCHES = [1, 3]\nCHANNELS = [1, 5, 8]\nIN_SIZES = [1, 8, 9] # Assumes square inputs in the spatial dimensions.\nTEST_TYPES = ['batchnorm']\nDIRECTIONS = ['forward']\nOPERATIONS = ['Training', 'Inference']\n\nINCLUDES = r\"\"\"\n#include \n\n#include \"sycldnn/data_format.h\"\n\n#include \"sycldnn/batchnorm/direction.h\"\n#include \"sycldnn/batchnorm/params.h\"\n\n#include \"test/batchnorm/batchnorm_fixture.h\"\n#include \"test/types/kernel_data_types.h\"\n\n#include \"\"\"\nTYPED_TEST_CASE_DECL_TPL = r\"\"\"\nusing namespace sycldnn; // NOLINT(google-build-using-namespace)\ntemplate \nusing {test_case} = BatchNormFixture;\nTYPED_TEST_CASE({test_case}, types::GTestKernelDataTypes);\"\"\"\n\nTestCaseParams = namedtuple('TestCaseParams', ['test_type', 'direction', 'operation'])\nTestParams = namedtuple('TestParams', ['in_shape', 'data_format'])\n\nTENSORFLOW_OPS_MAP = {\n 'batchnorm': tf.nn.batch_normalization,\n}\n\ndef get_forward_results(max_val, input_shape):\n \"\"\"\n Construct and run a Tensorflow graph to compute a forward batchnorm op.\n\n Will create an input tensor of the required size filled with values 1, 2,\n 3... and use these to compute the batchnorm op. 
Returns the computed values\n in a numpy array.\n \"\"\"\n with tf.Graph().as_default():\n total_inp_size = np.product(input_shape)\n\n input_vals = helpers.get_tensor_data(total_inp_size, max_val)\n\n inp_tensor = tf.constant(input_vals,\n shape=input_shape,\n dtype=np.float32)\n\n mean = tf.math.reduce_mean(inp_tensor,axis=[0,1,2])\n\n variance = tf.math.reduce_variance(inp_tensor,axis=[0,1,2])\n\n output = tf.nn.batch_normalization(inp_tensor, mean, variance, 0., 1., 0.001)\n \n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n sess.graph.finalize()\n return sess.run(output), sess.run(mean), sess.run(variance)\n\n\ndef get_result_function(test_case):\n \"\"\"\n Get the function which will compute the expected values for the given test case.\n \"\"\"\n return get_forward_results\n\n\n#TODO dansoutar: fix these and the remainder of the file.\nTEST_CASE_TPL = \"{test_type}{direction}{operation}\"\nTEST_NAME_TPL = \"{in_s[0]}x{in_s[1]}x{in_s[2]}x{in_s[3]}\"\nIN_SHAPE_INIT_TPL = \"{{{{ {0[0]}, {0[1]}, {0[2]}, {0[3]} }}}}\"\n\n\nDIRECTION_MAP = {\n 'forward': 'batchnorm::Forward'\n}\n\nOPERATION_MAP = {\n 'Training': 'batchnorm::Training',\n 'Inference': 'batchnorm::Inference'\n}\n\n\ndef get_result(test_case, test_params):\n REQUIRED_MAX = 2**24\n max_input_val=max(test_params.in_shape[0], test_params.in_shape[1], test_params.in_shape[2], test_params.in_shape[3])\n max_output_val = REQUIRED_MAX + 1\n floor_div=True\n input_shape=test_params.in_shape\n while max_output_val > REQUIRED_MAX:\n if floor_div:\n max_input_val = max_input_val // 2\n else:\n max_input_val /= 2\n func = get_result_function(test_case)\n output, mean, variance = func(max_input_val, input_shape)\n max_output_val = np.max(output)\n return output, mean, variance, max_input_val\n\n\ndef get_test_lines(test_case, test_params):\n \"\"\"\n Create a list of strings corresponding to the lines in a single test case.\n\n Uses TensorFlow to compute the expected results for the given parameters,\n and provides the code to call the test fixture to run the test.\n \"\"\"\n channel_idx = -1 if test_params.data_format == 'NHWC' else 1\n output, mean, variance, max_input_val = get_result(test_case, test_params)\n camel_case_type = helpers.to_camel_case(test_case.test_type)\n test_case_name = TEST_CASE_TPL.format(test_type=camel_case_type,\n direction=helpers.to_camel_case(\n test_case.direction),\n operation=helpers.to_camel_case(\n test_case.operation))\n test_name = TEST_NAME_TPL.format(in_s=test_params.in_shape)\n in_shape_init = IN_SHAPE_INIT_TPL.format(test_params.in_shape)\n test_lines = [\n \"TYPED_TEST({}, {}) {{\".format(test_case_name, test_name),\n \" using DataType = typename TestFixture::DataType;\",\n \" const std::vector exp_out = {};\".format(\n helpers.format_tensor(output)),\n \" const std::vector mean = {};\".format(helpers.format_tensor(mean)),\n \" const std::vector variance = {};\".format(helpers.format_tensor(variance)),\n \" const std::array in_shape = {};\".format(in_shape_init),\n \" const auto params = getBatchNormParams(in_shape, DataFormat::{});\".format(test_params.data_format),\n \" const DataType max_input_val = {:.1f};\".format(max_input_val),\n \" this->test_batchnorm(exp_out, mean, variance, params, max_input_val);\",\n \"}\",\n ]\n return test_lines\n\n\ndef test_params_for_test_case(test_case):\n \"Test params generator for all different tests in a given test case.\"\n for in_shape in itertools.product(BATCHES, IN_SIZES, IN_SIZES, CHANNELS):\n yield 
TestParams(in_shape=in_shape, data_format='NHWC')\n\n\ndef output_for_test_case(test_case):\n \"\"\"\n Create a list of strings corresponding to separate lines in the full test\n case. The output contains headers, includes, setup and all the tests for\n the test case.\n \"\"\"\n scriptname = os.path.basename(__file__)\n camel_case_type = helpers.to_camel_case(test_case.test_type)\n test_case_name = TEST_CASE_TPL.format(test_type=camel_case_type,\n direction=helpers.to_camel_case(\n test_case.direction),\n operation=helpers.to_camel_case(test_case.operation))\n output = [\n helpers.get_license(),\n helpers.get_dont_modify_comment(scriptname=scriptname),\n INCLUDES,\n TYPED_TEST_CASE_DECL_TPL.format(\n test_case=test_case_name,\n direction=DIRECTION_MAP[test_case.direction],\n operation=OPERATION_MAP[test_case.operation]),\n ]\n\n for test_params in test_params_for_test_case(test_case):\n output.extend(get_test_lines(test_case, test_params))\n output.append(\"\\n\")\n return output\n\n\nFILENAME_TPL = \"batchnorm/{test_type}_{direction}_{operation}.cc\"\n\n\ndef get_test_case_filename(test_case):\n \"Get filename for test case.\"\n return FILENAME_TPL.format(test_type=test_case.test_type,\n direction=test_case.direction,\n operation=test_case.operation)\n\n\ndef test_cases():\n \"Test case generator giving all possible test cases.\"\n for test_type, direction, operation in itertools.product(TEST_TYPES, DIRECTIONS, OPERATIONS):\n yield TestCaseParams(test_type=test_type, direction=direction, operation=operation)\n\n\ndef generate_batchnorm_tests():\n np.set_printoptions(suppress=True, threshold=1000000, linewidth=1000000)\n test_dir = helpers.get_test_directory()\n os.chdir(test_dir)\n for test_case in test_cases():\n filename = get_test_case_filename(test_case)\n output = output_for_test_case(test_case)\n with open(filename, 'w') as f:\n f.write('\\n'.join(output))\n print(\"File '{}' written\".format(filename))\n\n\nif __name__ == \"__main__\":\n generate_batchnorm_tests()\n\n","sub_path":"test/gen/generate_batchnorm_tests.py","file_name":"generate_batchnorm_tests.py","file_ext":"py","file_size_in_byte":8665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"157932748","text":"\"\"\"Installers for programming language specific libraries.\n\"\"\"\n\nfrom fabric.api import *\nfrom fabric.contrib.files import *\n\ndef r_library_installer(config):\n \"\"\"Install R libraries using CRAN and Bioconductor.\n \"\"\"\n # Create an Rscript file with install details.\n out_file = \"install_packages.R\"\n if exists(out_file):\n run(\"rm -f %s\" % out_file)\n run(\"touch %s\" % out_file)\n repo_info = \"\"\"\n cran.repos <- getOption(\"repos\")\n cran.repos[\"CRAN\" ] <- \"%s\"\n options(repos=cran.repos)\n source(\"%s\")\n \"\"\" % (config[\"cranrepo\"], config[\"biocrepo\"])\n append(out_file, repo_info)\n install_fn = \"\"\"\n repo.installer <- function(repos, install.fn) {\n update.or.install <- function(pname) {\n if (pname %in% installed.packages())\n update.packages(lib.loc=c(pname), repos=repos, ask=FALSE)\n else\n install.fn(pname)\n }\n }\n \"\"\"\n append(out_file, install_fn)\n std_install = \"\"\"\n std.pkgs <- c(%s)\n std.installer = repo.installer(cran.repos, install.packages)\n lapply(std.pkgs, std.installer)\n \"\"\" % (\", \".join('\"%s\"' % p for p in config['cran']))\n append(out_file, std_install)\n if len(config.get(\"bioc\", [])) > 0:\n bioc_install = \"\"\"\n bioc.pkgs <- c(%s)\n bioc.installer = repo.installer(biocinstallRepos(), biocLite)\n lapply(bioc.pkgs, bioc.installer)\n \"\"\" % (\", \".join('\"%s\"' % p for p in config['bioc']))\n append(out_file, bioc_install)\n if config.get(\"update_packages\", True):\n final_update = \"\"\"\n update.packages(repos=biocinstallRepos(), ask=FALSE)\n update.packages(ask=FALSE)\n \"\"\"\n append(out_file, final_update)\n # run the script and then get rid of it\n env.safe_sudo(\"Rscript %s\" % out_file)\n run(\"rm -f %s\" % out_file)\n","sub_path":"cloudbio/libraries.py","file_name":"libraries.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"95938483","text":"import freedan\n\n\ndef account_hierarchy(credentials_path):\n \"\"\"\n This script will loop over your accounts and print out the names of all accounts.\n By default MCC accounts will be skipped. You can change this by changing the 'skip_mcc' parameter\n :param credentials_path: str, path to your adwords credentials file\n \"\"\"\n\n # init connection to adwords API\n adwords_service = freedan.AdWordsService(credentials_path)\n\n # access your accounts\n for account in adwords_service.accounts():\n print(account)\n\n\nif __name__ == \"__main__\":\n adwords_credentials_path = \"adwords_credentials.yaml\"\n account_hierarchy(adwords_credentials_path)\n","sub_path":"examples/basic/account_hierarchy.py","file_name":"account_hierarchy.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"634151893","text":"from flask import Flask, Blueprint, jsonify\nfrom flask import Flask, render_template, flash, redirect, url_for, g, session, logging, request\nfrom passlib.hash import sha256_crypt\nfrom functools import wraps\nfrom sqlalchemy.orm import sessionmaker\nfrom jinja2 import Environment, PackageLoader, select_autoescape\nfrom .form import RegisterForm, TransactionForm, EditTransForm, EmailForm, PasswordForm\nfrom it680vizapp import mysql, mail, serialize\nfrom flask_mail import Message\n#from mysql import escape_string as thwart\nfrom flask import current_app\nfrom flask_breadcrumbs import register_breadcrumb, default_breadcrumb_root\nimport pandas as pd\nimport json\nimport datetime as dt\nfrom flask_paginate import Pagination, get_page_parameter\n\n\n#from wapy.api import Wapy\n#from walmart_api_client import WalmartApiClient\nimport pandas.io.sql as psql\nfrom it680vizapp.group.routes import get_users_id\n\nfrom itsdangerous import URLSafeTimedSerializer, SignatureExpired\n\n#Site blue print\nmod = Blueprint('site', __name__, template_folder='templates', static_url_path='/site/static', static_folder='./static')\ndefault_breadcrumb_root(mod, '.')\n\n\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n \tflash('Not logged in, Please login first.','danger')\n \treturn redirect(url_for('site.login'))\n return wrap\n\n\n\n\n#Index route \n@mod.route('/')\n# @register_breadcrumb(mod, '.', 'Home')\ndef index():\n\treturn render_template('site/index.html')\n\n#Site homepage route\n@mod.route('/home')\n@register_breadcrumb(mod, '.', 'Home', order=0)\ndef home():\n\treturn render_template('site/index.html')\n\n\n#Site about page route\n@mod.route('/about')\n@register_breadcrumb(mod, '.about', 'About', order=5)\ndef about():\n\treturn render_template('site/about.html')\n\n#Site contact page route\n@mod.route('/contact')\n# @register_breadcrumb(mod, '.', 'Contact')\ndef contact():\n\treturn render_template('site/contact.html')\n\n@mod.route('/viz')\ndef viz():\n\treturn render_template('site/viz.html')\n\n\n@mod.route('/getdata')\n@login_required\ndef getdata():\n\tconn = mysql.connection\n\tcur = mysql.connection.cursor()\n\tgroup_id = session['group_id']\n\t\n\tquery_trans = ''' SELECT sum(ut.share_amount) as amount, t.item\n\t\t\t\t\tFROM user_transaction ut\n\t\t\t\t\tjoin transaction t on t.transaction_id = ut.transaction_id\n\t\t\t\t\twhere t.group_id = %s\n\t\t\t\t\tgroup by t.item\n\t\t\t\t\torder by t.item '''\n\n\tcur.execute(query_trans, ([group_id]))\n\tresult = cur.fetchall()\n\tcur.close()\n\treturn jsonify(result)\n\n@mod.route('/get_user_chart_data')\n@login_required\ndef get_user_chart_data():\n\tconn = mysql.connection\n\tcur = mysql.connection.cursor()\n\tgroup_id = session['group_id']\n\t\n\tquery_trans = ''' SELECT u.fname as name, u.user_id, sum(ut.share_amount) as amount\n\t\t\t\t\tFROM user u\n\t\t\t\t\tjoin user_transaction ut on ut.user_id = u.user_id\n\t\t\t\t\tjoin transaction t on t.transaction_id = ut.transaction_id\n\t\t\t\t\twhere t.group_id = %s\n\t\t\t\t\tgroup by u.fname\n\t\t\t\t\torder by u.fname '''\n\tcur.execute(query_trans, ([group_id]))\n\tresult = cur.fetchall()\n\tcur.close()\n\treturn jsonify(result)\n\n@mod.route('/get_user_chart_data2', methods=['GET', 'POST'])\n@login_required\ndef get_user_chart_data2():\n\tif request.method == 'POST':\n\t\tuser_id = request.form['user']\n\t\tconn = mysql.connection\n\t\tcur = 
mysql.connection.cursor()\n\t\tgroup_id = session['group_id']\n\t\t\n\t\tquery_trans = ''' SELECT sum(ut.share_amount) as amount, t.item\n\t\t\t\t\tFROM user_transaction ut\n\t\t\t\t\tjoin transaction t on t.transaction_id = ut.transaction_id\n\t\t\t\t\twhere t.group_id = %s and ut.user_id = %s\n\t\t\t\t\tgroup by t.item\n\t\t\t\t\torder by t.item '''\n\t\tcur.execute(query_trans, ([group_id, user_id]))\n\t\tresult = cur.fetchall()\n\t\tcurrent_app.logger.info(result)\n\t\tcur.close()\n\t\treturn jsonify(result)\n\n\n@mod.route('/lineChart', methods=['GET', 'POST'])\n@login_required\ndef lineChart():\n\tif request.method == 'POST':\n\t\tuser_id = request.form['user']\n\t\tconn = mysql.connection\n\t\tcur = mysql.connection.cursor()\n\t\tgroup_id = session['group_id']\n\t\t\n\t\tquery_trans = ''' SELECT u.fname as name, t.manual_date as date, ut.share_amount as amount\n\t\t\t\t\t\tFROM user u\n\t\t\t\t\t\tjoin user_transaction ut on ut.user_id = u.user_id\n\t\t\t\t\t\tjoin transaction t on t.transaction_id = ut.transaction_id\n\t\t\t\t\t\twhere t.group_id = %s and ut.user_id = %s\n\t\t\t\t\t\torder by t.manual_date\n\t\t\t\t\t\t'''\n\n\t\tcur.execute(query_trans, ([group_id, user_id]))\n\t\tresult = cur.fetchall()\n\n\t\tformat_data = []\n\t\tfor item in result:\n\t\t\tname = item['name']\n\t\t\tdate = item['date'].strftime('%Y-%m-%d %H:%M')\n\t\t\tamount = item['amount']\n\t\t\tformat_data.append({'name':name, 'date': date, 'amount':amount})\n\t\tcurrent_app.logger.info(format_data)\n\t\tcur.close()\n\t\treturn jsonify(format_data)\n\n############### BELOW ARE CODE FOR USER AUTH, EXPENSE MANAGEMENT ########################\n\n#Wrap session for logged in access to pages\n\n\n#wrapper for setting role based access for site\n# def roles_required(func):\n# @wraps(func)\n# def wrapper(*args, **kwargs):\n# user = session['username']\n# cur = mysql.connection.cursor()\n# #Get username\n# role_query_statement = ''' select u.name, u.username, g.group_name, g.group_id, u.password\n# \t\t\t\t\t\t from user u join user_group g on g.user_id = u.user_id \n# \t\t\t\t\t\t having u.username = %s '''\n\n# result = cur.execute(role_query_statement, [user])\n \n# user_data = cur.fetchone()\n# user_auth = user_data['authority']\n# if user_auth in ['Admin', 'admin', 'member']:\n# return func(*args, **kwargs)\n# else:\n# flash('You do not have access to content of this page, redirected to dashboard', 'danger')\n# return redirect(url_for('site.login'))\n# return wrapper\n\n\n\n\n@mod.route('/register', methods= ['GET', 'POST'])\n# @register_breadcrumb(mod, '.', 'Signup')\n@register_breadcrumb(mod, '.register', 'Signup')\ndef register():\n\tform = RegisterForm(request.form)\n\tif request.method == 'POST' and form.validate():\n\t\tfname = form.fname.data.strip()\n\t\tlname = form.lname.data.strip()\n\t\temail = form.email.data.strip().lower()\n\t\tusername = form.username.data.strip().lower()\n\t\tpassword = sha256_crypt.encrypt(str(form.password.data))\n\t\tenabled = False\n\n\t\tname = fname + ' ' + lname \n\n\t\t#create a cursor\n\t\tcur = mysql.connection.cursor()\n\n\t\t#query to check if user already exists\n\t\treg_query_username = ''' select username from user where username = %s '''\n\t\tcur.execute(reg_query_username, ([username]))\n\n\t\texisting_user = cur.fetchone()\n\t\tcurrent_app.logger.info(existing_user)\n\t\t#logic to check if user already exists, if not, the insert logics will execute\n\t\tif existing_user is None:\n\t\t\tregister_insert_query = ''' INSERT INTO user (name, fname, lname, email, 
username, password, enabled)\n\t\t\t\t\t\t\t\t\t\t VALUES(%s, %s, %s, %s, %s, %s, %s) '''\n\t\t\tcur.execute(register_insert_query, (name, fname, lname, email, username, password, enabled))\n\t\t\tmysql.connection.commit()\n\n\t\t\tenabled_username_query = ''' select username, enabled from user where username = %s '''\n\t\t\tcur.execute(enabled_username_query, ([username]))\n\n\t\t\tuser_data = cur.fetchone()\n\t\t\tenabled_fetch = user_data['enabled']\n\n\t\t\tif enabled_fetch == 0:\n\t\t\t\ttoken = serialize.dumps(email, salt='My-Token')\n\t\t\t\tcurrent_app.logger.info(token)\n\n\t\t\t\t#prepare email msg\n\t\t\t\temail_msg = Message('Confirm Email', sender='gupta.niraz@gmail.com', recipients=[email])\n\t\t\t\tlink = url_for('site.confirm_email', token=token, _external=True)\n\n\t\t\t\temail_msg.body = 'Your link is {}'.format(link)\n\t\t\t\tmail.send(email_msg)\n\n\t\t\t\tflash('Registration successful. Please check your email for activation.', 'success')\n\t\t\t\treturn render_template('site/index.html', data=token)\n\t\t\t#redirect(url_for('site.index'))\n\n\t\t\t#close cursor\n\t\t\tcur.close()\n\t\telse:\n\t\t\tflash('User already exists!', 'danger')\n\t\n\t# mysql.connection.close()\n\treturn render_template('site/register.html', form=form)\n\n\n@mod.route('/confirm_email/<token>')\ndef confirm_email(token):\n\ttry:\n\t\temail = serialize.loads(token, salt = 'My-Token', max_age=86400)\n\texcept SignatureExpired:\n\t\treturn 'The token has expired! '\n\t# return 'The token works! '\n\tcur = mysql.connection.cursor()\n\tfirst_query = ''' SELECT username, enabled from user where email = %s '''\n\tcur.execute(first_query, ([email]))\n\tuser_data = cur.fetchone()\n\tuser_status = user_data['enabled']\n\t\n\tuser_name = user_data['username']\n\tcurrent_app.logger.info(user_status)\n\tcurrent_app.logger.info(user_name)\n\n\tif user_status == True:\n\t\tflash('Already activated', 'success')\n\t\tsession['logged_in'] = True\n\t\tsession['username'] = user_name\n\t\treturn redirect(url_for('site.dashboard'))\n\telse:\n\n\t\tquery = '''UPDATE user SET enabled = %s WHERE email = %s'''\n\t\tcur.execute(query, (True, [email]))\n\t\tmysql.connection.commit()\n\n\t\tflash('Account activated', 'success')\n\t\tsession['logged_in'] = True\n\t\tsession['username'] = user_name\n\treturn redirect(url_for('site.dashboard'))\n\n@mod.route('/reset', methods= ['GET', 'POST'])\ndef password_reset():\n\tform = EmailForm(request.form)\n\tif form.validate():\n\t\temail = form.email.data.strip()\n\n\t\tcur = mysql.connection.cursor()\n\t\tquery = '''SELECT * from user WHERE email=%s'''\n\t\tcur.execute(query, [email])\n\t\tuser = cur.fetchone()\n\n\t\ttoken = serialize.dumps(email, salt='recover_password_key_token')\n\t\tcurrent_app.logger.info(token)\n\n\t\t#prepare email msg\n\t\temail_msg = Message('Password reset requested', sender='gupta.niraz@gmail.com', recipients=[email])\n\t\tlink = url_for('site.password_reset_token', token=token, _external=True)\n\n\t\temail_msg.body = 'Please click on the link to change your password {}'.format(link)\n\t\tmail.send(email_msg)\n\n\t\tmsg = 'A link for password change request has been sent to your Inbox.'\n\t\treturn render_template('site/index.html', msg=msg)\n\n\treturn render_template('site/password_reset.html', form=form)\n\n\n@mod.route('/reset/<token>', methods=[\"GET\", \"POST\"])\ndef password_reset_token(token):\n\ttry:\n\t\temail = serialize.loads(token, salt = 'recover_password_key_token', max_age=86400)\n\texcept SignatureExpired:\n\t\treturn 'The token has expired! '\n\n\tform = PasswordForm(request.form)\n\n\tif form.validate():\n\t\tcur = mysql.connection.cursor()\n\t\tfetch_query = '''SELECT username, enabled from user where email = %s'''\n\t\tcur.execute(fetch_query, ([email]))\n\t\tuser_data = cur.fetchone()\n\n\t\tuser_status = user_data['enabled']\n\t\tuser_name = user_data['username']\n\n\t\tnew_password = sha256_crypt.encrypt(str(form.password.data))\n\t\tcurrent_app.logger.info(new_password)\n\t\tpass_query = '''UPDATE user SET password = %s WHERE email = %s'''\n\t\tcur.execute(pass_query, (new_password, [email]))\n\n\t\tmysql.connection.commit()\n\t\tflash('Password change successful. You can login now.', 'success')\n\t\t# msg = 'Password change successful. You can login now.'\n\t\treturn redirect(url_for('site.login'))\n\treturn render_template('site/pass_reset_token.html', form=form, token=token)\n\n\n\n#User Login process and conditional routes to user login page and dashboard\n@mod.route('/login', methods = ['GET','POST'])\n# @register_breadcrumb(mod, '.', 'Login')\n@register_breadcrumb(mod, '.login', 'Login')\ndef login():\n\tif 'logged_in' in session:\n\t\treturn redirect(url_for('site.dashboard'))\n\n\telif request.method == 'POST':\n\t\t#Get data from login form\n\t\t#name = request.form['name']\n\t\tusername = request.form['username']\n\t\tform_pass = request.form['password']\n\n\t\t#login cursor\n\t\tcur = mysql.connection.cursor()\n\n\t\t#Get username\n\t\tadmin_login = '''select name, username, password, super_user, enabled from user where username = %s'''\n\t\t# group_query = ''' select u.enabled, u.name, u.username, u.password, a.authority\n\t\t# \t\t\t\t\t\t from user u join authorities a on a.user_id = u.user_id\n\t\t# \t\t\t\t\t\t having u.username = %s '''\n\t\tuser_result = cur.execute(admin_login, ([username]))\n\t\tuser_data = cur.fetchone()\n\n\t\tif user_result > 0:\n\t\t\t#get stored hash\n\t\t\tusers_name = user_data['name']\n\t\t\tuser_name = user_data['username']\n\t\t\tuser_pass = user_data['password']\n\t\t\tsuper_user = user_data['super_user']\n\t\t\tenabled = user_data['enabled']\n \n\t\t\t#Compare pass\n\t\t\tif user_name:\n\t\t\t\tif sha256_crypt.verify(form_pass, user_pass) == False:\n\t\t\t\t\tflash('Passwords do not match!', 'danger')\n\t\t\t\t\treturn render_template('site/login.html')\n\t\t\t\telif enabled == False:\n\t\t\t\t\tflash('Your account is inactive!', 'danger')\n\t\t\t\t\treturn render_template('site/login.html')\n\n\t\t\t\telse:\n\t\t\t\t\t# if username = user_name\n\t\t\t\t\tsession['logged_in'] = True\n\t\t\t\t\tsession['username'] = username\n\t\t\t\t\t#session['authority'] = member\n\t\t\t\t\tsession['name'] = users_name\n\t\t\t\t\tsession['super_user'] = super_user\n\t\t\t\t\t#session['name'] = users_fullname\n\n\t\t\t\t\tif session['super_user'] is True:\n\t\t\t\t\t\tflash('Welcome' + ' ' + users_name, 'success')\n\t\t\t\t\t\treturn redirect(url_for('cms.dashboard'))\n\t\t\t\t\telse:\n\t\t\t\t\t\tflash('Welcome' + ' ' + users_name, 'success')\n\t\t\t\t\t\treturn redirect(url_for('site.dashboard'))\n\n\t\t\t\t\tmysql.connection.commit()\n\t\t\telse:\n\t\t\t\terror = '.'\n\t\t\t\treturn render_template('site/login.html', error=error)\t\t\t\t\n\n\t\telse:\n\t\t\terror = 'User not found.'\n\t\t\treturn render_template('site/login.html', error=error)\n\n\t\tcur.close()\n\t\tmysql.connection.close()\n\treturn render_template('site/login.html')\n\n\n#Route to dashboard\n@mod.route('/dashboard')\n@login_required\n@register_breadcrumb(mod, '.dashboard', 'Dashboard', order=4)\ndef dashboard():\n\tcur = mysql.connection.cursor()\n\tusername = session['username']\n\tquery = \"\"\"\n\t\t\tselect user_id from user where username=%s\n\t\t\t\"\"\"\n\tcur.execute(query,([username]))\n\t_usr_id = cur.fetchone()\n\tusr_id = _usr_id['user_id']\n\ttotal_amt_data = get_total_bal(usr_id)\n\ttotal_lent_data = get_lent_bal(usr_id)\n\ttotal_expense_data = get_expense_bal(usr_id)\n\tcurrent_app.logger.info(total_expense_data)\n\tdata = {\"total\":total_amt_data, \"lent\":total_lent_data, \"expense\":total_expense_data}\n\t\n\treturn render_template('site/dashboard.html', result=data)\n\ndef get_total_bal(user_id):\n\tcur = mysql.connection.cursor()\n\tquery = \"\"\"\n\t\t\tselect sum(amount) as total_bal \n\t\t\tfrom transaction \n\t\t\twhere user_id=%s and status='unpaid'\n\t\t\t\"\"\"\n\tcur.execute(query, ([user_id]))\n\tdata=cur.fetchone()\n\treturn data\n\ndef get_lent_bal(user_id):\n\tcur = mysql.connection.cursor()\n\tquery = \"\"\"\n\t\t\tselect sum(you_lent) as lent_bal \n\t\t\tfrom transaction \n\t\t\twhere user_id=%s and status='unpaid'\n\t\t\t\"\"\"\n\tcur.execute(query, ([user_id]))\n\tdata=cur.fetchone()\n\treturn data\n\ndef get_expense_bal(user_id):\n\tcur = mysql.connection.cursor()\n\tquery = \"\"\"\n\t\t\tselect sum(share_amount) as shr_amount \n\t\t\tfrom user_transaction ut join transaction t \n\t\t\ton t.transaction_id = ut.transaction_id\n\t\t\twhere ut.user_id=%s and status='unpaid'\n\t\t\t\"\"\"\n\tcur.execute(query, ([user_id]))\n\tdata=cur.fetchone()\n\tcurrent_app.logger.info(data)\n\treturn data\n\n#Transaction process and routes to new transaction page\n@mod.route('/trans_form', methods= ['GET', 'POST'])\n@login_required\n#@roles_required\n# @register_breadcrumb(mod, '.trans_form', 'NewEntry', order=4)\ndef TransactionEntry():\n\t#Get user from db\n\tcur = mysql.connection.cursor()\n\n\tgroup_id = session['group_id']\n\tcurrent_app.logger.info(group_id)\n\tsql_user_auth_query = ''' select u.user_id, u.name, u.username, tg.group_name, ug.group_id\n\t\t\t\t\t\t\t\tfrom user u join user_group ug on ug.user_id = u.user_id \n\t\t\t\t\t\t\t\tjoin tbl_group tg on ug.group_id = tg.group_id\n\t\t\t\t\t\t\t\thaving ug.group_id = %s; '''\n\tcur.execute(sql_user_auth_query, ([group_id]))\n\tresult = cur.fetchall() \n\t\n\n\tform = TransactionForm(request.form)\n\tif request.method == 'POST' and form.validate():\n\t\tcomment = form.description.data\n\t\tmanual_date = form.manual_date.data\n\t\titem = form.item.data\n\t\t#payer = form.payer.data\n\t\t#payer = request.form['payer']\n\t\tamount = form.amount.data\n\t\t#status = form.status.data\n\t\tstatus = request.form['status']\n\n\t\tuser = session['username']\n\t\t_usr_data = get_users_id([user])\n\t\tusr_id = _usr_data['user_id']\n\n\t\tselected_users = request.form.getlist('person')\n\t\tcurrent_app.logger.info(item) \n\t\t\n\t\t#create a cursor\n\t\tif len(selected_users) == 0:\n\t\t\tflash('Please select at least one member!', 'danger')\n\t\telse:\n\t\t\tcur = mysql.connection.cursor()\n\t\t\ttran_form_insert_query = ''' INSERT INTO transaction(group_id, user_id, comment, item, amount, status, manual_date)\n\t\t\t\t\t\t\t\t\t\t VALUES(%s, %s, %s, %s, %s, %s, %s) '''\n\t\t\tcur.execute(tran_form_insert_query, (group_id, usr_id, comment, item, amount, status, manual_date))\n\t\t\tmysql.connection.commit()\n\n\t\t\n\t\t\t#converted the user_id to integer by counting\n\t\t\tlist_count = []\n\t\t\tfor users in selected_users:\n\t\t\t\tlist_count.append(selected_users.count(users))\n\t\t\tcurrent_app.logger.info(list_count)\n\n\t\t\t#summed the counted users in the list\n\t\t\tsum_users = 0\n\t\t\tfor item in list_count:\n\t\t\t\tsum_users += item\n\t\t\tcurrent_app.logger.info(sum_users)\n\t\t\t#person_name = request.form[]\n\n\t\t\t#Now divided the total amount by the sum of counted users\n\t\t\t_share = amount/sum_users\n\t\t\teach_share = round(_share, 2)\n\t\t\tcurrent_app.logger.info(each_share)\n\n\t\t\t#Repeated the share amount into a list equivalent to number of users e.g. 10 is now [10,10,10,10]\n\t\t\trep_list = [each_share] * sum_users\n\t\t\tcurrent_app.logger.info(rep_list) \n\t\t\t\n\t\t\t#The amount lent by the payer\n\t\t\t_lent = _share - amount\n\t\t\tcurrent_app.logger.info(_lent)\n\t\t\t\n\t\t\t#fetch max transaction id from transaction table\n\t\t\ttran_form_max_trans_id = ''' select MAX(transaction_id) from transaction '''\n\t\t\tcur.execute( tran_form_max_trans_id )\n\t\t\ttran_id = cur.fetchone()\n\n\t\t\t#Convert tran_id from list to usable ID\n\t\t\ttran_id_val = 0\n\t\t\tfor key, val in tran_id.items():\n\t\t\t\ttran_id_val += val\n\t\t\tcurrent_app.logger.info(tran_id_val)\n\n\t\t\t#converted the transaction id into a list and repeated by number of person\n\t\t\ttran_id_list = [tran_id_val] * sum_users\n\t\t\tcurrent_app.logger.info(tran_id_list)\n\n\n\t\t\t#Everything is working till here.\n\t\t\t#zipped the three lists altogether\n\t\t\tzipAll = zip(selected_users, tran_id_list, rep_list)\n\t\t\tcurrent_app.logger.info(zipAll)\n\n\t\t\tfor x, y, z in zipAll:\n\t\t\t    format_str = \"\"\"INSERT INTO user_transaction (user_id, transaction_id, share_amount)\n\t\t\t    VALUES ({user_id}, '{transaction_id}', '{share_amount}'); \"\"\"\n\n\t\t\t    sql_command = format_str.format(user_id=x, transaction_id=y, share_amount=z)\n\t\t\t    current_app.logger.info(sql_command)\n\t\t\t    cur.execute(sql_command)\n\t\t\t\n\t\t\t#Update the amt_lent column in user_transaction by matching the transaction id and user_id\n\t\t\ttran_update_amtlent_query = ''' UPDATE transaction SET you_lent=%s WHERE transaction_id = %s and user_id = %s '''\n\t\t\tcur.execute(tran_update_amtlent_query, (_lent, tran_id_val, usr_id))\n\n\t\t\t#Update the per_share in user_transaction by matching the transaction id and user_id\n\t\t\ttran_update_pershare_query = ''' UPDATE transaction SET your_share=%s WHERE transaction_id = %s and user_id = %s '''\n\t\t\tcur.execute(tran_update_pershare_query, (_share, tran_id_val, usr_id))\n\n\t\t\tmysql.connection.commit()\n\t\t\t#*********************\n\n\t\t\tflash('Record inserted!', 'success')\n\t\t\t#redirect(url_for('site.TransactionEntry', grp=group_id))\n\t\t\trender_template('site/transaction_form.html', form=form, grp=group_id)\n\n\t\t\t#close cursor\n\t\t\tcur.close()\n\t\t#mysql.connection.close()\n\treturn render_template('site/transaction_form.html', form=form, data=result)\n\n\n\n#Transactions view process and route to transaction view page\n@mod.route('/user_trans_view', methods= ['GET', 'POST'])\n@register_breadcrumb(mod, '.trans_view.user_trans_view', 'TransDetail')\n@login_required\ndef user_trans_view():\n\tuser = session['username']\n\tcur = mysql.connection.cursor()\n\tstatus = ''\n\tif request.method == 'POST':\n\t\tstat = request.form['status']\n\t\tstatus += stat\n\n\tquery_trans = ''' SELECT ut.id, t.entry_date, count(ut.user_id) as user_count, sum(ut.share_amount) as shr_amount, \n\t\t\t\t\t\tt.item, t.status, t.payer\n\t\t\t\t\tFROM user_transaction ut\n\t\t\t\t\tjoin transaction t on t.id = ut.id\n\t\t\t\t\tgroup by ut.id \n\t\t\t\t\thaving t.status LIKE %s \n\t\t\t\t\torder by ut.id '''\n\tresult = cur.execute(query_trans, ([status]))\n\tresult_2 = cur.fetchall()\n\n\n\tquery_lent_share_calc = ''' SELECT distinct ut.user_id, sum(ut.per_share) as per_share,\n\t\t\t\t\tsum(ut.amt_lent) as amt_lent, sum(ut.share_amount) as share_amt, u.name\n\t\t\t\t\tfrom user_transaction as ut\n\t\t\t\t\tinner join \n\t\t\t\t\t(\n\t\t\t\t\t\tselect status, id\n\t\t\t\t\t\tfrom transaction\n\t\t\t\t\t\tWHERE status = %s\n\t\t\t\t\t) as b\n\t\t\t\t\ton ut.id=b.id\n\t\t\t\t\tinner join \n\t\t\t\t\t(\n\t\t\t\t\t\tselect user_id, name\n\t\t\t\t\t\tfrom user\n\t\t\t\t\t) as u\n\t\t\t\t\ton ut.user_id=u.user_id\n\t\t\t\t\tGROUP by ut.user_id \n\t\t\t\t\torder by u.name '''\n\tcur.execute(query_lent_share_calc, ([status]))\n\tresult_4 = cur.fetchall()\n\n\tquery_trans_new = \"\"\" SELECT SUM(amount) as amount, payer FROM transaction \n\t\t\t\t\t\tWHERE status LIKE %s\n\t\t\t\t\t\tGROUP BY payer \"\"\"\n\tcur.execute(query_trans_new, ([status]))\n\tresult_3 = cur.fetchall()\n\n\n\ttran_view_user_role = ''' SELECT username, authority \n\t\t\t\t\t\t\t\tfrom user u join authorities a on u.user_id = a.user_id\n\t\t\t\t\t\t\t\thaving username = %s '''\n\tuser_role = cur.execute(tran_view_user_role, ([user]))\n\tuser_role_data = cur.fetchone()\n\tuser_auth = user_role_data['authority']\n\n\tif result > 0 and user_auth in ['member', 'admin', 'Admin']:\n\t\treturn render_template('site/user_trans_view.html', data_2=result_2, data_3=result_3, data_4=result_4)\n\telse:\n\t\tmsg = 'No data found or you do not have enough privilege.'\n\t\treturn render_template('site/user_trans_view.html', msg=msg)\n\t\n\t#commit\n\tmysql.connection.commit()\n\t#close conn\n\tcur.close()\n\tmysql.connection.close()\n\n\n\n#Update status of transaction\n@mod.route('/update_status', methods=['GET', 'POST'])\n@login_required\ndef update_status():\n\tif request.method == 'POST':\n\t\tget_status = request.form['status']\n\t\ttransaction_id = request.form['transaction_id']\n\t\tconn = mysql.connection\n\t\tcur = mysql.connection.cursor()\n\t\tgroup_id = session['group_id']\n\t\t\n\t\t# query_trans = ''' SELECT sum(ut.share_amount) as amount, t.item\n\t\t# \t\t\tFROM user_transaction ut\n\t\t# \t\t\tjoin transaction t on t.transaction_id = ut.transaction_id\n\t\t# \t\t\twhere t.group_id = %s and ut.user_id = %s\n\t\t# \t\t\tgroup by t.item\n\t\t# \t\t\torder by t.item '''\n\t\t# cur.execute(query_trans, ([group_id, user_id]))\n\t\t# result = cur.fetchall()\n\t\tcurrent_app.logger.info(get_status)\n\t\tcurrent_app.logger.info(transaction_id)\n\t\t# cur.close()\n\t\treturn 'Success'\n\n\n\n\n#Edit transaction -- not working yet\n@mod.route('/edit_transaction/<int:transaction_id>', methods=['GET', 'POST'])\n@login_required\n#@roles_required\ndef edit_transaction(transaction_id):\n\tform = TransactionForm(request.form)\n\tgroup_id = session['group_id']\n\n\tcur = mysql.connection.cursor()\n\ttran_result = cur.execute(\"SELECT * FROM transaction where transaction_id = %s\", ([transaction_id]))\n\ttran_data = cur.fetchone()\n\t\n\tform.description.data = tran_data['comment']\n\tform.manual_date.data = tran_data['manual_date']\n\tform.item.data = tran_data['item']\n\tform.status.data = tran_data['status']\n\t\n\n\tif request.method == 'POST' and form.validate():\n\t\tstatus = request.form['status']\n\t\tcomment = request.form['description']\n\t\tnew_date = request.form['manual_date']\n\t\tnew_item = request.form['item']\n\n\t\tcurrent_app.logger.info(new_date)\n\t\tcurrent_app.logger.info(new_item)\n\n\n\t\tcur = mysql.connection.cursor()\n\n\t\tstatus_update_query = ''' UPDATE transaction SET status=%s, comment=%s, item=%s, manual_date=%s WHERE transaction_id = %s '''\n\t\tcur.execute(status_update_query, (status, comment , new_item, new_date, transaction_id))\n\n\n\t\tmysql.connection.commit()\n\t\tcur.close()\n\t\tflash('Update successful', 'success')\n\t\treturn redirect(url_for('group.transactions', group_id=group_id))\n\treturn render_template('site/edit_transaction.html', form=form)\n\n\n#Delete transaction\n@mod.route('/delete_transaction/<int:transaction_id>', methods=['GET', 'POST'])\n@login_required\n#@roles_required\ndef delete_transaction(transaction_id):\n\n\tusername = session['username']\n\t_usr_data = get_users_id([username])\n\tusr_id = _usr_data['user_id']\n\n\tgroup_id = session['group_id']\n\n\tcurrent_app.logger.info(group_id)\n\n\tcur = mysql.connection.cursor()\n\tquery = \"\"\" select user_id, group_id, group_admin\n\t\t\t\tfrom user_group \n\t\t\t\twhere user_id=%s and group_id=%s \"\"\"\n\tcur.execute(query, ([usr_id, group_id]))\n\tresult = cur.fetchone()\n\tcurrent_app.logger.info(result['group_admin'])\n\n\tif result['group_admin'] == 1:\n\t\t\n\t\t# Create a trigger to keep record of deleted transactions\n\n\t\tcur.execute(\"DELETE FROM user_transaction WHERE transaction_id=%s\", [transaction_id])\n\t\tcur.execute(\"DELETE FROM transaction WHERE transaction_id=%s\", [transaction_id])\n\t\tmysql.connection.commit()\n\n\t\tcur.execute(\"SELECT * FROM transaction where transaction_id = %s\", [transaction_id])\n\t\tdeleted_id = cur.fetchone()\n\t\tif deleted_id is None:\n\t\t\tflash(\"Transaction is deleted\", 'success')\n\t\t\treturn redirect(url_for('group.transactions', group_id=group_id))\n\t\telse:\n\t\t\tflash(\"Query is not working\", 'danger')\n\t\t\treturn redirect(url_for('group.transactions', group_id=group_id))\n\tflash('You cannot delete this bill', 'danger')\n\treturn redirect(url_for('group.transactions', group_id=group_id))\n\n\n#Logout process by clearing session\n@mod.route('/logout')\ndef logout():\n\t# for key in session.keys():\n\t# \tsession.pop(key)\n\tsession.clear()\n\n\tflash('You are logged out', 'success')\n\treturn redirect(url_for('site.login'))\n\n","sub_path":"it680vizapp/site/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":25104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"271464722","text":"\"\"\"\nhttps://www.codingame.com/ide/puzzle/the-river-i-\n\"\"\"\n\ndef next_num(r):\n res = r\n while r:\n res += r%10\n r //=10\n return res\n\nr1 = int(input())\nr2 = int(input())\n\nwhile r1 != r2:\n if r1 < r2:\n r1 = next_num(r1)\n else:\n r2 = next_num(r2)\n\nprint(r1)\n\n\n\"\"\"\n>>>>>>>>>>>>>>>>>>>> C++\n\n\n#include \n#include \n#include \n#include \n\nusing namespace std;\n\n/**\n * Auto-generated code below aims at helping you parse\n * the standard input according to the problem statement.\n **/\n\nint next_num(int r){\n int res = r;\n while (r>0){\n res += r%10;\n r /=10;\n }\n return res;\n}\n\n\nint main()\n{\n long long r1;\n cin >> r1; cin.ignore();\n long long r2;\n cin >> r2; cin.ignore();\n\n while (r1 != r2){\n if (r1 < r2) r1 = next_num(r1);\n else r2 = next_num(r2);\n }\n\n cout << r1 << endl;\n}\n\n\n>>>>>>>>>>>>>>>>>>>>>>>> C\n\n#include \n#include \n#include \n#include \nint next_num(int r){\n int res = r;\n while (r>0){\n res += r%10;\n r /=10;\n }\n return res;\n}\n\nint main()\n{\n long long r1;\n scanf(\"%lld\", &r1);\n long long r2;\n scanf(\"%lld\", &r2);\n\n while (r1 != r2){\n if (r1 < r2) r1 = next_num(r1);\n else r2 = next_num(r2);\n }\n\n printf(\"%d\\n\", r1);\n\n return 0;\n}\n\n\n>>>>>>>>>>>>>>>>>>>>>> JavaScript\n\n\nvar r1 = parseInt(readline());\nvar r2 = parseInt(readline());\n\nfunction next_num(r){\n var res = r;\n while (r){\n res += r%10;\n r =parseInt(r / 10);\n }\n return res;\n}\n\nwhile (r1 != r2) {\n if (r1 < r2) {r1 = next_num(r1)}\n else {r2 = next_num(r2)}\n}\n// Write an answer using console.log()\n// To debug: console.error('Debug messages...');\n\nconsole.log(r1);\n\n\"\"\"","sub_path":"codingame/puzzle/easy/the_river_i.py","file_name":"the_river_i.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"449999733","text":"# Given a string S, find and return the first instance of a unique char.\n# If no unique chars, return '_'.\n# Guarantees: 1 ≤ s.length ≤ 10^5\n# Output: The first unique char in the string\ndef firstNotRepeatingCharacter(s):\n char_count = [0] * 26 # array sized for A-Z letters\n # ix = ascii value of a char\n # value = num of occurrences\n uniques = [] # ordered list of unique chars found\n\n for char in s:\n char_ascii = ord(char) - 97 # convert from letters to 0-25 subscripts\n\n # If exists, increment counter and remove from uniques\n if char_count[char_ascii]:\n char_count[char_ascii] += 1\n if char in uniques:\n uniques.remove(char)\n # If doesn't exist, set counter to 1 and add to unique list\n else:\n char_count[char_ascii] = 1\n uniques.append(char)\n\n if uniques:\n return uniques[0]\n \n return '_'\n\n\ntests = [\"abacabad\",\n \"abacabaabacaba\",\n \"z\",\n \"bcb\",\n \"bcccccccb\",\n \"abcdefghijklmnopqrstuvwxyziflskecznslkjfabe\",\n \"zzz\",\n \"bcccccccccccccyb\",\n \"xdnxxlvupzuwgigeqjggosgljuhliybkjpibyatofcjbfxwtalc\",\n \"ngrhhqbhnsipkcoqjyviikvxbxyphsnjpdxkhtadltsuxbfbrkof\"]\nsolutions = ['c', '_', 'z', 'c', '_', 'd', '_', 'y', 'd', 'g']\n\nfor ix in range(len(tests)):\n s = tests[ix]\n expected = solutions[ix]\n actual = firstNotRepeatingCharacter(s)\n\n print('s:\\t' + tests[ix] + '\\'')\n print('expected:\\t', expected)\n print('actual:\\t\\t', actual)\n\n if (expected == actual):\n print('Test case passed! :)')\n else:\n print('Test case failed! :(')\n\n print()\n\n","sub_path":"python/first_not_repeating_char.py3","file_name":"first_not_repeating_char.py3","file_ext":"py3","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"469153440","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n \nimport pandas as pd\nimport numpy as np\nimport re\nimport time\nimport string as str\n\nimport Helper\nimport YoutubeCommentExtractor\n# import nltkModules\nimport dill \nimport importlib\nimportlib.reload(YoutubeCommentExtractor)\nimportlib.reload(Helper)\n# importlib.reload(nltkModules)\n\n\n\npd.set_option('display.max_columns', 100)\npd.set_option('display.max_colwidth', -1)\nimport seaborn as sns\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\n\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nimport nltk\n\nnltk.download('wordnet')\nnltk.download('punkt')\nnltk.download('stopwords')\n\n\nfrom nltk.corpus import stopwords\nimport spacy\nfrom spacy import displacy\nfrom spacy.displacy.render import EntityRenderer\nfrom IPython.core.display import display, HTML,Markdown\n\n\nimport streamlit as st\nfrom streamlit import components\n# import spacy_streamlit\n\nmodels = [\"en_core_web_sm\", \"en_core_web_md\"]\n\n# import en_core_web_sm\n# nlp=en_core_web_sm.load()\nnlp = spacy.load(models[0])\n\nst.beta_set_page_config(layout=\"wide\")\n\n\n# Title\nst.markdown(\"YouTube Comment Analyzer \", unsafe_allow_html=True)\n\ninputURL, inputNumber,submitButton = st.beta_columns([1,.5,.5])\n\nwith st.beta_container():\n#Get input\n youTubeURL=inputURL.text_input(label='Enter YouTube music Video URL or use default',value='https://www.youtube.com/watch?v=aJOTlE1K90k')\n noOfComments=inputNumber.number_input(label='Enter no. of comments to read or use default',value=500)\n\n#Expandable sidebar\nexp=st.sidebar.beta_expander(\"About the App\")\nexp.write('This app provides text analytics on YouTube video comments for the given video URL. The top level comments get scraped from YouTube and then classified by their sentiments and then into Spam and non-Spam (Ham) categories. \\n\\n This app also provides a list of top key phrases/topics in each of the Positive and Negative comments along with sample comments with those phrases. These phrase act as a good representation of comments\\' content without having to read them one by one. 
')\n\nst.sidebar.markdown('[Github Repository](https://github.com/Preeti24/Youtube-comments)')\n\n#Load the model\nmodel = dill.load(open('fittedWinnerModel', \"rb\"))\n\n#Function to scrap reviews for the given URL\n@st.cache(suppress_st_warning=True,allow_output_mutation=True)\ndef readReviews(youTubeURL,noOfComments):\n return YoutubeCommentExtractor.read_required_no_of_comments(youTubeURL,noOfComments)\n\n# Function for Sentiment Analysis\nanalyzer=SentimentIntensityAnalyzer()\n\n@st.cache(suppress_st_warning=True)\ndef sentimentAnalysis(text):\n return analyzer.polarity_scores(text)['compound']\n\n#Noun phrase Chuncking- related functions\noptions = {\n 'colors': { '': '#FF8800'}\n}\ndef cleanhtml(raw_html):\n cleanr = re.compile('<.*?>') \n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\ndef nounPhraseChunking(text):\n doc=nlp(text)\n npList=[]\n for np in doc.noun_chunks:\n #remove english stopwords, phrases less that 2 characters long, urls by capturing .com and https and html tags\n if (np.text).lower() not in stopwords.words('english') and len(np.text)>1\\\n and \".com\" not in np.text and \"https\" not in np.text and\\\n ' {} \"\"\"\n # Newlines seem to mess with the rendering\n html = html.replace(\"\\n\", \" \")\n return WRAPPER.format(html)\ndef displayTopNComments(df,topNP):\n if df.shape[0]<5:\n x=df.shape[0]\n else:\n x=5\n for i in range(x):\n visualize_noun_phrases(df['Comment'][i],topNP)\ndef sampleComments(data):\n data['NounChunks']=data['Comment'].apply(lambda x: nounPhraseChunking(x))\n data['NounPhrase']=data['NounChunks'].apply(lambda x:extractNPPhrases(x))\n # Create a dictionary of noun phrases\n npDict={}\n nounPhrasePos={}\n nounPhraseNeg={}\n nounPhraseUnc={}\n \n for idx,nounPhrase in enumerate(data['NounPhrase']):\n if len(nounPhrase)==0:\n continue\n\n for np in nounPhrase:\n np=np.lower()\n \n if np not in npDict:\n npDict[np]=1\n else:\n npDict[np]+=1\n\n if data.iloc[idx].Sentiment=='POS':\n if np in nounPhrasePos:\n nounPhrasePos[np]+=1\n else:\n nounPhrasePos[np]=1\n\n if data.iloc[idx].Sentiment=='NEG':\n if np in nounPhraseNeg:\n nounPhraseNeg[np]+=1\n else:\n nounPhraseNeg[np]=1\n\n if data.iloc[idx].Sentiment=='UNC':\n if np in nounPhraseUnc:\n nounPhraseUnc[np]+=1\n else:\n nounPhraseUnc[np]=1\n df=pd.DataFrame(data=[npDict,nounPhraseNeg,nounPhrasePos,nounPhraseUnc]).transpose()\n df.rename(columns={0:'All',1:'Neg',2:'Pos',3:'Unc'},inplace=True)\n df.fillna(0,inplace=True)\n\n df['PosPercentage']=df.eval('Pos/All')\n df['NegPercentage']=df.eval('Neg/All')\n df['Diff']=df.eval('PosPercentage-NegPercentage')\n \n topPosNP=df[df['Diff']>0]\n topPosNP=topPosNP.sort_values(by=['Diff','All'],ascending=False)\n\n topNegNP=df[df['Diff']<0]\n topNegNP=topNegNP.sort_values(by=['Diff','All'],ascending=[True,False])\n \n if topNegNP.shape[0]>10:\n topNegNPList=topNegNP.index[:10].tolist()\n else:\n topNegNPList=topNegNP.index.tolist()\n\n if topPosNP.shape[0]>10:\n topPosNPList=topPosNP.index[:10].tolist()\n else:\n topPosNPList=topPosNP.index.tolist() \n \n \n indexListPos=[]\n indexListNeg=[]\n for idx,nounPhrase in enumerate(data['NounPhrase']):\n if len(nounPhrase)==0:\n continue\n\n for np in nounPhrase:\n np=np.lower()\n\n if np in topPosNPList and data.iloc[idx].Sentiment=='POS':\n indexListPos.append(idx)\n if np in topNegNPList and data.iloc[idx].Sentiment=='NEG':\n indexListNeg.append(idx)\n samplePosComments=data.iloc[indexListPos].sort_values(by=['Polarity'],ascending=True)\n samplePosComments.reset_index(drop=True,inplace=True)\n \n 
sampleNegComments=data.iloc[indexListNeg].sort_values(by=['Polarity'],ascending=True)\n    sampleNegComments.reset_index(drop=True,inplace=True)\n    return samplePosComments,sampleNegComments,topPosNPList,topNegNPList\n\n# This is to bring the button in center\nsubmitButton.write(\"\") \nsubmitButton.write(\"\") \nif submitButton.button(label='Submit'):\n#     try:\n    with st.spinner('Hold on!!! Magic is happening...'):\n        \n        data=readReviews(youTubeURL,noOfComments);\n        data['Comment']=data['Comment'].apply(lambda x: cleanhtml(x))\n\n        #Spam and ham classification\n        data=pd.DataFrame(data={'Comment':data['Comment'],\n                                'CommentDate':data['CommentDate'],\n                                'Classification':model.predict(data['Comment']),\n                                'Prediction probability':model.predict_proba(data['Comment'])[:,1].round(3)})\n        \n#         data=pd.DataFrame(data={'Comment':data['Comment'],\n#                                 'CommentDate':data['CommentDate'],\n#                                 'Classification':0,\n#                                 'Prediction probability':0})\n        \n        data['Classification']=data['Classification'].astype(int)\n        \n        \n        #Sentiment Analysis\n        data['Polarity']=data['Comment'].apply(lambda x: sentimentAnalysis(x))\n        data['Sentiment']=data['Polarity'].apply(lambda x: 'POS' if x>0 else 'NEG' if x<0 else 'UNC')\n\n        df=data.groupby(['Classification','Sentiment']).size().reset_index().\\\n        pivot(columns='Sentiment',index='Classification',values=0)\n        df.fillna(0,inplace=True)\n        cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"limegreen\",\"yellow\",\"red\"])\n        \n        #Display Count plot of Spam and Ham\n        col5, col6 = st.beta_columns([2,1])\n        fig, (ax1, ax2) = plt.subplots(1, 2)\n        df.plot(kind='bar', stacked=True,colormap=cmap,ax=ax1)\n        plt.sca(ax1)\n        plt.ylabel(\"No. of comments\")\n        plt.xticks([0,1],labels=['Ham','Spam'],rotation='horizontal');\n        \n        #Display comments over time\n        df_overtime=data.groupby(['CommentDate','Sentiment']).size().reset_index().\\\n        pivot(columns='Sentiment',index='CommentDate',values=0)\n        \n        df_overtime.fillna(0,inplace=True)\n        df_overtime.plot(kind='bar',stacked=True,colormap=cmap,ax=ax2);\n        plt.sca(ax2)\n        plt.xticks(rotation = 45, ha=\"right\")\n        col5.pyplot(fig)\n\n        #Display video thumbnail\n        col6.video(youTubeURL)\n        \n        #----------------------------------------------------------------------\n        #Noun phrase chunking\n        samplePosComments,sampleNegComments,topPosNPList,topNegNPList=sampleComments(data)\n        samplePosComments=samplePosComments.drop_duplicates(subset=['Comment']).reset_index()\n        sampleNegComments=sampleNegComments.drop_duplicates(subset=['Comment']).reset_index()\n        \n        col1, col2 = st.beta_columns(2)\n        with col1:\n            st.markdown(\"
Top POSITIVE things people are talking about \", unsafe_allow_html=True)\n st.write(topPosNPList)\n st.markdown(\"