seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24222566552 | from tkinter import*
from PIL import Image ,ImageTk
from tkinter import ttk
from tkinter import messagebox
import mysql.connector
import urllib.request
urllib.request.urlretrieve(
'https://iocl.com/images/indane_1.jpg',
"indane1.png")
urllib.request.urlretrieve(
'https://cdn5.newsnationtv.com/images/2022/01/01/lpg-gas-price-today-83.jpg',
"cylinder.jpg")
class LPGbooking:
    """Tkinter window for booking LPG cylinders: a booking form on the left,
    a fetched customer-details pane, and a searchable bookings table."""

    def __init__(self, root):
        """Build the whole UI inside *root* and load existing bookings."""
        self.root = root
        self.root.title("LPG Booking ")
        self.root.geometry("1295x550+30+100")

        # ====== form variables ========
        self.var_consid = StringVar()
        self.var_bookdate = StringVar()
        self.var_booking_type = StringVar()
        self.var_deldate = StringVar()
        self.var_paidtax = StringVar()
        self.var_subtotal = StringVar()
        self.var_total = StringVar()

        # ********* Title *****************
        lbl_title = Label(self.root, text="LPG BOOKING ", font=("times new roman", 15, "bold"),
                          bg="black", fg="dark orange", bd=4, relief=RIDGE)
        lbl_title.place(x=0, y=0, width=1290, height=70)

        # *********** LOGO **************
        img1 = Image.open(r"indane1.png")
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        img1 = img1.resize((200, 70), Image.LANCZOS)
        self.photoimg1 = ImageTk.PhotoImage(img1)
        labelimg = Label(self.root, image=self.photoimg1, bd=4, relief=RIDGE)
        labelimg.place(x=0, y=0, width=200, height=70)

        # ************** Label Frame ******************
        labelframeleft = LabelFrame(self.root, bd=2, relief=RIDGE, text="LPG Booking",
                                    padx=2, font=("times new roman", 14, "bold"))
        labelframeleft.place(x=5, y=70, width=425, height=472)

        # ******************** Labels and Entries *****************
        # consumer id
        lbl_cust_contact = Label(labelframeleft, text="Consumer ID :",
                                 font=("arial", 12, "bold"), padx=2, pady=6)
        lbl_cust_contact.grid(row=0, column=0, sticky="w")
        entry_contact = ttk.Entry(labelframeleft, textvariable=self.var_consid,
                                  font=("arial", 12, "bold"), width=20)
        entry_contact.grid(row=0, column=1, sticky="w")

        # fetch data button
        btnFetchData = Button(labelframeleft, command=self.Fetch_cust, text="Fetch Data",
                              font=("arial", 10, "bold"), bg="black", fg="gold", width=10)
        btnFetchData.place(x=320, y=4)

        # booking date
        booking_date = Label(labelframeleft, font=("arial", 12, "bold"),
                             text="Booking Date :", padx=2, pady=6)
        booking_date.grid(row=1, column=0, sticky="w")
        txt_booking_date = ttk.Entry(labelframeleft, textvariable=self.var_bookdate,
                                     font=("arial", 12, "bold"))
        txt_booking_date.grid(row=1, column=1)

        # delivery date
        lbl_deliverydate = Label(labelframeleft, font=("arial", 12, "bold"),
                                 text="Delivery Date :", padx=2, pady=6)
        lbl_deliverydate.grid(row=2, column=0, sticky="w")
        txt_deliverydate = ttk.Entry(labelframeleft, textvariable=self.var_deldate,
                                     font=("arial", 12, "bold"))
        txt_deliverydate.grid(row=2, column=1)

        # booking type
        lblbookingtype = Label(labelframeleft, font=("arial", 12, "bold"),
                               text="Cylinder Type :", padx=2, pady=6)
        lblbookingtype.grid(row=3, column=0, sticky="w")
        combo_search = ttk.Combobox(labelframeleft, textvariable=self.var_booking_type,
                                    font=("arial", 12, "bold"))
        combo_search["value"] = ("Small", "Medium", "Large")
        combo_search.current(0)
        combo_search.grid(row=3, column=1, padx=8)

        # paid tax
        lbltax = Label(labelframeleft, font=("arial", 12, "bold"),
                       text="Paid Tax :", padx=2, pady=6)
        lbltax.grid(row=4, column=0, sticky="w")
        txttax = ttk.Entry(labelframeleft, textvariable=self.var_paidtax,
                           font=("arial", 12, "bold"))
        txttax.grid(row=4, column=1)

        # sub total
        lblsub = Label(labelframeleft, font=("arial", 12, "bold"),
                       text="Sub Total :", padx=2, pady=6)
        lblsub.grid(row=5, column=0, sticky="w")
        txtsub = ttk.Entry(labelframeleft, textvariable=self.var_subtotal,
                           font=("arial", 12, "bold"))
        txtsub.grid(row=5, column=1)

        # total cost
        lbltotal = Label(labelframeleft, font=("arial", 12, "bold"),
                         text="Total Amount :", padx=2, pady=6)
        lbltotal.grid(row=6, column=0, sticky="w")
        txttotal = ttk.Entry(labelframeleft, textvariable=self.var_total,
                             font=("arial", 12, "bold"))
        txttotal.grid(row=6, column=1)

        # ======== bill button ======
        btnbill = Button(labelframeleft, text="BILL", command=self.total,
                         font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnbill.grid(row=10, column=0, padx=1, sticky="w")

        # =========== CRUD buttons ============
        btn_frame = Frame(labelframeleft, bd=2, relief=RIDGE)
        btn_frame.place(x=0, y=400, width=412, height=780)
        btnadd = Button(btn_frame, text="BOOK", command=self.add_data,
                        font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnadd.grid(row=0, column=0, padx=1)
        btnupdate = Button(btn_frame, text="UPDATE", command=self.update,
                           font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnupdate.grid(row=0, column=1, padx=1)
        btndel = Button(btn_frame, text="DELETE", command=self.deletes,
                        font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btndel.grid(row=0, column=2, padx=1)
        btnreset = Button(btn_frame, text="RESET", command=self.reset,
                          font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnreset.grid(row=0, column=3, padx=1)

        # ======= right side image ===========
        img3 = Image.open(r"cylinder.jpg")
        # same Pillow 10 fix as above
        img3 = img3.resize((430, 200), Image.LANCZOS)
        self.photoimg3 = ImageTk.PhotoImage(img3)
        labelimg = Label(self.root, image=self.photoimg3, bd=4, relief=RIDGE)
        labelimg.place(x=850, y=80, width=430, height=200)

        # ======== table frame / search system =============
        Table_Frame = LabelFrame(self.root, bd=2, relief=RIDGE,
                                 text="VIEW DETAILS AND SEARCH SYSTEM",
                                 font=("arial", 12, "bold"), bg="white", fg="red", width=9)
        Table_Frame.place(x=435, y=280, width=850, height=260)
        lblsearch = Label(Table_Frame, font=("arial", 12, "bold"), text="Search by :",
                          bg="red", fg="yellow")
        lblsearch.grid(row=0, column=0, sticky="w", padx=8)
        self.search_var = StringVar()
        combo_search = ttk.Combobox(Table_Frame, textvariable=self.search_var,
                                    font=("arial", 12, "bold"), width=24, state="readonly")
        combo_search["value"] = ("ConsumerID")
        combo_search.current(0)
        combo_search.grid(row=0, column=1, padx=8)
        self.txt_search = StringVar()
        entry_search = ttk.Entry(Table_Frame, textvariable=self.txt_search, width=24,
                                 font=("arial", 12, "bold"))
        entry_search.grid(row=0, column=2, padx=8)
        btnsearch = Button(Table_Frame, text="SEARCH", command=self.search,
                           font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnsearch.grid(row=0, column=3, padx=8)
        btnshowall = Button(Table_Frame, text="SHOW ALL", command=self.fetch_data,
                            font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnshowall.grid(row=0, column=4, padx=8)

        # ======= bookings table ========
        details_tbale = Frame(Table_Frame, bd=2, relief=RIDGE)
        details_tbale.place(x=5, y=50, width=835, height=180)
        scroll_x = ttk.Scrollbar(details_tbale, orient=HORIZONTAL)
        scroll_y = ttk.Scrollbar(details_tbale, orient=VERTICAL)
        self.book_table = ttk.Treeview(details_tbale,
                                       column=("Cons", "bDate", "DDate", "Btype"),
                                       xscrollcommand=scroll_x.set,
                                       yscrollcommand=scroll_y.set)
        scroll_x.pack(side=BOTTOM, fill="x")
        scroll_y.pack(side=RIGHT, fill="y")
        scroll_x.config(command=self.book_table.xview)
        scroll_y.config(command=self.book_table.yview)
        self.book_table.heading("Cons", text="ConsumerID")
        self.book_table.heading("bDate", text="Booking Date")
        self.book_table.heading("DDate", text="Delivery Date")
        self.book_table.heading("Btype", text="Booking Type")
        self.book_table["show"] = "headings"
        self.book_table.column("Cons", width=100)
        self.book_table.column("DDate", width=100)
        self.book_table.column("bDate", width=100)
        self.book_table.column("Btype", width=100)
        self.book_table.pack(fill=BOTH, expand=1)
        self.book_table.bind("<ButtonRelease-1>", self.get_cursor)
        self.fetch_data()
def add_data(self):
    """Insert a new booking row from the form after basic validation."""
    # .get() is required on each StringVar: comparing the variable object
    # itself to "" is always False, so the old validation never fired.
    if (self.var_consid.get() == "" or self.var_bookdate.get() == ""
            or self.var_deldate.get() == ""):
        messagebox.showerror("Error", "Please Enter the Required Fields", parent=self.root)
        return
    try:
        # NOTE(review): credentials are hard-coded; consider moving them to config.
        conn = mysql.connector.connect(host="localhost", username="root",
                                       password="Aditya8318@", database="lpg_booking")
        my_cursor = conn.cursor()
        my_cursor.execute("INSERT INTO booking values(%s,%s,%s,%s)",
                          (self.var_consid.get(), self.var_bookdate.get(),
                           self.var_deldate.get(), self.var_booking_type.get()))
        conn.commit()
        self.fetch_data()
        conn.close()
        messagebox.showinfo("Success", "Booking has been Done", parent=self.root)
    except Exception as es:
        messagebox.showwarning("Warning", f"Something went Wrong :{str(es)}", parent=self.root)
def fetch_data(self):
    """Reload every booking row from MySQL into the table view."""
    conn = mysql.connector.connect(host="localhost", username="root",
                                   password="Aditya8318@", database="lpg_booking")
    my_cursor = conn.cursor()
    my_cursor.execute("Select * from booking")
    rows = my_cursor.fetchall()
    # Clear unconditionally: previously an empty result set left stale rows
    # on screen (and a needless commit followed a plain SELECT).
    self.book_table.delete(*self.book_table.get_children())
    for i in rows:
        self.book_table.insert("", END, values=i)
    conn.close()
def get_cursor(self, event=""):
    """Copy the clicked table row into the form fields."""
    cursor_row = self.book_table.focus()
    content = self.book_table.item(cursor_row)
    row = content["values"]
    if not row:
        # click landed outside any row; nothing to copy
        return
    self.var_consid.set(row[0])
    self.var_bookdate.set(row[1])
    self.var_deldate.set(row[2])
    self.var_booking_type.set(row[3])
def update(self):
    """Rewrite the booking row for the current Consumer ID with the form values."""
    # .get() is required: comparing the StringVar object itself to "" was
    # always False, so the empty-ID check never fired.
    if self.var_consid.get() == "":
        messagebox.showerror("Error", "Please Enter Consumer ID ", parent=self.root)
        return
    conn = mysql.connector.connect(host="localhost", username="root",
                                   password="Aditya8318@", database="lpg_booking")
    my_cursor = conn.cursor()
    my_cursor.execute(
        "UPDATE booking SET BookingDate=%s,DeliveryDate=%s,BookingType=%s WHERE ConsumerID=%s",
        (self.var_bookdate.get(),
         self.var_deldate.get(),
         self.var_booking_type.get(),
         self.var_consid.get()))
    conn.commit()
    self.fetch_data()
    conn.close()
    messagebox.showinfo("Update", "Customer Details Successfully Updated", parent=self.root)
def deletes(self):
    """Delete the booking for the current Consumer ID after user confirmation."""
    mdel = messagebox.askyesno("LPG Booking System",
                               "Are u Sure you want to Delete the selected Booking",
                               parent=self.root)
    # askyesno returns a plain bool; the original `if mdel>0 ... else: if not
    # mdel: return` flow is collapsed into a single early return.
    if not mdel:
        return
    conn = mysql.connector.connect(host="localhost", username="root",
                                   password="Aditya8318@", database="lpg_booking")
    my_cursor = conn.cursor()
    my_cursor.execute("delete from booking where ConsumerID=%s",
                      (self.var_consid.get(),))
    conn.commit()
    self.fetch_data()
    conn.close()
def reset(self):
    """Blank out every form field."""
    for var in (self.var_bookdate, self.var_deldate, self.var_consid,
                self.var_paidtax, self.var_total, self.var_booking_type,
                self.var_subtotal):
        var.set("")
#==================All data fetch=============
def Fetch_cust(self):
    """Look up the customer record for the entered Consumer ID and show
    Name / Gender / Mobile / Email / ID Proof / ID Number in a side panel."""
    if self.var_consid.get() == "":
        messagebox.showerror("Error", "Please enter Consumer ID", parent=self.root)
        return
    conn = mysql.connector.connect(host="localhost", username="root",
                                   password="Aditya8318@", database="lpg_booking")
    my_cursor = conn.cursor()
    # One round trip for all six columns instead of the previous six
    # separate connections/queries (one per column).
    my_cursor.execute(
        "select Name,Gender,Mobile,Email,IDProof,IDNumber from customer where ConsumerID=%s",
        (self.var_consid.get(),))
    row = my_cursor.fetchone()
    conn.close()
    if row is None:
        messagebox.showerror("Error", "This Consumer ID is not Found", parent=self.root)
        return
    showDataframe = Frame(self.root, bd=4, relief=RIDGE, padx=2)
    showDataframe.place(x=450, y=82, width=300, height=180)
    captions = ("Name :", "Gender :", "Mobile :", "Email :", "ID Proof :", "ID Number :")
    # Rows are stacked 30px apart, matching the original fixed placements.
    for i, (caption, value) in enumerate(zip(captions, row)):
        Label(showDataframe, text=caption, font=("arial", 12, "bold")).place(x=0, y=30 * i)
        Label(showDataframe, text=value, font=("arial", 12, "bold")).place(x=90, y=30 * i)
def search(self):
    """Search bookings by the selected column, matching substrings."""
    column = self.search_var.get()
    # Whitelist the column name: identifiers cannot be bound as SQL
    # parameters, and the previous string-concatenated LIKE clause was an
    # SQL injection vector for user-typed search text.
    if column not in ("ConsumerID",):
        messagebox.showerror("Error", "Please select a search field", parent=self.root)
        return
    conn = mysql.connector.connect(host="localhost", username="root",
                                   password="Aditya8318@", database="lpg_booking")
    my_cursor = conn.cursor()
    my_cursor.execute("SELECT * from booking WHERE " + column + " LIKE %s",
                      ("%" + self.txt_search.get() + "%",))
    rows = my_cursor.fetchall()
    if len(rows) != 0:
        self.book_table.delete(*self.book_table.get_children())
        for i in rows:
            self.book_table.insert("", END, values=i)
    conn.close()
def total(self):
    """Fill tax / subtotal / total fields from the selected cylinder size.

    Prices are the same per-size constants as before; tax is 18% of the
    subtotal. The three copy-pasted branches are collapsed into a lookup.
    """
    prices = {"Small": 546.0, "Medium": 870.0, "Large": 1136.0}
    subtotal = prices.get(self.var_booking_type.get())
    if subtotal is None:
        # unknown/empty selection: the original branches also did nothing
        return
    tax = 0.18 * subtotal
    self.var_paidtax.set(tax)
    self.var_total.set(tax + subtotal)
    self.var_subtotal.set(subtotal)
if __name__ == "__main__":
    # Manual launch: build the Tk root, attach the booking UI, run the loop.
    main_window = Tk()
    app = LPGbooking(main_window)  # keep a reference so images stay alive
    main_window.mainloop()
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_na... |
2398607314 | """
Makes a movie of the previously downloaded GEOS data
"""
import os
import pathlib
from typing import List, Tuple, Union
import numpy as np
import matplotlib.pyplot as plt
import DownloadData
import ReadNetCDF4
import VideoWriter
plt.style.use('myDarkStyle.mplstyle')
# ======================================================================================================================
# Constants
# Values masked to NaN before display (see makeMovie); presumably the GOES
# product's fill / no-data sentinels — TODO confirm against the product docs.
FILL_VALUE = 0x3fff
FILL_VALUE2 = 1023
CMAP = 'hot'  # matplotlib colormap used for every frame
FPS = 12  # output movie frame rate
FIG_SIZE = [16, 9]  # figure size (inches) handed to MovieFigure
# ======================================================================================================================
class MovieFigure:
    """
    A simple wrapper around a matplotlib figure whose axes each hold one
    GOES image; completed frames are handed to the video writer.
    """
    def __init__(self,
                 numImages: int = 1,
                 figsize: Tuple[float, float] = (19.2, 10.8)):
        """
        Constructor
        Args:
            numImages: the number of images wide
            figsize: the overall figure size
        """
        self._fig, axes = plt.subplots(nrows=1,
                                       ncols=numImages,
                                       figsize=figsize)
        # plt.subplots returns a bare Axes (not an array) when ncols == 1,
        # which broke len()/indexing below; normalize to a 1-D array.
        self._axes = np.atleast_1d(axes)
        self._setup()

    @property
    def fig(self) -> plt.Figure:
        """
        Returns the figure handle
        """
        return self._fig

    def updateFigure(self,
                     axisNumber: int,
                     image: np.ndarray,
                     dateAndTime: str,
                     band: DownloadData.Band,
                     **plotKwargs) -> None:
        """
        Updates one axis of the figure with a new image.
        Args:
            axisNumber: the axis number to update
            image: the numpy array of the image
            dateAndTime: the date and time of the image
            band: the GOES band
            plotKwargs: kwargs forwarded to matplotlib imshow()
        Raises:
            IndexError: if axisNumber is out of range
        """
        if axisNumber >= len(self._axes):
            raise IndexError(f'axisNumber={axisNumber} is out of the range [0, {len(self._axes)})')
        self._axes[axisNumber].imshow(X=image, **plotKwargs)
        title = f'Band {band.name.replace("_", "-")} {dateAndTime}'
        self._axes[axisNumber].set_title(title)

    def update(self) -> None:
        """
        Redraws the figure canvas.
        """
        self._fig.canvas.draw()

    def _setup(self) -> None:
        """
        Strips ticks, labels and grid from every axis and tightens layout.
        """
        for axis in self._axes:
            axis.set_xticks([])
            axis.set_yticks([])
            axis.set_yticklabels([])
            axis.set_xticklabels([])
            # matplotlib renamed grid's first argument from `b` to `visible`;
            # the `b` spelling was removed in 3.8 and raised TypeError.
            axis.grid(visible=False)
            axis.set_title('')
        plt.tight_layout()
# ======================================================================================================================
def makeMovie(dataDirs: List[str],
              outputDir: str,
              outputName: str,
              cMax: Union[float, List[float]] = None) -> None:
    """
    Makes a movie of the data found in the input directories. Expects the
    data to be organized into day directories under each dataDir.
    Args:
        dataDirs: the data directories
        outputDir: the output directory to save the movie to
        outputName: the name of the output movie file
        cMax: scalar or per-directory list giving the upper color limit;
            when None, each image's own max is used
    """
    # create the output directory (and any parents) if it doesn't exist
    os.makedirs(outputDir, exist_ok=True)
    vw = VideoWriter.VideoWriter(filename=os.path.join(outputDir, outputName),
                                 fps=FPS,
                                 isColor=True)
    allFiles = [getAllImageFiles(dataDir=dataDir) for dataDir in dataDirs]
    numFiles = [len(files) for files in allFiles]
    if numFiles.count(numFiles[0]) != len(numFiles):
        raise RuntimeError(f'Different number of image files in the data directories')
    for fileIdx in range(len(allFiles[0])):
        # matplotlib appears to be a memory hog for some reason, so instantiate
        # a new figure for each set of files instead of simply updating one
        movieFig = MovieFigure(numImages=len(dataDirs),
                               figsize=FIG_SIZE)
        for dirIdx in range(len(allFiles)):
            file = allFiles[dirIdx][fileIdx]
            print(f'Processing File {file}')
            image, dateAndTime, band = ReadNetCDF4.readImage(filename=str(file),
                                                             doPlot=False)
            # mask known fill values so they render as gaps
            image[image == FILL_VALUE] = np.nan
            image[image == FILL_VALUE2] = np.nan
            if cMax is None:
                cLimMax = np.nanmax(image)
            elif isinstance(cMax, (list, tuple)):
                cLimMax = cMax[dirIdx]
            else:
                # isinstance covers int scalars too; the old `type(cMax) is
                # float` test left cLimMax as None for an int cMax
                cLimMax = cMax
            movieFig.updateFigure(axisNumber=dirIdx,
                                  image=image,
                                  dateAndTime=dateAndTime,
                                  band=band,
                                  clim=[0, cLimMax],
                                  cmap=CMAP)
        movieFig.update()
        vw.addMatplotlibFigureHandle(fig=movieFig.fig,
                                     doPlot=False)
        plt.close(movieFig.fig)
# ======================================================================================================================
def getAllImageFiles(dataDir: str) -> List[pathlib.Path]:
    """
    Return all of the image files in dataDir, in sorted day/hour/name order.
    Assumes a folder structure of days and hours beneath dataDir.
    Args:
        dataDir: the data directory
    Returns:
        sorted list of .nc files
    Raises:
        RuntimeError: if dataDir does not exist
    """
    if not os.path.isdir(dataDir):
        raise RuntimeError(f'Input directory can not be found\n\t{dataDir}')
    files = []
    # sort at every level: os.listdir/glob order is filesystem-dependent,
    # and unsorted files produced movie frames out of chronological order
    for dayDir in sorted(os.listdir(dataDir)):
        fullDayDir = os.path.join(dataDir, dayDir)
        if not os.path.isdir(fullDayDir):
            continue
        for hourDir in sorted(os.listdir(fullDayDir)):
            fullHourDir = os.path.join(fullDayDir, hourDir)
            files.extend(sorted(pathlib.Path(fullHourDir).glob('*.nc')))
    return files
# ======================================================================================================================
if __name__ == '__main__':
    # Build the two-band GOES-16 comparison movie relative to this script.
    MOVIE_NAME = 'GOES_16'
    scriptParent = pathlib.Path(os.path.abspath(__file__)).parent
    OUTPUT_DIR = os.path.join(scriptParent, '..', 'movie')
    DATA_TOP_DIR = os.path.join(scriptParent, '..', 'data')
    DATA_DIRS = [os.path.join(DATA_TOP_DIR, band) for band in ('BLUE_1', 'SWIR_7')]
    CMAX = [600, 4]
    makeMovie(dataDirs=DATA_DIRS,
              outputDir=OUTPUT_DIR,
              outputName=MOVIE_NAME,
              cMax=CMAX)
| dpilger26/GOES | scripts/MakeMovie.py | MakeMovie.py | py | 7,699 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_na... |
9754918030 | import click
import unittest
from click.testing import CliRunner
from doodledashboard.notifications import TextNotification
from parameterized import parameterized
from sketchingdev.console import ConsoleDisplay
from tests.sketchingdev.terminal.ascii_terminal import AsciiTerminal
class TestConsoleDisplayWithText(unittest.TestCase):
"""Checks that ConsoleDisplay centres a TextNotification's text inside an
ASCII-art terminal frame of the given (width, height).

NOTE(review): the expected frames below look whitespace-mangled (leading
padding appears stripped) — verify against the original fixtures.
"""
# Each case: (console (width, height), input text, expected ASCII frame).
@parameterized.expand([
((1, 1), "",
"""
+-+
||
+-+
"""),
((10, 3), "a",
"""
+----------+
||
| a|
||
+----------+
"""),
((10, 3), "centred",
"""
+----------+
||
| centred|
||
+----------+
"""),
((10, 3), "I'm centred",
"""
+----------+
||
| I'm|
| centred|
||
+----------+
"""),
((10, 3), "Hello World! This is too long",
"""
+----------+
| Hello|
| World!|
| This is|
+----------+
"""),
])
def test_text_centred_in_console(self, console_size, input_text, expected_ascii_terminal):
"""Render the notification through a click command and compare stdout."""
# AsciiTerminal strips the frame decoration down to the expected raw text.
expected_terminal = AsciiTerminal.extract_text(expected_ascii_terminal)
text_notification = TextNotification()
text_notification.set_text(input_text)
# Wrap the draw call in a CLI command so CliRunner can capture its output.
cmd = create_cmd(lambda: ConsoleDisplay(console_size).draw(text_notification))
result = CliRunner().invoke(cmd, catch_exceptions=False)
self.assertEqual(expected_terminal, result.output)
def create_cmd(func):
    """Return a zero-argument click command that simply invokes *func*."""
    @click.command()
    def c():
        func()
    return c
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| SketchingDev/Doodle-Dashboard-Display-Console | tests/sketchingdev/test_text_notification.py | test_text_notification.py | py | 1,699 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tests.sketchingdev.terminal.ascii_terminal.AsciiTerminal.extract_text",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tests.sketchingdev.terminal.ascii_terminal.AsciiTerm... |
72623372987 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import mock
import unittest
from cloudshell.networking.brocade.cli.brocade_cli_handler import BrocadeCliHandler
from cloudshell.networking.brocade.runners.brocade_state_runner import BrocadeStateRunner
class TestBrocadeStateRunner(unittest.TestCase):
    """Unit tests for the BrocadeStateRunner wiring."""

    def setUp(self):
        # All collaborators are irrelevant to these tests, so mock them out.
        cli_handler, logger, resource_config, api = (mock.MagicMock() for _ in range(4))
        super(TestBrocadeStateRunner, self).setUp()
        self.tested_instance = BrocadeStateRunner(cli=cli_handler,
                                                  logger=logger,
                                                  resource_config=resource_config,
                                                  api=api)

    def tearDown(self):
        super(TestBrocadeStateRunner, self).tearDown()
        del self.tested_instance

    def test_cli_handler_property(self):
        """ Check that property return correct instance. Should return BrocadeCliHandler """
        self.assertIsInstance(self.tested_instance.cli_handler, BrocadeCliHandler)
| QualiSystems/cloudshell-networking-brocade | tests/networking/brocade/runners/test_brocade_state_runner.py | test_brocade_state_runner.py | py | 1,123 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mock.MagicMock",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",... |
3277704581 | import os
from playwright.sync_api import sync_playwright
# Product number suffix used to name the local results directory.
key = "2731"
os.makedirs(os.path.join("res", key), exist_ok=True)
def main():
    """Open the MRI product page and count its document download buttons."""
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False, slow_mo=5000)
        page = browser.new_page()
        page.goto("https://mri.cts-mrp.eu/portal/details?productnumber=NL/H/2731/001")
        elements = page.get_by_role("listitem").get_by_role("button").all()
        # Locator.all() returns a plain list, so use len(); the previous
        # elements.count() was list.count() with no argument -> TypeError.
        count = len(elements)
        print(f"Number of detected elements is: {count}")
        browser.close()


main()
{
"api_name": "os.makedirs",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "playwright.sync_api.sync_playwright",
"line_number": 8,
"usage_type": "call"
}
] |
33706250276 | import sys
from PySide2.QtWidgets import QApplication, QMainWindow, QGroupBox, QRadioButton
aplicacao = QApplication(sys.argv)

janela = QMainWindow()
# setGeometry(left, top, width, height)
janela.setGeometry(100, 50, 300, 200)
janela.setWindowTitle("Primeira Janela")

# create a selection group box inside the window
group_box = QGroupBox("Selecione uma opção", janela)
group_box.move(50, 50)
group_box.resize(200, 100)
group_box.setStyleSheet('QGroupBox \
{background-color: yellow}')

# create the radio buttons inside the group box
radio_btn_1 = QRadioButton("Opção 1", group_box)
radio_btn_1.move(10, 20)

radio_btn_2 = QRadioButton("Opção 2", group_box)
radio_btn_2.move(10, 40)

radio_btn_3 = QRadioButton("Opção 3", group_box)
radio_btn_3.move(10, 60)
radio_btn_3.setChecked(True)

janela.show()
# Propagate Qt's exit status: the original called exec_() and then
# sys.exit() with no argument, discarding the application's return code.
sys.exit(aplicacao.exec_())
| leuribeiru/QtforPhyton | componentes_basicos/radio.py | radio.py | py | 865 | python | pt | code | 1 | github-code | 6 | [
{
"api_name": "PySide2.QtWidgets.QApplication",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets.QMainWindow",
"line_number": 6,
"usage_type": "call"
},
{
"api_name"... |
22241072161 | import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class Convolution(nn.Module):
    """Two stacked 3x3 Conv -> BatchNorm -> ReLU layers (the classic U-Net block)."""

    def __init__(self, in_ch, out_ch):
        super(Convolution, self).__init__()
        layers = []
        channels = in_ch
        # Build the two identical conv stages in a loop; only the first
        # stage's input channel count differs.
        for _ in range(2):
            layers.append(nn.Conv2d(channels, out_ch, 3, 1, 1))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(inplace=True))
            channels = out_ch
        self.conv = nn.Sequential(*layers)

    def forward(self, input):
        return self.conv(input)
class Curvature(torch.nn.Module):
    """Keeps the top ``ratio`` fraction of channels, ranked by the total
    absolute response to a fixed 3x3 high-pass ("curvature") filter."""

    def __init__(self, ratio):
        super(Curvature, self).__init__()
        # Fixed (non-learned) filter kept as a plain tensor and moved to the
        # input's device at use time; the previous hard-coded .cuda() crashed
        # on CPU-only machines (and the .cuda() result was not a registered
        # Parameter anyway).
        self.weight = torch.tensor([[[[-1 / 16, 5 / 16, -1 / 16],
                                      [5 / 16, -1, 5 / 16],
                                      [-1 / 16, 5 / 16, -1 / 16]]]])
        self.ratio = ratio

    def forward(self, x):
        B, C, H, W = x.size()
        flat = x.reshape(B * C, 1, H, W)
        response = torch.abs(F.conv2d(flat, self.weight.to(x.device)))
        # per-channel score: total absolute filter response
        score = torch.sum(response, dim=(-2, -1)).reshape(B, C)
        _, index = torch.topk(score, int(self.ratio * C), dim=1)
        selected = [torch.index_select(x[i], dim=0, index=index[i]).unsqueeze(0)
                    for i in range(B)]
        return torch.cat(selected, dim=0)
class Entropy_Hist(nn.Module):
    """Keeps the top ``ratio`` fraction of channels, ranked by the entropy of
    a (center value, neighborhood mean) histogram over sliding windows."""

    def __init__(self, ratio, win_w=3, win_h=3):
        super(Entropy_Hist, self).__init__()
        self.win_w = win_w
        self.win_h = win_h
        self.ratio = ratio

    def calcIJ_new(self, img_patch):
        """Fold each (K, K) patch into a single value: center*100 + mean of
        the non-center pixels. Requires an odd number of pixels per patch."""
        total_p = img_patch.shape[-1] * img_patch.shape[-2]
        if total_p % 2 != 0:
            tem = torch.flatten(img_patch, start_dim=-2, end_dim=-1)
            center_p = tem[:, :, :, int(total_p / 2)]
            mean_p = (torch.sum(tem, dim=-1) - center_p) / (total_p - 1)
            if torch.is_tensor(img_patch):
                return center_p * 100 + mean_p
            else:
                return (center_p, mean_p)
        else:
            print("modify patch size")

    def histc_fork(self, ij):
        """Histogram each of the B*C rows of *ij* (256 bins), fanning the
        work out over 16 torch.jit.fork tasks.

        The original declared this as ``def histc_fork(ij)`` with no ``self``,
        so the ``self.histc_fork(ij)`` call in forward() raised TypeError.
        Assumes ij.shape[0] is divisible by 16.
        """
        BINS = 256
        B, C = ij.shape
        N = 16
        BB = B // N
        min_elem = ij.min()
        max_elem = ij.max()
        ij = ij.view(N, BB, C)

        def f(x):
            with torch.no_grad():
                res = []
                for e in x:
                    res.append(torch.histc(e, bins=BINS, min=min_elem, max=max_elem))
                return res

        futures: List[torch.jit.Future[torch.Tensor]] = []
        for i in range(N):
            futures.append(torch.jit.fork(f, ij[i]))
        results = []
        for future in futures:
            results += torch.jit.wait(future)
        with torch.no_grad():
            out = torch.stack(results)
        return out

    def forward(self, img):
        with torch.no_grad():
            B, C, H, W = img.shape
            # pad so the sliding window also covers the image border
            ext_x = int(self.win_w / 2)
            ext_y = int(self.win_h / 2)
            new_width = ext_x + W + ext_x
            new_height = ext_y + H + ext_y
            # unfold every KxK sliding window: (B, C*K*K, L)
            nn_Unfold = nn.Unfold(kernel_size=(self.win_w, self.win_h),
                                  dilation=1, padding=ext_x, stride=1)
            x = nn_Unfold(img)
            # NOTE(review): the view below hard-codes 3x3 windows, so only
            # win_w == win_h == 3 actually works — confirm before changing.
            x = x.view(B, C, 3, 3, -1).permute(0, 1, 4, 2, 3)  # (B, C, L, K, K)
            ij = self.calcIJ_new(x).reshape(B * C, -1)
            fij_packed = self.histc_fork(ij)
            p = fij_packed / (new_width * new_height)
            # Shannon entropy in bits per channel (clamp avoids log(0))
            h_tem = -p * torch.log(torch.clamp(p, min=1e-40)) / math.log(2)
            a = torch.sum(h_tem, dim=1)
            H = a.reshape(B, C)
            _, index = torch.topk(H, int(self.ratio * C), dim=1)
            selected = []
            for i in range(img.shape[0]):
                selected.append(torch.index_select(img[i], dim=0, index=index[i]).unsqueeze(0))
            selected = torch.cat(selected, dim=0)
            return selected
class Network(nn.Module):
    """U-Net whose skip connections at two scales can be enriched with the
    most informative channels (curvature- or entropy-ranked)."""

    def __init__(self, in_ch=3, mode='ori', ratio=None):
        """
        Args:
            in_ch: number of input channels
            mode: 'ori' (plain U-Net), 'curvature' or 'entropy'
            ratio: two-element list of channel-selection ratios for the two
                enriched scales (ignored for 'ori')
        """
        super(Network, self).__init__()
        self.mode = mode
        if self.mode == 'ori':
            self.ratio = [0, 0]
        elif self.mode == 'curvature':
            self.ratio = ratio
            self.ife1 = Curvature(self.ratio[0])
            self.ife2 = Curvature(self.ratio[1])
        elif self.mode == 'entropy':
            self.ratio = ratio
            self.ife1 = Entropy_Hist(self.ratio[0])
            self.ife2 = Entropy_Hist(self.ratio[1])
        else:
            # previously an unknown mode left self.ratio undefined and only
            # failed later with an opaque AttributeError
            raise ValueError(f"unknown mode: {mode!r}")

        # ---- U-Net encoder ----
        self.conv1 = Convolution(in_ch, 64)
        self.pool1 = nn.MaxPool2d(2)    # (m/2,  n/2,  64)
        self.conv2 = Convolution(64, 128)
        self.pool2 = nn.MaxPool2d(2)    # (m/4,  n/4,  128)
        self.conv3 = Convolution(128, 256)
        self.pool3 = nn.MaxPool2d(2)    # (m/8,  n/8,  256)
        self.conv4 = Convolution(256, 512)
        self.pool4 = nn.MaxPool2d(2)    # (m/16, n/16, 512)
        self.conv5 = Convolution(512, 1024)

        # ---- decoder; conv7/conv8 input widths account for the extra
        #      ratio*C channels appended to the c3/c2 skips ----
        self.up_conv1 = nn.ConvTranspose2d(in_channels=1024, out_channels=512,
                                           kernel_size=2, stride=2, padding=0,
                                           output_padding=0)
        self.conv6 = Convolution(1024, 512)
        self.up_conv2 = nn.ConvTranspose2d(512, 256, 2, 2, 0, 0)
        self.conv7 = Convolution(int(256 * (2 + self.ratio[1])), 256)
        self.up_conv3 = nn.ConvTranspose2d(256, 128, 2, 2, 0, 0)
        self.conv8 = Convolution(int(128 * (2 + self.ratio[0])), 128)
        self.up_conv4 = nn.ConvTranspose2d(128, 64, 2, 2, 0, 0)
        self.conv9 = Convolution(128, 64)
        self.out_conv1 = nn.Conv2d(64, 1, 1, 1, 0)

    def forward(self, x):
        c1 = self.conv1(x)
        p1 = self.pool1(c1)
        c2 = self.conv2(p1)
        p2 = self.pool2(c2)
        c3 = self.conv3(p2)
        p3 = self.pool3(c3)
        c4 = self.conv4(p3)
        p4 = self.pool4(c4)
        c5 = self.conv5(p4)
        if self.mode != 'ori':
            # Append the selected channels along the channel axis (dim=1).
            # The original cat defaulted to dim=0 (batch), which broke the
            # channel counts conv7/conv8 were sized for.
            c2 = torch.cat([c2, self.ife1(c2)], dim=1)
            c3 = torch.cat([c3, self.ife2(c3)], dim=1)
        up1 = self.up_conv1(c5)
        merge1 = torch.cat([up1, c4], dim=1)
        c6 = self.conv6(merge1)
        up2 = self.up_conv2(c6)
        merge2 = torch.cat([up2, c3], dim=1)
        c7 = self.conv7(merge2)
        up3 = self.up_conv3(c7)
        merge3 = torch.cat([up3, c2], dim=1)
        c8 = self.conv8(merge3)
        up4 = self.up_conv4(c8)
        merge4 = torch.cat([up4, c1], dim=1)
        c9 = self.conv9(merge4)
        S_g_pred = self.out_conv1(c9)
        return S_g_pred
| yezi-66/IFE | unet_github/lib/Network.py | Network.py | py | 7,331 | python | en | code | 26 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
8381595021 | from os import system
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from mpl_toolkits.axes_grid import make_axes_locatable
##############################################################################
# matplotlib configuration
# Global plot styling shared by the helpers below.
linewidth = 2.0
fontsize = 12
params = { # 'backend': 'ps',
    'axes.labelsize': fontsize,
    # NOTE(review): 'text.fontsize' was removed in modern matplotlib
    # (use 'font.size'); valid only on the old versions this file targets.
    'text.fontsize': fontsize,
    'legend.fontsize': 0.9*fontsize,
    'xtick.labelsize': 0.9*fontsize,
    'ytick.labelsize': 0.9*fontsize,
    'text.usetex': False,
    # 'figure.figsize': fig_size
}
matplotlib.rcParams.update(params)
# Default marker cycle and sizes for plots built on these helpers.
markers = ['o', 's', '^', 'd', 'v', '*', 'h', '<', '>']
markersize = 8
nodesize = 1000
##############################################################################
def init_plot(is_tight_layout=False, ind_fig=0, **kwargs):
    """Close every open figure, create figure *ind_fig*, and return its axes."""
    plt.close("all")
    figure = plt.figure(ind_fig, **kwargs)
    axes = figure.add_subplot(111)
    if is_tight_layout:
        figure.tight_layout()
    return axes
def new_plot(is_tight_layout=False, ind_fig=0):
    """Create figure *ind_fig*; return its axes and the next figure index."""
    figure = plt.figure(ind_fig)
    axes = figure.add_subplot(111)
    if is_tight_layout:
        figure.tight_layout()
    return axes, ind_fig + 1
def save_fig(figname, is_adjust_border=False):
    """Save the current figure as <figname>.pdf and return that file name."""
    pdf_name = figname + ".pdf"
    if is_adjust_border:
        plt.subplots_adjust(left=0.12, bottom=0.1, right=0.86, top=0.9,
                            wspace=0.2, hspace=0.2)
    plt.savefig(figname + ".pdf", format='PDF')
    return pdf_name
| ngctnnnn/DRL_Traffic-Signal-Control | sumo-rl/sumo/tools/contributed/sumopy/agilepy/lib_misc/matplotlibtools.py | matplotlibtools.py | py | 1,749 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "matplotlib.rcParams.update",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 36,
"usage_type": "call"
},
{
"api_n... |
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql import SparkSession

# Train and evaluate a multinomial naive Bayes classifier on the sample
# libsvm data set shipped with Spark.
spark = SparkSession.builder.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")

data = spark.read.format("libsvm").load("file:///usr/lib/spark/data/mllib/sample_libsvm_data.txt")
# 60/40 train/test split with a fixed seed for reproducibility.
train, test = data.randomSplit([0.6, 0.4], 1234)

nb = NaiveBayes(smoothing=1.0, modelType="multinomial")
model = nb.fit(train)

predictions = model.transform(test)
predictions.show()

evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test set accuracy = " + str(accuracy))

spark.stop()
| geoffreylink/Projects | 07 Machine Learning/SparkML/sparkML_CL_naivebayes.py | sparkML_CL_naivebayes.py | py | 789 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "pyspark.sql.SparkSession.builder.getOrCreate",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 5,
"usage_type... |
9736948830 | import pickle
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_addons as tfa
import health_doc
import matplotlib.pyplot as plt
import gc
from imp import reload
from doc_preprocessing import get_data_from_kfold
import BERT
reload(BERT)
from BERT import make_model, model_fit, model_save, model_load
from BERT import get_tokenizer, get_tokenized_data, get_model_result, calc_score
# Training mode:
#   0 -> plain multi-label classification
#   1 -> knowledge distillation (requires cached teacher predictions on disk)
mode = 0
if mode:
    # Load the teacher model's predictions used as soft targets.
    with open('id_teacher_predict', 'rb') as teacher_file:
        id_teacher_predict = pickle.load(teacher_file)
if __name__ == '__main__':
    # ### Loading HealthDoc dataset
    dataset_path = "../dataset/HealthDoc/"
    dataset_id, dataset_label, dataset_content, dataset_label_name = health_doc.loading(dataset_path)
    # ### Loading K-fold split lists
    # assumes 'k_id' / 'k_label' pickles were produced by the companion
    # k-fold split script -- TODO confirm
    with open('k_id', 'rb') as f:
        k_id = pickle.load(f)
    with open('k_label', 'rb') as f:
        k_label = pickle.load(f)
    K = len(k_id)
    tokenizer = get_tokenizer()  # get BERT tokenizer
    # Repeat the full K-fold cross-validation 10 times, appending each
    # run's aggregate scores to the CSV results file.
    for cv_times in range(10):
        cv_micro_f1 = []
        cv_macro_f1 = []
        cv_accuray = []
        cv_weighted_f1 = []
        cv_label_f1 = []
        for testing_time in range(K):
            # ### Split data for train and test (one fold held out)
            subset_test = [testing_time]
            subset_train = np.delete(np.arange(K), subset_test)
            x_train, y_train = get_data_from_kfold(k_id, k_label, subset_train)
            x_test, y_test = get_data_from_kfold(k_id, k_label, subset_test)
            model_path = f'/content/model/{subset_test[0]}/'
            # ### Training Model
            #x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.15)
            # get tokenized data with BERT input format
            x_train_vec = get_tokenized_data(x_train, dataset_content, tokenizer)
            x_test = get_tokenized_data(x_test, dataset_content, tokenizer)
            #x_val = getTokenized(x_val, dataset_content, tokenizer)
            # Free graph state from the previous fold before building a new model.
            tf.keras.backend.clear_session()
            model = make_model(9)
            if (mode):
                # Knowledge distillation: train against the teacher's soft labels.
                y_train_teacher = np.empty(x_train.shape+(9,))
                for i, x in enumerate(x_train):
                    y_train_teacher[i,:] = id_teacher_predict[x]
                print('Training Multi-label model with KD')
                history = model_fit(model, x_train_vec, y_train_teacher)
            else:
                print('Training Multi-label model without KD')
                history = model_fit(model, x_train_vec, y_train)
            gc.collect()
            # ### Predict result on the held-out fold
            y_pred = get_model_result(model, x_test)
            # ### Calculate predict result metrics
            micro_f1, macro_f1, weighted_f1, subset_acc = calc_score(y_test, y_pred)
            cv_micro_f1.append(micro_f1)
            cv_macro_f1.append(macro_f1)
            cv_weighted_f1.append(weighted_f1)
            cv_accuray.append(subset_acc)
            # Per-label F1 for this fold.
            label_f1 = []
            for i, label_name in enumerate(dataset_label_name):
                label_f1.append(f1_score(y_test[:,i], y_pred[:,i]))
                print(f'{label_name:<15}:{label_f1[-1]: .4f}')
            cv_label_f1.append(label_f1)
        # Append this cross-validation run's fold-averaged scores as one CSV row.
        with open('multi-times cv result.csv', 'a') as f:
            f.write(f'{sum(cv_micro_f1)/K: .4f},')
            f.write(f'{sum(cv_macro_f1)/K: .4f},')
            f.write(f'{sum(cv_weighted_f1)/K: .4f},')
            f.write(f'{sum(cv_accuray)/K: .4f},')
            label_f1_mean = np.mean(cv_label_f1, axis=0)
            for f1_mean in label_f1_mean:
                f.write(f'{f1_mean: .4f},')
            f.write('\n')
{
"api_name": "imp.reload",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "health_doc.loading",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_num... |
13042124891 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 16:42:07 2018
@author: lud
"""
import matplotlib
#import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import tkinter as Tk
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
from argparse import ArgumentParser
import os
def cuboid_data2(o, size=(1, 1, 1)):
    """Return the six quad faces (6x4x3 array) of a cuboid with origin *o*
    and edge lengths *size*."""
    unit_faces = np.array(
        [[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
         [[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
         [[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
         [[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
         [[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
         [[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]],
        dtype=float)
    # Scale each coordinate axis, then translate to the origin point.
    scaled = unit_faces * np.asarray(size, dtype=float)
    return scaled + np.array(o)
def plotCubeAt2(positions, sizes=None, colors=None, **kwargs):
    """Build a Poly3DCollection containing one cuboid per entry in *positions*."""
    if not isinstance(colors, (list, np.ndarray)):
        colors = ["C0"] * len(positions)
    if not isinstance(sizes, (list, np.ndarray)):
        sizes = [(1, 1, 1)] * len(positions)
    faces = [cuboid_data2(pos, size=dims) for pos, dims in zip(positions, sizes)]
    # Each cuboid contributes six faces, hence the colour repeat factor of 6.
    return Poly3DCollection(np.concatenate(faces),
                            facecolors=np.repeat(colors, 6), **kwargs)
def main(path, width, depth, height):
    """Open a Tk window that renders the packed boxes described by the CSV
    files found in *path*, with a dropdown to switch between files.

    Each CSV row is: id, size_x, size_y, size_z, pos_x, pos_y, pos_z
    (a missing 7th column is treated as pos_z = 0).
    """
    # Collect all CSV data files in the given directory.
    source_files = []
    for file in os.listdir(path):
        if file.endswith(".csv"):
            source_files.append(os.path.join(path, file))

    def getData(df):
        """Build the Poly3DCollection for one data frame of boxes."""
        # BUGFIX: the original tested len(df.columns < 7), which compares the
        # column index element-wise and then takes its length -- always truthy
        # for a non-empty frame. We want "fewer than 7 columns".
        if len(df.columns) < 7:
            df['6'] = 0
        sizes = [tuple(x) for x in df.iloc[:, [1, 2, 3]].values]
        positions = [tuple(x) for x in df.iloc[:, [4, 5, 6]].values]
        colors = ["limegreen"] * df.shape[0]
        pc = plotCubeAt2(positions, sizes, colors=colors, edgecolor="k", linewidth=0.4)
        return pc

    # --- matplotlib figure embedded in a Tk window ---
    fig = Figure()
    root = Tk.Tk()
    root.wm_title("Plot boxes")
    canvas = FigureCanvasTkAgg(fig, master=root)
    ax = fig.add_subplot(111, projection='3d')
    ax.set_aspect('equal')
    ax.set_xlim([0, width])
    ax.set_ylim([0, depth])
    ax.set_zlim([0, height])
    if len(source_files) > 0:
        box_data = pd.read_csv(source_files[0], header=None)
    else:
        # No data files: draw a single degenerate box at the origin.
        box_data = pd.DataFrame(np.full((1, 6), 0, dtype=int))
    ax.add_collection3d(getData(box_data))
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    # NOTE(review): NavigationToolbar2TkAgg is deprecated in newer matplotlib
    # (NavigationToolbar2Tk); kept for compatibility with the pinned version.
    toolbar = NavigationToolbar2TkAgg(canvas, root)
    toolbar.update()
    canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

    def refresh(df):
        """Redraw the axes content from a freshly loaded data frame."""
        ax.collections.clear()
        # BUGFIX: use add_collection3d (as in the initial draw) so the
        # Poly3DCollection is registered with the 3D axes.
        ax.add_collection3d(getData(df))
        canvas.draw()

    def ok():
        newfile = tkvar.get()
        box_data = pd.read_csv(newfile, header=None)
        refresh(box_data)

    def option_changed(*args):
        # Dropdown selection changed: reload and redraw.
        newfile = tkvar.get()
        box_data = pd.read_csv(newfile, header=None)
        refresh(box_data)

    # Tk variable driving the file-selection dropdown.
    tkvar = Tk.StringVar(root)
    if len(source_files) > 0:
        tkvar.set(source_files[0])
    else:
        tkvar.set('No file')
    tkvar.trace("w", option_changed)
    popupMenu = Tk.OptionMenu(root, tkvar, '', *source_files)
    popupMenu.pack(side=Tk.TOP)

    def on_key_event(event):
        print('you pressed %s' % event.key)
        key_press_handler(event, canvas, toolbar)

    canvas.mpl_connect('key_press_event', on_key_event)

    def _quit():
        root.quit()     # stops mainloop
        root.destroy()  # this is necessary on Windows to prevent
                        # Fatal Python Error: PyEval_RestoreThread: NULL tstate

    button = Tk.Button(master=root, text='Quit', command=_quit)
    button.pack(side=Tk.BOTTOM)
    root.mainloop()
# main('E:\\Projects\\BinPacking\\test',800,1200,2055)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-p", "--path", dest="layer_data_path",
help="find data from path", metavar="PATH")
parser.add_argument("-w", "--width", dest="width", type = int, default=800,
help="plane width, default 800")
parser.add_argument("-d", "--depth", dest="depth", type = int, default=1200,
help="plane depth, default 1200")
parser.add_argument("-hei", "--height", dest="height", type = int, default=2055,
help="bin height, default 2055")
args = parser.parse_args()
main(args.layer_data_path, args.width, args.depth, args.height) | stevenluda/cuboidPlotter | PlotCuboids.py | PlotCuboids.py | py | 4,848 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_numbe... |
32713874308 | import scrapy
class KistaSpider(scrapy.Spider):
    """Scrape the sold / for-sale counts for apartments in Kista from hemnet.se."""
    name = "kista"

    def start_requests(self):
        start_urls = [
            'https://www.hemnet.se/bostader?location_ids%5B%5D=473377&item_types%5B%5D=bostadsratt',
        ]
        for start_url in start_urls:
            yield scrapy.Request(url=start_url, callback=self.parse)

    def parse(self, response):
        # Extract the numeric counters from the result-type toggle widget.
        sold = response.css("span.result-type-toggle__sold-count::text").re(r'\d+')
        for_sale = response.css("span.result-type-toggle__for-sale-count::text").re(r'\d+')
        yield {
            'sold': sold,
            'for_sell': for_sale
        }
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 11,
"usage_type": "call"
}
] |
75177510266 | import os
import string
import json
from collections import namedtuple
from sys import stdout
from lex.oed.languagetaxonomy import LanguageTaxonomy
from apps.tm.models import Lemma, Wordform, Definition, Language, ProperName
from apps.tm.build import buildconfig
# Field names of a refined lemma record; BlockData gives the positional JSON
# rows from the form index convenient attribute access.
LEMMA_FIELDS = buildconfig.LEMMA_FIELDS
BlockData = namedtuple('BlockData', LEMMA_FIELDS)
def populate_db():
    """Rebuild the Language, Lemma, Wordform, Definition, and ProperName tables."""
    steps = (
        ('Emptying the tables...\n', empty_tables),
        ('Populating Language records...\n', populate_language),
        ('Populating Lemma, Wordform, and Definition records...\n', populate_lexical),
        ('Populating ProperName records...\n', populate_proper_names),
    )
    for message, step in steps:
        stdout.write(message)
        step()
def empty_tables():
    """Delete every existing row from the tables this builder populates.

    Deletion order is kept identical to the original (dependents first).
    """
    for model in (Wordform, Lemma, Definition, Language, ProperName):
        model.objects.all().delete()
def populate_language():
    """
    Populate the Language table
    """
    taxonomy = LanguageTaxonomy()
    taxonomy.families = set(buildconfig.LANGUAGE_FAMILIES)
    # Truncate names that would overflow the database column.
    max_length = Language._meta.get_field('name').max_length
    language_objects = []
    for language in taxonomy.languages():
        name = language.name[:max_length]
        language_objects.append(Language(id=language.id, name=name, family=None))
    Language.objects.bulk_create(language_objects)
    # Second pass: wire up the self-referential family links, which can only
    # be resolved once every Language row already exists.
    for language in taxonomy.languages():
        family = taxonomy.family_of(language.name)
        if family is not None:
            src = Language.objects.get(id=language.id)
            target = Language.objects.get(id=family.id)
            src.family = target
            src.save()
def populate_lexical():
    """
    Populate the Lemma, Wordform, and Definition tables
    """
    in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'refined')
    frequency_cutoff = buildconfig.FREQUENCY_CUTOFF
    taxonomy = LanguageTaxonomy()
    # Primary keys are assigned manually so Wordform rows can reference their
    # Lemma before anything is written to the database.
    lemma_counter = 0
    definition_counter = 0
    for letter in string.ascii_lowercase:
        stdout.write('Inserting data for %s...\n' % letter)
        # Each per-letter file holds one JSON-encoded lemma record per line.
        blocks = []
        in_file = os.path.join(in_dir, letter + '.json')
        with open(in_file, 'r') as filehandle:
            for line in filehandle:
                data = json.loads(line.strip())
                blocks.append(BlockData(*data))
        lemmas = []
        wordforms = []
        definitions = []
        for i, block in enumerate(blocks):
            lang_node = taxonomy.node(language=block.language)
            if lang_node is None:
                language_id = None
            else:
                language_id = lang_node.id
            # Only low-frequency lemmas get a stored definition (truncated to
            # 100 characters).
            if block.definition and block.f2000 < frequency_cutoff:
                definition_counter += 1
                definitions.append(Definition(id=definition_counter,
                                              text=block.definition[:100]))
                definition_id = definition_counter
            else:
                definition_id = None
            lemma_counter += 1
            lemmas.append(Lemma(id=lemma_counter,
                                lemma=block.lemma,
                                sort=block.sort,
                                wordclass=block.wordclass,
                                firstyear=block.start,
                                lastyear=block.end,
                                refentry=block.refentry,
                                refid=block.refid,
                                thesaurus_id=block.htlink,
                                language_id=language_id,
                                definition_id=definition_id,
                                f2000=_rounder(block.f2000),
                                f1950=_rounder(block.f1950),
                                f1900=_rounder(block.f1900),
                                f1850=_rounder(block.f1850),
                                f1800=_rounder(block.f1800),
                                f1750=_rounder(block.f1750),))
            for typelist in (block.standard_types,
                             block.variant_types,
                             block.alien_types):
                # typeunit[3] is not stored -- presumably unused here; confirm
                # against the form-index writer.
                for typeunit in typelist:
                    wordforms.append(Wordform(sort=typeunit[0],
                                              wordform=typeunit[1],
                                              wordclass=typeunit[2],
                                              lemma_id=lemma_counter,
                                              f2000=_rounder(typeunit[4]),
                                              f1900=_rounder(typeunit[5]),
                                              f1800=_rounder(typeunit[6]),))
            # Flush to the database in batches of 1000 blocks.
            if i % 1000 == 0:
                Definition.objects.bulk_create(definitions)
                Lemma.objects.bulk_create(lemmas)
                Wordform.objects.bulk_create(wordforms)
                definitions = []
                lemmas = []
                wordforms = []
        # Flush whatever remains for this letter.
        Definition.objects.bulk_create(definitions)
        Lemma.objects.bulk_create(lemmas)
        Wordform.objects.bulk_create(wordforms)
def populate_proper_names():
    """Populate the ProperName table from the tab-separated names file."""
    in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'proper_names')
    in_file = os.path.join(in_dir, 'all.txt')
    names = []
    counter = 0
    with open(in_file) as filehandle:
        for line in filehandle:
            fields = line.strip().split('\t')
            # Skip malformed rows: expected "sortable<TAB>name<TAB>common".
            if len(fields) != 3:
                continue
            counter += 1
            sortable, name, common_flag = fields
            names.append(ProperName(lemma=name,
                                    sort=sortable,
                                    common=(common_flag.lower() == 'true')))
            # Flush to the database in batches of 1000.
            if counter % 1000 == 0:
                ProperName.objects.bulk_create(names)
                names = []
    ProperName.objects.bulk_create(names)
def _rounder(n):
n = float('%.2g' % n)
if n == 0 or n > 1:
return int(n)
else:
return n
| necrop/wordrobot | apps/tm/build/lexicon/populatedb.py | populatedb.py | py | 6,316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "apps.tm.build.buildconfig.LEMMA_FIELDS",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "apps.tm.build.buildconfig",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.namedtuple",
"line_number": 12,
"usage_type": "call"
... |
25018394942 | import datetime
import hashlib
import json
from urllib.parse import urlparse
import requests
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
import config
import crypto
class Blockchain:
    """A minimal proof-of-work blockchain: blocks of signed transactions,
    a hash-puzzle mining step, and a longest-valid-chain consensus rule
    over a set of peer nodes."""

    def __init__(self, key_path=None):
        """Create the chain with its genesis block; if *key_path* is given,
        load this node's RSA private key and derive its address."""
        # Initialize a chain which will contain blocks
        self.chain = []  # a simple list containing blocks
        # A cache of transactions which happened but are not yet
        # written to a block in the blockchain.
        self.transactions = []
        # Create a genesis block - the first block
        # Previous hash is 0 because this is a genesis block!
        self.create_block(proof=1, previous_hash='0')
        # Create a set of peer nodes (host:port strings)
        self.nodes = set()
        if key_path:
            self.private_key = crypto.load_private_key(key_path)
            self.address = self.generate_address(self.private_key.public_key())

    def create_block(self, proof, previous_hash):
        """Append a new block holding all pending transactions; return it."""
        # Define block as a dictionary
        block = {'index': len(self.chain) + 1,
                 'timestamp': str(datetime.datetime.now()),
                 'proof': proof,
                 'previous_hash': previous_hash,
                 # Here we can add any additional data related to the currency
                 'transactions': self.transactions
                 }
        # Now we need to empty the transactions list, since all those
        # transactions are now contained in the block.
        self.transactions = []
        # Append block to the blockchain
        self.chain.append(block)
        return block

    def get_previous_block(self):
        """Return the most recently appended block."""
        return self.chain[-1]

    def get_address(self):
        """Return this node's address (only set when a key was loaded)."""
        return self.address

    def proof_of_work(self, previous_proof):
        """Mine: find the nonce whose puzzle hash starts with the required
        number of leading zeros (difficulty set in config)."""
        new_proof = 1  # nonce value
        check_proof = False
        while check_proof is False:
            # Problem to be solved (this makes the mining hard)
            # operation has to be non-symmetrical!!!
            hash_operation = hashlib.sha256(str(config.BLOCKCHAIN_PROBLEM_OPERATION_LAMBDA(
                previous_proof, new_proof)).encode()).hexdigest()
            # Check if the leading characters are all zeros
            if hash_operation[:len(config.LEADING_ZEROS)] == config.LEADING_ZEROS:
                check_proof = True
            else:
                new_proof += 1
        # Check proof is now true
        return new_proof

    def hash_of_block(self, block):
        """Return the SHA-256 hash of a block's canonical JSON encoding."""
        # Convert a dictionary to string (JSON)
        encoded_block = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(encoded_block).hexdigest()

    def is_chain_valid(self, chain):
        """Return True when every block links to its predecessor's hash and
        every consecutive proof pair solves the mining puzzle."""
        previous_block = chain[0]
        block_index = 1
        while block_index < len(chain):
            # 1 Check the previous hash
            block = chain[block_index]
            if block['previous_hash'] != self.hash_of_block(previous_block):
                return False
            # 2 Check all proofs of work
            previous_proof = previous_block['proof']
            proof = block['proof']
            hash_operation = hashlib.sha256(str(config.BLOCKCHAIN_PROBLEM_OPERATION_LAMBDA(
                previous_proof, proof)).encode()).hexdigest()
            if hash_operation[:len(config.LEADING_ZEROS)] != config.LEADING_ZEROS:
                return False
            # Update variables
            previous_block = block
            block_index += 1
        return True

    def add_transaction(self, sender, receiver, amount, private_key):
        """Sign a transaction with *private_key*, queue it for the next block,
        and return that block's index."""
        # Create a transaction dictionary
        transaction = {
            'sender': sender,
            'receiver': receiver,
            'amount': amount
        }
        # Sign the canonical JSON form of the transaction (RSA-PSS, SHA-256)
        signature = private_key.sign(
            json.dumps(transaction, sort_keys=True).encode(),
            padding.PSS(
                mgf=padding.MGF1(hashes.SHA256()),
                salt_length=padding.PSS.MAX_LENGTH
            ),
            hashes.SHA256()
        )
        # Add the signature and public key to the transaction.
        # NOTE(review): 'signature' and 'public_key' are raw bytes; once such a
        # transaction is inside a block, json.dumps in hash_of_block /
        # save_blockchain will raise TypeError (bytes are not JSON
        # serializable). Consider hex/base64 encoding them -- confirm with the
        # verification code elsewhere in the project.
        transaction['signature'] = signature
        transaction['public_key'] = private_key.public_key().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        # Add the transaction to the list of pending transactions
        self.transactions.append(transaction)
        # Return the index of the next block in the blockchain
        previous_block = self.get_previous_block()
        return previous_block['index'] + 1

    def add_node(self, address):
        """Register a peer node given its URL."""
        parsed_url = urlparse(address)
        # Add to the list of nodes.
        # urlparse() returns a ParseResult object whose netloc attribute
        # is in the format address:port e.g. 127.0.0.1:5000
        self.nodes.add(parsed_url.netloc)

    def replace_chain(self):
        """Longest-chain consensus: fetch every peer's chain and adopt the
        longest valid one; return True if our chain was replaced."""
        network = self.nodes
        longest_chain = None
        max_length = len(self.chain)
        for node in network:
            # Ask each peer for its chain
            response = requests.get(f'http://{node}/get-chain')
            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']
                # Check chain if it is the longest one and also a valid one
                if length > max_length and self.is_chain_valid(chain):
                    max_length = length
                    longest_chain = chain
        if longest_chain:
            # Replace the chain
            self.chain = longest_chain
            return True
        # Otherwise, the chain is not replaced
        return False

    def save_blockchain(self, filename):
        """Write the chain to *filename* as pretty-printed JSON."""
        with open(filename, 'w') as file:
            json.dump(self.chain, file, indent=4)

    def load_blockchain(self, filename):
        """Replace the in-memory chain with the one stored in *filename*."""
        with open(filename, 'r') as file:
            self.chain = json.load(file)

    def generate_address(self, public_key):
        """Derive a node address: SHA-256 hex digest of the PEM public key."""
        public_key_bytes = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return hashlib.sha256(public_key_bytes).hexdigest()
| ivana-dodik/Blockchain | EP -- zadatak 03/bez master key/blockchain.py | blockchain.py | py | 6,409 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "crypto.load_private_key",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "... |
33595739631 | from flask import Flask, render_template, request, redirect, url_for
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Movie
app = Flask(__name__)
# Single module-level SQLAlchemy session shared by every request handler.
# check_same_thread=False lets SQLite accept calls from Flask's worker
# threads; NOTE(review): a session-per-request pattern would be safer.
engine = create_engine('sqlite:///books-collection.db?check_same_thread=False')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/movies')
def showMovies():
    """List every movie in the collection."""
    all_movies = session.query(Movie).all()
    return render_template("movies.html", movies=all_movies)
@app.route('/movies/new/', methods=['GET', 'POST'])
def newMovie():
    """Show the creation form (GET) or add a new movie record (POST)."""
    if request.method != 'POST':
        return render_template('newMovie.html')
    # Local renamed so it no longer shadows this view function.
    movie = Movie(title=request.form['name'],
                  author=request.form['author'],
                  cast=request.form['cast'],
                  price=request.form['price'])
    session.add(movie)
    session.commit()
    return redirect(url_for('showMovies'))
# This view lets us update a book and save the changes in the database.
@app.route("/movies/<int:movie_id>/edit/", methods=['GET', 'POST'])
def editMovie(movie_id):
    """Show the edit form (GET) or apply the submitted changes (POST)."""
    editedMovie = session.query(Movie).filter_by(id=movie_id).one()
    if request.method == 'POST':
        if request.form['name'] or request.form['author'] or request.form['cast'] or request.form['price']:
            # BUGFIX: the original assigned every form field to .title and
            # never committed, so edits were silently lost. Each field now
            # updates its own column and the change is persisted.
            editedMovie.title = request.form['name']
            editedMovie.author = request.form['author']
            editedMovie.cast = request.form['cast']
            editedMovie.price = request.form['price']
            session.add(editedMovie)
            session.commit()
        return redirect(url_for('showMovies'))
    else:
        return render_template('editMovie.html', movie=editedMovie)
# This view deletes a single book.
@app.route('/movies/<int:movie_id>/delete/', methods=['GET', 'POST'])
def deleteMovie(movie_id):
    """Confirm (GET) and perform (POST) deletion of one movie."""
    doomed = session.query(Movie).filter_by(id=movie_id).one()
    if request.method == 'POST':
        session.delete(doomed)
        session.commit()
        return redirect(url_for('showMovies', movie_id=movie_id))
    else:
        return render_template('deleteMovie.html', movie=doomed)
if __name__ == '__main__':
    # NOTE(review): debug mode must not stay enabled in production.
    app.debug = True
    app.run(port=4996)
| mrSlavik22mpeitop/stepik_selenium | flask_app_mpei.py | flask_app_mpei.py | py | 2,261 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "database_setup.Base.metadata",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "d... |
13749339342 | import ROOT
#from root_numpy import root2array, root2rec, tree2rec
import pylab,numpy,pickle
import matplotlib
# Shared matplotlib/pylab styling for every figure produced below
# (LaTeX text rendering with Computer Modern serif fonts).
pylab.rcParams['font.size'] = 14.0
pylab.rcParams['axes.labelsize']=18.0
pylab.rcParams['axes.titlesize']=20.0
pylab.rcParams['ytick.labelsize']='large'
pylab.rcParams['xtick.labelsize']='large'
pylab.rcParams['lines.markeredgewidth']=1.0
pylab.rc ('text', usetex=True)
pylab.rc ('font', family='serif')
pylab.rc ('font', serif='Computer Modern Roman')
# Gaussian flare widths (log10 of sigma in days) at which the limits below
# were evaluated.
log_sigma_days = numpy.array([-5,-4,-3,-2,-1,-0.52287874528033762,0,1])
### NEW GENIE 1460 Included ###
# Effective area folded with an E^-3 spectrum, per declination band, as
# (value, error) tuples; used to convert event limits into flux norms.
dec0_e3_foldedspectrum = (1072.916206382002,0)
dec16_e3_foldedspectrum = (1545.0315486757047,0)
dec30_e3_foldedspectrum = (1803.4879220886971,0)
dec45_e3_foldedspectrum = (1955.9670994116407,0)
dec60_e3_foldedspectrum = (2117.1599069802728,0)
dec75_e3_foldedspectrum = (2228.3197855702933,0)
sa_avg_foldedspectrum = (1654.0807981564465,0)
sys_adjustment = 0.89559693491089454
### Int(EffaE-3) (JF,RH)###
#samp2_e3_foldedspectrum_sum = (1759.219287256351,0) ## 100 GeV flux equal to 1.0 GeV^-1 cm^-2 s^-1
#samp2_e35_foldedspectrum_sum = (2925.5560058208703,0) ##
#samp2_e25_foldedspectrum_sum = (1320.5883336274608,0) ##
# Mean number of source events for the E^-3 sensitivity, one array per
# declination band, indexed like log_sigma_days.
sens_e3_dec0_meansrc_events = numpy.array([6.4656,6.70643,6.7344,7.38432,10.4106,13.2816,16.2928,28.1549])
sens_e3_dec16_meansrc_events = numpy.array([6.4384,6.62176,6.79315,7.4096,10.5558,13.0896,16.5709,30.3184])
sens_e3_dec30_meansrc_events = numpy.array([7.632,7.32,7.54048,8.00864,10.68,12.6272,16.0406,27.1056])
sens_e3_dec45_meansrc_events = numpy.array([6.86976,6.87104,7.09792,8.60768,11.3456,12.983,16.1408,27.0288])
sens_e3_dec60_meansrc_events = numpy.array([6.77216,6.54144,7.29088,8.584,11.0262,13.2019,15.5658,24.368])
sens_e3_dec75_meansrc_events = numpy.array([5.6608,5.64512,5.95296,7.37824,10.8947,12.7984,15.9766,28.8221])
# Mean number of source events for the E^-3 90% C.L. upper limit, per band.
ul_e3_dec0_meansrc_events = numpy.array([7.5456,8.09952,9.06432,11.376,17.5674,22.2304,29.9581,60.232])
ul_e3_dec16_meansrc_events = numpy.array([7.77754,8.51104,9.67872,11.8336,18.1984,23.208,30.528,64.568])
ul_e3_dec30_meansrc_events = numpy.array([8.95392,9.34349,10.2138,12.5501,18.1462,22.568,29.6342,59.744])
ul_e3_dec45_meansrc_events = numpy.array([8.45888,8.73325,9.74496,12.8112,19.0477,22.5107,29.5024,59.3357])
ul_e3_dec60_meansrc_events = numpy.array([8.17261,8.74912,10.1846,13.3968,19.3747,23.0784,30.0032,57.7504])
ul_e3_dec75_meansrc_events = numpy.array([7.30272,7.66144,8.52512,11.688,19.0272,24.0032,31.9216,64.608])
# Pickled effective areas (GENIE and NuGen samples), per declination band.
ilow_en_bins = pickle.load(open("./pickles/effarea_low_energy_bins.pkl",'r'))
high_en_bins = pickle.load(open("./pickles/effarea_high_energy_bins.pkl",'r'))
genie_avg_area = pickle.load(open("./pickles/g1460_numu_effarea_avg.pkl",'r'))
genie_dec0_area = pickle.load(open("./pickles/g1460_numu_effarea_dec0.pkl",'r'))
genie_dec16_area = pickle.load(open("./pickles/g1460_numu_effarea_dec16.pkl",'r'))
genie_dec30_area = pickle.load(open("./pickles/g1460_numu_effarea_dec30.pkl",'r'))
genie_dec45_area = pickle.load(open("./pickles/g1460_numu_effarea_dec45.pkl",'r'))
genie_dec60_area = pickle.load(open("./pickles/g1460_numu_effarea_dec60.pkl",'r'))
genie_dec75_area = pickle.load(open("./pickles/g1460_numu_effarea_dec75.pkl",'r'))
nugen_avg_area = pickle.load(open("./pickles/g1460_nugmu_effarea_avg.pkl",'r'))
nugen_dec0_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec0.pkl",'r'))
nugen_dec16_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec16.pkl",'r'))
nugen_dec30_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec30.pkl",'r'))
nugen_dec45_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec45.pkl",'r'))
nugen_dec60_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec60.pkl",'r'))
nugen_dec75_area = pickle.load(open("./pickles/g1460_nugmu_effarea_dec75.pkl",'r'))
# Solid angle of each declination band (steradians), from the zenith ranges.
sa0 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(95.))) - (1-numpy.cos(numpy.deg2rad(80.))))
sa16 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(80.))) - (1-numpy.cos(numpy.deg2rad(65.))))
sa30 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(65.))) - (1-numpy.cos(numpy.deg2rad(50.))))
sa45 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(50.))) - (1-numpy.cos(numpy.deg2rad(35.))))
sa60 = 2*numpy.pi*((1-numpy.cos(numpy.deg2rad(35.))) - (1-numpy.cos(numpy.deg2rad(20.))))
sa75 = 2*numpy.pi*(1-numpy.cos(numpy.deg2rad(20.)))
saTotal = 2*numpy.pi*(1-numpy.cos(numpy.deg2rad(95.)))
# Fraction of the analysed sky covered by each band, dec0 .. dec75.
sky_frac = [0.23989563791056959, 0.22901050354066707, 0.20251868181221927, 0.16222554659621455, 0.11087700847006936, 0.055472621670260208]
# Convert per-band event upper limits into flux normalisations.
fluxnorm_dec16_e3 = ul_e3_dec16_meansrc_events/dec16_e3_foldedspectrum[0]
fluxnorm_dec0_e3 = ul_e3_dec0_meansrc_events/dec0_e3_foldedspectrum[0]
fluxnorm_dec30_e3 = ul_e3_dec30_meansrc_events/dec30_e3_foldedspectrum[0]
fluxnorm_dec45_e3 = ul_e3_dec45_meansrc_events/dec45_e3_foldedspectrum[0]
fluxnorm_dec60_e3 = ul_e3_dec60_meansrc_events/dec60_e3_foldedspectrum[0]
fluxnorm_dec75_e3 = ul_e3_dec75_meansrc_events/dec75_e3_foldedspectrum[0]
# Sky-averaged event upper limit: weight each band by its sky fraction.
uls = [ul_e3_dec0_meansrc_events,ul_e3_dec16_meansrc_events,ul_e3_dec30_meansrc_events,ul_e3_dec45_meansrc_events,ul_e3_dec60_meansrc_events,ul_e3_dec75_meansrc_events]
event_ul_avg_list = [uls[i]*sky_frac[i] for i in range(len(sky_frac))]
event_ul_avg = numpy.array([0.,0.,0.,0.,0.,0.,0.,0.])
for listy in event_ul_avg_list:
    event_ul_avg+=listy
fluxnorm_sa_avg_e3 = event_ul_avg / sa_avg_foldedspectrum[0]
#fluxnorm_0 = sens_bdt0_e3_meansrc_events/samp2_e3_foldedspectrum_sum[0]
#fluxnorm_0_disco = disco_bdt0_e3_meansrc_events/samp2_e3_foldedspectrum_sum[0]
#fluxnorm_0_25 = sens_bdt0_e25_meansrc_events/samp2_e25_foldedspectrum_sum[0]
#fluxnorm_0_35 = sens_bdt0_e35_meansrc_events/samp2_e35_foldedspectrum_sum[0]
# --- Figure 1: N-event upper limits for several declination bands ---
pylab.figure()
pylab.plot(log_sigma_days,event_ul_avg,'k-',lw=2,label="Averaged")
pylab.plot(log_sigma_days,ul_e3_dec0_meansrc_events,'k--',lw=2,label=r"$\delta=0^{\circ}$")
pylab.plot(log_sigma_days,ul_e3_dec30_meansrc_events,'k-.',lw=2,label=r"$\delta=30^{\circ}$")
pylab.plot(log_sigma_days,ul_e3_dec60_meansrc_events,'k:',lw=2,label=r"$\delta=60^{\circ}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel("NSrc Events")
pylab.axis([-5,1,3,60])
pylab.grid()
pylab.legend(loc="upper left")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.title(r"Upper Limit $E^{-3}$ 90% C.L.")
pylab.savefig("LowEnTransient_NEventUpperLimit_E3_G1460_MultiDec")
# --- Figure 2: sky-averaged N-event upper limit ---
fig1=pylab.figure()
pylab.plot(log_sigma_days,event_ul_avg,'k-',lw=2)
#pylab.plot(0.77011529478710161,13.5279,"w*",ms=20.0,label="Most Significant Flare")
#pylab.plot(log_sigma_days,disco_bdt0_e3_meansrc_events,'k-',lw=2,label="Discovery Potential")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel("NSrc Events")
pylab.axis([-5,1,0,62])
pylab.grid()
pylab.legend(loc="upper left")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.title(r"Upper Limit $E^{-3}$ 90$\%$ C.L.")
pylab.savefig("LowEnTransient_NEventUpperLimit_E3_G1460_Avg.pdf")
# --- Figure 3: time-integrated flux upper limit (labels on right axis) ---
figgy=pylab.figure()
ax = figgy.add_subplot(111)
pylab.plot(log_sigma_days,fluxnorm_sa_avg_e3,'k-',lw=2,label=r"$E^{-3.0}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel(r"$\frac{dN}{dE}$ @ 100 GeV ($10^{-2}$GeV$^{-1}$ cm$^{-2}$)")
pylab.axis([-5,1,0.00,0.037483054073961818])
pylab.yticks([0.0060456538828970677,0.012091307765794135,0.018136961648691202,0.024182615531588271,0.030228269414485337,0.036273923297382403],["0.6","1.21","1.81","2.42","3.02","3.63"])
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.grid()
#pylab.legend(loc="upper left")
pylab.title(r"Time-Integrated Flux Upper Limit $E^{-3}$")
pylab.savefig("LowEnTransient_FluxUpperLimit_E3_G1460_Avg.pdf")
# --- Figure 4: events (left axis) with equivalent flux on a twin right axis ---
figgy=pylab.figure()
ax = figgy.add_subplot(111)
pylab.plot(log_sigma_days,event_ul_avg,'k-',lw=2,label=r"$E^{-3.0}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel("NSrc Events")
pylab.axis([-5,1,0.00,62])
pylab.yticks([ 0., 10., 20., 30., 40., 50., 60.])
pylab.grid()
ax2 = ax.twinx()
ax2.set_ylim(0,0.037483054073961818)
ax2.set_xlim(-5,1)
ax2.set_yticks([0.0060456538828970677,0.012091307765794135,0.018136961648691202,0.024182615531588271,0.030228269414485337,0.036273923297382403])
ax2.set_yticklabels(["0.6","1.21","1.81","2.42","3.02","3.63"])
ax2.set_ylabel(r"$\frac{dN}{dE}$ @ 100 GeV ($10^{-2}$GeV$^{-1}$ cm$^{-2}$)")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
#pylab.legend(loc="upper left")
pylab.title(r"Time-Integrated Flux Upper Limit $E^{-3}$")
pylab.savefig("LowEnTransient_FluxUpperLimit_E3_G1460_Avg_DoubleY.pdf")
figgy=pylab.figure()
ax = figgy.add_subplot(111)
pylab.plot(log_sigma_days,fluxnorm_dec0_e3,'k--',lw=2,label=r"$\delta = 0^{\circ}$")
pylab.plot(log_sigma_days,fluxnorm_dec16_e3,'k-',lw=2,label=r"$\delta = 16^{\circ}$")
pylab.plot(log_sigma_days,fluxnorm_dec30_e3,'k-.',lw=2,label=r"$\delta = 30^{\circ}$")
pylab.plot(log_sigma_days,fluxnorm_dec60_e3,'k:',lw=2,label=r"$\delta = 60^{\circ}$")
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad$ (Days)')
pylab.ylabel(r"$\frac{dN}{dE}$ @ 100 GeV ($10^{-2}$GeV$^{-1}$ cm$^{-2}$)")
pylab.axis([-5,1,0.00,0.058])
pylab.yticks([0.00 , 0.00828571, 0.01657143, 0.02485714, 0.03314286, 0.04142857, 0.04971429, 0.058],["0.0","0.83","1.7","2.5","3.3","4.1","5.0","5.8"])
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
matplotlib.pyplot.gcf().subplots_adjust(right=.85)
pylab.grid()
pylab.legend(loc="upper left")
pylab.title(r"Time-Integrated Flux Upper Limit $E^{-3}$")
pylab.savefig("LowEnTransient_FluxUpperLimit_E3_G1460_MultiDec.pdf")
'''
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,fluxnorm_0,'b-',lw=2,label='Sensitivity (90% C.L.)')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel(r"$\frac{dN}{dE}$ [$GeV^{-1} cm^{-2} s^{-1}$] @ 100 GeV Pivot Energy")
#pylab.axis([-5,1,5e3,5e4])
pylab.yticks([0.001,0.005,0.01,0.015,0.02,0.025],["$1e-3$","$5.0e-3$","$1.0e-2$","1.5e-2","2.0e-2","2.5e-2"])
pylab.grid()
pylab.legend(loc="upper left")
pylab.title(r"Flux Sensitivity (MergedSim) $E^{-3}$")
pylab.savefig("LowEnTransient_FluenceSensitivity_E3_MergedSim_FinalCut")
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,fluxnorm_0,'b-',lw=2,label='Sensitivity (90% C.L.)')
pylab.plot(log_sigma_days,fluxnorm_0_disco,'k-',lw=2,label='Discovery Potential')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel(r"$\frac{dN}{dE}$ [$GeV^{-1} cm^{-2} s^{-1}$] @ 100 GeV Pivot Energy")
#pylab.axis([-5,1,5e3,5e4])
pylab.yticks([0.001,0.005,0.01,0.015,0.02,0.025],["$1e-3$","$5.0e-3$","$1.0e-2$","1.5e-2","2.0e-2","2.5e-2"])
pylab.grid()
pylab.legend(loc="upper left")
pylab.title(r"Flux Sensitivity (MergedSim) $E^{-3}$")
pylab.savefig("LowEnTransient_FluenceSensitivityAndDisco_E3_MergedSim_FinalCut")
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,merged_samp1_e2_meansrc_events,'g-',lw=2,label='Sample 1')
pylab.plot(log_sigma_days,merged_samp2_e2_meansrc_events,'b-',lw=2,label='Sample 2')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel("NSrc Events")
pylab.axis([-6,1,3,15])
pylab.grid()
pylab.title("Sensitivity (MergedSim)")
pylab.legend(loc='upper left')
pylab.savefig("LowEnTransient_DiscoPotential_E2_MergedSim_SampleComparison")
pylab.figure(figsize=(10,8))
pylab.plot(log_sigma_days,nugen_samp1_e2_meansrc_events,'g--',lw=2,label='Sample 1 (Nugen)')
pylab.plot(log_sigma_days,nugen_samp2_e2_meansrc_events,'b--',lw=2,label='Sample 2 (Nugen)')
pylab.plot(log_sigma_days,merged_samp1_e2_meansrc_events,'g-',lw=2,label='Sample 1 (MergedSim)')
pylab.plot(log_sigma_days,merged_samp2_e2_meansrc_events,'b-',lw=2,label='Sample 2 (MergedSim)')
pylab.xlabel(r'$Log_{10}(\sigma_{\omega})\quad (Days)$')
pylab.ylabel("NSrc Events")
pylab.axis([-6,1,3,15])
pylab.grid()
pylab.title("Sensitivity")
pylab.legend(loc='upper left')
pylab.savefig("LowEnTransient_DiscoPotential_E2_NugenANDMerged_SampleComparison")
'''
| daughjd/bashscripts | PaperPlotter.py | PaperPlotter.py | py | 11,938 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pylab.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pylab.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pylab.rcParams",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pylab.rcPara... |
1482920507 | # 从爬虫生成的Excel表格中读取数据并生成词云图
import os
import sys
import PIL
import jieba
import openpyxl
import wordcloud
import configparser
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from multiprocessing import Pool
# Parameter definitions; see readme.md on GitHub for the detailed description.
config_file = 'config/config.ini'
config_Section_Name = 'GC_DEFAULT' # name of the configuration section to read
# Stop-word list: tokens removed before word-frequency counting.
stop_Word = ['!', '!', ':', '*', ',', ',', '?','《','》',
             '。', ' ', '的', '了', '是', '啊', '吗', '吧','这','你','我','他','就'] # stop-word list
def read_Danmu(workbook_Name, sheet_Name):
    """Yield the rows (values only) of one worksheet of an Excel workbook.

    On any failure an empty iterator is returned so callers keep a uniform
    iteration path; the failure reason is printed (Chinese messages).
    """
    try:
        workbook = openpyxl.load_workbook(workbook_Name)
        # Look the sheet up by name rather than index, for readability.
        worksheet = workbook[sheet_Name]
        # FIX: values_only expects a bool; `1` worked by accident.
        data = worksheet.iter_rows(values_only=True)
        return data
    # On error, return an empty iterator.
    except openpyxl.utils.exceptions.InvalidFileException:
        print(f"输入文件的路径或格式错误,请打开{config_file}文件重新配置路径\n")
        return iter(())
    except KeyError:
        print(f"工作表页名错误,请检查Sheet的名字和{config_file}中是否一致\n")
        return iter(())
    # FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # narrow it to Exception (and drop the unused traceback local).
    except Exception:
        exc_type, exc_value, _ = sys.exc_info()
        print(f"发生错误: {exc_type} - {exc_value}")
        return iter(())
def cut_words(row):
    """Tokenize one (danmu_text, occurrence_count) row into a weighted Counter.

    Each token's final count is (occurrences of the danmu) * (occurrences of
    the token inside the danmu).  Malformed rows yield an empty Counter.
    """
    try:
        text, occurrences = row[0], row[1]
        # Segment with jieba, then drop tokens found in the stop-word list.
        tokens = [token for token in jieba.lcut(text) if token not in stop_Word]
        weighted = Counter(tokens)
        # Scale: per-danmu token count * how many times that danmu appeared.
        for token in weighted:
            weighted[token] *= occurrences
        return weighted
    except TypeError:
        # Abnormal input (e.g. row is None / not subscriptable): empty Counter.
        return Counter()
def generate_Word_Cloud(counter): # generate the word-cloud image
    """Render a word cloud from a word-frequency Counter and save it to output_Path.

    Returns a human-readable status message (Chinese).  Relies on the
    module-level configuration: pic_Path, font_Path, WC_Width, WC_Height,
    output_Path, config_file.
    """
    try:
        if not counter:  # empty frequency table: report and bail out
            return "输入的词频为空!"
        img = PIL.Image.open(pic_Path).convert('RGBA')  # RGBA conversion avoids grayscale-image errors
        pic = np.array(img)
        image_colors = wordcloud.ImageColorGenerator(pic)
        word_Cloud = wordcloud.WordCloud(
            font_path=font_Path, mask=pic, width=WC_Width, height=WC_Height, mode="RGBA", background_color='white')
        word_Cloud.generate_from_frequencies(counter)
        plt.imshow(word_Cloud.recolor(color_func=image_colors),
                   interpolation='bilinear')
        word_Cloud.to_file(output_Path)
        plt.axis('off')
        plt.show()
        return f"词云图生成完成,请前往{output_Path}查看"
    except FileNotFoundError :  # pic_Path or font_Path does not exist
        return f"图片或字体路径错误,请前往{config_file}核查。"
    # BUG FIX: `except TypeError or ValueError` evaluated to `except TypeError`
    # only, so ValueError was never caught.  A tuple catches both.
    except (TypeError, ValueError) :  # WC_Width / WC_Height has a bad type or value
        return f"图片的Height与Width设置有误,请前往{config_file}核查。"
    except PIL.UnidentifiedImageError :
        return f"不支持该类型的图片,请修改图片路径。"
    except Exception as e:
        return f"生成词云图时发生错误:{e}"
def main():
    """Read danmu rows, tokenize them in parallel, and render the word cloud."""
    danmu_rows = read_Danmu(workbook_Name, sheet_Name)
    totals = Counter()
    # A process pool speeds up segmentation when rendering all danmu.
    with Pool() as workers:
        for partial_counts in workers.map(cut_words, danmu_rows):
            totals.update(partial_counts)
    print(generate_Word_Cloud(totals))
if __name__ == "__main__":
    # Load the runtime configuration from config_file.
    config = configparser.ConfigParser()
    if not os.path.exists(config_file):
        print(f"配置文件 {config_file} 不存在!")
        exit(1)
    config.read(config_file)
    workbook_Name = config.get(config_Section_Name, 'workbook_name',
                               fallback='output/Top_20_danmu.xlsx') # Excel workbook to read; defaults to the file produced by crawler.py
    # Name of the worksheet to read; choose from ['Top 20', '所有弹幕']
    sheet_Name = config.get(config_Section_Name, 'sheet_Name', fallback='所有弹幕')
    WC_Width = config.getint(
        config_Section_Name, 'WC_Width', fallback=1200) # word-cloud width in pixels
    WC_Height = config.getint(
        config_Section_Name, 'WC_Height', fallback=1200) # word-cloud height in pixels
    font_Path = config.get(config_Section_Name, 'font_Path',
                           fallback="config/msyh.ttc") # font file path
    pic_Path = config.get(config_Section_Name, 'pic_Path',
                          fallback="config/m.png") # word-cloud background/mask image path
    output_Path = config.get(
        config_Section_Name, 'output_Path', fallback="output/word_could.png")  # NOTE: "word_could" typo kept — it is a runtime path
    main()
| AyaGuang/bilibili-Danmu-Crawler | 102101430/generate_Cloud.py | generate_Cloud.py | py | 5,425 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "openpyxl.utils",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "jieba.lcut",
... |
16439987677 | import math
from datetime import datetime, timedelta
from decimal import Decimal
from financial.input import (
FinancialDataInput,
FinancialStatisticsInput,
NullFinancialDataInput,
NullFinancialStatisticsInput,
)
from financial.model import FinancialData, db
class FinancialDataInputValidationService:
    """Validates raw query-string args for the financial-data endpoint.

    On success ``self.financial_data`` is a FinancialDataInput; on failure it
    is a NullFinancialDataInput and ``self.validation_errors`` lists the
    human-readable reasons (only the first failure is reported).
    """

    def __init__(self, request_args):
        self.validation_errors = []
        self.financial_data = self.validate_and_parse_financial_data_input(request_args)

    def validate_and_parse_financial_data_input(
        self, request_args
    ) -> FinancialDataInput | NullFinancialDataInput:
        # default start_date is 14 days ago
        start_date = request_args.get(
            "start_date", (datetime.now() + timedelta(days=-14)).strftime("%Y-%m-%d")
        )
        # default end_date is today
        end_date = request_args.get("end_date", datetime.now().strftime("%Y-%m-%d"))
        for field_name, date in (("start_date", start_date), ("end_date", end_date)):
            try:
                datetime.strptime(date, "%Y-%m-%d")
            except ValueError:
                self.validation_errors.append(f"{field_name} is not a valid date")
                return NullFinancialDataInput()
        start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
        end_date = datetime.strptime(end_date, "%Y-%m-%d").date()
        if start_date > end_date:
            self.validation_errors.append("start_date is after end_date")
            return NullFinancialDataInput()
        # use "IBM" as default symbol
        symbol = request_args.get("symbol", "IBM")
        if symbol not in ["IBM", "AAPL"]:
            self.validation_errors.append("symbol is not valid")
            return NullFinancialDataInput()
        limit = request_args.get("limit", "5")
        # Use 1 as default page number. Page 1 is the first page.
        page = request_args.get("page", "1")
        for field_name, value in [("limit", limit), ("page", page)]:
            try:
                parsed = int(value)
            except ValueError:
                self.validation_errors.append(f"{field_name} is not a valid integer")
                return NullFinancialDataInput()
            # ROBUSTNESS: limit/page of 0 or less previously slipped through and
            # made pagination divide by zero (math.ceil(total / limit)) or
            # slice nonsensically downstream.
            if parsed < 1:
                self.validation_errors.append(f"{field_name} must be a positive integer")
                return NullFinancialDataInput()
        return FinancialDataInput(
            start_date=start_date,
            end_date=end_date,
            symbol=symbol,
            limit=int(limit),
            page=int(page),
        )
class FinancialStatisticsInputValidationService:
    """Validates raw query-string args for the statistics endpoint.

    On success ``self.financial_statistics`` is a FinancialStatisticsInput;
    on failure it is a NullFinancialStatisticsInput and
    ``self.validation_errors`` records the first failure reason.
    """

    def __init__(self, request_args):
        self.validation_errors = []
        self.financial_statistics = self.validate_and_parse_financial_statistics_input(
            request_args
        )

    def validate_and_parse_financial_statistics_input(
        self, request_args
    ) -> FinancialStatisticsInput | NullFinancialStatisticsInput:
        # All three fields are mandatory here (no defaults, unlike the
        # financial-data endpoint); report the first missing one.
        for field_name in ("start_date", "end_date", "symbol"):
            if field_name not in request_args:
                self.validation_errors.append(f"{field_name} is required")
                return NullFinancialStatisticsInput()
        parsed_dates = {}
        for field_name in ("start_date", "end_date"):
            raw_value = request_args.get(field_name)
            try:
                parsed_dates[field_name] = datetime.strptime(raw_value, "%Y-%m-%d").date()
            except ValueError:
                self.validation_errors.append(f"{field_name} is not a valid date")
                return NullFinancialStatisticsInput()
        if parsed_dates["start_date"] > parsed_dates["end_date"]:
            self.validation_errors.append("start_date is after end_date")
            return NullFinancialStatisticsInput()
        symbol = request_args.get("symbol")
        # Only IBM and AAPL are supported symbols.
        if symbol not in ("IBM", "AAPL"):
            self.validation_errors.append("symbol is not valid")
            return NullFinancialStatisticsInput()
        return FinancialStatisticsInput(
            start_date=parsed_dates["start_date"],
            end_date=parsed_dates["end_date"],
            symbol=symbol,
        )
class GetFinancialDataService:
    """Service to fetch financial data from the database and paginate it."""

    def __init__(self, financial_data_input: FinancialDataInput):
        self.financial_data_input = financial_data_input
        self.financial_data_output = []
        self.pagination = {}

    def get_financial_data(self) -> None:
        """Run the query and populate both the page of rows and pagination info."""
        query = (
            db.select(FinancialData)
            .where(
                FinancialData.symbol == self.financial_data_input.symbol,
                FinancialData.date >= self.financial_data_input.start_date,
                FinancialData.date <= self.financial_data_input.end_date,
            )
            .order_by(FinancialData.date)
        )
        rows = db.session.scalars(query).all()
        self.format_pagination(len(rows))
        self.format_financial_data(rows)

    def format_financial_data(self, financial_data: list[FinancialData]) -> None:
        """Keep only the requested page and serialise each row to a dict."""
        page = self.financial_data_input.page
        limit = self.financial_data_input.limit
        # Page numbers start at 1, so page p covers rows [(p-1)*limit, p*limit).
        page_rows = financial_data[(page - 1) * limit : page * limit]
        self.financial_data_output = [
            {
                "symbol": row.symbol,
                "date": row.date.strftime("%Y-%m-%d"),
                "open_price": row.open_price,
                "close_price": row.close_price,
                "volume": row.volume,
            }
            for row in page_rows
        ]

    def format_pagination(self, total_length: int) -> None:
        """Record total row count plus the limit/page/pages metadata."""
        limit = self.financial_data_input.limit
        # page starts at 1
        self.pagination = {
            "total": total_length,
            "limit": limit,
            "page": self.financial_data_input.page,
            "pages": math.ceil(total_length / limit),
        }
class CalculateFinancialStatisticsService:
    """Service to fetch financial data and compute daily averages over a range."""

    def __init__(self, financial_statistics_input: FinancialStatisticsInput):
        self.financial_statistics_input = financial_statistics_input
        self.financial_statistics_output = {}

    def calculate_financial_statistics(self) -> None:
        """Query rows matching the symbol/date range and build the output dict."""
        financial_data = db.session.scalars(
            db.select(FinancialData).where(
                FinancialData.symbol == self.financial_statistics_input.symbol,
                FinancialData.date >= self.financial_statistics_input.start_date,
                FinancialData.date <= self.financial_statistics_input.end_date,
            )
        ).all()
        self.format_financial_statistics(financial_data)

    def format_financial_statistics(self, financial_data: list[FinancialData]) -> None:
        """Serialise the averages (stringified for the JSON response)."""
        self.financial_statistics_output = {
            "symbol": self.financial_statistics_input.symbol,
            "start_date": self.financial_statistics_input.start_date.strftime(
                "%Y-%m-%d"
            ),
            "end_date": self.financial_statistics_input.end_date.strftime("%Y-%m-%d"),
            "average_daily_open_price": str(
                self.calculate_average_daily_open_price(financial_data)
            ),
            "average_daily_close_price": str(
                self.calculate_average_daily_close_price(financial_data)
            ),
            "average_daily_volume": str(
                self.calculate_average_daily_volume(financial_data)
            ),
        }

    def calculate_average_daily_volume(
        self, financial_data: list[FinancialData]
    ) -> Decimal:
        """Average daily volume, rounded to the nearest integer.

        ROBUSTNESS: returns 0 for an empty result set instead of raising
        ZeroDivisionError (previously any date range with no rows crashed).
        """
        if not financial_data:
            return 0
        return round(sum(row.volume for row in financial_data) / len(financial_data))

    def calculate_average_daily_open_price(
        self, financial_data: list[FinancialData]
    ) -> Decimal:
        """Average daily open price, rounded to 2 decimal places (0 if no rows)."""
        if not financial_data:
            return 0
        return round(
            (sum(row.open_price for row in financial_data) / len(financial_data)), 2
        )

    def calculate_average_daily_close_price(
        self, financial_data: list[FinancialData]
    ) -> Decimal:
        """Average daily close price, rounded to 2 decimal places (0 if no rows)."""
        if not financial_data:
            return 0
        return round(
            (sum(row.close_price for row in financial_data) / len(financial_data)), 2
        )
| pevenc12/python_assignment | financial/services.py | services.py | py | 8,468 | python | en | code | null | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.d... |
4583110582 | from __future__ import division
from copy import deepcopy
import torch
from torch.autograd import Variable
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import numpy as np
import torch
def average_rule(keys, Temp_state_dict, neighbors):
    """Element-wise average of the neighbors' parameter tensors, per key.

    Temp_state_dict maps each parameter key to an indexable collection of
    tensors (one per agent); only the indices in `neighbors` are averaged.
    """
    aggr_state_dict = {}
    for key in keys:
        # PERF: torch.stack/torch.mean allocate fresh tensors, so the previous
        # per-tensor deepcopy was pure overhead and is dropped.
        neighbor_params = [Temp_state_dict[key][i] for i in neighbors]
        aggr_state_dict[key] = torch.mean(torch.stack(neighbor_params), 0)
    return aggr_state_dict
def median_rule(keys, Temp_state_dict, neighbors):
    """Element-wise median of the neighbors' parameter tensors, per key."""
    result = {}
    for key in keys:
        stacked = torch.stack([Temp_state_dict[key][n] for n in neighbors])
        # torch.median over dim 0 returns (values, indices); keep the values.
        values, _indices = torch.median(stacked, 0)
        result[key] = values
    return result
def actor_rule(agent_id, policy, Model_actor, Model_critic, Model_critic_2, ram, keys, ActorDict, neighbors, alpha, Accumu_Q_actor, filter, normalize=False, softmax=False):
    """Aggregate neighbors' actor parameters, weighting each neighbor by the
    Q-value its policy earns under this agent's own critic on a sampled batch.

    NOTE(review): `filter` shadows the builtin of the same name.
    """
    random_batch_size = 256
    # gamma = 1
    s1, a1, s2, _, _ = ram.sample(random_batch_size)
    # s1 = Variable(torch.from_numpy(np.float32(s1))).to(device)
    # Score every neighbor's actor with this agent's critic(s).
    for neigh in neighbors:
        if policy == "TD3":
            pred_a1 = Model_actor[neigh](s1)
            Q_actor = Model_critic[agent_id].Q1(s1, pred_a1).mean()
            # Accumu_loss_actor[agent_id, neigh] = (1 - gamma) * Accumu_loss_actor[agent_id, neigh] + gamma * loss_actor
            Accumu_Q_actor[agent_id, neigh] = Q_actor
        elif policy == "DDPG":
            pred_a1 = Model_actor[neigh](s1)
            Q_actor = Model_critic[agent_id].forward(s1, pred_a1).mean()
            # Accumu_loss_actor[agent_id, neigh] = (1 - gamma) * Accumu_loss_actor[agent_id, neigh] + gamma * loss_actor
            Accumu_Q_actor[agent_id, neigh] = Q_actor
        elif policy == "PPO":
            pass
        elif policy == "SAC":
            # Prediction π(a|s), logπ(a|s), π(a'|s'), logπ(a'|s'), Q1(s,a), Q2(s,a)
            _, pi, log_pi = Model_actor[neigh](s1)
            # Min Double-Q: min(Q1(s,π(a|s)), Q2(s,π(a|s))), min(Q1‾(s',π(a'|s')), Q2‾(s',π(a'|s')))
            min_q_pi = torch.min(Model_critic[agent_id](s1, pi), Model_critic_2[agent_id](s1, pi)).squeeze(1)
            # SAC losses
            para = 0.2
            policy_loss = (para * log_pi - min_q_pi).mean()
            # Store negated loss so that "higher is better" holds for all policies.
            Accumu_Q_actor[agent_id, neigh] = -policy_loss
        else:
            raise NameError("Policy name is not defined!")
    Q = deepcopy(Accumu_Q_actor[agent_id, :])
    # NOTE(review): min_Q is unused on the active code path (its use is commented out below).
    min_Q = np.min(Accumu_Q_actor[agent_id, neighbors])
    max_Q = np.max(Accumu_Q_actor[agent_id, neighbors])
    if normalize:
        # Q = np.array([Q[neigh] - min_Q if neigh in neighbors else 0 for neigh in range(len(Q))])
        # Q = Q / (max_Q - min_Q)
        # Shift by the max for numerical stability, then exponentiate.
        Q = [Q[neigh] - max_Q if neigh in neighbors else 0 for neigh in range(len(Q))]
        Q = [np.exp(Q[neigh]) if neigh in neighbors else 0 for neigh in range(len(Q))]
    if softmax:
        if not normalize:
            Q = [Q[neigh] - max_Q if neigh in neighbors else 0 for neigh in range(len(Q))]
            Q = [np.exp(Q[neigh]) if neigh in neighbors else 0 for neigh in range(len(Q))]
    if filter:
        # Drop neighbors scoring below this agent's own score.
        Q = [Q[neigh] if Q[neigh] >= Q[agent_id] else 0 for neigh in range(len(Q))]
    Q[agent_id] *= alpha[agent_id]
    sum_Q = sum(Q)
    # NOTE(review): after the normalize/softmax branches Q is a plain Python
    # list, and `Q / sum_Q` below would raise TypeError — presumably those
    # paths are unused or Q is expected to stay a numpy array; TODO confirm.
    Weight = Q / sum_Q
    # in case sum is not 1
    Weight[agent_id] = 1 - sum(Weight[:agent_id]) - sum(Weight[agent_id + 1:])
    print("agent %d, actor weight, loss" % agent_id, Weight, Accumu_Q_actor[agent_id, :])
    # Weighted sum of the neighbors' actor parameters, per key.
    aggr_state_dict = {}
    for key in keys:
        # temp_state_dict = [ActorDict[key][i] * Weight[i] * len(neighbors) for i in neighbors]
        # aggr_state_dict[key] = torch.mean(torch.stack(temp_state_dict), 0)
        temp_state_dict = [ActorDict[key][i] * Weight[i] for i in neighbors]
        aggr_state_dict[key] = torch.sum(torch.stack(temp_state_dict), 0)
    # filtering
    # aggr_actor = deepcopy(Model_actor[agent_id])
    # aggr_actor.load_state_dict(aggr_state_dict)
    # pred_a1 = aggr_actor(s1)
    # Q_actor = Model_critic[agent_id].Q1(s1, pred_a1).mean()
    # if Q_actor > Accumu_Q_actor[agent_id, agent_id]:
    #     print("agent %d, return aggregate model" % agent_id)
    #     return aggr_state_dict
    # else:
    #     return Model_actor[agent_id].state_dict()
    return aggr_state_dict
def critic_rule(agent_id, policy, Model_actor, Model_critic, Model_critic_2, Model_target_critic, Model_target_critic_2, ram, keys, CriticDict, Critic2Dict, neighbors, alpha, Accumu_loss_critic, filter, softmax=False):
    """Aggregate neighbors' critic parameters, weighting each neighbor by the
    inverse of its TD loss evaluated with this agent's actor/targets.

    For SAC, returns a pair of aggregated state dicts (twin critics);
    otherwise a single state dict.  NOTE(review): `filter` shadows the builtin.
    """
    random_batch_size = 256
    GAMMA = 0.99
    gamma = 1
    s1, a1, s2, r1, not_done = ram.sample(random_batch_size)
    if policy == "SAC":
        r1, not_done = r1.squeeze(1), not_done.squeeze(1)
    # Evaluate every neighbor's critic on the same batch.
    for neigh in neighbors:
        # Use target actor exploitation policy here for loss evaluation
        if policy == "TD3":
            a2_k = Model_actor[agent_id](s2).detach()
            target_Q1, target_Q2 = Model_target_critic[agent_id].forward(s2, a2_k)
            target_Q = torch.min(target_Q1, target_Q2)
            # y_exp = r + gamma*Q'( s2, pi'(s2))
            y_expected = r1 + not_done * GAMMA * target_Q
            # y_pred = Q( s1, a1)
            y_predicted_1, y_predicted_2 = Model_critic[neigh].forward(s1, a1)
            # compute critic loss, and update the critic
            loss_critic = F.mse_loss(y_predicted_1, y_expected) + F.mse_loss(y_predicted_2, y_expected)
        elif policy == "DDPG":
            a2_k = Model_actor[agent_id](s2).detach()
            target_Q = Model_target_critic[agent_id].forward(s2, a2_k)
            # y_exp = r + gamma*Q'( s2, pi'(s2))
            y_expected = r1 + not_done * GAMMA * target_Q
            # y_pred = Q( s1, a1)
            y_predicted = Model_critic[neigh].forward(s1, a1)
            # compute critic loss, and update the critic
            loss_critic = F.mse_loss(y_predicted, y_expected)
        elif policy == "PPO":
            pass
        elif policy == "SAC":
            para = 0.2
            # Prediction π(a|s), logπ(a|s), π(a'|s'), logπ(a'|s'), Q1(s,a), Q2(s,a)
            _, next_pi, next_log_pi = Model_actor[agent_id](s2)
            q1 = Model_critic[neigh](s1, a1).squeeze(1)
            q2 = Model_critic_2[neigh](s1, a1).squeeze(1)
            min_q_next_pi = torch.min(Model_target_critic[agent_id](s2, next_pi),
                                      Model_target_critic_2[agent_id](s2, next_pi)).squeeze(1)
            v_backup = min_q_next_pi - para * next_log_pi
            q_backup = r1 + GAMMA * not_done * v_backup
            qf1_loss = F.mse_loss(q1, q_backup.detach())
            qf2_loss = F.mse_loss(q2, q_backup.detach())
            loss_critic = qf1_loss + qf2_loss
        else:
            raise NameError("Policy name is not defined!")
        # Exponential-moving-average slot; with gamma = 1 this reduces to plain assignment.
        Accumu_loss_critic[agent_id, neigh] = (1 - gamma) * Accumu_loss_critic[agent_id, neigh] + gamma * loss_critic
    loss = deepcopy(Accumu_loss_critic[agent_id, :])
    # if normalize:
    #     min_Q = np.min(loss)
    #     max_Q = np.max(loss)
    #     loss = (loss - min_Q) / (max_Q - min_Q)
    # Inverse-loss weights; non-neighbors keep weight 0.
    reversed_Loss = np.zeros(len(Model_actor))
    for neigh in neighbors:
        if filter:
            # Only keep neighbors whose loss is no worse than this agent's own.
            if Accumu_loss_critic[agent_id, neigh] <= Accumu_loss_critic[agent_id, agent_id]:
                reversed_Loss[neigh] = 1 / loss[neigh]
        else:
            # if softmax:
            #     reversed_Loss[neigh] = np.exp(-loss[neigh]) # 1 / np.exp(loss[neigh])
            # else:
            reversed_Loss[neigh] = 1 / loss[neigh]
    reversed_Loss[agent_id] *= alpha[agent_id]
    sum_reversedLoss = sum(reversed_Loss)
    # Weight = np.zeros(numAgent)
    # for neigh in range(0, numAgent):
    Weight = reversed_Loss / sum_reversedLoss
    # in case sum is not 1
    Weight[agent_id] = 1 - sum(Weight[:agent_id]) - sum(Weight[agent_id + 1:])
    print("agent %d, critic weight, loss, reversedloss" % agent_id, Weight, loss, reversed_Loss)
    # weight = torch.from_numpy(weight)
    aggr_state_dict = {}
    for key in keys:
        # temp_state_dict = [ActorDict[key][i] * Weight[i] * len(neighbors) for i in neighbors]
        # aggr_state_dict[key] = torch.mean(torch.stack(temp_state_dict), 0)
        temp_state_dict = [CriticDict[key][i] * Weight[i] for i in neighbors]
        aggr_state_dict[key] = torch.sum(torch.stack(temp_state_dict), 0)
    if policy == "SAC":
        # Aggregate the second (twin) critic with the same weights.
        aggr_state_dict_2 = {}
        for key in keys:
            # temp_state_dict = [ActorDict[key][i] * Weight[i] * len(neighbors) for i in neighbors]
            # aggr_state_dict[key] = torch.mean(torch.stack(temp_state_dict), 0)
            temp_state_dict_2 = [Critic2Dict[key][i] * Weight[i] for i in neighbors]
            aggr_state_dict_2[key] = torch.sum(torch.stack(temp_state_dict_2), 0)
        return aggr_state_dict, aggr_state_dict_2
    return aggr_state_dict
| cbhowmic/resilient-adaptive-RL | aggregateMethods.py | aggregateMethods.py | py | 9,022 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
... |
22618188640 | # encoding: utf-8
# pylint: disable=redefined-outer-name,missing-docstring
import pytest
from tests import utils
from app import create_app
# FIX: pytest.yield_fixture is deprecated and was removed in modern pytest;
# plain pytest.fixture has supported yield fixtures since pytest 3.0.
@pytest.fixture(scope='session')
def flask_app():
    """Session-wide Flask app in 'testing' config with a fresh DB schema."""
    app = create_app(flask_config='testing')
    from app.extensions import db
    with app.app_context():
        db.create_all()
        yield app
        db.drop_all()
# FIX: pytest.yield_fixture is deprecated/removed; use pytest.fixture.
@pytest.fixture()
def db(flask_app):
    """Per-test database handle; the session is rolled back afterwards."""
    # pylint: disable=unused-argument,invalid-name
    from app.extensions import db as db_instance
    yield db_instance
    db_instance.session.rollback()
@pytest.fixture(scope='session')
def flask_app_client(flask_app):
    # Test client wired with the project's auto-auth client class and
    # JSON-aware response class (both from tests.utils).
    flask_app.test_client_class = utils.AutoAuthFlaskClient
    flask_app.response_class = utils.JSONResponse
    return flask_app.test_client()
# FIX: pytest.yield_fixture is deprecated/removed; use pytest.fixture.
@pytest.fixture(scope='session')
def regular_user(flask_app):
    """Session-wide plain user, persisted for the session then deleted."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    regular_user_instance = utils.generate_user_instance(
        username='regular_user'
    )
    db.session.add(regular_user_instance)
    db.session.commit()
    yield regular_user_instance
    db.session.delete(regular_user_instance)
    db.session.commit()
# FIX: pytest.yield_fixture is deprecated/removed; use pytest.fixture.
@pytest.fixture(scope='session')
def readonly_user(flask_app):
    """Session-wide read-only user, persisted for the session then deleted."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    readonly_user_instance = utils.generate_user_instance(
        username='readonly_user',
        is_readonly=True
    )
    db.session.add(readonly_user_instance)
    db.session.commit()
    yield readonly_user_instance
    db.session.delete(readonly_user_instance)
    db.session.commit()
# FIX: pytest.yield_fixture is deprecated/removed; use pytest.fixture.
@pytest.fixture(scope='session')
def admin_user(flask_app):
    """Session-wide admin user, persisted for the session then deleted."""
    # pylint: disable=invalid-name,unused-argument
    from app.extensions import db
    admin_user_instance = utils.generate_user_instance(
        username='admin_user',
        is_admin=True
    )
    db.session.add(admin_user_instance)
    db.session.commit()
    yield admin_user_instance
    db.session.delete(admin_user_instance)
    db.session.commit()
| DurandA/pokemon-battle-api | tests/conftest.py | conftest.py | py | 2,085 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "app.create_app",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.app_context",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.extensions.db.create_all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "app.exten... |
12611135709 | import pytest
from utils import *
from fireplace.exceptions import GameOver
LORD_JARAXXUS = "EX1_323"
LORD_JARAXXUS_HERO = "EX1_323h"
LORD_JARAXXUS_WEAPON = "EX1_323w"
INFERNO = "EX1_tk33"
INFERNO_TOKEN = "EX1_tk34"
def test_jaraxxus():
    """Playing Lord Jaraxxus replaces hero, weapon, health, armor and hero power."""
    game = prepare_game(CardClass.WARRIOR, CardClass.WARRIOR)
    game.player1.hero.power.use()
    game.player1.give(LIGHTS_JUSTICE).play()
    assert game.player1.weapon.id == LIGHTS_JUSTICE
    game.end_turn()
    game.end_turn()
    assert game.player1.hero.health == 30
    assert game.player1.hero.armor == 2
    game.player1.give(LORD_JARAXXUS).play()
    assert game.player1.hero.id == LORD_JARAXXUS_HERO
    assert game.player1.weapon.id == LORD_JARAXXUS_WEAPON
    assert game.player1.hero.health == 15
    assert game.player1.hero.armor == 0
    assert game.player1.hero.power.id == INFERNO
    assert len(game.player1.field) == 0
    game.end_turn()
    game.end_turn()
    # The new INFERNO hero power summons an Infernal token.
    game.player1.hero.power.use()
    assert len(game.player1.field) == 1
    assert game.player1.field[0].id == INFERNO_TOKEN
def test_jaraxxus_cult_master():
    """Cult Master must not draw when Jaraxxus removes itself as a minion."""
    game = prepare_game()
    game.player1.discard_hand()
    game.player1.summon("EX1_595")
    game.player1.give(LORD_JARAXXUS).play()
    assert len(game.player1.field) == 1
    assert not game.player1.hand
def test_jaraxxus_knife_juggler():
    """Knife Juggler must not throw a knife for the Jaraxxus 'summon'."""
    game = prepare_game()
    juggler = game.player1.summon("NEW1_019")
    game.player1.give(LORD_JARAXXUS).play()
    assert game.player2.hero.health == 30
    assert juggler.health == 2
def test_jaraxxus_molten_giant():
    """Jaraxxus sets hero health to 15, so Molten Giant is discounted to 20."""
    game = prepare_game()
    # CONSISTENCY: use the LORD_JARAXXUS constant like every other test here
    # (previously the raw card id "EX1_323").
    jaraxxus = game.player1.give(LORD_JARAXXUS)
    molten = game.player1.give("EX1_620")
    jaraxxus.play()
    assert game.player1.hero.health == 15
    assert molten.cost == 20
def test_jaraxxus_mirror_entity():
    """Mirror Entity copies Jaraxxus the minion before the hero replacement."""
    game = prepare_game()
    mirror = game.player1.give("EX1_294")
    mirror.play()
    game.end_turn()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    assert not game.player1.secrets
    assert game.player2.hero.id == LORD_JARAXXUS_HERO
    assert len(game.player1.field) == 1
    assert game.player1.field[0].id == LORD_JARAXXUS
def test_jaraxxus_repentance():
    """Repentance triggers on Jaraxxus, leaving the new hero at 1 max health."""
    game = prepare_game()
    repentance = game.player1.give("EX1_379")
    repentance.play()
    game.end_turn()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    assert not game.player1.secrets
    assert game.player2.hero.id == LORD_JARAXXUS_HERO
    assert game.player2.hero.health == game.player2.hero.max_health == 1
def test_jaraxxus_snipe():
    """Snipe must not fire at Jaraxxus: secret stays up and hero sits at 15."""
    game = prepare_game()
    snipe = game.player1.give("EX1_609")
    snipe.play()
    game.end_turn()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    assert len(game.player1.secrets) == 1
    assert game.player2.hero.health == 15
def test_jaraxxus_sacred_trial():
    """Sacred Trial vs Jaraxxus: no trigger as the 4th minion (timing), but
    destroying Jaraxxus as the 5th minion kills the opponent's hero."""
    game = prepare_game()
    trial = game.player1.give("LOE_027")
    trial.play()
    game.end_turn()
    game.player2.give(WISP).play()
    game.player2.give(WISP).play()
    game.player2.give(WISP).play()
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    jaraxxus.play()
    # Will not trigger as 4th minion due to timing
    assert trial in game.player1.secrets
    assert not game.player2.hero.dead
    game.end_turn()
    game.end_turn()
    wisp4 = game.player2.summon(WISP)
    assert not wisp4.dead
    jaraxxus = game.player2.give(LORD_JARAXXUS)
    with pytest.raises(GameOver):
        jaraxxus.play()
    assert trial not in game.player1.secrets
    assert game.player2.hero.dead
| jleclanche/fireplace | tests/test_jaraxxus.py | test_jaraxxus.py | py | 3,302 | python | en | code | 645 | github-code | 6 | [
{
"api_name": "pytest.raises",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "fireplace.exceptions.GameOver",
"line_number": 124,
"usage_type": "argument"
}
] |
72474001467 | import random
import numpy as np
from math import sqrt, log
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
x1_list = []
x2_list = []
y_list = []
counter = 0
def drawFunc(minX, minY, maxX, maxY, ax = None):
    """Plot the objective surface f(x1, x2) over [minX,maxX) x [minY,maxY)
    on the given 3D axes, masking infeasible points via fill_arrays/fill_z.

    NOTE(review): the default ax=None would crash at plot_surface below —
    callers are expected to always pass a 3D axes object; confirm.
    """
    #fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    #ax.set_xlabel('x1')
    #ax.set_ylabel('x2')
    #ax.set_zlabel('f(x1,x2)')
    x1_array = np.arange(minX, maxX, 0.1)
    x2_array = np.arange(minY, maxY, 0.1)
    # Mask out infeasible points, then evaluate f on the masked grids.
    x1_array, x2_array = fill_arrays(x1_array, x2_array)
    R = fill_z(x1_array, x2_array)
    # Rebuild plain meshgrid coordinates for plotting (the masked grids were consumed above).
    x1_array = np.arange(minX, maxX, 0.1)
    x2_array = np.arange(minY, maxY, 0.1)
    x1_array, x2_array = np.meshgrid(x1_array, x2_array)
    #R = f(x1_array, x2_array)
    #drawBoder(ax, x1_array, g1_1)
    #drawBoder(ax, x1_array, g2_1)
    #drawBoder(ax, x1_array, g3_1)
    #drawBoder(ax, x1_array, g4_1)
    #print(R)
    ax.plot_surface(x1_array, x2_array, R, alpha = 0.6)
    #plt.show()
def fill_arrays(x, y):
    """Build two nested lists of coordinates with infeasible points zeroed.

    A point (x[j], y[i]) is kept only where the barrier predicate barier()
    holds; otherwise the entry becomes 0 (used as a mask marker by fill_z).

    NOTE(review): the second loop tests barier(x[j], y[i]) but appends y[j],
    and both loops assume len(x) == len(y) — looks like a transposed-index
    construction; confirm against fill_z's [i][j] / [j][i] access pattern.
    """
    final_y = []
    final_x = []
    for i in range(len(y)):
        final_y.append([])
        for j in range(len(x)):
            if (barier(x[j], y[i])):
                #if f(x[j], y[i]) > 50:
                #print("i =", i, "j =", j)
                #print("x =", x[j], "y =", y[i], "f =", f(x[j], y[i]))
                final_y[i].append(x[j])
            else: final_y[i].append(0)
    for i in range(len(x)):
        final_x.append([])
        for j in range(len(y)):
            if (barier(x[j], y[i])):
                final_x[i].append(y[j])
            else: final_x[i].append(0)
    #for i in range(len(final_x)):
    #    print(i,")", final_x[i])
    return final_y, final_x
def fill_z(x, y):
    """Evaluate f over the masked coordinate matrices from fill_arrays.

    Cells where either masked coordinate is 0 (infeasible) get z = 0.0.
    Note the transposed y[j][i] indexing, matching fill_arrays' output layout.
    """
    z = []
    for i in range(len(x)):
        z.append([])
        for j in range(len(x[i])):
            if (x[i][j] != 0 and y[j][i] != 0):
                z[i].append(f(x[i][j], y[j][i]))
            else: z[i].append(0.0)
            #print("i =", i, "j =", j)
            #print("x =", x[i][j], "y =", y[j][i], "z =", z[i][j])
    #for i in range(len(z)):
    #    print(i,")", z[i])
    r = np.array(z)
    #for i in range(len(z)):
    #    r.__add__(np.array[z[i]])
    return r
def fill_F2(x, y):
    """Evaluate f element-wise over meshgrid coordinates (x, y), writing 0.0
    wherever the point violates the inequality constraints."""
    rows = []
    for row_x, row_y in zip(x, y):
        rows.append([f(a, b) if barier(a, b) else 0.0
                     for a, b in zip(row_x, row_y)])
    return np.array(rows)
def g1_1(x1):
    """x2 on the boundary of constraint g1 (-3*x1 - 2*x2 + 6 = 0)."""
    return (-3*x1 + 6) / 2

def g2_1(x1):
    """x2 on the boundary of constraint g2 (-x1 + x2 - 3 = 0)."""
    return (-x1 - 3) / (-1)

def g3_1(x1):
    """x2 on the boundary of constraint g3 (x1 + x2 - 7 = 0)."""
    return (x1 - 7) / (-1)

def g4_1(x1):
    """x2 on the boundary of constraint g4 (2*x1 - 3*x2 - 4 = 0)."""
    return (2*x1 - 4) / 3
def drawBoder(ax, x1, g):
    """Draw a constraint boundary x2 = g(x1) as a vertical 3D surface on ax."""
    # Two z levels are enough for a flat vertical "wall".
    zs = np.arange(0, 80, 35)
    X, Z = np.meshgrid(x1, zs)
    Y = g(X)
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, Z, alpha = 0.4)
def show(x1_list, x2_list):
    """Render the optimization path (x1, x2, f(x1, x2)) over the feasible surface.

    The first iterate is drawn black, intermediate iterates red, the final
    iterate green, and all points are connected with a black polyline.
    Does nothing when the path is empty.
    """
    N = len(x1_list)
    if N <= 0:
        return
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('f(x1,x2)')
    drawFunc(0, 0, 5, 5, ax)
    xs, ys, zs = [], [], []
    for n in range(N):
        # BUG FIX: the original computed the final point's height with
        # f(x1_list[N - 1], x2_list[n]) -- a stale loop index -- instead of
        # x2_list[N - 1], so the last polyline vertex could be wrong.
        z = f(x1_list[n], x2_list[n])
        if n == 0:
            color = 'black'
        elif n == N - 1:
            color = 'green'
        else:
            color = 'red'
        ax.scatter(x1_list[n], x2_list[n], z, c=color)
        xs.append(x1_list[n])
        ys.append(x2_list[n])
        zs.append(z)
    ax.plot(xs, ys, zs, color="black")
    plt.show()
# <---------- f
def f(x1, x2):
    """Objective function: squared Euclidean distance from the point (6, 7)."""
    dx = x1 - 6
    dy = x2 - 7
    return dx**2 + dy**2

def f_x1(x1, x2):
    """Partial derivative of f with respect to x1."""
    return 2*x1 - 12

def f_x2(x1, x2):
    """Partial derivative of f with respect to x2."""
    return 2*x2 - 14
# -------------->
# <---------- gi
def g1(x1, x2):
    """Constraint 1 residual; feasible when g1(x1, x2) <= 0."""
    return -3*x1 - 2*x2 + 6

def g2(x1, x2):
    """Constraint 2 residual; feasible when g2(x1, x2) <= 0."""
    return -x1 + x2 - 3

def g3(x1, x2):
    """Constraint 3 residual; feasible when g3(x1, x2) <= 0."""
    return x1 + x2 - 7

def g4(x1, x2):
    """Constraint 4 residual; feasible when g4(x1, x2) <= 0."""
    return 2*x1 - 3*x2 - 4
# -------------->
# <---------- gi_bool
def g1_bool(x1, x2):
    """True when constraint 1 is satisfied."""
    return -3*x1 - 2*x2 + 6 <= 0

def g2_bool(x1, x2):
    """True when constraint 2 is satisfied."""
    return -x1 + x2 - 3 <= 0

def g3_bool(x1, x2):
    """True when constraint 3 is satisfied."""
    return x1 + x2 - 7 <= 0

def g4_bool(x1, x2):
    """True when constraint 4 is satisfied."""
    return 2*x1 - 3*x2 - 4 <= 0

def barier(x1, x2):
    """True when (x1, x2) satisfies every inequality constraint."""
    checks = (g1_bool, g2_bool, g3_bool, g4_bool)
    return all(check(x1, x2) for check in checks)
# -------------->
# <---------- X
def F(x1, x2, r):
    """Penalized objective: f plus the interior barrier term P with coefficient r."""
    return f(x1,x2) + P(x1, x2, r)
def F_x1(x1, x2, r):
    """Partial derivative of F with respect to x1."""
    return f_x1(x1, x2) + P_x1(x1, x2, r)
def F_x2(x1, x2, r):
    """Partial derivative of F with respect to x2."""
    return f_x2(x1, x2) + P_x2(x1, x2, r)
# -------------->
# <-------------- P
def P(x1, x2, r):
    """Inverse-barrier penalty -r * sum(1/gi); blows up near constraint boundaries."""
    total = 1/g1(x1, x2) + 1/g2(x1, x2) + 1/g3(x1, x2) + 1/g4(x1, x2)
    return -r*total

def P_x1(x1, x2, r):
    """Analytic partial derivative of P with respect to x1."""
    total = 3/(g1(x1, x2)**2) + 1/(g2(x1, x2)**2) - 1/(g3(x1, x2)**2) - 1/(g4(x1, x2)**2)
    return -r*total

def P_x2(x1, x2, r):
    """Analytic partial derivative of P with respect to x2."""
    total = 2/(g1(x1, x2)**2) - 1/(g2(x1, x2)**2) - 1/(g3(x1, x2)**2) + 3/(g4(x1, x2)**2)
    return -r*total
# ------------>
def gradient(x1, x2, r):
    """Gradient vector [dF/dx1, dF/dx2] of the penalized objective F."""
    return [F_x1(x1, x2, r), F_x2(x1, x2, r)]

def module_of_gradient(grad):
    """Euclidean norm of a 2-component gradient vector."""
    gx, gy = grad[0], grad[1]
    return sqrt(gx**2 + gy**2)
def method_of_gradient_descent_with_a_constant_step(x1, x2, e, M, r):
    """Gradient descent on F(., ., r) with step backtracking.

    Starts at (x1, x2); each iteration takes a step of initial size 0.1 along
    the negative gradient, shrinking it by 4x until F decreases AND the trial
    point stays feasible.  Appends each accepted iterate to the module-level
    x1_list/x2_list and bumps the global evaluation counter.

    Returns the final (x1_next, x2_next) pair.
    """
    global counter
    k = 0
    counter += 1
    x1_next = x1
    x2_next = x2
    while True:
        counter += 2
        grad = gradient(x1, x2, r)
        module_grad = module_of_gradient(grad)
        # NOTE(review): stops only when BOTH the gradient is small AND k >= M;
        # the usual stopping rule is an 'or' -- confirm this is intended.
        if ((module_grad < e) and (k >= M)):
            return (x1_next, x2_next)
        gamma = 0.1
        x1_next = x1 - gamma * grad[0]
        x2_next = x2 - gamma * grad[1]
        counter += 2
        # Backtracking: shrink the step until F decreases and the point is feasible.
        while (F(x1_next, x2_next, r) - F(x1, x2, r) >= 0 or not barier(x1_next, x2_next)):
            gamma /= 4
            x1_next = x1 - gamma * grad[0]
            x2_next = x2 - gamma * grad[1]
            counter += 1
        #print(grad, 'x1 =', x1, 'x2 =', x2, 'x1_next =', x1_next, 'x2_next =', x2_next, 'gamma =', gamma)
        x1_list.append(x1); x2_list.append(x2)
        # Converged when both the step length and the change in F are within e.
        # (Bitwise '&' on two bools is equivalent to 'and' here.)
        if ((sqrt(abs(x1_next - x1)**2 + abs(x2_next - x2)**2) <= e)
            & (abs(F(x1_next, x2_next, r) - F(x1, x2, r)) <= e)):
            return (x1_next, x2_next)
        x1 = x1_next
        x2 = x2_next
        k += 1
def barrier_function_method(x1, x2, r, C, e, M, k):
    """Interior-point (barrier) method.

    Repeatedly minimizes the penalized objective F with gradient descent,
    shrinking the barrier coefficient r by factor C each outer iteration,
    until the penalty magnitude |P| falls below tolerance e.

    Returns [(x1, x2, f(x1, x2)) rounded to the global round_num, k] where
    k counts the outer (recursive) iterations.
    """
    min_x1, min_x2 = method_of_gradient_descent_with_a_constant_step(x1, x2, e, M, r)
    #print("x1 =", min_x1, "x2 =", min_x2)
    fine = P(min_x1, min_x2, r)
    #print("fine =", fine)
    if (abs(fine) <= e):
        return [(round(min_x1, round_num),
                 round(min_x2, round_num),
                 round(f(min_x1, min_x2), round_num)),
                k]
    k += 1
    r = r/C
    # Recurse from the current minimizer with a weaker barrier.
    return barrier_function_method(min_x1, min_x2, r, C, e, M, k)
# Decimal places used when reporting the solution.
round_num = 4
# Strictly feasible starting point and algorithm parameters.
x1 = 2.5
x2 = 1
e = 0.0001   # convergence tolerance
M = 100      # iteration threshold used in the descent stopping rule
r = 1        # initial barrier coefficient
c = 10       # barrier shrink factor per outer iteration
k = 0        # outer-iteration counter (passed through the recursion)
result = barrier_function_method(x1, x2, r, c, e, M, k)
print(f"Barrier function method: {result[0]}; count of iteractions = {result[1]}")
print('Count of compute function =', counter + 1)
show(x1_list, x2_list)
#drawFunc(0, 0, 5, 5)
| AlexSmirno/Learning | 6 Семестр/Оптимизация/Lab_6_grad.py | Lab_6_grad.py | py | 7,739 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_numbe... |
7357205248 | import requests
import json
import nestConfig
#AWS Constants
# GraphQL endpoint resolved from the nest configuration module.
url = nestConfig.get_URL()
# GraphQL mutation that marks a nest as checked in.
query = '''
mutation Mutation($id: String!) {
checkIn(id: $id) {
code
message
}
}
'''
def checkIn(nestID):
    """Post the check-in mutation for the given nest ID.

    Always returns None.  Network errors are swallowed (best-effort ping);
    a non-200 response raises a generic Exception with the server's text.
    """
    # NOTE(review): 'variables' is sent as a JSON-encoded string rather than
    # a dict -- presumably the server accepts that; verify against the backend.
    payload = json.dumps({'id':nestID})
    try:
        res = requests.post(url, json={'query': query, 'variables': payload})
    except Exception:
        return None
    if res.status_code == 200:
        print(res.status_code)
    else:
        raise Exception("Query failed to run by returning code of {}.".format(res.text))
    return None
| EzequielRosario/ImperiumBinarium-Files | NestFunctions/HourlyCheckIn.py | HourlyCheckIn.py | py | 642 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nestConfig.get_URL",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 20,
"usage_type": "call"
}
] |
34836695873 | #!/usr/bin/env python3
"""Tools to define Templates.
Templates are very similar to plugins, but use jinja to transform `.enbt` template files upon installation.
"""
__author__ = "Miguel Hernández-Cabronero"
__since__ = "2021/08/01"
import sys
import argparse
import inspect
import os
import glob
import shutil
import tempfile
import jinja2
import stat
from .installable import Installable, InstallableMeta
import enb.config
from enb.config import options
class MetaTemplate(InstallableMeta):
    """Metaclass for templates: tags every concrete Template subclass with
    "template" (the abstract Template base itself is skipped)."""
    def __init__(cls, *args, **kwargs):
        # Tag all subclasses except the base class defined in this module.
        if cls.__name__ != "Template":
            cls.tags.add("template")
        super().__init__(*args, **kwargs)
class Template(Installable, metaclass=MetaTemplate):
    """
    Base class to define templates. Subclasses must be defined in the __plugin__.py file of the template's
    source dir.

    - Templates copy the source dir's contents (except for __plugin__.py) and then transforms
      any `*.enbt` file applying jinja and removing that extension.

    - Templates may require so-called fields in order to produce output.
      These fields can be automatically taken from enb.config.ini (e.g., file-based configuration),
      passed as arguments to the template installation CLI, and programmatically.

    - One or more templates can be installed into an existing directory, the __plugin__.py file is not written
      by default to the installation dir.
    """
    # Map of required field names to their corresponding help
    required_fields_to_help = dict()

    # Files in the template's source dir ending with templatable_extension
    # are subject to jinja templating upon installation.
    templatable_extension = ".enbt"

    @classmethod
    def get_fields(cls, original_fields=None):
        """Return the dict of field values for this template, resolving any
        required fields from `original_fields`, then the CLI, then enb's
        .ini configuration.  Raises SyntaxError when a required field cannot
        be satisfied.  The result is cached on the class (cls._fields)."""
        try:
            # Cached after the first successful resolution.
            return cls._fields
        except AttributeError:
            # If there are required fields, satisfy them or fail
            fields = dict(original_fields) if original_fields is not None else dict()
            if cls.required_fields_to_help:
                ini_cli_fields, unused_options = cls.get_field_parser().parse_known_args()
                # Syntax is "plugin install <template> <installation>, so
                # four non-parsed options are expected
                assert len(unused_options) >= 4, (sys.argv, ini_cli_fields, unused_options)
                unused_options = unused_options[4:]
                for field_name in cls.required_fields_to_help:
                    if field_name not in fields:
                        try:
                            fields[field_name] = getattr(ini_cli_fields, field_name)
                            assert fields[field_name] is not None
                        except (KeyError, AssertionError) as ex:
                            raise SyntaxError(
                                f"Missing field {repr(field_name)}. Help for {field_name}:\n"
                                f"{cls.required_fields_to_help[field_name]}\n\n"
                                f"Invoke again with --{field_name}=\"your value\" or with -h for additional help.\n") from ex
                if unused_options:
                    print(f"Warning: unused option{'s' if len(unused_options) > 1 else ''}. \n - ", end="")
                    print('\n - '.join(repr(o) for o in unused_options))
                    print(f"NOTE: You can use '' or \"\" to define fields with spaces in them.")
                    print()
            cls._fields = fields
            return fields

    @classmethod
    def install(cls, installation_dir, overwrite_destination=False, fields=None):
        """Install a template into the given dir. See super().install for more information.

        :param installation_dir: directory where the contents of the template are placed.
          It will be created if not existing.
        :param overwrite_destination: if False, a SyntaxError is raised if any of the
          destination contents existed prior to this call. Note that installation_dir
          can already exist, it is the files and directories moved into it that can
          trigger this SyntaxError.
        :param fields: if not None, it must be a dict-like object containing a field to field value
          mapping. If None, it is interpreted as an empty dictionary.
          Required template fields not present in fields will be then read from the CLI
          arguments. If those are not provided, then the default values read from `*.ini`
          configuration files. If any required field cannot not satisfied after this,
          a SyntaxError is raised.
        """
        # If there are required fields, satisfy them or fail
        fields = cls.get_fields(original_fields=fields)

        # The template's source dir is wherever its __plugin__.py subclass lives.
        template_src_dir = os.path.dirname(os.path.abspath(inspect.getfile(cls)))
        for input_path in glob.glob(os.path.join(template_src_dir, "**", "*"), recursive=True):
            if "__pycache__" in input_path:
                continue
            if os.path.basename(input_path) == "__plugin__.py":
                continue
            # By default, the original structure and file names are preserved.
            output_path = os.path.abspath(input_path).replace(
                os.path.abspath(template_src_dir),
                os.path.abspath(installation_dir))
            # Directories are created when found
            if os.path.isdir(input_path):
                os.makedirs(output_path, exist_ok=True)
                continue
            # Remember executability so templated outputs keep their +x bit.
            input_is_executable = os.access(input_path, os.X_OK)
            # Files ending in '.enbt' will be identified as templates, processed and stripped of their extension.
            is_templatable = os.path.isfile(input_path) \
                             and os.path.basename(input_path).endswith(cls.templatable_extension)
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            if is_templatable:
                # Render with jinja into a temp file, then write it to the
                # destination with the .enbt extension stripped.
                with tempfile.NamedTemporaryFile(mode="w+") as templated_file:
                    jinja_env = jinja2.Environment(
                        loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(input_path))),
                        autoescape=jinja2.select_autoescape())
                    template = jinja_env.get_template(os.path.basename(input_path))
                    templated_file.write(template.render(**fields))
                    templated_file.flush()
                    templated_file.seek(0)
                    if os.path.exists(output_path[:-len(cls.templatable_extension)]) and not options.force:
                        raise ValueError(
                            f"Error installing template {cls.name}: output file {repr(output_path)} already exists "
                            f"and options.force={options.force}. Run with -f to overwrite.")
                    with open(output_path[:-len(cls.templatable_extension)], "w") as output_file:
                        output_file.write(templated_file.read())
                    if input_is_executable:
                        os.chmod(output_path[:-len(cls.templatable_extension)],
                                 os.stat(output_path[:-len(cls.templatable_extension)]).st_mode | stat.S_IEXEC)
            else:
                # Non-template files are copied verbatim.
                if os.path.exists(output_path) and not options.force:
                    raise ValueError(
                        f"Error installing template {cls.name}: output file {repr(output_path)} already exists "
                        f"and options.force={options.force}. Run with -f to overwrite.")
                shutil.copy(input_path, output_path)
        # Let Installable subclasses run any post-copy build step.
        cls.build(installation_dir=installation_dir)
        print(f"Template {repr(cls.name)} successfully installed into {repr(installation_dir)}.")

    @classmethod
    def get_field_parser(cls):
        """Build the argparse parser used to collect required template fields
        from the CLI, defaulting each field to the value found (if any) in the
        [template] section of enb's .ini configuration."""
        description = f"Template {repr(cls.name)} installation help."
        if cls.required_fields_to_help:
            description += f"\n\nFields are automatically read from the following paths (in this order):\n"
            description += "\n".join(enb.config.ini.used_config_paths)
            # defined_description = f"\n\nAlready refined fields:"
            defined_field_lines = []
            for field_name in sorted(cls.required_fields_to_help.keys()):
                try:
                    defined_field_lines.append(f"  {field_name} = {enb.config.ini.get_key('template', field_name)}")
                except KeyError:
                    pass
            if defined_field_lines:
                description += f"\n\nFile-defined fields:\n"
                description += "\n".join(defined_field_lines)
        parser = argparse.ArgumentParser(
            prog=f"enb plugin install {cls.name}",
            description=description,
            formatter_class=argparse.RawTextHelpFormatter)
        required_flags_group = parser.add_argument_group(
            "Required flags (use '' or \"\" quoting for fields with spaces)")
        for field_name, field_help in cls.required_fields_to_help.items():
            try:
                default_field_value = enb.config.ini.get_key("template", field_name)
            except KeyError:
                default_field_value = None
            if field_help[-1] != ".":
                field_help += "."
            required_flags_group.add_argument(
                f"--{field_name}",
                default=default_field_value,
                help=field_help,
                metavar=field_name)
        # This argument is for showing help to the user only, since it will have already been parsed
        # by enb.config.ini by the time this is called.
        parser.add_argument(f"--ini", nargs="*", required=False, type=str,
                            help="Additional .ini paths with a [field] section containing field = value lines")
        return parser
| miguelinux314/experiment-notebook | enb/plugins/template.py | template.py | py | 9,816 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "installable.InstallableMeta",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "installable.Installable",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.... |
6679634602 | import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
import argparse, sys
import joblib
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score, ConfusionMatrixDisplay
from medmnistutils.evaluationmetrics import accuracy, roc, presenf1cfsmtx
from medmnistutils.medmnistdataloader import PathMNIST, OrganMNIST3D, PneumoniaMNIST, VesselMNIST3D, OCTMNIST
#from medmnistutils.jiaodaresnet import ResNet18 as jiaodaresnet18
#from nets.unknownthreedresnet import resnet18
from medmnistutils.blingblingresnet import resnet18 as blingblingresnet18
from medmnistutils.O2Uzidairesnet import ResNet18 as O2Uresnet18
from medmnistutils.yixianresnet import resnet18 as yixian3dresnet18
# Command-line configuration: dataset choice, label-noise rate and training length.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='OCTMNIST', help='PathMNIST, OCTMNIST, PneumoniaMNIST, OrganMNIST3D, VesselMNIST3D')
parser.add_argument('--noise_rate', type=float, default=0.4, help='noise rate')
parser.add_argument('--batchsize', type=int, default=128, help='128')
parser.add_argument('--num_epochs', type=int, default=200, help='number of epochs')
#args = parser.parse_args(args=[])
args = parser.parse_args()
# Build train/val/test datasets, dataloaders and a matching backbone for the
# selected benchmark.  Only the training split receives injected label noise.
# NOTE(review): the OCTMNIST branch is a plain `if` (not `elif`), so the later
# `elif`s chain to it rather than to the PathMNIST test; behaviour is still
# correct because the dataset names are mutually exclusive -- confirm intended.
if args.dataset =='PathMNIST': #2D, 9 classes, 89,996 / 10,004 / 7,180
    newtransform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[.5], std=[.5])])
    train_dataset = PathMNIST(split = 'train', root = '../../medmnistdata', transform=newtransform, noise_rate=args.noise_rate)
    val_dataset = PathMNIST(split = 'val', root = '../../medmnistdata', transform=newtransform)
    test_dataset = PathMNIST(split = 'test', root = '../../medmnistdata', transform=newtransform)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = O2Uresnet18(input_channel=train_dataset.in_channels, n_outputs=train_dataset.num_classes)
    #model = blingblingresnet18(num_classes=train_dataset.num_classes)
if args.dataset =='OCTMNIST': #2D, 4 classes,
    newtransform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[.5], std=[.5])])
    train_dataset = OCTMNIST(split = 'train', root = '../../medmnistdata', transform=newtransform, noise_rate=args.noise_rate)
    val_dataset = OCTMNIST(split = 'val', root = '../../medmnistdata', transform=newtransform)
    test_dataset = OCTMNIST(split = 'test', root = '../../medmnistdata', transform=newtransform)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = O2Uresnet18(input_channel=train_dataset.in_channels, n_outputs=train_dataset.num_classes)
    #model = blingblingresnet18(num_classes=train_dataset.num_classes)
elif args.dataset =='PneumoniaMNIST': #2D, 2 class, 4,708 / 524 / 624
    newtransform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[.5], std=[.5])])
    train_dataset = PneumoniaMNIST(split = 'train', root = '../../medmnistdata', transform=newtransform, noise_rate=args.noise_rate)
    val_dataset = PneumoniaMNIST(split = 'val', root = '../../medmnistdata', transform=newtransform)
    test_dataset = PneumoniaMNIST(split = 'test', root = '../../medmnistdata', transform=newtransform)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = O2Uresnet18(input_channel=train_dataset.in_channels, n_outputs=train_dataset.num_classes)
    #model = blingblingresnet18(num_classes=train_dataset.num_classes)
elif args.dataset =='OrganMNIST3D': #3D, 11 class, 972 / 161 / 610
    train_dataset = OrganMNIST3D(split = 'train', root = '../../medmnistdata', transform=None, noise_rate=args.noise_rate)
    val_dataset = OrganMNIST3D(split = 'val', root = '../../medmnistdata', transform=None)
    test_dataset = OrganMNIST3D(split = 'test', root = '../../medmnistdata', transform=None)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = yixian3dresnet18(num_classes = train_dataset.num_classes)
elif args.dataset =='VesselMNIST3D': #3D, 2 class, 1,335 / 192 / 382
    train_dataset = VesselMNIST3D(split = 'train', root = '../../medmnistdata', transform=None, noise_rate=args.noise_rate)
    val_dataset = VesselMNIST3D(split = 'val', root = '../../medmnistdata', transform=None)
    test_dataset = VesselMNIST3D(split = 'test', root = '../../medmnistdata', transform=None)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = yixian3dresnet18(num_classes = train_dataset.num_classes)
# Move the model to GPU when available and set up the optimizer.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
error = nn.CrossEntropyLoss()
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
###############################################################################
# Per-epoch accuracy histories (validation / test), filled in the main loop.
验证准确率列表 = []
测试准确率列表= []
###############################################################################
#main loop
for epoch in range(args.num_epochs):
    #train
    model.train()
    for images, labels, _ in train_loader:
        images, labels = images.to(device), labels.to(device)
        # Targets arrive as (N, 1); CrossEntropyLoss needs a 1-D LongTensor.
        labels = labels.squeeze().long()
        outputs = model(images)
        loss = error(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    #evaluation
    valaccuracy = accuracy(model, val_loader)
    testaccuracy = accuracy(model, test_loader)
    print('epoch', epoch+1, 'val accuracy', valaccuracy, 'test accuracy', testaccuracy)
    ###############################################################################
    # Per-epoch bookkeeping for the reports below (original note: "the rest is
    # all optional").
    ###############################################################################
    验证准确率列表.append(valaccuracy)
    测试准确率列表.append(testaccuracy)
# Experiment name used as a prefix for every output file.
实验名 = '20230924baselineexp1'
resultdict = dict()
# The trained model itself.
resultdict['model'] = model
# Accuracy-curve histories.
resultdict['valacclist'] = 验证准确率列表
resultdict['testacclist'] = 测试准确率列表
# Convert fractions to percentages for plotting/reporting.
验证准确率列表 = [x*100 for x in 验证准确率列表]
测试准确率列表 = [x*100 for x in 测试准确率列表]
plt.plot(验证准确率列表, label = 'validation set')
plt.plot(测试准确率列表, label = 'test set')
plt.xlim((0,200))
plt.ylim((0,100))
#plt.title('origingal method on ' + args.dataset + ' under noise rate ' + str(args.noise_rate))
plt.xlabel('Epoch')
plt.ylabel('Accuracy (%)')
acc变化图文件名 = 实验名 + '_acccurve_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.legend()
plt.savefig(acc变化图文件名)
plt.show()
# ROC curves (micro-averaged) for validation and test sets.
resultdict['valfprdict'], resultdict['valtprdict'], resultdict['valaucdict'] = roc(model, val_loader)
resultdict['testfprdict'], resultdict['testtprdict'], resultdict['testaucdict'] = roc(model, test_loader)
plt.plot(resultdict['valfprdict']["micro"], resultdict['valtprdict']["micro"],
         label='validation set, AUC ' + str(round(100*resultdict['valaucdict']["micro"],2)))
plt.plot(resultdict['testfprdict']["micro"], resultdict['testtprdict']["micro"],
         label='test set, AUC ' + str(round(100*resultdict['testaucdict']["micro"],2)))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
ROC文件名 = 实验名 + '_roccurve_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.savefig(ROC文件名)
plt.show()
# Confusion matrices plus precision/recall/F1 for both evaluation splits.
resultdict['valprecision'], resultdict['valrecall'], resultdict['valf1'], resultdict['valtruelist'], resultdict['valpredlist'], resultdict['valcfsmtx'] = presenf1cfsmtx(model, val_loader)
ConfusionMatrixDisplay.from_predictions(resultdict['valtruelist'], resultdict['valpredlist'], cmap = plt.cm.Blues, colorbar = False)
cfsmtx文件名 = 实验名 + '_valconfusionmatrix_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.savefig(cfsmtx文件名)
plt.show()
resultdict['testprecision'], resultdict['testrecall'], resultdict['testf1'], resultdict['testtruelist'], resultdict['testpredlist'], resultdict['testcfsmtx'] = presenf1cfsmtx(model, test_loader)
ConfusionMatrixDisplay.from_predictions(resultdict['testtruelist'], resultdict['testpredlist'], cmap = plt.cm.Blues, colorbar = False)
cfsmtx文件名 = 实验名 + '_testconfusionmatrix_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.savefig(cfsmtx文件名)
plt.show()
# Plain-text summary report (appended, UTF-8).
txt文件名 = 实验名 + '_txt_' + args.dataset + '_' + str(args.noise_rate) + '.txt'
with open (txt文件名, 'a', encoding='utf-8') as txt:
    txt.write('最后一轮acc' + "\n" )
    txt.write(str(round(验证准确率列表[-1],2)) + "\n" )
    txt.write(str(round(测试准确率列表[-1],2)) + "\n" )
    txt.write('最后十轮acc平均' + "\n" )
    txt.write(str(round(sum(验证准确率列表[-11:-1])/len(验证准确率列表[-11:-1]),2)) + "\n" )
    # NOTE(review): the test-set mean divides by the VALIDATION list length;
    # harmless because both lists have one entry per epoch, but confirm.
    txt.write(str(round(sum(测试准确率列表[-11:-1])/len(验证准确率列表[-11:-1]),2)) + "\n" )
    txt.write(str(round(100*resultdict['valprecision'],2)) + "\n" ) if False else txt.write('precision' + "\n" )
    txt.write(str(round(100*resultdict['valprecision'],2)) + "\n" )
    txt.write(str(round(100*resultdict['testprecision'],2)) + "\n" )
    txt.write('recall' + "\n" )
    txt.write(str(round(100*resultdict['valrecall'],2)) + "\n" )
    txt.write(str(round(100*resultdict['testrecall'],2)) + "\n" )
    txt.write('f1' + "\n" )
    txt.write(str(round(100*resultdict['valf1'],2)) + "\n" )
    txt.write(str(round(100*resultdict['testf1'],2)) + "\n" )
# Persist the full result bundle with joblib.
resultdict文件名 = 实验名 + '_resultdict_' + args.dataset + '_' + str(args.noise_rate)
joblib.dump(resultdict, resultdict文件名)
| gdqb233/inm363 | baseline.py | baseline.py | py | 11,328 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.backends.cudnn.benchmark",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.backends.cudnn",
"line_number": 17,
"usage_type": "name"
},
{
"a... |
10862974654 | """
Run the model end to end
"""
import argparse
import sys
import torch
from pathlib import Path
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from smallteacher.data import DataModule, train_augmentations
from smallteacher.models import FullySupervised, SemiSupervised
from smallteacher.constants import Metrics
from smallteacher.config import BEST_MODEL_NAME
from smallssd.data import LabelledData, UnlabelledData
from smallssd.config import DATAFOLDER_PATH
from smallssd.keys import CLASSNAME_TO_IDX
import mlflow
import mlflow.pytorch
def parse_args(args):
    """Parse the command-line arguments for this training script."""
    parser = argparse.ArgumentParser(
        description="Simple training script for training a pytorch lightning model.")
    parser.add_argument("--model", type=str, default="FRCNN",
                        choices=["FRCNN", "RetinaNet", "SSD"],
                        help="Chooses model architecture")
    parser.add_argument("--workers", type=int, default="1",
                        help="Number of dataloader workers")
    parser.add_argument("--mlflow_experiment", type=str,
                        default="pytorch_lightning_experiment")
    parser.add_argument("--seed", type=int, default="42")
    return parser.parse_args(args)
def get_checkpoint(version: int) -> Path:
    """Return the 'best_model*' checkpoint saved by lightning logger `version`.

    Raises IndexError when no matching checkpoint exists.
    """
    ckpt_dir = Path(f"lightning_logs/version_{version}/checkpoints")
    matches = list(ckpt_dir.glob("best_model*.ckpt"))
    return matches[0]
def train_fully_supervised(datamodule, model_name) -> int:
    """Train a fully-supervised detector on the labelled data.

    Returns the lightning logger version of the run so the caller can locate
    the saved best checkpoint afterwards.
    """
    model = FullySupervised(
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    fully_supervised_trainer = pl.Trainer(
        callbacks=[
            # Stop when validation mAP plateaus; keep the best epoch on disk.
            EarlyStopping(monitor=Metrics.MAP, mode="max", patience=10),
            ModelCheckpoint(filename=BEST_MODEL_NAME, monitor=Metrics.MAP, mode="max"),
        ],
        gpus=torch.cuda.device_count(),
    )
    fully_supervised_trainer.fit(model, datamodule=datamodule)

    # Reload the best checkpoint (not the last epoch) for the test pass.
    best_model = FullySupervised.load_from_checkpoint(
        get_checkpoint(fully_supervised_trainer.logger.version),
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    fully_supervised_trainer.test(best_model, datamodule=datamodule)
    return fully_supervised_trainer.logger.version
def train_teacher_student(datamodule, model_name, model_checkpoint) -> SemiSupervised:
    """Distil the supervised model into a semi-supervised teacher-student setup.

    Loads the supervised checkpoint as the teacher, trains on labelled plus
    unlabelled data, evaluates the best checkpoint and returns that model.
    (Return annotation corrected: the function returns a SemiSupervised
    model, not an int.)
    """
    unlabelled_ds = UnlabelledData(root=DATAFOLDER_PATH)
    datamodule.add_unlabelled_training_dataset(unlabelled_ds)
    org_model = FullySupervised.load_from_checkpoint(
        model_checkpoint,
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    model = SemiSupervised(
        trained_model=org_model.model,
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    trainer = pl.Trainer(
        gpus=torch.cuda.device_count(),
        callbacks=[
            EarlyStopping(monitor=Metrics.MAP, mode="max", patience=10),
            ModelCheckpoint(filename=BEST_MODEL_NAME, monitor=Metrics.MAP, mode="max"),
        ],
    )
    trainer.fit(model, datamodule=datamodule)

    # Evaluate the best (not last) checkpoint of the student.
    best_model = SemiSupervised.load_from_checkpoint(
        get_checkpoint(trainer.logger.version),
        model_base=model_name,
        num_classes=len(CLASSNAME_TO_IDX),
    )
    trainer.test(best_model, datamodule=datamodule)
    return best_model
def main(args=None):
    """Entry point: run supervised training, then the teacher-student phase,
    logging both runs to MLflow."""
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    mlflow.set_experiment(experiment_name=args.mlflow_experiment)
    pl.seed_everything(args.seed)
    # Labelled data split into train/val; the held-out eval set is the test set.
    datamodule = DataModule(
        *LabelledData(root=DATAFOLDER_PATH, eval=False).split(
            transforms=[train_augmentations, None]
        ),
        test_dataset=LabelledData(root=DATAFOLDER_PATH, eval=True),
        num_workers=args.workers,
    )

    mlflow.pytorch.autolog()
    with mlflow.start_run(run_name=f"{args.model}_fully_supervised"):
        version_id = train_fully_supervised(datamodule, args.model)
        best_model_checkpoint = get_checkpoint(version_id)
    with mlflow.start_run(run_name=f"{args.model}_teacher_student"):
        best_model = train_teacher_student(
            datamodule, args.model, best_model_checkpoint
        )
        mlflow.pytorch.log_model(best_model.model, artifact_path="model")
if __name__ == "__main__":
main()
| SmallRobotCompany/smallteacher | smallssd/end_to_end.py | end_to_end.py | py | 4,407 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "smallteacher.models... |
6397362139 | import sys
from math import sqrt
from itertools import compress
# 利用byte求质数
def get_primes_3(n):
    """Return a list of all primes strictly below n (requires n > 2).

    Sieve of Eratosthenes over odd numbers only: odd_flags[k] records
    whether 2*k + 1 is still considered prime.
    """
    odd_flags = bytearray([True]) * (n // 2)
    for candidate in range(3, int(n ** 0.5) + 1, 2):
        if odd_flags[candidate // 2]:
            first_multiple = candidate * candidate // 2
            strike_count = (n - candidate * candidate - 1) // (2 * candidate) + 1
            odd_flags[first_multiple::candidate] = bytearray(strike_count)
    return [2, *compress(range(3, n, 2), odd_flags[1:])]
def is_prime(n):
    """Trial-division primality test.

    Generalized from the original, which was only valid for odd n >= 3
    (it returned True for 1 and for every even number, since the divisor
    range was empty or skipped 2).  Now correct for any integer n.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    return all(n % d for d in range(3, round(sqrt(n)) + 1, 2))
def f(a, b):
    '''
    Won't be tested for b greater than 10_000_000
    >>> f(3, 3)
    The number of prime numbers between 3 and 3 included is 1
    >>> f(4, 4)
    The number of prime numbers between 4 and 4 included is 0
    >>> f(2, 5)
    The number of prime numbers between 2 and 5 included is 3
    >>> f(2, 10)
    The number of prime numbers between 2 and 10 included is 4
    >>> f(2, 11)
    The number of prime numbers between 2 and 11 included is 5
    >>> f(1234, 567890)
    The number of prime numbers between 1234 and 567890 included is 46457
    >>> f(89, 5678901)
    The number of prime numbers between 89 and 5678901 included is 392201
    >>> f(89, 5678901)
    The number of prime numbers between 89 and 5678901 included is 392201
    '''
    # Single odd-only sieve up to b (inclusive), then count the primes >= a.
    # This replaces the original version, which first trial-divided every
    # number in [a, b] (and then discarded that count) and then pruned one
    # sieve list with repeated O(n) list.remove() calls.
    limit = b + 1
    sieve = bytearray([True]) * (limit // 2)
    for i in range(3, int(limit ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = bytearray((limit - i * i - 1) // (2 * i) + 1)
    primes = [2, *compress(range(3, limit, 2), sieve[1:])]
    count = sum(1 for p in primes if p >= a)
    print(f'The number of prime numbers between {a} and {b} included is {count}')
if __name__ == '__main__':
    # Run the doctests embedded in f()'s docstring.
    import doctest
    doctest.testmod()
| YuanG1944/COMP9021_19T3_ALL | 9021 Python/review/mid-examples/2017S1_Sol/5.py | 5.py | py | 1,735 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "itertools.compress",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "doctest.testmod",
"line_number": 58,
"usage_type": "call"
}
] |
21480418170 | from collections import namedtuple
from functools import partial
from itertools import count, groupby, zip_longest
import bpy
import numpy as np
import re
from .log import log, logd
from .helpers import (
ensure_iterable,
get_context,
get_data_collection,
get_layers_recursive,
load_property,
reshape,
save_property,
select_only,
swap_names,
titlecase,
)
# Module logger pre-tagged with the "SAVE" category.
logs = partial(log, category="SAVE")
# Matches a custom-property path such as 'pose.bones["Bone"]["prop"]':
# group 1 is the optional owner path, group 2 the property key.
custom_prop_pattern = re.compile(r'(.+)?\["([^"]+)"\]')
# Matches a regular RNA path: group 1 is the optional owner path, group 2 the attribute name.
prop_pattern = re.compile(r'(?:(.+)\.)?([^"\.]+)')
class GRET_OT_property_warning(bpy.types.Operator):
    """Changes won't be saved"""
    # Dummy operator used purely as a warning icon in the UI (see
    # draw_warning_if_not_overridable); the docstring above is its tooltip.
    bl_idname = 'gret.property_warning'
    bl_label = "Not Overridable"
    bl_options = {'INTERNAL'}
def draw_warning_if_not_overridable(layout, bid, data_path):
    """Adds a warning to a layout if the requested property is not available or not overridable.

    Returns True if a warning was drawn, False otherwise. Only applies to IDs
    that are library overrides.
    """
    if bid and bid.override_library:
        try:
            if not bid.is_property_overridable_library(data_path):
                # Depressed, non-embossed operator button serves as a warning icon.
                layout.operator(GRET_OT_property_warning.bl_idname,
                    icon='ERROR', text="", emboss=False, depress=True)
                return True
        except TypeError:
            # data_path didn't resolve to a property on this ID; draw nothing.
            pass
    return False
class PropertyWrapper(namedtuple('PropertyWrapper', 'struct prop_name is_custom')):
    """Provides read/write access to a property given its data path."""
    __slots__ = ()
    @classmethod
    def from_path(cls, struct, data_path):
        """Resolve `data_path` relative to `struct`.

        Returns a PropertyWrapper, or None if the path doesn't point at an
        existing custom or RNA property (including when the owner part of the
        path fails to resolve).
        """
        # To set a property given a data path it's necessary to split the struct and attribute name.
        # `struct.path_resolve(path, False)` returns a bpy_prop, and bpy_prop.data holds the struct.
        # Unfortunately it knows but doesn't expose the attribute name (see `bpy_prop.__str__`)
        # It's also necessary to determine if it's a custom property, the interface is different.
        # Just parse the data path with a regular expression instead.
        try:
            prop_match = custom_prop_pattern.fullmatch(data_path)
            if prop_match:
                if prop_match[1]:
                    struct = struct.path_resolve(prop_match[1])
                prop_name = prop_match[2]
                if prop_name not in struct:
                    return None
                return cls(struct, prop_name, True)
            prop_match = prop_pattern.fullmatch(data_path)
            if prop_match:
                if prop_match[1]:
                    struct = struct.path_resolve(prop_match[1])
                prop_name = prop_match[2]
                if not hasattr(struct, prop_name):
                    return None
                return cls(struct, prop_name, False)
        except ValueError:
            # path_resolve raised: the owner portion of the path is invalid.
            return None
    @property
    def data_path(self):
        """Path of the property relative to its owning struct."""
        return f'["{self.prop_name}"]' if self.is_custom else self.prop_name
    @property
    def title(self):
        """Human-readable name for UI display."""
        if self.is_custom:
            return titlecase(self.prop_name) # Custom property name should be descriptive enough
        else:
            return f"{getattr(self.struct, 'name', self.struct.bl_rna.name)} {titlecase(self.prop_name)}"
    @property
    def default_value(self):
        """Default value of the property; array defaults are reshaped to their dimensions."""
        if self.is_custom:
            return self.struct.id_properties_ui(self.prop_name).as_dict()['default']
        else:
            prop = self.struct.bl_rna.properties[self.prop_name]
            if getattr(prop, 'is_array', False):
                return reshape(prop.default_array, prop.array_dimensions)
            return getattr(prop, 'default', None)
    @property
    def value(self):
        """Current value, in a form that the setter below accepts back."""
        if self.is_custom:
            return self.struct[self.prop_name]
        else:
            return save_property(self.struct, self.prop_name)
    @value.setter
    def value(self, new_value):
        if self.is_custom:
            self.struct[self.prop_name] = new_value
        else:
            load_property(self.struct, self.prop_name, new_value)
class PropOp(namedtuple('PropOp', 'prop_wrapper value')):
    """Undo operation: snapshots a single property and optionally assigns a new value."""
    __slots__ = ()
    def __new__(cls, struct, data_path, value=None):
        # Resolve the path up front so an invalid path fails fast (SaveState
        # logs the error instead of recording a broken operation).
        prop_wrapper = PropertyWrapper.from_path(struct, data_path)
        if not prop_wrapper:
            raise RuntimeError(f"Couldn't resolve {data_path}")
        saved_value = prop_wrapper.value
        if value is not None:
            prop_wrapper.value = value
        return super().__new__(cls, prop_wrapper, saved_value)
    def revert(self, context):
        # Restore the value captured at construction time; `context` is unused
        # but kept for interface parity with the other *Op classes.
        self.prop_wrapper.value = self.value
class PropForeachOp(namedtuple('PropForeachOp', 'collection prop_name values')):
    """Undo operation: bulk snapshot of one property across a whole collection."""
    __slots__ = ()
    def __new__(cls, collection, prop_name, value=None):
        assert isinstance(collection, bpy.types.bpy_prop_collection)
        if len(collection) == 0:
            # Can't investigate array type if there are no elements (would do nothing anyway)
            return super().__new__(cls, collection, prop_name, np.empty(0))
        prop = collection[0].bl_rna.properties[prop_name]
        element_type = type(prop.default)
        # NOTE(review): assumes `prop_name` is an array property (e.g. 'location');
        # for a scalar property array_length would make this buffer empty — confirm callers.
        num_elements = len(collection) * prop.array_length
        saved_values = np.empty(num_elements, dtype=element_type)
        collection.foreach_get(prop_name, saved_values)
        if value is not None:
            # Broadcast the single new value to every element of every item.
            values = np.full(num_elements, value, dtype=element_type)
            collection.foreach_set(prop_name, values)
        return super().__new__(cls, collection, prop_name, saved_values)
    def revert(self, context):
        if self.values.size > 0:
            self.collection.foreach_set(self.prop_name, self.values)
class CallOp(namedtuple('CallOp', 'func args kwargs')):
    """Undo operation that re-invokes a stored callable with captured arguments."""
    __slots__ = ()
    def __new__(cls, func, *args, **kwargs):
        assert callable(func)
        instance = super().__new__(cls, func, args, kwargs)
        return instance
    def revert(self, context):
        # Replay the captured call; `context` is unused but kept for
        # interface parity with the other *Op classes.
        self.func(*self.args, **self.kwargs)
class SelectionOp(namedtuple('SelectionOp', 'selected_objects active_object collection_hide '
    'layer_hide object_hide')):
    """Undo operation: snapshot of object selection, active object and visibility flags."""
    __slots__ = ()
    def __new__(cls, context):
        # Record the selection/active object plus the hide flags of every
        # collection, view layer and object so all can be restored later.
        return super().__new__(cls,
            selected_objects=context.selected_objects[:],
            active_object=context.view_layer.objects.active,
            collection_hide=[(cl, cl.hide_select, cl.hide_viewport, cl.hide_render)
                for cl in bpy.data.collections],
            layer_hide=[(layer, layer.hide_viewport, layer.exclude)
                for layer in get_layers_recursive(context.view_layer.layer_collection)],
            object_hide=[(obj, obj.hide_select, obj.hide_viewport, obj.hide_render)
                for obj in bpy.data.objects])
    def revert(self, context):
        # ReferenceError means the datablock was removed after the snapshot; skip it.
        for collection, hide_select, hide_viewport, hide_render in self.collection_hide:
            try:
                collection.hide_select = hide_select
                collection.hide_viewport = hide_viewport
                collection.hide_render = hide_render
            except ReferenceError:
                pass
        for layer, hide_viewport, exclude in self.layer_hide:
            try:
                layer.hide_viewport = hide_viewport
                layer.exclude = exclude
            except ReferenceError:
                pass
        for obj, hide_select, hide_viewport, hide_render in self.object_hide:
            try:
                obj.hide_select = hide_select
                obj.hide_viewport = hide_viewport
                obj.hide_render = hide_render
            except ReferenceError:
                pass
        select_only(context, self.selected_objects)
        try:
            context.view_layer.objects.active = self.active_object
        except ReferenceError:
            pass
class CollectionOp(namedtuple('CollectionOp', 'collection remove_func_name items is_whitelist')):
    """Undo operation: removes items from a bpy collection on revert.

    With `items` given, those items are removed (blacklist). Without, the
    current contents are remembered and anything added later is removed
    (whitelist).
    """
    __slots__ = ()
    def __new__(cls, collection, items=None):
        assert isinstance(collection, bpy.types.bpy_prop_collection)
        # Find out if there's a remove-like function available
        for func_name in ('remove', 'unlink', ''):
            func = collection.bl_rna.functions.get(func_name)
            if (func is not None
                and sum(param.is_required for param in func.parameters) == 1
                and func.parameters[0].type == 'POINTER'):
                break
        if not func_name:
            raise RuntimeError(f"'{collection.bl_rna.name}' is not supported")
        if items is None:
            # On reverting, remove all but the current items
            return super().__new__(cls, collection, func_name, set(collection), True)
        else:
            # On reverting, remove the specified items
            return super().__new__(cls, collection, func_name, set(items), False)
    def revert(self, context):
        # Allow passing in object names instead of object references
        # Compare types, don't use `isinstance` as that will throw on removed objects
        items = set(self.collection.get(el) if type(el) == str else el for el in self.items)
        items.discard(None)
        remove_func = getattr(self.collection, self.remove_func_name)
        if self.is_whitelist:
            # Remove items not in the set
            for item in set(self.collection) - items:
                logs("Removing", item)
                remove_func(item)
        else:
            # Remove items in the set
            for item in items:
                try:
                    logs("Removing", item)
                    remove_func(item)
                except ReferenceError:
                    pass
class RenameOp(namedtuple('RenameOp', 'bid name other_bid')):
    """Undo operation: renames an ID, swapping names with an existing ID if necessary."""
    __slots__ = ()
    def __new__(cls, bid, name, start_num=0, name_format="{name}{num}"):
        data_collection = get_data_collection(bid)
        if data_collection is None:
            raise RuntimeError(f"Type {type(bid).__name__} is not supported")
        saved_name = bid.name
        bid.tag = True # Not strictly necessary, tagging allows custom naming format to work
        # Try `name` first, then numbered variants, until a free name is found
        # or an untagged ID holding the name can be swapped with.
        # NOTE(review): if every candidate name is held by a tagged ID this loop
        # does not terminate — confirm callers can't hit that.
        for num in count(start=start_num):
            new_name = name if (num == start_num) else name_format.format(name=name, num=num)
            other_bid = data_collection.get(new_name)
            if not other_bid or bid == other_bid:
                bid.name = new_name
                return super().__new__(cls, bid, saved_name, None)
            elif other_bid and not other_bid.tag:
                swap_names(bid, other_bid)
                return super().__new__(cls, bid, saved_name, other_bid)
    def revert(self, context):
        if self.other_bid:
            try:
                swap_names(self.bid, self.other_bid)
            except ReferenceError:
                pass
        self.bid.name = self.name # Ensure the name is reverted if swap_names failed
        self.bid.tag = False
class SaveState:
    """Similar to an undo stack. See SaveContext for example usage."""

    def __init__(self, context, name, refresh=False):
        self.context = context
        self.name = name
        self.refresh = refresh
        # Recorded undo operations; reverted newest-first by revert().
        self.operations = []

    def revert(self):
        """Revert all recorded operations, newest first."""
        while self.operations:
            self._pop_op()
        if self.refresh:
            # Might be necessary in some cases where context.scene.view_layers.update() is not enough
            self.context.scene.frame_set(self.context.scene.frame_current)

    def _push_op(self, op_cls, *args, **kwargs):
        # Best effort: failing to record one operation shouldn't abort the caller.
        try:
            self.operations.append(op_cls(*args, **kwargs))
            logs("Push", self.operations[-1], max_len=90)
        except Exception as e:
            logs(f"Error pushing {op_cls.__name__}: {e}")

    def _pop_op(self):
        op = self.operations.pop()
        try:
            logs("Pop", op, max_len=90)
            op.revert(self.context)
        except Exception as e:
            logs(f"Error reverting {op.__class__.__name__}: {e}")

    def prop(self, struct, data_paths, values=None):
        """Save the specified properties and optionally assign new values."""
        # Fix: use a None sentinel instead of the mutable default `values=[None]`.
        # None (the default) means "save only, assign nothing", as before.
        if values is None:
            values = [None]
        if isinstance(data_paths, str):
            data_paths = data_paths.split()
        if not isinstance(values, list):
            values = [values]
        if len(values) != 1 and len(values) != len(data_paths):
            raise ValueError("Expected either a single value or as many values as data paths")
        for data_path, value in zip_longest(data_paths, values, fillvalue=values[0]):
            self._push_op(PropOp, struct, data_path, value)

    def prop_foreach(self, collection, prop_name, value=None):
        """Save the specified property for all elements in the collection."""
        self._push_op(PropForeachOp, collection, prop_name, value)

    def selection(self):
        """Save the current object selection."""
        self._push_op(SelectionOp, self.context)

    def temporary(self, collection, items):
        """Mark one or more items for deletion."""
        self._push_op(CollectionOp, collection, ensure_iterable(items))

    def temporary_bids(self, bids):
        """Mark one or more IDs for deletion."""
        # Group by concrete ID type so each group targets its own data collection.
        # (Loop variable renamed so it no longer shadows the `bids` parameter.)
        for bid_type, group in groupby(ensure_iterable(bids), key=lambda bid: type(bid)):
            if bid_type is not type(None):
                self._push_op(CollectionOp, get_data_collection(bid_type), group)

    def keep_temporary_bids(self, bids):
        """Keep IDs that were previously marked for deletion."""
        bids = set(ensure_iterable(bids))
        for op in reversed(self.operations):
            if isinstance(op, CollectionOp) and not op.is_whitelist:
                op.items.difference_update(bids)

    def collection(self, collection):
        """Remember the current contents of a collection. Any items created later will be removed."""
        self._push_op(CollectionOp, collection)

    def viewports(self, header_text=None, show_overlays=None, **kwargs):
        """Save and override 3D viewport settings. Extra kwargs target space.shading fields."""
        for area in self.context.screen.areas:
            if area.type == 'VIEW_3D':
                # Don't think there's a way to find out the current header text, reset on reverting
                self._push_op(CallOp, area.header_text_set, None)
                area.header_text_set(header_text)
                for space in area.spaces:
                    if space.type == 'VIEW_3D':
                        if show_overlays is not None:
                            self._push_op(PropOp, space.overlay, 'show_overlays', show_overlays)
                        for field_name, field_value in kwargs.items():
                            self._push_op(PropOp, space.shading, field_name, field_value)

    def rename(self, bid, name):
        """Save the IDs current name and give it a new name."""
        self._push_op(RenameOp, bid, name)

    def clone_obj(self, obj, to_mesh=False, parent=None, reset_origin=False):
        """Clones or converts an object. Returns a new, visible scene object with unique data."""
        if to_mesh:
            dg = self.context.evaluated_depsgraph_get()
            new_data = bpy.data.meshes.new_from_object(obj, preserve_all_data_layers=True, depsgraph=dg)
            self.temporary_bids(new_data)
            new_obj = bpy.data.objects.new(obj.name + "_", new_data)
            self.temporary_bids(new_obj)
        else:
            new_data = obj.data.copy()
            self.temporary_bids(new_data)
            new_obj = obj.copy()
            self.temporary_bids(new_obj)
            new_obj.name = obj.name + "_"
            new_obj.data = new_data
        assert new_data.users == 1
        if obj.type == 'MESH':
            # Move object materials to mesh
            for mat_index, mat_slot in enumerate(obj.material_slots):
                if mat_slot.link == 'OBJECT':
                    new_data.materials[mat_index] = mat_slot.material
                    new_obj.material_slots[mat_index].link = 'DATA'
        # New objects are moved to the scene collection, ensuring they're visible
        self.context.scene.collection.objects.link(new_obj)
        new_obj.hide_set(False)
        new_obj.hide_viewport = False
        new_obj.hide_render = False
        new_obj.hide_select = False
        new_obj.parent = parent
        if reset_origin:
            new_data.transform(new_obj.matrix_world)
            bpy.ops.object.origin_set(get_context(new_obj), type='ORIGIN_GEOMETRY', center='MEDIAN')
        else:
            new_obj.matrix_world = obj.matrix_world
        return new_obj
class SaveContext:
    """
    Saves state of various things and keeps track of temporary objects.
    When leaving scope, operations are reverted in the order they were applied.
    Example usage:
        with SaveContext(bpy.context, "test") as save:
            save.prop_foreach(bpy.context.scene.objects, 'location')
            bpy.context.active_object.location = (1, 1, 1)
    """
    def __init__(self, *args, **kwargs):
        # All arguments are forwarded to SaveState(context, name, refresh=False).
        self.save = SaveState(*args, **kwargs)
    def __enter__(self):
        return self.save
    def __exit__(self, exc_type, exc_value, traceback):
        # Revert even when the block raised; the exception still propagates.
        self.save.revert()
class StateMachineBaseState:
    """Base class for states managed by StateMachineMixin."""
    def __init__(self, owner):
        # The state machine this state belongs to.
        self.owner = owner
    def on_enter(self):
        """Called when this state becomes the topmost (active) state."""
        pass
    def on_exit(self):
        """Called when this state is popped or covered by a newly pushed state."""
        pass
class StateMachineMixin:
    """Simple pushdown state machine: states live on a stack, top is active."""

    # Created lazily per instance on first push; None while unused.
    state_stack = None
    # When True, the previously-active state receives on_exit/on_enter
    # as states are pushed over it or popped back to it.
    state_events_on_reentry = True

    @property
    def state(self):
        """The active (topmost) state, or None when the stack is empty."""
        stack = self.state_stack
        if not stack:
            return None
        return stack[-1]

    def pop_state(self, *args, **kwargs):
        if not self.state:
            return
        leaving = self.state_stack.pop()
        leaving.on_exit(*args, **kwargs)
        revealed = self.state
        if revealed and self.state_events_on_reentry:
            revealed.on_enter()

    def push_state(self, state_class, *args, **kwargs):
        assert state_class
        entering = state_class(self)
        covered = self.state
        if covered and self.state_events_on_reentry:
            covered.on_exit()
        if self.state_stack is None:
            self.state_stack = []
        self.state_stack.append(entering)
        if entering:
            entering.on_enter(*args, **kwargs)
class DrawHooksMixin:
    """Mixin managing registration of 3D viewport draw callbacks.

    Subclasses may define on_draw_post_pixel(context) and/or
    on_draw_post_view(context); hook() registers whichever exist,
    unhook() removes them.
    """
    space_type = bpy.types.SpaceView3D
    draw_post_pixel_handler = None
    draw_post_view_handler = None

    def hook(self, context):
        if not self.draw_post_pixel_handler and hasattr(self, "on_draw_post_pixel"):
            self.draw_post_pixel_handler = self.space_type.draw_handler_add(self.on_draw_post_pixel,
                (context,), 'WINDOW', 'POST_PIXEL')
        if not self.draw_post_view_handler and hasattr(self, "on_draw_post_view"):
            # Fix: store the POST_VIEW handler in draw_post_view_handler. It was
            # previously assigned to draw_post_pixel_handler, clobbering the pixel
            # handler and leaving the view handler unremovable by unhook().
            self.draw_post_view_handler = self.space_type.draw_handler_add(self.on_draw_post_view,
                (context,), 'WINDOW', 'POST_VIEW')

    def unhook(self):
        if self.draw_post_pixel_handler:
            self.space_type.draw_handler_remove(self.draw_post_pixel_handler, 'WINDOW')
            self.draw_post_pixel_handler = None
        if self.draw_post_view_handler:
            self.space_type.draw_handler_remove(self.draw_post_view_handler, 'WINDOW')
            self.draw_post_view_handler = None
def show_window(width=0.5, height=0.5):
    """Open a window at the cursor. Size can be pixels or a fraction of the main window size."""
    # Hack from https://blender.stackexchange.com/questions/81974
    # Temporarily configure render output to open in a new window, trigger the
    # render view, then restore the settings; the opened window remains.
    with SaveContext(bpy.context, "show_window") as save:
        render = bpy.context.scene.render
        prefs = bpy.context.preferences
        main_window = bpy.context.window_manager.windows[0]
        # is_dirty is saved/restored so the temporary preference change is undone.
        save.prop(prefs, 'is_dirty view.render_display_type')
        save.prop(render, 'resolution_x resolution_y resolution_percentage')
        # Values <= 1.0 are fractions of the main window; larger values are pixels.
        render.resolution_x = int(main_window.width * width) if width <= 1.0 else int(width)
        render.resolution_y = int(main_window.height * height) if height <= 1.0 else int(height)
        render.resolution_percentage = 100
        prefs.view.render_display_type = 'WINDOW'
        bpy.ops.render.view_show('INVOKE_DEFAULT')
    # The newest window in the manager is the one just opened.
    return bpy.context.window_manager.windows[-1]
def show_text_window(text, title, width=0.5, height=0.5, font_size=16):
    """Open a window at the cursor displaying the given text."""
    # Open a render preview window, then modify it to show a text editor instead
    window = show_window(width, height)
    area = window.screen.areas[0]
    area.type = 'TEXT_EDITOR'
    space = area.spaces[0]
    assert isinstance(space, bpy.types.SpaceTextEditor)
    # Make a temporary text
    # Reuse an existing text datablock with this title, otherwise create one.
    string = text
    text = bpy.data.texts.get(title) or bpy.data.texts.new(name=title)
    text.use_fake_user = False
    text.from_string(string)
    text.cursor_set(0)
    # Minimal interface
    if font_size is not None:
        space.font_size = font_size
    space.show_line_highlight = True
    space.show_line_numbers = False
    space.show_margin = False
    space.show_region_footer = False
    space.show_region_header = False
    space.show_region_ui = False
    space.show_syntax_highlight = False
    space.show_word_wrap = True
    space.text = text
def register(settings, prefs):
    # `settings` and `prefs` are part of the add-on's registration interface; unused here.
    bpy.utils.register_class(GRET_OT_property_warning)
def unregister():
    # Mirror of register(): remove the warning operator.
    bpy.utils.unregister_class(GRET_OT_property_warning)
| greisane/gret | operator.py | operator.py | py | 21,651 | python | en | code | 298 | github-code | 6 | [
{
"api_name": "functools.partial",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "log.log",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "re.compile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_numbe... |
16404587226 | from ksz.src import plot
import matplotlib.pyplot as plt
# Catalogue files to compare; each provides galaxy/group entries with mass columns.
data_path_list = [
        '/data/ycli/dr12/galaxy_DR12v5_LOWZ_North_TOT_wMASS.dat',
        '/data/ycli/dr12/galaxy_DR12v5_LOWZ_South_TOT_wMASS.dat',
        '/data/ycli/dr12/galaxy_DR12v5_CMASS_North_TOT_wMASS.dat',
        '/data/ycli/dr12/galaxy_DR12v5_CMASS_South_TOT_wMASS.dat',
        #'/data/ycli/6df/6dFGS_2MASS_RA_DEC_Z_J_K_bJ_rF_GOOD.cat',
        #'/data/ycli/group_catalog/6dFGS_M_group.dat',
        #'/data/ycli/group_catalog/6dFGS_L_group.dat',
        '/data/ycli/group_catalog/SDSS_M_group.dat',
        #'/data/ycli/group_catalog/SDSS_L_group.dat',
        '/data/ycli/cgc/CGC_wMASS.dat',
        ]
# Legend labels, in the same order as data_path_list.
label_list = [
        'LOWZ North CGC',
        'LOWZ South CGC',
        'CMASS North',
        'CMASS South',
        #'6dF',
        #'6dF mass-weighted halo center',
        #'6dF luminosity-weighted halo center',
        'DR13 Group',
        #'dr13 luminosity-weighted halo center',
        'DR7 CGC',
        ]
# Aperture values, one per catalogue (used by plot_rvir_hist below).
# NOTE(review): units are not stated here — confirm against ksz.src.plot.
ap_list = [
        7.,
        7.,
        #0.,
        #0.,
        8.,
        #11.,
        11.,
        #11.,
        7.,
        #7.,
        ]
# Only the halo-mass histogram is currently enabled; the other plots are kept
# commented out for quick switching.
#plot.plot_stellarmass_hist(data_path_list, label_list)
plot.plot_halomass_hist(data_path_list, label_list)
#plot.plot_rvir_hist(data_path_list, label_list, rho_crit = 2.775e11, ap_list=ap_list)
#plot.plot_z_hist(data_path_list, label_list)
plt.show()
| YichaoLi/pksz | plot_pipe/plot_stellar_mass.py | plot_stellar_mass.py | py | 1,401 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ksz.src.plot.plot_halomass_hist",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ksz.src.plot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "m... |
28153479584 | import src.fileIO as io
import src.chris as chris
import src.filepaths as fp
import src.analysis as anal
import src.plotting as plot
from pathlib import Path
def batch_calculate_peak_wavelength(parent_directory,
                                    batch_name,
                                    file_paths,
                                    directory_paths,
                                    plot_files):
    '''
    Calculate sample batch peak wavelength and error, from individual files
    within batch.
    Args:
        parent_directory: <string> parent directory identifier
        batch_name: <string> batch name string
        file_paths: <array> array of target file paths
        directory_paths: <dict> dictionary containing required paths
        plot_files: <string> "True" or "False" for plotting output
    Returns:
        results_dictionary: <dict>
            Batch Name
            File Names
            File Paths
            Secondary Strings
            Individual file values for:
                Background Files
                Region Trim Index: <array> min, max indices
                popt: <array> fano fit parameters:
                    peak, gamma, q, amplitude, damping
                pcov: <array> fano fit errors
                    peak, gamma, q, amplitude, damping
    '''
    batch_dictionary = fp.update_batch_dictionary(
        parent=parent_directory,
        batch_name=batch_name,
        file_paths=file_paths)
    for file in file_paths:
        wavelength, raw_intensity = io.read_GMR_file(file_path=file)
        sample_parameters = fp.sample_information(file_path=file)
        background_file, background_parameters = fp.find_background(
            background_path=directory_paths['Background Path'],
            sample_details=sample_parameters,
            file_string='.txt')
        # NOTE(review): debug print left in; consider removing or using a logger.
        print(background_file)
        if len(background_file) == 0:
            # No matching background: normalise the time-corrected signal only.
            normalised_intensity = anal.normalise_intensity(
                raw_intensity=anal.timecorrected_intensity(
                    raw_intensity=raw_intensity,
                    integration_time=sample_parameters[
                        f'{parent_directory} Integration Time']))
        else:
            # Normalise against the matching background spectrum, accounting
            # for the two (possibly different) integration times.
            _, background_raw_intensity = io.read_GMR_file(
                file_path=background_file[0])
            background_parent = background_parameters['Parent Directory']
            normalised_intensity = anal.bg_normal_intensity(
                intensity=raw_intensity,
                background_intensity=background_raw_intensity,
                integration_time=sample_parameters[
                    f'{parent_directory} Integration Time'],
                background_integration_time=background_parameters[
                    f'{background_parent} Integration Time'])
        out_string = sample_parameters[f'{parent_directory} Secondary String']
        # Always save a plot of the normalised spectrum for reference.
        plot.spectrumplt(
            wavelength=wavelength,
            intensity=normalised_intensity,
            out_path=Path(f'{directory_paths["Results Path"]}/{batch_name}_{out_string}'))
        peak_results = chris.calc_peakwavelength(
            wavelength=wavelength,
            normalised_intensity=normalised_intensity,
            sample_details=sample_parameters,
            plot_figure=plot_files,
            out_path=Path(
                f'{directory_paths["Results Path"]}'
                f'/{batch_name}_{out_string}_Peak.png'))
        # Accumulate per-file parameters and fit results under the file's key.
        batch_dictionary.update(
            {f'{out_string} File': sample_parameters})
        batch_dictionary.update(
            {f'{out_string} Background': background_parameters})
        batch_dictionary.update(peak_results)
    return batch_dictionary
if __name__ == '__main__':
    ''' Organisation '''
    # Resolve the directory layout and collect the spectrum files to process.
    root = Path().absolute()
    info, directory_paths = fp.get_directory_paths(root_path=root)
    file_paths = fp.get_files_paths(
        directory_path=directory_paths['Spectrum Path'],
        file_string='.txt')
    parent, batches = fp.get_all_batches(file_paths=file_paths)
    ''' Batch Processing '''
    for batch, filepaths in batches.items():
        out_file = Path(
            f'{directory_paths["Results Path"]}'
            f'/{batch}_Peak.json')
        if out_file.is_file():
            # Results already exist for this batch; skip re-processing.
            pass
        else:
            results_dictionary = batch_calculate_peak_wavelength(
                parent_directory=parent,
                batch_name=batch,
                file_paths=filepaths,
                directory_paths=directory_paths,
                plot_files=info['Plot Files'])
            io.save_json_dicts(
                out_path=out_file,
                dictionary=results_dictionary)
| jm1261/PeakFinder | batch_peakfinder.py | batch_peakfinder.py | py | 4,669 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.filepaths.update_batch_dictionary",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "src.filepaths",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "src.fileIO.read_GMR_file",
"line_number": 43,
"usage_type": "call"
},
{
"api_... |
28610424615 | from __future__ import annotations
import json
import subprocess
import collections
import concurrent.futures
from os import path, system
from datetime import datetime
# Test-case sources and per-test log destinations, resolved relative to the cwd.
root_path = path.abspath("src/test_cases/UI")
report_path = path.abspath("src/reports/concurrent_test_logs")
def generate_pytest_commands():
config_run_test_dir = path.dirname(__file__)
with open(path.join(config_run_test_dir, "config_run_multiple_test.json")) as f:
config_data = json.load(f)
list_test_suite = config_data['test_suite']
pytest_run_cmds = []
for suite in list_test_suite:
test_name = suite['test']['name'].replace(".", "::")
browser_name = suite['test']['browser']
test_suite_option = f"{suite['name']}::{test_name}"
options_cmd = collections.namedtuple('OptionCmd', ['test_name', 'browser'])
pytest_run_cmds.append(options_cmd(test_suite_option, browser_name))
return pytest_run_cmds
def execute_pytest_cmd(option_cmd):
    """Run one pytest suite in a subprocess and return its captured stdout (bytes)."""
    # NOTE(review): backslash path joining assumes Windows — confirm target platform.
    run_cmd_process = subprocess.run(["pytest", f"{root_path}\\{option_cmd.test_name}",
                                      f"--browser={option_cmd.browser}"],
                                     capture_output=True)
    return run_cmd_process.stdout
# Fan the pytest invocations out across threads, one worker per configured suite.
list_options_cmd = generate_pytest_commands()
with concurrent.futures.ThreadPoolExecutor(max_workers=len(list_options_cmd)) as executor:
    # Map each future back to the options it was started with.
    running_cmd = {executor.submit(execute_pytest_cmd, options): options for options in list_options_cmd}
    for completed_cmd in concurrent.futures.as_completed(running_cmd):
        test_ran = running_cmd[completed_cmd].test_name.split("::")[-1]
        # NOTE(review): browser_ran is computed but never used.
        browser_ran = running_cmd[completed_cmd].browser
        try:
            # One timestamped log file per test, holding pytest's captured stdout.
            time_logging = datetime.now().strftime("%Y.%m.%d_(%H-%M-%S.%f)")
            with open(f"{report_path}\\Result_{test_ran}_{time_logging}.log", "wb") as f:
                f.write(completed_cmd.result())
        except Exception as exc:
            print(f"Pytest ran with error {exc}.")
| huymapmap40/pytest_automation | src/config/parallel_test/run_parallel_test.py | run_parallel_test.py | py | 2,068 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
10528777232 | import pygame
from pygame.sprite import Sprite
class Tiro(Sprite):
    """Class to manage the shots fired by the ship."""
    def __init__(self, ik_game):
        """Create a shot at the ship's current position."""
        super().__init__()
        self.screen = ik_game.screen
        self.configuracoes = ik_game.configuracoes
        self.cor = self.configuracoes.tiro_cor
        # Create a shot rect at (0, 0), then reposition it at the right spot
        self.rect = pygame.Rect(0, 0, self.configuracoes.tiro_width,
            self.configuracoes.tiro_height)
        self.rect.midtop = ik_game.nave.rect.midtop
        # Store the shot's position as a float (for fractional-speed movement)
        self.y = float(self.rect.y)
    def update(self):
        """Move the shot up the screen."""
        # Update the shot's float position
        self.y -= self.configuracoes.tiro_vel
        # Update the rect position
        self.rect.y = self.y
    def draw_tiro(self):
        """Draw the shot on the screen."""
        pygame.draw.rect(self.screen, self.cor, self.rect)
| ruansmachado/Invasao_Klingon | tiro.py | tiro.py | py | 1,130 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite.Sprite",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.Rect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"l... |
1040065850 | import numpy as np
import pandas as pd
import operator
from sklearn import preprocessing
# Training data: feature columns with the class label in the last column.
data = pd.read_csv("data.csv",header=None)
# Rescales each fitted column into the [0, 1] range.
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
def classify(v,k,distance):
    """Predict the label of vector v by majority vote among its k neighbours."""
    target_values = data.iloc[:,-1]
    nearest_neighbors = knn(data,k,v,distance)
    # Tally one vote per neighbour, keyed by the neighbour's label.
    votes = {}
    for index in nearest_neighbors:
        label = target_values[index]
        votes[label] = votes.get(label, 0) + 1
    return max(votes.items(),key=operator.itemgetter(1))[0]
def knn(vectors,k,vector_to_classify,distance):
    """Return the row indices of the k *nearest* rows of `vectors` to `vector_to_classify`.

    Each row's features (all columns but the trailing label) are min-max
    scaled before the distance is computed, matching the scaling applied to
    the query vector by the caller.
    """
    distances = []
    for i in range(0,len(vectors)):
        x = vectors.loc[i,:]
        x = x[0:len(x)-1]
        x = min_max_scaler.fit_transform(x.values.astype(float).reshape(-1,1))[:,0]
        distances.append({"index": i,
                          "value": distance(x,vector_to_classify)})
    # Nearest neighbours have the *smallest* distances, so sort ascending.
    # Fix: the previous reverse=True sort selected the k FARTHEST rows.
    distances.sort(key=lambda d: d['value'])
    return [d['index'] for d in distances[0:k]]
def euclidean_distance(x,y):
    """Euclidean (L2) distance between the numeric vectors x and y."""
    squared = sum((x[i] - y[i]) ** 2 for i in range(x.size))
    return squared ** (1 / 2)
def manhattan_distance(x,y):
    """Manhattan (L1) distance between the numeric vectors x and y."""
    return sum(abs(x[i] - y[i]) for i in range(x.size))
def maximum_metric(x,y):
    """Chebyshev (L-infinity) distance: the largest per-component absolute difference."""
    # default=0 reproduces the original behaviour for empty vectors.
    return max((abs(x[i] - y[i]) for i in range(x.size)), default=0)
# Query vectors to classify; columns must match the feature columns of data.csv.
# NOTE(review): column meanings not documented here — confirm against data.csv.
vectors_to_classify = [np.array([1100000,60,1,2,1,500]),
        np.array([1100000,60,1,2,1,500]),
        np.array([1800000,65,1,2,1,1000]),
        np.array([2300000,72,1,3,1,1400]),
        np.array([3900000,110,2,3,1,1800])]
distances = [{'name':'Euclidean Distance','function':euclidean_distance},
        {'name':'Manhattan Distance','function':manhattan_distance},
        {'name':'Maximum Metric','function':maximum_metric}]
# Classify every query vector for each distance metric and each k.
for distance in distances:
    print("Distance " + str(distance['name']))
    for k in [1,3,5]:
        print("K = " + str(k))
        for v in vectors_to_classify:
            # NOTE(review): the scaler is fit on this single vector, so its own
            # min maps to 0 and max to 1 — independent of the training data.
            v = min_max_scaler.fit_transform(v.astype(float).reshape(-1,1))[:,0]
            print(classify(v,k,distance['function']))
| egjimenezg/DataAnalysis | knn/knn.py | knn.py | py | 2,364 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": ... |
25508690525 | #!/usr/bin/env python3
import requests
import os
url = 'http://localhost/upload/'
path = os.getcwd() + '/supplier-data/images/'
only_jpeg = []
for file in os.listdir(path):
name, ext = os.path.splitext(file)
if ext == '.jpeg':
only_jpeg.append(os.path.join(path,file))
for jpeg in only_jpeg:
with open(jpeg, 'rb') as opened:
r = requests.post(url, files={'file': opened})
| paesgus/AutomationTI_finalproject | supplier_image_upload.py | supplier_image_upload.py | py | 393 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,... |
9637017975 | from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
from time import sleep
class InternetSpeed:
    """Measures download/upload speed by driving speedtest.net in Edge."""
    def __init__(self, edge_driver_path):
        self.driver = webdriver.Edge(service=Service(edge_driver_path))
        # Results as scraped from the page (strings after the test runs).
        self.down = 0
        self.up = 0
        # NOTE(review): running the measurement from the constructor is a side
        # effect and blocks for ~70 seconds — confirm this is intended.
        self.get_internet_speed()
    def get_internet_speed(self):
        """Run a speed test and store the reported down/up values, then quit the driver."""
        speedtest_url = "https://www.speedtest.net/"
        self.driver.get(speedtest_url)
        sleep(10)
        start_test = self.driver.find_element(by=By.XPATH,
                                              value='//*[@id="container"]/div/div[3]/div/div/div/div[2]/div[3]/div[1]/a')
        start_test.click()
        # Wait for the test to complete before reading the results.
        sleep(60)
        self.down = self.driver.find_element(by=By.XPATH,
                                             value='//*[@id="container"]/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[2]/div/div[2]/span').text
        self.up = self.driver.find_element(by=By.XPATH,
                                           value='//*[@id="container"]/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[3]/div/div[2]/span').text
        print(self.down)
        print(self.up)
self.driver.quit() | na-lin/100-days-of-Python | Day51_Internet-Speed-Twitter-Complaint-Bot/internet_speed.py | internet_speed.py | py | 1,309 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Edge",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.edge.service.Service",
"line_number": 9,
"usage_type": "call"
},
{
"a... |
74432928827 | """
This file is part of Candela.
Candela is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Candela is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Candela. If not, see <http://www.gnu.org/licenses/>.
"""
import curses
import sys
import signal
import threading
import textwrap
import platform
import constants
class Shell():
"""
The main Candela class
Controls the shell by taking control of the current terminal window.
Performs input and output to the user
"""
def __init__(self, scriptfile=None):
    """
    Create an instance of a Shell

    This call takes over the current terminal by calling curses.initscr()
    Sets global shell state, including size information, menus, stickers,
    the header, and the prompt.

    Kwargs:
        scriptfile - the name of the script file to run. If not None and the
            file exists, the script will be immediately run.
    """
    self._register_sigint_handler()
    self.script_lines = self._parse_script_file(scriptfile)
    self.script_counter = 0
    # NOTE(review): _parse_script_file() already stored the filename on
    # self.scriptfile; resetting it to "" here looks unintentional -- confirm.
    self.scriptfile = ""
    # Take over the terminal; keypad(1) makes curses translate arrow and
    # function keys into single KEY_* codes for getch().
    self.stdscr = curses.initscr()
    self.stdscr.keypad(1)
    self.platform = self._get_platform()
    # holds the backlog of shell output as (text, is_command) tuples
    self.backbuffer = []
    self.height,self.width = self.stdscr.getmaxyx()
    # the list of menus in the shell app
    self.menus = []
    # the currently visible stickers in the app, as (text, (y, x)) tuples
    self.stickers = []
    # should the command menu be shown
    self.should_show_help = True
    # for commands with only positional args, show the
    # name of the next argument as the user types
    self.should_show_hint = False
    # dictionary of functions to call on key events
    # keys are chars representing the pressed keys
    self.keyevent_hooks = {}
    # the text to stick in the upper left corner of the window
    self.header = ""
    self._header_bottom = 0
    self._header_right = 0
    self._header_right_margin = 50
    self.prompt = "> "
def _parse_script_file(self, filename):
"""
Open a file if it exists and return its contents as a list of lines
Args:
filename - the file to attempt to open
"""
self.scriptfile = filename
try:
f = open(filename, 'r')
script_lines = f.readlines()
script_lines = [a.strip('\n') for a in script_lines]
f.close()
except Exception as e:
return
return script_lines
def runscript(self, scriptfile):
    """
    Prepare the shell to execute a script read from disk.

    Resets the script cursor and loads the file's lines so the main loop
    starts consuming them from the beginning.

    Args:
        scriptfile - string name of the file containing the script;
            paths are relative to the process working directory
    """
    self.script_counter = 0
    self.script_lines = self._parse_script_file(scriptfile)
def get_helpstring(self):
    """
    Build the preformatted help text for the current menu.

    Returns a string containing the menu title, a divider, and the menu's
    own command listing, or None when no menu is active.
    """
    current = self.get_menu()
    if not current:
        return None
    divider = "-" * 20
    return "\n\n{}\n{}\n{}".format(current.title, divider, current.options())
def sticker(self, output, new_output="", pos=None):
    """
    Place, change, or remove a sticker from the shell window.

    Candela has the concept of a sticker - a small block of text that
    is "stuck" to the window. They can be used to convey persistent
    information to the shell user.

    If only output is specified, this creates a new sticker with the string
    output. If output and new_output are specified, and there is an existing
    sticker whose text is the same as output, this will replace that
    sticker's text with new_output.

    Args:
        output - The text of the sticker to manipulate
    Kwargs:
        new_output - The text that will replace the text of the chosen sticker
        pos - The (y, x) tuple indicating where to place the sticker
    """
    # Default vertical position: one row below the lowest existing
    # sticker (so stickers stack downward), starting at row 3.
    if len(self.stickers) > 0:
        sort = sorted(self.stickers, key=lambda x: x[1][0], reverse=True)
        ht = sort[0][1][0]+1
    else:
        ht = 3
    pos = pos or (ht, self.width - 20)
    # Look for an existing sticker with the same text to replace.
    match = None
    for text,_pos in self.stickers:
        if output == text:
            match = (text,_pos)
            break
    if match:
        self.remove_sticker(match[0])
    # A replaced sticker keeps its original position; a new one uses pos.
    sticker = (new_output or output, match[1] if match else pos)
    self.stickers.append(sticker)
    self._update_screen()
def remove_sticker(self, text):
    """
    Delete every sticker whose text matches the given string.

    Args:
        text - the text of the sticker(s) to remove
    """
    kept = []
    for sticker_text, position in self.stickers:
        if sticker_text != text:
            kept.append((sticker_text, position))
    self.stickers = kept
def _print_stickers(self):
"""
Print all current stickers at the appropriate positions
"""
for text,pos in self.stickers:
_y,_x = pos
if _x + len(text) > self.width:
_x = self.width - len(text) - 1
self.stdscr.addstr(_y, _x, text)
def _print_header(self):
    """
    Print the header in the appropriate position (upper-left corner).
    """
    ht = 0
    for line in self.header.split("\n"):
        # Pad each header line with trailing blanks so stale characters
        # from a previously printed, longer header are overwritten.
        self.stdscr.addstr(ht, 0, line + (" "*self._header_right_margin))
        # Track the widest header line seen so far; other elements
        # (e.g. the help box) are positioned relative to it.
        if len(line) > self._header_right:
            self._header_right = len(line)
        ht += 1
    # Blank separator row directly under the header.
    self.stdscr.addstr(ht, 0, " "*(self._header_right+self._header_right_margin))
    self._header_bottom = ht
    # NOTE(review): 49 looks like a layout-tuning constant related to
    # _header_right_margin (50) -- confirm before changing either one.
    self.mt_width = self._header_right + 49
def clear(self):
    """
    Remove all scrollback text from the window.

    Pushes one blank line per screen row through put(), which scrolls
    all previous output out of the visible region.
    """
    # The previous version copied self.backbuffer into an unused local
    # and used a pointless temp variable; both removed.
    for _ in range(self.height):
        self.put("\n")
def _print_backbuffer(self):
"""
Print the previously printed output above the current command line.
candela.shell.Shell stores previously printed commands and output
in a backbuffer. Like a normal shell, it handles printing these lines
in reverse order to allow the user to see their past work.
"""
rev = list(self.backbuffer)
rev.reverse()
for i, tup in zip(range(len(rev)), rev):
string, iscommand = tup
ypos = self.height-2-i
if ypos > 0:
printstring = string
if iscommand:
printstring = "%s%s" % (self.prompt, string)
self.stdscr.addstr(ypos,0,printstring)
def _print_help(self):
"""
Print the menu help box for the current menu
"""
_helpstring = self.get_helpstring()
if not _helpstring:
return
helpstrings = [" %s" % a for a in _helpstring.split("\n")]
ht = 0
longest = len(max(helpstrings, key=len))
_x = self._header_right + self._header_right_margin
if _x + longest > self.width:
_x = self.width - longest - 1
for line in helpstrings:
self.stdscr.addstr(ht, _x, line + " "*15)
ht += 1
def put(self, output, command=False):
    """
    Print the output string on the bottom line of the shell window.

    Each printed line is also appended to the backbuffer (capped at 200
    entries) so it scrolls up the screen on subsequent refreshes.

    Args:
        output - The string to print. May contain newlines.
    Kwargs:
        command - False if the string was not a user-entered command,
            True otherwise (users of Candela should always use False)
    """
    self._update_screen()
    if not output:
        return
    output = str(output)
    _x, _y = (self.height - 1, 0)
    # Wrap any line that is too wide for the window. The inner loop
    # variable no longer shadows `line` as it did before.
    lines = []
    for line in output.split('\n'):
        if len(line) > self.width - 3:
            for wrapped in textwrap.wrap(line, self.width - 3):
                lines.append(wrapped)
        else:
            lines.append(line)
    for line in lines:
        # NOTE: every line is drawn at the same bottom-row position;
        # earlier lines become visible via the backbuffer on the next
        # screen refresh.
        self.stdscr.addstr(_x, _y, line)
        to_append = (line, command)
        # A bare prompt line is not worth remembering.
        if line != self.prompt:
            # Cap the backbuffer at 200 entries by dropping the oldest.
            index = 0
            if len(self.backbuffer) >= 200:
                index = 1
            self.backbuffer = self.backbuffer[index:] + [to_append]
def _input(self, prompt):
    """
    Handle user input on the shell window.

    Works similarly to python's raw_input().
    Takes a prompt and returns the raw string entered before the return key
    by the user.
    The input is returned with newlines stripped.

    Args:
        prompt - The text to display prompting the user to enter text
    """
    self.put(prompt)
    keyin = ''          # last key code read (an int once the loop starts)
    buff = ''           # the command line being assembled
    hist_counter = 1    # 1-based position in the command history
    while keyin != 10:  # 10 == newline / enter key
        keyin = self.stdscr.getch()
        _y,_x = self.stdscr.getyx()
        # cursor position within buff (the prompt occupies the first columns)
        index = _x - len(self.prompt)
        #self.stdscr.addstr(20, 70, str(keyin)) # for debugging
        try:
            # User-registered key hooks run first; a hook returning False
            # swallows the keypress entirely.
            if chr(keyin) in self.keyevent_hooks.keys():
                cont = self.keyevent_hooks[chr(keyin)](chr(keyin), buff)
                if cont == False:
                    continue
        except:
            # chr() or the hook itself may raise for special keys; those
            # simply have no hooks, so the keypress falls through below.
            pass
        if keyin in [127, 263]: # backspaces
            # Delete the character left of the cursor; the slice offsets
            # are platform-specific (see _get_backspace_indices).
            del_lo, del_hi = self._get_backspace_indices()
            buff = buff[:index+del_lo] + buff[index+del_hi:]
            self._redraw_buffer(buff)
            self.stdscr.move(_y, max(_x+del_lo, len(self.prompt)))
        elif keyin in [curses.KEY_UP, curses.KEY_DOWN]: # up and down arrows
            # Replace the buffer with an entry from the command history.
            hist_counter,buff = self._process_history_command(keyin, hist_counter)
        elif keyin in [curses.KEY_LEFT, curses.KEY_RIGHT]: # left, right arrows
            # Move the cursor, clamped between the prompt and end of buffer.
            if keyin == curses.KEY_LEFT:
                newx = max(_x - 1, len(self.prompt))
            elif keyin == curses.KEY_RIGHT:
                newx = min(_x + 1, len(buff) + len(self.prompt))
            self.stdscr.move(_y, newx)
        elif keyin == curses.KEY_F1: # F1
            # Hard exit: restore the terminal, then quit the process.
            curses.endwin()
            sys.exit()
        elif keyin in [9]: # tab
            choices = self._tabcomplete(buff)
            if len(choices) == 1:
                # Exactly one completion: splice it into the buffer,
                # replacing the partial token being completed.
                if len(buff.split()) == 1 and not buff.endswith(' '):
                    buff = choices[0]
                else:
                    if len(buff.split()) != 1 and not buff.endswith(' '):
                        buff = ' '.join(buff.split()[:-1])
                    if buff.endswith(' '):
                        buff += choices[0]
                    else:
                        buff += ' ' + choices[0]
            elif len(choices) > 1:
                # Ambiguous: list all the candidates (like bash does).
                self.put(" ".join(choices))
            elif len(choices) == 0:
                pass
            self._redraw_buffer(buff)
        elif keyin >= 32 and keyin <= 126: # ascii input
            # Insert the typed character at the cursor position.
            buff = buff[:index-1] + chr(keyin) + buff[index-1:]
            self._redraw_buffer(buff)
            self.stdscr.move(_y, min(_x, len(buff) + len(self.prompt)))
            if self.should_show_hint and keyin == 32:
                # After a space, preview the name of the next positional
                # argument for commands without flag-style definitions.
                command = self._get_command(buff)
                if hasattr(command, 'definition') and '-' not in command.definition:
                    try:
                        nextarg = command.definition.split()[len(buff.split())]
                        self.stdscr.addstr(_y, _x+1, nextarg)
                        self.stdscr.move(_y, _x)
                    except:
                        pass
    self.put(buff, command=True)
    self.stdscr.refresh()
    return buff
def _get_backspace_indices(self):
if self.platform == "Linux":
return (0, 1)
elif self.platform == "Darwin":
return (-len(self.prompt)-1, -len(self.prompt))
def _tabcomplete(self, buff):
"""
Get a list of possible completions for the current buffer
If the current buffer doesn't contain a valid command, see if the
buffer is a prefix of any valid commands. If so, return those as possible
completions. Otherwise, delegate the completion finding to the command object.
Args:
buff - The string buffer representing the current unfinished command input
Return:
A list of completion strings for the current token in the command
"""
menu = self.get_menu()
commands = []
if menu:
commands = menu.commands
output = []
if len(buff.split()) <= 1 and ' ' not in buff:
for command in commands:
if command.name.startswith(buff):
output.append(command.name)
for alias in command.aliases:
if alias.startswith(buff):
output.append(alias)
else:
command = self._get_command(buff)
if command:
output = command._tabcomplete(buff)
return output
def _get_command(self, buff):
"""
Get the command instance referenced by string in the current input buffer
Args:
buff - The string version of the current command input buffer
Return:
The Command instance corresponding to the buffer command
"""
menu = self.get_menu()
commands = []
if menu:
commands = menu.commands
if len(commands) == 0:
self.put("No commands found. Maybe you forgot to set self.menus or self.menu?")
self.put("Hint: use F1 to quit")
for command in commands:
if command.name == buff.split()[0] or buff.split()[0] in command.aliases:
return command
return None
def _redraw_buffer(self, buff):
"""
Clear the bottom line and re-print the given string on that line
Args:
buff - The line to print on the cleared bottom line
"""
self.stdscr.addstr(self.height-1, 0, " "*(self.width-3))
self.stdscr.addstr(self.height-1, 0, "%s%s" % (self.prompt, buff))
def _process_history_command(self, keyin, hist_counter):
    """
    Fetch a command from the history and redraw it on the prompt line.

    Args:
        keyin - The arrow-key code just pressed (KEY_UP or KEY_DOWN)
        hist_counter - The current 1-based position in the history,
            counted backwards from the most recent command

    Returns:
        (new_hist_counter, buff) - the adjusted history position and the
        history entry that was displayed
    """
    # Only user-entered commands (is_command flag True) count as history.
    hist_commands = [(s,c) for s,c in self.backbuffer if c]
    if not hist_commands:
        return hist_counter, ""
    # NOTE(review): if KEY_DOWN drives hist_counter to 0, the next lookup
    # is hist_commands[-0] == hist_commands[0], i.e. the OLDEST entry --
    # confirm whether that wraparound is intentional.
    buff = hist_commands[-hist_counter][0]
    self.stdscr.addstr(self.height-1, 0, " "*(self.width-3))
    self.stdscr.addstr(self.height-1, 0, "%s%s" % (self.prompt, buff))
    if keyin == curses.KEY_UP and hist_counter < len(hist_commands):
        hist_counter += 1
    elif keyin == curses.KEY_DOWN and hist_counter > 0:
        hist_counter -= 1
    return hist_counter, buff
def _script_in(self):
"""
Substitute for _input used when reading from a script.
Returns the next command from the script being read.
"""
if not self.script_lines:
return None
if self.script_counter < len(self.script_lines):
command = self.script_lines[self.script_counter]
self.script_counter += 1
else:
command = None
return command
def main_loop(self):
    """
    The main shell IO loop.

    The sequence of events is as follows:
        get an input command (from the script if one is loaded)
        split into tokens
        find matching command
        validate tokens for command
        run command

    This loop can be broken out of only by a command returning
    constants.CHOICE_QUIT or by pressing F1.
    """
    ret_choice = None
    while ret_choice != constants.CHOICE_QUIT:
        success = True
        ret_choice = constants.CHOICE_INVALID
        # Prefer scripted input; fall back to interactive input when
        # the script is absent or exhausted.
        choice = self._script_in()
        if choice:
            # Echo scripted commands so the session transcript looks
            # the same as an interactive one.
            self.put("%s%s" % (self.prompt, choice))
        else:
            choice = self._input(self.prompt)
        tokens = choice.split()
        if len(tokens) == 0:
            self.put("\n")
            continue
        command = self._get_command(choice)
        if not command:
            self.put("Invalid command - no match")
            continue
        try:
            args, kwargs = command.parse_command(tokens)
            success, message = command.validate(*args, **kwargs)
            if not success:
                self.put(message)
            else:
                ret_choice = command.run(*args, **kwargs)
                if ret_choice == constants.CHOICE_INVALID:
                    self.put("Invalid command")
                else:
                    # A command may return the name of a menu to switch
                    # to; unknown names are reported, not fatal.
                    menus = [a.name for a in self.menus]
                    if str(ret_choice).lower() in menus:
                        self.menu = ret_choice.lower()
                    else:
                        self.put("New menu '%s' not found" % ret_choice.lower())
        except Exception as e:
            # Surface command errors to the user instead of crashing
            # the shell; put() will str() the exception object.
            self.put(e)
    return self
def get_menu(self):
    """
    Get the current menu (the Menu whose name matches self.menu).

    Returns None when no menus are configured, when self.menu has not
    been set yet, or when no menu matches. (The previous implementation
    used a bare except:, which also hid unrelated errors.)
    """
    if not self.menus:
        return None
    current_name = getattr(self, 'menu', None)
    for candidate in self.menus:
        if candidate.name == current_name:
            return candidate
    return None
def defer(self, func, args=(), kwargs=None, timeout_duration=10, default=None):
    """
    Create a new thread, run func in the thread for a max of
    timeout_duration seconds.

    This is useful for blocking operations that must be performed after
    the next window refresh -- e.g. a command that sets a sticker and
    wants it removed shortly after the next redraw:

        def _run(*args, **kwargs):
            self.sticker("Hello!")
            # do things...
            def clear_sticker():
                time.sleep(.1)
                self.remove_sticker("Hello!")
            self.defer(clear_sticker)

    Args:
        func - The callback function to run in the new thread
    Kwargs:
        args - The arguments to pass to the threaded function
        kwargs - The keyword arguments to pass to the threaded function
            (defaults to no keyword arguments; the old mutable-{} default
            has been replaced with a None sentinel)
        timeout_duration - the amount of time in seconds to wait before
            giving up on the thread
        default - The value to return in case of a timeout
    """
    if kwargs is None:
        kwargs = {}

    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = default

        def run(self):
            self.result = func(*args, **kwargs)

    it = InterruptableThread()
    it.start()
    it.join(timeout_duration)
    # Bug fix: the old code called it.isAlive(), which was removed in
    # Python 3.9 (renamed is_alive()) -- and both branches returned the
    # same value anyway. If the thread timed out it keeps running in the
    # background and `default` is returned.
    return it.result
def end(self):
    """
    End the current Candela shell and safely shut down the curses session.

    Restores the terminal to its normal (pre-curses) state; call this
    before the process exits to avoid leaving the terminal unusable.
    """
    curses.endwin()
def _register_sigint_handler(self):
    """
    Properly handle ^C and any other method of sending SIGINT.

    Shuts curses down cleanly (via self.end()) before exiting, which
    avoids leaving the user with a borked up terminal.
    """
    # (The handler's `signal` parameter shadows the signal module, but
    # the module is not referenced inside the handler, so it is safe.)
    def signal_handler(signal, frame):
        self.end()
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
def _update_screen(self):
    """
    Refresh the screen and redraw all elements in their appropriate
    positions: backbuffer, header, help box, and stickers.
    """
    # Re-read the window size each refresh so resizes are picked up.
    self.height,self.width = self.stdscr.getmaxyx()
    self.stdscr.clear()
    self._print_backbuffer()
    # Skip the header and help box when the window is too small to fit
    # them. NOTE(review): 80 and 37 look like tuned minimum-size margins
    # -- confirm before changing.
    if self.width < self._header_right + 80 or self.height < self._header_bottom + 37:
        pass
    else:
        self._print_header()
        if self.should_show_help:
            self._print_help()
    self._print_stickers()
    self.stdscr.refresh()
def _get_platform(self):
"""
Return the platform name. This is fine, but it's used in a hacky way to
get around a backspace-cooking behavior in Linux (at least Ubuntu)
"""
return platform.uname()[0]
| emmettbutler/candela | candela/shell.py | shell.py | py | 21,960 | python | en | code | 71 | github-code | 6 | [
{
"api_name": "curses.initscr",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "textwrap.wrap",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "curses.KEY_UP",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_DOWN",
... |
15206966945 | # -*- coding: utf-8 -*-
"""
Ventricular tachycardia, ventricular bigeminy, Atrial fibrillation,
Atrial fibrillation, Ventricular trigeminy, Ventricular escape ,
Normal sinus rhythm, Sinus arrhythmia, Ventricular couplet
"""
import tkinter as tk
import scipy.io as sio
from PIL import Image, ImageTk
class App():
    """Tkinter GUI that replays pre-recorded cardiac arrhythmia signals.

    One of nine rhythms is chosen with radio buttons; while streaming is
    enabled ("Iniciar"), consecutive samples of the chosen signal are
    printed on a timer.
    """
    ancho=760    # window width in pixels
    alto=760     # window height in pixels
    estado=False # True while streaming is enabled ("Iniciar" pressed)
    contadores=[0,0,0,0,0,0,0,0,0]#per-rhythm counters for the index of the next sample to emit
    #currently selected signal array (stays 0 until a rhythm is picked;
    #NOTE(review): starting the stream before picking a rhythm would
    #index into the int 0 and crash -- confirm whether a guard is needed)
    Signal=0
    def __init__(self):
        #load the .mat signal variables, then build the UI
        self.raiz=tk.Tk()
        self.importData()
        self.frame=tk.Frame(self.raiz,bg="white")
        self.frame.config(width=self.ancho,height=self.alto)
        self.frame.pack()
        self.titulo=tk.Label(self.frame,bg="white",text="Dispositivo Generador de Arritmias Cardiacas")
        self.titulo.config(font=("Grotesque",24))
        self.titulo.place(x=0,y=0,width=self.ancho,height=self.alto//16)
        self.opcion = tk.IntVar()
        names=["Taquicardia ventricualar","Bigeminismo Ventricular","Fibrilacion atrial","Flutter atrial","Trigeminismo Ventricular",
               "Escape Ventricular","Ritmo Sinusal","Arritmia Sinusal","Couplet Ventricular"]
        # One radio button per rhythm; values 1..9 map to selec()'s cases.
        for i in range(1,10):
            tk.Radiobutton(self.frame, text=names[i-1],font=("Grotesque",16) ,variable=self.opcion,bg="white",anchor="w",
                           value=i, command=self.selec).place(x=50,y=self.alto//8+(i-1)*self.alto//20,
                           width=self.ancho//2.5,height=self.alto//32)
        temp=Image.open('LOGO_UMNG.png')
        #NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
        #newer Pillow versions require Image.LANCZOS here.
        temp=temp.resize((200, 250), Image.ANTIALIAS)
        self.imagen = ImageTk.PhotoImage(temp)
        tk.Label(self.raiz, image=self.imagen,bg="white").place(x=450,y=140)
        self.nombres=tk.Label(self.frame,bg="white",text="Juan Camilo Sandoval Cabrera\nNohora Camila Sarmiento Palma",anchor="e")
        self.nombres.config(font=("Grotesque",12))
        self.nombres.place(x=420,y=420,width=self.ancho//3,height=self.alto//16)
        tk.Button(self.frame, text="Iniciar",font=("Grotesque",16),command=self.Estado_DataON).place(x=270,y=600)
        tk.Button(self.frame, text="Pausar",font=("Grotesque",16),command=self.Estado_DataOFF).place(x=400,y=600)
        #kick off the periodic sample-emitting loop
        self.titulo.after(700,self.Enviar_Data)
    def Estado_DataON(self):
        #"Iniciar" button: enable streaming
        self.estado=True
    def Estado_DataOFF(self):
        #"Pausar" button: disable streaming
        self.estado=False
    def Enviar_Data(self):
        #Emit the next sample of the selected signal, then re-schedule
        #itself via tkinter's after() (delay in milliseconds).
        delay=3
        op=self.opcion.get()
        c=op-1
        if self.estado:
            print(self.Signal[0,self.contadores[c]])
            self.contadores[c]+=1
        if c==7:
            #the sinus-arrhythmia recording uses a longer interval;
            #presumably to match its sampling rate -- confirm
            delay=4
        self.titulo.after(delay,self.Enviar_Data)
    def selec(self):
        op=self.opcion.get()#radio-button callback: map option 1..9 to its signal
        if op==1:
            self.Signal=self.VT #signal data for this rhythm
        elif op==2:
            self.Signal=self.VB #signal data for this rhythm
        elif op==3:
            self.Signal=self.AFIB #signal data for this rhythm
        elif op==4:
            self.Signal=self.AFL #signal data for this rhythm
        elif op==5:
            self.Signal=self.VTRI #signal data for this rhythm
        elif op==6:
            self.Signal=self.VES #signal data for this rhythm
        elif op==7:
            self.Signal=self.S #signal data for this rhythm
        elif op==8:
            self.Signal=self.SARR #signal data for this rhythm
        elif op==9:
            self.Signal=self.VCOUP #signal data for this rhythm
    def iniciar(self):
        #start the tkinter event loop (blocks until the window closes)
        self.raiz.mainloop()
    def importData(self):
        #load each rhythm recording from its .mat file; every file holds
        #a 'SignalNorm' array
        AFIB=sio.loadmat('AFIB.mat')
        self.AFIB=AFIB['SignalNorm']
        AFL=sio.loadmat('AFL.mat')
        self.AFL=AFL['SignalNorm']
        S=sio.loadmat('S.mat')
        self.S=S['SignalNorm']
        VES=sio.loadmat('VS.mat')
        self.VES=VES['SignalNorm']
        VCOUP=sio.loadmat('VCop.mat')
        self.VCOUP=VCOUP['SignalNorm']
        VT=sio.loadmat('TV.mat')
        self.VT=VT['SignalNorm']
        SARR=sio.loadmat('SARR.mat')
        self.SARR=SARR['SignalNorm']
        VB=sio.loadmat('VB.mat')
        self.VB=VB['SignalNorm']
        #VT=sio.loadmat('VT.mat')#(source file was lost)
        #self.VT=VT['SignalNorm']
        VTRI=sio.loadmat('VTRI.mat')
        self.VTRI=VTRI['SignalNorm']
def main():
    """Build the App and hand control to the tkinter main loop."""
    mi_app = App()
    mi_app.iniciar()

if __name__ == '__main__':
    main()
| Sandovaljuan99/INMEDUMG | Cardiac arrhythmia simulator/IGPY.py | IGPY.py | py | 4,934 | python | es | code | 1 | github-code | 6 | [
{
"api_name": "tkinter.Tk",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tkinter.IntVar",
"line_num... |
28558999835 | from helper import is_prime, find_prime_factors, int_list_product
def smallest_multiple(n):
    """
    Return the smallest positive number evenly divisible by every
    integer from 1 to n inclusive (i.e. lcm(1, ..., n)).

    Bug fix: the previous version iterated range(2, n), silently
    excluding n itself. For n = 20 the printed result happened to be
    unchanged (20's prime factors are already covered by 16 and 5),
    but e.g. smallest_multiple(4) returned 6 instead of 12.
    """
    from math import gcd  # local import keeps the module's import block unchanged

    result = 1
    for i in range(2, n + 1):
        # lcm(result, i) == result * i // gcd(result, i)
        result = result * i // gcd(result, i)
    return result
# Print the answer to Project Euler problem 5 (n = 20).
print(str(smallest_multiple(20)))
{
"api_name": "helper.find_prime_factors",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "helper.int_list_product",
"line_number": 18,
"usage_type": "call"
}
] |
42663501049 | import os
import torch
import datetime
import numpy as np
import pandas as pd
from src.attn_analysis import gradcam
from src.attn_analysis import iou_analysis
from src.attn_analysis import blue_heatmap
from src.attn_analysis import extract_disease_reps
from src.attn_analysis import make_2d_plot_and_3d_gif
import warnings
warnings.filterwarnings('ignore')
class AttentionAnalysis(object):
def __init__(self, results_dir_force,
             base_results_dir, task,
             attention_type, attention_type_args,
             setname, valid_results_dir,
             custom_net, custom_net_args, params_path,
             stop_epoch, which_scans, dataset_class, dataset_args):
    """
    Variables:
    <results_dir_force>: path to a results directory. If this is a valid
        path, then all results will be stored in here. If this is NOT a
        valid path, a new directory for the new results will be created
        based on <base_results_dir>.
    <base_results_dir>: path to the base results directory. A new directory
        will be created within this directory to store the results of
        this experiment.
    <task>: a list of strings. The strings may include 'iou_analysis',
        'blue_heatmaps', and/or 'attn_plots'.
        If <task> contains 'iou_analysis' then calculate approximate IOU
            statistics for the final epoch of a model.
            Specifically, the 'IOU' is calculated as the ratio of raw scores
            within the allowed area to raw scores outside of the allowed
            area.
            Produces iou_wide_df, a dataframe with the following 5 columns:
                'Epoch': int, the epoch in which the IOU was calculated.
                'IOU': float, the 'IOU' value for this label's attention map vs. the
                    segmentation ground truth (which in this case is the approximate
                    attention ground truth.)
                'Label': string for the label for which IOU was calculated e.g. 'airplane'
                'VolumeAccession': volume accession number
                'LabelsPerImage': total number of labels present in this image
            Also produces dfs that summarize the IOU across different ways
            of grouping the data.
        If <task> contains 'blue_heatmaps' then make a blue heatmap showing
            the disease scores for each slice.
        If <task> contains 'attn_plots' then make visualizations of the
            attention superimposed on the CT scan (as a 3D gif, and as a 2D plot
            for the slice with the highest score for that disease). Also if
            doing Grad-CAM, make a 2d debugging plot.
    <attention_type>: str; either
        'gradcam-vanilla' for vanilla Grad-CAM, or
        'hirescam' for HiResCAM, in which feature maps and gradients are
            element-wise multiplied and then we take the avg over the
            feature dimension, or
        'hirescam-check' for alternative implementation of HiResCAM
            attention calculation, which can be used in a model that
            has convolutional layers followed by a single FC layer.
            In this implementation, the HiResCAM attention is calculated
            during the forward pass of the model by element-wise multiplying
            the final FC layer weights (the gradients) against the final
            representation. This option is called 'hirescam-check'
            because for models that meet the architecture requirements this
            implementation is a 'check' on the 'hirescam' option which
            actually accesses the gradients.
            'hirescam-check' and 'hirescam' on the output of the last conv
            layer produce identical results on AxialNet as expected, since
            AxialNet is a CNN with one FC layer at the end.
    <attention_type_args>: dict; additional arguments needed to calculate
        the specified kind of attention. If the attention_type is one of the
        GradCAMs then in this dict we need to specify
        'model_name' and 'target_layer_name' (see gradcam.py for
        more documentation)
    <setname>: str; which split to use e.g. 'train' or 'val' or 'test'; will
        be passed to the <dataset_class>
    <valid_results_dir>: path to a directory that contains the validation
        set IOU analysis results. Only needed if setname=='test' because we
        need to use validation set per-label thresholds to calculate
        results.
    <custom_net>: a PyTorch model
    <custom_net_args>: dict; arguments to pass to the PyTorch model
    <params_path>: str; path to the model parameters that will be loaded in
    <stop_epoch>: int; epoch at which the model saved at <params_path> was
        saved
    <which_scans>: a pandas DataFrame specifying what scans and/or
        abnormalities to use.
        It can be an empty pandas DataFrame, in which case all available
        scans in the set will be used and named with whatever volume
        accession they were saved with (real or fake).
        Or, it can be a filled in pandas DataFrame, with columns
        ['VolumeAcc','VolumeAcc_ForOutput','Abnormality'] where
        VolumeAcc is the volume accession the scan was saved with,
        VolumeAcc_ForOutput is the volume accession that should be used in
        the file name of any output files of this module (e.g. a DEID acc),
        and Abnormality is either 'all' to save all abnormalities for that
        scan, or it's comma-separated names of specific abnormalities to
        save for that scan.
    <dataset_class>: a PyTorch dataset class
    <dataset_args>: dict; arguments to pass to the <dataset_class>"""
    self.base_results_dir = base_results_dir
    self.task = task
    # Validate requested tasks up front so bad configs fail fast.
    for specific_task in self.task:
        assert ((specific_task == 'iou_analysis')
                or (specific_task == 'blue_heatmaps')
                or (specific_task == 'attn_plots'))
    assert len(self.task) <= 2
    if 'blue_heatmaps' in self.task:
        #only allow calculation of the blue_heatmaps if we are using
        #attention_type hirescam-check. Why? Because for both the blue
        #heatmaps and the hirescam-check visualizations, we need to run
        #the model to get out. And in gradcam we need to run the model again
        #later so we get a memory error if we try to do this after getting
        #out.
        assert attention_type == 'hirescam-check'
    self.attention_type = attention_type
    assert self.attention_type in ['gradcam-vanilla','hirescam','hirescam-check']
    self.attention_type_args = attention_type_args
    if self.attention_type in ['gradcam-vanilla','hirescam']:
        # Grad-CAM variants need to know which layer to hook.
        assert 'model_name' in self.attention_type_args.keys()
        assert 'target_layer_name' in self.attention_type_args.keys()
    self.setname = setname
    self.valid_results_dir = valid_results_dir
    self.custom_net = custom_net
    self.custom_net_args = custom_net_args #dict of args
    self.params_path = params_path
    self.stop_epoch = stop_epoch
    self.which_scans = which_scans
    self.CTDatasetClass = dataset_class
    self.dataset_args = dataset_args #dict of args
    # NOTE(review): GPU 0 is hard-coded here -- confirm that is intended.
    self.device = torch.device('cuda:0')
    self.verbose = self.dataset_args['verbose'] #True or False

    #Run
    self.set_up_results_dirs(results_dir_force)
    self.run()
def set_up_results_dirs(self, results_dir_force):
    """Create (or reuse) the results directory tree for this run.

    If <results_dir_force> already exists it is used as-is; otherwise a
    new date-stamped directory is created under self.base_results_dir.
    Task-specific subdirectories are then created for whichever of
    'iou_analysis', 'blue_heatmaps', and 'attn_plots' were requested.
    """
    if os.path.isdir(results_dir_force):
        results_dir = results_dir_force
    else:
        #If you're not forcing a particular results_dir, then make a new
        #results dir:
        #Example params_path = '/home/rlb61/data/img-hiermodel2/results/2020-09/2020-09-27_AxialNet_Mask_CORRECT_dilateFalse_nearest/params/AxialNet_Mask_CORRECT_dilateFalse_nearest_epoch23'
        old_results_dir = os.path.split(os.path.split(os.path.split(self.params_path)[0])[0])[1] #e.g. '2020-09-27_AxialNet_Mask_CORRECT_dilateFalse_nearest'
        date = datetime.datetime.today().strftime('%Y-%m-%d')
        results_dir = os.path.join(self.base_results_dir,date+'_'+self.setname.capitalize()+'AttnAnalysis_of_'+old_results_dir)
    if not os.path.isdir(results_dir):
        os.mkdir(results_dir)

    #Subdirs for particular analyses:
    if 'iou_analysis' in self.task:
        self.iou_analysis_dir = os.path.join(results_dir,'iou_analysis_'+self.attention_type)
        if not os.path.exists(self.iou_analysis_dir): os.mkdir(self.iou_analysis_dir)
    if 'blue_heatmaps' in self.task:
        #Note that the blue heatmaps depend only on the model, and not on the
        #attention type
        self.blue_heatmaps_dir = os.path.join(results_dir,'blue_heatmaps')
        if not os.path.exists(self.blue_heatmaps_dir): os.mkdir(self.blue_heatmaps_dir)
    if 'attn_plots' in self.task:
        self.attn_2dplot_dir = os.path.join(results_dir,'attn_2dplot_'+self.attention_type)
        self.attn_3dgif_dir = os.path.join(results_dir,'attn_3dgif_dir_'+self.attention_type)
        for directory in [self.attn_2dplot_dir,self.attn_3dgif_dir]:
            if not os.path.exists(directory): os.mkdir(directory)
        # One subdirectory per ground-truth/prediction combination:
        # g1p1=true positive, g1p0=false negative,
        # g0p1=false positive, g0p0=true negative.
        for key in ['g1p1', 'g1p0', 'g0p1', 'g0p0']:
            if not os.path.exists(os.path.join(self.attn_2dplot_dir,key)):
                os.mkdir(os.path.join(self.attn_2dplot_dir,key))
            if not os.path.exists(os.path.join(self.attn_3dgif_dir,key)):
                os.mkdir(os.path.join(self.attn_3dgif_dir,key))
        if self.attention_type in ['gradcam-vanilla','hirescam']:
            self.gradcam_debug_dir = os.path.join(results_dir,self.attention_type+'_debug_dir')
            if not os.path.exists(self.gradcam_debug_dir): os.mkdir(self.gradcam_debug_dir)
    else: #even if attn_plots is not in task, we need to have a placeholder for
        #this directory to avoid an error later:
        self.gradcam_debug_dir = None
def run(self):
    """Execute the full analysis pipeline.

    Loads the model and dataset, resolves which scans to analyze, sets
    up any task-specific helpers (blue-heatmap baseline, IOU analysis
    object), loops over the dataset, and finalizes IOU statistics.
    """
    self.load_model()
    self.load_dataset()
    self.load_chosen_indices()
    if 'blue_heatmaps' in self.task:
        self.blue_heatmap_baseline = blue_heatmap.get_baseline(self.chosen_dataset, self.model, self.blue_heatmaps_dir)
    if 'iou_analysis' in self.task:
        # Test-set IOU uses per-label thresholds chosen on the
        # validation set, read from the validation results directory.
        thresh_perf_df_filename = 'Determine_Best_Threshold_For_Each_Label_Epoch'+str(self.stop_epoch)+'.csv'
        valid_thresh_perf_df_path = os.path.join(os.path.join(self.valid_results_dir,'iou_analysis_'+self.attention_type), thresh_perf_df_filename)
        self.iou_analysis_object = iou_analysis.DoIOUAnalysis(self.setname, self.stop_epoch,
            self.label_meanings, self.iou_analysis_dir, valid_thresh_perf_df_path)
    self.loop_over_dataset_and_labels()
    if 'iou_analysis' in self.task:
        self.iou_analysis_object.do_all_final_steps()
######################################################
# Methods to Load Model, Dataset, and Chosen Indices #----------------------
######################################################
def load_model(self):
    """Instantiate the network, load the trained weights, and switch to
    eval mode (disables dropout/batch-norm updates for inference)."""
    print('Loading model')
    self.model = self.custom_net(**self.custom_net_args).to(self.device)
    check_point = torch.load(self.params_path, map_location='cpu') #map to CPU to avoid memory issue #TODO check if you need this
    self.model.load_state_dict(check_point['params'])
    self.model.eval()
    #If everything loads correctly you will see the following message:
    #IncompatibleKeys(missing_keys=[], unexpected_keys=[])
def load_dataset(self):
    """Build the dataset for the chosen split and cache its label names."""
    print('Loading dataset')
    self.chosen_dataset = self.CTDatasetClass(setname = self.setname, **self.dataset_args)
    self.label_meanings = self.chosen_dataset.return_label_meanings()
def load_chosen_indices(self):
    """Resolve which dataset indices to analyze.

    If which_scans names specific volume accessions, map each one to its
    integer index in the dataset; otherwise select every available scan.
    Either way, the result is an integer 'ChosenIndex' column on
    self.which_scans.
    """
    print('Loading chosen indices')
    if len([x for x in self.which_scans.columns.values.tolist() if x in ['VolumeAcc','VolumeAcc_ForOutput','Abnormality']])==3:
        #you did specify which scans to use, so figure out what indices
        #you need to query in the dataset to get those chosen scans:
        for df_idx in range(self.which_scans.shape[0]):
            volume_acc = self.which_scans.at[df_idx,'VolumeAcc']
            self.which_scans.at[df_idx,'ChosenIndex'] = np.where(self.chosen_dataset.volume_accessions == volume_acc)[0][0]
    else:
        # NOTE(review): comparing against a fresh empty DataFrame makes
        # .all().all() vacuously True only when which_scans is itself
        # empty; a non-empty frame with unexpected columns would raise
        # from the comparison instead -- confirm this is intended.
        assert (self.which_scans == pd.DataFrame()).all().all()
        #you didn't specify which scans to use, so use all the scans in the dataset
        self.which_scans['ChosenIndex'] = [x for x in range(len(self.chosen_dataset))]
    self.which_scans['ChosenIndex'] = self.which_scans['ChosenIndex'].astype('int')
###########
# Looping #-----------------------------------------------------------------
###########
def loop_over_dataset_and_labels(self):
    """Main analysis loop: for every selected scan, and every abnormality
    label in each outcome group (TP/FN/FP/TN), compute the attention volume
    and feed it to the IOU analysis and/or the 2D/3D attention plots,
    depending on self.task.
    """
    if (self.task == ['iou_analysis'] and self.iou_analysis_object.loaded_from_existing_file):
        return #don't need to loop again if iou_wide_df already created
    print('Looping over dataset and labels')
    #Progress is reported every ~5% of scans; max(1, ...) avoids modulo-zero.
    five_percent = max(1,int(0.05*self.which_scans.shape[0]))
    #Iterate through the examples in the dataset. df_idx is an integer
    for df_idx in range(self.which_scans.shape[0]):
        if self.verbose: print('Starting df_idx',df_idx)
        idx = self.which_scans.at[df_idx,'ChosenIndex'] #int, e.g. 5
        example = self.chosen_dataset[idx]
        ctvol = example['data'].unsqueeze(0).to(self.device) #unsqueeze to create a batch dimension. out shape [1, 135, 3, 420, 420]
        gr_truth = example['gr_truth'].cpu().data.numpy() #out shape [80]
        volume_acc = example['volume_acc'] #this is a string, e.g. 'RHAA12345_5.npz'
        attn_gr_truth = example['attn_gr_truth'].data.cpu().numpy() #out shape [80, 135, 6, 6]
        #Get out and x_perslice_scores when using attention_type hirescam-check
        #(out is None for the other attention types).
        out = self.get_out_and_blue_heatmaps(ctvol, gr_truth, volume_acc)
        if self.verbose: print('Analyzing',volume_acc)
        #volume_acc sanity check and conversion to FAKE volume acc if indicated
        if 'VolumeAcc' in self.which_scans.columns.values.tolist():
            intended_volume_acc = self.which_scans.at[df_idx,'VolumeAcc']
            assert volume_acc == intended_volume_acc
            #Now, because which_scans is not empty, you can switch volume_acc
            #from the actual volume acc e.g. RHAA12345_6 to the fake ID,
            #because from here onwards, the volume acc is only used in file
            #names:
            volume_acc = self.which_scans.at[df_idx,'VolumeAcc_ForOutput'].replace('.npz','').replace('.npy','') #e.g. fake ID 'val12345'
        #Now organize the labels for this particular image that you want to
        #make heatmap visualizations for into g1p1, g1p0, g0p1, and g0p0
        #g1p1=true positive, g1p0=false negative, g0p1=false positive, g0p0=true negative
        #we pass in volume_acc twice because the variable volume_acc could
        #be fake OR real, depending on the preceding logic, but
        #example['volume_acc'] is guaranteed to always be real.
        label_indices_dict = make_label_indices_dict(volume_acc, example['volume_acc'], gr_truth, self.params_path, self.label_meanings)
        for key in ['g1p1', 'g1p0', 'g0p1', 'g0p0']:
            chosen_label_indices = label_indices_dict[key] #e.g. [32, 37, 43, 46, 49, 56, 60, 62, 64, 67, 68, 71]
            if (('Abnormality' not in self.which_scans.columns.values.tolist()) or (self.which_scans.at[df_idx,'Abnormality'] == 'all')): #plot ALL abnormalities
                pass
            else: #plot only chosen abnormalities
                chosen_abnormalities = self.which_scans.at[df_idx,'Abnormality'].split(',')
                chosen_label_indices = [x for x in chosen_label_indices if self.label_meanings[x] in chosen_abnormalities]
            #Calculate label-specific attn and make label-specific attn figs
            for chosen_label_index in chosen_label_indices:
                #Get label_name and seg_gr_truth:
                label_name = self.label_meanings[chosen_label_index] #e.g. 'lung_atelectasis'
                seg_gr_truth = attn_gr_truth[chosen_label_index,:,:,:] #out shape [135, 6, 6]
                #segprediction is the raw attention. slice_idx is the index of
                #the slice with the highest raw score for this label
                segprediction, x_perslice_scores_this_disease = self.return_segprediction(out, ctvol, gr_truth, volume_acc, chosen_label_index) #out shape [135, 6, 6]
                segprediction_clipped_and_normed = clip_and_norm_volume(segprediction)
                if 'iou_analysis' in self.task:
                    if key in ['g1p1','g1p0']: #TODO: implement IOU analysis for other options! also make this more efficient so no excessive calculations are done
                        if self.verbose: print('Adding example to IOU analysis')
                        self.iou_analysis_object.add_this_example_to_iou_wide_df(segprediction_clipped_and_normed,
                            seg_gr_truth, volume_acc, label_name, num_labels_this_ct=int(gr_truth.sum()))
                if 'attn_plots' in self.task:
                    if self.verbose: print('Making 2D and 3D attn figures')
                    make_2d_plot_and_3d_gif.plot_attn_over_ct_scan(ctvol,
                        segprediction_clipped_and_normed, x_perslice_scores_this_disease, volume_acc,
                        label_name, os.path.join(self.attn_2dplot_dir,key), os.path.join(self.attn_3dgif_dir,key))
        #Report progress
        if df_idx % five_percent == 0:
            print('Done with',df_idx,'=',round(100*df_idx/self.which_scans.shape[0],2),'%')
        #Free per-scan tensors/arrays before the next iteration to keep
        #GPU/CPU memory usage flat over the loop.
        del example, ctvol, gr_truth, volume_acc, attn_gr_truth, out
def get_out_and_blue_heatmaps(self, ctvol, gr_truth, volume_acc):
    """Run the model once per scan when attention_type is 'hirescam-check'
    and optionally emit the per-slice ("blue") heatmap figure.

    Running here, outside the per-label loop, avoids re-running a fixed model
    on the same CT scan for every label. For TP/FP/TN/FN bookkeeping the
    pre-calculated predicted probabilities are used instead of a second
    forward pass.

    Returns the model output dict for 'hirescam-check' (per the original
    notes: out['out'] scores [1,80], out['disease_reps'] attention
    [80,135,16,6,6], out['x_perslice_scores'] [1,80,135]), or None for any
    other attention type.
    """
    if self.attention_type != 'hirescam-check':
        return None
    out = self.model(ctvol)
    if 'blue_heatmaps' in self.task:
        if self.verbose:
            print('Making blue heatmap')
        perslice_scores = out['x_perslice_scores'].cpu().data.numpy()
        blue_heatmap.visualize_slicediseases(
            out['out'], gr_truth, perslice_scores,
            volume_acc, self.blue_heatmaps_dir, self.label_meanings,
            self.blue_heatmap_baseline)
    return out
def return_segprediction(self, out, ctvol, gr_truth, volume_acc, chosen_label_index):
    """Return the <segprediction> which is a volume of scores for a particular
    label.

    Dispatches on self.attention_type:
      * 'hirescam-check': extract the label's attention from the precomputed
        disease representations in <out>;
      * 'gradcam-vanilla' / 'hirescam': run Grad-CAM on the fly (a 2d
        debugging figure is also saved when 'make_figure' is in self.task).

    Raises:
        ValueError: if self.attention_type is not one of the recognized
            options. Previously an unrecognized value fell through and
            implicitly returned None, which the caller then failed to unpack
            into (segprediction, x_perslice_scores) with a confusing
            TypeError far from the misconfiguration.
    """
    if self.attention_type == 'hirescam-check':
        return extract_disease_reps.return_segprediction_from_disease_rep(out, chosen_label_index)
    elif self.attention_type in ['gradcam-vanilla','hirescam']:
        #note that if 'make_figure' is in self.task, then a 2d debugging
        #figure for Grad-CAM will also be saved in this step
        return gradcam.RunGradCAM(self.attention_type, self.model, self.device,
            self.label_meanings, self.gradcam_debug_dir, self.task,
            **self.attention_type_args).return_segprediction_from_grad_cam(ctvol, gr_truth, volume_acc, chosen_label_index)
    else:
        raise ValueError('Unknown attention_type: '+str(self.attention_type))
def make_label_indices_dict(possibly_fake_volume_acc, real_volume_acc, gr_truth, params_path, label_meanings):
    """Based on the <gr_truth> and the predicted probability that was
    pre-calculated, figure out which abnormalities are true positives (g1p1),
    false negatives (g1p0), false positives (g0p1), and true negatives (g0p0).
    g stands for ground truth and p stands for predicted probability.
    The predicted probabilities are read in from the predicted probabilities
    that were saved from the final model when it was done training.
    The path for these is inferred from params_path based on known
    directory structure. We also need to use this pre-calculated file because
    we need to get the median predicted probability for each abnormality.
    The predicted probabilities are binarized as 0 or 1 according to being
    above or below the median (50th percentile) for that abnormality.
    Returns a dictionary with keys g1p1, g1p0, g0p1, and g0p0
    and values that are numpy arrays of numeric indices of the corresponding
    abnormalities e.g. array([32, 37, 64, 67, 68, 71])"""
    #Infer paths to the precomputed pred probs based on known directory organization:
    #e.g. precomputed_path = '/home/rlb61/data/img-hiermodel2/results/results_2019-2020/2020-10/2020-10-09_WHOLEDATA_BodyAvg_Baseline_FreshStart/pred_probs'
    precomputed_path = os.path.join(os.path.split(os.path.split(params_path)[0])[0],'pred_probs')
    files = os.listdir(precomputed_path) #e.g. ['valid_grtruth_ep4.csv', 'valid_predprob_ep4.csv']
    #[0] takes the first matching file; assumes exactly one predprob and one
    #grtruth csv live in pred_probs (raises IndexError if missing).
    pred_probs_file = [x for x in files if 'predprob' in x][0] #e.g. 'valid_predprob_ep4.csv'
    gr_truth_file = [x for x in files if 'grtruth' in x][0] #e.g. 'valid_grtruth_ep4.csv'
    #Open the pred probs and gr truth for this data subset
    #Each of them has volume accesions as the index, and abnormalities as
    #the columns. Example shape: [2085,80]
    pred_probs_all = pd.read_csv(os.path.join(precomputed_path, pred_probs_file),header=0,index_col=0)
    gr_truth_all = pd.read_csv(os.path.join(precomputed_path, gr_truth_file),header=0,index_col=0)
    #Sanity checks: columns must line up with label_meanings, and the saved
    #ground truth row for this scan must match the gr_truth passed in.
    for df in [pred_probs_all, gr_truth_all]:
        assert df.columns.values.tolist()==label_meanings
    assert (gr_truth_all.loc[real_volume_acc,:]==gr_truth).all()
    #Calculate the medians of the different abnormalities across the whole
    #data subset.
    medians = np.median(pred_probs_all,axis=0) #np array, e.g. shape [80]
    #Select out the predicted probabilities for just this scan
    pred_probs = pred_probs_all.loc[real_volume_acc,:] #pd Series w abn labels and float values, e.g. shape [80]
    #Get binary vector that's equal to 1 if the corresponding abnormality
    #has a pred prob greater than the median (>= means ties count as 1)
    pred_probs_geq = (pred_probs >= medians).astype('int') #pd Series w abn labels and binary int values, e.g. shape [80]
    #Now divide up the abnormalities for this particular CT scan based on whether
    #they are above or below the median pred prob, and whether the gr truth
    #is 1 or 0
    g0p0 = np.intersect1d(np.where(gr_truth==0)[0], np.where(pred_probs_geq==0)[0])
    g0p1 = np.intersect1d(np.where(gr_truth==0)[0], np.where(pred_probs_geq==1)[0])
    g1p0 = np.intersect1d(np.where(gr_truth==1)[0], np.where(pred_probs_geq==0)[0])
    g1p1 = np.intersect1d(np.where(gr_truth==1)[0], np.where(pred_probs_geq==1)[0])
    #Checks: the four groups must partition all the labels exactly.
    assert len(g1p0)+len(g1p1)==int(gr_truth.sum())
    assert len(g0p0)+len(g0p1)+len(g1p0)+len(g1p1)==len(gr_truth)
    label_indices_dict = {'g0p0':g0p0.tolist(),
        'g0p1':g0p1.tolist(),
        'g1p0':g1p0.tolist(),
        'g1p1':g1p1.tolist()}
    #uncomment the next line to print detailed info to the terminal:
    #print_for_future_reference(params_path, label_indices_dict, possibly_fake_volume_acc, pred_probs, medians, label_meanings)
    return label_indices_dict
def print_for_future_reference(params_path, label_indices_dict, possibly_fake_volume_acc, pred_probs, medians, label_meanings):
    """Print one tab-separated line per (outcome group, abnormality) pair.

    Each line contains: model description (last path component of
    params_path), the volume accession, the group key (g0p0/g0p1/g1p0/g1p1),
    the label name, the predicted probability rounded to 4 places, the
    literal string 'median:', and the rounded median for that label.
    """
    model_description = os.path.split(params_path)[1]
    for key, indices in label_indices_dict.items():
        for idx in indices:
            fields = [
                model_description,
                possibly_fake_volume_acc,
                key,
                label_meanings[idx],
                str(round(pred_probs[idx], 4)),
                'median:',
                str(round(medians[idx], 4)),
            ]
            print('\t'.join(fields))
#############
# Functions #-------------------------------------------------------------------
#############
def clip_and_norm_volume(volume):
    """Clip negatives to zero, then shift/scale the volume into [0, 1].

    Equivalent to a ReLU followed by min-max normalization. If the clipped
    and shifted volume is constant (max == 0), the division is skipped and
    the shifted volume is returned unchanged (all zeros).
    """
    rectified = np.clip(volume, 0, None)  # elementwise max(volume, 0)
    shifted = rectified - np.min(rectified)
    peak = np.max(shifted)
    if peak != 0:
        shifted = shifted / peak
    return shifted
| rachellea/explainable-ct-ai | src/run_attn_analysis.py | run_attn_analysis.py | py | 26,139 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
27070910668 | import datetime as dt
import random
import pytest
from scheduler import Scheduler, SchedulerError
from scheduler.base.definition import JobType
from scheduler.threading.job import Job
from ...helpers import foo
@pytest.mark.parametrize(
    "empty_set",
    [
        False,
        True,
    ],
)
@pytest.mark.parametrize(
    "any_tag",
    [
        None,
        False,
        True,
    ],
)
@pytest.mark.parametrize(
    "n_jobs",
    [
        0,
        1,
        2,
        3,
        10,
    ],
)
def test_get_all_jobs(n_jobs, any_tag, empty_set):
    """get_jobs must return every scheduled job when no tags are selected,
    regardless of whether an empty tag set and/or any_tag is passed."""
    sch = Scheduler()
    assert len(sch.jobs) == 0

    # Schedule n_jobs one-shot jobs.
    for _ in range(n_jobs):
        sch.once(dt.datetime.now(), foo)
    assert len(sch.jobs) == n_jobs

    # Build the get_jobs call from the parametrized options instead of
    # enumerating every branch explicitly.
    kwargs = {}
    if not empty_set:
        kwargs["tags"] = {}
    if any_tag is not None:
        kwargs["any_tag"] = any_tag
    jobs = sch.get_jobs(**kwargs)

    assert len(jobs) == n_jobs
# Each case: [tag sets of three scheduled jobs], selection tags, any_tag flag,
# and the expected membership of each job in the get_jobs result
# (any_tag=True -> job matches if it shares ANY tag; False -> must contain ALL).
@pytest.mark.parametrize(
    "job_tags, select_tags, any_tag, returned",
    [
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "1"}],
            {"a", "1"},
            True,
            [True, True, True],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "2"}],
            {"b", "1"},
            True,
            [True, True, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "1"}],
            {"3"},
            True,
            [False, True, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "2"}],
            {"2", "3"},
            True,
            [False, True, True],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "1"}],
            {"a", "1"},
            False,
            [False, False, True],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"a", "2"}],
            {"b", "1"},
            False,
            [False, False, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "1"}],
            {"1", "3"},
            False,
            [False, True, False],
        ],
        [
            [{"a", "b"}, {"1", "2", "3"}, {"b", "2"}],
            {"2", "3"},
            False,
            [False, True, False],
        ],
    ],
)
def test_get_tagged_jobs(job_tags, select_tags, any_tag, returned):
    """get_jobs(tags=..., any_tag=...) returns exactly the jobs whose tag
    sets match the selection under the given matching mode."""
    sch = Scheduler()
    #Schedule one immediate no-op job per tag set.
    jobs = [sch.once(dt.timedelta(), lambda: None, tags=tags) for tags in job_tags]
    res = sch.get_jobs(tags=select_tags, any_tag=any_tag)
    #Check expected membership job-by-job.
    for job, ret in zip(jobs, returned):
        if ret:
            assert job in res
        else:
            assert job not in res
| DigonIO/scheduler | tests/threading/scheduler/test_sch_get_jobs.py | test_sch_get_jobs.py | py | 2,720 | python | en | code | 51 | github-code | 6 | [
{
"api_name": "scheduler.Scheduler",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "helpers.foo",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.now",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
71548544188 | import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from collections import OrderedDict
from modules import CompactBasicBlock, BasicBlock, Bottleneck, DAPPM, segmenthead, GhostBottleneck
bn_mom = 0.1
BatchNorm2d = nn.BatchNorm2d
class CompactDualResNet(nn.Module):
    """Dual-resolution segmentation backbone (DDRNet-style) using compact and
    ghost blocks.

    A low-resolution context branch (layer1..layer5 + DAPPM spatial pooling)
    runs alongside a high-resolution detail branch (layer3_/layer4_/layer5_);
    the branches exchange information via compression3/4 (context -> detail)
    and down3/4 (detail -> context). When ``augment`` is True an auxiliary
    segmentation head is attached for deep supervision and ``forward``
    returns ``[aux_out, main_out]``; otherwise only the main output.
    """

    def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=True):
        # block: residual block class for stages 1-3 and the detail branch;
        # layers: number of blocks per stage, e.g. [2, 2, 2, 2].
        super(CompactDualResNet, self).__init__()
        highres_planes = planes * 2
        self.augment = augment
        # Stem: two stride-2 convs -> 1/4 input resolution.
        # NOTE(review): the BatchNorm layers here are deliberately commented
        # out in the original source.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),
            #BatchNorm2d(planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),
            #BatchNorm2d(planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
        self.relu = nn.ReLU(inplace=False)
        # Low-resolution (context) branch stages.
        self.layer1 = self._make_layer(block, planes, planes, layers[0])
        self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)
        # Stage 4 always uses CompactBasicBlock regardless of `block`.
        self.layer4 = self._make_layer(CompactBasicBlock, planes * 4, planes * 8, layers[3], stride=2)
        # 1x1 projections from context width down to detail-branch width,
        # used before adding context features into the detail branch.
        self.compression3 = nn.Sequential(
            nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=False),
            BatchNorm2d(highres_planes, momentum=bn_mom),
        )
        self.compression4 = nn.Sequential(
            nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=False),
            BatchNorm2d(highres_planes, momentum=bn_mom),
        )
        # Strided convs that bring detail features down to context resolution.
        self.down3 = nn.Sequential(
            nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),
            BatchNorm2d(planes * 4, momentum=bn_mom),
        )
        self.down4 = nn.Sequential(
            nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),
            BatchNorm2d(planes * 4, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=False),
            BatchNorm2d(planes * 8, momentum=bn_mom),
        )
        # High-resolution (detail) branch stages.
        self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 2)
        self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 2)
        # Final stage of each branch uses a GhostBottleneck.
        self.layer5_ = self._make_ghost_bottleneck(GhostBottleneck, highres_planes , highres_planes, 1)
        self.layer5 = self._make_ghost_bottleneck(GhostBottleneck, planes * 8, planes * 8, 1, stride=2)
        # Deep aggregation pyramid pooling on the context branch output.
        self.spp = DAPPM(planes * 16, spp_planes, planes * 4)
        if self.augment:
            self.seghead_extra = segmenthead(highres_planes, head_planes, num_classes)
        self.final_layer = segmenthead(planes * 4, head_planes, num_classes)
        # Standard He init for convs; BN weights/biases to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; a 1x1-conv downsample is added
        when shape changes. The last block is built with no_relu=True so the
        caller controls the activation between stages."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                    kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            if i == (blocks-1):
                layers.append(block(inplanes, planes, stride=1, no_relu=True))
            else:
                layers.append(block(inplanes, planes, stride=1, no_relu=False))
        return nn.Sequential(*layers)

    def _make_divisible(self, v, divisor, min_value=None):
        """Round `v` to the nearest multiple of `divisor` (at least
        min_value), never dropping more than 10% below `v` (the standard
        channel-rounding helper from efficient-network codebases)."""
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def _make_ghost_bottleneck(self, block, inplanes, planes, blocks, stride=1):
        """Build a single GhostBottleneck stage.

        NOTE(review): `blocks` is unused and the stride inside cfg is fixed
        at 1; the `stride`/shape mismatch only controls whether the output
        width is doubled — confirm this is intended.
        """
        if stride != 1 or inplanes != planes * 2:
            out_channel = planes * 2
        else:
            out_channel = planes
        cfg = [[3, 96, out_channel, 0, 1]] # k, t, c, SE, s
        input_channel = inplanes
        layers = []
        for k, exp_size, c, se_ratio, s in cfg:
            output_channel = c
            hidden_channel = self._make_divisible(exp_size, 4)
            layers.append(block(input_channel, hidden_channel, output_channel, k, s, se_ratio=se_ratio))
            input_channel = output_channel
        return nn.Sequential(*layers)

    def forward(self, x):
        # Both branches produce feature maps at 1/8 of the input size.
        width_output = x.shape[-1] // 8
        height_output = x.shape[-2] // 8
        layers = []

        x = self.conv1(x)
        x = self.layer1(x)
        layers.append(x)
        x = self.layer2(self.relu(x))
        layers.append(x)
        x = self.layer3(self.relu(x))
        layers.append(x)
        # Detail branch starts from the stage-2 feature map.
        x_ = self.layer3_(self.relu(layers[1]))
        # Bilateral fusion #1: detail -> context (down3) and
        # context -> detail (compression3, upsampled to detail size).
        x = x + self.down3(self.relu(x_))
        x_ = x_ + F.interpolate(
            self.compression3(self.relu(layers[2])),
            size=[height_output, width_output],
            mode='bilinear')
        if self.augment:
            temp = x_  # pre-fusion detail features feed the auxiliary head
        x = self.layer4(self.relu(x))
        layers.append(x)
        x_ = self.layer4_(self.relu(x_))
        # Bilateral fusion #2.
        x = x + self.down4(self.relu(x_))
        x_ = x_ + F.interpolate(
            self.compression4(self.relu(layers[3])),
            size=[height_output, width_output],
            mode='bilinear')
        x_ = self.layer5_(self.relu(x_))
        # Context branch tail: ghost bottleneck + DAPPM, upsampled back to
        # the detail resolution before the final sum.
        x = F.interpolate(
            self.spp(self.layer5(self.relu(x))),
            size=[height_output, width_output],
            mode='bilinear')
        x_ = self.final_layer(x + x_)

        if self.augment:
            x_extra = self.seghead_extra(temp)
            return [x_extra, x_]
        else:
            return x_
def get_seg_model(cfg, **kwargs):
    """Factory returning a CompactDualResNet configured for Cityscapes-style
    segmentation (19 classes) with deep supervision enabled.

    NOTE(review): `cfg` and `kwargs` are accepted for interface compatibility
    with the training framework but are currently ignored — every
    hyperparameter is hard-coded below; confirm this is intentional.
    """
    model = CompactDualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64, augment=True)
    return model
if __name__ == '__main__':
    # Inference speed benchmark: warm up, auto-calibrate an iteration count
    # that runs for >= 1 second, then time a longer run and report FPS.
    import time
    device = torch.device('cuda')
    #torch.backends.cudnn.enabled = True
    #torch.backends.cudnn.benchmark = True
    model = CompactDualResNet(BasicBlock, [2, 2, 2, 2], num_classes=11, planes=32, spp_planes=128, head_planes=64)
    model.eval()
    model.to(device)
    iterations = None
    #input = torch.randn(1, 3, 1024, 2048).cuda()
    input = torch.randn(1, 3, 720, 960).cuda()
    with torch.no_grad():
        # Warm-up passes (cudnn autotuning, allocator, JIT caches).
        for _ in range(10):
            model(input)

        if iterations is None:
            # Double the iteration count until one timed batch exceeds 1s,
            # then size the real run to ~6 seconds worth of iterations.
            elapsed_time = 0
            iterations = 100
            while elapsed_time < 1:
                # synchronize before and after timing so only GPU work
                # between t_start and time.time() is measured
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                t_start = time.time()
                for _ in range(iterations):
                    model(input)
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                elapsed_time = time.time() - t_start
                iterations *= 2
            FPS = iterations / elapsed_time
            iterations = int(FPS * 6)

        print('=========Speed Testing=========')
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        t_start = time.time()
        for _ in range(iterations):
            model(input)
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        latency = elapsed_time / iterations * 1000  # ms per forward pass
        torch.cuda.empty_cache()
        FPS = 1000 / latency
        print(FPS)
| himlen1990/cddrnet | utils/speed_test/cddrnet_eval_speed.py | cddrnet_eval_speed.py | py | 8,667 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
... |
968977222 | import pyodbc
import pandas as pd
# Connection steps to the server
from OnlineBankingPortalCSV2_code import Accounts, Customer
# Connection steps to the server
# NOTE(review/security): credentials are hard-coded in source; move them to
# environment variables or a config file before sharing/deploying.
server = 'LAPTOP-SELQSNPH'
database = 'sai'
username = 'maram'
password = 'dima2k21'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
cursor = cnxn.cursor()

# import data from csv
data = pd.read_csv (r'C:\Users\maram\PycharmProjects\pythonProject\OnlineBankingPortal_data_file3.csv')

# Transactions table: build surrogate Transaction_id per (date, card) group.
Transactions = pd.DataFrame(data, columns = ['Transaction_id','Acc_number','Transaction_type_code','Transaction_type_desc','Transaction_date','Card_number'])
Transactions = Transactions.astype('str')
Transactions['Transaction_id']=Transactions.groupby(['Transaction_date','Card_number'],sort=False).ngroup()+300
# Merge data inorder to get the required Id's (Accounts comes from the
# companion module OnlineBankingPortalCSV2_code)
Merge_Transactions_Accounts=pd.merge(Transactions,Accounts,on='Acc_number')
# NOTE(review): this assumes the merge preserves row order/count 1:1 with
# Transactions — verify Acc_number is unique in Accounts.
Transactions['Account_id']=Merge_Transactions_Accounts.Account_id
Transactions['Customer_id']=Merge_Transactions_Accounts.Customer_id
print(Transactions)
Transactions['Transaction_date'] = Transactions['Transaction_date'].astype('datetime64[ns]')

# Cards table: surrogate Card_id per card number, drop rows with no card.
Cards = pd.DataFrame(data, columns = ['Acc_number','Card_id','Card_number','Maximum_limit','Expiry_Date','Credit_score'])
Cards = Cards.astype('str')
Cards['Expiry_Date']= Cards['Expiry_Date'].astype('datetime64[ns]')
# Merge data inorder to get the required Id's
Merge_Cards_Accounts=pd.merge(Cards,Accounts,on='Acc_number')
Cards['Customer_id']=Merge_Cards_Accounts.Customer_id
# 'nan' string comparison works because the frame was cast to str above.
Cards = Cards[Cards.Card_number != 'nan']
Cards['Card_id'] = Cards.groupby(['Card_number'],sort=False).ngroup()+400
Cards = Cards.drop_duplicates(subset=None, keep="first", inplace=False)
# Convert Credit score and Maximum limit from string->float->int
Cards['Credit_score']=Cards['Credit_score'].astype(float)
Cards['Credit_score']=Cards['Credit_score'].astype(int)
Cards['Maximum_limit']=Cards['Maximum_limit'].astype(float)
Cards['Maximum_limit']=Cards['Maximum_limit'].astype(int)
print(Cards)

# Transaction_details Table
Transaction_details = pd.DataFrame(data, columns = ['Transaction_Amount','Merchant_details','Acc_number','Transaction_date'])
Transaction_details = Transaction_details.astype('str')
# Merge data inorder to get the required Id's
# NOTE(review): concat stacks Transactions ABOVE Transaction_details, so the
# .Transaction_id values taken below come from the Transactions rows by
# position — confirm both frames have identical row counts and ordering.
Merge_Transaction_details_Transactions=pd.concat([Transactions,Transaction_details], ignore_index=True)
Transaction_details['Transaction_id']=Merge_Transaction_details_Transactions.Transaction_id
# Convert Transaction_id from string->float->int
Transaction_details['Transaction_id']=Transaction_details['Transaction_id'].astype(float)
Transaction_details['Transaction_id']=Transaction_details['Transaction_id'].astype(int)
print(Transaction_details)

# inserting data into tables (parameterized queries — safe from injection)
for row in Transactions.itertuples():
    cursor.execute('''
                INSERT INTO Transactions (Customer_id,Account_id,Acc_number,Transaction_type_code,Transaction_type_desc,Transaction_date)
                VALUES (?,?,?,?,?,?)
                ''',
                row.Customer_id,
                row.Account_id,
                row.Acc_number,
                row.Transaction_type_code,
                row.Transaction_type_desc,
                row.Transaction_date,
                )
for row in Cards.itertuples():
    cursor.execute('''
                INSERT INTO Cards (Customer_id,Acc_number,Card_number,Maximum_limit,Expiry_Date,Credit_score)
                VALUES (?,?,?,?,?,?)
                ''',
                row.Customer_id,
                row.Acc_number,
                row.Card_number,
                row.Maximum_limit,
                row.Expiry_Date,
                row.Credit_score
                )
for row in Transaction_details.itertuples():
    cursor.execute('''
                INSERT INTO Transaction_details (Transaction_id,Transaction_Amount,Merchant_details,Acc_number)
                VALUES (?,?,?,?)
                ''',
                row.Transaction_id,
                row.Transaction_Amount,
                row.Merchant_details,
                row.Acc_number
                )
cnxn.commit() | divyamaram/Database-Managment-systems | OnlineBankingPortalCSV3_code.py | OnlineBankingPortalCSV3_code.py | py | 4,255 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyodbc.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"l... |
35035790893 | import csv
import json
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
from math import ceil
from wand.image import Image as WImage
from subprocess import Popen
def make_json(csvFilePath, keyName, alldata):
    """Load a tab-separated file into `alldata[keyName]` and return the whole
    accumulator serialized as a JSON string.

    The file's FIRST line is discarded (assumed to be a title row); the
    second line is taken as the header. Rows are keyed by their 'CATEGORY'
    column, so later rows with a duplicate CATEGORY overwrite earlier ones.
    `alldata` is mutated in place.
    """
    with open(csvFilePath, encoding='utf-8') as handle:
        next(handle)  # skip the extra first line before the real header
        reader = csv.DictReader(handle, delimiter='\t')
        records = {row['CATEGORY']: row for row in reader}
    alldata[keyName] = records
    return json.dumps(alldata)
def plots(Sample,file,normal,listSample):
    """Display per-sample image files (via Wand/ImageMagick) in a matplotlib
    figure: a 3-column grid of all samples when Sample=='ALL' and normal is
    False, otherwise a single panel for one sample.

    NOTE(review): original indentation was lost in this copy; the placement
    of the trailing imshow/axis calls in the else branch is reconstructed and
    should be checked against the source repository.
    """
    #listSample = [row[1] for row in batch]
    rows = []
    # hard-coded data root for the ABT414 flank analysis outputs
    path = "/storage/gluster/vol1/data/PUBLIC/SCAMBIO/ABT414_WES_Analysis/ABT414_Flank/ABT414_Flank/"
    if Sample == 'ALL' and not(normal):
        ROWS = 3
        COLS = ceil(np.size(listSample)/ROWS)
        fig = plt.figure(figsize = (20, 15))
        for row in range(ROWS):
            cols = []
            for col in range(COLS):
                index = row * COLS + col
                if index<np.size(listSample):
                    img = WImage(filename=path+listSample[index]+file)
                    a = fig.add_subplot(COLS, ROWS, index+1)
                    plt.axis('off')
                    plt.grid(b=None)
                    imgplot = plt.imshow(img)
                    a.set_title(listSample[index])
    else:
        fig = plt.figure(figsize = (15, 10))
        a = fig.add_subplot(1, 1, 1)
        if not(normal):
            # Sample is one of the tumour samples listed in listSample
            index = listSample.index(Sample)
            img = WImage(filename=path+listSample[index]+file)
            a.set_title(listSample[index])
        else:
            # normal/reference sample: filename comes directly from Sample
            img = WImage(filename=path+Sample+file)
            imgplot = plt.imshow(img)
        plt.axis('off')
        plt.grid(b=None)
        imgplot = plt.imshow(img)
def multiPage(Sample,file,page,normal,listSample):
    """Display one page of a multi-page image file (e.g. a PDF) for a single
    sample, using Wand's "filename[page]" page-selection syntax.

    `page` is 1-based for the caller and converted to 0-based here.
    """
    page = page-1
    #listSample = [row[1] for row in batch]
    path = "/storage/gluster/vol1/data/PUBLIC/SCAMBIO/ABT414_WES_Analysis/ABT414_Flank/ABT414_Flank/"
    fig = plt.figure(figsize = (20, 15))
    a = fig.add_subplot(1, 1, 1)
    if not(normal):
        index = listSample.index(Sample)
        img = WImage(filename=path+listSample[index]+file+"["+str(page)+"]")
        a.set_title(listSample[index])
    else:
        img = WImage(filename=path+Sample+file+"["+str(page)+"]")
        imgplot = plt.imshow(img)
    plt.axis('off')
    plt.grid(b=None)
    imgplot = plt.imshow(img)
def tableShow(Sample,file, cols,listSample):
    """Print selected columns of per-sample tab-separated result files as
    formatted tables (first row of each file is treated as the header).

    Sample: a sample name, or 'ALL' to print every sample in listSample.
    cols: iterable of 0-based column indices to keep from each row.
    """
    path = "/storage/gluster/vol1/data/PUBLIC/SCAMBIO/ABT414_WES_Analysis/ABT414_Flank/ABT414_Flank/"
    if Sample == 'ALL':
        for index in range(np.size(listSample)):
            print('\n'+listSample[index]+'\n')
            table = []
            filePath = path+listSample[index]+file
            with open (filePath, 'r') as f:
                for row in csv.reader(f,delimiter='\t'):
                    # skip blank/malformed single-field lines
                    if np.size(row)>1:
                        content = [row[i] for i in cols]
                        table.append(content)
            print(tabulate(table,headers="firstrow"))
    else:
        print(Sample+'\n')
        table = []
        filePath = path+Sample+file
        with open (filePath, 'r') as f:
            for row in csv.reader(f,delimiter='\t'):
                if np.size(row)>1:
                    content = [row[i] for i in cols]
                    table.append(content)
        print(tabulate(table,headers="firstrow"))
def commandsParallel(commands, commdsSize, commdsParallel):
    """Run shell commands in parallel batches of size `commdsParallel`,
    waiting for each batch to finish before starting the next.

    commands: list of shell command strings (passed to Popen with shell=True,
        so they must be trusted input).
    commdsSize: total number of commands (len(commands)).
    commdsParallel: requested parallelism; clamped to commdsSize.
    """
    if commdsSize == 0:
        # Nothing to run; the previous version divided by zero here.
        return
    if commdsParallel > commdsSize:
        commdsParallel = commdsSize
    print ("Numbers of samples in parallel: "+ str(commdsParallel))
    itersPar = ceil(commdsSize/commdsParallel)
    print("Numbers of iterations: "+ str(itersPar))
    for i in range(itersPar):
        # Slice the batch instead of indexing blindly: the previous version
        # caught IndexError around the whole Popen comprehension, which
        # discarded every process handle of a partial final batch (those
        # children were never waited on) and could leave `processes`
        # undefined on the very first iteration.
        batch = commands[i * commdsParallel:(i + 1) * commdsParallel]
        processes = [Popen(cmd, shell=True) for cmd in batch]
        exitcodes = [p.wait() for p in processes]
exitcodes = [p.wait() for p in processes] | miccec/ExomePipeline | interactPlots.py | interactPlots.py | py | 4,422 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.DictReader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 4... |
28160427846 | import asyncio
from time import time
from httpx import RequestError
from loguru import logger
from src.client import IteriosApiClient
from src.exceptions import FailedResponseError
from src.helpers import (
get_random_country, get_random_dep_city, get_search_start_payload, get_timing_results,
setup_logger,
)
from src.settings import settings
async def start_search(index: int):
    """Fire one stress-test search request against the Iterios API.

    Picks a random country and departure city, fetches the main reference
    for that pair, builds the search payload and starts the search.

    Returns (index, elapsed_seconds) on success, or (index, None) when the
    API client raises FailedResponseError or the transport raises
    httpx.RequestError.
    """
    logger.info(f'Start search #{index}')
    start_time = time()
    try:
        async with IteriosApiClient() as client:
            country = get_random_country()
            dep_city = get_random_dep_city()
            main_reference = await client.get_main_reference(
                country_iso=country['iso_code'], dep_city_id=dep_city['id'],
            )
            payload = get_search_start_payload(
                country_id=country['id'], dep_city_id=dep_city['id'], main_reference=main_reference,
            )
            await client.start_search(payload)
    except (FailedResponseError, RequestError) as error:
        logger.error(f'Fail search #{index} ({repr(error)})')
        return index, None
    elapsed_time = round(time() - start_time, 2)
    logger.info(f'Finish search #{index} in {elapsed_time}s')
    return index, elapsed_time
async def main():
    """Launch settings.request_count concurrent searches, then log per-request
    timings (with deltas between consecutive successful requests) and an
    aggregate summary."""
    logger.info(f'Test with {settings.request_count} requests')
    requests = [
        start_search(index)
        for index in range(1, settings.request_count + 1)
    ]
    # All requests run concurrently; gather preserves submission order.
    timings = await asyncio.gather(*requests)

    last_time = None
    for timing in timings:
        index, elapsed_time = timing
        # elapsed_time is None for failed requests (falsy check covers it)
        if not elapsed_time:
            logger.info(f'#{index} - fail')
            continue
        if last_time:
            difference = round(elapsed_time - last_time, 2)
            logger.info(f'#{index} - {elapsed_time}s ({difference:+}s)')
        else:
            logger.info(f'#{index} - {elapsed_time}s')
        last_time = elapsed_time

    elapsed_times = [timing[1] for timing in timings]
    results = get_timing_results(elapsed_times)
    logger.info(f"Results: min({results['min']}s), max({results['max']}s), average({results['average']}s), fails({results['failed']}/{results['total']})") # noqa: E501
if __name__ == '__main__':
    # Configure loguru sinks before starting the event loop.
    setup_logger()
    asyncio.run(main())
| qwanysh/iterios-stress | start_search.py | start_search.py | py | 2,281 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "loguru.logger.info",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "src.client.IteriosApiClien... |
5838127346 | from datetime import datetime
from maico.sensor.stream import Confluence
from maico.sensor.targets.human import Human
from maico.sensor.targets.human_feature import MoveStatistics
from maico.sensor.targets.first_action import FirstActionFeature
import maico.sensor.streams.human_stream as hs
class OneToManyStream(Confluence):
    """Confluence that joins the latest Human detection with aggregated move
    statistics and emits a FirstActionFeature describing that person's
    initial behaviour (staying time + moving rates/speed)."""

    # Kinect camera frame rate (frames per second).
    KINECT_FPS = 30
    # Frames aggregated into one "move" observation.
    FRAMES_FOR_MOVE = 15
    # Move observations aggregated into one statistics sample.
    MOVES_FOR_STAT = 4

    def __init__(self, human_stream):
        # Timestamp of the first Human seen; basis for staying_time.
        self._observation_begin = None
        # hyper parameters (it will be arguments in future)
        self.move_threshold = 0.1 # above this speed, human act to move (not searching items)
        self.move_stream = hs.MoveStream(human_stream, self.FRAMES_FOR_MOVE, self.KINECT_FPS, self.move_threshold)
        self.move_stat_stream = hs.MoveStatisticsStream(self.move_stream, self.MOVES_FOR_STAT)
        super(OneToManyStream, self).__init__(human_stream, self.move_stat_stream)

    def notify(self, target):
        """Pool incoming targets by type; once one Human and one
        MoveStatistics are both present, merge them, push the feature
        downstream and reset the pool."""
        key = target.__class__
        if key is Human:
            self._pool[key] = [target] # store only 1 (latest) human
            if self._observation_begin is None:
                self._observation_begin = datetime.utcnow() # remember first human
        else:
            if key not in self._pool:
                self._pool[key] = []
            self._pool[key].append(target)
        if self.is_activated():
            t = self.merge()
            self.out_stream.push(t)
            self.reset()

    def is_activated(self):
        """True when exactly one Human and one MoveStatistics are pooled.

        NOTE(review): the local `hs` shadows the module alias
        `maico.sensor.streams.human_stream as hs` within this method.
        """
        hs = self.get(Human)
        stats = self.get(MoveStatistics)
        if len(hs) == 1 and len(stats) == 1:
            return True
        else:
            return False

    def merge(self):
        """Combine the pooled Human and MoveStatistics into a
        FirstActionFeature; rates are normalized by observed seconds."""
        h = self.get(Human)[0]
        stat = self.get(MoveStatistics)[0]
        staying_time = (datetime.utcnow() - self._observation_begin).total_seconds()
        feature = FirstActionFeature(
            _id=h._id,
            staying_time=staying_time,
            mean_moving_rate=stat.moving_time.sum_ / stat.seconds.sum_,
            max_moving_rate=stat.moving_time.max_ / stat.seconds.mean_,
            min_moving_rate=stat.moving_time.min_ / stat.seconds.mean_,
            mean_moving_speed=stat.moving_speed.mean_
        )
        return feature
| tech-sketch/maico | maico/sensor/streams/one_to_many_stream.py | one_to_many_stream.py | py | 2,289 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "maico.sensor.stream.Confluence",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "maico.sensor.streams.human_stream.MoveStream",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "maico.sensor.streams.human_stream",
"line_number": 21,
"usage_... |
11948273979 | #!/usr/bin/python3.8
# -*- coding: utf-8 -*-
#
# SuperDrive
# a live processing capable, clean(-ish) implementation of lane &
# path detection based on comma.ai's SuperCombo neural network model
#
# @NamoDev
#
# ============================================================================ #
# Parse arguments
import os
import warnings
import argparse
apr = argparse.ArgumentParser(description = "Predicts lane line and vehicle path using the SuperCombo neural network!")
apr.add_argument("--input", type=str, dest="inputFile", help="Input capture device or video file", required=True)
apr.add_argument("--disable-gpu", dest="disableGPU", action="store_true", help="Disables the use of GPU for inferencing")
apr.add_argument("--disable-warnings", dest="disableWarnings", action="store_true", help="Disables console warning messages")
apr.add_argument("--show-opencv-window", dest="showOpenCVVisualization", action="store_true", help="Shows OpenCV frame visualization")
args = apr.parse_args()
# Where are we reading from?
CAMERA_DEVICE = str(args.inputFile)
# Do we want to disable GPU?
if args.disableGPU == True:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Do we want to disable warning messages?
if args.disableWarnings == True:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
warnings.filterwarnings("ignore")
# ============================================================================ #
import cv2
import sys
import time
import pathlib
import numpy as np
import tensorflow as tf
from parser import parser
import savitzkygolay as sg
from undistort.undistort import undistort
from timeit import default_timer as timer
# OpenPilot transformations (needed to get the model to output correct results)
from common.transformations.model import medmodel_intrinsics
from common.transformations.camera import transform_img, eon_intrinsics
# Are we running TF on GPU?
if tf.test.is_gpu_available() == True:
isGPU = True
tfDevice = "GPU"
else:
isGPU = False
tfDevice = "CPU"
# Initialize undistort
undist = undistort(frame_width=560, frame_height=315)
# Initialize OpenCV capture and set basic parameters
cap = cv2.VideoCapture(CAMERA_DEVICE)
cap.set(3, 1280)
cap.set(4, 720)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
# Load Keras model for lane detection
#
# path = [y_pos of path plan along x=range(0,192) |
# std of y_pos of path plan along x=range(0,192) |
# how many meters it can see]
# 12 * 128 * 256 is 2 consecutive imgs in YUV space of size 256 * 512
lanedetector = tf.keras.models.load_model(str(pathlib.Path(__file__).parent.absolute()) + "/supercombo.keras")
# We need a place to keep two separate consecutive image frames
# since that's what SuperCombo uses
fr0 = np.zeros((384, 512), dtype=np.uint8)
fr1 = np.zeros((384, 512), dtype=np.uint8)
# SuperCombo requires a feedback of state after each prediction
# (to improve accuracy?) so we'll allocate space for that
state = np.zeros((1, 512))
# Additional inputs to the steering model
#
# "Those actions are already there, we call it desire.
# It's how the lane changes work" - @Willem from Comma
#
# Note: not implemented in SuperDrive (yet)
desire = np.zeros((1, 8))
# We want to keep track of our FPS rate, so here's
# some variables to do that
fpsActual = 0;
fpsCounter = 0;
fpsTimestamp = 0;
# OpenCV named windows for visualization (if requested)
cv2.namedWindow("SuperDrive", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("Vision path", cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow("Vision path", 200, 500)
# Main loop here
while True:
# Get frame start time
t_frameStart = timer()
# FPS counter logic
fpsCounter += 1
if int(time.time()) > fpsTimestamp:
fpsActual = fpsCounter
fpsTimestamp = int(time.time())
fpsCounter = 0
# Read frame
(ret, frame) = cap.read()
# Resize incoming frame to smaller size (to save resource in undistortion)
frame = cv2.resize(frame, (560, 315))
# Undistort incoming frame
# This is standard OpenCV undistortion using a calibration matrix.
# In this case, a Logitech C920 is used (default for undistortion helper).
# Just perform chessboard calibration to get the matrices!
frame = undist.frame(frame)
# Crop the edges out and try to get to (512,256), since that's what
# the SuperCombo model uses. Note that this is skewed a bit more
# to the sky, since my camera can "see" the hood and that probably won't
# help us in the task of lane detection, so we crop that out
frame = frame[14:270, 24:536]
# Then we want to convert this to YUV
frameYUV = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
# Use Comma's transformation to get our frame into a format that SuperCombo likes
frameYUV = transform_img(frameYUV, from_intr=eon_intrinsics,
to_intr=medmodel_intrinsics, yuv=True,
output_size=(512, 256)).astype(np.float32) \
/ 128.0 - 1.0
# We want to push our image in fr1 to fr0, and replace fr1 with
# the current frame (to feed into the network)
fr0 = fr1
fr1 = frameYUV
# SuperCombo input shape is (12, 128, 256): two consecutive images
# in YUV space. We concatenate fr0 and fr1 together to get to that
networkInput = np.concatenate((fr0, fr1))
# We then want to reshape this into the shape the network requires
networkInput = networkInput.reshape((1, 12, 128, 256))
# Build actual input combination
input = [networkInput, desire, state]
# Then, we can run the prediction!
# TODO: this is somehow very slow(?)
networkOutput = lanedetector.predict(input)
# Parse output and refeed state
parsed = parser(networkOutput)
state = networkOutput[-1]
# Now we have all the points!
# These correspond to points with x = <data in here>, y = range from
# 0 to 192 (output of model)
leftLanePoints = parsed["lll"][0]
rightLanePoints = parsed["rll"][0]
pathPoints = parsed["path"][0]
# We may also want to smooth this out
leftLanePoints = sg.savitzky_golay(leftLanePoints, 51, 3)
rightLanePoints = sg.savitzky_golay(rightLanePoints, 51, 3)
pathPoints = sg.savitzky_golay(pathPoints, 51, 3)
# Compute position on current lane
currentPredictedPos = (-1) * pathPoints[0]
# Compute running time
p_totalFrameTime = round((timer() - t_frameStart) * 1000, 2)
print("Frame processed on " + tfDevice + " \t" + str(p_totalFrameTime) + " ms\t" + str(fpsActual) + " fps")
# Output (enlarged) frame with text overlay
if args.showOpenCVVisualization == True:
canvas = frame.copy()
canvas = cv2.resize(canvas, ((700, 350)))
cv2.putText(canvas, "Vision processing time: " + str(p_totalFrameTime) + " ms (" + str(fpsActual) + " fps)", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
cv2.putText(canvas, "Device: " + tfDevice, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
cv2.putText(canvas, "Position: " + str(round(currentPredictedPos, 3)) + " m off centerline", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
# Create canvas for graph plotting
plotCanvas = np.zeros((500, 200, 3), dtype=np.uint8)
# Plot points!
ppmY = 10
ppmX = 20
# We know we can only display 500 / ppmY = 50 meters ahead
# so limiting our loop will allow for a faster processing time
for i in range(51):
cv2.circle(plotCanvas, (int(100 - abs(leftLanePoints[i] * ppmX)), int(i * ppmY)), 2, (160, 160, 160), -1)
cv2.circle(plotCanvas, (int(100 + abs(rightLanePoints[i] * ppmX)), int(i * ppmY)), 2, (160, 160, 160), -1)
cv2.circle(plotCanvas, (int(100 - (pathPoints[i] * ppmX)), int(i * ppmY)), 4, (10, 255, 10), -1)
# Flip plot path for display
plotCanvas = cv2.flip(plotCanvas, 0)
# Add some texts for distance
cv2.putText(plotCanvas, "0 m", (10, 490), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
cv2.putText(plotCanvas, "10 m", (10, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
cv2.putText(plotCanvas, "20 m", (10, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
cv2.putText(plotCanvas, "30 m", (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
cv2.putText(plotCanvas, "40 m", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
cv2.putText(plotCanvas, "50 m", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (200,200,200), 1)
cv2.imshow("SuperDrive", canvas)
cv2.imshow("Vision path", plotCanvas)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
| kaishijeng/SuperDrive | drive.py | drive.py | py | 8,715 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
... |
28039146623 | #! /usr/bin/env python3
__author__ = 'Amirhossein Kargaran 9429523 '
import os
import sys
import socket
import pickle
import select
import signal
import threading
import time
from threading import Thread
from datetime import datetime
# Local modules
from APIs.logging import Log
from APIs.logging import Color
from APIs.security import *
from Crypto.Random import random
from filelock import FileLock
file_path = "result.txt"
lock_path = "result.txt.lock"
lock = FileLock(lock_path, timeout=1)
# Declare Global variables
PORT = 5558
TERMINATE = False
CLI_HASH = {}
KEY = ''
ll = list()
class Server():
def __init__(self):
self.HOST_IP = '0.0.0.0'
self.HOST_PORT = '8081'
self.MAX_USR_ACCPT = '100'
def show_help(self):
msg = '''
AVAILABLE COMMANDS:
\h Print these information
\d Set default configuration
\sd Show default configuration
\sc Show current configuration
\sau Show active users
\sac Show active chat rooms
\sf Shutdown server forcefully
\monitor Enables monitor mode'''
print(msg)
def show_config(self, type_='default'):
if type_ in ('active', 'ACTIVE'):
msg = '''
Active configuration of the server :
HOST IP = ''' + self.HOST_IP + '''
HOST PORT = ''' + self.HOST_PORT + '''
MAX USER ALLOWED = ''' + self.MAX_USR_ACCPT
logging.log('Showing Active server configuration')
print(msg)
else:
msg = '''
Default configuration of the server:
HOST IP = 0.0.0.0
HOST PORT = 8081
MAX USER ALLOWED = 100'''
print(msg)
def set_usr_config(self, parameters):
if parameters:
if sys.argv[1] in ('-h', '--help'):
self.show_help()
try:
self.HOST_IP = sys.argv[1]
self.HOST_PORT = sys.argv[2]
self.MAX_USR_ACCPT = sys.argv[3]
except:
print('USAGE:\nscript ip_address port_number max_usr_accpt')
sys.exit(0)
else:
self.HOST_IP = input('Enter host IP : ')
self.HOST_PORT = input('Enter host PORT : ')
self.MAX_USR_ACCPT = input('Enter max number of users server would accept : ')
def update_active_users(self):
self.user_list = []
for cli_obj in CLI_HASH.values():
self.user_list.append(cli_obj.userName)
def signal_handler(self, signal, frame):
print(' has been pressed.\n')
def srv_prompt(self):
# TODO: Add feature to view server socket status
global TERMINATE
while True:
opt = input(Color.PURPLE + '\nenter command $ ' + Color.ENDC)
if opt == '\h':
self.show_help()
elif opt == '\monitor':
print('Monitoring mode ENABLED!')
logging.silent_flag = False
signal.signal(signal.SIGINT, self.signal_handler)
signal.pause()
print('Monitoring mode DISABLED')
logging.silent_flag = True
elif opt == '\sd':
self.show_config(type_='default')
elif opt == '\sc':
self.show_config(type_='active')
elif opt == '\sau':
self.update_active_users()
logging.log(self.user_list)
print(self.user_list)
elif opt == '\sf':
print(Color.WARNING +
'WARNING: All users will be disconnected with out any notification!!' +
Color.ENDC)
opt = input('Do you really want to close server?[Y/N] ')
if opt == 'Y':
logging.log('Shuting down server...')
print('Shuting down server...')
TERMINATE = True
sys.exit(0)
else:
logging.log('Aborted.')
print('Aborted.')
pass
elif opt == '':
pass
else:
print('COMMAND NOT FOUND!!')
def init_clients(self):
global CLI_HASH
while not TERMINATE:
try:
self.server.settimeout(1)
conn, addr = self.server.accept()
except socket.timeout:
pass
except Exception as e:
raise e
else:
logging.log(
'A connection from [{}.{}] has been received.'.format(
addr[0], addr[1]))
cli_obj = Client(conn, addr, self)
CLI_HASH[conn] = cli_obj
threading._start_new_thread(cli_obj.run, ('',))
try:
print('Server has stopped listening on opened socket.')
print('Broadcasting connection termination signal..')
msg = "Sorry! We are unable to serve at this moment."
for cli_socket in CLI_HASH.keys():
try:
cli_socket.send(msg.encode())
except:
cli_socket.close()
CLI_HASH.pop(cli_socket)
except:
pass
def init(self):
logging.log('Initializing server')
if len(sys.argv) == 1:
self.show_config(type_='default')
opt = input('Set these default config?[Y/n] ')
if opt == '':
opt = 'Y'
if opt in ('Y', 'y', 'yes', 'Yes', 'YES'):
print("Setting up default configurations...")
else:
self.set_usr_config(parameters=False)
else:
self.set_usr_config(parameters=True)
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.server.bind((self.HOST_IP, int(self.HOST_PORT)))
self.server.listen(int(self.MAX_USR_ACCPT))
except:
print('Unable to bind HOST IP and PORT.\nPlease check your configuration')
sys.exit('EMERGENCY')
print('\nServer is listening at {}:{}'.format(self.HOST_IP, self.HOST_PORT))
print('Server is configured to accept %s clients.' %(str(self.MAX_USR_ACCPT)))
#thread_srv = threading.Thread(target=self.srv_prompt, args=())
thread_cli = threading.Thread(target=self.init_clients, args=())
thread_cli.start()
self.srv_prompt()
for thread in (thread_srv, thread_cli):
thread.join()
print('Server and Client threads are exited.')
class Client():
def __init__(self, conn, addr, srv_obj):
global PORT
self.srv_obj = srv_obj
self.conn = conn
self.addr = addr
self.userName = '-N/A-'
self.PUBLIC_KEY = None
self.KEY = ''
self.items_file='result.txt'
self.port = PORT
PORT = PORT +1
self.EnSharedKey =""
def validate_user(self):
pass
def features(self, msg):
if msg == '@getonline':
self._loop_break_flag = True
self.conn.send(
AES_.encrypt(self.KEY, str(self.srv_obj.user_list)))
if msg.split()[0][1:] in self.srv_obj.user_list:
self._loop_break_flag = True
for _conn in CLI_HASH:
if CLI_HASH[_conn].userName == msg.split()[0][1:]:
try:
self.IND_SOCK = _conn
msg_send = "<" + self.userName + "@" + self.addr[0] +\
"> [IND] " + ' '.join(msg.split()[1:])
self.broadcast(msg_send, IND_FLAG=True)
except Exception as e:
logging.log(msg_type='EXCEPTION', msg=e)
def getSharedKey(self):
TOKEN_CHAR_LIST = "abcdefghij!@#$%"
# Generate unique symmetric 10bit key for each client
passphrase = ''.join(random.sample(TOKEN_CHAR_LIST, 10))
shared_key = hasher(passphrase)
EnSharedKey = RSA_.encrypt(self.PUBLIC_KEY, shared_key)
if EnSharedKey:
return (shared_key, EnSharedKey)
else:
logging.log("Unable to encrypt shared key with RSA.", msg_type='ERROR')
def result(self , *args):
file = open(self.items_file,"r")
fileList = file.readlines()
file.close()
self.broadcast(fileList)
def time1 (self):
self.sock.listen(1)
flag = 1
try :
while True:
print('waiting for a connection')
connection, client_address = self.sock.accept()
try:
print('connection from', client_address)
while True:
data = connection.recv(64)
if flag == 1 :
self.Token, self.STRTOKEN = pickle.loads(data)
if data:
if (self.Token == self.KEY and self.STRTOKEN=="TOKEN") :
print("This user is Valid")
flag = 0
else:
print("This user is not Valid")
connection.close()
return
else :
if data.decode()=="bye" :
try:
with lock.acquire(timeout=10):
wfile = open(self.items_file, 'w+')
for ilist in ll:
wfile.write(str(ilist) + "\n")
wfile.close()
lock.release()
except :
print("Another instance of this application currently holds the lock.")
if data :
print(str(self.userName)+ " : " + str(data.decode()))
ll.append(str(self.userName)+ " : " + str(data.decode()))
else:
return
finally:
connection.close()
except :
"what the fuck ?"
def time2 (self):
while True:
try:
self._loop_break_flag = False
msg = self.conn.recv(20000)
if msg:
if msg.split()[0][0] == '@':
self.srv_obj.update_active_users()
self.features(msg)
if not self._loop_break_flag:
self.result()
else:
self.remove()
pass
except Exception as e:
logging.log(msg_type='EXCEPTION', msg='[{}] {}'.format(self.userName, e))
def run(self, *args):
data = self.conn.recv(4000)
if data:
self.userName, self.PUBLIC_KEY = pickle.loads(data)
if self.PUBLIC_KEY:
self.KEY, self.EnSharedKey = self.getSharedKey()
else:
tmp_conn = "{}:{}".format(self.addr[0], self.addr[1])
logging.log(
"Public key has not been received from [{}@{}]".format(
self.userName, tmp_conn))
logging.log(
"[0.0.0.0:8081 --> {}] Socket has been terminated ".format(tmp_conn))
self.remove()
if self.KEY == '':
logging.log("Symmetric key generation failed")
tmp_msg = "symmetric key {} has been sent to {}".format(self.KEY, self.userName)
logging.log(tmp_msg)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_hostname = socket.gethostname()
local_fqdn = socket.getfqdn()
ip_address = socket.gethostbyname(local_hostname)
print("working on %s (%s) with %s" % (local_hostname, local_fqdn, ip_address))
server_address = (ip_address, self.port)
print('starting up on %s port %s' % server_address)
self.sock.bind(server_address)
EnSharedKey = (self.port , self.EnSharedKey)
EnSharedKey = pickle.dumps(EnSharedKey)
self.conn.send(EnSharedKey)
Thread(target=self.time1()).start()
Thread(target=self.time2()).start()
def broadcast(self, msg, IND_FLAG=False):
msg = pickle.dumps(msg)
if IND_FLAG:
self.IND_SOCK.send(msg)
return
for cli_socket in CLI_HASH.keys():
if 1==1 :
try:
cli_socket.send(msg)
except:
raise Exception
cli_socket.close()
self.remove()
def remove(self):
if self.conn in CLI_HASH.keys():
self.conn.close()
CLI_HASH.pop(self.conn)
self.srv_obj.update_active_users()
print(self.srv_obj.user_list)
sys.exit()
if __name__ == "__main__":
try:
logging = Log(f_name='server_chatroom_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
logging.logging_flag = True
logging.silent_flag = True
logging.validate_file()
server = Server()
server.init()
except SystemExit as e:
if e.code != 'EMERGENCY':
raise
else:
print(sys.exc_info())
print('Something went wrong!!\nPlease contact developers.')
os._exit(1)
except:
raise Exception
print('Something went wrong!!\nPlease contact developers\nTerminating the process forcefully..')
time.sleep(1)
os._exit(1)
| kargaranamir/Operating-Systems | Project II/Code/chatServer.py | chatServer.py | py | 14,141 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "filelock.FileLock",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_nu... |
73357295548 | from .MainDataToICS import MainDataToICS
from .WebJWC import WebJWC
import time
import os
from hashlib import md5
import random
import json
def getData(id,password):
web = WebJWC(id,password)
print('TOPO1')
web.runDriver()
time.sleep(1)
print('TOPO2')
web.loginIn()
time.sleep(1)
print('TOPO3')
web.getBody()
time.sleep(1)
print('TOPO4')
web.dataInBs4()
print('TOPO4')
web.close()
def makeIcs(id,year,month,day):
test = MainDataToICS(id,year,month,day)
log = test.makeIcs()
data = ''
for i in log:
for k,v in i.items():
data += '%s:%s \n'%(k,v)
data+='\n'
data = '导入失败数:%d\n'%len(log)+'请手动导入以下课程:\n%s'%data
return data
def makeApi(id):
with open('./CQUClassICS/res/jsonData/user.json','r',encoding='utf-8') as fp:
SQ = json.load(fp)
fp.close()
if id not in SQ[0].keys():
SQ[0][id]=str(random.randint(1,1<<16))
with open('./CQUClassICS/res/jsonData/user.json','w',encoding='utf-8') as fp:
json.dump(SQ,fp,ensure_ascii=False)
fp.close()
with open('./CQUClassICS/res/icsData/%s.ics'%id,'rb') as fp:
data = fp.read()
md5v = md5()
md5v.update((id+SQ[0][id]).encode('utf8'))
ids = md5v.hexdigest()
open('./CQUClassICS/res/api/%s.ics'%ids,'wb').write(data)
return ids
def test():
print(os.path.abspath('Event.py'))
print(os.path.abspath('')) | CQU-CSA/CQUScheduleCalendar | DjangoICS/CQUClassICS/src/MainICS.py | MainICS.py | py | 1,500 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "WebJWC.WebJWC",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 1... |
37213848810 | from collections import Counter, defaultdict
import pandas as pd
import os
import csv
import json
# get phoneme features from PHOIBLE
# note the path is resolved-phoible.csv that is corrected for mismatches between phonemes in PHOIBLE and the XPF Corpus
phoneme_features = pd.read_csv("Data/resolved-phoible.csv")
phoneme_features.drop(["InventoryID", "Glottocode","ISO6393","LanguageName","SpecificDialect","GlyphID","Allophones","Marginal","Source"], axis="columns", inplace=True)
phoneme_features = phoneme_features.rename(columns={'periodicGlottalSource':'voice'})
# list of all feature names in PHOIBLE table
features = phoneme_features.copy()
features.drop(["Phoneme","voice"],axis="columns", inplace=True)
features = features.columns.values.tolist()
# global variables
to_feat = {} #dictonary of phoneme: feature representation
phon_model = {} #dictionary of feature representation: {possible phonemes: # of occurrences}
def change_to_feat(phoneme, previous):
'''
Takes in a character string representing the IPA form of the phoneme and returns a feature representation of the phoneme based on PHOIBLE features
Input: phoneme - character string representing current phoneme
next - character string representing phoneme that follows
Output: feature representation of the phoneme - character string ('feature1/[+,-,NA]|feature2/[+,-,NA]|etc...')
each feature name/value pair is joined with '/' while separate feat/value pairs are joined with '|'
can split the string representation using these characters
'''
global to_feat
global phon_model
# create and add feature representation to to_feat dictionary if not already in it
if to_feat.get(phoneme) is None:
row = phoneme_features[phoneme_features["Phoneme"] == phoneme]
feat = []
#creates feature representations for only obstruents
if not row.empty:
if row["sonorant"].values.tolist()[0] == '-':
for f in features:
t = row[f].values.tolist()[0]
feat.append(t+'/'+f)
feat = '|'.join(feat)
to_feat[phoneme] = feat
else:
to_feat[phoneme] = phoneme
else:
to_feat[phoneme] = phoneme
#get feature
feat = to_feat.get(phoneme)
if previous != '':
#context
con = " ".join([previous, feat])
#add feature to phoneme model if it doesn't already exist
if phon_model.get(con) is None:
phon_model[con] = defaultdict(int)
# increment occurrence in phoneme model
phon_model[con][phoneme] += 1
return feat
def nphone_model(wordseglist, n=4, wordlen=8):
'''
Create n-gram models for the given word list of phonemes.
Params:
- wordseglist: a list of words, where each word is a list of a string of the IPA representation
such as [["b a"], ["d o"]]
- n: Number of preceding segments in context
- wordlen: Maximum length of words to use, including the word-initial and word-final tokens
Returns:
- consonant_vowel: A dictionary representing the CV n-gram model. Each key is a string representing
the context (perfect representation of n segments). Each value is another dictionary,
where the keys are whether the next segment is consonant, vowel, or word-final token,
and the values are the counts.
- consonant: A dictionary representing the consonant n-gram model. Each key is a string representing
the context (imperfect representation of n segments). Each value is another dictionary,
where the keys are the next consonant, and the values are the counts.
- vowel: A dictionary representing the vowel n-gram model. Each key is a string representing
the context (perfect representation of n segments). Each value is another dictionary,
where the keys are the next vowel, and the values are the counts.
'''
model = {}
prev_context = []
for word in wordseglist: # each word is a list of exactly one string, the word
prev_context = ['[_w'] # start of word
prev_phon = {}
# don't use words that aren't perfectly translated to IPA
if '@' in word.split(" "):
continue
# don't use words that aren't the same length as generated words
# n - 1 because [_w is included in generated words
# wordlen - 2 because both [_w and ]_w are included in generated words
if len(word.split(" ")) < (n - 1) or len(word.split(" ")) > (wordlen - 2):
continue
word = word.replace(" ː", "ː")
prev_p = ''
str_context = ''
for phoneme in word.split(" "):
if len(prev_context) == n:
prev_context.insert(0,prev_p)
f = []
for i in range(len(prev_context)-1):
f.append(change_to_feat(prev_context[i+1],prev_context[i]))
#con.extend(prev_context)
# if prev_context[0] == "[_w":
# f = ['[_w']
# for i in range(len(prev_context)-1):
# f.append(change_to_feat(prev_context[i+1],prev_context[i]))
# else:
# con = [prev_phon[" ".join(prev_context)]]
# con.extend(prev_context)
# f = []
# for i in range(len(prev_context)-1):
# f.append(change_to_feat(prev_context[i+1],prev_context[i]))
str_context = " ".join(f)
if model.get(str_context) is None:
model[str_context] = defaultdict(int)
model[str_context][phoneme] += 1
prev_context.pop(0)
prev_p = prev_context[0]
prev_context.pop(0) # remove earliest segment from context
# update context
prev_context.append(phoneme)
if len(prev_context) == n:
prev_phon[" ".join(prev_context)] = prev_p
# add word-final context once you've reached the end of the word
# remove voicing information at end of the word
if len(prev_context) >= n:
f = []
for i in range(len(prev_context)):
if i==0:
f.append(change_to_feat(prev_context[i],prev_phon[" ".join(prev_context)]))
else:
f.append(change_to_feat(prev_context[i],prev_context[i-1]))
str_context = " ".join(f)
if model.get(str_context) is None:
model[str_context] = defaultdict(int)
model[str_context][']_w'] += 1
return model
def main():
'''
NOTE: this file handles reading in data differently
#TODO: write down what code creates the word list used for this
'''
global to_feat
global phon_model
word_lists = []
lang_codes = []
identity ='5000_3' ##TODO: change this depending on inputs to translate04.py
f_name = "Data/word_list"+identity+".tsv"
# READ IN THE WORD LIST
tsv_file = open(f_name)
read_tsv = csv.reader(tsv_file, delimiter="\t")
for line in read_tsv:
line[1]=line[1].strip('\n')
word_lists.append(line)
# SPLIT LIST PER LANGUAGE
word_lists = word_lists[1:]
split_list = {}
l = []
for i in range(len(word_lists)):
lang_code = word_lists[i][0]
if split_list.get(lang_code) is None:
split_list[lang_code] = [word_lists[i][1]]
else:
split_list[lang_code].append(word_lists[i][1])
# GO THROUGH EACH LANGUAGE (can adjust the word length here)
for lang in split_list:
print(lang)
lang_codes.append(lang)
curr_list = split_list[lang]
model = nphone_model(curr_list,wordlen=10)
outfile = "./Data/utf8_ngram_models/"
if not os.path.exists(outfile):
os.mkdir(outfile)
for key, value in model.items():
k = key.split(" ")
if len(k) != 4:
print('oh no :(')
# save output model
with open(outfile + lang + "_model.json", 'w+', encoding='utf8') as fout:
json.dump(model, fout, ensure_ascii=False)
# CHANGE phon_model from # occurrence to probability
for feat in phon_model:
total = sum(phon_model.get(feat).values(),0.0)
phon_model[feat] = {k: v / total for k,v in phon_model.get(feat).items()}
# save phon_model
with open(outfile + lang + "_phon_model.json", 'w+', encoding='utf8') as fout:
json.dump(phon_model, fout, ensure_ascii=False)
# save feature conversion dict
with open(outfile + lang + "_to_feat.json", 'w+', encoding='utf8') as fout:
json.dump(to_feat, fout, ensure_ascii=False)
# reset to_feat and phon_model after each language
to_feat = {}
phon_model = {}
# save a list of all language codes used in this analysis
o_name = "Data/lang_codes" + identity + ".tsv"
with open(o_name, 'w+', newline='') as f:
write = csv.writer(f, delimiter="\t")
write.writerows(lang_codes)
return None
if __name__ == "__main__":
main() | daniela-wiepert/XPF-soft-constraints | FD/Code/ngram_model_fd.py | ngram_model_fd.py | py | 9,512 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "colle... |
25538067967 | import streamlit as st
import pandas as pd
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
hide_st_style = """
<style>
footer {visibility: hidden;}
#MainMenu {visibility: hidden;}
header {visibility: hidden;}
#stException {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
import preprocessor, helper
#df2 = pd.read_csv("athlete_events.csv")
df = pd.read_csv('athlete_events.csv')
region_df = pd.read_csv('noc_regions.csv')
process_data = preprocessor.preprocess(df, region_df)
st.sidebar.image("https://i.ibb.co/mDH38WV/olympics-logo.png")
st.sidebar.title("Olympics Analysis")
user_menu = st.sidebar.radio(
'Select an option ',
('Overall Analysis','Medal Tally','country-wise-analysis','athlete-wise-analysis' )
)
st.sidebar.write(' ##### Developed by Somnath Paul')
# default home page display
# if user_menu radio button is
if user_menu == 'Medal Tally':
# year & country
year, country = helper.country_year_list(df,region_df)
# check box for year selection
selected_year = st.sidebar.selectbox("select year", year)
selected_country = st.sidebar.selectbox("select country", country)
# fetch dataframe for selected options
medal_df, title = helper.fetch_medal_tally(selected_year, selected_country, df, region_df,)
# display dataframe
st.title(title)
st.dataframe(medal_df)
elif user_menu == 'Overall Analysis':
cities, len_cities, country, len_countries, events, len_of_events, sports, len_of_sports, year, len_of_year, athletes, len_of_athletes = helper.overall_analysis(df, region_df)
st.title("STATISTICS :")
# first column
col1, col2= st.columns(2)
with col1:
st.write(""" ### Hosted Counties""")
st.title(len_cities)
with col2:
st.write(""" ### Counties Participated """)
st.title(len_countries)
# second columns
col1, col2, col3, col4 = st.columns(4)
with col1:
st.write("""### Sports""")
st.title(len_of_sports)
with col2:
st.write(""" ### Events""")
st.title(len_of_events)
with col3:
st.write(""" ### Editions""")
st.title(len_of_year)
with col4:
st.write(""" ### Athletes""")
st.title(len_of_athletes)
# graph 1
# number of countries participated
df_10 = helper.graph_1(df, region_df)
fig = px.line(df_10, x="Year", y="Count")
st.title("Countries participated in each year")
st.plotly_chart(fig)
# graph 2
# number of sports played in each year
df_11 = helper.graph_2(df, region_df)
fig = px.line(df_11, x="Year", y="Count")
st.title("Sports played in each year")
st.plotly_chart(fig)
# graph 3
# number of events played in each year
# events has many under one sport
df_12 = helper.graph_3(df, region_df)
fig = px.line(df_12, x="Year", y="Count")
st.title("Events played in each year")
st.plotly_chart(fig)
# graph 4 : heatmap
x_1 = helper.graph_4(df, region_df)
fig = px.imshow(x_1)
st.title("Over the year how many events played / sports")
st.plotly_chart(fig)
# table 2:
top_players = helper.table_2(df, region_df)
st.title("Top 10 player won medals")
st.dataframe(top_players.head(10))
elif user_menu == 'country-wise-analysis':
countries = helper.countries(df, region_df)
countries.insert(0, 'Not Selected')
options = st.selectbox("Select country",countries)
if options == 'Not Selected':
st.error('Please select country')
else:
df_13= helper.country_wise_analysis(df, region_df, options)
# line chart
fig = px.line(df_13, x='Year', y='Medal')
st.subheader(f'Number of medals won by {options} over the year')
st.plotly_chart(fig)
df_20 = helper.countries_good_at(df, region_df, options)
st.subheader(f'Medals won by {options} under different sports')
st.dataframe(df_20)
df_30 = helper.player_good_at_by_countries(df, region_df, options)
st.subheader(f'Medals won by players for {options}')
st.dataframe(df_30)
else:
# athletics wise analysis
x1, x2, x3, x4 = helper.pdf_histogram(process_data)
# histogram (PDF) of age in plotly
import plotly.figure_factory as ff
gl=['Gold player age', 'Silver player age', 'Bronze player age', 'Overall player age']
fig = ff.create_distplot([x1, x2, x3, x4], show_hist=False, show_rug=False, group_labels=gl)
st.title("Athlete Wise Analysis")
st.write(""" #### Age - Medals wise analysis :""")
st.plotly_chart(fig)
st.write(""" #### Player who won gold [ weight - height ]:""")
height_gold, weight_gold, height_silver,weight_silver, height_bronze,weight_bronze = helper.Player_who_won_gold(process_data)
plt.scatter(height_gold,weight_gold,color='gold')
plt.scatter(height_silver,weight_silver ,color='lightsteelblue')
plt.scatter(height_bronze,weight_bronze ,color='lavender')
plt.legend(["Gold" , "Silver", "Bronze"], bbox_to_anchor = (1 , 1))
st.pyplot(plt)
# Men vs Women participation over the years plot
df_73, df_74 = helper.Men_Women_participation(process_data)
st.write("### Men vs Women participation over the years")
plt.figure(figsize=(8,5))
plt.plot( df_73['Year'], df_73['Sex'], color='olive')
plt.plot( df_74['Year'], df_74['Sex'])
plt.legend(["Male" , "Female"], bbox_to_anchor = (1 , 1))
st.pyplot(plt)
# athletics age sport wise analysis
sports = process_data['Sport'].unique().tolist()
sports.insert(0, 'Not Selected')
sport = st.selectbox("Select a sport",sports)
if sport == 'Not Selected':
st.error('Please select sport')
else:
y1 = helper.age_histogram_sports(process_data, sport)
# labels
gl=[sport]
st.write(""" #### Age - sport wise analysis :""")
fig = ff.create_distplot([y1], show_hist=False, show_rug=False, group_labels=gl)
st.plotly_chart(fig)
| Somnathpaul/Olympic-data-analysis | main.py | main.py | py | 6,253 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "seaborn.set_style",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",... |
29534323943 | from scipy.interpolate import Rbf # radial basis functions
import matplotlib.pyplot as plt
import numpy as np

# Original sample coordinates in pixel units.
# NOTE(review): these two lists are immediately overwritten by the
# normalized [0, 1] coordinates below -- presumably kept for reference.
x = [1555,1203,568,1098,397,564,1445,337,1658,1517,948]
y = [860,206,1097,425,594,614,553,917,693,469,306]
# Normalized sample coordinates actually used for the interpolation.
x = [0.9, 0.6, 0.1, 0.5, 0.04, 0.1, 0.82, 0.0, 1.0, 0.89, 0.46]
y = [0.73, 0.0, 1.0, 0.24, 0.43, 0.45, 0.38, 0.7, 0.54, 0.29, 0.11]
# Every sample point carries the same unit intensity.
z = [1]*len(x)

# Radial-basis-function interpolator with a Gaussian kernel over the samples.
rbf_adj = Rbf(x, y, z, function='gaussian')

# Evaluation grid over the unit square (81 x 82 so the axes are distinguishable).
x_fine = np.linspace(0, 1, 81)
y_fine = np.linspace(0, 1, 82)
x_grid, y_grid = np.meshgrid(x_fine, y_fine)
# Rbf expects flat coordinate arrays; reshape the result back onto the grid.
z_grid = rbf_adj(x_grid.ravel(), y_grid.ravel()).reshape(x_grid.shape)

# Flip the y axis so the plot matches image-style coordinates (origin top-left).
plt.gca().invert_yaxis()
#plt.gca().invert_xaxis()
plt.pcolor(x_fine, y_fine, z_grid);
plt.plot(x, y, 'ok');
plt.xlabel('x'); plt.ylabel('y'); plt.colorbar();
plt.title('Heat Intensity Map');
plt.show() | twilly27/DatacomProject | Project/HeatMapping.py | HeatMapping.py | py | 795 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.interpolate.Rbf",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
... |
35914457874 | class Solution(object):
# @param nestedList a list, each element in the list
# can be a list or integer, for example [1,2,[1,2]]
# @return {int[]} a list of integer
def flatten(self, nestedList: list) -> list:
    """Flatten an arbitrarily nested list of integers into a flat list.

    Iterative depth-first traversal with an explicit deque used as a
    stack, so deep nesting cannot hit Python's recursion limit.

    Bug fix: the previous version emptied the caller's lists as a side
    effect (it popped elements straight out of the input).  Nested lists
    are now copied before their elements are popped, so the input is
    left untouched.

    :param nestedList: list whose elements are ints or (nested) lists,
                       e.g. [1, 2, [1, 2]]
    :return: flat list of ints in left-to-right order
    """
    import collections
    # Seed with a shallow copy so the caller's top-level list survives.
    stack = collections.deque([list(nestedList)])
    result = []
    while stack:
        front = stack.popleft()
        if isinstance(front, list):
            # Copy before popping; popping from the right while
            # appending to the left preserves the original ordering.
            items = list(front)
            while items:
                stack.appendleft(items.pop())
        else:
            result.append(front)
    return result
| Super262/LintCodeSolutions | data_structures/stack/problem0022.py | problem0022.py | py | 575 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
14807526088 | import time
import multiprocessing
def work():
    """Worker routine: print a progress message ten times, 0.2 s apart."""
    for i in range(10):
        print("工作中...")  # runtime string kept as-is; means "working..."
        time.sleep(0.2)
if __name__ == '__main__':
    # Run the worker as a daemon process: it is terminated automatically
    # when the main process exits, so it never finishes all 10 iterations.
    work_process = multiprocessing.Process(target=work)
    work_process.daemon=True
    work_process.start()
    # The main process waits 1 second
    time.sleep(1)
    print("程序结束")  # runtime string kept as-is; means "program finished"
| kids0cn/leetcode | Python语法/python多线程多进程/4.守护进程.py | 4.守护进程.py | py | 333 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.sleep",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
}
] |
20495057760 | import sys, iptc, re, socket
# True once one of the mutually exclusive "single" options (-l, -r, -d,
# -all, -rule) has been handled; skips the custom-rule parsing below.
single_options = False
# Names accepted by the -rule option (predesigned rule presets).
predesigned_rules = ['BlockIncomingSSH', 'BlockOutgoingSSH', 'BlockAllSSH', 'BlockIncomingHTTP', 'BlockIncomingHTTPS',\
    'BlockIncomingPing', 'BlockInvalidPackets', 'BlockSYNFlooding', 'BlockXMASAttack', 'ForceSYNPackets']
# Protocol names accepted by the -proto option.
accepted_protocols = ['ah','egp','esp','gre','icmp','idp','igmp','ip','pim','pum','pup','raw','rsvp','sctp','tcp','tp','udp']
# Rule attributes collected from the command line (None = not supplied).
ipsrc = None
ipsrc_range = None
ipdst = None
ipdst_range = None
portsrc = None
portsrc_range = None
portdst = None
portdst_range = None
protocol = None
interfacein = None
interfaceout = None
target = None
# 0-based position at which the new rule is inserted into its chain.
custom_position = 0
direction = None
# Set right after an option consumed its value token from argv.
checker = False
############################### List of Predefined Rules #############################
def block_incoming_ssh():
    """Insert a FILTER/INPUT rule dropping NEW inbound TCP connections to port 22."""
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    match.dport = "22"
    match = rule.create_match("state")
    match.state = "NEW"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")

def block_outgoing_ssh():
    """Insert a FILTER/OUTPUT rule dropping NEW outbound TCP connections to port 22."""
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    match.dport = "22"
    match = rule.create_match("state")
    match.state = "NEW"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")

def block_all_ssh():
    """Drop NEW SSH (TCP/22) connections in both INPUT and OUTPUT chains.

    The same Rule object is inserted into both chains.
    """
    chain1 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    chain2 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    match.dport = "22"
    match = rule.create_match("state")
    match.state = "NEW"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain1.insert_rule(rule)
    chain2.insert_rule(rule)
    print("Successfully Created")

def block_incoming_http():
    """Insert a FILTER/INPUT rule dropping NEW inbound TCP connections to port 80."""
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    match.dport = "80"
    match = rule.create_match("state")
    match.state = "NEW"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")

def block_incoming_https():
    """Insert a FILTER/INPUT rule dropping NEW inbound TCP connections to port 443."""
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    match.dport = "443"
    match = rule.create_match("state")
    match.state = "NEW"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")
def block_incoming_ping():
    """Drop incoming ICMP echo requests so remote hosts cannot ping us.

    Bug fix: the rule previously matched "echo-reply" on INPUT, which
    only blocks replies to pings *we* send out.  Incoming pings arrive
    as "echo-request" packets, which is what is dropped now.
    """
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "icmp"
    match = rule.create_match("icmp")
    match.icmp_type = "echo-request"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")
def block_invalid_packets():
    """Drop packets that connection tracking classifies as INVALID.

    Bug fix: the state name was spelled "iNVALID"; the conntrack state
    is conventionally written "INVALID" (state names are matched by the
    iptables extension, so the canonical spelling is used).
    """
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    match = rule.create_match("state")
    match.state = "INVALID"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")
def syn_flooding():
    """Rate-limit TCP SYN packets on INPUT: ACCEPT at most 10 per second.

    NOTE(review): this rule only ACCEPTs the first 10 SYNs/second; the
    excess is actually dropped only if a later rule or the chain's
    default policy drops it -- confirm the INPUT policy.
    """
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    # Mask FIN,SYN,RST,ACK and require only SYN set: a new connection attempt.
    match.tcp_flags = [ 'FIN,SYN,RST,ACK', 'SYN' ]
    match = rule.create_match("limit")
    match.limit = "10/second"
    target = iptc.Target(rule, "ACCEPT")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")

def block_xmas_attack():
    """Drop TCP packets with every flag set (XMAS scan packets) on INPUT."""
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    # Mask ALL flags and require ALL set.
    match.tcp_flags = [ 'ALL', 'ALL' ]
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")

def force_syn_packets():
    """Drop NEW TCP connections whose first packet is not a SYN."""
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    rule = iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
    # "!1" negates the SYN flag match: packets that are NOT SYNs.
    match.syn = "!1"
    match = rule.create_match("state")
    match.state = "NEW"
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.insert_rule(rule)
    print("Successfully Created")
# Function to delete rules
# Module-level flag used by delete_rules() to record whether a full pass
# managed to remove every rule (individual deletions can fail transiently).
all_rules_deleted = True
def delete_rules(table):
    """Delete every rule from every chain of *table*, retrying until clean.

    Deleting while iterating can make some deletions fail; on any
    failure the whole table is swept again recursively until one pass
    completes without errors.

    :param table: an iptc.Table instance to flush
    """
    global all_rules_deleted
    all_rules_deleted = True
    for chain in table.chains:
        #print(chain.name)
        for rule in chain.rules:
            try:
                chain.delete_rule(rule)
                print(rule.protocol, rule.src, rule.dst, rule.target.name, "is DELETED")
            except:
                # NOTE(review): bare except also hides unrelated errors;
                # kept as-is because the retry loop depends on it.
                all_rules_deleted = False
    if(all_rules_deleted==False):
        #print("First Iteration Failed")
        delete_rules(table)
# Function to delete a single rule
def delete_rule(rule, table, direction = None):
    """Delete the rule at index *rule* from the INPUT or OUTPUT chain.

    :param rule: rule index (int or numeric string) within the chain
    :param table: the iptc.Table to operate on
    :param direction: 'input' or 'output'; anything else exits with an error

    Bug fixes: the *rule* parameter was previously ignored in favour of
    the global ``rule_number`` (and shadowed by the loop variable), and
    the failure message of the OUTPUT branch wrongly said "Input".
    """
    target_index = int(rule)
    if(direction == 'input'):
        chain = iptc.Chain(table, "INPUT")
        deleted1 = False
        for index, candidate in enumerate(chain.rules):
            if(target_index == index):
                try:
                    chain.delete_rule(candidate)
                    print("Rule Successfully Deleted for Input")
                    deleted1 = True
                except:
                    sys.exit("The rule could not be deleted for Input. Please, try again.")
        if(deleted1 == False):
            print("The Rule Could Not Be Found for Input")
    elif (direction == 'output'):
        chain = iptc.Chain(table, "OUTPUT")
        deleted1 = False
        for index, candidate in enumerate(chain.rules):
            if(target_index == index):
                try:
                    chain.delete_rule(candidate)
                    print("Rule Successfully Deleted for Output")
                    deleted1 = True
                except:
                    sys.exit("The rule could not be deleted for Output. Please, try again.")
        if(deleted1 == False):
            print("The Rule Could Not Be Found for Output")
    else:
        sys.exit("Delete rule function error. Incorrect parameter")
# First check, for options that should be used alone
# Handle the mutually exclusive "single" options: -l (list), -r (reset),
# -d (delete one rule), -all (accept/drop everything), -rule (preset).
for index, value in enumerate(sys.argv):
    if(value == '-l' ):
        # -l: pretty-print every rule of the FILTER table.
        if (len(sys.argv)) != 2:
            sys.exit("The option -l does not accept additional options. Please, type: myFirewall -l")
        single_options = True
        table = iptc.Table(iptc.Table.FILTER)
        for chain in table.chains:
            #print ("Chain ",chain.name)
            rule_type = chain.name[:3]
            for index, rule in enumerate(chain.rules):
                # Collect the optional match extensions attached to the rule.
                dport = None
                sport = None
                ip_src_range = None
                ip_dst_range = None
                match_state = None
                match_tcp_flags = None
                for match in rule.matches:
                    if (match.dport != None):
                        dport = match.dport
                    if (match.sport != None):
                        sport = match.sport
                    if (match.src_range != None):
                        ip_src_range = match.src_range
                    if (match.dst_range != None):
                        ip_dst_range = match.dst_range
                    if (match.state != None):
                        match_state = match.state
                    if (match.tcp_flags != None):
                        match_tcp_flags = match.tcp_flags[match.tcp_flags.find(' ')+1:]
                # Prefer an iprange match over the plain src/dst address.
                if(ip_src_range != None):
                    source_ip = ip_src_range
                else:
                    source_ip = rule.src
                if(ip_dst_range != None):
                    destination_ip = ip_dst_range
                else:
                    destination_ip = rule.dst
                print ("==========================================")
                print ("RULE("+ rule_type+")", index, "||", "proto:", rule.protocol + " ||", "sport:", str(sport) + " ||",
                       "dport:", str(dport) + " ||", "src:", source_ip + " ||", "dst:", destination_ip + " ||\n", "|| inInt:",
                       str(rule.in_interface) + " ||", "outInt:", str(rule.out_interface) + " ||",
                       "tcpflags:", str(match_tcp_flags) + " ||", "state:", str(match_state) + " ||", "Target:", rule.target.name)
                print ("==========================================")
    elif(value == '-r'):
        # -r: flush every rule from all five tables.
        if (len(sys.argv)) != 2:
            sys.exit("The option -r does not accept additional options. Please, type: myFirewall -r")
        single_options = True
        table1 = iptc.Table(iptc.Table.FILTER)
        delete_rules(table1)
        table2 = iptc.Table(iptc.Table.MANGLE)
        delete_rules(table2)
        table3 = iptc.Table(iptc.Table.NAT)
        delete_rules(table3)
        table4 = iptc.Table(iptc.Table.RAW)
        delete_rules(table4)
        table5 = iptc.Table(iptc.Table.SECURITY)
        delete_rules(table5)
    elif(value == '-d'):
        # -d N [-in|-out]: delete rule number N (both directions by default).
        if (len(sys.argv) != 3 and len(sys.argv) != 4):
            sys.exit("The option -d does not accept these options. Please, type: myFirewall -d RuleNumer [-in|-out]")
        single_options = True
        table = iptc.Table(iptc.Table.FILTER)
        rule_number = sys.argv[2]
        if(len(sys.argv) == 4):
            if (sys.argv[3] == '-in'):
                delete_rule(rule_number, table, direction = 'input')
            elif (sys.argv[3] == '-out'):
                delete_rule(rule_number, table, direction = 'output')
            else:
                sys.exit("Incorrect parameter. Please, type: myFirewall -d RuleNumer [-in|-out]")
        else:
            delete_rule(rule_number, table, direction = 'input')
            delete_rule(rule_number, table, direction = 'output')
        #for chain in table.chains:
        #for rule in chain.rules:
        # chain.delete_rule(rule)
    elif(value == '-all'):
        # -all ACCEPT|DROP: blanket rule on both INPUT and OUTPUT.
        if ((len(sys.argv) != 3) and (sys.argv[index+1]!='ACCEPT') and (sys.argv[index+1]!='DROP')):
            sys.exit("The -all option lets the user to ACCEPT or DROP all packets, independently of ports,"+\
                " protocols or IPs. Please, specify a ACCEPT or DROP argument")
        else:
            single_options = True
            rule = iptc.Rule()
            rule.target = rule.create_target(sys.argv[index+1])
            chain1 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
            chain2 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
            chain1.insert_rule(rule)
            chain2.insert_rule(rule)
    elif(value == '-rule'):
        # -rule NAME: apply one of the predesigned rules (no arg lists them).
        single_options = True
        if (len(sys.argv)) != 3:
            if (len(sys.argv) == 2):
                print("The list of rules available is:\n")
                for i in predesigned_rules:
                    print(i)
            else:
                sys.exit("The option -r does not accept additional options. Please, type: -rule RULE")
        elif(sys.argv[index+1] == 'BlockIncomingSSH'):
            block_incoming_ssh()
        elif(sys.argv[index+1] == 'BlockOutgoingSSH'):
            block_outgoing_ssh()
        elif(sys.argv[index+1] == 'BlockAllSSH'):
            block_all_ssh()
        elif(sys.argv[index+1] == 'BlockIncomingHTTP'):
            block_incoming_http()
        elif(sys.argv[index+1] == 'BlockIncomingHTTPS'):
            block_incoming_https()
        elif(sys.argv[index+1] == 'BlockIncomingPing'):
            block_incoming_ping()
        elif(sys.argv[index+1] == 'BlockInvalidPackets'):
            block_invalid_packets()
        elif(sys.argv[index+1] == 'BlockSYNFlooding'):
            syn_flooding()
        elif(sys.argv[index+1] == 'BlockXMASAttack'):
            block_xmas_attack()
        elif(sys.argv[index+1] == 'ForceSYNPackets'):
            force_syn_packets()
        else:
            print("Rule not available. The list of available rules is:\n")
            for i in predesigned_rules:
                print(i)
            print("")
if(not single_options):
    # Iterator to retrieve all information and create a Rule
    for index, value in enumerate(sys.argv):
        if(value == '-ipsrc'):
            # Source IP: a single dotted-quad or an "A-B" range.
            match_single = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            match_range = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))-(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            if((match_single==None) and (match_range==None)):
                sys.exit("The IP address format is incorrect")
            else:
                # checker marks that the next argv token belongs to this option.
                checker = True
                if(match_single!=None):
                    ipsrc = sys.argv[index+1]
                if(match_range!=None):
                    ipsrc_range = sys.argv[index+1]
        elif(value == '-ipdst'):
            # Destination IP: single address or range, same formats as -ipsrc.
            match_single = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            match_range = re.search('^(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))-(([0-9]?[0-9]\.)|(1[0-9][0-9]\.)|(2[0-5][0-5]\.)){3}(([0-9]?[0-9])|(1[0-9][0-9])|(2[0-5][0-5]))$', sys.argv[index+1])
            if(match_single==None and match_range==None):
                sys.exit("The IP address format is incorrect")
            else:
                checker = True
                if(match_single!=None):
                    ipdst = sys.argv[index+1]
                if(match_range!=None):
                    ipdst_range = sys.argv[index+1]
        elif(value == '-portsrc'):
            # Source port: single number or "low:high" range, 1-65535.
            match_single = re.search('^[0-9]+$', sys.argv[index+1])
            match_range = re.search('^[0-9]+:[0-9]+$', sys.argv[index+1])
            if(match_single==None and match_range==None):
                sys.exit("The Port/Port range format is incorrect")
            checker = True
            if(match_single != None):
                if(int(sys.argv[index+1])<65536 and int(sys.argv[index+1])>0):
                    portsrc = sys.argv[index+1]
                else:
                    sys.exit("The specified port is out of the boundaries. Please, type a value between 1 and 65535")
            elif(match_range != None):
                first_port_group = int(sys.argv[index+1][:sys.argv[index+1].find(':')])
                second_port_group = int(sys.argv[index+1][sys.argv[index+1].find(':')+1:])
                if(((first_port_group<65536) and (first_port_group>0) and (second_port_group<65536) and (second_port_group>0))):
                    portsrc_range = sys.argv[index+1]
                else:
                    sys.exit("The specified port range is out of the boundaries. Please, type values between 1 and 65535")
            else:
                sys.exit("Port incorrectly parsed")
        elif(value == '-portdst'):
            # Destination port: same formats and bounds as -portsrc.
            match_single = re.search('^[0-9]+$', sys.argv[index+1])
            match_range = re.search('^[0-9]+:[0-9]+$', sys.argv[index+1])
            if(match_single==None and match_range==None):
                sys.exit("The Port/Port range format is incorrect")
            checker = True
            if(match_single != None):
                if(int(sys.argv[index+1])<65536 and int(sys.argv[index+1])>0):
                    portdst = sys.argv[index+1]
                else:
                    sys.exit("The specified port is out of the boundaries. Please, type a value between 1 and 65535")
            elif(match_range != None):
                first_port_group = int(sys.argv[index+1][:sys.argv[index+1].find(':')])
                second_port_group = int(sys.argv[index+1][sys.argv[index+1].find(':')+1:])
                if(((first_port_group<65536) and (first_port_group>0) and (second_port_group<65536) and (second_port_group>0))):
                    portdst_range = sys.argv[index+1]
                else:
                    sys.exit("The specified port range is out of the boundaries. Please, type values between 1 and 65535")
            else:
                sys.exit("Port incorrectly parsed")
        elif(value == '-proto'):
            accepted = False
            for i in accepted_protocols:
                if(i == sys.argv[index+1]):
                    accepted = True
                else:
                    protocol = sys.argv[index+1]
            if(not accepted):
                # NOTE(review): sys.exit() takes a single argument; this call
                # would raise TypeError if ever reached with these extras.
                sys.exit("The protocol provided is not accepted. The list of accepted protocols is:",'ah',
                    'egp','esp','gre','icmp','idp','igmp','ip','pim','pum','pup','raw','rsvp','sctp','tcp','tp','udp')
            checker = True
        elif(value == '-intin'):
            # Input interface: must exist on this host.
            available_interface = False
            for i in socket.if_nameindex():
                if(i[1] == sys.argv[index+1]):
                    available_interface = True
            if(available_interface == False):
                sys.exit("The selected interface is not available on this system")
            else:
                interfacein = sys.argv[index+1]
                checker = True
        elif(value == '-intout'):
            # Output interface: must exist on this host.
            available_interface = False
            for i in socket.if_nameindex():
                if(i[1] == sys.argv[index+1]):
                    available_interface = True
            if(available_interface == False):
                sys.exit("The selected interface is not available on this system")
            else:
                interfaceout = sys.argv[index+1]
                checker = True
        elif(value == '-pos'):
            # Insertion position within the chain (0-based).
            match = re.search('^[0-9]*$', sys.argv[index+1])
            if(match==None):
                sys.exit("Incorrect position format. Please, type an integer >= 0")
            else:
                custom_position = sys.argv[index+1]
                checker = True
        elif(value == '-t'):
            if(sys.argv[index+1] == "ACCEPT"):
                target = "ACCEPT"
            elif(sys.argv[index+1] == "DROP"):
                target = "DROP"
            else:
                sys.exit('Incorrect target option. Please, choose between "ACCEPT" and "DROP"')
            checker = True
        elif(value == '-in'):
            direction = 'incoming'
        elif(value == '-out'):
            direction = 'outgoing'
        else:
            # Unknown token: allowed only if it was the value consumed by
            # the previous option (checker) or the script name (index 0).
            if(checker == True or index==0):
                checker = False
            else:
                sys.exit("Incorrect option: " + value)
    # Assemble the iptables rule from the collected options.
    rule = iptc.Rule()
    if(ipsrc != None):
        rule.src = ipsrc
    if(ipsrc_range != None or ipdst_range != None):
        match = rule.create_match("iprange")
        if(ipsrc_range != None):
            match.src_range = ipsrc_range
        else:
            match.dst_range = ipdst_range
    if(ipdst != None):
        rule.dst = ipdst
    if(protocol != None):
        rule.protocol = protocol
        if(protocol == "tcp" or protocol == "udp"):
            match = rule.create_match(protocol)
    if(portsrc != None or portdst != None):
        # Port matches need a protocol; default to tcp if none was given.
        if(protocol == None):
            protocol = "tcp"
            rule.protocol = protocol
            match = rule.create_match(protocol)
        if(portsrc != None):
            match.sport = portsrc
        if(portdst != None):
            match.dport = portdst
    if(portsrc_range != None or portdst_range != None):
        if(protocol == None):
            protocol = "tcp"
            rule.protocol = protocol
            match = rule.create_match(protocol)
        if(portsrc_range != None):
            match.sport = portsrc_range
        if(portdst_range != None):
            match.dport = portdst_range
    if(interfacein != None):
        rule.in_interface = interfacein
    if(interfaceout != None):
        rule.out_interface = interfaceout
    if(target != None):
        rule.target = rule.create_target(target)
    else:
        sys.exit('You must specify a target: -t "ACCEPT" or -t "DROP"')
    if(direction == None):
        # No direction given: insert into both INPUT and OUTPUT.
        chain1 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
        chain2 = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
        try:
            chain1.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries for existing Input table. Please, choose a value between 0 and (Max.AmountOfRules-1)")
        try:
            chain2.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries for Output table. Please, choose a value between 0 and (Max.AmountOfRules-1)")
    elif(direction == "incoming"):
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
        try:
            chain.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries. Please, choose a value between 0 and (Max.AmountOfRules-1)")
    elif(direction == "outgoing"):
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
        try:
            chain.insert_rule(rule, position=int(custom_position))
        except:
            sys.exit("Index of insertion out of boundaries. Please, choose a value between 0 and (Max.AmountOfRules-1)")
| syerbes/myFirewall | myFirewall.py | myFirewall.py | py | 21,668 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "iptc.Chain",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "iptc.Table",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "iptc.Rule",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "iptc.Target",
"line_number": 37,
... |
2254678822 | import urllib
from xml.dom import minidom
import re
def buildResponse(node_list):
    """Join definition strings into one newline-separated response.

    Uses str.join instead of the previous quadratic string-concatenation
    loop; the output is identical (entries separated by a single newline,
    surrounding whitespace stripped).

    :param node_list: list of definition strings
    :return: the joined response string (empty for an empty list)
    """
    return "\n".join(node_list).strip()
def buildURL(key, word):
    """Build the Merriam-Webster collegiate-dictionary lookup URL for *word*.

    :param key: the dictionaryapi.com API key
    :param word: the word to look up
    :return: the request URL string
    """
    base = "http://www.dictionaryapi.com/api/v1/references/collegiate/xml/"
    # NOTE(review): *word* is interpolated unencoded; words containing
    # spaces or non-ASCII characters would need URL quoting.
    return "{0}{1}?key={2}".format(base, word, key)
def getXML(word):
    """Fetch and parse the XML definition document for *word* from the API.

    NOTE(review): uses the Python 2 ``urllib.urlopen`` API; under
    Python 3 this would need ``urllib.request.urlopen``.  The API key is
    hard-coded below -- consider moving it to configuration, since it is
    exposed in source control.

    :param word: the word to look up
    :return: parsed ``xml.dom.minidom`` Document
    """
    url = buildURL("1a276aec-1aa8-42d4-9575-d29c2d4fb105", word)
    response = urllib.urlopen(url).read()
    data = minidom.parseString(str(response))
    return data
def getDefinition(word):
    """Return up to the first three dictionary definitions of *word*.

    Walks every <def> element of the API response, takes the text of the
    first <dt> (defining text) child of each, and joins at most three of
    them with newlines via buildResponse().

    :param word: the word to look up
    :return: newline-separated definition string (possibly empty)
    """
    data = getXML(word)
    itemlist = data.getElementsByTagName('def')
    node_list = []
    for i in itemlist:
        # Each <def> holds one or more <dt> children; only the first is used.
        dts = i.getElementsByTagName('dt')
        node_list.append(str(dts[0].childNodes[0].nodeValue))
    if len(node_list) < 3:
        return buildResponse(node_list)
    else:
        return buildResponse(node_list[:3])
| sarthfrey/Texty | dictionaryDef.py | dictionaryDef.py | py | 819 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "urllib.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 17,
"usage_type": "name"
}
] |
37530932561 | from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from curriculum.serializers.curriculum_serializers import SubjectLevelListSerializer, SubjectLevelSerializer, SubjectLevelWriteSerializer
from rest_framework.exceptions import NotFound
from rest_framework.views import APIView
from curriculum.models import SubjectLevel
#
# SUBJECT LEVEL VIEWS
#
class SubjectLevelList(APIView):
    """
    List all SubjectLevels, or create a new one.
    """

    def get(self, request, school_pk=None, format=None):
        """Return SubjectLevels, optionally filtered by school, subject and level.

        ``subject`` and ``level`` come from the query string; ``school_pk``
        from the URL.  All filters are combinable.
        """
        subject_levels = SubjectLevel.objects.all()
        if school_pk:
            subject_levels = subject_levels.filter(
                subject__school__id=school_pk)
        subject = request.query_params.get('subject', None)
        level = request.query_params.get('level', None)
        if subject:
            subject_levels = subject_levels.filter(subject_id=subject)
        if level:
            subject_levels = subject_levels.filter(level_id=level)
        serializer = SubjectLevelListSerializer(subject_levels, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a SubjectLevel; returns 201 with its list representation.

        Bug fix: on invalid input the previous code returned
        ``new_serializer.errors`` while ``new_serializer`` was never bound
        on that path (NameError); the validation errors of the write
        serializer are returned instead.
        """
        serializer = SubjectLevelWriteSerializer(data=request.data)
        if serializer.is_valid():
            new_subject_level = serializer.save()
            # Respond with the richer list serializer, not the write one.
            new_serializer = SubjectLevelListSerializer(new_subject_level)
            return Response(new_serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SubjectLevelDetail(APIView):
    """
    Retrieve, update or delete a SubjectLevel.
    """

    def get_object(self, subject_level_pk):
        # Translate a missing row into a DRF 404 instead of a 500.
        try:
            return SubjectLevel.objects.get(id=subject_level_pk)
        except SubjectLevel.DoesNotExist:
            raise NotFound(detail="Object with this ID not found.")

    def get(self, request, subject_level_pk, format=None):
        # Read-only representation of a single SubjectLevel.
        instance = self.get_object(subject_level_pk)
        return Response(SubjectLevelSerializer(instance).data)

    def put(self, request, subject_level_pk, format=None):
        # Full update: every writable field must be supplied.
        return self._save(subject_level_pk, request.data, partial=False)

    def patch(self, request, subject_level_pk):
        # Partial update: only the supplied fields change.
        return self._save(subject_level_pk, request.data, partial=True)

    def _save(self, subject_level_pk, data, partial):
        # Shared PUT/PATCH body: validate, persist, or report errors.
        instance = self.get_object(subject_level_pk)
        writer = SubjectLevelWriteSerializer(instance, data=data, partial=partial)
        if writer.is_valid():
            writer.save()
            return Response(writer.data)
        return Response(writer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, subject_level_pk, format=None):
        # Remove the row and answer with an empty 204.
        self.get_object(subject_level_pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| markoco14/student-mgmt | curriculum/views/subject_level_views.py | subject_level_views.py | py | 3,238 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "curriculum.models.SubjectLevel.objects.all",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "curriculum.models.SubjectLevel.objects",
"line_number": 21,
"usag... |
31484686923 | import torch
from models.conformer.activation import GLU, Swish
class DepthWiseConvolution(torch.nn.Module):
    """Depth-wise 1-D convolution over (batch, time, channels) tensors.

    Each channel is convolved with its own filter (groups == in_channels),
    so the channel count is unchanged.
    """

    def __init__(self, in_channels, kernel_size, stride, padding):
        super(DepthWiseConvolution, self).__init__()
        self.conv = torch.nn.Conv1d(in_channels, in_channels, kernel_size, stride, padding, groups=in_channels)

    def forward(self, x):
        # Conv1d expects (batch, channels, time); swap axes around the conv.
        return self.conv(x.transpose(1, 2)).transpose(1, 2)
class PointWiseConvolution(torch.nn.Module):
    """Point-wise (kernel size 1) convolution in (batch, time, channels) layout.

    Mixes channels per time step, mapping in_channels -> out_channels.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(PointWiseConvolution, self).__init__()
        self.conv = torch.nn.Conv1d(in_channels, out_channels, 1, stride, 0)

    def forward(self, x):
        # Swap to (batch, channels, time) for Conv1d, then swap back.
        return self.conv(x.transpose(1, 2)).transpose(1, 2)
class Permute(torch.nn.Module):
    """Module wrapper around Tensor.permute so axis swaps fit in a Sequential."""

    def __init__(self, dims):
        super(Permute, self).__init__()
        self.dims = dims

    def forward(self, x):
        # Unpack the stored axis order into Tensor.permute.
        order = tuple(self.dims)
        return x.permute(*order)
class ConvolutionModule(torch.nn.Module):
    """Conformer convolution module.

    Pipeline: LayerNorm -> point-wise conv (+GLU gate) -> depth-wise conv
    -> BatchNorm -> Swish -> point-wise conv -> Dropout, all in
    (batch, time, channels) layout.
    """

    def __init__(self, d_model, dropout, kernel_size=3):
        super(ConvolutionModule, self).__init__()
        layers = [
            torch.nn.LayerNorm(d_model),
            # Expand to 2*d_model so GLU can gate it back down to d_model.
            PointWiseConvolution(d_model, 2 * d_model),
            GLU(),
            DepthWiseConvolution(d_model, kernel_size, 1, int(kernel_size / 2)),
            # BatchNorm1d wants (batch, channels, time); permute around it.
            Permute((0, 2, 1)),
            torch.nn.BatchNorm1d(d_model),
            Permute((0, 2, 1)),
            Swish(),
            PointWiseConvolution(d_model, d_model),
            torch.nn.Dropout(dropout),
        ]
        self.conv = torch.nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
| m-koichi/ConformerSED | src/models/conformer/convolution.py | convolution.py | py | 1,709 | python | en | code | 25 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number"... |
18385696956 |
# Import the libraries
import cv2
import os
import numpy as np
class Auxiliary(object):
"""
Class that provides some auxiliary functions.
"""
def __init__(self, size_x=100, size_y=100, interpolation=cv2.INTER_CUBIC):
    """
    Set the default values for the image size and the interpolation method.
    Available interpolation methods provided by OpenCV: INTER_CUBIC, INTER_AREA, INTER_LANCZOS4, INTER_LINEAR, INTER_NEAREST
    :param size_x: Set the default image width (default = 100).
    :param size_y: Set the default image height (default = 100).
    :param interpolation: Set the default interpolation method (default cv2.INTER_CUBIC).
    """
    self.size_x = size_x
    self.size_y = size_y
    self.interpolation = interpolation

    # Declare all supported files
    # (extensions accepted as images by extract_images_paths)
    self.supported_files = ["png", "jpg", "jpeg"]
def set_default_size(self, size_x, size_y):
    """
    Set the default size.
    Each dimension is validated independently; non-positive values are
    ignored so the current setting survives bad input.
    :param size_x: Image width.
    :param size_y: Image height.
    """
    self.size_x = size_x if size_x > 0 else self.size_x
    self.size_y = size_y if size_y > 0 else self.size_y
def get_default_size(self):
    """
    Get the default image size as a (width, height) tuple (default is 100x100).
    """
    return (self.size_x, self.size_y)
def get_interpolation_method_name(self):
    """
    Get the selected interpolation method name.
    :return: A string containing the interpolation method name.
    :raises NameError: if the configured interpolation constant is unknown.

    Improvement: the previous if-chain ended with an unreachable
    ``return ""`` after the raise; a constant-to-name mapping replaces it.
    """
    names = {
        cv2.INTER_CUBIC: "cv2.INTER_CUBIC",
        cv2.INTER_AREA: "cv2.INTER_AREA",
        cv2.INTER_LANCZOS4: "cv2.INTER_LANCZOS4",
        cv2.INTER_LINEAR: "cv2.INTER_LINEAR",
        cv2.INTER_NEAREST: "cv2.INTER_NEAREST",
    }
    try:
        return names[self.interpolation]
    except KeyError:
        raise NameError("Invalid interpolation method name")
@staticmethod
def calc_accuracy(recognized_images, total_face_images):
    """
    Calculates the accuracy (percentage) using the formula:
    acc = (recognized_images / total_face_images) * 100
    :param recognized_images: The number of recognized face images.
    :param total_face_images: The number of total face images.
    :return: The accuracy, or 0.0 when there are no face images.
    """
    recognized = float(recognized_images)
    total = float(total_face_images)
    # Guard the zero-total case instead of catching ZeroDivisionError.
    if total == 0.0:
        return 0.0
    return (recognized / total) * 100.0
@staticmethod
def write_text_file(content, file_name):
    """
    Write the content to a text file based on the file name.
    :param content: The content as a string.
    :param file_name: The file name (e.g. home/user/test.txt)

    Improvement: a context manager guarantees the handle is closed even
    if write() raises (the previous open/write/close leaked on error).
    """
    with open(file_name, "w") as text_file:
        text_file.write(content)
@staticmethod
def is_grayscale(image):
    """
    Check if an image is in grayscale.
    A 2-D image is grayscale by definition; a color image counts as
    grayscale only if every pixel has identical channel values.
    :param image: The image (array with a .shape attribute).
    :return: True if the image is in grayscale.

    Bug fixes: the row/column loop bounds were swapped (IndexError on
    non-square images), and the chained comparison
    ``p[0] != p[1] != p[2]`` missed pixels such as (5, 5, 9); pixels are
    now tested for full channel equality.
    """
    if len(image.shape) <= 2:
        return True
    h, w = image.shape[:2]  # rows, cols
    for i in range(h):
        for j in range(w):
            p = image[i, j]
            if not (p[0] == p[1] == p[2]):
                return False
    return True
@staticmethod
def to_grayscale(image):
    """
    Convert an image to grayscale.
    Assumes the input uses OpenCV's default BGR channel order.
    :param image: The image.
    :return: The image in grayscale, or None for invalid input.
    """
    if image is None:
        print("Invalid Image: Could not convert to grayscale")
        return None

    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
@staticmethod
def load_image(path):
    """
    Load an image based on the path passed by parameter.
    :param path: The path to the image file.
    :return: The image object (None when OpenCV cannot read the file,
             as checked by preprocess_image).
    """
    return cv2.imread(path)

@staticmethod
def save_image(file_name, image):
    """
    Save an image based on the fileName passed by parameter.
    :param file_name: The file name.
    :param image: The image.
    """
    cv2.imwrite(file_name, image)
@staticmethod
def resize_image(image, size_x, size_y, interpolation_method):
    """
    Resize an image.
    :param image: The image object.
    :param size_x: The image width.
    :param size_y: The image height.
    :param interpolation_method: The interpolation method.
    :return: The resized image, or -1 on invalid input.

    NOTE(review): the error paths return -1 while success returns an
    image, so callers must check for -1 explicitly.  The two-value shape
    unpacking below assumes a single-channel (grayscale) input; a color
    image would raise -- confirm callers always pass grayscale.
    """
    if image is None:
        print("Invalid Image: Could not be resized")
        return -1

    rows, cols = image.shape

    if rows <= 0 or cols <= 0:
        print("Invalid Image Sizes: Could not be resized")
        return -1

    return cv2.resize(image, (size_x, size_y),
                      interpolation=interpolation_method)
def preprocess_image(self, path):
    """
    Preprocess an image: load it, convert it to grayscale and resize it.
    :param path: The image path.
    :return: The preprocessed image, or None when loading failed.
    """
    loaded = self.load_image(path)
    # Bail out early when the file could not be read.
    if loaded is None:
        print("Could not load the image:", path)
        return None
    gray = self.to_grayscale(loaded)
    return self.resize_image(gray, self.size_x, self.size_y,
                             self.interpolation)
@staticmethod
def concatenate_images(left_image, right_image):
    """
    Concatenate two images side by side (horizontally).
    :param left_image: The image placed on the left.
    :param right_image: The image placed on the right.
    :return: The new concatenated image, or None when the shapes do
        not line up.
    """
    try:
        combined = np.concatenate((left_image, right_image), axis=1)
    except ValueError:
        # Incompatible row counts — signal failure with None.
        return None
    return combined
def extract_images_paths(self, path):
    """
    Extract the paths of all supported image files in a directory tree.
    :param path: The directory path.
    :return: A list with all image file paths.
    """
    paths = []
    # In the path folder search for all files in all directories
    for dir_name, dir_names, file_names in os.walk(path):
        for file_name in file_names:
            # Take the extension after the LAST dot; splitext returns
            # "" for extension-less files, which the original
            # split(".")[1] crashed on with IndexError (and it picked
            # the wrong token for names with multiple dots).
            extension = os.path.splitext(file_name)[1].lstrip(".")
            if extension in self.supported_files:
                paths.append(os.path.join(dir_name, file_name))
    return paths
@staticmethod
def extract_files_paths(path):
    """
    Extract all paths for all files type.
    :param path: The directory path.
    :return: A list with all paths for all files.
    """
    all_paths = []
    # Walk the whole tree and collect every file, whatever its type.
    for current_dir, _sub_dirs, names in os.walk(path):
        all_paths.extend(os.path.join(current_dir, name) for name in names)
    return all_paths
def load_all_images_for_train(self, train_path):
    """
    Load all images for training.
    Training files follow the pattern subjectID_imageNumber.ext.
    :param train_path: The train path.
    :return: Three lists with the images, labels and file names.
    """
    images = []
    labels = []
    file_name = []
    # extract_images_paths already filters by supported extension, so
    # the original's second extension check was redundant (and its
    # split(".")[1] crashed on dot-less paths).
    for file_path in self.extract_images_paths(train_path):
        # os.path.basename is portable; splitting on "/" broke on
        # Windows-style paths.
        temp_name = os.path.basename(file_path)
        # The label is the subject id before the first underscore:
        # subjectID_imageNumber.png
        subject_id = int(temp_name.split("_")[0])
        images.append(self.preprocess_image(file_path))
        labels.append(subject_id)
        file_name.append(os.path.splitext(temp_name)[0])
    return images, labels, file_name
def load_all_images_for_test(self, test_path):
    """
    Load all images for test.
    IMPORTANT: test files follow the pattern imageNumber_subjectID.ext,
    the reverse of the training-set pattern.
    :param test_path: The test path.
    :return: Three lists with the images, labels and file names, or
        (None, None, None) when any image fails to preprocess.
    """
    images = []
    labels = []
    file_name = []
    # Paths are already filtered by extension in extract_images_paths.
    for file_path in self.extract_images_paths(test_path):
        # Portable basename/extension handling; the original "/" split
        # and split(".")[1] broke on Windows paths and dot-less names.
        temp_name = os.path.basename(file_path)
        base = os.path.splitext(temp_name)[0]
        # The subject id (label) is the part after the underscore.
        subject_id = int(base.split("_")[1])
        image = self.preprocess_image(file_path)
        if image is None:
            return None, None, None
        images.append(image)
        labels.append(subject_id)
        file_name.append(base)
    return images, labels, file_name
| kelvins/Reconhecimento-Facial | FaceRecognition/classes/auxiliary.py | auxiliary.py | py | 9,896 | python | en | code | 20 | github-code | 6 | [
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "cv2.INT... |
44295661280 | import numpy as np
from lib import EulerUtils as eu
# Problem 36 solution!
def checkIfNumberIsPalindromeInBothBases(number):
    """Return True when *number* reads the same forwards and backwards
    in both base 10 and base 2 (Project Euler problem 36)."""
    numberString = str(number)
    baseTwoString = "{0:b}".format(number)
    # Return the boolean expression directly instead of the redundant
    # if/else True/False ladder.
    return eu.isPalindrome(numberString) and eu.isPalindrome(baseTwoString)
# Sum every number below one million that is palindromic in both
# base 10 and base 2. Use a distinct name so the built-in sum() is not
# shadowed (the original rebound "sum", breaking any later use of it).
total = sum(x for x in range(1000000)
            if checkIfNumberIsPalindromeInBothBases(x))
print(total)
| Renoh47/ProjectEuler | project euler python/problem36.py | problem36.py | py | 426 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "lib.EulerUtils.isPalindrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lib.EulerUtils",
"line_number": 11,
"usage_type": "name"
}
] |
70007062267 | from typing import Any, Dict
import os
import json
import httpx
from odt.config import PipeConfig
_TEMPFILENAME = "lgbm_tmp_model.txt"
class ODTManager:
    """Small HTTP client that pushes pipeline config and LightGBM models
    to an ODT inference server and requests predictions from it."""

    def __init__(self, server_host: str) -> None:
        self.server_host = server_host

    def update_config(self, config: PipeConfig):
        """POST the pipeline config to the server.

        :raises Exception: when the server does not answer 200.
        """
        # serialization from pydantic with .json method doesn't work internally
        json_data = json.loads(config.json())
        r = httpx.post(f"{self.server_host}/config", json=json_data)
        if r.status_code == 200:
            print("Updating config succeeded!")
        else:
            raise Exception(f"Something went wrong updating the config, status code {r.status_code}")

    def update_model(self, model: Any):
        """Serialize *model* through a temporary file and POST it.

        :param model: A LightGBM-style model exposing save_model().
        :raises Exception: when the server does not answer 200.
        """
        model.save_model(_TEMPFILENAME)
        try:
            with open(_TEMPFILENAME, "r") as f:
                in_mem_model: str = f.read()
        finally:
            # Remove the temp file even when reading fails — the
            # original leaked it on error.
            os.remove(_TEMPFILENAME)
        r = httpx.post(f"{self.server_host}/model", content=bytes(in_mem_model, encoding='utf-8'))
        if r.status_code == 200:
            print("Updating model succeeded!")
        else:
            raise Exception(f"Something went wrong updating the model, status code {r.status_code}")

    def update_config_and_model(self, config: PipeConfig, model: Any):
        """Convenience wrapper: push the config first, then the model."""
        self.update_config(config)
        self.update_model(model)

    def get_prediction(self, data: Dict[str, Any]) -> float:
        """POST a feature dict and return the server's prediction."""
        r = httpx.post(f"{self.server_host}/predict", json=data)
        return r.json()
| Tsoubry/fast-lightgbm-inference | rust-transformer/python/odt/manage.py | manage.py | py | 1,506 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "odt.config.PipeConfig",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "httpx.post",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_nu... |
36021205025 | from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from sendQueries import SendQueriesHandler
from ResponseHandler import ResponseHandler
class HomeHandler(webapp.RequestHandler):
    """Handler for GET / — responds with a plain greeting."""
    def get(self):
        self.response.out.write("Hello!")
# URL routing table mapping request paths to their handler classes.
appRoute = webapp.WSGIApplication( [
    ('/', HomeHandler),
    ('/response', ResponseHandler),
    ('/sendQueries', SendQueriesHandler),
    ], debug=True)
def main():
    """Entry point: run the WSGI application on App Engine."""
    run_wsgi_app(appRoute)


if __name__ == '__main__':
    main()
| stolksdorf/lifetracker | web/home.py | home.py | py | 508 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "google.appengine.ext.webapp.RequestHandler",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.webapp",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.webapp.WSGIApplication",
"line_number": 11,
... |
30827683895 | import os
import logging
from novelwriter.enum import nwItemLayout, nwItemClass
from novelwriter.error import formatException
from novelwriter.common import isHandle, sha256sum
logger = logging.getLogger(__name__)
class NWDoc():
    """Gateway to a single novelWriter document (.nwd) file on disk.

    Handles reading, writing and deleting the file, guards against
    concurrent on-disk modification via sha256 checksums, and parses
    the %%~ meta-data lines stored at the top of the file.
    """

    def __init__(self, theProject, theHandle):
        self.theProject = theProject

        # Internal Variables
        self._theItem = None    # The currently open item
        self._docHandle = None  # The handle of the currently open item
        self._fileLoc = None    # The file location of the currently open item
        self._docMeta = {}      # The meta data of the currently open item
        self._docError = ""     # The latest encountered IO error
        self._prevHash = None   # Previous sha256sum of the document file
        self._currHash = None   # Latest sha256sum of the document file

        if isHandle(theHandle):
            self._docHandle = theHandle

        if self._docHandle is not None:
            self._theItem = self.theProject.projTree[theHandle]

        return

    def __repr__(self):
        return f"<NWDoc handle={self._docHandle}>"

    def __bool__(self):
        # Truthy only when a valid handle resolved to a project item.
        return self._docHandle is not None and bool(self._theItem)

    ##
    #  Class Methods
    ##

    def readDocument(self, isOrphan=False):
        """Read the document specified by the handle set in the
        constructor, capturing potential file system errors and parse
        meta data. If the document doesn't exist on disk, return an
        empty string. If something went wrong, return None.
        """
        self._docError = ""
        if self._docHandle is None:
            logger.error("No document handle set")
            return None

        if self._theItem is None and not isOrphan:
            logger.error("Unknown novelWriter document")
            return None

        docFile = self._docHandle+".nwd"
        logger.debug("Opening document: %s", docFile)

        docPath = os.path.join(self.theProject.projContent, docFile)
        self._fileLoc = docPath

        theText = ""
        self._docMeta = {}
        # Remember the on-disk checksum so writeDocument can detect
        # external edits later.
        self._prevHash = sha256sum(docPath)
        if os.path.isfile(docPath):
            try:
                with open(docPath, mode="r", encoding="utf-8") as inFile:
                    # Check the first <= 10 lines for metadata
                    for i in range(10):
                        inLine = inFile.readline()
                        if inLine.startswith(r"%%~"):
                            self._parseMeta(inLine)
                        else:
                            theText = inLine
                            break

                    # Load the rest of the file
                    theText += inFile.read()

            except Exception as exc:
                self._docError = formatException(exc)
                return None

        else:
            # The document file does not exist, so we assume it's a new
            # document and initialise an empty text string.
            logger.debug("The requested document does not exist")
            return ""

        return theText

    def writeDocument(self, docText, forceWrite=False):
        """Write the document specified by the handle attribute. Handle
        any IO errors in the process. Returns True if successful, False
        if not.
        """
        self._docError = ""
        if self._docHandle is None:
            logger.error("No document handle set")
            return False

        self.theProject.ensureFolderStructure()

        docFile = self._docHandle+".nwd"
        logger.debug("Saving document: %s", docFile)

        docPath = os.path.join(self.theProject.projContent, docFile)
        docTemp = os.path.join(self.theProject.projContent, docFile+"~")

        # Refuse to overwrite a file that changed on disk since it was
        # read, unless the caller explicitly forces the write.
        if self._prevHash is not None and not forceWrite:
            self._currHash = sha256sum(docPath)
            if self._currHash is not None and self._currHash != self._prevHash:
                logger.error("File has been altered on disk since opened")
                return False

        # DocMeta Line
        if self._theItem is None:
            docMeta = ""
        else:
            docMeta = (
                f"%%~name: {self._theItem.itemName}\n"
                f"%%~path: {self._theItem.itemParent}/{self._theItem.itemHandle}\n"
                f"%%~kind: {self._theItem.itemClass.name}/{self._theItem.itemLayout.name}\n"
            )

        # Write to a temp file first so a failed write never clobbers
        # the existing document.
        try:
            with open(docTemp, mode="w", encoding="utf-8") as outFile:
                outFile.write(docMeta)
                outFile.write(docText)
        except Exception as exc:
            self._docError = formatException(exc)
            return False

        # If we're here, the file was successfully saved, so we can
        # replace the temp file with the actual file
        try:
            os.replace(docTemp, docPath)
        except OSError as exc:
            self._docError = formatException(exc)
            return False

        self._prevHash = sha256sum(docPath)
        self._currHash = self._prevHash

        return True

    def deleteDocument(self):
        """Permanently delete a document source file and related files
        from the project data folder.
        """
        self._docError = ""
        if self._docHandle is None:
            logger.error("No document handle set")
            return False

        # Delete both the document and its temp (~) companion.
        chkList = [
            os.path.join(self.theProject.projContent, f"{self._docHandle}.nwd"),
            os.path.join(self.theProject.projContent, f"{self._docHandle}.nwd~"),
        ]

        for chkFile in chkList:
            if os.path.isfile(chkFile):
                try:
                    os.unlink(chkFile)
                    logger.debug("Deleted: %s", chkFile)
                except Exception as exc:
                    self._docError = formatException(exc)
                    return False

        return True

    ##
    #  Getters
    ##

    def getFileLocation(self):
        """Return the file location of the current document.
        """
        return self._fileLoc

    def getCurrentItem(self):
        """Return a pointer to the currently open NWItem.
        """
        return self._theItem

    def getMeta(self):
        """Parse the document meta tag and return the name, parent,
        class and layout meta values.
        """
        theName = self._docMeta.get("name", "")
        theParent = self._docMeta.get("parent", None)
        theClass = self._docMeta.get("class", None)
        theLayout = self._docMeta.get("layout", None)

        return theName, theParent, theClass, theLayout

    def getError(self):
        """Return the last recorded exception.
        """
        return self._docError

    ##
    #  Internal Functions
    ##

    def _parseMeta(self, metaLine):
        """Parse a line from the document starting with the characters
        %%~ that may contain meta data.
        """
        if metaLine.startswith("%%~name:"):
            self._docMeta["name"] = metaLine[8:].strip()

        elif metaLine.startswith("%%~path:"):
            # Format: %%~path: parentHandle/itemHandle
            metaVal = metaLine[8:].strip()
            metaBits = metaVal.split("/")
            if len(metaBits) == 2:
                if isHandle(metaBits[0]):
                    self._docMeta["parent"] = metaBits[0]
                if isHandle(metaBits[1]):
                    self._docMeta["handle"] = metaBits[1]

        elif metaLine.startswith("%%~kind:"):
            # Format: %%~kind: CLASS/LAYOUT (enum member names)
            metaVal = metaLine[8:].strip()
            metaBits = metaVal.split("/")
            if len(metaBits) == 2:
                if metaBits[0] in nwItemClass.__members__:
                    self._docMeta["class"] = nwItemClass[metaBits[0]]
                if metaBits[1] in nwItemLayout.__members__:
                    self._docMeta["layout"] = nwItemLayout[metaBits[1]]

        else:
            logger.debug("Ignoring meta data: '%s'", metaLine.strip())

        return

# END Class NWDoc
| vaelue/novelWriter | novelwriter/core/document.py | document.py | py | 7,928 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "novelwriter.common.isHandle",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
12903245570 | import faust
import uuid
# The Faust application, consuming from a local Kafka broker.
app = faust.App(
    'greetings',
    broker='kafka://localhost:9092',
)
class Greeting(faust.Record, serializer='json', isodates=True):
    """Record schema for messages on the greetings topic."""
    # Human-readable greeting text.
    message: str
    # Identifier of this greeting; declared as a string because the
    # record is JSON-serialized.
    uuid: str
# Kafka topic carrying Greeting records.
greetings_topic = app.topic('greetings', value_type=Greeting)
@app.agent(greetings_topic)
async def get_greetings(greetings):
    """Agent that consumes greetings from the topic and prints each
    message and its uuid to stdout.
    """
    async for greeting in greetings:
        print(greeting.message)
        print(greeting.uuid)
@app.timer(5)
async def produce():
    """Every 5 seconds, send 100 greeting messages to the agent."""
    for i in range(100):
        await get_greetings.send(value={
            "message": f'hello from {i}',
            # Greeting.uuid is declared as str and the record is
            # JSON-serialized; stringify the UUID (raw uuid.UUID
            # objects are not JSON serializable).
            "uuid": str(uuid.uuid1())
        })
if __name__ == '__main__':
    # Hand control to the Faust worker CLI.
    app.main()
| tyao117/faust-fastapi | faust_hello_world.py | faust_hello_world.py | py | 747 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "faust.App",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "faust.Record",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid1",
"line_number": 29,
"usage_type": "call"
}
] |
17661406387 | from collections import defaultdict, deque
from enum import Enum
def read(filename):
    """Parse the instruction file into (opcode, args-tuple) pairs."""
    with open(filename) as handle:
        parsed = [line.strip().split(' ') for line in handle]
    return [(tokens[0], tuple(tokens[1:])) for tokens in parsed]
def isint(exp):
    """Return True when *exp* parses as an integer literal."""
    try:
        int(exp)
    except ValueError:
        return False
    return True


def val(exp, regs):
    """Resolve *exp*: either a literal integer, or a register lookup."""
    return int(exp) if isint(exp) else regs[exp]
class State(Enum):
    """Execution state reported by Program.step()."""
    ENDED = 1    # pc left the program bounds — terminated
    STUCK = 2    # blocked on rcv with an empty input queue
    RUNNING = 3  # made progress this step
class Program(object):
    """One instance of the day-18 "duet" virtual machine.

    Registers default to 0; register 'p' starts as the program id.
    snd pushes to the output queue; rcv pops from the input queue,
    reporting STUCK when it is empty.
    """

    def __init__(self, id, insts, inq, outq):
        self.regs = defaultdict(int)
        self.regs['p'] = id
        self.pc = 0          # program counter — index into insts
        self.insts = insts
        self.inq = inq       # incoming message queue (shared deque)
        self.outq = outq     # outgoing message queue (shared deque)
        self.snd_count = 0   # number of snd instructions executed

    def step(self):
        """Execute one instruction and return the resulting State."""
        # Falling off either end of the program terminates it.
        if not (0 <= self.pc < len(self.insts)):
            return State.ENDED
        op, args = self.insts[self.pc]
        if op == 'snd':
            self.outq.append(val(args[0], self.regs))
            self.pc += 1
            self.snd_count += 1
            return State.RUNNING
        elif op == 'set':
            self.regs[args[0]] = val(args[1], self.regs)
            self.pc += 1
            return State.RUNNING
        elif op == 'add':
            self.regs[args[0]] += val(args[1], self.regs)
            self.pc += 1
            return State.RUNNING
        elif op == 'mul':
            self.regs[args[0]] *= val(args[1], self.regs)
            self.pc += 1
            return State.RUNNING
        elif op == 'mod':
            self.regs[args[0]] = self.regs[args[0]] % val(args[1], self.regs)
            self.pc += 1
            return State.RUNNING
        elif op == 'rcv':
            # Block (STUCK) until the other program sends a value;
            # pc is NOT advanced so the rcv retries next step.
            if len(self.inq) == 0:
                return State.STUCK
            else:
                self.regs[args[0]] = self.inq.popleft()
                self.pc += 1
                return State.RUNNING
        elif op == 'jgz':
            # Jump by args[1] when args[0] evaluates greater than zero.
            x = val(args[0], self.regs)
            if x > 0:
                self.pc += val(args[1], self.regs)
            else:
                self.pc += 1
            return State.RUNNING
def process(prog_a, prog_b, nsteps=100000):
    """Step both programs in lockstep for at most *nsteps* iterations.

    Stops early once neither program can make progress (both ENDED, or
    deadlocked with both STUCK on empty queues) — the original kept
    spinning uselessly through all remaining iterations.
    """
    for _ in range(nsteps):
        res_a = prog_a.step()
        res_b = prog_b.step()
        if res_a is not State.RUNNING and res_b is not State.RUNNING:
            break
# Wire the two programs together: each one's output queue is the
# other's input queue (AoC 2017 day 18, part 2 "duet").
queue_a = deque()
queue_b = deque()
insts = read('input-18.txt')
prog_a = Program(0, insts, queue_a, queue_b)
prog_b = Program(1, insts, queue_b, queue_a)
process(prog_a, prog_b)
# Part 2 answer: how many times program 1 executed snd.
print(prog_b.snd_count)
{
"api_name": "enum.Enum",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "collections.deque... |
1090949893 | from keras.applications import resnet50
from keras.applications import mobilenetv2
from keras.applications import mobilenet
from keras.applications import vgg19
# from keras_squeezenet import SqueezeNet
import conv.networks.get_vgg16_cifar10 as gvc
import conv.networks.gen_conv_net as gcn
# import conv.networks.MobileNet as mobilenet
import conv.networks.MobileNet_for_mobile as mobilenet_for_mobile
import conv.networks.VGG19_for_mobile as vgg19_for_mobile
import conv.networks.SqueezeNet as sqn
import conv.networks.DenseNet as dn
import conv.networks.ResNet50 as rn50
from keras_applications.imagenet_utils import decode_predictions
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from keras.engine.input_layer import Input
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
from keras.models import Model
from keras import optimizers
import keras
import numpy as np
from os import listdir
from os.path import isfile, join
import os
import matplotlib.image as mpimg
import time
# SqueezeNet: https://github.com/rcmalli/keras-squeezenet/blob/master/examples/example_keras_squeezenet.ipynb
# https://keras.io/applications/
def get_all_nets(network_name, include_top=True, num_filter=4):
    """Return a pretrained (ImageNet weights) model by name.

    :param network_name: One of "ResNet50", "MobileNetV2", "MobileNet",
        "MobileNet_for_mobile", "VGG19", "VGG19_for_mobile",
        "SqueezeNet".
    :param include_top: Keep the classification head; when True the
        model is also compiled with RMSprop for training.
    :param num_filter: Filter count for the *_for_mobile variants.
    :return: The (optionally compiled) Keras model.
    """
    if(network_name=="ResNet50"):
        # BUG FIX: keras.applications.resnet50 only provides ResNet50;
        # ResNet101 does not exist there and raised AttributeError.
        model = resnet50.ResNet50(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
        # if(include_top==False):
        #     model.pop()
    elif(network_name=="MobileNetV2"):
        model = mobilenetv2.MobileNetV2(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
    elif(network_name=="MobileNet"):
        model = mobilenet.MobileNet(weights='imagenet',
            include_top=include_top,# pooling='avg',
            input_shape=(224, 224, 3))
    elif(network_name=="MobileNet_for_mobile"):
        model = mobilenet_for_mobile.MobileNet(
            include_top=include_top, weights='imagenet',
            input_shape=(224, 224, 3), num_filter=num_filter)
    elif(network_name=="VGG19"):
        model = vgg19.VGG19(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
    elif(network_name=="VGG19_for_mobile"):
        model = vgg19_for_mobile.VGG19(
            include_top=include_top, weights='imagenet',
            input_shape=(224, 224, 3), num_filter=num_filter)
    elif(network_name=="SqueezeNet"):
        # NOTE(review): the SqueezeNet import is commented out at the
        # top of this file, so this branch raises NameError as-is —
        # confirm the intended keras_squeezenet dependency.
        model = SqueezeNet(weights='imagenet',
            include_top=include_top, input_shape=(224, 224, 3))
        # if(include_top==False):
        #     model.pop()
        #     model.pop()
        #     model.pop()
        #     model.pop()
    if(include_top):
        opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
        # Let's train the model using RMSprop
        model.compile(loss='categorical_crossentropy',
            optimizer=opt,
            metrics=['accuracy'])
    return model
def get_nets_wo_weights(network_name, num_classes, include_top=False,
        input_shape=(32, 32, 3), num_filter=4, use_bias=False):
    """Build an uninitialised (weights=None) network by name, optionally
    append a Flatten/Dense(256)/Dropout + softmax head, and compile it
    with RMSprop.

    :param network_name: Architecture id: "ResNet50", "DenseNet121",
        "MobileNetV2", "MobileNet", "MobileNet_for_mobile", "VGG19",
        "SqueezeNet", "vgg" or "conv".
    :param num_classes: Number of output classes.
    :param include_top: When False, the small classifier head is
        appended on top of the base model's output.
    :param input_shape: Input tensor shape.
    :param num_filter: Filter/width parameter forwarded to the custom
        network builders.
    :param use_bias: Whether the custom networks use bias terms.
    :return: The compiled Keras model.
    """
    if(network_name=="ResNet50"):
        model = rn50.ResNet50(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes, num_vert_filters=num_filter)
    elif(network_name=="DenseNet121"):
        model = dn.DenseNet121(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="MobileNetV2"):
        model = mobilenetv2.MobileNetV2(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes)
    elif(network_name=="MobileNet"):
        model = mobilenet.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="MobileNet_for_mobile"):
        model = mobilenet_for_mobile.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="VGG19"):
        model = vgg19.VGG19(input_shape=input_shape,
            include_top=include_top, weights=None,
            classes=num_classes)
    elif(network_name=="SqueezeNet"):
        model = sqn.SqueezeNet(input_shape=input_shape,
            include_top=include_top, weights=None, num_filter=num_filter,
            use_bias=use_bias, classes=num_classes)
    elif(network_name=="vgg"):
        model = gvc.get_conv_vert_net(x_shape=input_shape,
            num_classes=num_classes, num_vert_filters=num_filter,
            use_bias=use_bias)
    elif(network_name=="conv"):
        model = gcn.get_conv_vert_net(input_shape=input_shape,
            num_classes=num_classes,
            num_extra_conv_layers=2, num_ver_filter=num_filter,
            use_bias=use_bias)
    if(include_top == False):
        # Attach the small classification head on the base features.
        x = model.output
        # x = keras.layers.GlobalAveragePooling2D()(x)
        x = Flatten()(x)
        x = Dense(256, activation='relu')(x)
        # x = Activation('relu')(x)
        x = Dropout(0.5)(x)
        # x = Dense(num_output)(x)
        # x = Activation('softmax')(x)
        x = keras.layers.Dense(num_classes, activation='softmax',
            use_bias=True, name='Logits')(x)
        full_model = Model(inputs = model.input,outputs = x)
    else:
        full_model = model
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Let's train the model using RMSprop
    full_model.compile(loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy'])
    return full_model
def get_box_nets(network_name, num_classes, include_top=False,
        input_shape=(32, 32, 3), num_filter=4, num_layer=4, use_bias=False):
    """Build a "box" variant of a network — configurable width
    (*num_filter*) and depth (*num_layer*) — without pretrained
    weights, and compile it with RMSprop.

    :param network_name: Architecture id (see branches below).
    :param num_classes: Number of output classes.
    :param include_top: Whether the base builder keeps its own head.
    :param input_shape: Input tensor shape.
    :param num_filter: Width parameter forwarded to custom builders.
    :param num_layer: Depth parameter forwarded to custom builders.
    :param use_bias: Whether the custom networks use bias terms.
    :return: The compiled Keras model.
    """
    if(network_name=="ResNet50"):
        model = resnet50.ResNet50(include_top=include_top,
            input_shape=input_shape, weights=None)
        # if(include_top==False):
        #     model.pop()
    elif(network_name=="DenseNet121"):
        model = dn.DenseNet121(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter,
            num_layer=num_layer)
    elif(network_name=="MobileNetV2"):
        model = mobilenetv2.MobileNetV2(include_top=include_top,
            input_shape=input_shape, weights=None,
            classes=num_classes)
    elif(network_name=="MobileNet"):
        model = mobilenet.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter, num_layers=num_layer)
    elif(network_name=="MobileNet_for_mobile"):
        model = mobilenet_for_mobile.MobileNet(
            include_top=include_top, input_shape=input_shape, weights=None,
            classes=num_classes, num_filter=num_filter)
    elif(network_name=="VGG19"):
        model = vgg19.VGG19(input_shape=input_shape,
            include_top=include_top, weights=None,
            classes=num_classes)
    elif(network_name=="SqueezeNet"):
        model = sqn.SqueezeNet(input_shape=input_shape,
            include_top=include_top, weights=None, num_filter=num_filter,
            use_bias=use_bias, classes=num_classes,
            num_layers=num_layer)
    elif(network_name=="vgg"):
        model = gvc.get_conv_vert_net(x_shape=input_shape,
            num_classes=num_classes, num_vert_filters=num_filter,
            use_bias=use_bias)
    elif(network_name=="conv"):
        # BUG FIX: this branch referenced the undefined name
        # "num_layers" (NameError) — the parameter is num_layer.
        model = gcn.get_conv_vert_net(input_shape=input_shape,
            num_classes=num_classes,
            num_extra_conv_layers=num_layer, num_ver_filter=num_filter,
            use_bias=use_bias)
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy'])
    return model
def preprocess_image(network_name, x):
    """Apply the architecture-specific input preprocessing to *x*.

    Unknown network names leave the input unchanged, matching the
    fall-through behaviour of the original if/elif chain.
    """
    preprocessors = {
        "ResNet50": resnet50.preprocess_input,
        "MobileNetV2": mobilenetv2.preprocess_input,
        "MobileNet": mobilenet.preprocess_input,
        "VGG19": vgg19.preprocess_input,
        "SqueezeNet": imagenet_utils.preprocess_input,
    }
    chosen = preprocessors.get(network_name)
    return chosen(x) if chosen is not None else x
def preprocess_image_fn(network_name):
    """Return the architecture-specific input-preprocessing function.

    :param network_name: Architecture id.
    :return: The preprocess_input callable for that architecture.
    :raises ValueError: for an unknown network name. (The original
        fell through and returned an unbound local, raising an
        accidental NameError instead.)
    """
    preprocessors = {
        "ResNet50": resnet50.preprocess_input,
        "MobileNetV2": mobilenetv2.preprocess_input,
        "MobileNet": mobilenet.preprocess_input,
        "VGG19": vgg19.preprocess_input,
        "SqueezeNet": imagenet_utils.preprocess_input,
    }
    try:
        return preprocessors[network_name]
    except KeyError:
        raise ValueError("Unknown network name: %s" % network_name)
def decodepred(network_name, preds):
    """Decode raw class probabilities into the top-3
    (class, description, probability) triples for one sample.

    :param network_name: Architecture id selecting the decoder.
    :param preds: Raw prediction array from model.predict().
    :return: The decoded predictions (unchanged for unknown names).
    """
    if(network_name=="ResNet50"):
        preds = resnet50.decode_predictions(preds, top=3)[0]
    elif(network_name=="MobileNetV2"):
        preds = mobilenetv2.decode_predictions(preds, top=3)[0]
    elif(network_name=="MobileNet"):
        preds = mobilenet.decode_predictions(preds, top=3)[0]
    elif(network_name=="VGG19"):
        preds = vgg19.decode_predictions(preds, top=3)[0]
    elif(network_name=="SqueezeNet"):
        preds = imagenet_utils.decode_predictions(preds, top=3)[0]
    # BUG FIX: the original returned the undefined name "x",
    # raising NameError; return the decoded predictions instead.
    return preds
def analyse_model(model):
    """Print a structural summary of *model*: its API surface, each
    layer's shapes, and every layer's weight tensors (debugging aid)."""
    print("All functions ", dir(model))
    print("Summary model ", model.summary())
    print("Layer details ", dir(model.layers[2]))
    for layer_idx, layer in enumerate(model.layers):
        print("Length in each layer ", layer_idx, layer.name,
              layer.input_shape, layer.output_shape,
              len(layer.weights))
        # Iterating an empty weight list simply prints nothing, so no
        # separate emptiness check is needed.
        for weight_idx, weight in enumerate(layer.weights):
            print("Weights ", weight_idx, weight.shape)
    return
def add_classifier(base_model, num_output):
    """Freeze *base_model* and append a GlobalAveragePooling + softmax
    head with *num_output* units, then compile with RMSprop.

    NOTE(review): the head uses softmax over num_output units but the
    model is compiled with binary_crossentropy — for num_output > 2
    this combination looks wrong (categorical_crossentropy expected);
    confirm against the training code.
    """
    # Freeze the convolutional base so only the new head trains.
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = keras.layers.GlobalAveragePooling2D()(x)
    # x = Dense(16, kernel_regularizer=regularizers.l2(0.01))(x)
    # x = Activation('relu')(x)
    # x = Dropout(0.5)(x)
    # x = Dense(num_output)(x)
    # x = Activation('softmax')(x)
    x = keras.layers.Dense(num_output, activation='softmax',
        use_bias=True, name='Logits')(x)
    model = Model(inputs = base_model.input,outputs = x)
    # initiate RMSprop optimizer
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Let's train the model using RMSprop
    model.compile(loss='binary_crossentropy',
        optimizer=opt,
        metrics=['accuracy'])
    return model
def get_all_prediction(image_filelist):
    """Run the module-global model over each file in *image_filelist*
    and return the raw prediction arrays.

    NOTE(review): this relies on the globals `model` and
    `imagenet_path` (assigned in the __main__ block) and on an `image`
    module that is never imported at the top of this file (only
    img_to_array is) — verify before running.
    """
    prediction_list = []
    for filename in image_filelist:
        # img = image.load_img(os.path.join(imagenet_path, filename), target_size=(224, 224))
        img = image.load_img(os.path.join(imagenet_path, filename), target_size=(227, 227)) # Squeezenet
        # img1 = mpimg.imread(os.path.join(imagenet_path, filename))
        # print(img1.shape)
        x = image.img_to_array(img)
        # Model expects a batch dimension.
        x = np.expand_dims(x, axis=0)
        x = imagenet_utils.preprocess_input(x)
        preds = model.predict(x)
        # decode the results into a list of tuples (class, description, probability)
        # (one such list for each sample in the batch)
        print('Predicted:', filename, imagenet_utils.decode_predictions(preds, top=3)[0])
        print("Pred values ", np.argmax(preds))
        # Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
        prediction_list.append(preds)
    return prediction_list
if __name__ == '__main__':
    # Architectures to inspect; extend the list to compare more nets.
    network_types_list = ["MobileNetV2"]#, "ResNet50", "MobileNetV2", "VGG19"] # , "SqueezeNet"
    for network_type in network_types_list:
        print("Network Type ", network_type)
        model = get_all_nets(network_type, include_top=True)
        analyse_model(model)
        # model = get_all_nets(network_type, include_top=False)
        # model = add_classifier(model)

    # Location of the ILSVRC2012 validation images.
    imagenet_path = "/mnt/additional/aryan/imagenet_validation_data/ILSVRC2012_img_val/"
    # http://www.image-net.org/challenges/LSVRC/2012/
    # https://cv-tricks.com/tensorflow-tutorial/keras/
    # Finding actual predictions
    # http://machinelearninguru.com/deep_learning/data_preparation/hdf5/hdf5.html
    image_filelist = [f for f in listdir(imagenet_path) if isfile(join(imagenet_path, f))]
    print("Number of files ", len(image_filelist))
    # Time the predictions over the first ten images only.
    start_time = time.time()
    get_all_prediction(image_filelist[:10])
    total_time = time.time() - start_time
    print("Total prediction time ", total_time)
    print("File list ", image_filelist[:10])
{
"api_name": "keras.applications.resnet50.ResNet101",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "keras.applications.resnet50",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "keras.applications.mobilenetv2.MobileNetV2",
"line_number": 53,
"usage_... |
24423662765 | #! /usr/bin/env python3
from typing import Any, Dict
import rospy
import dynamic_reconfigure.server
from example_package_with_dynamic_reconfig.cfg import ExampleDynamicParametersConfig
def dynamic_reconfigure_callback(config: Dict[str, Any], level: Any) -> Dict[str, Any]:
    """Dynamic-reconfigure callback: accept every parameter update
    unchanged (returning the config as-is confirms it)."""
    return config
if __name__ == "__main__":
    try:
        # Only warnings and above are logged for this example node.
        rospy.init_node("package_with_dynamic_reconfig", log_level=rospy.WARN)
        # Serve the example parameters; the callback runs on every update.
        dynamic_reconfigure_srv = dynamic_reconfigure.server.Server(ExampleDynamicParametersConfig,
                                                                    dynamic_reconfigure_callback)
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Shutting down.")
| keivanzavari/dynamic-reconfigure-editor | example/example_package_with_dynamic_reconfig/src/example_package_with_dynamic_reconfig/node.py | node.py | py | 710 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rospy.init_node",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rospy.WARN",
"line_number": ... |
71971273469 | import logging
from kubernetes import client
from kubernetes.client.models.v1_resource_requirements import V1ResourceRequirements
from kubeflow.fairing.constants import constants
logger = logging.getLogger(__name__)
def get_resource_mutator(cpu=None, memory=None, gpu=None, gpu_vendor='nvidia'):
    """The mutator for getting the resource setting for pod spec.

    The useful example:
    https://github.com/kubeflow/fairing/blob/master/examples/train_job_api/main.ipynb

    :param cpu: Limits and requests for CPU resources (Default value = None)
    :param memory: Limits and requests for memory, in GB (Default value = None)
    :param gpu: Limits for GPU (Default value = None)
    :param gpu_vendor: Default value is 'nvidia', also can be set to 'amd'.
    :returns: object: The mutator function for setting cpu and memory in pod spec.
    """
    def _resource_mutator(kube_manager, pod_spec, namespace): #pylint:disable=unused-argument
        if cpu is None and memory is None and gpu is None:
            return
        if pod_spec.containers and len(pod_spec.containers) >= 1:
            # All cloud providers specify their instance memory in GB
            # so it is preferable for the user to specify memory in GB
            # and we convert it to the Gi that K8s needs.
            limits = {}
            if cpu:
                limits['cpu'] = cpu
            if memory:
                memory_gib = "{}Gi".format(round(memory/1.073741824, 2))
                limits['memory'] = memory_gib
            if gpu:
                limits[gpu_vendor + '.com/gpu'] = gpu
            if pod_spec.containers[0].resources:
                # BUG FIX: the old code reset limits only when they were
                # already truthy and then wrote into them, so with
                # resources.limits == None it raised TypeError. Replace
                # the limits dict unconditionally instead.
                pod_spec.containers[0].resources.limits = limits
            else:
                pod_spec.containers[0].resources = V1ResourceRequirements(limits=limits)
    return _resource_mutator
def mounting_pvc(pvc_name, pvc_mount_path=constants.PVC_DEFAULT_MOUNT_PATH):
    """The function has been deprecated, please use `volume_mounts`.

    :param pvc_name: Name of the persistent volume claim to mount.
    :param pvc_mount_path: Mount path inside the container.
    :return: A pod-spec mutator (see volume_mounts).
    """
    # Kept only for backward compatibility; delegates to volume_mounts.
    logger.warning("The function mounting_pvc has been deprecated, \
please use `volume_mounts`")
    return volume_mounts('pvc', pvc_name, mount_path=pvc_mount_path)
def volume_mounts(volume_type, volume_name, mount_path, sub_path=None):
    """The function for pod_spec_mutators to mount volumes.

    :param volume_type: support type: secret, config_map and pvc
    :param volume_name: The name of the volume
    :param mount_path: Path for the volume mounts to.
    :param sub_path: SubPath for the volume mounts to (Default value = None).
    :returns: object: function for mounting the volume into pods.
    :raises RuntimeError: (from the returned mutator) when volume_type
        is not one of the supported kinds.
    """
    mount_name = str(constants.DEFAULT_VOLUME_NAME) + volume_name

    def _volume_mounts(kube_manager, pod_spec, namespace): #pylint:disable=unused-argument
        # Attach the mount point to the pod's first container.
        volume_mount = client.V1VolumeMount(
            name=mount_name, mount_path=mount_path, sub_path=sub_path)
        if pod_spec.containers[0].volume_mounts:
            pod_spec.containers[0].volume_mounts.append(volume_mount)
        else:
            pod_spec.containers[0].volume_mounts = [volume_mount]

        # Build the matching volume definition for the pod.
        if volume_type == 'pvc':
            volume = client.V1Volume(
                name=mount_name,
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_name))
        elif volume_type == 'secret':
            volume = client.V1Volume(
                name=mount_name,
                secret=client.V1SecretVolumeSource(secret_name=volume_name))
        elif volume_type == 'config_map':
            volume = client.V1Volume(
                name=mount_name,
                config_map=client.V1ConfigMapVolumeSource(name=volume_name))
        else:
            # BUG FIX: typo in the error message ("Unsupport type").
            raise RuntimeError("Unsupported type %s" % volume_type)

        if pod_spec.volumes:
            pod_spec.volumes.append(volume)
        else:
            pod_spec.volumes = [volume]

    return _volume_mounts
def add_env(env_vars):
    """The function for pod_spec_mutators to add custom environment vars.

    :param env_vars: dict of custom environment vars.
    :returns: object: function that adds the environment vars to pods.
    """
    def _add_env(kube_manager, pod_spec, namespace):  # pylint:disable=unused-argument
        new_envs = [client.V1EnvVar(name=key, value=val)
                    for key, val in env_vars.items()]
        if not (pod_spec.containers and len(pod_spec.containers) >= 1):
            return
        container = pod_spec.containers[0]
        if container.env:
            container.env.extend(new_envs)
        else:
            container.env = new_envs
    return _add_env
def get_node_selector(node_selector):
    """This function for pod_spec_mutators designates a node selector.

    :param node_selector: dict of node selection constraints
    :return: object: The mutator function for setting the node selector
    """
    def _node_selector(kube_manager, pod_spec, namespace):  # pylint:disable=unused-argument
        if node_selector is None:
            return
        # node_selector lives on the pod spec itself; the containers check
        # just guards against an empty/uninitialised spec.
        if pod_spec.containers and len(pod_spec.containers) >= 1:
            pod_spec.node_selector = node_selector
    return _node_selector
| kubeflow/fairing | kubeflow/fairing/kubernetes/utils.py | utils.py | py | 5,342 | python | en | code | 336 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "kubernetes.client.models.v1_resource_requirements.V1ResourceRequirements",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "kubeflow.fairing.constants.constants.PVC_DEFAULT_MOUNT_... |
20785922085 | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
# Auto-discover each installed app's admin module so ModelAdmin
# registrations take effect.
admin.autodiscover()

# Old-style (Django <= 1.7) ``patterns`` URLconf: views are given as
# strings relative to the 'reviews.views' prefix.
urlpatterns = patterns('reviews.views',
    # Landing page and course catalogue.
    url(r'^$', 'home', name='home'),
    url(r'^courses/$', 'courses', name='courses'),
    url(r'^courses/find/$', 'find_course', name='find_course'),
    url(r'^courses/search/$', 'search', name='search'),
    url(r'^courses/add/$', 'add_course', name='add_course'),
    # Course detail and the review flow (review id only when editing).
    url(r'^courses/(?P<course_id>\d+)/$', 'course', name="course"),
    url(r'^courses/(?P<course_id>\d+)/choose_class/$', 'choose_class_to_review', name='choose_class'),
    url(r'^courses/(?P<class_id>\d+)/review/$', 'review_course', name="review_course"),
    url(r'^courses/(?P<class_id>\d+)/review/(?P<review_id>\d+)/edit/$', 'review_course', name="edit_review"),
    url(r'^courses/(?P<course_id>\d+)/edit/$', 'edit_course', name="edit_course"),
    # Browsing by department, instructor, tag and student.
    url(r'^depts/$', 'departments', name='departments'),
    url(r'^depts/(?P<dept_abb>.+)/$', 'department', name='department'),
    url(r'^instructors/$', 'instructors', name='instructors'),
    url(r'^instructors/add/$', 'add_instructor', name='add_instructor'),
    url(r'^instructors/(?P<instructor_id>\d+)/$', 'instructor', name='instructor'),
    url(r'^tags/$', 'tags', name='tags'),
    url(r'^tags/(?P<tag_name>\w+)/$', 'tag', name='tag'),
    url(r'^allreviews/$', 'reviews', name='reviews'),
    url(r'^students/$', 'students', name='students'),
    url(r'^students/(?P<student_id>\d+)/$', 'student', name='student'),
    # Session management.
    url(r'^login/$', 'login', name='login'),
    url(r'^logout/$', 'logout_page', name='logout'),
)

# Admin site mounted under /admin/ with no view prefix.
urlpatterns += patterns('',
    url(r'^admin/', include(admin.site.urls)),
)
| aldeka/ClassShare | classshare/urls.py | urls.py | py | 1,746 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.defaults.patterns",
"line_number": 6,
"usage_type": "call"
},
{... |
16475584397 | import sqlite3
from sqlite3 import Error
class Data():
    """sqlite3 access layer for the forensic file/directory inventory.

    All value substitution now uses parameterized queries (``?``
    placeholders) instead of string formatting, closing the SQL-injection
    holes of the original implementation.  Column names used in ORDER BY
    cannot be bound as parameters and are still interpolated; callers must
    pass trusted values there.

    Error contract (kept from the original interface): a failing method
    stores the exception (or its message) in a private attribute and
    returns it; the attribute is never cleared on success, so a method may
    return a stale error from an earlier call.
    """

    __error = None   # last exception / message seen by any method
    __result = None  # last fetch result

    # Shared join used by every file-listing select.
    _SELECT_FILES = ("SELECT directory.name as loc, directory.id_directory, "
                     "file.name as file, file.permision, file.Size, file.date "
                     "FROM directory, file "
                     "WHERE directory.id_directory=file.id_directory")

    def __init__(self, db):
        """Open *db* and keep one shared connection/cursor pair."""
        try:
            # check_same_thread=False: the connection is shared across threads.
            self.con = sqlite3.connect(db, check_same_thread=False)
            self.cur = self.con.cursor()
        except Error as e:
            print(e)

    def clean_db(self):
        """Empty the file/directory tables and reset their AUTOINCREMENT counters."""
        try:
            for statement in ("DELETE FROM file;",
                              "DELETE FROM SQLITE_SEQUENCE WHERE name='file';",
                              "DELETE FROM directory;",
                              "DELETE FROM SQLITE_SEQUENCE WHERE name='directory';"):
                self.cur.execute(statement)
                self.con.commit()
        except Error as e:
            self.__error = e
        return self.__error

    def create_tables(self):
        """Create the four schema tables (directory, file, evidence, pull_log)."""
        tb_directory = ('CREATE TABLE "directory" ("id_directory" INTEGER NOT NULL '
                        'PRIMARY KEY AUTOINCREMENT, "name" TEXT)')
        tb_evidence = ('CREATE TABLE "evidence" ("case_number" INTEGER, '
                       '"examiner_name" TEXT, "description" TEXT, "note" TEXT)')
        tb_pull_log = ('CREATE TABLE "pull_log" ("id_log" INTEGER PRIMARY KEY '
                       'AUTOINCREMENT, "file" TEXT, "from" TEXT, "to" TEXT, '
                       '"md5_source" TEXT, "sha1_source" TEXT, "date" TEXT )')
        tb_file = ('CREATE TABLE "file" ("id_file" INTEGER NOT NULL PRIMARY KEY '
                   'AUTOINCREMENT, "id_directory" INTEGER NOT NULL, "name" TEXT, '
                   '"permision" TEXT, "date" TEXT, "Size" REAL)')
        try:
            self.cur.execute(tb_directory)
            self.cur.execute(tb_file)
            self.cur.execute(tb_evidence)
            self.cur.execute(tb_pull_log)
        except Error as e:
            self.__error = e
        return self.__error

    def insert_log_pull(self, file, from_path, to_path, md5_source, sha1_source, date):
        """Record one file-pull operation."""
        try:
            self.cur.execute(
                "INSERT INTO pull_log (`file`, `from`, `to`, md5_source, "
                "sha1_source, `date`) VALUES (?, ?, ?, ?, ?, ?)",
                (file, from_path, to_path, md5_source, sha1_source, date))
            self.con.commit()
        except Error as e:
            self.__error = e
        return self.__error

    def insert_evidence(self, case_number, examiner_name, description, note):
        """Store the evidence/case header row."""
        try:
            self.cur.execute(
                "INSERT INTO evidence (case_number, examiner_name, description, "
                "note) VALUES (?, ?, ?, ?)",
                (case_number, examiner_name, description, note))
            self.con.commit()
        except Error as e:
            self.__error = e
        return self.__error

    def select_evidence(self):
        """Return the first evidence row, or None on error."""
        try:
            self.cur.execute("SELECT * from evidence")
            self.__result = self.cur.fetchone()
            return self.__result
        except Error as e:
            print(e)

    def select_pull_log(self):
        """Return every pull-log row, or None on error."""
        try:
            self.cur.execute("SELECT * from pull_log")
            self.__result = self.cur.fetchall()
            return self.__result
        except Error as e:
            print(e)

    def select_all_data(self, order):
        """List every file joined with its directory, sorted by *order* DESC.

        *order* is a column name, interpolated verbatim (trusted callers only).
        """
        try:
            self.cur.execute(self._SELECT_FILES + " ORDER BY " + order + " DESC")
            self.__result = self.cur.fetchall()
            return self.__result
        except Exception as e:
            self.__error = e.args[0]
            return self.__error

    def select_by_extention(self, ext, order):
        """List files whose name contains *ext*, sorted by *order* DESC."""
        try:
            self.cur.execute(
                self._SELECT_FILES + " and file.name like ? ORDER BY " + order + " DESC",
                ('%' + ext + '%',))
            self.__result = self.cur.fetchall()
            return self.__result
        except Exception as e:
            self.__error = e.args[0]
            return self.__error

    def insert_dir(self, dir):
        """Insert a directory row; returns the stored error (None when clean)."""
        try:
            self.cur.execute('INSERT INTO `directory` (`name`) VALUES (?)', (dir,))
            self.con.commit()
        except Exception as e:
            self.__error = e.args[0]
        return self.__error

    def insert_sub_dir(self, id_dir, name):
        """Insert a sub-directory row (the sub_directory table is created elsewhere)."""
        try:
            self.cur.execute(
                'INSERT INTO `sub_directory` (`id_directory`, `name`) VALUES (?, ?)',
                (id_dir, name))
            self.con.commit()
        except Exception as e:
            self.__error = e.args[0]
        return self.__error

    def insert_file(self, id_dir, name, permision, date, size):
        """Insert one file row under directory *id_dir*."""
        try:
            self.cur.execute(
                'INSERT INTO `file` (`id_directory`, `name`, `permision`, `date`, '
                '`size`) VALUES (?, ?, ?, ?, ?)',
                (id_dir, name, permision, date, size))
            self.con.commit()
        except Exception as e:
            self.__error = e.args[0]
        return self.__error

    def select_name_by_id_dir(self, id_dir):
        """Names of sub-directories under *id_dir* (requires the sub_directory table)."""
        try:
            self.cur.execute(
                'SELECT `name` FROM sub_directory WHERE id_directory = ?', (id_dir,))
            self.__result = self.cur.fetchall()
            return self.__result
        except Exception as e:
            self.__error = e.args[0]
            return self.__error

    def select_name_dir_subDir(self, id_dir):
        """(directory name, sub-directory name) pairs for *id_dir*."""
        try:
            self.cur.execute(
                'SELECT directory.`name`, sub_directory.name '
                'FROM sub_directory, `directory` '
                'WHERE sub_directory.id_directory=directory.id_directory '
                'and directory.id_directory = ?', (id_dir,))
            self.__result = self.cur.fetchall()
            return self.__result
        except Exception as e:
            self.__error = e.args[0]
            return self.__error

    def select_id_dir_by_name(self, name):
        """Return [(id_directory,), ...] for directories named *name*."""
        try:
            self.cur.execute(
                'SELECT `id_directory` FROM directory WHERE name = ?', (name,))
            self.__result = self.cur.fetchall()
            return self.__result
        except Exception as e:
            self.__error = e.args[0]
            return self.__error

    def search(self, key, order):
        """Free-text search over file name, file date and directory name.

        The three LIKE patterns are parameterized; *order* is a trusted
        column name interpolated into ORDER BY.  The AND/OR precedence
        matches the original query exactly.
        """
        try:
            pattern = '%' + key + '%'
            self.cur.execute(
                self._SELECT_FILES
                + " AND file.name like ? OR file.date like ? OR directory.name like ?"
                + " GROUP BY id_file ORDER BY " + order + " DESC",
                (pattern, pattern, pattern))
            self.__result = self.cur.fetchall()
            return self.__result
        except Exception as e:
            self.__error = e.args[0]
            return self.__error
| madePersonal/Android_forensic_tools | Data.py | Data.py | py | 6,992 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sqlite3.Error",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sqlite3.Error",
"line... |
3480167544 | import json
import boto3
from smart_open import smart_open, codecs
from ConfigParser import ConfigParser
import psycopg2
def publish_message(producerInstance, topic_name, key, value):
    """Send one keyed message to *topic_name* and flush the producer."""
    try:
        producerInstance.produce(topic_name, key=key, value=value)
        producerInstance.flush()
    except Exception as ex:
        print('Exception in publishing message')
        print(str(ex))
    else:
        print('Message published successfully.')
def config(filename='database.ini', section='postgresql'):
    """Read the connection settings of *section* from an ini file.

    Returns a dict mapping option names to string values; raises an
    Exception when the section is missing from the file.
    """
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    return {name: value for name, value in parser.items(section)}
def insert_data(finaldict, tablename):
    """Bulk-insert a list of row dicts into *tablename* via psycopg2.

    Builds one multi-row INSERT whose column list comes from the keys of
    the first dict; every dict is assumed to share the same keys/order.
    NOTE(review): tablename and column names are interpolated, not
    parameterized — callers must pass trusted values.
    """
    conn = None
    try:
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        curs = conn.cursor()
        # mogrify renders the fully escaped SQL client-side: one "%s"
        # placeholder per row, each bound to a tuple of that row's values.
        query = curs.mogrify("INSERT INTO {} ({}) VALUES {}".format(
            tablename,
            ', '.join(finaldict[0].keys()),
            ', '.join(["%s"] * len(finaldict))
        ), [tuple(v.values()) for v in finaldict])
        print(query)
        curs.execute(query)
        conn.commit()
        curs.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        # Always release the connection, even on failure.
        if conn is not None:
            conn.close()
def get_event_files(tableprefix):
    # List all objects in the bucket whose key starts with *tableprefix*
    # (relies on the module-level ``my_bucket`` boto3 resource).
    return list(my_bucket.objects.filter(Prefix=tableprefix))
# S3 access: the high-level resource API is used for listing; the low-level
# client is also created but only the resource is used below.
client = boto3.client('s3')
resource = boto3.resource('s3')
my_bucket = resource.Bucket('gdelt-sample-data')

# Object listings for the three GDELT feeds.
events_files = get_event_files("events")
gkg_files = get_event_files("gkg")
mentions_files = get_event_files("mentions")

# Stream-decode the first file of each feed as UTF-8 text.
gkg_obj = codecs.getreader('utf-8')(gkg_files[0].get()['Body'])
event_obj = codecs.getreader('utf-8')(events_files[0].get()['Body'])
mention_obj = codecs.getreader('utf-8')(mentions_files[0].get()['Body'])
events_columns = ['GlobalEventID', 'Day', 'MonthYear', 'Year', 'FractionDate',
'Actor1Code', 'Actor1Name', 'Actor1CountryCode',
'Actor1KnownGroupCode', 'Actor1EthnicCode',
'Actor1Religion1Code', 'Actor1Religion2Code',
'Actor1Type1Code', 'Actor1Type2Code', 'Actor1Type3Code',
'Actor2Code', 'Actor2Name', 'Actor2CountryCode',
'Actor2KnownGroupCode', 'Actor2EthnicCode',
'Actor2Religion1Code', 'Actor2Religion2Code',
'Actor2Type1Code', 'Actor2Type2Code', 'Actor2Type3Code',
'IsRootEvent', 'EventCode', 'EventBaseCode',
'EventRootCode', 'QuadClass', 'GoldsteinScale',
'NumMentions', 'NumSources', 'NumArticles', 'AvgTone',
'Actor1Geo_Type', 'Actor1Geo_Fullname',
'Actor1Geo_CountryCode', 'Actor1Geo_ADM1Code',
'Actor1Geo_ADM2Code', 'Actor1Geo_Lat', 'Actor1Geo_Long',
'Actor1Geo_FeatureID', 'Actor2Geo_Type',
'Actor2Geo_Fullname', 'Actor2Geo_CountryCode',
'Actor2Geo_ADM1Code', 'Actor2Geo_ADM2Code',
'Actor2Geo_Lat', 'Actor2Geo_Long', 'Actor2Geo_FeatureID',
'ActionGeo_Type', 'ActionGeo_Fullname',
'ActionGeo_CountryCode', 'ActionGeo_ADM1Code',
'ActionGeo_ADM2Code', 'ActionGeo_Lat', 'ActionGeo_Long',
'ActionGeo_FeatureID', 'DATEADDED', 'SOURCEURL']
gkg = ["recordid","date" , "srccollectionidentifier","srccommonname","documentid","counts","countsv1","themes","enhancedthemes",
"locations", "enhancedlocation","persons","enhancedpersons","organizations","enhancedorganizations","tone","enhanceddates",
"gcam","sharingimage","relatedimages", "socialimageembeds", "socialvideoembeds", "quotations", "allnames", "amounts","translationinfo",
"extrasxml"]
mentions = ["GlobalEventID","EventTimeDate","MentionTimeDate","MentionType","MentionSourceName","MentionIdentifier","SentenceID",
"Actor1CharOffset","Actor2CharOffset","ActionCharOffset","InRawText","Confidence","MentionDocLen","MentionDocTone"]
def _parse_records(stream, columns):
    """Parse tab-separated GDELT lines into dicts keyed by *columns*.

    Rows whose field count does not match the schema length are skipped,
    matching the original hard-coded 27/61/14 checks.  Values are kept as
    UTF-8 encoded bytes, as before.
    """
    parsed = []
    for record in stream:
        features = record.strip().split("\t")
        if len(features) == len(columns):
            parsed.append({columns[i]: features[i].encode("utf-8")
                           for i in range(len(columns))})
    return parsed


def _insert_batched(rows, tablename, batch_size=1000):
    """Insert *rows* into *tablename* in fixed-size batches."""
    for i in range(0, len(rows), batch_size):
        insert_data(rows[i:i + batch_size], tablename)


# The three feeds share identical parse/load logic; the duplicated loops
# (and a stray per-record debug print in the mentions loop) are replaced
# by the two helpers above.  Module-level result names are preserved.
gkg_finaldict = _parse_records(gkg_obj, gkg)
_insert_batched(gkg_finaldict, "public.gkg")

event_finaldict = _parse_records(event_obj, events_columns)
_insert_batched(event_finaldict, "public.events")

mentions_finaldict = _parse_records(mention_obj, mentions)
_insert_batched(mentions_finaldict, "public.mentions")
| vikash4281/Corpus-Callosum | Ingestion/Streaming.py | Streaming.py | py | 5,581 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ConfigParser.ConfigParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "psycopg2.DatabaseError",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name":... |
28193366899 | from datetime import datetime, time
import sys
from time import sleep
import datefunc
def choose_date(now):
    """Prompt the user for a counter type and return the target datetime.

    Exits the program on an unrecognised choice.
    """
    datefunc.clear_terminal()
    option = input("Choose counter:\n 1 - time to pay,\n 2 - time to vacation,\n 3 - time to end of working day \n")
    # Fixed: the original had a stray line-continuation backslash after this
    # call that glued it onto the following ``if`` and broke the syntax.
    datefunc.clear_terminal()
    # input() always returns a string; the int alternatives are kept for
    # backward compatibility with the original comparisons.
    if option in ('1', 1):
        return datefunc.time_to_pay(now)
    if option in ('2', 2):
        return datefunc.time_to_vacation()
    if option in ('3', 3):
        return datefunc.time_end_workingday()
    # Replaced the original profane message with a professional one.
    print('Invalid option')
    sys.exit()
def main():
    """Run the countdown loop until the chosen target time is reached."""
    current = datetime.now()
    target = choose_date(current)
    while target > current:
        remaining = datefunc.dateDiffInSeconds(current, target)
        print("%dd %dh %dm %ds" % datefunc.daysHoursMinutesSecondsFromSeconds(remaining))
        datefunc.clear_terminal()
        current = datetime.now()
    print("Thank you")
if __name__ == "__main__":
    # Script entry point.
    main()
| NikitaTymofeiev-dev/simpleApp | main.py | main.py | py | 1,046 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datefunc.clear_terminal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datefunc.clear_terminal",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datefunc.time_to_pay",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "d... |
39620320183 | from Folder_de_Testes.base import Fox_HEIGHT, Fox_WIDTH
import pygame
import random
#Parametros gerais
WIDTH = 880
HEIGHT = 400
gravity = 1
def randon_sizes_for_walls(xpos):
    """Build a (bottom wall, inverted top wall) pair at *xpos* with a random
    height, leaving a ``protection``-pixel gap between the two."""
    protection = 200
    altura = random.randint(200, 400)  # bottom wall height in px
    wall = Wall(False, xpos, altura)
    inversal_wall = Wall(True, xpos, HEIGHT - altura - protection)
    return (wall, inversal_wall)
class Fox(pygame.sprite.Sprite):
    """Player sprite: a fox with simple gravity, a 3-frame run animation on
    the ground and a dedicated sprite while moving upwards."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        count_fox = 0    # ticks since the last animation frame change
        # Local sizes shadow the Fox_WIDTH/Fox_HEIGHT imported at module
        # top — NOTE(review): presumably intentional; confirm.
        Fox_WIDTH = 170
        Fox_HEIGHT = 100
        self.gravity = 1  # per-tick downward acceleration
        # Three run-cycle frames.
        Fox1 = pygame.image.load('Folder_de_Testes/assets/img/raposa 1.png').convert_alpha()
        Fox1 = pygame.transform.scale(Fox1, (Fox_WIDTH, Fox_HEIGHT))
        Fox2 = pygame.image.load('Folder_de_Testes/assets/img/raposa2.png').convert_alpha()
        Fox2 = pygame.transform.scale(Fox2, (Fox_WIDTH, Fox_HEIGHT))
        Fox3 = pygame.image.load('Folder_de_Testes/assets/img/raposa 3.png').convert_alpha()
        Fox3 = pygame.transform.scale(Fox3, (Fox_WIDTH, Fox_HEIGHT))
        # Frame shown while the fox is rising after a jump.
        self.flying_one = pygame.image.load('Folder_de_Testes/assets/img/raposafinal.png').convert_alpha()
        self.flying_one = pygame.transform.scale(self.flying_one, (100, 100))
        self.images = [Fox1, Fox2, Fox3]
        self.count_fox = count_fox
        self.image = Fox1
        self.rect = self.image.get_rect()
        self.rect.centerx = WIDTH / 4   # fixed horizontal position
        self.rect.bottom = HEIGHT - 100
        self.speedy = 1                 # vertical speed (positive = down)
        self.now_on_windon = 0          # index of the current run frame
        self.speed_modifier = 0.0       # extra jump strength, grows over time

    def update(self):
        # Integrate vertical motion: gravity plus a damping term on speedy.
        self.rect.y += self.speedy
        self.speedy += self.gravity + 0.1 * (-self.speedy)
        self.mask = pygame.mask.from_surface(self.image)
        self.count_fox += 1
        # Jump strength slowly increases (applied in pulo()), capped at -12.
        if self.speed_modifier > -12:
            self.speed_modifier -= 0.0024
        # Advance the run animation every 10 ticks, but only while the fox
        # sits at/below ground level (before the clamp below runs).
        if self.count_fox >= 10 and self.rect.bottom > HEIGHT:
            self.now_on_windon = (self.now_on_windon + 1) % 3
            self.image = self.images[self.now_on_windon]
            self.count_fox = 0
        elif self.speedy < 0:
            self.image = self.flying_one
        # Keep the sprite inside the screen.
        if self.rect.bottom > HEIGHT:
            self.rect.bottom = HEIGHT
        if self.rect.top < 0:
            self.rect.top = 0

    def pulo(self):
        # Jump: upward speed boosted by the accumulated modifier.
        self.speedy = -16 + self.speed_modifier
# Module-level sprite group holding the single player-controlled fox.
fox_group = pygame.sprite.Group()
fox = Fox()
fox_group.add(fox)
class Wall_meteor_fisic(pygame.sprite.Sprite):
    """Scrolling obstacle: spawns at the right edge at a random height and
    moves left; respawns once it leaves the screen.

    The two dead ``Wall_HEIGHT = random.randint(...)`` assignments of the
    original were removed — they were never read and only consumed RNG draws.
    """

    def __init__(self, img):
        # Constructor of the parent class (Sprite).
        pygame.sprite.Sprite.__init__(self)
        Wall_WIDTH = 50  # nominal width used to spawn flush with the right edge
        self.image = img
        self.rect = self.image.get_rect()
        self.rect.x = (WIDTH - Wall_WIDTH)
        self.rect.y = random.randint(10, 300)
        self.speedx = random.randint(-5, -3)

    def update(self):
        # Advance the obstacle leftwards.
        self.rect.x += self.speedx
        Wall_WIDTH = 50
        # Once it leaves the screen, respawn at the right edge with a new
        # random height and speed.
        if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
            self.rect.x = (WIDTH - Wall_WIDTH)
            self.rect.y = random.randint(10, 300)
            self.speedx = random.randint(-5, -3)
class Invible_wall:
    """Lightweight wrapper holding just an image and its rect."""

    def __init__(self, img):
        # NOTE(review): this class does not inherit from pygame.sprite.Sprite
        # yet calls Sprite.__init__ explicitly; it works, but the instance
        # presumably cannot be added to sprite groups — confirm intent.
        pygame.sprite.Sprite.__init__(self)
        self.image = img
        self.rect = self.image.get_rect()
class Wall(pygame.sprite.Sprite):
    """Tree obstacle anchored at the bottom (or flipped and anchored near
    the top when *inversal* is True) that scrolls leftwards."""

    def __init__(self, inversal, posx, posy):
        # Constructor of the parent class (Sprite).
        pygame.sprite.Sprite.__init__(self)
        wall_HEIGHT = 80
        wall_WIDTH = 80
        self.image = pygame.image.load('Folder_de_Testes/assets/img/Tree.png').convert_alpha()
        self.image = pygame.transform.scale(self.image, (wall_WIDTH, wall_HEIGHT))
        self.rect = self.image.get_rect()
        self.rect[0] = posx
        if inversal:
            # Fixed typo: was ``pygame.transaform.flip`` which raised
            # AttributeError whenever an inverted wall was created.
            self.image = pygame.transform.flip(self.image, False, True)
            self.rect[1] = (self.rect[3] - posy)
        else:
            self.rect[1] = HEIGHT - posy
        self.mask = pygame.mask.from_surface(self.image)
        self.speedx = random.randint(-5, -3)

    def update(self):
        # Scroll leftwards each tick.
        self.rect[0] += self.speedx
class Coin(pygame.sprite.Sprite):
    """Collectible coin travelling leftwards along the bottom of the screen;
    respawns at the right edge after leaving it.

    The unused ``METEOR_HEIGHT`` RNG draw in the original ``__init__`` was
    removed (dead code that only consumed random numbers).
    """

    def __init__(self):
        # Constructor of the parent class (Sprite).
        pygame.sprite.Sprite.__init__(self)
        coin_HEIGHT = 50
        coin_WIDTH = 50
        self.image = pygame.image.load('Folder_de_Testes/assets/img/coin.png').convert_alpha()
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = (WIDTH - coin_WIDTH)
        self.rect.y = (HEIGHT - coin_HEIGHT)
        self.speedx = random.randint(-5, -3)

    def update(self):
        # Height offset used if the coin respawns this tick.
        new_height = random.randint(50, 250)
        self.rect.x += self.speedx
        coin_WIDTH = 50
        # Once off-screen, respawn at the right edge with a fresh speed.
        if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
            self.rect.x = (WIDTH - coin_WIDTH)
            self.rect.y = (HEIGHT - new_height)
            self.speedx = random.randint(-5, -3)
class Predator(pygame.sprite.Sprite):
    """Piranha enemy: spawns at the right edge at a random height and
    scrolls leftwards; respawns after leaving the screen."""

    def __init__(self):
        # Constructor of the parent class (Sprite).
        pygame.sprite.Sprite.__init__(self)
        coin_HEIGHT = 50
        coin_WIDTH = 50
        self.image = pygame.image.load('Folder_de_Testes/assets/img/piranha.png').convert_alpha()
        self.image = pygame.transform.scale(self.image, (coin_WIDTH, coin_HEIGHT))
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = (WIDTH - coin_WIDTH)
        self.rect.y = random.randint(10, 300)
        self.speedx = random.randint(-5, -3)
        METEOR_HEIGHT = random.randint(50, 250)  # NOTE(review): unused local

    def update(self):
        # Height offset used if the predator respawns this tick.
        METEOR_HEIGHT = random.randint(50, 250)
        self.rect.x += self.speedx
        coin_WIDTH = 50
        # Once off-screen, respawn at the right edge with a fresh speed.
        # NOTE(review): respawn y uses HEIGHT - METEOR_HEIGHT, unlike the
        # random 10..300 used at construction — presumably intentional,
        # but worth confirming.
        if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
            self.rect.x = (WIDTH - coin_WIDTH)
            self.rect.y = (HEIGHT - METEOR_HEIGHT)
            self.speedx = random.randint(-5, -3)
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygam... |
40787987363 | ## import libraries
from tkinter import *
from gtts import gTTS
from playsound import playsound
################### Initialized window####################
root = Tk()
root.geometry('350x300')  # fixed-size window
root.resizable(0, 0)      # disable resizing in both directions
root.config(bg='light yellow')
root.title('DataFlair - TEXT_TO_SPEECH')

# Heading banner and footer label.
Label(root, text='HELIGA TEKST', font='arial 20 bold', bg='white smoke').pack()
Label(root, text='DataFlair', font='arial 15 bold', bg='blue').pack(side=BOTTOM)

# Prompt label ("Sisesta Tekst" = Estonian for "Enter text").
Label(root, text='Sisesta Tekst', font='arial 15 bold', bg='white').place(x=20, y=60)

# Backing variable and entry widget for the text to be spoken.
Msg = StringVar()
entry_field = Entry(root, textvariable=Msg, width='50')
entry_field.place(x=20, y=100)
###################define function##############################
def Tekst():
    """Read the entry text, synthesise Estonian speech via gTTS and play it."""
    Message = entry_field.get()
    # lang='et' selects Estonian; slow=True for clearer pronunciation.
    speech = gTTS(text=Message, lang='et', slow=True)
    # gTTS writes to a file; playsound then plays it back from disk.
    speech.save('DataFlair.mp3')
    playsound('DataFlair.mp3')
def Exit():
    """Close the main window and end the application."""
    root.destroy()
def Reset():
    """Clear the text entry field."""
    Msg.set("")
#Button
# Action buttons: speak (ESITA), quit (VÄLJU), clear (UUESTI).
Button(root, text="ESITA", font='arial 15 bold', command=Tekst, bg='light blue', width=6).place(x=25, y=140)
Button(root, text='VÄLJU', font='arial 15 bold', command=Exit, bg='green').place(x=100, y=140)
Button(root, text='UUESTI', font='arial 15 bold', command=Reset, bg='yellow').place(x=175, y=140)

# Enter the Tk event loop.
root.mainloop()
| program444/HELIGA-TEKST- | Text-to-Speech.py | Text-to-Speech.py | py | 1,453 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gtts.gTTS",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "playsound.playsound",
"line_number": 44,
"usage_type": "call"
}
] |
23917961666 | from langchain.document_loaders import WebBaseLoader
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
import os
from langchain.chat_models import JinaChat
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.llms import AI21
# create a new instance of chatbot and saves it as a JSON file
def createNewBot(name, fileType, path, url):
    """Create a retrieval-QA chatbot from a web page or a PDF and save it.

    :param name: bot name; used for the vectorstore directory and the
        saved chain file under ``bots/<name>/``.
    :param fileType: 'web' (load from *url*) or 'doc' (load PDF at *path*).
    :param path: filesystem path to a PDF; used when fileType == 'doc'.
    :param url: web page URL; used when fileType == 'web'.
    :returns: the constructed RetrievalQAWithSourcesChain.
    :raises ValueError: if *fileType* is neither 'web' nor 'doc'.
    """
    if fileType == 'web':
        loader = WebBaseLoader(url)
    elif fileType == 'doc':
        loader = PyPDFLoader(path)
    else:
        # Previously ``loader`` stayed None and the call below crashed with
        # an opaque AttributeError; fail fast with a clear message instead.
        raise ValueError("Unsupported fileType: %r (expected 'web' or 'doc')" % fileType)

    data = loader.load()
    # Overlapping chunks keep context across split boundaries.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    all_splits = text_splitter.split_documents(data)

    embeddings = HuggingFaceEmbeddings()
    persistentDir = "bots/" + name + "/vectorstore/"
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings,
                                        persist_directory=persistentDir)

    # The unused prompt-template construction of the original (it was never
    # passed to the chain) has been removed as dead code.
    chat = AI21(ai21_api_key=os.getenv("AI21_API_KEY"))
    retriever = vectorstore.as_retriever()
    finalChain = RetrievalQAWithSourcesChain.from_chain_type(chat, retriever=retriever)

    # NOTE: chain saving is only partially supported by langchain; kept
    # from the original implementation.
    chainSaveFolder = "bots/" + name + '/'
    botSavePath = chainSaveFolder + name + '.json'
    finalChain.save(botSavePath)
    return finalChain
{
"api_name": "langchain.document_loaders.WebBaseLoader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "langchain.document_loaders.PyPDFLoader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "langchain.text_splitter.RecursiveCharacterTextSplitter",
"lin... |
38486654704 | from datetime import datetime, timezone, timedelta
def stem(label: str, blacklist: list):
    '''
    Return *label* with every blacklisted word removed.
    Inputs:
    - label: single label to stem
    - blacklist: list of terms that should be excluded from the label
    Return: stemmed label (space-joined remaining parts)
    '''
    kept = [part for part in label.split(' ') if part not in blacklist]
    return ' '.join(kept)
def time_dif(x: tuple, interval: str):
    '''
    Calculate the difference between two points in time.
    Inputs:
    - x: tuple of two datetime objects (start, end)
    - interval: indicator of the return unit; accepted values: 'd', 'h', 's'
    Return: interval in days, hours or seconds (None for an unknown unit)
    '''
    res = time_wrap(x[0], x[1])
    days = res.days
    hours = res.seconds // 60 // 60
    seconds = res.seconds
    # Fixed: the original compared strings with ``is`` (identity), which
    # only worked by accident of CPython string interning.
    if interval == 'd':
        return days
    elif interval == 'h':
        return hours + (days * 24)
    elif interval == 's':
        return seconds + (days * 24 * 60 * 60)
def number_of_non_workdays(start, end):
    '''
    Compute the number of weekend days between two points in time,
    inclusive of both endpoints.
    Input:
    - start: datetime object
    - end: datetime object
    Return:
    int: number of weekend (Saturday/Sunday) days
    '''
    # Monday == 0 ... Sunday == 6; weekdays greater than 4 are Sat/Sun.
    count = 0
    day = start
    while day <= end:
        if day.weekday() > 4:
            count += 1
        day = day + timedelta(days=1)
    return count
def time_wrap(start: datetime, end: datetime, s_hour = 8, e_hour = 18):
    '''
    Return the temporal difference between two points in time, adjusted to
    a working schedule of s_hour..e_hour on weekdays.
    Input:
    - start: datetime object
    - end: datetime object
    - s_hour: start of workschedule (hour of day, default 8)
    - e_hour: end of workschedule (hour of day, default 18)
    Return:
    - timedelta covering only the working time between start and end
    '''
    # Working time remaining on the start day (start -> end of schedule).
    e_time = datetime(start.year, start.month, start.day, e_hour)
    start = start.replace(tzinfo=None)  # drop tz so naive arithmetic works
    t1 = (e_time - start).seconds
    # Working time elapsed on the end day (start of schedule -> end).
    end = end.replace(tzinfo=None)
    # NOTE(review): s_time is built from *start*'s date, not *end*'s; for
    # multi-day spans (end - s_time).seconds still yields an intra-day
    # remainder, but this looks like it was meant to use end's date — confirm.
    s_time = datetime(start.year, start.month, start.day, s_hour)
    t3 = (end - s_time).seconds
    # Full working days strictly between start and end, weekends excluded.
    days_total = (end - start).days
    non_workingdays = number_of_non_workdays(start, end)
    working_days = days_total - non_workingdays
    if working_days > 1:
        working_days -= 1 # consider only complete day in between
        total_hours_between = (e_hour - s_hour) * working_days
        # convert into seconds
        t2 = total_hours_between * 60 * 60
    else:
        # in this case, there is no full working day between start and end
        t2 = 0
    total_dif = t1 + t2 + t3
    return timedelta(seconds=total_dif)
{
"api_name": "datetime.timedelta",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
70416777467 | from multiprocessing import Value, Queue, Process
from config import config
from spider.HtmlCrawl import IpCrawl
from usable.usable import usable
from db.db_select import save_data
def startProxyCrawl(queue, db_proxy_num):
    """Run the proxy crawler process: pushes crawled proxies onto *queue*."""
    crawl = IpCrawl(queue, db_proxy_num)
    crawl.run()
def validator(queue1, queue2):
    # Placeholder stage, not wired into the pipeline — the main block
    # uses ``usable`` for validation instead.
    pass
if __name__ == "__main__":
    # Shared counter of proxies already stored in the database.
    DB_PROXY_NUM = Value('i', 0)
    # q1: crawler -> validator (bounded, applies back-pressure);
    # q2: validator -> DB writer.
    q1 = Queue(maxsize=config.TASK_QUEUE_SIZE)
    q2 = Queue()
    # Three-stage pipeline: crawl -> check usability -> persist.
    p1 = Process(target=startProxyCrawl, args=(q1, DB_PROXY_NUM))
    p2 = Process(target=usable, args=(q1, q2))
    p3 = Process(target=save_data, args=(q2, DB_PROXY_NUM))
    p1.start()
    p2.start()
    p3.start()
    # Wait for all stages to finish.
    p1.join()
    p2.join()
    p3.join()
{
"api_name": "spider.HtmlCrawl.IpCrawl",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Queue",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "c... |
4495169101 | # -*- coding: utf-8 -*-
"""
Tests for CSV Normalizer
"""
import csv
from io import StringIO
from _pytest.capture import CaptureFixture
from pytest_mock import MockFixture
from src.csv_normalizer import main
def test_outputs_normalized_csv(mocker: MockFixture, capsys: CaptureFixture[str]) -> None:
    """Happy path: a clean sample yields CSV on stdout and nothing on stderr."""
    with open("tests/sample.csv", encoding="utf-8", newline="") as csv_file:
        # Replace stdin with the fixture file so main() reads it.
        mocker.patch("sys.stdin", csv_file)
        main()

    captured = capsys.readouterr()
    assert len(captured.out) > 0
    assert len(captured.err) == 0

    # Row-by-row comparison against the golden output file.
    # NOTE(review): zip() stops at the shorter sequence, so extra or missing
    # trailing rows would go undetected here.
    written_csv = csv.reader(StringIO(captured.out))
    with open("tests/output-sample.csv", encoding="utf-8", newline="") as expected_csv_file:
        expected_csv = csv.reader(expected_csv_file)
        for written_line, expected_line in zip(written_csv, expected_csv):
            assert written_line == expected_line
def test_handles_error_properly(mocker: MockFixture, capsys: CaptureFixture[str]) -> None:
with open("tests/sample-with-broken-fields.csv", encoding="utf-8", newline="") as csv_file:
mocker.patch("sys.stdin", csv_file)
main()
captured = capsys.readouterr()
assert len(captured.err) > 0
expected_errors = [
"Invalid timestamp: 4/1/11 11:00:00 �M",
"invalid literal for int() with base 10: '9412�'",
"Duration is in an invalid format: 123:32.123",
"Duration has an invalid value: 1:a:32.123",
"Duration is in an invalid format: 132:33.123",
"Duration has an invalid value: 1:a:33.123",
]
errors = captured.err.splitlines()
assert len(errors) == len(expected_errors)
for error, expected_error in zip(errors, expected_errors):
assert error == expected_error
assert len(captured.out) > 0
written_csv = csv.reader(StringIO(captured.out))
with open(
"tests/output-sample-with-broken-fields.csv", encoding="utf-8", newline=""
) as expected_csv_file:
expected_csv = csv.reader(expected_csv_file)
for written_line, expected_line in zip(written_csv, expected_csv):
assert written_line == expected_line
| felipe-lee/csv_normalization | tests/test_csv_normalizer.py | test_csv_normalizer.py | py | 2,253 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pytest_mock.MockFixture",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "_pytest.capture.CaptureFixture",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "src.csv_normalizer.main",
"line_number": 18,
"usage_type": "call"
},
{
"ap... |
29041072051 | from flask import Flask, jsonify
from datetime import datetime
import requests
from flask import request
app = Flask(__name__)
logs = []
@app.route("/list", methods=["POST"])
def list():
r = request.data.decode("utf-8")
logs.append(r)
return jsonify(success=True)
@app.route("/usage.log")
def home():
return "<br>".join(logs)
if __name__ == "__main__":
app.run()
| maciejgrosz/containers_network_communication | loggerservice/loggerservice.py | loggerservice.py | py | 390 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.data.decode",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.re... |
27215126875 | import pytest
from hbutils.system import telnet, wait_for_port_online
@pytest.mark.unittest
class TestSystemNetworkTelnet:
def test_telnet(self):
assert telnet('127.0.0.1', 35127)
assert telnet('127.0.0.1', 35128)
assert not telnet('127.0.0.1', 35129, timeout=1.0)
def test_wait_for_port_online(self):
wait_for_port_online('127.0.0.1', 35127)
wait_for_port_online('127.0.0.1', 35128)
with pytest.raises(TimeoutError):
wait_for_port_online('127.0.0.1', 35129, timeout=2.0, interval=0.1)
| HansBug/hbutils | test/system/network/test_telnet.py | test_telnet.py | py | 559 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "hbutils.system.telnet",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hbutils.system.telnet",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "hbutils.system.telnet",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "hbut... |
43279150633 | from django.contrib import admin
from django.urls import path
from . import views
app_name = 'task'
urlpatterns=[
# path('', views.index, name='index')
path('', views.TasksView.as_view(), name='index'),
path('addtask/', views.add_task, name='addtask'),
path('remover/', views.remove_all_task, name='rm_task'),
path('rm/<int:task_pk>', views.remove_1_task, name='rm'),
path('done/<int:task_pk>', views.done_task, name='done')
]
| eh97979/Task-manager | task_project/task/urls.py | urls.py | py | 456 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
3705027331 | import bitstring
def shift_check( filename ):
f = open(filename, 'rb')
bits = bitstring.Bits( f )
f.close()
bits_array = bitstring.BitArray( bits )
skip =8*3
for k in range(8):
start = k + skip
stop = start+ 200*8
shifted = bits_array[start:stop]
byte_data = shifted.bytes
try:
print("offset {}".format(k))
print( byte_data.decode('utf-8'))
except:
print("Not ascii at offset {}".format(k))
pass
if __name__ == "__main__":
shift_check("out.txt") | tj-oconnor/spaceheroes_ctf | forensics/forensics-rf-math/solve/shifty.py | shifty.py | py | 583 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "bitstring.Bits",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "bitstring.BitArray",
"line_number": 7,
"usage_type": "call"
}
] |
3232593391 | import logging
class LogDB:
def __init__(self,fileName):
self.fileName = fileName
self.loglist = []
self.files = None
self.final = {}
def log(self, message=None ):
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT, filename=self.fileName)
logging.warning(message)
def show_tracker_logs(self):
with open(self.fileName) as f:
f = f.readlines()
for line in f:
print(line)
def update_files(self, files_seeder):
self.files = files_seeder
def log_file(self,fileName):
if fileName in self.files.keys():
print(self.files[fileName])
else:
print(f'{fileName} not found')
def add_logs2file(self,fileName, logmsg):
"""adds the log message related to one specific file to its key in a dictionary"""
if fileName not in self.files.keys():
self.final[fileName].append(logmsg)
else:
self.final[fileName] = []
self.final[fileName].append(logmsg)
def logs_of_the_file(self,fileName):
if fileName in self.files.keys() :
print(self.files[fileName])
if fileName in self.final.keys():
print(self.final[fileName])
else:
print('No log yet')
else:
print(f'{fileName} not found')
def all_logs(self):
for fileName in self.files.keys():
self.logs_of_the_file(fileName)
| reza2002801/Torrent | logDB.py | logDB.py | py | 1,524 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 13,
"usage_type": "call"
}
] |
71924390587 | from setuptools import setup, find_packages
from os.path import join
name = 'menhir.simple.livesearch'
version = '0.1'
readme = open("README.txt").read()
history = open(join("docs", "HISTORY.txt")).read()
setup(name = name,
version = version,
description = 'Dolmen simple extension : livesearch',
long_description = readme[readme.find('\n\n'):] + '\n' + history,
keywords = 'Grok Zope3 CMS Dolmen',
author = 'Souheil Chelfouh',
author_email = 'souheil@chelfouh.com',
url = 'http://tracker.trollfot.org/',
download_url = 'http://pypi.python.org/pypi/menhir.simple.livesearch',
license = 'GPL',
packages=find_packages('src', exclude=['ez_setup']),
package_dir={'': 'src'},
namespace_packages = ['menhir', 'menhir.simple'],
include_package_data = True,
platforms = 'Any',
zip_safe = True,
install_requires=[
'setuptools',
'grok',
'dolmen.app.layout',
'dolmen.app.search',
'hurry.jquery',
'megrok.resource',
'zope.component',
'zope.interface',
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Grok',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
| trollfot/menhir.simple.livesearch | setup.py | setup.py | py | 1,479 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 19,
"usage_type": "call"
}
] |
28339877749 | from itertools import product
k,m = list(map(int,input().split()))
arr = []
cart_prod = []
maxS=0
for _ in range(k):
lstN = list(map(int,input().split()[1:]))
arr.append(lstN)
cart_prod = list(product(*arr))
for elem in cart_prod:
sum1=0
for i in elem:
sum1+=i**2
if sum1%m>maxS:
maxS = sum1%m
print(maxS)
| t3chcrazy/Hackerrank | maximize-it.py | maximize-it.py | py | 358 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.product",
"line_number": 9,
"usage_type": "call"
}
] |
20538458179 | # https://leetcode.com/problems/last-stone-weight/
"""
Time complexity:- O(N logN)
Space Complexity:- O(N)
"""
import heapq
from typing import List
class Solution:
def lastStoneWeight(self, stones: List[int]) -> int:
# Create a max heap (negate each element to simulate a min heap)
h = [-x for x in stones]
heapq.heapify(h)
# Continue the process until only one or no stone is left
while len(h) > 1 and h[0] != 0:
# Pop the two largest stones from the max heap
stone1 = heapq.heappop(h)
stone2 = heapq.heappop(h)
# Calculate the weight difference and push it back into the max heap
diff = stone1 - stone2
heapq.heappush(h, diff)
# If there is at least one stone remaining, return its weight
return -h[0]
| Amit258012/100daysofcode | Day60/last_stone_weight.py | last_stone_weight.py | py | 844 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "heapq.heapify",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_num... |
74977721787 |
import logging
import psycopg2
from dipper.sources.Source import Source
LOG = logging.getLogger(__name__)
class PostgreSQLSource(Source):
"""
Class for interfacing with remote Postgres databases
"""
files = {}
def __init__(
self,
graph_type,
are_bnodes_skolemized,
data_release_version=None,
name=None,
ingest_title=None,
ingest_url=None,
ingest_logo=None,
ingest_description=None,
license_url=None,
data_rights=None,
file_handle=None
):
super().__init__(
graph_type=graph_type,
are_bnodes_skized=are_bnodes_skolemized,
data_release_version=data_release_version,
name=name,
ingest_title=ingest_title,
ingest_url=ingest_url,
ingest_logo=ingest_logo,
ingest_description=ingest_description,
license_url=license_url,
data_rights=data_rights,
file_handle=file_handle)
# used downstream but handled in Source
# globaltt = self.globaltt
# globaltcid = self.globaltcid
# all_test_ids = self.all_test_ids
def fetch_from_pgdb(self, tables, cxn, limit=None):
"""
Will fetch all Postgres tables from the specified database
in the cxn connection parameters.
This will save them to a local file named the same as the table,
in tab-delimited format, including a header.
:param tables: Names of tables to fetch
:param cxn: database connection details
:param limit: A max row count to fetch for each table
:return: None
"""
con = None
try:
con = psycopg2.connect(
host=cxn['host'], database=cxn['database'], port=cxn['port'],
user=cxn['user'], password=cxn['password'])
cur = con.cursor()
for tab in tables:
LOG.info("Fetching data from table %s", tab)
self._getcols(cur, tab)
query = ' '.join(("SELECT * FROM", tab))
countquery = ' '.join(("SELECT COUNT(*) FROM", tab))
if limit is not None:
query = ' '.join((query, "LIMIT", str(limit)))
countquery = ' '.join((countquery, "LIMIT", str(limit)))
cur.execute(countquery)
tablerowcount = cur.fetchone()[0]
outfile = '/'.join((self.rawdir, tab))
# download the file
LOG.info("COMMAND:%s", query)
outputquery = "COPY ({0}) TO STDOUT WITH DELIMITER AS '\t' CSV HEADER"\
.format(query)
with open(outfile, 'w') as tsvfile:
cur.copy_expert(outputquery, tsvfile)
filerowcount = self.file_len(outfile)
if (filerowcount - 1) < tablerowcount:
raise Exception(
"Download from {} failed, {} != {}"
.format(cxn['host'] + ':' + cxn['database'],
(filerowcount - 1), tablerowcount))
if (filerowcount - 1) > tablerowcount:
LOG.warning(
"Fetched from %s more rows in file (%s) than reported "
"in count(%s)",
cxn['host'] + ':' + cxn['database'],
(filerowcount - 1), tablerowcount)
finally:
if con:
con.close()
def fetch_query_from_pgdb(self, qname, query, con, cxn, limit=None):
"""
Supply either an already established connection, or connection parameters.
The supplied connection will override any separate cxn parameter
:param qname: The name of the query to save the output to
:param query: The SQL query itself
:param con: The already-established connection
:param cxn: The postgres connection information
:param limit: If you only want a subset of rows from the query
:return:
"""
if con is None and cxn is None:
raise ValueError("ERROR: you need to supply connection information")
if con is None and cxn is not None:
con = psycopg2.connect(
host=cxn['host'], database=cxn['database'], port=cxn['port'],
user=cxn['user'], password=cxn['password'])
outfile = '/'.join((self.rawdir, qname))
cur = con.cursor()
# wrap the query to get the count
countquery = ' '.join(("SELECT COUNT(*) FROM (", query, ") x"))
if limit is not None:
countquery = ' '.join((countquery, "LIMIT", str(limit)))
cur.execute(countquery)
tablerowcount = cur.fetchone()[0]
# download the file
LOG.debug("COMMAND:%s", query)
outputquery = \
"COPY ({0}) TO STDOUT WITH DELIMITER AS '\t' CSV HEADER".format(query)
with open(outfile, 'w') as tsvfile:
cur.copy_expert(outputquery, tsvfile)
# Regenerate row count to check integrity
filerowcount = self.file_len(outfile)
if (filerowcount-1) < tablerowcount:
raise Exception(
"Download from {} failed, {} != {}"
.format(cxn['host'] + ':' + cxn['database'],
(filerowcount-1), tablerowcount))
if (filerowcount-1) > tablerowcount:
LOG.warning(
"Fetched from %s more rows in file (%s) than reported in count(%s)",
cxn['host'] + ':'+cxn['database'], (filerowcount-1), tablerowcount)
@staticmethod
def _getcols(cur, table):
"""
Will execute a pg query to get the column names for the given table.
:param cur:
:param table:
:return:
"""
query = ' '.join(("SELECT * FROM", table, "LIMIT 0")) # for testing
cur.execute(query)
colnames = [desc[0] for desc in cur.description]
LOG.info("COLS (%s): %s", table, colnames)
# abstract
def fetch(self, is_dl_forced=False):
"""
abstract method to fetch all data from an external resource.
this should be overridden by subclasses
:return: None
"""
raise NotImplementedError
def parse(self, limit):
"""
abstract method to parse all data from an external resource,
that was fetched in fetch() this should be overridden by subclasses
:return: None
"""
raise NotImplementedError
| monarch-initiative/dipper | dipper/sources/PostgreSQLSource.py | PostgreSQLSource.py | py | 6,689 | python | en | code | 53 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dipper.sources.Source.Source",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "psycopg2.connect",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "psycopg... |
4785885470 | from collections import defaultdict, deque
n = int(input())
d = defaultdict(list)
for i in range(1, n):
l = list(map(int, input().split()))
now = 1
for j in range(i+1, n+1):
d[i].append((j, l[now-1]))
d[j].append((i, l[now-1]))
now += 1
print(d)
s = set()
max = 0
def dfs(now, flg, visited):
global max
if visited[now-1] == 1:
return
print(now, s)
visited[now-1] = 1
if flg == 0:
s.add(now)
for next in d[now]:
flg ^= 1
dfs(next[0], flg, visited)
dfs(1, 0, [0]*n)
print()
| K5h1n0/compe_prog_new | abc318/d/main.py | main.py | py | 570 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
}
] |
71992464828 | import json
# Đọc dữ liệu từ file input1.json và input2.json
with open('input1.json', 'r', encoding='utf-8') as file1, open('input2.json', 'r', encoding='utf-8') as file2:
data1 = json.load(file1)
data2 = json.load(file2)
# Tìm các cặp key có cùng giá trị trong cả hai file
common_key_value_pairs = []
for key1, value1 in data1.items():
for key2, value2 in data2.items():
if value1 == value2 and key1 != key2:
common_key_value_pairs.append((key2, key1, value1))
# # Ghi các khóa giống nhau vào tệp output2.txt
# with open('output2.txt', 'w', encoding='utf-8') as output_file:
# for key1, key2, value1 in common_key_value_pairs:
# output_file.write(f"{key1} = {key2} : {value1}\n")
# Tạo một dictionary để lưu trữ kết quả theo định dạng bạn mong muốn
output_data = {}
count = 1
for key1, key2, value1 in common_key_value_pairs:
output_data[count] = [key1, key2, value1]
count += 1
# Ghi dictionary kết quả vào file output.json
with open('output.json', 'w') as output_file:
json.dump(output_data, output_file, indent=2)
| mminhlequang/python_tools | key_have_same_value/main.py | main.py | py | 1,139 | python | vi | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 32,
"usage_type": "call"
}
] |
8743120221 | import pandas as pd #pandas是强大的分析结构化数据的工具集 as是赋予pandas别名
from matplotlib import pyplot as plt #2D绘图库,通过这个库将数据绘制成各种2D图形(直方图,散点图,条形图等)
#全国哪一个城市地铁线最多
def subline_count():
df1 = df.iloc[:, :-1] #筛选前三列 df是下面main读取的
df2 = df1.drop_duplicates(subset=['city', 'subwayline']) # 去重
# drop_duplicates是pandas里面的函数 subset用来指定特定的列,不填参数就默认所有列
df3 = df2['city'].value_counts() #pandas里面的value_counts()函数可以对Series里面每个值进行计数并排序
df3.plot.bar() #bar条形图
plt.savefig("城市地铁数量排行榜.png")
plt.show() #将处理后的数据显示出来
print(df3)
if __name__=='__main__' :
df = pd.read_csv('subway.csv', encoding='utf-8') #读取subway.csv文件,并制定字符集的类型
plt.rcParams['font.sans-serif'] = 'fangsong' #font.sans-serif就是修改字体,后面是仿宋字体
#rcParams可以修改默认属性,包括窗体大小,每英寸的点数,线颜色,样式,坐标轴,坐标和网络属性,文本,字体等
subline_count() #运行函数
| rlxy/python | 爬虫/数据分析/城市地铁数量排行榜/analysis.py | analysis.py | py | 1,315 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ma... |
71168432827 | '''
This project is a GUI calculator for a high yield savings account.
The GUI will display 4 input boxes. An intial deposit, monthly deposit, APY yield, and years to calculate
The result will be a number at the end of the year, as well as a graph that displays the growth of the account.
Possible extras could include a bar graph or just numbers that display how much of the final amount was the initial, monthly deposit,
or interest earned.
'''
#Imports
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# Make tkinter window with classname and size
m = tk.Tk(className="high yield savings calculator")
m.attributes('-fullscreen', True)
# Create canvas to draw and do animations
canvas = tk.Canvas(m, width=m.winfo_screenwidth(), height=m.winfo_screenheight(), bg="white")
canvas.create_line(0, 120, m.winfo_screenwidth(), 120, fill="black", width=2)
canvas.pack(fill="both", expand=True)
title = tk.Label(m, text="High Yield Savings Calculator", font=("Mistral 60 bold"), bg="white")
title.pack()
screen_width = m.winfo_screenwidth()
center, quarter = screen_width // 2, screen_width // 1.5
title.place(x=center, y=18, anchor="n")
initial_var, monthly_var, APY_var, years_var = tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar()
def calculate(initial, monthly, APY, years):
apy_ratio = APY / 100
total_monthly = (monthly * 12) * years
total_months = int((years * 12))
count = years
contribution_interest = 0
for i in range(0, total_months):
contribution_interest += (monthly * apy_ratio * count)
total = initial + total_monthly + contribution_interest
return total, contribution_interest, total_monthly
total_bal = None
error_msg = None
piegraph = None
def display_total_balance(total, contribution_interest, initial, total_monthly):
global total_bal
if total_bal:
total_bal.config(text='Total balance is $' + str(total))
else:
total_bal = tk.Label(m, text='Total balance is $' + str(total), fg='green', font=('Modern', 40), bg="white")
total_bal.place(x=quarter, y=165, anchor='n')
display_pie_graph(initial, total_monthly, contribution_interest)
def display_pie_graph(initial, total_monthly, contribution_interest):
global piegraph
# Make canvas where we can draw plots and graph
fig = Figure(figsize=(6, 4), dpi=130)
# Make subplot so we have place to plot our pie graph
subplot = fig.add_subplot(111)
# Prepare the data for the pie chart
labels = ['Initial', 'Contributions', 'Interest']
sizes = [initial, total_monthly, contribution_interest]
explode = (0.1, 0.1, 0.1) # Separation of our pie datas
colors = ('yellow', 'cyan', 'green')
wp = {'linewidth': 0.5, 'edgecolor': "red"}
# Create the pie chart
wedges, texts, autotexts = subplot.pie(sizes,
autopct='%1.1f%%',
explode=explode,
shadow=True,
colors=colors,
startangle=90,
wedgeprops=wp,
textprops=dict(color="black"))
subplot.axis('equal') # Equal aspect ratio ensures the pie is circular
# Make legend, 1st and 2nd are location, 3rd and 4th are size
subplot.legend(wedges, labels,
title="Entries",
bbox_to_anchor=(0.18, 1.1))
# Create a FigureCanvasTkAgg widget that binds the graph in the Tkinter window
piegraph = FigureCanvasTkAgg(fig, master=m)
piegraph.draw()
# Place the graph in the Tkinter window
piegraph.get_tk_widget().place(x=quarter, y=290, anchor='n')
def remove_pie_graph():
global piegraph
if piegraph:
piegraph.get_tk_widget().destroy()
def display_error_message():
global error_msg
if error_msg:
error_msg.config(text='Please enter a valid number')
else:
error_msg = tk.Label(m, text='Please enter a valid number', fg='red', font=('Georgia', 20), anchor='center', bg="white")
error_msg.place(x=center, y=165, anchor='n')
def remove_widgets():
global total_bal, error_msg
if total_bal:
total_bal.destroy()
total_bal = None
if error_msg:
error_msg.destroy()
error_msg = None
remove_pie_graph()
def submit():
remove_widgets()
try:
initial = float(initial_var.get())
monthly = float(monthly_var.get())
APY = float(APY_var.get())
years = int(years_var.get())
if initial < 0 or monthly < 0 or APY < 0 or years < 0:
raise ValueError
# Calculate the total balance
total, contribution_interest, total_monthly = calculate(initial, monthly, APY, years)
# Display the total balance
display_total_balance(total, contribution_interest, initial, total_monthly)
except ValueError:
# Display the error message
display_error_message()
def main():
# Label the questions
initial_question = tk.Label(m, text='Initial Deposit:', font=('Georgia', 20), anchor='n', bg="white")
monthly_question = tk.Label(m, text='Monthly Deposit:', font=('Georgia', 20), anchor='n', bg="white")
APY_question = tk.Label(m, text='APY:', font=('Georgia', 20), anchor='n', bg="white")
years_question = tk.Label(m, text='Years to calculate:', font=('Georgia', 20), anchor='n', bg="white")
# Place the questions
initial_question.place(x=8, y=170)
monthly_question.place(x=8, y=275)
APY_question.place(x=8, y=380)
years_question.place(x=8, y=485)
# Make the input box
initial_box = tk.Entry(m, textvariable=initial_var, width=20, font=('Arial 22'), borderwidth=2, highlightthickness=2)
monthly_box = tk.Entry(m, textvariable=monthly_var, width=20, font=('Arial 22'), borderwidth=2, highlightthickness=2)
APY_box = tk.Entry(m, textvariable=APY_var, width=20, font=('Arial 22'), borderwidth=2, highlightthickness=2)
years_box = tk.Entry(m, textvariable=years_var, width=20, font=('Arial 22'), borderwidth=2, highlightthickness=2)
# Place the input boxes
initial_box.place(x=10, y=220)
monthly_box.place(x=10, y=315)
APY_box.place(x=10, y=420)
years_box.place(x=10, y=525)
#Make and place the button
button = tk.Button(text="$Calculate$", width=12, height=5, bg="white", fg="green", font = ('Castellar 20 bold'), anchor = 'center', command = submit, borderwidth=0, highlightthickness=0)
button.place(x=10, y=600)
m.mainloop()
main()
| MaxC1880/HYSAcalculator | HYSAcalculator.py | HYSAcalculator.py | py | 6,918 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.Tk",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line... |
5469847519 | import os
import numpy as np
from datetime import datetime
import time
from Utils import _add_loss_summaries
from model import *
#from augmentation import pre_process_image
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 367
NUM_EXAMPLES_PER_EPOCH_FOR_TEST = 101
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 1
TEST_ITER = 200 # ceil(NUM_EXAMPLES_PER_EPOCH_FOR_TEST / TRAIN_BATCH_SIZE)
# =========== This function converts prediction to image ===========================
def color_image(image, num_classes=11):
import matplotlib as mpl
import matplotlib.cm
norm = mpl.colors.Normalize(vmin=0., vmax=num_classes)
mycm = mpl.cm.get_cmap('Set1')
return mycm(norm(image))
def train(total_loss, global_step):
""" fix lr """
lr = INITIAL_LEARNING_RATE
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.AdamOptimizer(lr)
grads = opt.compute_gradients(total_loss)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def training():
# should be changed if your model stored by different convention
startstep = 801 #if not is_finetune else int(FLAGS.finetune.split('-')[-1])
image_filenames, label_filenames = get_filename_list(path_train)
val_image_filenames, val_label_filenames = get_filename_list(path_val)
with tf.Graph().as_default():
train_data_node = tf.placeholder( tf.float32, shape=[TRAIN_BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH])
train_labels_node = tf.placeholder(tf.int64, shape=[TRAIN_BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
phase_train = tf.placeholder(tf.bool, name='phase_train')
global_step = tf.Variable(0, trainable=False)
# For CamVid
images, labels = CamVidInputs(image_filenames, label_filenames, TRAIN_BATCH_SIZE)
print ('Camvid:', images, '===000===', labels)
val_images, val_labels = CamVidInputs(val_image_filenames, val_label_filenames, TRAIN_BATCH_SIZE)
# Build a Graph that computes the logits predictions from the inference model.
loss, eval_prediction = inference(train_data_node, train_labels_node, TRAIN_BATCH_SIZE, phase_train)
# Build a Graph that trains the model with one batch of examples and updates the model parameters.
train_op = train(loss, global_step)
saver = tf.train.Saver(tf.global_variables())
summary_op = tf.summary.merge_all()
with tf.Session() as sess:
# Build an initialization operation to run below.
try:
print("Trying to restore last checkpoint from ", path_ckpt, " ...")
# Use TensorFlow to find the latest checkpoint - if any.
last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=path_ckpt)
print ('last chkr point:', last_chk_path)
# Try and load the data in the checkpoint.
saver.restore(sess, save_path=last_chk_path)
# If we get to this point, the checkpoint was successfully loaded.
print("Restored checkpoint from:", last_chk_path)
except:
# If the above failed for some reason, simply
# initialize all the variables for the TensorFlow graph.
print("Failed to restore checkpoint. Initializing variables instead.")
sess.run(tf.global_variables_initializer())
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Summery placeholders
summary_writer = tf.summary.FileWriter(path_train, sess.graph)
average_pl = tf.placeholder(tf.float32)
acc_pl = tf.placeholder(tf.float32)
iu_pl = tf.placeholder(tf.float32)
average_summary = tf.summary.scalar("test_average_loss", average_pl)
acc_summary = tf.summary.scalar("test_accuracy", acc_pl)
iu_summary = tf.summary.scalar("Mean_IU", iu_pl)
for step in range(train_iteration):
image_batch ,label_batch = sess.run([images, labels])
# since we still use mini-batches in validation, still set bn-layer phase_train = True
#print ('Batch:', image_batch, ' ----0000---', label_batch)
#image_batch_a = pre_process_image (image_batch, True)
feed_dict = {
train_data_node: image_batch,
train_labels_node: label_batch,
phase_train: True
}
start_time = time.time()
#print ('Step:', step)
_, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if (step<50):
print ('Step:',step)
if step % 100 == 0:
num_examples_per_step = TRAIN_BATCH_SIZE
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
# eval current training batch pre-class accuracy
pred = sess.run(eval_prediction, feed_dict=feed_dict)
per_class_acc(pred, label_batch)
if step % val_iter == 0:
print("start validating.....")
total_val_loss = 0.0
hist = np.zeros((NUM_CLASSES, NUM_CLASSES))
for test_step in range(TEST_ITER):
val_images_batch, val_labels_batch = sess.run([val_images, val_labels])
_val_loss, _val_pred = sess.run([loss, eval_prediction], feed_dict={
train_data_node: val_images_batch,
train_labels_node: val_labels_batch,
phase_train: True
})
total_val_loss += _val_loss
hist += get_hist(_val_pred, val_labels_batch)
print("val loss: ", total_val_loss / TEST_ITER)
acc_total = np.diag(hist).sum() / hist.sum()
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
test_summary_str = sess.run(average_summary, feed_dict={average_pl: total_val_loss / TEST_ITER})
acc_summary_str = sess.run(acc_summary, feed_dict={acc_pl: acc_total})
iu_summary_str = sess.run(iu_summary, feed_dict={iu_pl: np.nanmean(iu)})
print_hist_summery(hist)
print(" end validating.... ")
summary_str = sess.run(summary_op, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.add_summary(test_summary_str, step)
summary_writer.add_summary(acc_summary_str, step)
summary_writer.add_summary(iu_summary_str, step)
# Save the model checkpoint periodically.
if step % save_model_itr == 0 or (step + 1) == train_iteration:
checkpoint_path = os.path.join(path_ckpt, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=global_step)
coord.request_stop()
coord.join(threads)
# --------------------------------------------------------
training()
| mohbattharani/Segmentation_ | SegNet/train.py | train.py | py | 7,704 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 21,
"usage_type": "call"
},
{
"api_nam... |
19580309816 | import pytest
from torch.optim import RMSprop as _RMSprop
from neuralpy.optimizer import RMSprop
@pytest.mark.parametrize(
"learning_rate, alpha, eps, weight_decay, momentum, centered",
[
(-6, 0.001, 0.001, 0.001, 0.001, False),
(False, 0.001, 0.001, 0.001, 0.001, False),
("invalid", 0.001, 0.001, 0.001, 0.001, False),
(0.0, False, 0.001, 0.001, 0.001, False),
(0.001, False, 0.001, 0.001, 0.001, False),
(0.001, "", 0.001, 0.001, 0.001, False),
(0.001, 0.001, False, 0.001, 0.001, False),
(0.001, 0.001, -6, 0.001, 0.001, False),
(0.001, 0.001, 0.2, True, 0.001, False),
(0.001, 0.001, 0.2, "", 0.001, False),
(0.001, 0.001, 0.2, 0.32, False, False),
(0.001, 0.001, 0.2, 0.32, "invalid", False),
(0.001, 0.001, 0.2, 0.32, 0.32, 3),
(0.001, 0.001, 0.2, 0.32, 0.32, "invalid"),
],
)
def test_rmsprop_should_throw_value_error(
learning_rate, alpha, eps, weight_decay, momentum, centered
):
with pytest.raises(ValueError):
RMSprop(
learning_rate=learning_rate,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
# Possible values that are valid
learning_rates = [0.001, 0.1]
alphas = [0.2, 1.0]
epses = [0.2, 1.0]
momentums = [0.32]
weight_decays = [0.32]
centeredes = [False, True]
@pytest.mark.parametrize(
"learning_rate, alpha, eps, weight_decay, momentum, centered",
[
(learning_rate, alpha, eps, weight_decay, momentum, centered)
for learning_rate in learning_rates
for alpha in alphas
for eps in epses
for weight_decay in weight_decays
for momentum in momentums
for centered in centeredes
],
)
def test_rmsprop_get_layer_method(
learning_rate, alpha, eps, weight_decay, momentum, centered
):
x = RMSprop(
learning_rate=learning_rate,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
details = x.get_optimizer()
assert isinstance(details, dict) is True
assert issubclass(details["optimizer"], _RMSprop) is True
assert isinstance(details["keyword_arguments"], dict) is True
assert details["keyword_arguments"]["lr"] == learning_rate
assert details["keyword_arguments"]["alpha"] == alpha
assert details["keyword_arguments"]["eps"] == eps
assert details["keyword_arguments"]["momentum"] == momentum
assert details["keyword_arguments"]["weight_decay"] == weight_decay
assert details["keyword_arguments"]["centered"] == centered
def test_rmsprop_get_layer_method_without_parameter():
x = RMSprop()
details = x.get_optimizer()
assert isinstance(details, dict) is True
assert issubclass(details["optimizer"], _RMSprop) is True
assert isinstance(details["keyword_arguments"], dict) is True
assert details["keyword_arguments"]["lr"] == 0.001
assert details["keyword_arguments"]["alpha"] == 0.99
assert details["keyword_arguments"]["eps"] == 1e-08
assert details["keyword_arguments"]["momentum"] == 0.0
assert details["keyword_arguments"]["weight_decay"] == 0.0
assert details["keyword_arguments"]["centered"] is False
| imdeepmind/NeuralPy | tests/neuralpy/optimizer/test_rmsprop.py | test_rmsprop.py | py | 3,352 | python | en | code | 78 | github-code | 6 | [
{
"api_name": "pytest.raises",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "neuralpy.optimizer.RMSprop",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytes... |
38470272604 | import collections
def flatten_path(nested, parent_key=()):
items = []
for k, v in nested.items():
new_key = parent_key + (k,)
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_path(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def flatten(nested, sep='.'):
return {sep.join(k): v for k, v in flatten_path(nested).items()}
| BRGM/inept | inept/utils.py | utils.py | py | 439 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.abc",
"line_number": 8,
"usage_type": "attribute"
}
] |
35945080718 | import json
import csv
filename = 'data/predictions/test_prediction_RD_15_0.00003_4_finnum_5_bertuncase.csv'
j = 0
predictions = []
with open(filename, 'r') as csvfile:
datareader = csv.reader(csvfile)
for row in datareader:
j += 1
if j == 1: continue
new_row = []
new_row += [row[0]]
new_row += [row[1].replace('[', '').replace(']', '').split(",")]
for i, i_number in enumerate(new_row[1]):
try:
new_row[1][i] = int(i_number)
except:
new_row[1][i] = new_row[1][i].replace("'","").replace("'","").replace(" ","")
new_row += [row[2].replace('[', '').replace(']', '').split(",")]
for i, i_number in enumerate(new_row[2]):
try:
new_row[2][i] = int(i_number)
except:
new_row[2][i] = new_row[2][i].replace("'","").replace("'","").replace(" ","")
print(new_row)
predictions += [new_row]
with open('data/predictions/test_prediction_RD_15_0.00003_4_finnum_5_bertuncase.json','w') as f:
json.dump(predictions, f)
| MikeDoes/ETH_NLP_Project | predictions_to_json.py | predictions_to_json.py | py | 1,144 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.reader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 36,
"usage_type": "call"
}
] |
43348614388 | from scrabzl import Word, Dictionary
import unicodedata
def strip_accents(text):
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
def no_special_chars(word):
ret = "'" not in word
ret = ret and ' ' not in word
ret = ret and '.' not in word
ret = ret and '-' not in word
return ret
def create_dictionaries(dictionary_path, max_word_length, language):
words = []
with open(dictionary_path, 'r') as f:
for word in f.readlines():
word = strip_accents(word).upper().strip()
if (
len(word) > 1 and len(word) <= max_word_length and
no_special_chars(word)
):
words.append(Word(word))
words = tuple(sorted(set(words)))
dictionary = Dictionary(words)
dictionary.dump(language=language)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Create dictionaries.')
parser.add_argument('dictionary_path', metavar='dictionary-path', type=str,
help='Path to a dictionary txt file containing one word per line')
parser.add_argument('dictionary_name', metavar='dictionary-name', type=str,
help='Name of the dictionary')
parser.add_argument('--max-word-length', type=int, default=7,
help='Maximum word length of the words in the dictionary (default: 7)')
args = parser.parse_args()
create_dictionaries(args.dictionary_path, args.max_word_length, args.dictionary_name) | charleswilmot/scrabzl | src/create_dictionary.py | create_dictionary.py | py | 1,733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unicodedata.normalize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "scrabzl.Word",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scrabzl.Dictionary",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "argparse.Argume... |
36942726510 | from PIL import Image
myImg = Image.open('Image1.jpg')
newImg = myImg.convert('L')
print("Do you want your ", myImg, "converted to GRY?")
print("Type: y or n")
answer = str(input("y or n?: "))
if answer == "y":
newImg.show()
newImg.save('Image1_Grayscale.jpg')
if answer == "n":
myImg.show()
| Sir-Lance/CS1400 | EX7-3.py | EX7-3.py | py | 304 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 2,
"usage_type": "name"
}
] |
13231283002 | """
@Author : Hirsi
@ Time : 2020/7/3
"""
"""
思路(线程池)
1.定义变量,保存源文件夹,目标文件夹所在的路径
2.在目标路径创建新的文件夹
3.获取源文件夹中所有的文件(列表)
4.便利列表,得到所有的文件名
5.定义函数,进行文件拷贝
文件拷贝函数 参数(源文件夹路径,目标文件夹路径,文件名)
1.拼接源文件和目标文件的具体路径
2.打开源文件,创建目标文件
3.读取源文件的内容,写入到目标文件中(while)
"""
import os
import multiprocessing
import time
# 5.定义函数,进行文件拷贝
def copy_work(source_dir,dest_dir,file_name):
print(multiprocessing.current_process().name)
# 1.拼接源文件和目标文件的具体路径,打开源文件,创建目标文件
source_path=source_dir+'/'+file_name
dest_path=dest_dir+'/'+file_name
# 3.读取源文件的内容,写入到目标文件中(while)
with open(source_path,'rb') as source_file:
with open(dest_path,'wb') as dest_file:
while True:
read_data = source_file.read(1024)
if read_data:
dest_file.write(read_data)
time.sleep(0.5)
else:
break
if __name__ == '__main__':
# 1.定义变量,保存源文件夹,目标文件夹所在的路径
source_dir='./test'
dest_dir='/home/hirsi/桌面/test'
# 2.在目标路径创建新的文件夹
try:
os.mkdir(dest_dir)
except:
print('文件已存在!')
# 3.获取源文件夹中所有的文件(列表)
file_list = os.listdir(source_dir)
# ***创建进程池
pool = multiprocessing.Pool(3)
# 4.遍历列表,得到所有的文件名
for file_name in file_list:
# 单进程
# copy_work(source_dir,dest_dir,file_name)
pool.apply_async(copy_work,(source_dir,dest_dir,file_name))
# 不再接受新的任务
pool.close()
# 让主进程等待进程池结束后再退出
pool.join()
print('复制完成!') | gitHirsi/PythonNotes02 | day07-多任务-进程/10-文件夹拷贝器_多进程版.py | 10-文件夹拷贝器_多进程版.py | py | 2,139 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "multiprocessing.current_process",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.listdir",
... |
23706350056 | #!/usr/bin/env python
"""Plot sky positions onto an Aitoff map of the sky.
Usage:
%s <filename>... [--racol=<racol>] [--deccol=<deccol>] [--mjdcol=<mjdcol>] [--filtercol=<filtercol>] [--expnamecol=<expnamecol>] [--commentcol=<commentcol>] [--usepatches] [--alpha=<alpha>] [--outfile=<outfile>] [--tight] [--delimiter=<delimiter>] [--pointsize=<pointsize>]
%s (-h | --help)
%s --version
Options:
-h --help Show this screen.
--version Show version.
--racol=<racol> Column that represents RA. [default: ra]
--deccol=<deccol> Column that represents declination. [default: dec]
--mjdcol=<mjdcol> Column that represents MJD. [default: mjd]
--filtercol=<filtercol> Column that represents filter. [default: filter]
--expnamecol=<expnamecol> Column that represents exposure name.
--commentcol=<commentcol> Column that represents a comment (e.g. a survey comment, for Pan-STARRS).
--usepatches Plot patches (defined shapes), not points, e.g. ATLAS square footprints or Pan-STARRS circles mapped onto the sky.
--outfile=<outfile> Output file.
--alpha=<alpha> Transparency. [default: 0.1]
--tight Tight layout.
--delimiter=<delimiter> Delimiter to use [default: \t]
--pointsize=<pointsize> Point size [default: 0.1]
E.g.:
%s ~/atlas/dophot/small_area_fields_subset.txt --alpha=0.1 --usepatches --outfile=/tmp/test.png
"""
import sys
__doc__ = __doc__ % (sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0])
from docopt import docopt
from gkutils.commonutils import Struct, readGenericDataFile, cleanOptions, sexToDec, getMJDFromSqlDate, GalactictoJ2000, EcliptictoJ2000, getDateFromMJD, transform
import csv
import numpy as np
import matplotlib.pyplot as pl
from matplotlib import colors
import matplotlib.patches as patches
import math
# ###########################################################################################
# Main program
# ###########################################################################################
# Colors as defined in lightcurve.js
colors = ["#6A5ACD", #SlateBlue
"#008000", #Green
"#DAA520", #GoldenRod
"#A0522D", #Sienna
"#FF69B4", #HotPink
"#DC143C", #Crimson
"#008B8B", #DarkCyan
"#FF8C00", #Darkorange
"#FFD700", #Gold
"#0000FF", #Blue
"#4B0082", #Indigo
"#800080", #Purple
"#A52A2A", #Brown
"#DB7093", #PaleVioletRed
"#708090", #SlateGray
"#800000", #Maroon
"#B22222", #FireBrick
"#9ACD32", #YellowGreen
"#FA8072", #Salmon
"#000000"]; #Black
def doPlot(options, objects, plotNumber = 111, alpha = 0.2, minMJD = 0.0, maxMJD = 60000.0, usePatches = False):
gx = []
gy = []
rx = []
ry = []
ix = []
iy = []
zx = []
zy = []
yx = []
yy = []
wx = []
wy = []
cx = []
cy = []
ox = []
oy = []
for row in objects:
try:
ra = float(row[options.racol])
except ValueError as e:
ra = sexToDec(row[options.racol], ra=True)
try:
dec = float(row[options.deccol])
except ValueError as e:
dec = sexToDec(row[options.deccol], ra=False)
if ra > 180.0:
ra = 360.0 - ra
else:
ra = (-1.0) * ra
try:
mjd = float(row[options.mjdcol])
# Maybe we got JD, not MJD - check.
if mjd > 2400000.5:
mjd = mjd - 2400000.5
except ValueError as e:
mjd = getMJDFromSqlDate(row[options.mjdcol])
#if dec > -9.0 and dec < -8.0:
#if mjd > 57053: # January 31st
#if mjd > 57174: # June 1st
if mjd is not None and mjd > minMJD and mjd < maxMJD:
if row[options.filtercol][0] == 'g':
gx.append(ra)
gy.append(dec)
elif row[options.filtercol][0] == 'r':
rx.append(ra)
ry.append(dec)
elif row[options.filtercol][0] == 'i':
ix.append(ra)
iy.append(dec)
elif row[options.filtercol][0] == 'z':
zx.append(ra)
zy.append(dec)
elif row[options.filtercol][0] == 'y':
yx.append(ra)
yy.append(dec)
elif row[options.filtercol][0] == 'w':
wx.append(ra)
wy.append(dec)
elif row[options.filtercol][0] == 'c':
cx.append(ra)
cy.append(dec)
elif row[options.filtercol][0] == 'o':
ox.append(ra)
oy.append(dec)
#print (row[options.racol], row[options.deccol], row[options.expnamecol], row[options.commentcol], row[options.filtercol])
degtorad = math.pi/180.
gx = np.array(gx) * degtorad
gy = np.array(gy) * degtorad
rx = np.array(rx) * degtorad
ry = np.array(ry) * degtorad
ix = np.array(ix) * degtorad
iy = np.array(iy) * degtorad
zx = np.array(zx) * degtorad
zy = np.array(zy) * degtorad
yx = np.array(yx) * degtorad
yy = np.array(yy) * degtorad
wx = np.array(wx) * degtorad
wy = np.array(wy) * degtorad
cx = np.array(cx) * degtorad
cy = np.array(cy) * degtorad
ox = np.array(ox) * degtorad
oy = np.array(oy) * degtorad
fig2 = pl.figure(2)
ax1 = fig2.add_subplot(plotNumber, projection="hammer")
s = 5.4 * degtorad
r = 1.4 * degtorad
if usePatches:
# Square exposures for ATLAS, circular ones for PS1
for x,y in zip(gx,gy):
ax1.add_patch(patches.Circle((x, y), r, color=colors[0], alpha = float(options.alpha)))
for x,y in zip(rx,ry):
ax1.add_patch(patches.Circle((x, y), r, color=colors[1], alpha = float(options.alpha)))
for x,y in zip(ix,iy):
ax1.add_patch(patches.Circle((x, y), r, color=colors[2], alpha = float(options.alpha)))
for x,y in zip(zx,zy):
ax1.add_patch(patches.Circle((x, y), r, color=colors[3], alpha = float(options.alpha)))
for x,y in zip(yx,yy):
ax1.add_patch(patches.Circle((x, y), r, color=colors[4], alpha = float(options.alpha)))
for x,y in zip(wx,wy):
ax1.add_patch(patches.Circle((x, y), r, color=colors[5], alpha = float(options.alpha)))
for x,y in zip(cx,cy):
ax1.add_patch(patches.Rectangle((x-s/2.0, y-s/2.0), s/math.cos(y), s, color=colors[6], alpha = float(options.alpha)))
for x,y in zip(ox,oy):
ax1.add_patch(patches.Rectangle((x-s/2.0, y-s/2.0), s/math.cos(y), s, color=colors[7], alpha = float(options.alpha)))
else:
ax1.scatter(gx,gy, alpha=float(options.alpha), edgecolors='none', color=colors[0], s = float(options.pointsize))
ax1.scatter(rx,ry, alpha=float(options.alpha), edgecolors='none', color=colors[1], s = float(options.pointsize))
ax1.scatter(ix,iy, alpha=float(options.alpha), edgecolors='none', color=colors[2], s = float(options.pointsize))
ax1.scatter(zx,zy, alpha=float(options.alpha), edgecolors='none', color=colors[3], s = float(options.pointsize))
ax1.scatter(yx,yy, alpha=float(options.alpha), edgecolors='none', color=colors[4], s = float(options.pointsize))
ax1.scatter(wx,wy, alpha=float(options.alpha), edgecolors='none', color=colors[5], s = float(options.pointsize))
ax1.scatter(cx,cy, alpha=float(options.alpha), edgecolors='none', color=colors[6], s = float(options.pointsize))
ax1.scatter(ox,oy, alpha=float(options.alpha), edgecolors='none', color=colors[7], s = float(options.pointsize))
gleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[0])
rleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[1])
ileg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[2])
zleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[3])
yleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[4])
wleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[5])
cleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[6])
oleg = ax1.scatter(-10,-10, alpha=1.0, edgecolors='none', color=colors[7])
#leg = ax1.legend(loc='upper right', scatterpoints = 1, prop = {'size':6})
#leg = ax1.legend([gleg, rleg, ileg, zleg], ['g', 'r', 'i', 'z'], loc='upper right', scatterpoints = 1, prop = {'size':6})
#leg = ax1.legend([gleg, rleg, ileg, zleg, yleg], ['g', 'r', 'i', 'z', 'y'], loc='upper right', scatterpoints = 1, prop = {'size':6})
#leg = ax1.legend([gleg, rleg, ileg, zleg, yleg, wleg], ['g', 'r', 'i', 'z', 'y', 'w'], loc='upper right', scatterpoints = 1, prop = {'size':4})
#leg = ax1.legend([gleg, rleg, ileg, zleg, yleg, wleg, cleg, oleg], ['g', 'r', 'i', 'z', 'y', 'w', 'c', 'o'], loc='upper right', scatterpoints = 1, prop = {'size':4})
#leg = ax1.legend([cleg, oleg], ['c', 'o'], loc='upper right', scatterpoints = 1, prop = {'size':4})
#leg.get_frame().set_linewidth(0.0)
#leg.get_frame().set_alpha(0.0)
ax1.plot([-math.pi, math.pi], [0,0],'r-')
ax1.plot([0,0],[-math.pi, math.pi], 'r-')
labels = ['10h', '8h', '6h', '4h', '2h', '0h', '22h', '20h', '18h', '16h', '14h']
ax1.axes.xaxis.set_ticklabels(labels)
# Plot the galactic plane
gp = []
for l in range(0, 36000, 1):
equatorialCoords = transform([l/100.0, 0.0], GalactictoJ2000)
gp.append(equatorialCoords)
ras = []
decs = []
for row in gp:
ra, dec = row
if ra > 180.0:
ra = 360.0 - ra
else:
ra = (-1.0) * ra
ras.append(ra)
decs.append(dec)
ras = np.array(ras) * degtorad
decs = np.array(decs) * degtorad
ax1.plot(ras,decs, 'k.', markersize=1.0)
# Plot the ecliptic plane
ep = []
for l in range(0, 36000, 1):
equatorialCoords = transform([l/100.0, 0.0], EcliptictoJ2000)
ep.append(equatorialCoords)
ras = []
decs = []
for row in ep:
ra, dec = row
if ra > 180.0:
ra = 360.0 - ra
else:
ra = (-1.0) * ra
ras.append(ra)
decs.append(dec)
ras = np.array(ras) * degtorad
decs = np.array(decs) * degtorad
ax1.plot(ras,decs, 'b.', markersize=1.0)
#ax1.axes.yaxis.set_ticklabels([])
# plot celestial equator
#ax1.plot(longitude2,latitude2,'g-')
#for i in range(0,6):
# ax1.text(xrad[i], yrad[i], lab[i])
#pl.title("%s" % getDateFromMJD(maxMJD).split(' ')[0], color='b', fontsize=12)
pl.grid(True)
return pl
def plotHammerProjection(options, filename, objects, alpha = 0.2, minMJD = 0.0, maxMJD = 60000.0, usePatches = False):
print (maxMJD -1, maxMJD)
# pl = doPlot(options, objects, plotNumber = 212, alpha = alpha, minMJD = maxMJD - 1, maxMJD = maxMJD)
pl = doPlot(options, objects, plotNumber = 111, alpha = alpha, minMJD = minMJD, maxMJD = maxMJD, usePatches = usePatches)
#pl = doPlot(options, objects, plotNumber = 212, alpha = alpha, minMJD = 57168, maxMJD = 57169)
if options.tight:
pl.tight_layout()
if options.outfile:
pl.savefig(options.outfile, dpi=600)
pl.clf()
else:
pl.show()
#pl.savefig(filename + '_%s' % str(maxMJD) + '.png', dpi=600)
def doStats(options, filename, objects):
"""Do some stats on the list of objects - e.g. How many occurrences of something"""
from collections import Counter
mjds = []
fp = {}
for row in objects:
try:
mjd = float(row['mjd'])
except ValueError as e:
mjd = getMJDFromSqlDate(row['mjd'])
wholeMJD = int(mjd)
mjds.append(wholeMJD)
try:
fp[wholeMJD].append(row[options.expnamecol])
except KeyError as e:
fp[wholeMJD] = [row[options.expnamecol]]
# Count the number of exposures per night
mjdFrequency = Counter(mjds)
for k,v in mjdFrequency.items():
print (k,v)
print ()
# Now count the frequency of fpa_object per night. This will tell us how much
# sky is ACTUALLY covered each night.
for k,v in fp.items():
footprintFrequency = Counter(fp[k])
print (k, len(footprintFrequency))
def main(argv = None):
opts = docopt(__doc__, version='0.1')
opts = cleanOptions(opts)
options = Struct(**opts)
# maxMJD = 57169 = 27th May 2015. GPC1 out of sync after that.
# minMJD = 57053 = 31st January 2015.
# minMJD = 56991 = 30th November 2014 - when we restarted the survey
# plotHammerProjection(options, filename, objectsList, alpha=0.7, minMJD = 57032.0, maxMJD = 57169.0)
# plotHammerProjection(options, filename, objectsList, alpha=0.2, minMJD = 56991.0, maxMJD = 57169.0)
# plotHammerProjection(options, filename, objectsList, alpha=0.7, minMJD = 0.0, maxMJD = 57169.0)
#for mjd in range(55230, 57169):
# plotHammerProjection(options, filename, objectsList, alpha=0.2, minMJD = 55229, maxMJD = mjd)
# For object plots min MJD is 56444 and (current) max is 57410
# for mjd in range(56443, 57411):
# plotHammerProjection(options, filename, objectsList, alpha=0.4, minMJD = 56443, maxMJD = mjd)
# 2016-06-23 KWS Added code to use "patches" to plot accurate ATLAS and PS1 footprints. We don't
# want to use patches if we are plotting object locations.
# jan01 = 57388
# feb01 = 57419
# mar01 = 57448
# apr01 = 57479
# may01 = 57509
# jun01 = 57540
# jul01 = 57570
# aug01 = 57601
# sep01 = 57632
# oct01 = 57662
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = jan01, maxMJD = feb01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = feb01, maxMJD = mar01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = mar01, maxMJD = apr01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = apr01, maxMJD = may01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = may01, maxMJD = jun01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = jun01, maxMJD = jul01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = jul01, maxMJD = aug01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = aug01, maxMJD = sep01)
# plotHammerProjection(options, filename, objectsList, alpha=0.02, usePatches = True, minMJD = sep01, maxMJD = oct01)
#alpha = 0.002
for filename in options.filename:
objectsList = readGenericDataFile(filename, delimiter=options.delimiter)
plotHammerProjection(options, filename, objectsList, alpha=float(options.alpha), usePatches = options.usepatches)
#doStats(options, filename, objectsList)
if __name__ == '__main__':
main()
| genghisken/gkplot | gkplot/scripts/skyplot.py | skyplot.py | py | 15,360 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "gkutils.commonutils.sexToDec",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "gkutils.... |
27773482180 | import threading
from sqlalchemy import Column, UnicodeText, Integer
from telepyrobot.db import BASE, SESSION
from telepyrobot.utils.msg_types import Types
class Notes(BASE):
__tablename__ = "self_notes"
user_id = Column(Integer, primary_key=True)
name = Column(UnicodeText, primary_key=True)
value = Column(UnicodeText, nullable=False)
msgtype = Column(Integer, default=Types.TEXT)
file_id = Column(UnicodeText)
file_ref = Column(UnicodeText)
def __init__(self, user_id, name, value, msgtype, file_id, file_ref):
"""initializing db"""
self.user_id = user_id
self.name = name
self.value = value
self.msgtype = msgtype
self.file_id = file_id
self.file_ref = file_ref
def __repr__(self):
"""get db message"""
return f"<Note {self.name}>"
Notes.__table__.create(checkfirst=True)
INSERTION_LOCK = threading.RLock()
SELF_NOTES = {}
# Types of message
# TEXT = 1
# DOCUMENT = 2
# PHOTO = 3
# VIDEO = 4
# STICKER = 5
# AUDIO = 6
# VOICE = 7
# VIDEO_NOTE = 8
# ANIMATION = 9
# ANIMATED_STICKER = 10
# CONTACT = 11
def save_note(user_id, note_name, note_data, msgtype, file_id=None, file_ref=None):
global SELF_NOTES
with INSERTION_LOCK:
prev = SESSION.query(Notes).get((user_id, note_name))
if prev:
SESSION.delete(prev)
note = Notes(
user_id,
note_name,
note_data,
msgtype=int(msgtype),
file_id=file_id,
file_ref=file_ref,
)
SESSION.add(note)
SESSION.commit()
if not SELF_NOTES.get(user_id):
SELF_NOTES[user_id] = {}
SELF_NOTES[user_id][note_name] = {
"value": note_data,
"type": msgtype,
"file_id": file_id,
"file_ref": file_ref,
}
def get_note(user_id, note_name):
if not SELF_NOTES.get(user_id):
SELF_NOTES[user_id] = {}
return SELF_NOTES[user_id].get(note_name)
def get_all_notes(user_id):
if not SELF_NOTES.get(user_id):
SELF_NOTES[user_id] = {}
return None
allnotes = list(SELF_NOTES[user_id])
allnotes.sort()
return allnotes
def get_num_notes(user_id):
try:
num_notes = SESSION.query(Notes).count()
return num_notes
finally:
SESSION.close()
def rm_note(user_id, note_name):
global SELF_NOTES
with INSERTION_LOCK:
note = SESSION.query(Notes).get((user_id, note_name))
if note:
SESSION.delete(note)
SESSION.commit()
SELF_NOTES[user_id].pop(note_name)
return True
else:
SESSION.close()
return False
def __load_all_notes():
global SELF_NOTES
getall = SESSION.query(Notes).distinct().all()
for x in getall:
if not SELF_NOTES.get(x.user_id):
SELF_NOTES[x.user_id] = {}
SELF_NOTES[x.user_id][x.name] = {
"value": x.value,
"type": x.msgtype,
"file_id": x.file_id,
"file_ref": x.file_ref,
}
__load_all_notes()
| Divkix/TelePyroBot | telepyrobot/db/notes_db.py | notes_db.py | py | 3,140 | python | en | code | 40 | github-code | 6 | [
{
"api_name": "telepyrobot.db.BASE",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.... |
36339047472 | import csv
from datetime import datetime
import random
Header=["Time","Sample number","Temperature","Humidity","Sensor response", "PM response", "Temperature MFC"]
dataLine=["","","","","","",""]
with open('main.csv','w') as main:
csv_writer=csv.writer(main, delimiter=",")
csv_writer.writerow(Header)
#csv_writer.writerow(lined)
i=0
while i < 10000:
dataLine[0]=datetime.now()
dataLine[1]=i
dataLine[2]=random.randint(0, 40)
dataLine[3]=random.randint(15, 90)
dataLine[4]=random.randint(0, 100)
dataLine[5]=random.randint(0, 100)
dataLine[6]=random.randint(0, 40)
csv_writer.writerow(dataLine)
i=i+1
#with open('main.csv','r') as main:
# csv_reader=csv
#for ligne in csv_writer:
# print(ligne)
| Virgile-Colrat/YFA-Project_python_interface | Sources/testcs.py | testcs.py | py | 723 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.writer",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "random.randint",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.