index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,800 | 45159cd361a67881971278bdc73e66556733f52e | import Validate
import app
class Read:
    """Interactive console menu for running read-only queries.

    Each public method prompts on stdin and prints query results.
    NOTE(review): rows are indexed by column name (e.g. row['name']), so
    ``cnx`` is assumed to produce dictionary cursors -- confirm against the
    caller's connector configuration.
    """

    def __init__(self, cnx):
        # Shared DB connection; a fresh cursor is opened per operation.
        self.cnx = cnx

    def delegate(self):
        """Prompt for a read operation and dispatch to the matching handler.

        Returns whatever the chosen handler returns, or ``app.main(cnx)``
        when the user quits with Q.
        """
        options = ["Table", "Portfolios by Sector", "Investments by VC Firm",
                   "Investments by GE Firm", "Partners at Firm",
                   "Firms in Location"]
        print("What do you want to read? Please chose from the following options, or press Q to go back:")
        print("Table, Portfolios by Sector, Investments by VC Firm, Investments by GE Firm,"
              " Partners at Firm, Firms in Location")
        user_in = input("Read: ")
        if user_in.lower() == "q":
            return app.main(self.cnx)
        v = Validate.Validate(self.cnx)
        while not v.validateInput(user_in, options):
            print("Invalid Input Please try again with the options below, or enter Q to go back")
            print("Table, Portfolios by Sector, Investments by VC Firm, Investments by GE Firm,"
                  " Partners at Firm, Firms in Location")
            # BUG FIX: the retry prompt previously said "Create: " here.
            user_in = input("Read: ")
            if user_in.lower() == "q":
                return app.main(self.cnx)
        dispatch = {"table": self.readTable,
                    "portfolios by sector": self.getSectorPortfolioCo,
                    "investments by vc firm": self.getInvestmentsByVCFirm,
                    "investments by ge firm": self.getInvestmentsByGEFirm,
                    "partners at firm": self.getPartnersForVCFirm,
                    "firms in location": self.getFirmsInLocation}
        handler = dispatch.get(user_in.lower())
        return handler() if handler is not None else None

    def readTable(self):
        """Print every row of a user-chosen table (whitelisted names only)."""
        tables = ["location", "growth_equity_firms", "vc_firms", "portfolio_companies",
                  "sector", "investment", "partners"]
        print("Which table would you like to read, chose from the list below or press Q to go back to home page")
        print("location, growth_equity_firms, vc_firms, portfolio_companies, sector, investment, partners")
        user_in = input("Table: ")
        v = Validate.Validate(self.cnx)
        if user_in.lower() == "q":
            return
        while not v.validateInput(user_in, tables):
            print("Invalid Input try again, chose from the list below or press Q to go back to home page")
            print("location, growth_equity_firms, vc_firms, portfolio_companies, sector, investment, partners")
            user_in = input("Table: ")
            if user_in.lower() == "q":
                return
        c = self.cnx.cursor()
        try:
            # Interpolation is safe here: user_in was validated against the
            # fixed whitelist above (table names cannot be parameterized).
            c.execute("select * from " + user_in)
            for row in c.fetchall():
                print(row)
            print("\n")
        finally:
            # BUG FIX: the cursor is now closed on every exit path.
            c.close()

    def _promptAndCallProc(self, heading, list_sql, column, prompt, table, not_found_msg, proc):
        """Shared flow for the stored-procedure readers.

        Lists the available values for *column*, prompts the user for one,
        then calls stored procedure *proc* with the chosen value and prints
        the resulting rows.  Returns None when the user quits.
        """
        c = self.cnx.cursor()
        try:
            print(heading)
            c.execute(list_sql)
            print(", ".join(row[column] for row in c.fetchall()))
            arr = self.validateReadInputValue(prompt, table, column, not_found_msg)
            if not arr:
                return
            # BUG FIX: proc args were passed as a set literal {arr[0]};
            # sets are unordered, so use a tuple as the args sequence.
            c.callproc(proc, (arr[0],))
            for row in c.fetchall():
                print(row)
            print("\n")
        finally:
            # BUG FIX: previously leaked the cursor when the user quit early.
            c.close()

    def getSectorPortfolioCo(self):
        """List sector industries, then print portfolio companies for one."""
        return self._promptAndCallProc("Select one of the following Sector Industries: ",
                                       "select distinct industry from sector", "industry",
                                       "Sector Industry: ", "sector",
                                       "Industry doesn't exist try again or press Q to return to home",
                                       'getSectorPortfolioCo')

    def getInvestmentsByVCFirm(self):
        """List VC firms, then print the investments made by one of them."""
        return self._promptAndCallProc("Select one of the following VC Firms: ",
                                       "select name from vc_firms", "name",
                                       "VC Firm Name: ", "vc_firms",
                                       "Firm doesn't exist try again or press Q to return to home",
                                       'getInvestmentByVCFirm')

    def getInvestmentsByGEFirm(self):
        """List GE firms, then print the investments made by one of them."""
        return self._promptAndCallProc("Select one of the following GE Firms: ",
                                       "select name from growth_equity_firms", "name",
                                       "GE Firm Name: ", "growth_equity_firms",
                                       "Firm doesn't exist try again or press Q to return to home",
                                       'getInvestmentByGEFirm')

    def getPartnersForVCFirm(self):
        """List VC firms, then print the partners working at one of them."""
        return self._promptAndCallProc("Select one of the following VC Firms: ",
                                       "select name from vc_firms", "name",
                                       "VC Firm Name: ", "vc_firms",
                                       "Firm doesn't exist try again or press Q to return to home",
                                       'getPartnersForVCFirm')

    def getFirmsInLocation(self):
        """List cities, then print the firms located in one of them."""
        return self._promptAndCallProc("Select one of the following Locations: ",
                                       "select city from location", "city",
                                       "City: ", "location",
                                       "Firm doesn't exist try again or press Q to return to home",
                                       'getFirmsInLocation')

    def validateReadInputValue(self, in1, table, column, message):
        """Prompt (with *in1*) until the entered value exists in table.column.

        Returns [value] on success, or [] when the user quits with Q or the
        lookup raises ValueError.
        """
        try:
            identifier = input(in1)
            # BUG FIX: the first input was never checked for Q before.
            if identifier.lower() == 'q':
                return []
            v = Validate.Validate(self.cnx)
            while not v.validateExists(table, column, identifier,
                                       message):
                identifier = input(in1)
                if identifier.lower() == 'q':
                    return []
        except ValueError:
            print("Read failed, id was not valid")
            return []
        return [identifier]
|
22,801 | a8318a166d1f1060124480ef1f1f6683bf145884 | #!/usr/bin/env python3
# Script for message insertion
# Last Significant Bit steganography for RGB images
# by Vz, 2018
from PIL import Image
import sys
# Number of bits used to store one ASCII character.
bits_in_byte = 8
# 0b11111110 -- AND-mask that clears a channel's least-significant bit.
last_bit_zero_mask = 254
# 0b00000001 -- OR-mask that sets a channel's least-significant bit.
last_bit_one_mask = 1

# Container file names come from argv, or interactively when none are given.
input_filenames = []
if len(sys.argv) > 1:
    input_filenames = sys.argv[1:]
else:
    input_filenames.append(input("original container file name: "))

for orig_filename in input_filenames:
    try:
        orig_image = Image.open(orig_filename)
    except OSError:
        # BUG FIX: was a bare "except:" (also trapped Ctrl-C). PIL signals
        # missing/unreadable/unrecognized containers via OSError subclasses.
        print("Error: error during opening the container", "ERR: " + orig_filename, sep = '\n')
        continue
    # NOTE(review): the per-pixel unpack below assumes a 3-channel RGB
    # container; an RGBA or palette image would break it -- consider
    # orig_image.convert("RGB") if that case must be supported.
    width, height = orig_image.size
    new_filename = input("generated image file name: ")
    if new_filename.find(".") == -1:
        new_filename = new_filename + ".png"
    rgb_ch = input("choose rgb channel (R/G/B): ")
    while rgb_ch not in ('R', 'G', 'B'):
        print("Error: wrong channel value")
        rgb_ch = input("choose rgb channel (R/G/B): ")
    message = input("message: ")
    try:
        message.encode("ascii")
    except UnicodeEncodeError:
        print("Error: message is not in ascii", "ERR: " + orig_filename, sep = '\n')
        continue
    # One bit is hidden per pixel, so the message must fit in width*height bits.
    if (len(message) * bits_in_byte > width * height):
        print("Error: message is too big for this container", "ERR: " + orig_filename, sep = '\n')
        continue
    new_image = Image.new("RGB", (width, height))
    # Flatten the message into bits, MSB first, zero-padded to 8 bits per char.
    message_bits = []
    for code in (ord(symbol) for symbol in message):
        bit_list = [int(bit) for bit in bin(code)[2:]]
        bit_list = [0] * (bits_in_byte - len(bit_list)) + bit_list
        message_bits += bit_list
    mes_bits_counter = 0
    channels = dict()
    # Column-major walk (x varies slowest); the extractor must use the same
    # order.  Pixels beyond the message are copied through unchanged.
    for x in range(width):
        for y in range(height):
            channels['R'], channels['G'], channels['B'] = orig_image.getpixel((x, y))
            if mes_bits_counter < len(message_bits):
                if message_bits[mes_bits_counter] == 0:
                    channels[rgb_ch] &= last_bit_zero_mask
                else:
                    channels[rgb_ch] |= last_bit_one_mask
            new_image.putpixel((x,y), (channels['R'], channels['G'], channels['B']))
            mes_bits_counter += 1
    try:
        new_image.save(new_filename)
    except Exception:
        # BUG FIX: was a bare "except:". Exception still covers save failures
        # (OSError, ValueError/KeyError for unknown extensions) without
        # swallowing KeyboardInterrupt/SystemExit.
        print("Error: error during saving file", "ERR: " + orig_filename, sep = '\n')
        continue
    print("OK:", new_filename)
|
22,802 | 09e761763cad73a2fc2c7c2195e23bcc5454b80e | from scripts.dataset.dataset_generator import *
from scripts.processing.processing_not_streamlined import *
from scripts.processing.processing_streamlined import *
from scripts.training.training_not_streamlined import *
from scripts.training.training_streamlined import *
from scripts.training.shared_model_functions import convert_and_save_model
from scripts.arduino.arduino import *
def pad_to_max(string, end_txt = "", free = 0, width = 42):
    """Pad *string* with spaces so all GUI labels align in a fixed grid.

    The result is *string* left-justified to ``width - free - len(end_txt)``
    characters, with *end_txt* appended after the padding (so *end_txt* ends
    at column ``width - free`` when everything fits).  If *string* is already
    longer than the target, no padding is added.

    width: total column budget; default 42 matches the original hard-coded
    layout, so existing callers are unaffected.
    """
    target = width - free - len(end_txt)
    # str.ljust replaces the original manual space-append loop.
    return string.ljust(target) + end_txt
def round_to_x(widget, x = 2):
    """Clamp a widget's numeric value to *x* decimal places.

    Writes the rounded value back through ``widget.set`` only when it
    actually differs, then returns the widget's (possibly updated) value.
    """
    current = widget.get()
    rounded = round(current, x)
    if rounded != current:
        widget.set(rounded)
    return widget.get()
class custom_cmd():
    """Minimal file-like stream that mirrors writes into a Tk text widget,
    so ``sys.stdout``/``sys.stderr`` can be redirected into the GUI log.
    """

    def __init__(self, logs):
        # logs: a tk.Text-like widget supporting insert/see/delete.
        self.logs = logs

    def write(self, text):
        """Append *text* to the widget and scroll the newest output into view."""
        self.logs.insert(tk.END, text)
        self.logs.see(tk.END)

    def flush(self):
        """Delete the widget's last line.

        NOTE(review): presumably emulates carriage-return style progress
        output by letting the next write() overwrite the last line -- confirm.
        """
        try:
            self.logs.delete("end-1c linestart", "end")
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. Deleting from an empty or
            # destroyed widget is still silently ignored.
            pass
class main_app():
    def __init__(self, root):
        """Build the full GUI: styles, all pages, and page navigation metadata.

        root: the Tk root window.  Relies on module-level globals brought in
        by the star-imports (wd, background_color, text_color, menu_color,
        button_color, the *_window_size constants) -- defined elsewhere in
        the project.
        """
        self.prev_data_path = None  # last dataset path seen (change detection)
        self.data = None  # loaded dataset; populated later by the data code
        # Placeholder thread so later .is_alive() checks work before any
        # recording/training has been started.
        self.thread = threading.Thread(target = send_to_arduino)
        self.root = root
        # Keep references to every PhotoImage: Tk only renders images that
        # remain referenced from Python.
        self.images = {"browse_button" : tk.PhotoImage(file = os.path.join(wd,"Images","browse_button.png")),
                       "next_button" : tk.PhotoImage(file = os.path.join(wd,"Images","next_button.png")),
                       "back_button" : tk.PhotoImage(file = os.path.join(wd,"Images","back_button.png")),
                       "globe_button" : tk.PhotoImage(file = os.path.join(wd,"Images","globe_button.png")),
                       "arduino_button" : tk.PhotoImage(file = os.path.join(wd,"Images","arduino_button.png")),
                       "home_button" : tk.PhotoImage(file = os.path.join(wd,"Images","home_button.png")),
                       "train_button" : tk.PhotoImage(file = os.path.join(wd,"Images","train_button.png")),
                       "save_button" : tk.PhotoImage(file = os.path.join(wd,"Images","save_button.png")),
                       "load_data" : tk.PhotoImage(file = os.path.join(wd,"Images","load_data.png")),
                       "process_data" : tk.PhotoImage(file = os.path.join(wd,"Images","process_data.png")),
                       "train_model" : tk.PhotoImage(file = os.path.join(wd,"Images","train_model.png")),
                       "upload_model" : tk.PhotoImage(file = os.path.join(wd,"Images","upload_model.png")),
                       "logo" : tk.PhotoImage(file = os.path.join(wd,"Images","logo.png")),
                       "logo_mac" : tk.PhotoImage(file = os.path.join(wd,"Images","logo_mac.png")),
                       "record_button" : tk.PhotoImage(file = os.path.join(wd,"Images","record_button.png"))}
        # Windows gets the full custom theme; other platforms only restyle
        # the menu widgets to the default macOS-like grey.
        if os.name == "nt":
            ttk.Style(root).configure(".", background = background_color, foreground = text_color)
            ttk.Style(root).configure("TEntry", foreground = "#000000")
            ttk.Style(root).configure("TSpinbox", foreground = "#000000", selectbackground = "#f8f4f4", selectforeground = "#000000")
            ttk.Style(root).configure("TLabelframe.Label", foreground = text_color)
            ttk.Style(root).configure("menu.TFrame", background = menu_color)
            ttk.Style(root).configure("menu.TLabel", background = menu_color)
            ttk.Style(root).configure("menu.TButton", background = menu_color)
            ttk.Style(root).configure("TButton", foreground = "#000000")
        else:
            ttk.Style(root).configure("menu.TFrame", background = "#ececec")
            ttk.Style(root).configure("menu.TLabel", background = "#ececec")
            ttk.Style(root).configure("menu.TButton", background = "#ececec")
        #matplotlib.rcParams["axes.grid"] = False
        # Match matplotlib text colours to the Windows theme as well.
        if os.name == "nt":
            matplotlib.rcParams["text.color"] = text_color
            matplotlib.rcParams["axes.labelcolor"] = text_color
            matplotlib.rcParams["xtick.color"] = text_color
            matplotlib.rcParams["ytick.color"] = text_color
        self.background_frame = ttk.Frame(root)
        # Build every page up front; page switching just swaps which frame
        # is shown.
        self.setup_menu_frame()
        self.setup_home_page()
        self.setup_data_page()
        self.setup_processing_page()
        self.setup_training_page()
        self.setup_arduino_page()
        self.background_frame.pack(expand = True, fill = tk.BOTH)
        # Per-page metadata: window geometry, the frame widget, the menu
        # caption, and the labels for the back/next navigation buttons.
        self.page_dict = {"home_page" : {"geometry" : home_window_size,
                                         "widget" : self.home_page_frame},
                          "data_page" : {"geometry" : data_window_size,
                                         "widget" : self.data_page_frame,
                                         "menu_label" : "Select a dataset",
                                         "back_button" : "Arduino",
                                         "next_button" : "Processing"},
                          "processing_page" : {"geometry" : processing_window_size,
                                               "widget" : self.processing_page_frame,
                                               "menu_label" : "Select processing",
                                               "back_button" : "Data",
                                               "next_button" : "Training"},
                          "training_page" : {"geometry" : training_window_size,
                                             "widget" : self.training_page_frame,
                                             "menu_label" : "Train a model",
                                             "back_button" : "Processing",
                                             "next_button" : "Arduino"},
                          "arduino_page" : {"geometry" : arduino_window_size,
                                            "widget" : self.arduino_page_frame,
                                            "menu_label" : "Upload your model to Arduino",
                                            "back_button" : "Training",
                                            "next_button" : "Data"}}
        # Snapshot of the last processing settings, used to detect changes.
        self.prev_processing = {"processing_method" : None,
                                "sample_rate" : None,
                                "expected_duration" : None,
                                "window_size" : None,
                                "window_stride" : None}
    def setup_menu_frame(self):
        """Create the top menu bar: a caption label plus nav buttons.

        Only the frame and label are packed here; the back/next/home buttons
        are created but left unpacked (the home page shows no navigation).
        """
        self.current_page = "home_page"
        self.menu_frame = ttk.Frame(self.background_frame, style = "menu.TFrame")
        self.menu_label = ttk.Label(self.menu_frame, text = "", style = "menu.TLabel")
        self.menu_next_button = ttk.Button(self.menu_frame, image = self.images["next_button"], style = "menu.TButton", command = self.load_next_page, text = "", compound = tk.RIGHT)
        self.menu_back_button = ttk.Button(self.menu_frame, image = self.images["back_button"], style = "menu.TButton", command = self.load_previous_page, text = "", compound = tk.LEFT)
        self.menu_home_button = ttk.Button(self.menu_frame, image = self.images["home_button"], style = "menu.TButton", command = lambda : self.load_page("home_page"))
        self.menu_frame.pack(expand = False, fill = tk.X, side = tk.TOP)
        self.menu_label.pack(anchor = tk.CENTER)
# HOME PAGE
    def setup_home_page(self):
        """Build the home page: logo background and four quadrant buttons.

        Each button's tk "name" matches a page key in page_dict so that the
        hover/click handlers can derive the target page from the widget path.
        Windows gets flat themed buttons; other platforms use plain buttons
        and a macOS-sized logo.
        """
        self.home_page_frame = ttk.Frame(self.background_frame)
        if os.name == "nt":
            background_label = tk.Label(self.home_page_frame, image = self.images["logo"])
            self.load_data_button = tk.Button(self.home_page_frame, activebackground = background_color, bd = 0, bg = button_color, relief = tk.FLAT, overrelief = tk.FLAT, image = self.images["load_data"], name = "data_page")
            self.process_data_button = tk.Button(self.home_page_frame, activebackground = background_color, bd = 0, bg = button_color, relief = tk.FLAT, overrelief = tk.FLAT, image = self.images["process_data"], name = "processing_page")
            self.train_model_button = tk.Button(self.home_page_frame, activebackground = background_color, bd = 0, bg = button_color, relief = tk.FLAT, overrelief = tk.FLAT, image = self.images["train_model"], name = "training_page")
            self.upload_model_button = tk.Button(self.home_page_frame, activebackground = background_color, bd = 0, bg = button_color, relief = tk.FLAT, overrelief = tk.FLAT, image = self.images["upload_model"], name = "arduino_page")
        else:
            background_label = tk.Label(self.home_page_frame, image = self.images["logo_mac"])
            self.load_data_button = tk.Button(self.home_page_frame, image = self.images["load_data"], name = "data_page")
            self.process_data_button = tk.Button(self.home_page_frame, image = self.images["process_data"], name = "processing_page")
            self.train_model_button = tk.Button(self.home_page_frame, image = self.images["train_model"], name = "training_page")
            self.upload_model_button = tk.Button(self.home_page_frame, image = self.images["upload_model"], name = "arduino_page")
        # The logo fills the whole frame; buttons are placed on top of it.
        background_label.place(x=0, y=0, relwidth=1, relheight=1)
        # ButtonRelease (not command=) so the click can be cancelled by
        # moving the pointer off the button; Enter shows a hint in the menu.
        self.load_data_button.bind("<ButtonRelease-1>", self.quad_button_callback)
        self.load_data_button.bind("<Enter>", self.show_button_info)
        self.process_data_button.bind("<ButtonRelease-1>", self.quad_button_callback)
        self.process_data_button.bind("<Enter>", self.show_button_info)
        self.train_model_button.bind("<ButtonRelease-1>", self.quad_button_callback)
        self.train_model_button.bind("<Enter>", self.show_button_info)
        self.upload_model_button.bind("<ButtonRelease-1>", self.quad_button_callback)
        self.upload_model_button.bind("<Enter>", self.show_button_info)
        # One button per corner, inset by a 2% relative margin.
        offset = 0.02
        self.load_data_button.place(relx = 0 + offset, rely = 0 + offset, anchor='nw')
        self.process_data_button.place(relx = 1.0 - offset, rely = 0 + offset, anchor='ne')
        self.train_model_button.place(relx = 0 + offset, rely = 1.0 - offset, anchor='sw')
        self.upload_model_button.place(relx = 1.0 - offset, rely = 1.0 - offset, anchor='se')
        self.home_page_frame.pack(fill = tk.BOTH, expand = True)
def show_button_info(self, event):
page = str(event.widget).split(".")[-1]
if page == "data_page":
label = "Choose a dataset"
elif page == "processing_page":
label = "Configure processing"
elif page == "training_page":
label = "Train a model"
else:
label = "Upload a model"
self.menu_label.config(text = label)
def quad_button_callback(self, event):
x, y = self.home_page_frame.winfo_pointerxy()
if event.widget == self.home_page_frame.winfo_containing(x, y):
page = str(event.widget).split(".")[-1]
self.load_page(page)
# DATA PAGE
    def setup_data_page(self):
        """Build the data page by assembling its sub-frames.

        Call order matters: each setup_* method packs its frame, so this
        sequence fixes the top-to-bottom layout of the page.
        """
        self.data_page_frame = ttk.Frame(self.background_frame)
        self.setup_dataset_source_frame()
        self.setup_browse_dataset_frame()
        self.setup_browse_dataset_global_frame()
        self.setup_browse_dataset_make_frame()
        self.setup_record_dataset_make_frame()
        self.setup_expected_duration_frame()
        self.setup_sample_rate_frame()
# PROCESSING PAGE
    def setup_processing_page(self):
        """Build the processing page by assembling its sub-frames.

        window_size/window_stride frames are created first (unpacked) so the
        processing-method callback can show/hide them later.
        """
        self.processing_page_frame = ttk.Frame(self.background_frame)
        self.window_size_frame = ttk.Frame(self.processing_page_frame)
        self.window_stride_frame = ttk.Frame(self.processing_page_frame)
        self.setup_processing_method_frame()
        self.setup_window_size_frame()
        self.setup_window_stride_frame()
        self.setup_streamlining_frame()
    def setup_dataset_source_frame(self):
        """Build the Make/Local/URL radio-button row for the data source."""
        self.dataset_source_frame = ttk.Frame(self.data_page_frame)
        self.dataset_source_label = ttk.Label(self.dataset_source_frame, text = pad_to_max(" Data mode:", free = 0))
        self.browse_dataset_mode = tk.StringVar()
        self.dataset_source_radio_button_make = ttk.Radiobutton(self.dataset_source_frame, text = "Make ", value = "make", variable = self.browse_dataset_mode, command = self.browse_dataset_change_mode)
        self.dataset_source_radio_button_local = ttk.Radiobutton(self.dataset_source_frame, text = "Local ", value = "local", variable = self.browse_dataset_mode)
        self.dataset_source_radio_button_global = ttk.Radiobutton(self.dataset_source_frame, text = "URL ", value = "global", variable = self.browse_dataset_mode, command = self.browse_dataset_change_mode)
        # "Local" is the default selection; its command is attached only
        # AFTER invoke() so selecting the default does not fire the
        # mode-change callback during construction.
        self.dataset_source_radio_button_local.invoke()
        self.dataset_source_radio_button_local.config(command = self.browse_dataset_change_mode)
        self.dataset_source_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.dataset_source_label.pack(side = tk.LEFT)
        self.dataset_source_radio_button_local.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.dataset_source_radio_button_global.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.dataset_source_radio_button_make.pack(side = tk.LEFT, fill = tk.X, expand = True)
    def setup_browse_dataset_frame(self):
        """Build the label + path entry + browse button row for local datasets."""
        self.browse_dataset_frame = ttk.Frame(self.data_page_frame)
        self.browse_dataset_label = ttk.Label(self.browse_dataset_frame, text = pad_to_max(" Browse dataset:", free = 4))
        self.browse_dataset_button = ttk.Button(self.browse_dataset_frame, image = self.images["browse_button"], command = self.browse_dataset_button_callback)
        self.browse_dataset_entry = ttk.Entry(self.browse_dataset_frame, exportselection = 0)
        self.browse_dataset_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.browse_dataset_label.pack(side = tk.LEFT)
        self.browse_dataset_entry.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.browse_dataset_button.pack(side = tk.LEFT)
    def setup_browse_dataset_global_frame(self):
        """Build the destination row used in URL ("global") mode.

        Children are packed but the frame itself is not: it is shown/hidden
        by browse_dataset_change_mode when the mode switches.
        """
        self.browse_dataset_global_frame = ttk.Frame(self.data_page_frame)
        self.browse_dataset_global_label = ttk.Label(self.browse_dataset_global_frame, text = pad_to_max(" Dataset destination:", free = 4))
        self.browse_dataset_global_button = ttk.Button(self.browse_dataset_global_frame, image = self.images["browse_button"], command = self.browse_dataset_global_button_callback)
        self.browse_dataset_global_entry = ttk.Entry(self.browse_dataset_global_frame, exportselection = 0)
        self.browse_dataset_global_label.pack(side = tk.LEFT)
        self.browse_dataset_global_entry.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.browse_dataset_global_button.pack(side = tk.LEFT)
    def setup_browse_dataset_make_frame(self):
        """Build the labels-entry + samples-per-label spinbox row for Make mode.

        The frame itself is packed later by browse_dataset_change_mode.
        """
        self.browse_dataset_make_frame = ttk.Frame(self.data_page_frame)
        self.browse_dataset_make_label = ttk.Label(self.browse_dataset_make_frame, text = pad_to_max(" Labels and samples per label:", free = 4))
        self.num_samples_to_make = tk.StringVar(value = "1")
        # readonly: the count can only be changed via the spinbox arrows (1-99).
        self.browse_dataset_make_text_box_number = ttk.Spinbox(self.browse_dataset_make_frame, width = 3, from_ = 1, to = 99, textvariable = self.num_samples_to_make, exportselection = 0, increment = 1, state = "readonly")
        self.browse_dataset_make_text_box_label = ttk.Entry(self.browse_dataset_make_frame, exportselection = 0)
        self.browse_dataset_make_label.pack(side = tk.LEFT)
        self.browse_dataset_make_text_box_label.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.browse_dataset_make_text_box_number.pack(side = tk.LEFT)
    def setup_record_dataset_make_frame(self):
        """Build the Record button row for Make mode (frame packed on mode switch)."""
        self.record_dataset_make_frame = ttk.Frame(self.data_page_frame)
        self.record_dataset_make_button = ttk.Button(self.record_dataset_make_frame, text="Record", command = self.record_dataset_make_button_callback, compound = tk.LEFT, image = self.images["record_button"])
        self.record_dataset_make_button.pack(side = tk.RIGHT, padx=125)
    def setup_processing_method_frame(self):
        """Build the AVG/STFT/WRMS processing-method radio-button row."""
        self.processing_method_frame = ttk.Frame(self.processing_page_frame)
        self.processing_method_label = ttk.Label(self.processing_method_frame, text = pad_to_max(" Processing method:", free = 0))
        self.selected_processing_method = tk.StringVar()
        self.processing_method_radio_button_none = ttk.Radiobutton(self.processing_method_frame, text = "AVG ", value = "NONE", variable = self.selected_processing_method, command = self.processing_method_radio_button_callback)
        self.processing_method_radio_button_stft = ttk.Radiobutton(self.processing_method_frame, text = "STFT", value = "STFT", variable = self.selected_processing_method)
        self.processing_method_radio_button_rms = ttk.Radiobutton(self.processing_method_frame, text = "WRMS", value = "RMS", variable = self.selected_processing_method, command = self.processing_method_radio_button_callback)
        # STFT is the default; its command is attached only AFTER invoke()
        # so selecting the default does not fire the callback during setup.
        self.processing_method_radio_button_stft.invoke()
        self.processing_method_radio_button_stft.config(command = self.processing_method_radio_button_callback)
        self.processing_method_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.processing_method_label.pack(side = tk.LEFT)
        self.processing_method_radio_button_none.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.processing_method_radio_button_stft.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.processing_method_radio_button_rms.pack(side = tk.LEFT, fill = tk.X, expand = True)
    def setup_sample_rate_frame(self):
        """Build the 16000/41667 Hz sample-rate radio-button row (16000 default).

        The radio values are given as strings but the variable is an IntVar;
        Tk converts them on read.  free = -4 widens the label by 4 columns.
        """
        self.sample_rate_frame = ttk.Frame(self.data_page_frame)
        self.sample_rate_label = ttk.Label(self.sample_rate_frame, text = pad_to_max(" Sample rate (Hz):", free = -4))
        self.selected_sample_rate = tk.IntVar()
        self.sample_rate_radio_button_16000 = ttk.Radiobutton(self.sample_rate_frame, text = "16000", value = "16000", variable = self.selected_sample_rate)
        self.sample_rate_radio_button_16000.invoke()
        self.sample_rate_radio_button_41667 = ttk.Radiobutton(self.sample_rate_frame, text = "41667", value = "41667", variable = self.selected_sample_rate)
        self.sample_rate_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.sample_rate_label.pack(side = tk.LEFT)
        self.sample_rate_radio_button_16000.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.sample_rate_radio_button_41667.pack(side = tk.LEFT, fill = tk.X, expand = True)
    def setup_expected_duration_frame(self):
        """Build the expected-duration slider row (0.1 s to 2 s, default 1 s).

        The label text embeds the current value and the scale minimum; the
        trailing label shows the maximum.
        """
        self.expected_duration_frame = ttk.Frame(self.data_page_frame)
        self.expected_duration_label = ttk.Label(self.expected_duration_frame, text = pad_to_max(" Expected duration (s): 1.00", "0.10"))
        self.expected_duration_scale = ttk.Scale(self.expected_duration_frame, from_ = 0.1, to = 2, value = 1, command = self.expected_duration_scale_callback)
        self.expected_duration_label_end = ttk.Label(self.expected_duration_frame, text = "2.00")
        self.expected_duration_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.expected_duration_label.pack(side = tk.LEFT)
        self.expected_duration_scale.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.expected_duration_label_end.pack(side = tk.LEFT)
    def setup_window_size_frame(self):
        """Build the window-size spinbox row (exponent 5-12, i.e. 32-4096 samples).

        The trailing label shows the window duration in seconds for the
        current sample rate; both sample-rate radios are rewired here so a
        rate change refreshes that label too.
        """
        self.window_size_label = ttk.Label(self.window_size_frame, text = pad_to_max(" Window size (samples, log2):"))
        self.selected_window_size = tk.StringVar(value = "9")
        self.window_size_spinbox = ttk.Spinbox(self.window_size_frame, width = 3, from_ = 5, to = 12, command = self.window_size_spinbox_callback, textvariable = self.selected_window_size, exportselection = 0, increment = 1, state = "readonly")
        self.sample_rate_radio_button_16000.config(command = self.window_size_spinbox_callback)
        self.sample_rate_radio_button_41667.config(command = self.window_size_spinbox_callback)
        # Initial duration label for the default exponent of 9 (512 samples).
        self.window_size_label_end = ttk.Label(self.window_size_frame, text = "(" + str(round((2 ** 9) / self.selected_sample_rate.get(), 3)) + "s)" )
        self.window_size_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.window_size_label.pack(side = tk.LEFT)
        self.window_size_spinbox.pack(side = tk.LEFT, expand = True)
        self.window_size_label_end.pack(side = tk.LEFT, expand = True)
    def setup_window_stride_frame(self):
        """Build the window-stride slider row (0.01 s to 0.33 s, default 0.03 s).

        The upper bound is later tightened by the expected-duration callback
        to one third of the selected duration.
        """
        self.window_stride_label = ttk.Label(self.window_stride_frame, text = pad_to_max(" Window stride (s): 0.03", "0.01"))
        self.window_stride_scale = ttk.Scale(self.window_stride_frame, from_ = 0.01, to = 0.33, value = 0.03, command = self.window_stride_scale_callback)
        self.window_stride_label_end = ttk.Label(self.window_stride_frame, text = "0.33")
        self.window_stride_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.window_stride_label.pack(side = tk.LEFT)
        self.window_stride_scale.pack(side = tk.LEFT, fill = tk.X, expand = True)
        self.window_stride_label_end.pack(side = tk.LEFT)
    def browse_dataset_button_callback(self):
        """Browse button handler: pick a directory and show it in the entry.

        Only active in "local" and "make" modes; a cancelled dialog returns
        "" and leaves the entry untouched.
        """
        if self.browse_dataset_mode.get() == "local" or self.browse_dataset_mode.get() == "make":
            data_path = filedialog.askdirectory()
            if data_path != "":
                self.browse_dataset_entry.delete(0, tk.END)
                self.browse_dataset_entry.insert(0, data_path)
def record_dataset_make_button_callback(self):
if self.browse_dataset_mode.get() == "make":
path = self.browse_dataset_entry.get()
labels = self.browse_dataset_make_text_box_label.get()
sample_rate = self.selected_sample_rate.get()
seconds_of_audio = self.expected_duration_scale.get()
labels = labels.strip().split(",")
num_recordings = self.browse_dataset_make_text_box_number.get()
self.thread = threading.Thread(target = run_all, args = (self, path, labels, num_recordings, sample_rate, seconds_of_audio))
self.thread.setDaemon(True)
self.thread.start()
def browse_dataset_global_button_callback(self):
data = filedialog.askdirectory()
if data != "":
self.browse_dataset_global_entry.delete(0, tk.END)
self.browse_dataset_global_entry.insert(0, data)
    def browse_dataset_change_mode(self):
        """Reshape the data page when the Make/Local/URL mode changes.

        Each branch hides the frames that don't apply, relabels the browse
        row, and re-packs the mode-specific frames.  The pack_forget/pack
        sequence matters: re-packing duration and sample-rate last keeps
        them below the mode-specific rows.
        """
        if self.browse_dataset_mode.get() == "global":
            # URL mode: browse becomes a globe icon, and a separate
            # destination row appears above duration/sample-rate.
            self.browse_dataset_button.config(image = self.images["globe_button"])
            self.browse_dataset_make_frame.pack_forget()
            self.record_dataset_make_frame.pack_forget()
            self.expected_duration_frame.pack_forget()
            self.sample_rate_frame.pack_forget()
            self.browse_dataset_label.config(text=pad_to_max(" Dataset URL:", free = 4))
            self.browse_dataset_global_frame.pack(fill = tk.X, side = tk.TOP, expand = True)
            self.expected_duration_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
            self.sample_rate_frame.pack(fill = tk.X, expand = True)
        elif self.browse_dataset_mode.get() == "make":
            # Make mode: show the labels/count row and the Record button.
            self.browse_dataset_button.config(image = self.images["browse_button"])
            self.browse_dataset_global_frame.pack_forget()
            self.browse_dataset_label.config(text=pad_to_max(" Dataset destination:", free = 4))
            self.expected_duration_frame.pack_forget()
            self.sample_rate_frame.pack_forget()
            self.browse_dataset_make_frame.pack(fill = tk.X, side = tk.TOP, expand = True)
            self.record_dataset_make_frame.pack(fill = tk.X, side = tk.TOP, expand = True)
            self.expected_duration_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
            self.sample_rate_frame.pack(fill = tk.X, expand = True)
        else:
            # Local mode: plain browse row only; extra frames hidden.
            self.browse_dataset_label.config(text=pad_to_max(" Browse dataset:", free = 4))
            self.browse_dataset_button.config(image = self.images["browse_button"])
            self.browse_dataset_global_frame.pack_forget()
            self.browse_dataset_make_frame.pack_forget()
            self.record_dataset_make_frame.pack_forget()
    def processing_method_radio_button_callback(self):
        """Show/hide the window-size and window-stride rows.

        STFT and WRMS are windowed methods and need both rows; AVG ("NONE")
        hides them.  The streamlining frame is unpacked and re-packed so it
        stays below the window rows.
        """
        selection = self.selected_processing_method.get()
        if selection == "STFT" or selection == "RMS":
            self.streamlining_frame.pack_forget()
            self.window_size_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
            self.window_stride_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
            self.streamlining_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        else:
            self.window_size_frame.pack_forget()
            self.window_stride_frame.pack_forget()
    def expected_duration_scale_callback(self, _):
        """Duration slider handler: refresh its label and cap the stride.

        Rounds the slider to 2 decimals, then limits the window-stride
        slider's maximum to one third of the duration, clamping the current
        stride if it now exceeds that maximum.
        """
        value = round_to_x(self.expected_duration_scale)
        self.expected_duration_label.config(text = pad_to_max(" Expected duration (s): " + "{:.2f}".format(value), "0.10"))
        value = round(value / 3, 2)
        self.window_stride_scale.config(to = value)
        self.window_stride_label_end.config(text = "{:.2f}".format(value))
        if self.window_stride_scale.get() > value:
            self.window_stride_scale.set(value)
def window_size_spinbox_callback(self): ###
value = int(self.window_size_spinbox.get())
self.window_size_label_end.config(text = "(" + str(round((2 ** value) / self.selected_sample_rate.get(), 3)) + "s)")
    def window_stride_scale_callback(self, _):
        """Stride slider handler: round to 2 decimals and refresh its label."""
        value = round_to_x(self.window_stride_scale)
        self.window_stride_label.config(text = pad_to_max(" Window stride (s): " + "{:.2f}".format(value), "0.01"))
    def setup_streamlining_frame(self):
        """Build the centered "Streamlining" checkbox row.

        An inner frame is gridded and then packed with anchor=CENTER so the
        label/checkbox pair sits in the middle of the full-width row.
        """
        self.streamlining_frame = ttk.Frame(self.processing_page_frame)
        self.centered_streamlining_frame = ttk.Frame(self.streamlining_frame)
        self.streamlining = tk.BooleanVar()
        self.streamlining_label = ttk.Label(self.centered_streamlining_frame, text = "Streamlining: ")
        self.streamlining_checkbutton = ttk.Checkbutton(self.centered_streamlining_frame, variable = self.streamlining, takefocus = False)
        self.streamlining_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
        self.streamlining_label.grid(row = 0, column = 0)
        self.streamlining_checkbutton.grid(row = 0, column = 1)
        self.centered_streamlining_frame.pack(anchor = tk.CENTER)
# TRAINING PAGE
    def setup_training_page(self):
        """Build the training page: live accuracy plot plus settings column.

        real_time_plots holds the accuracy histories that the training
        thread appends to and animate() draws every 250 ms.
        """
        self.real_time_plots = {"training_accuracy" : [],
                                "validation_accuracy" : []}
        self.model = None  # trained model; set by the training thread
        self.training_page_frame = ttk.Frame(self.background_frame)
        self.real_time_plot_frame = ttk.Frame(self.training_page_frame)
        self.real_time_plot_frame.pack()
        self.fig = Figure(figsize = (5, 5), dpi = 100)
        self.ax = self.fig.add_subplot(1, 1, 1)
        # Match the figure background to the platform theme.
        if os.name == "nt":
            self.fig.set_facecolor(background_color)
        else:
            self.fig.set_facecolor("#ececec")
        self.ax.set_facecolor("#ffffff")
        self.plot_canvas = FigureCanvasTkAgg(self.fig, self.training_page_frame)
        self.plot_canvas.get_tk_widget().pack(side = tk.LEFT)
        # Keep a reference to the animation or it would be garbage-collected.
        self.ani = animation.FuncAnimation(self.fig, self.animate, interval = 250, blit = False)
        self.setup_training_settings_frame()
    def animate(self, _):
        """FuncAnimation callback: redraw both accuracy curves.

        Clears and re-plots the axes every tick from real_time_plots, which
        the training thread appends to.  With fewer than two points the axes
        get fixed placeholder limits so the empty plot still looks sane.
        """
        self.ax.clear()
        length = len(self.real_time_plots["training_accuracy"])
        # Epochs are 1-based on the x axis.
        xaxis = [i for i in range(1,length + 1)]
        self.ax.plot(xaxis, self.real_time_plots["training_accuracy"], label = "Training accuracy")
        self.ax.plot(xaxis, self.real_time_plots["validation_accuracy"], label = "Validation accuracy")
        self.ax.tick_params(top = False, bottom = False, left = False, right = False, labelleft = True, labelbottom = True)
        if length == 0 or length == 1: # temporary
            self.ax.set_xlim([1,2])
            self.ax.set_ylim([0,1])
            self.ax.get_xaxis().set_major_locator(MaxNLocator(integer = True))
            self.ax.legend(loc = "lower right", labelcolor = "#000000")
        else:
            self.ax.set_xlim([1,length])
            self.ax.get_xaxis().set_major_locator(MaxNLocator(integer = True))
            self.ax.legend(loc = "lower right", labelcolor = "#000000")
        self.plot_canvas.draw()
def setup_training_settings_frame(self):
self.training_settings_frame = ttk.Frame(self.training_page_frame)
self.setup_training_parameters_label_frame()
self.setup_model_results_label_frame()
self.training_settings_frame.pack(side = tk.RIGHT, expand = True, fil = tk.BOTH)
def setup_training_parameters_label_frame(self):
    """Build the 'Training parameters' group: splits, architecture, batch size, epochs, Train button."""
    self.training_parameters_label_frame = ttk.LabelFrame(self.training_settings_frame, text = "Training parameters")
    self.setup_validation_split_frame()
    self.setup_test_split_frame()
    self.setup_model_architecture_frame()
    self.setup_batch_size_frame()
    self.setup_epochs_frame()
    self.train_button = ttk.Button(self.training_parameters_label_frame, image = self.images["train_button"], text = "Train", compound = tk.LEFT, command = self.train_button_callback)
    self.train_button.pack(side = tk.TOP, anchor = tk.CENTER)
    self.training_parameters_label_frame.pack(side = tk.TOP, expand = True, fill = tk.X)
def train_button_callback(self):
    """Reset any previous run and start training on a daemon worker thread.

    No-op while a worker thread is already running.
    """
    if self.thread.is_alive():
        return
    # Clear curves/model from a previous run before retraining.
    if self.model is not None:
        self.real_time_plots["training_accuracy"] = []
        self.real_time_plots["validation_accuracy"] = []
        self.model = None
    self.menu_label.configure(text = "Training...")
    # Streamlining mode re-runs preprocessing before training.
    if self.streamlining.get():
        self.thread = threading.Thread(target = preprocess_data_and_train_model, args = (self, ))
    else:
        self.thread = threading.Thread(target = train_model, args = (self, ))
    # setDaemon() is deprecated since Python 3.10; set the attribute instead.
    self.thread.daemon = True
    self.thread.start()
def setup_model_architecture_frame(self):
    """Build the DENSE/CONV architecture radio-button row (CONV is the default)."""
    self.model_architecture_frame = ttk.Frame(self.training_parameters_label_frame)
    self.model_architecture_label = ttk.Label(self.model_architecture_frame, text = pad_to_max("Model architecture:", free = 12))
    self.selected_model_architecture = tk.StringVar()
    self.model_architecture_radio_button_dense = ttk.Radiobutton(self.model_architecture_frame, text = "DENSE", value = "DENSE", variable = self.selected_model_architecture)
    self.model_architecture_radio_button_conv = ttk.Radiobutton(self.model_architecture_frame, text = "CONV", value = "CONV", variable = self.selected_model_architecture)
    # invoke() selects CONV as the initial choice.
    self.model_architecture_radio_button_conv.invoke()
    self.model_architecture_label.pack(side = tk.LEFT)
    self.model_architecture_radio_button_dense.pack(side = tk.LEFT, fill = tk.X, expand = True)
    self.model_architecture_radio_button_conv.pack(side = tk.LEFT, fill = tk.X, expand = True)
    self.model_architecture_frame.pack(expand = True, fill = tk.X, side = tk.TOP)
def setup_validation_split_frame(self):
    """Build the validation-split slider row (range 0.01-0.49, default 0.20)."""
    self.validation_split_frame = ttk.Frame(self.training_parameters_label_frame)
    self.validation_split_label = ttk.Label(self.validation_split_frame, text = pad_to_max("Validation split: 0.20", "0.01", free = 12))
    self.validation_split_scale = ttk.Scale(self.validation_split_frame, from_ = 0.01, to = 0.49, command = self.validation_split_scale_callback)
    self.validation_split_label_end = ttk.Label(self.validation_split_frame, text = "0.49")
    self.validation_split_label.pack(side = tk.LEFT)
    self.validation_split_scale.pack(side = tk.LEFT, fill = tk.X, expand = True)
    self.validation_split_label_end.pack(side = tk.LEFT)
    self.validation_split_frame.pack(side = tk.TOP, expand = True, fill = tk.X)
    # set() after packing triggers the callback once to sync the label.
    self.validation_split_scale.set(0.2)
def validation_split_scale_callback(self, _):
    """Mirror the slider's (rounded) value into the validation-split label."""
    value = round_to_x(self.validation_split_scale)
    self.validation_split_label.config(text = pad_to_max("Validation split: " + "{:.2f}".format(value), "0.01", free = 12))
def setup_test_split_frame(self):
    """Build the test-split slider row (range 0.01-0.49, default 0.25)."""
    self.test_split_frame = ttk.Frame(self.training_parameters_label_frame)
    self.test_split_label = ttk.Label(self.test_split_frame, text = pad_to_max("Test split: 0.25", "0.01", free = 12))
    self.test_split_scale = ttk.Scale(self.test_split_frame, from_ = 0.01, to = 0.49, command = self.test_split_scale_callback)
    self.test_split_label_end = ttk.Label(self.test_split_frame, text = "0.49")
    self.test_split_label.pack(side = tk.LEFT)
    self.test_split_scale.pack(side = tk.LEFT, fill = tk.X, expand = True)
    self.test_split_label_end.pack(side = tk.LEFT)
    self.test_split_frame.pack(side = tk.TOP, expand = True, fill = tk.X)
    # set() after packing triggers the callback once to sync the label.
    self.test_split_scale.set(0.25)
def test_split_scale_callback(self, _):
    """Mirror the slider's (rounded) value into the test-split label."""
    value = round_to_x(self.test_split_scale)
    self.test_split_label.config(text = pad_to_max("Test split: " + "{:.2f}".format(value), "0.01", free = 12))
def setup_batch_size_frame(self):
    """Build the batch-size slider row (range 4-128, default 32)."""
    self.batch_size_frame = ttk.Frame(self.training_parameters_label_frame)
    self.batch_size_label = ttk.Label(self.batch_size_frame, text = pad_to_max("Batch size: 32", "4", free = 12))
    self.batch_size_scale = ttk.Scale(self.batch_size_frame, from_ = 4, to = 128, command = self.batch_size_scale_callback)
    self.batch_size_scale.set(32)
    self.batch_size_label_end = ttk.Label(self.batch_size_frame, text = "128 ")
    self.batch_size_label.pack(side = tk.LEFT)
    self.batch_size_scale.pack(side = tk.LEFT, fill = tk.X, expand = True)
    self.batch_size_label_end.pack(side = tk.LEFT)
    self.batch_size_frame.pack(side = tk.TOP, expand = True, fill = tk.X)
def batch_size_scale_callback(self, _):
    """Mirror the slider's value into the batch-size label as an integer."""
    value = round_to_x(self.batch_size_scale, x = 0)
    self.batch_size_label.config(text = pad_to_max("Batch size: " + str(round(value)), "4", free = 12))
def setup_epochs_frame(self):
    """Build the epochs slider row (range 5-128, default 20)."""
    self.epochs_frame = ttk.Frame(self.training_parameters_label_frame)
    self.epochs_label = ttk.Label(self.epochs_frame, text = pad_to_max("Epochs: 20", "5", free = 12))
    self.epochs_scale = ttk.Scale(self.epochs_frame, from_ = 5, to = 128, command = self.epochs_scale_callback)
    self.epochs_scale.set(20)
    self.epochs_label_end = ttk.Label(self.epochs_frame, text = "128 ")
    self.epochs_label.pack(side = tk.LEFT)
    self.epochs_scale.pack(side = tk.LEFT, expand = True, fill = tk.X)
    self.epochs_label_end.pack(side = tk.LEFT)
    self.epochs_frame.pack(side = tk.TOP, fill = tk.X, expand = True)
def epochs_scale_callback(self, _):
    """Mirror the slider's value into the epochs label as an integer."""
    value = round_to_x(self.epochs_scale, x = 0)
    self.epochs_label.config(text = pad_to_max("Epochs: " + str(round(value)), "5", free = 12))
def setup_model_results_label_frame(self):
    """Build the 'Model results' group: per-epoch time, test metrics, and the save button."""
    self.model_results_label_frame = ttk.LabelFrame(self.training_settings_frame, text = "Model results")
    self.time_per_epoch = ttk.Label(self.model_results_label_frame, text = "Time per epoch (s):")
    self.test_accuracy_label = ttk.Label(self.model_results_label_frame, text = "Test accuracy:")
    self.test_loss_label = ttk.Label(self.model_results_label_frame, text = "Test loss:")
    self.time_per_epoch.pack(side = tk.TOP, expand = True, fill = tk.X)
    self.test_accuracy_label.pack(side = tk.TOP, expand = True, fill = tk.X)
    self.test_loss_label.pack(side = tk.TOP, expand = True, fill = tk.X)
    self.model_results_label_frame.pack(side = tk.TOP, expand = True, fill = tk.X)
    self.convert_and_save_button = ttk.Button(self.model_results_label_frame, image = self.images["save_button"], text = "Convert & save model", compound = tk.LEFT, command = self.convert_and_save_button_callback)
    self.convert_and_save_button.pack(side = tk.TOP, anchor = tk.CENTER)
def convert_and_save_button_callback(self):
    """Convert the trained model and save it to a user-chosen .h file.

    No-op when no model has been trained yet or a worker is running.
    """
    if self.model is not None and not self.thread.is_alive():
        save_path = filedialog.asksaveasfilename(defaultextension = ".h")
        if save_path != "":
            self.thread = threading.Thread(target = convert_and_save_model, args = (self, save_path))
            # setDaemon() is deprecated since Python 3.10; set the attribute.
            self.thread.daemon = True
            self.thread.start()
# ARDUINO PAGE
def setup_arduino_page(self):
    """Build the Arduino page: model upload controls on top, a console below."""
    self.arduino_page_frame = ttk.Frame(self.background_frame)
    self.setup_arduino_settings_frame()
    self.arduino_cmd = ttk.Frame(self.arduino_page_frame)
    # Read-only console: the <Key> binding returns "break" to swallow typing.
    self.logs = tk.Text(self.arduino_cmd, bg = "#000000", fg = "#ffffff", exportselection = 0, font = ("Courier",8))
    self.logs.bind("<Key>", lambda _ : "break")
    self.logs.pack(expand = True, fill = tk.BOTH)
    self.arduino_cmd.pack(side = tk.TOP, expand = True, fill = tk.BOTH)
    # Redirect stdout into the in-app console widget.
    self.cmd = custom_cmd(self.logs)
    sys.stdout = self.cmd # COMMENT THIS LINE IF YOU WANT CMD OUTSIDE OF THE UI
def setup_arduino_settings_frame(self):
    """Build the model-path entry, browse button, and 'Upload model' button row."""
    self.arduino_settings_frame = ttk.Frame(self.arduino_page_frame)
    self.arduino_settings_frame.pack(side = tk.TOP, expand = True, fill = tk.X)
    self.centered_arduino_settings_frame = ttk.Frame(self.arduino_settings_frame)
    self.browse_model_label = ttk.Label(self.centered_arduino_settings_frame, text = "Browse model: ")
    self.browse_model_entry = ttk.Entry(self.centered_arduino_settings_frame, exportselection = 0, width = 50)
    self.browse_model_button = ttk.Button(self.centered_arduino_settings_frame, image = self.images["browse_button"], command = self.browse_model_button_callback)
    self.convert_and_upload_model_button = ttk.Button(self.centered_arduino_settings_frame, image = self.images["arduino_button"], text = "Upload model", compound = tk.LEFT, command = self.upload_model_button_callback)
    self.convert_and_upload_model_button.pack(side = tk.RIGHT, padx = 2)
    self.browse_model_label.pack(side = tk.LEFT)
    self.browse_model_entry.pack(side = tk.LEFT)
    self.browse_model_button.pack(side = tk.LEFT)
    self.centered_arduino_settings_frame.pack(anchor = tk.CENTER)
def upload_model_button_callback(self):
    """Upload the selected model to the Arduino on a daemon worker thread.

    NOTE(review): unlike the other callbacks this does not check
    `self.thread.is_alive()` first — confirm overlapping uploads are intended.
    """
    self.thread = threading.Thread(target = send_to_arduino, args = (self, ))
    # setDaemon() is deprecated since Python 3.10; set the attribute instead.
    self.thread.daemon = True
    self.thread.start()
def browse_model_button_callback(self):
    """Ask for a model file and place its path into the entry field."""
    file_path = filedialog.askopenfilename()
    if file_path != "":
        self.browse_model_entry.delete(0, tk.END)
        self.browse_model_entry.insert(0, file_path)
# PAGE LOADING
def load_next_page(self):
    """Advance to the next page in data -> processing -> training -> arduino order.

    Leaving the processing page triggers a (threaded) data reload unless the
    dataset and processing settings are unchanged or streamlining is on.
    """
    if self.thread.is_alive():
        return
    if self.current_page == "data_page":
        self.load_page("processing_page")
    elif self.current_page == "processing_page": # send selections to ml script here
        if self.streamlining.get() or (self.get_curr_data_path() == self.prev_data_path and self.processing_is_consistent()):
            self.load_page("training_page")
        else:
            self.thread = threading.Thread(target = load_data, args = (self,))
            # setDaemon() is deprecated since Python 3.10; set the attribute.
            self.thread.daemon = True
            self.thread.start()
    elif self.current_page == "training_page":
        self.load_page("arduino_page")
    elif self.current_page == "arduino_page":
        self.load_page("data_page")
def load_previous_page(self):
    """Go back one page (wrapping from data to arduino).

    Going back from the arduino page to training re-checks the dataset and
    processing settings, reloading data on a worker thread if needed.
    """
    if self.thread.is_alive():
        return
    if self.current_page == "data_page":
        self.load_page("arduino_page")
    elif self.current_page == "processing_page":
        self.load_page("data_page")
    elif self.current_page == "training_page":
        self.load_page("processing_page")
    elif self.current_page == "arduino_page":
        if self.streamlining.get() or (self.get_curr_data_path() == self.prev_data_path and self.processing_is_consistent()):
            self.load_page("training_page")
        else:
            self.thread = threading.Thread(target = load_data, args = (self,))
            # setDaemon() is deprecated since Python 3.10; set the attribute.
            self.thread.daemon = True
            self.thread.start()
def unload_pages(self):
    """Hide every page frame so the next page can be packed alone."""
    for page_frame in (self.home_page_frame,
                       self.data_page_frame,
                       self.processing_page_frame,
                       self.training_page_frame,
                       self.arduino_page_frame):
        page_frame.pack_forget()
def load_menu(self, back = True, next = True, home = True, label = True):
    """Re-pack the menu bar, showing only the requested controls.

    NOTE: the `next` parameter shadows the builtin; kept for caller
    compatibility.
    """
    self.menu_next_button.pack_forget()
    self.menu_home_button.pack_forget()
    self.menu_back_button.pack_forget()
    self.menu_label.pack_forget()
    # Plain truthiness instead of `== True` comparisons.
    if next:
        self.menu_next_button.pack(side = tk.RIGHT)
    if home:
        self.menu_home_button.pack(side = tk.RIGHT)
    if back:
        self.menu_back_button.pack(side = tk.LEFT)
    if label:
        self.menu_label.pack(anchor = tk.CENTER)
def load_page(self, page, force = False): # code is a bit messy here
    """Switch the UI to *page*, validating preconditions unless *force* is set.

    Entering processing/training requires a selected dataset; entering
    training may first reload data on a worker thread (and abort the switch).
    """
    if not force:
        if self.thread.is_alive():
            return
        if page == "processing_page" or page == "training_page":
            # NOTE(review): this condition uses `and` where the page-change
            # callbacks use `or`-based logic — confirm equivalence.
            if page == "training_page" and self.prev_data_path != self.get_curr_data_path() and not self.streamlining.get() and not self.processing_is_consistent():
                self.thread = threading.Thread(target = load_data, args = (self,))
                # setDaemon() is deprecated since Python 3.10.
                self.thread.daemon = True
                self.thread.start()
                return
            if self.browse_dataset_entry.get() == "":
                self.menu_label.configure(text = "Dataset source not selected")
                return
            if self.browse_dataset_mode.get() == "global" and self.browse_dataset_global_entry.get() == "":
                self.menu_label.configure(text = "Dataset destination not selected")
                return
    self.current_page = page
    self.root.geometry(self.page_dict[page]["geometry"])
    self.unload_pages()
    self.page_dict[page]["widget"].pack(fill = tk.BOTH, expand = True)
    if page == "home_page":
        self.load_menu(back = False, next = False, home = False)
        self.menu_label.configure(text = "")
    else:
        self.load_menu()
        self.menu_label.configure(text = self.page_dict[page]["menu_label"])
        self.menu_back_button.config(text = self.page_dict[page]["back_button"])
        self.menu_next_button.config(text = self.page_dict[page]["next_button"])
def get_curr_data_path(self):
    """Return the dataset path entry matching the current browse mode.

    BUG FIX: the original read `app.browse_dataset_entry` — the module-level
    global created in the __main__ block — instead of `self`. That breaks
    when the class is imported or more than one instance exists.
    Returns None for any unrecognized mode (unchanged behavior).
    """
    mode = self.browse_dataset_mode.get()
    if mode == "local" or mode == "make":
        return self.browse_dataset_entry.get()
    elif mode == "global":
        return self.browse_dataset_global_entry.get()
def processing_is_consistent(self):
    """Return True when the current processing settings match the ones used
    for the last data load (stored in `self.prev_processing`)."""
    prev = self.prev_processing
    method = self.selected_processing_method.get()
    if method != prev["processing_method"]:
        return False
    if self.selected_sample_rate.get() != prev["sample_rate"]:
        return False
    if self.expected_duration_scale.get() != prev["expected_duration"]:
        return False
    # With no processing method the window settings are irrelevant.
    if method == "NONE":
        return True
    return (self.window_size_spinbox.get() == prev["window_size"]
            and self.window_stride_scale.get() == prev["window_stride"])
if __name__ == "__main__":
    # Application entry point: fixed-size Tk window running main_app.
    root = tk.Tk()
    # change font to monospace
    font.nametofont("TkDefaultFont").config(family = "Courier", size = 11)
    #root.option_add("*Font", font.nametofont("TkFixedFont"))
    matplotlib.rc("font", family = "Courier New")
    root.title("MLEcosystem")
    root.geometry(home_window_size)
    root.resizable(False, False)
    app = main_app(root)
    root.mainloop()
22,803 | 08456b8cb20b2902077719608252e634c4fc7866 | class Solution:
def longestPalindrome2(self, s):
    """
    Find a longest palindromic substring of *s*.
    Sweep once over the string, growing the best window by 2 (odd-length
    extension) or 1 (even-length extension) via slice palindrome checks.
    :type s: str
    :rtype: str
    """
    if s is None or s == s[::-1]:
        return s
    best_len = 1
    best_start = 0
    for i in range(1, len(s)):
        # Try extending the current best window by two characters.
        wider = s[i - best_len - 1:i + 1]
        if i - best_len >= 1 and wider == wider[::-1]:
            best_start = i - best_len - 1
            best_len += 2
            continue
        # Otherwise try extending it by one.
        window = s[i - best_len:i + 1]
        if i - best_len >= 0 and window == window[::-1]:
            best_start = i - best_len
            best_len += 1
    return s[best_start:best_start + best_len]
def longestPalindrome(self, s):
    """
    Find a longest palindromic substring of *s* via O(n^2) interval DP:
    is_pal[i][j] is True when s[i..j] is a palindrome.
    :type s: str
    :rtype: str
    """
    if s is None or s == s[::-1]:
        return s
    n = len(s)
    is_pal = [[False] * n for _ in range(n)]
    best_start, best_len = 0, 1
    # Length-1 substrings are always palindromes.
    for i in range(n):
        is_pal[i][i] = True
    # Length-2 substrings: palindromic iff the two characters match.
    for i in range(n - 1):
        if s[i] == s[i + 1]:
            is_pal[i][i + 1] = True
            best_start, best_len = i, 2
    # Longer substrings: s[i..j] is a palindrome iff its interior is and
    # the end characters match. Lengths grow so interiors are known.
    for length in range(3, n + 1):
        for i in range(n - length + 1):
            j = i + length - 1
            if is_pal[i + 1][j - 1] and s[i] == s[j]:
                is_pal[i][j] = True
                if length > best_len:
                    best_start, best_len = i, length
    return s[best_start:best_start + best_len]
# Ad-hoc smoke tests: each call prints one longest palindromic substring.
print(Solution().longestPalindrome("caba"))
print(Solution().longestPalindrome("adam"))
print(Solution().longestPalindrome("babad"))
print(Solution().longestPalindrome("cbbd"))
|
22,804 | 7d5df6d7ae1c31d01ed3ae1f09c1dea6a2c3d62c | from packcientifico.funciones import *
# Smoke-test calls for the geometry helpers in packcientifico.funciones.
print(area_circulo(150))
print(perimetro_circulo(60))
print(area_triangulo(25,50))
print(perimetro_triangulo(10,30,60))
print(area_rectangulo(50,30))
print(perimetro_rectangulo(20,50))
print(distancia_recorrida(150,100))
# 2.- `if __name__ == "__main__":` isolates/decouples the execution of the block
# inside it: it runs when this program is executed directly (condition true),
# but not when the program is imported into another one (condition false there).
|
22,805 | 7a73bdeb00d064329a9cf6722bf86158ab9530ba | data = input("Enter data: ")
# Report whether the entered text reads the same forwards and backwards.
if data == data[::-1]:
    print("It is palindrome")
else:
    print("It is not palindrome")
22,806 | 6c2aa744c885036e31663519c17c9a813cb6ce5b | import androidhelper
import time
# Minimal two-device Bluetooth chat built on the SL4A androidhelper facade.
droid = androidhelper.Android()
droid.toggleBluetoothState(True)
# Ask whether this device should accept connections (server) or initiate one.
droid.dialogCreateAlert('Be a server?')
droid.dialogSetPositiveButtonText('Yes')
droid.dialogSetNegativeButtonText('No')
droid.dialogShow()
result = droid.dialogGetResponse()
is_server = result.result['which'] == 'positive'
if is_server:
    droid.bluetoothMakeDiscoverable()
    droid.bluetoothAccept()
else:
    droid.bluetoothConnect()
# The server sends the first message; after that both sides alternate
# read -> show -> prompt -> write until the user cancels the input dialog.
if is_server:
    result = droid.getInput('Chat', 'Enter a message').result
    if result is None:
        droid.exit()
    droid.bluetoothWrite(result + '\n')
while True:
    message = droid.bluetoothReadLine().result
    droid.dialogCreateAlert('Chat Received', message)
    droid.dialogSetPositiveButtonText('Ok')
    droid.dialogShow()
    droid.dialogGetResponse()
    result = droid.getInput('Chat', 'Enter a message').result
    if result is None:
        # Cancelled input ends the chat loop.
        break
    droid.bluetoothWrite(result + '\n')
droid.exit()
|
22,807 | f5f368957fa9f72e9b84b3ed179779e063f658de | import logging
import weakref
from .protocol.dml import MessageManager
class MessageHandler(object):
    """A message handler bound to a `Service` instance.

    Invoke as handler(sender, message). The owning service is held through
    a weak reference so the handler does not keep it alive.
    """
    def __init__(self, service, name, func):
        self._service = weakref.ref(service)
        self.name = name
        self._func = func

    def __call__(self, sender, message):
        owner = self._service()
        return self._func(owner, sender, message)
class MessageHandlerDecorator(object):
    """Decorator used to declare a message handler inside a `Service` class."""
    def __init__(self, name):
        self.name = name
        self._func = None

    def __call__(self, func):
        # Capture the decorated function; the decorator object itself stays
        # in the class body and acts as a descriptor.
        self._func = func
        return self

    def __get__(self, obj, objtype=None):
        # Attribute access through a `Service` instance produces a
        # `MessageHandler` bound to that instance for its lifetime;
        # any other access returns the descriptor itself.
        if not isinstance(obj, Service):
            return self
        return MessageHandler(obj, self.name, self._func)
msghandler = MessageHandlerDecorator # Alias
class Service(object):
    """Base class for anything that exposes DML message handlers."""
    logger = logging.getLogger('SERVICE')

    def __init__(self, message_mgr):
        self.message_mgr = message_mgr

    def iter_message_handlers(self):
        """Yield every bound `MessageHandler` attached to this instance."""
        for attr_name in dir(self):
            candidate = getattr(self, attr_name)
            if isinstance(candidate, MessageHandler):
                yield candidate
class ServiceParticipant(object):
    """Owns a message manager and dispatches incoming messages to the
    handlers of its registered services."""
    logger = logging.getLogger('SERVICE-PARTICIPANT')

    def __init__(self):
        self.message_mgr = MessageManager()
        self.message_handlers = {}

    def register_service(self, service):
        """Index every handler exposed by *service* under its message name."""
        registry = self.message_handlers
        for handler in service.iter_message_handlers():
            registry[handler.name] = handler

    def handle_message(self, sender, message):
        """Dispatch *message* to its registered handler, warning when none exists."""
        self.logger.debug('handle_message(%r, %r)', sender, message.handler)
        handler = self.message_handlers.get(message.handler)
        if handler is None:
            self.logger.warning("sender=%r, No handler found: '%s'",
                                sender, message.handler)
            return
        handler(sender, message)
|
22,808 | ae18489cdd36057ec37bb369d4f31c5a3c4c6154 | dict={"name":"Raju", "marks":56}
# Report whether the entered text is one of the record's keys.
# NOTE(review): the prompt says "number" but membership is tested against the
# string keys "name"/"marks" — the prompt looks misleading; confirm intent.
user=input("enter number:")
if user in dict:  # `dict` is the record defined above (it shadows the builtin)
    print("exist")
else:
    print("not exist")
|
22,809 | f132f803806c153a85551c4473339c538e840a50 | from django import template
from django.db.models.aggregates import Count
from ..models import Post,Category,Tag
from django.contrib.auth.models import User
# Template-tag registry for this blog app's sidebar widgets.
register = template.Library()

@register.simple_tag
def get_recent_posts(num=5):
    # Most recently created posts, newest first (default 5).
    return Post.objects.all().order_by('-created_time')[:num]

@register.simple_tag
def archives():
    # Month-level archive dates, newest first.
    return Post.objects.dates('created_time','month',order='DESC')

@register.simple_tag
def get_categories():
    # Categories with at least one post, annotated with the post count.
    return Category.objects.annotate(num_post = Count('post')).filter(num_post__gt=0)

@register.simple_tag
def get_tag():
    # Tags with at least one post, annotated with the post count.
    return Tag.objects.annotate(num_post = Count('post')).filter(num_post__gt=0)

@register.simple_tag
def get_author():
    # Users who authored at least one post, annotated with the post count.
    return User.objects.annotate(num_post = Count('post')).filter(num_post__gt=0)
22,810 | 9cf9040999116b8725dda703e4c3f09070b00478 | from workspace import generate_report
def report():
    """Generate the workspace report for project 13, restricted to the
    statuses that are still in flight."""
    target_project = 13
    active_statuses = ["Under Review", "Blocked", "In Progress"]
    return generate_report.main(target_project, active_statuses)
|
22,811 | 8ca91a66d41331b1849260c4c078f96287475797 | ###################################################
# input: colmap points.bin images.bin
# paddle ocr imageXXX.jpg_ocr_result.npy
# output:
# bbox 2D bounding box in the image
# text OCR-recognized text
# score OCR confidence score
# xyz 3D centroid
# n_p 3D plane normal
# points all inlier 3D points
# tvec
# rvec
####################################################
from token import EXACT_TOKEN_TYPES
import numpy as np
import argparse
from read_write_model import *
import math
import open3d as o3d
import pickle
def load_ocr_result(result_path):
    """Load a PaddleOCR result dump (.npy pickled object array).

    Each entry has the shape [box, (text, score)].
    Returns (boxes, txts, scores) as parallel lists.
    """
    result = np.load(result_path, allow_pickle=True)
    # Split the parallel fields out of each [box, (text, score)] record.
    # (A dead debug loop that iterated `result` doing nothing was removed.)
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]
    boxes = [line[0] for line in result]
    return boxes, txts, scores
#
def choose_text_points2d(img, box, expand=1):
    """Return ids of registered 3D points whose 2D keypoints fall strictly
    inside the (optionally scaled) axis-aligned bounds of an OCR box.

    Keypoints with point3D_id == -1 (no 3D point) are skipped.
    """
    xs = box[:, 0, 0]
    ys = box[:, 0, 1]
    left, right = np.min(xs) * expand, np.max(xs) * expand
    down, up = np.min(ys) * expand, np.max(ys) * expand
    selected = []
    for xy, pid in zip(img.xys, img.point3D_ids):
        inside = left < xy[0] < right and down < xy[1] < up
        if pid != -1 and inside:
            selected.append(pid)
    return selected
def get_plane_norm(points, Rvec, tvec):
    """Estimate a plane normal for the 3D points of one OCR box.

    Returns (normal, points), where `points` are the surviving inliers and
    `normal` is [nan]*3 when too few points remain to fit a plane.
    NOTE(review): `tvec` is unused here — presumably intended for a
    distance fallback (see the removed commented-out line); confirm.
    """
    if(points.shape[0]<3):
        # Fewer than 3 points: no plane can be defined.
        return [np.nan]*3, points
    elif(points.shape[0]<4):
        # Exactly 3 points: the normal is the cross product of two edges.
        n_w = np.cross(points[0]-points[1], points[0]-points[2])
    else:
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        # Drop statistical outliers before fitting.
        pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=4,
                                             std_ratio=1.5)
        points = np.asarray(pcd.points)
        if(points.shape[0]<4):
            return [np.nan]*3, points
        # RANSAC plane fit; plane_model is [a, b, c, d] of ax+by+cz+d=0.
        plane_model, inliers = pcd.segment_plane(distance_threshold=0.05,
                                             ransac_n=3,
                                             num_iterations=1000)
        n_w = plane_model[0:3]
        points_inlier = pcd.select_by_index(inliers)
        points = np.asarray(points_inlier.points)
    # Flip the normal so it faces the camera (negative z in camera frame).
    n_c = np.dot(Rvec, n_w.T)
    if(n_c[2] > 0):
        return -n_w, points
    return n_w, points
def main(model_path, ocr_output_path):
    """For every COLMAP-registered image: load its OCR boxes, look up the
    per-box 3D point cloud dumped earlier, fit a plane normal, and pickle
    the combined per-box properties next to the OCR output."""
    images = read_images_binary(os.path.join(model_path, "images.bin"))
    for image_id in images:
        print(images[image_id].name)
        image_name = images[image_id].name
        Rvec = images[image_id].qvec2rotmat()
        tvec = images[image_id].tvec
        boxes, txts, scores = load_ocr_result(
            ocr_output_path + "/" + image_name + ".ocr_result.npy")
        if(len(boxes) == 0):
            continue
        for box_id in range(len(boxes)):
            # Skip low-confidence or NaN-scored detections.
            if scores is not None and (scores[box_id] < 0.5 or math.isnan(scores[box_id])):
                continue
            box = np.reshape(
                np.array(boxes[box_id]), [-1, 1, 2]).astype(np.int64)
            cloud_path = ocr_output_path + image_name + str(box_id) + "_cloud3d_1.npy"
            if(os.path.exists(cloud_path)):
                points = np.load(cloud_path)
                n_p, points_inlier = get_plane_norm(points, Rvec, tvec)
                # Plane fit failed (too few inliers) — skip this box.
                if(math.isnan(n_p[0])):
                    continue
            else:
                # No dumped cloud for this box — nothing to save.
                n_p = [np.nan]*3
                continue
            # NOTE: `property` shadows the builtin of the same name.
            property = [boxes[box_id], txts[box_id], scores[box_id], n_p, points_inlier, images[image_id].qvec, tvec]
            with open(ocr_output_path + "/" + image_name + str(box_id) + ".all_property.bin", "wb") as fp:
                pickle.dump(property, fp)
if __name__ == '__main__':
    # CLI entry point: paths to the OCR output directory and COLMAP model.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ocr_output_path",
        # required=True,
        default='/home/ezxr/Documents/wxc/pic_ocr_flip/',
        type=str,
        help="visualized output paths"
    )
    parser.add_argument(
        "--model_path",
        # required=True,
        default='/home/ezxr/Documents/wxc/f1_gz/0',
        type=str,
        help="input colmap model paths"
    )
    args = parser.parse_args()
    main(args.model_path, args.ocr_output_path)
|
22,812 | 00d9c60f38f26353acd1e84900278a480ae4a427 | import sys
import numpy as np
from nltk.tokenize import TweetTokenizer
from utils.my_functions import parse_xml_data, get_polarity_lexicon, get_polarity_counts, get_one_hot_labels
from utils.models import get_dnn_model
from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn import metrics
from scipy.sparse import hstack
from sklearn.utils import class_weight
from tensorflow.keras.callbacks import ModelCheckpoint
################
# DATA LOADING #
################
# Paths
train_data_path = "data/train.xml"
dev_data_path = "data/dev.xml"
test_data_path = "data/test.xml"
polarity_lex_path = "utils/ElhPolar_esV1.lex"
# Load training data
train_tweets, train_labels, train_ids = parse_xml_data(train_data_path)
# Load development data
dev_tweets, dev_labels, dev_ids = parse_xml_data(dev_data_path)
# Load polarity lexicon dictionary (word -> polarity)
lex_dict = get_polarity_lexicon(polarity_lex_path)
# Get polarity word counts for each tweet (extra features appended later)
train_polarities = get_polarity_counts(train_tweets, lex_dict)
dev_polarities = get_polarity_counts(dev_tweets, lex_dict)
# Get one hot labels for the dnn classifier
train_labels_onehot = get_one_hot_labels(train_labels)
dev_labels_onehot = get_one_hot_labels(dev_labels)
# Class weights for "balanced" DNN training. BUG FIX: Keras'
# `fit(class_weight=...)` expects a {class_index: weight} dict, while
# sklearn's compute_class_weight returns an ndarray — map it onto indices.
class_weights = dict(enumerate(class_weight.compute_class_weight("balanced", np.unique(train_labels), train_labels)))
######################
# Experiments config #
######################
# Build tokenizer (removes upper case )
tokenizer = TweetTokenizer(preserve_case=False, reduce_len=True, strip_handles=False)
# Make a callable function for the vectorizer
tok_func = lambda s: tokenizer.tokenize(s)
# Auxiliary variables to store the best config
# By best accuracy
bestacc_accuracy = 0
bestacc_macro = 0
bestacc_config = []
# By best macro
bestmacro_accuracy = 0
bestmacro_macro = 0
bestmacro_config = []
aux_best_macro = 0
# Range of different vectorizers
vectorizer_types = range(4)
# Kernel types for SVM classifier
kernel_types = ["linear", "poly", "rbf", "sigmoid"]
# Create the list of classifiers. All the SVM variants plus DNN
classifiers = kernel_types + ["dnn"]
# Regularization param for SVM classifier
C = 0.1
# DNN train parameters
batch_size = 64
epochs = 400
'''
Loop for trying diferent combinations if vectorizers and classifiers
'''
for vectorizer_type in vectorizer_types:
    for classifier_type in classifiers:
        ##############
        # VECTORIZER #
        ##############
        if vectorizer_type == 0:
            vectorizer_name = "CountVectorizer"
            vectorizer = CountVectorizer(tokenizer=tok_func, ngram_range=(1,1))
        elif vectorizer_type == 1:
            if classifier_type == "dnn": continue # This vectorizer does not work with dnn
            vectorizer_name = "HashingVectorizer"
            vectorizer = HashingVectorizer(tokenizer=tok_func, ngram_range=(1,1))
        elif vectorizer_type == 2:
            vectorizer_name = "TfidfTransformer"
            vectorizer = Pipeline([("count", CountVectorizer(tokenizer=tok_func, ngram_range=(1,1))),
                                   ("tfid", TfidfTransformer())])
        elif vectorizer_type == 3:
            vectorizer_name = "TfidfVectorizer"
            vectorizer = TfidfVectorizer(tokenizer=tok_func, ngram_range=(1,1))
        # Apply vectorizer to train data
        train_vectors = vectorizer.fit_transform(train_tweets)
        # Apply vectorizer to development data (fit on train only)
        dev_vectors = vectorizer.transform(dev_tweets)
        # Add the polarity counts to the vectors as extra columns
        train_vectors = hstack((train_vectors, train_polarities))
        dev_vectors = hstack((dev_vectors, dev_polarities))
        if classifier_type == "dnn":
            # From scipy sparse to regular array (Keras needs dense input)
            train_vectors = train_vectors.toarray()
            dev_vectors = dev_vectors.toarray()
        ##############
        # CLASSIFIER #
        ##############
        # Build the classifier
        if classifier_type == "dnn":
            classifier = get_dnn_model(input_shape=train_vectors.shape[1:])
        elif classifier_type == "linear":
            classifier = LinearSVC(C=C)
        else:
            classifier = SVC(C=C, kernel=classifier_type)
        #########
        # TRAIN #
        #########
        if classifier_type == "dnn":
            # Callback to store the weights with the best validation loss
            best_model_path = f"saved_models/{vectorizer_name}_bestloss"
            ckpt_callback = ModelCheckpoint(best_model_path, monitor="val_loss", verbose=1, save_best_only=True, save_weights_only=True)
            classifier.fit(train_vectors, train_labels_onehot, batch_size, epochs, validation_data=(dev_vectors, dev_labels_onehot), callbacks=[ckpt_callback], class_weight=class_weights)
        else:
            classifier.fit(train_vectors, train_labels)
        ############
        # EVALUATE #
        ############
        # Load best model checkpoint (by validation loss)
        if classifier_type == "dnn":
            classifier.load_weights(best_model_path)
        # Get predictions
        dev_preds = classifier.predict(dev_vectors)
        # Compute stats of the results (dnn outputs one-hot probabilities)
        if classifier_type == "dnn":
            dev_labels_num = np.argmax(dev_labels_onehot, axis=1)
            dev_preds_num = np.argmax(dev_preds, axis=1)
            accuracy = metrics.accuracy_score(dev_labels_num, dev_preds_num)
            macro = metrics.precision_recall_fscore_support(dev_labels_num, dev_preds_num, average='macro')
        else:
            accuracy = metrics.accuracy_score(dev_labels, dev_preds)
            macro = metrics.precision_recall_fscore_support(dev_labels, dev_preds, average='macro')
        # Show stats
        if classifier_type == "dnn":
            print(f"\nResults of vectorizer {vectorizer_name} using a dnn:")
        else:
            print(f"\nResults of vectorizer {vectorizer_name} using a SVM with kernel type {classifier_type}:")
        print(f"acc = {accuracy}")
        print(f"macro = {macro}")
        # Check if we get a new best model(by accuracy) to store it
        if accuracy > bestacc_accuracy:
            bestacc_config = [vectorizer_name, classifier_type]
            bestacc_accuracy = accuracy
            bestacc_macro = macro
        # Check if we get a new best model(by macro) to store it
        # NOTE(review): macro[0] is the macro-averaged PRECISION from
        # precision_recall_fscore_support — so "best by macro" selects on
        # precision, not macro-F1 (macro[2]). Confirm this is intended.
        if macro[0] > aux_best_macro:
            bestmacro_config = [vectorizer_name, classifier_type]
            bestmacro_accuracy = accuracy
            bestmacro_macro = macro
            aux_best_macro = macro[0]
# Show the best models found across all vectorizer/classifier combinations
print("\nThe best model config by accuracy with is:")
print(f"\tvectorizer = {bestacc_config[0]}")
print(f"\tclassifier = {bestacc_config[1]}")
print(f"\tresults: accuracy={bestacc_accuracy} - macro={bestacc_macro}")
print("\nThe best model config by macro with is:")
print(f"\tvectorizer = {bestmacro_config[0]}")
print(f"\tclassifier = {bestmacro_config[1]}")
print(f"\tresults: accuracy={bestmacro_accuracy} - macro={bestmacro_macro}")
|
22,813 | 9eeee0294ec8689c62d3957dfcccf200836f5a8e | from OpenGLCffi.GL import params
# cffi binding stubs for the GL uniform-block / indexed-buffer API.
# The `pass` bodies are placeholders — presumably the @params decorator
# machinery supplies the real FFI call; confirm in OpenGLCffi.
@params(api='gl', prms=['program', 'uniformCount', 'constuniformNames', 'uniformIndices'])
def glGetUniformIndices(program, uniformCount, constuniformNames, uniformIndices):
    pass

@params(api='gl', prms=['program', 'uniformCount', 'uniformIndices', 'pname', 'params'])
def glGetActiveUniformsiv(program, uniformCount, uniformIndices, pname, params):
    pass

@params(api='gl', prms=['program', 'uniformIndex', 'bufSize', 'length', 'uniformName'])
def glGetActiveUniformName(program, uniformIndex, bufSize, length, uniformName):
    pass

@params(api='gl', prms=['program', 'uniformBlockName'])
def glGetUniformBlockIndex(program, uniformBlockName):
    pass

@params(api='gl', prms=['program', 'uniformBlockIndex', 'pname', 'params'])
def glGetActiveUniformBlockiv(program, uniformBlockIndex, pname, params):
    pass

@params(api='gl', prms=['program', 'uniformBlockIndex', 'bufSize', 'length', 'uniformBlockName'])
def glGetActiveUniformBlockName(program, uniformBlockIndex, bufSize, length, uniformBlockName):
    pass

@params(api='gl', prms=['program', 'uniformBlockIndex', 'uniformBlockBinding'])
def glUniformBlockBinding(program, uniformBlockIndex, uniformBlockBinding):
    pass

@params(api='gl', prms=['target', 'index', 'buffer', 'offset', 'size'])
def glBindBufferRange(target, index, buffer, offset, size):
    pass

@params(api='gl', prms=['target', 'index', 'buffer'])
def glBindBufferBase(target, index, buffer):
    pass

@params(api='gl', prms=['target', 'index', 'data'])
def glGetIntegeri_v(target, index, data):
    pass
|
22,814 | 5e8452eae4cb09dc3919e1bca3de4a2b037e0f15 | """
Bucket sort
Best case: O(n+k) , where loop N element in list and separate to K number of bucket, and concatenate each bucket in the end
Worst case: O(n^2) , if formula for separation of element in bucket result in all element in 1 bucket, bucket sorting will take another O(N)
time to sort the bucket
"""
def bucket_sort(list, round_up_greatest_val):
    """Sort a sequence of non-negative numbers with bucket sort.

    :param list: values to sort; each value is assumed to lie in
        [0, round_up_greatest_val].
    :param round_up_greatest_val: rounded-up greatest possible value, used
        to spread items across buckets.
    :return: a new sorted builtin list.

    Best case O(n + k); worst case O(n^2) when every item lands in one bucket.

    Fixes vs. the original: `item * n / max` produced a float index (crash
    under Python 3 division); `result_list.insert(...)` shifted the list
    instead of assigning the bucket; the in-bucket insertion loop appended
    duplicates on every non-matching comparison.
    """
    n = len(list)
    if n == 0:
        return []
    # One bucket per input element; integer division maps each value to a
    # bucket, clamped so item == round_up_greatest_val stays in range.
    buckets = [[] for _ in range(n)]
    for item in list:
        bucket_index = min(int(item * n // round_up_greatest_val), n - 1)
        buckets[bucket_index].append(item)
    # Sort each bucket and concatenate them in bucket order.
    result_list = []
    for bucket in buckets:
        result_list.extend(sorted(bucket))
    return result_list
22,815 | 14735548849d6e5aa5d80eafec22d4a3bb7c471c | import pytest
from pygears import gear
from pygears.sim import cosim, sim
from pygears.typing import Bool, Queue
from pygears.lib import drv, verif, delay_rng
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
def test_leave_looped(din_delay, dout_delay):
    """Gear whose while-loop exit condition is re-read from the input stream.

    The Python model (ref) and the Verilator-compiled HDL (dut) must agree
    for every combination of input/output handshake delays.
    """
    # Emits 0 while the received value is truthy and 1 on the value that ends
    # the loop; `c` is rebound from the input on each iteration.
    @gear
    async def test(din: Bool) -> Bool:
        c = Bool(True)
        while c:
            async with din as c:
                if c:
                    yield 0
                else:
                    yield 1
    verif(drv(t=Bool, seq=[True, False, False, True]) | delay_rng(din_delay, din_delay),
          f=test(name='dut'),
          ref=test,
          delays=[delay_rng(dout_delay, dout_delay)])
    cosim('/dut', 'verilator')
    sim()
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
def test_leave_branched(din_delay, dout_delay):
    """Like test_leave_looped, but the output value is computed through a
    branched intermediate variable `d` instead of being yielded directly."""
    @gear
    async def test(din: Bool) -> Bool:
        c = Bool(True)
        d = True
        while c:
            async with din as c:
                if c:
                    d = 0
                else:
                    d = 1
                yield d
    verif(drv(t=Bool, seq=[True, False, False, True]) | delay_rng(din_delay, din_delay),
          f=test(name='dut'),
          ref=test,
          delays=[delay_rng(dout_delay, dout_delay)])
    cosim('/dut', 'verilator')
    sim()
# test_leave_branched(2, 2)
# # TODO: variable 'c' has to be a register here!
# @pytest.mark.parametrize('din_delay', [0, 1])
# @pytest.mark.parametrize('dout_delay', [0, 1])
# def test_leave_looped(din_delay, dout_delay):
# @gear
# async def test(din: Bool) -> Bool:
# c = Bool(True)
# while c:
# async with din as c:
# pass
# yield c
# verif(drv(t=Bool, seq=[True, False, False, True]) | delay_rng(din_delay, din_delay),
# f=test(name='dut'),
# ref=test,
# delays=[delay_rng(dout_delay, dout_delay)])
# cosim('/dut', 'verilator', outdir='/tmp', rebuild=True)
# sim()
# test_leave_looped(0, 1)
# # TODO: variable 'c' has to be a register here!
# @pytest.mark.parametrize('din_delay', [0, 1])
# @pytest.mark.parametrize('dout_delay', [0, 1])
# def test_leave_looped_async_for(din_delay, dout_delay):
# @gear
# async def test(din: Queue[Bool]) -> Bool:
# c = Bool(True)
# async for c, eot in din:
# pass
# yield c
# verif(drv(t=Queue[Bool], seq=[[True, False, False, True]]) | delay_rng(din_delay, din_delay),
# f=test(name='dut'),
# ref=test,
# delays=[delay_rng(dout_delay, dout_delay)])
# cosim('/dut', 'verilator')
# sim()
# test_leave_looped_async_for(2, 2)
|
22,816 | 1bf232ee923a11148e35e40ed8ff60beee2f0fc8 | import sys
# Redirect stdin so input() below reads from the bundled test-case file.
sys.stdin = open("์ํธ.txt","r")
class Node:
    """A node of a doubly linked (circular) list."""

    def __init__(self, data, rlink=None, llink=None):
        # Payload value held by this node.
        self.data = data
        # Predecessor (left) and successor (right) pointers.
        self.llink = llink
        self.rlink = rlink
def insert(start, end):
    """Splice a new node holding start.data + end.data between *start* and
    *end*, rewiring both neighbours' links."""
    middle = Node(start.data + end.data)
    middle.llink = start
    middle.rlink = end
    start.rlink = middle
    end.llink = middle
for tc in range(int(input())):
    # N: initial element count, M: step size, K: number of insertions.
    N,M,K = map(int,input().split())
    flag = True
    # Build a circular doubly linked list from the N input values.
    for i in map(int,input().split()):
        if flag:
            head = Node(i)
            last = head
            flag=False
            continue
        last.rlink = Node(i)
        last.rlink.llink = last
        last = last.rlink
    last.rlink = head
    head.llink = last
    now = head
    # K times: advance M nodes, then insert the sum of the passed pair; the
    # tail pointer is updated when the insertion happens at the tail.
    for i in range(K):
        for j in range(M):
            now=now.rlink
        pre=now.llink
        if pre == last:
            insert(pre,now)
            last=now.llink
        else:
            insert(pre,now)
            now=now.llink
    print("#{}".format(tc + 1), end=" ")
    count = 0
    # Print (at most) the last 10 values, walking backwards from the tail.
    for u in range(N+K):
        if count == 10:
            break
        print(last.data, end=" ")
        last = last.llink
        count+=1
    print()
# 1 5 6 1 9 13 4 2 8 6
# 2 1736 2514 778 169 667 498 329 715 386 958
# 3 826 1494 668 954 375 1052 677 302 774 2234
|
def distance(strand_a, strand_b):
    """Return the Hamming distance between two equal-length DNA strands.

    :param strand_a: first strand (string or sequence of bases).
    :param strand_b: second strand, same length as *strand_a*.
    :raises ValueError: if the strands differ in length.
    """
    if len(strand_a) != len(strand_b):
        raise ValueError('The two DNA strands must be of the same length.')
    # Count positions where the strands disagree; zip pairs them directly
    # instead of tracking a manual index.
    return sum(a != b for a, b in zip(strand_a, strand_b))
22,818 | 1947c59c3e624ee4356dbdda7f4fa8d375af7a4a | import os
import errno
import numpy as np
import deepcell
from sklearn.model_selection import train_test_split
test_size = 0.1 # % of data saved as test
seed = 0 # seed for random train-test split
filename = 'mousebrain.npz'
DATA_DIR = 'data/'
DATA_FILE = os.path.join(DATA_DIR, filename)
training_data = np.load(DATA_FILE)
X = training_data['X']
y = training_data['y']
# Write a 100-sample subset so the rest of the script runs quickly.
filename = 'mousebrain_reduced.npz'
DATA_DIR = 'data/'
DATA_FILE = os.path.join(DATA_DIR, filename)
np.savez(DATA_FILE, X = X[:100], y = y[:100])
print(DATA_FILE)
# confirm the data file is available
assert os.path.isfile(DATA_FILE)
training_data = np.load(DATA_FILE)
X = training_data['X']
y = training_data['y']
print(y.shape)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=test_size, random_state=seed)
del X, y
print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape))
print('X.shape: {}\ny.shape: {}'.format(X_test.shape, y_test.shape))
# Set up other required filepaths
# If the data file is in a subdirectory, mirror it in MODEL_DIR and LOG_DIR
PREFIX = os.path.relpath(os.path.dirname(DATA_FILE), DATA_DIR)
# ROOT_DIR = '/data' # TODO: Change this! Usually a mounted volume
ROOT_DIR = '3dsamplebased_seg/'
MODEL_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'models', PREFIX))
LOG_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'logs', PREFIX))
# create directories if they do not exist
for d in (MODEL_DIR, LOG_DIR):
    try:
        os.makedirs(d)
    except OSError as exc: # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise
#-----------------------------------------------------------------------------------------
from tensorflow.keras.optimizers import SGD
from deepcell.utils.train_utils import rate_scheduler
fgbg_model_name = 'sample_fgbg_3d_model'
sample_model_name = 'sample_edgeseg_3d_model'
n_epoch = 2 # Number of training epochs
receptive_field = 61 # should be adjusted for the scale of the data
optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
lr_sched = rate_scheduler(lr=0.01, decay=0.99)
# Transformation settings
transform = 'pixelwise'
dilation_radius = 1 # change dilation radius for edge dilation
separate_edge_classes = True # break edges into cell-background edge, cell-cell edge
n_features = 4 if separate_edge_classes else 3
# 3D Settings
frames_per_batch = 3
norm_method = 'whole_image' # data normalization - `whole_image` for 3d conv
# Sample mode settings
batch_size = 8 # number of images per batch (should be 2 ^ n)
win = (receptive_field - 1) // 2 # sample window size
# NOTE(review): win_z is never referenced below; the window_size calls
# recompute (frames_per_batch - 1) // 2 inline.
win_z = (frames_per_batch - 1) // 2 # z window size
balance_classes = True # sample each class equally
max_class_samples = 1e6 # max number of samples per class.
from deepcell import model_zoo
# Foreground/background model: 2-class 3D feature net.
fgbg_model = model_zoo.bn_feature_net_3D(
    receptive_field=receptive_field,
    n_features=2,
    norm_method=norm_method,
    n_frames=frames_per_batch,
    n_channels=X_train.shape[-1])
from deepcell.training import train_model_sample
fgbg_model = train_model_sample(
    model=fgbg_model,
    dataset=DATA_FILE, # full path to npz file
    model_name=fgbg_model_name,
    test_size=test_size,
    seed=seed,
    window_size=(win, win, (frames_per_batch - 1) // 2),
    optimizer=optimizer,
    batch_size=batch_size,
    balance_classes=balance_classes,
    max_class_samples=max_class_samples,
    transform='fgbg',
    n_epoch=n_epoch,
    model_dir=MODEL_DIR,
    lr_sched=lr_sched,
    rotation_range=180,
    flip=True,
    shear=False)
from deepcell import model_zoo
# Edge/interior segmentation model: 4 classes when edges are separated.
sample_model = model_zoo.bn_feature_net_3D(
    receptive_field=receptive_field,
    n_features=4, # (background edge, interior edge, cell interior, background)
    n_frames=frames_per_batch,
    norm_method=norm_method,
    n_channels=X_train.shape[-1])
from deepcell.training import train_model_sample
sample_model = train_model_sample(
    model=sample_model,
    dataset=DATA_FILE, # full path to npz file
    window_size=(win, win, (frames_per_batch - 1) // 2),
    model_name=sample_model_name,
    test_size=test_size,
    seed=seed,
    transform=transform,
    separate_edge_classes=separate_edge_classes,
    dilation_radius=dilation_radius,
    optimizer=optimizer,
    batch_size=batch_size,
    balance_classes=balance_classes,
    max_class_samples=max_class_samples,
    n_epoch=n_epoch,
    log_dir=LOG_DIR,
    model_dir=MODEL_DIR,
    lr_sched=lr_sched,
    rotation_range=180,
    flip=True,
    shear=False,
    zoom_range=(0.8, 1.2))
# Persist the trained weights next to the models.
fgbg_weights_file = os.path.join(MODEL_DIR, '{}.h5'.format(fgbg_model_name))
fgbg_model.save_weights(fgbg_weights_file)
sample_weights_file = os.path.join(MODEL_DIR, '{}.h5'.format(sample_model_name))
sample_model.save_weights(sample_weights_file)
|
22,819 | e9225acaf1ddf2d8dbccfce740ea765f61e632c7 | import pygame as pg
import re
from os import listdir
from os.path import isfile, join
def collide_hit_rect(first, second):
    """
    Callback for pygame sprite collision tests: report whether the two
    sprites' hit rectangles overlap.
    :param first: sprite exposing a `hit_rect` rectangle
    :param second: sprite exposing a `hit_rect` rectangle
    :return: truthy if the hit rects overlap, falsy otherwise
    """
    # The original or-ed the exact same expression with itself; a single
    # colliderect test is equivalent.
    return first.hit_rect.colliderect(second.hit_rect)
def collide_with_obstacles(sprite, group, direction):
    """
    Resolve a sprite's collision with obstacles along one axis.
    direction is needed to allow a sprite that collides
    horizontally to continue moving vertically if its movement
    was in a diagonal. Same goes for blocked vertical movement
    and continued horizontal movement.
    :param sprite: The sprite to check
    :param group: The group of obstacles to check
    :param direction: 'x' for horizontal, 'y' for vertical movement
    :return: True if there is a collision. False otherwise
    """
    collided = False
    if direction == 'x':
        hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)
        if hits:
            # Bug fix: only report a collision when one actually occurred;
            # the original set the flag merely for checking the x axis.
            collided = True
            # If the sprite is moving right, stop it and
            # set its right face on the left side of the object it collided with.
            if hits[0].rect.centerx > sprite.hit_rect.centerx:
                sprite.pos.x = hits[0].rect.left - sprite.hit_rect.width / 2
            # If the sprite is moving left,
            # set its left face on the right side of the object it collided with.
            if hits[0].rect.centerx < sprite.hit_rect.centerx:
                sprite.pos.x = hits[0].rect.right + sprite.hit_rect.width / 2
            # Reverse (bounce) the sprite's horizontal velocity
            sprite.vel.x = -sprite.vel.x
            # Update the sprite's center to the new position
            sprite.hit_rect.centerx = sprite.pos.x
    if direction == 'y':
        hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)
        if hits:
            collided = True
            # If the sprite is moving upwards, then
            # set its top to the bottom of the sprite it collided with.
            if hits[0].rect.centery < sprite.hit_rect.centery:
                sprite.pos.y = hits[0].rect.bottom + sprite.hit_rect.height / 2
            # If the sprite is moving downwards, then
            # set its bottom to the top of the sprite it collided with.
            if hits[0].rect.centery > sprite.hit_rect.centery:
                sprite.pos.y = hits[0].rect.top - sprite.hit_rect.height / 2
            # Reverse (bounce) the sprite's vertical velocity
            sprite.vel.y = -sprite.vel.y
            sprite.hit_rect.centery = sprite.pos.y
    return collided
def get_image_names(path, index=2):
    """Return path-prefixed file names under *path*, ordered numerically.

    File names are expected to contain an integer at position *index* once
    split on '_' and '.' (e.g. 'frame_x_12.png' -> 12), so 'x_2' sorts
    before 'x_10'.
    """
    def numeric_key(name):
        return int(re.split(r'[_.]', name)[index])

    names = [entry for entry in listdir(path) if isfile(join(path, entry))]
    names.sort(key=numeric_key)
    return [path + name for name in names]
|
22,820 | 5c351bd411c92c07bfcd6205f4cd7b337b9ce6d6 | # -*- coding: UTF-8 -*-
'''
# Author: Akasaka
# FileName: A.py
# Created: 2019.04.20(UTC+0800)20.21.47(ๆๆๅ
ญ)
'''
# Module-level `global` is a no-op (these names are already global); kept as
# documentation of the shared state used below.
global g, idx
def dfs(r, c, x, y, n):
    """Tile the n*n sub-board whose top-left corner is (r, c), leaving the
    special cell (x, y) empty, by recursively placing L-trominoes.

    g is the output board (cell -> tromino number); idx is the running count.
    """
    if n < 2:
        return
    s = n >> 1;  # half size of the current sub-board
    # (tx, ty): top-left cell of the central 2x2 square.
    tx = r + s - 1
    ty = c + s - 1
    # Quadrant (0..3) that contains the special cell.
    pos = (x - r) // s * 2 + (y - c) // s
    global g, idx
    idx += 1
    # Place one tromino in the centre, skipping the quadrant that already
    # holds the special cell.
    for i in range(4):
        if i == pos:
            continue
        g[tx + i // 2][ty + i % 2] = idx;
    # Recurse into each quadrant; the centre cell just painted acts as the
    # special cell for the three quadrants that lack the real one.
    for i in range(4):
        if i == pos:
            dfs(r + i // 2 * s, c + i % 2 * s, x, y, s)
        else:
            dfs(r + i // 2 * s, c + i % 2 * s, tx + i // 2, ty + i % 2, s)
def main():
    """Read k, x, y; tromino-tile the 2^k board leaving (x, y) empty; print rows."""
    k, x, y = map(int, input().split())
    global g, idx
    idx = 0
    n = 1 << k
    g = [[0 for i in range(n)] for j in range(n)]
    # Input coordinates are 1-based; the solver works 0-based.
    dfs(0, 0, x - 1, y - 1, n)
    # NOTE(review): prints each row as a Python list (brackets and commas);
    # confirm that is the expected output format.
    for i in range(n):
        print(g[i])
if __name__ == '__main__':
    main()
|
22,821 | 6839ef3f00d3416c69be64309376bbd346d048ce | from rest_framework import generics
from . import models
from . import serializers
class NoteList(generics.ListCreateAPIView):
    """GET: list all notes; POST: create a new note."""
    queryset = models.Note.objects.all()
    serializer_class = serializers.NoteSerializer
class NoteData(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single note by primary key."""
    queryset = models.Note.objects.all()
    serializer_class = serializers.NoteSerializer
22,822 | 1c5957bb0a4523ce89d74daa52656c83b19c6b5e | from django.contrib import admin
from .models import Blogpost, Books, Contact, DeliveryDetails
# Expose the content models in the Django admin with default ModelAdmin options.
admin.site.register(Blogpost)
admin.site.register(Books)
admin.site.register(Contact)
admin.site.register(DeliveryDetails)
|
22,823 | 446b663d173fc1d3567da34adeec72ee1d40d841 | import time
class Comments():
    """In-memory store of comment threads keyed by thread id."""

    def __init__(self):
        # thread_id -> list of message dicts, in insertion order.
        self.threads = {}

    def add(self, thread_id, data):
        """Append data['message'] to *thread_id* and return the stored entry."""
        # Timestamp-derived identifier (decimal point stripped).
        new_id = str(time.time()).replace('.', '')
        entry = {
            'id': new_id,
            'message': data['message'],
        }
        self.threads.setdefault(thread_id, []).append(entry)
        return entry

    def get_thread(self, thread_id):
        """Return the message list for *thread_id*, or [] if unknown."""
        return self.threads.get(thread_id, [])
|
22,824 | 0280615284daac505191e64d7784620bfce96f18 | # Generated by Django 2.0.5 on 2018-05-30 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema: SendMsg stores e-mail verification codes
    # (with send time and attempt count); UserInfo stores user accounts.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='SendMsg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=32, unique=True)),
                ('code', models.CharField(max_length=6)),
                ('stime', models.DateTimeField()),
                ('times', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(db_index=True, max_length=32)),
                ('email', models.CharField(max_length=32, unique=True)),
                ('pwd', models.CharField(max_length=64)),
                ('ctime', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
22,825 | 034ab957da48e71cc3711527d903ee377c3a2ecd | f = open("Day09_input.txt", 'r')
# Read the puzzle input and strip all whitespace into one long string.
aoc_inp = f.read()
aoc_inp = "".join(aoc_inp.split())
f.close()
def marker_start(s):
    """Return True if *s* is the marker-opening character '('.

    The original returned None implicitly for any other input; an explicit
    boolean is clearer and backward-compatible (both are falsy).
    """
    return s == "("
def get_marker(s):
    """Return the text between position 1 and the first ')' in *s*
    (for a string starting with '(', that is the marker body 'AxB')."""
    end = s.find(')')
    return s[1:end]
# print(get_marker("2x2)dsdsdsd"))
def get_marker_a_b(s):
    """Split an 'AxB' marker body into its two integer components."""
    a, _, b = s.partition('x')
    return int(a), int(b)
def traverse_string(s):
    """Expand the (AxB) compression markers in *s*, printing a debug trace
    and finally the accumulated decompressed length counter *i*."""
    i = 0
    # print(len(s))
    print("-----------------------------------------------------------------------------------------------------------")
    while len(s) > 0:
        #print("bokstav:", s[i])
        if marker_start(s[0]):
            print("-------------------------------------------------------")
            marker = get_marker(s)
            if marker.find('x') > -1:
                print("Marker:", marker)
                a, b = get_marker_a_b(marker)
                print("Number of characters:", a)
                print("Number of times to duplicate:", b)
                # The A characters following the marker are repeated B times.
                to_copy = s[len(marker) + 2:len(marker) + 2 + a]
                print("to_copy:", to_copy)
                print("len(to_copy):", len(to_copy))
                # Drop the marker plus its copied section, then account for
                # the expanded length in the counter.
                s = str(s[len(marker) + a + 2:])
                i += len(str(to_copy * b)) - 1
        else:
            s = s[1:]
            print(s)
        i += 1
    print(s)
    print("length:", len(s))
    print(i)
# Decompress and measure the real puzzle input.
traverse_string(aoc_inp)
# traverse_string("ADVENT")
# traverse_string("A(1x5)BC")
# traverse_string("(3x3)XYZ")
# traverse_string("A(2x2)BCD(2x2)EFG")
# traverse_string("(6x1)(1x3)A")
# traverse_string("X(8x2)(3x3)ABCY")
# traverse_string("IKKEKOPIERMEG(33x2)KOPIERHERFRA(3x4)(99x77)OGHELTHIT")
# traverse_string("IKKEKOPIERMEG(33x2)KOPIERHERFRA(3x4)(99x77)OGHELTHIT(3x3)XYZ")
|
22,826 | 586bb0bd110422c239e040051d73c06a3a911a9f | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-bucket-encryption.html
# Entry point: interactively collect the two required parameters and run
# `aws s3api put-bucket-encryption` through the shared helper.
if __name__ == '__main__':
    """
    delete-bucket-encryption : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/delete-bucket-encryption.html
    get-bucket-encryption : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-encryption.html
    """
    parameter_display_string = """
# bucket : Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide .
# server-side-encryption-configuration :
    """
    add_option_dict = {}
    # Help text shown when prompting for each parameter.
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    write_two_parameter("s3api", "put-bucket-encryption", "bucket", "server-side-encryption-configuration", add_option_dict)
|
22,827 | 3a2d6ef0334a18176d6b0482bc68ab81c725b860 | # Generated by Django 2.2.5 on 2019-09-26 21:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: refreshes the hard-coded datetime defaults of the
    # `*_published` fields (side effect of using datetime.datetime.now() at
    # makemigrations time instead of django.utils.timezone.now).
    dependencies = [
        ('main', '0003_auto_20190927_0006'),
    ]
    operations = [
        migrations.AlterField(
            model_name='essays',
            name='essay_published',
            field=models.DateTimeField(default=datetime.datetime(2019, 9, 27, 0, 14, 12, 101433), verbose_name='date published'),
        ),
        migrations.AlterField(
            model_name='tutorials',
            name='tutorial_published',
            field=models.DateTimeField(default=datetime.datetime(2019, 9, 27, 0, 14, 12, 42361), verbose_name='date published'),
        ),
    ]
|
22,828 | d18c6b58bfe1607400a73f3adaba953bc23e0613 | #!/usr/bin/env python
import fcntl
import socket
import struct
def Get_IP(ifname):
    """Return the IPv4 address assigned to network interface *ifname*.

    Uses the SIOCGIFADDR ioctl (0x8915), so it only works on Linux and only
    for interfaces that currently hold an IPv4 address.

    :param ifname: interface name, str or bytes (e.g. 'eth0').
    :raises OSError: if the interface does not exist or has no address.
    """
    # The ioctl needs a bytes interface name under Python 3.
    if isinstance(ifname, str):
        ifname = ifname.encode()
    # Close the helper socket deterministically instead of leaking it.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        packed = fcntl.ioctl(s.fileno(), 0x8915,  # SIOCGIFADDR
                             struct.pack('256s', ifname[:15]))
    return socket.inet_ntoa(packed[20:24])
# Print the address of the primary wired interface.
# NOTE(review): 'eth0' only exists with legacy interface naming -- confirm.
ip = Get_IP('eth0')
print(ip)
|
22,829 | 099bcf3d561ec9a6112303f2d514da001d13adba | from torch import nn
import torch
class FocalLoss(nn.Module):
    """Binary focal loss for predictions that are already probabilities in [0, 1]."""

    def __init__(self, gamma=2, alpha=0.25, r=1e-19):
        """
        :param gamma: focusing parameter (> 0); larger values down-weight easy,
            well-classified samples and emphasise hard/misclassified ones.
        :param alpha: weighting factor balancing positive vs. negative samples.
        :param r: small constant added inside log() for numerical stability.
        """
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.bce_loss = nn.BCELoss()  # NOTE(review): created but never used below
        self.r = r

    def forward(self, p, target):
        """Return the mean focal loss of probabilities *p* against *target*."""
        target = target.float()
        p_min = p.min()
        p_max = p.max()
        if p_min < 0 or p_max > 1:
            raise ValueError('The range of predicted values should be [0, 1]')
        p = p.reshape(-1, 1)
        target = target.reshape(-1, 1)
        # Standard binary focal loss:
        #   -alpha*(1-p)^gamma * t*log(p) - (1-alpha)*p^gamma * (1-t)*log(1-p)
        loss = -self.alpha * (1 - p) ** self.gamma * (target * torch.log(p + self.r)) - \
               (1 - self.alpha) * p ** self.gamma * ((1 - target) * torch.log(1 - p + self.r))
        return loss.mean()
class FocalLossManyClassification(nn.Module):
    """Focal loss for multi-class classification.

    Takes raw logits: softmax is applied internally, so do NOT apply
    softmax before calling.
    """

    def __init__(self, num_class, alpha=None, gamma=2, smooth=None, epsilon=1e-19):
        """
        :param num_class: number of classes.
        :param alpha: optional per-class weight list whose length must equal
            num_class; defaults to uniform weights. The list is normalised to
            sum to 1.
        :param gamma: focusing parameter emphasising hard samples.
        :param smooth: label-smoothing coefficient; must lie in [0, 1].
        :param epsilon: numerical-stability constant added before log().
        """
        super(FocalLossManyClassification, self).__init__()
        self.num_class = num_class
        self.alpha = alpha
        self.gamma = gamma
        self.smooth = smooth
        if self.alpha is None:
            self.alpha = torch.ones(self.num_class, 1)
        elif isinstance(self.alpha, list):
            assert len(self.alpha) == self.num_class
            self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1)
            self.alpha = self.alpha / self.alpha.sum()
        else:
            raise TypeError('Not support alpha type')
        if self.smooth is not None:
            if self.smooth < 0 or self.smooth > 1.0:
                raise ValueError('Smooth value should be in [0,1]')
        self.epsilon = epsilon

    def forward(self, input_, target):
        """Softmax *input_* along dim 1, then return the mean focal loss."""
        logit = torch.softmax(input_, dim=1)
        if logit.dim() > 2:
            raise ValueError('The input dimension should be 2')
        target = target.reshape(-1, 1)
        alpha = self.alpha
        if alpha.device != input_.device:
            alpha = alpha.to(input_.device)
        idx = target.cpu().long()
        # One-hot encode the targets (optionally label-smoothed below).
        one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()
        one_hot_key = one_hot_key.scatter_(1, idx, 1)
        if one_hot_key.device != logit.device:
            one_hot_key = one_hot_key.to(logit.device)
        if self.smooth:
            one_hot_key = torch.clamp(
                one_hot_key, self.smooth, 1.0 - self.smooth)
        # pt: probability assigned to the true class of each sample.
        pt = (one_hot_key * logit).sum(1) + self.epsilon
        log_pt = pt.log()
        alpha = alpha[idx]
        loss = -1 * alpha * ((1 - pt) ** self.gamma) * log_pt
        return loss.mean()
if __name__ == '__main__':
    # Smoke test with 10 classes, per-class weights and label smoothing.
    f = FocalLossManyClassification(10, alpha=[1, 2, 15, 4, 8, 6, 7, 7, 9, 4], smooth=0.1)
    predict = torch.randn(64, 10, requires_grad=True)
    targets = torch.randint(0, 9, (64,))
    # NOTE(review): sigmoid is applied here although forward() already applies
    # softmax -- double activation; confirm whether raw logits were intended.
    loss = f(torch.sigmoid(predict), targets)
    print(loss)
    loss.backward()
    # print(targets)
|
22,830 | e1cb3faa7734558131403a56a21ccd9d8ffe57ea | #!/usr/bin/env python
import sys, os, re, copy
class TextState():
def __init__(self):
bold = False; itbold = False; italic = False
def dump(self):
for aa in TextState.__dict__:
print "dic", aa
for aa in self.__dict__:
print "ee", aa
if __name__ == '__main__':
    # Demonstrate attribute dumps for fresh instances and a shallow copy.
    ts = TextState()
    ts2 = TextState()
    ts3 = copy.copy(ts2)
    print " TextState"
    ts.dump()
    print " TextState2"
    ts2.dump()
    print " TextState3"
    ts3.dump()
|
22,831 | c72111453119dc347d3e09794ac99f2cb96d4642 | import json
import os
import fido2.features
import flask_login
from fido2.server import Fido2Server
from fido2.webauthn import (
AttestedCredentialData,
PublicKeyCredentialRpEntity,
PublicKeyCredentialUserEntity,
)
from flask import flash, redirect, render_template, session, url_for
from app import app, db, forms, load_user, models
# Relying-party configuration comes from the environment so the same code can
# run against different deployment origins.
rp_id = os.environ["AUTHN_ID"]
expected_origin = os.environ["AUTHN_ORIGIN"]
# Use JSON-compatible request/response mapping (webauthn-json format).
fido2.features.webauthn_json_mapping.enabled = True
rp = PublicKeyCredentialRpEntity(name=os.environ["AUTHN_NAME"], id=rp_id)
fido_server = Fido2Server(rp)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/register", methods=["GET", "POST"])
def register():
form = forms.RegisterForm()
if form.validate_on_submit():
user = models.User(username=form.username.data, password=form.password.data)
db.session.add(user)
db.session.commit()
flash("Registration complete", "success")
return redirect(url_for("login"))
return render_template("register.html", form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
form = forms.LoginForm()
if form.validate_on_submit():
# User has supplied correct credentials
# Store info in session and pass to 2FA check
session["valid_credentials_supplied"] = True
session["user"] = form.user.id
flash("Username/password correct", "success")
return redirect(url_for("select_2fa"))
return render_template("login.html", form=form)
@app.route("/logout")
def logout():
flask_login.logout_user()
flash("Logout complete", "success")
return redirect(url_for("index"))
@app.route("/select-2fa", methods=["GET", "POST"])
def select_2fa():
if not session.get("valid_credentials_supplied", False):
return redirect(url_for("login"))
user = models.User.query.filter_by(id=session["user"]).first()
if len(user.u2f_credentials.all()) == 0:
return redirect(url_for("add_2fa"))
keys = user.u2f_credentials.all()
return render_template("select_2fa.html", keys=keys, user=user)
@app.route("/validate-2fa/<name>", methods=["GET", "POST"])
def validate_2fa(name):
form = forms.SignTokenForm()
key = models.U2FCredentials.query.filter_by(
owner=session["user"], name=name
).first()
device = _restore_credential_data(key.device)
if form.validate_on_submit():
response = json.loads(form.response.data)
try:
result = fido_server.authenticate_complete(
session["webauthn_challenge"],
[
device,
],
response,
)
except:
flash("Token authentication failed", "error")
return redirect(url_for("select_2fa"))
# Log in the user
user = load_user(session["user"])
flask_login.login_user(user)
flash("Login complete", "success")
return redirect(url_for("index"))
key = models.U2FCredentials.query.filter_by(
owner=session["user"], name=name
).first()
device = _restore_credential_data(key.device)
options, state = fido_server.authenticate_begin(
[
device,
]
)
session["webauthn_challenge"] = state
# session['u2f_sign'] = sign.json
return render_template(
"validate_2fa.html",
form=form,
key=key,
authentication_options=json.dumps(dict(options)),
)
@app.route("/add-2fa", methods=["GET", "POST"])
def add_2fa():
rp_name = "Jonathan Street personal site"
form = forms.AddTokenForm()
user = models.User.query.filter_by(id=session["user"]).first()
if form.validate_on_submit():
auth_data = fido_server.register_complete(
session["webauthn_challenge"], json.loads(form.response.data)
)
cred_data = _store_credential_data(auth_data.credential_data)
# Complete 2FA registration
u2f_cred = models.U2FCredentials(
name=form.name.data, owner=user.id, device=cred_data
)
db.session.add(u2f_cred)
db.session.commit()
flash("Authentication token added", "success")
return redirect(url_for("login"))
# Start 2FA registration
options, state = fido_server.register_begin(
PublicKeyCredentialUserEntity(
id=b"user_id",
name=user.username,
display_name=user.username,
),
[],
user_verification="discouraged",
authenticator_attachment="cross-platform",
)
options = dict(options)
session["webauthn_challenge"] = state
return render_template(
"add_2fa.html",
registration_options=json.dumps(dict(options)),
form=form,
)
def _store_credential_data(cred):
    """Serialize attested credential data to a hex string for DB storage."""
    return cred.hex()
def _restore_credential_data(cred):
    """Inverse of _store_credential_data: hex string -> AttestedCredentialData."""
    return AttestedCredentialData.fromhex(cred)
|
22,832 | bf04ad44c4e03c645db7b701312676bb19ba48d7 | # encoding=utf-8
from sqlalchemy import Column, Integer
from sqlalchemy.types import TypeDecorator, CHAR
from sqlalchemy.dialects.postgresql import UUID
import risclog.sqlalchemy.model
import uuid
ENGINE_NAME = 'schaukasten'
class ObjectBase(risclog.sqlalchemy.model.ObjectBase):
    """Common base for all models bound to the `schaukasten` engine."""
    _engine_name = ENGINE_NAME
    # Surrogate integer primary key shared by every model.
    id = Column(Integer, primary_key=True)
    @property
    def identifier(self):
        "Value usable in the `get` method to retrieve the object from the DB."
        return self.id
# Concrete declarative base bound to the shared class registry.
Object = risclog.sqlalchemy.model.declarative_base(
    ObjectBase, class_registry=risclog.sqlalchemy.model.class_registry)
# according to sqlalchemy documentation
# http://docs.sqlalchemy.org/en/latest/core/custom_types.html#backend-agnostic-guid-type
class GUID(TypeDecorator):
    """Platform-independent GUID type.
    Uses Postgresql's UUID type, otherwise uses
    CHAR(32), storing as stringified hex values.
    """
    impl = CHAR
    def load_dialect_impl(self, dialect):
        """Pick the native UUID type on PostgreSQL, CHAR(32) elsewhere."""
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(UUID())
        else:
            return dialect.type_descriptor(CHAR(32))
    def process_bind_param(self, value, dialect):
        """Convert a Python value into the form stored by the dialect."""
        if value is None:
            return value
        elif dialect.name == 'postgresql':
            return str(value)
        else:
            if not isinstance(value, uuid.UUID):
                # Parse strings/ints into a UUID, then store as 32 hex chars.
                return "%.32x" % uuid.UUID(value).int
            else:
                # Already a UUID instance; render as a 32-char hex string.
                return "%.32x" % value.int
    def process_result_value(self, value, dialect):
        """Convert a stored value back into uuid.UUID (None passes through)."""
        if value is None:
            return value
        else:
            return uuid.UUID(value)
|
22,833 | d0a10ca018a2b01f4b8468fdb9da6263cf7c1f25 | #%%
# Import Package
import os
import cv2 as cv
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
# %%
# Data Prepare
URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
path_to_zip = utils.get_file('flower_photos.tgz', origin=URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')
# Each sub-directory of the extracted archive is one flower class.
category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)
num_classes = len(category_list)
img_size = 150
def read_img(path, img_size):
    """Load an image, convert BGR->RGB, and resize to (img_size, img_size)."""
    img = cv.imread(path)
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img = cv.resize(img, (img_size, img_size))
    return img
imgs_tr = []
labs_tr = []
imgs_val = []
labs_val = []
for i, category in enumerate(category_list):
    path = os.path.join(PATH, category)
    imgs_list = os.listdir(path)
    print("Total '%s' images : %d"%(category, len(imgs_list)))
    # The first ~5% of each class is held out for validation.
    ratio = int(np.round(0.05 * len(imgs_list)))
    print("%s Images for Training : %d"%(category, len(imgs_list[ratio:])))
    print("%s Images for Validation : %d"%(category, len(imgs_list[:ratio])))
    print("=============================")
    imgs = [read_img(os.path.join(path, img),img_size) for img in imgs_list]
    labs = [i]*len(imgs_list)
    imgs_tr += imgs[ratio:]
    labs_tr += labs[ratio:]
    imgs_val += imgs[:ratio]
    labs_val += labs[:ratio]
# Scale pixels to [0, 1] and one-hot encode the labels.
imgs_tr = np.array(imgs_tr)/255.
labs_tr = utils.to_categorical(np.array(labs_tr), num_classes)
imgs_val = np.array(imgs_val)/255.
labs_val = utils.to_categorical(np.array(labs_val), num_classes)
print(imgs_tr.shape, labs_tr.shape)
print(imgs_val.shape, labs_val.shape)
#%%
def Dense_Layer(input, growth_rate, name="Dense_Layer"):
    """One DenseNet composite layer: BN-ReLU-Conv1x1 bottleneck (4*k filters)
    then BN-ReLU-Conv3x3 (k filters), concatenated onto the input."""
    x = layers.BatchNormalization(name=name+"_BN_1")(input)
    x = layers.ReLU(name=name+"_Act_1")(x)
    x = layers.Conv2D(growth_rate*4, 1, name=name+"_Conv_1")(x)
    x = layers.BatchNormalization(name=name+"_BN_2")(x)
    x = layers.ReLU(name=name+"_Act_2")(x)
    x = layers.Conv2D(growth_rate, 3, padding='same', name=name+"_Conv_2")(x)
    x = layers.Concatenate(name=name+"_Concat")([input, x])
    return x
def Dense_Block(input, num_layer, name="Dense_Block"):
    """Chain *num_layer* Dense_Layers (growth rate 32); concatenation inside
    each layer gives every layer access to all earlier feature maps."""
    x = Dense_Layer(input, 32, name=name+"_1")
    for i in range(2, num_layer+1):
        x = Dense_Layer(x, 32, name=name+"_%d"%i)
    return x
def Transition_Layer(input, reduction, name="Transition_Layer"):
    """BN-ReLU-1x1Conv compressing channels by *reduction*, then 2x2 average
    pooling to halve the spatial resolution."""
    n_features = int(input.shape[-1])
    x = layers.BatchNormalization(name=name+"_BN_1")(input)
    x = layers.ReLU(name=name+"_Act_1")(x)
    x = layers.Conv2D(int(n_features*reduction), 1, name=name+"_Conv_1")(x)
    x = layers.AveragePooling2D(name=name+"_Pool")(x)
    return x
def build_densenet(input_shape=(None, None, 3), num_classes = 100, num_blocks=121, name = "DenseNet"):
    """Assemble a DenseNet-{121,169,201,264} image classifier.

    :param input_shape: HWC input shape (spatial dims may be None).
    :param num_classes: units of the final softmax layer.
    :param num_blocks: depth variant; must be a key of blocks_dict.
    :param name: prefix applied to layer names.
    :return: an uncompiled keras Model.
    """
    blocks_dict = {
        121: [6, 12, 24, 16],
        169: [6, 12, 32, 32],
        201: [6, 12, 48, 32],
        264: [6, 12, 64, 48]
    }
    assert num_blocks in blocks_dict.keys(), "Number of layer must be in %s"%blocks_dict.keys()
    input = layers.Input(shape=input_shape, name=name+"_Input")
    # Stem: 7x7/2 convolution followed by 3x3/2 max-pool.
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name=name+"_Stem_Pad_1")(input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name=name+"_Stem_Conv")(x)
    x = layers.BatchNormalization(name=name+'_Stem_BN')(x)
    x = layers.ReLU(name=name+"_Stem_Act")(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name+"_Stem_Pad_2")(x)
    x = layers.MaxPooling2D(3, strides=2, name=name+"_Stem_Pool")(x)
    # Four dense blocks separated by 0.5-compression transition layers.
    x = Dense_Block(x, blocks_dict[num_blocks][0], name=name+"_Dense_Block_1")
    x = Transition_Layer(x, 0.5, name='Transition_1')
    x = Dense_Block(x, blocks_dict[num_blocks][1], name=name+"_Dense_Block_2")
    x = Transition_Layer(x, 0.5, name='Transition_2')
    x = Dense_Block(x, blocks_dict[num_blocks][2], name=name+"_Dense_Block_3")
    x = Transition_Layer(x, 0.5, name='Transition_3')
    x = Dense_Block(x, blocks_dict[num_blocks][3], name=name+"_Dense_Block_4")
    x = layers.BatchNormalization(name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)
    x = layers.GlobalAveragePooling2D(name=name+"_GAP")(x)
    x = layers.Dense(num_classes, activation='softmax', name=name+"_Output")(x)
    return models.Model(input, x, name=name)
num_blocks = 121
input_shape = imgs_tr.shape[1:]
dense = build_densenet(input_shape=input_shape, num_classes=num_classes, num_blocks=num_blocks, name = "DenseNet")
# Binary vs. categorical cross-entropy depending on the output width.
loss = 'binary_crossentropy' if num_classes==1 else 'categorical_crossentropy'
dense.compile(optimizer=optimizers.Adam(), loss=loss, metrics=['accuracy'])
# %%
# Training Network
epochs=100
batch_size=16
history=dense.fit(imgs_tr, labs_tr, epochs = epochs, batch_size=batch_size, validation_data=[imgs_val, labs_val])
# Plot loss and accuracy curves for train vs. validation.
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.title("Loss graph")
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train', 'Validation'], loc='upper right')
plt.subplot(122)
plt.title("Acc graph")
# NOTE(review): with metrics=['accuracy'], recent TF/Keras records the keys as
# 'accuracy'/'val_accuracy'; 'acc' only exists on older versions -- confirm.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show() |
22,834 | 661253bb6a9bbd6025d6b0f76871bf9539451bf1 | from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import UserSerializer
from rest_framework.decorators import authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
@api_view(['POST'])
def signup(request):
    """Register a new user from POSTed credentials.

    Expects ``password`` and ``passwordConfirmation`` in the request body;
    returns 400 when they differ, otherwise validates the remaining fields
    with UserSerializer and stores the password hashed.
    """
    password = request.data.get('password')
    password_confirmation = request.data.get('passwordConfirmation')
    # Reject mismatched password/confirmation before touching the serializer.
    if password != password_confirmation:
        return Response({'error': '๋น๋ฐ๋ฒํธ๊ฐ ์ผ์นํ์ง ์์ต๋๋ค.'}, status=status.HTTP_400_BAD_REQUEST)
    serializer = UserSerializer(data=request.data)
    # raise_exception=True makes invalid data raise ValidationError (DRF turns
    # it into a 400 response), so the body below only runs on valid data.
    if serializer.is_valid(raise_exception=True):
        user = serializer.save()
        # serializer.save() stored the raw value; re-save with the properly
        # hashed password via set_password().
        user.set_password(request.data.get('password'))
        user.save()
        # NOTE(review): serializer.data echoes the submitted fields — verify
        # UserSerializer marks password write_only so it is not leaked here.
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['GET'])
@authentication_classes([JSONWebTokenAuthentication])
@permission_classes([IsAuthenticated])
def login(request):
    """Return the id and username of the JWT-authenticated user."""
    # IsAuthenticated guarantees request.user is a real user here.
    authenticated_user = request.user
    payload = {
        "user_id": authenticated_user.id,
        "username": authenticated_user.username,
    }
    return Response(payload)
|
22,835 | d5be1be8a42602c998856e978926199dedeab153 | # Generated by Django 3.2.5 on 2021-07-22 08:13
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration reconciling two divergent 0004 migrations.

    Both parents share the number 0004 (created on separate branches);
    this migration has no operations — it only merges the dependency graph.
    """

    dependencies = [
        ('receipts', '0004_auto_20210717_1816'),
        ('receipts', '0004_auto_20210717_1925'),
    ]

    operations = [
    ]
|
22,836 | 847744a0703bce2b47d597ed0107b9cc6e9dce06 |
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
help(load_from_yahoo) |
22,837 | 069a94cb911821c31d78fa266ecf68599499a35f | import unittest
from mcx4.microcontrollers import Microcontroller, MC4000, MC6000
from mcx4.interfaces import GPIO, XBUS, Register, Interface
import mcx4.exceptions as x
class MicrocontrollerTestCase(unittest.TestCase):
    """Unit tests for Microcontroller port lookup, linking and I/O."""

    def setUp(self):
        # Reset the class-level part counter so each test starts from a
        # deterministic state, then build a 2-GPIO / 3-XBUS controller.
        Microcontroller._part_count = 0
        self.mc = Microcontroller(gpio=2, xbus=3)

    def test_get_port_gpio(self):
        """GPIO lookups are cached per name and bounded by the gpio count."""
        p0 = self.mc.get_port('p0')
        p1 = self.mc.get_port('p1')
        self.assertIsInstance(p0, GPIO)
        self.assertIsInstance(p1, GPIO)
        # Repeated lookup returns the same port object.
        self.assertEqual(self.mc.get_port('p0'), p0)
        self.assertNotEqual(p1, p0)
        # Only p0/p1 exist on a gpio=2 controller.
        with self.assertRaises(x.PortException):
            self.mc.get_port('p2')

    def test_get_port_xbus(self):
        """XBUS lookups behave like GPIO lookups (cached, bounded)."""
        mc = Microcontroller(xbus=3)
        x0 = mc.get_port('x0')
        x1 = mc.get_port('x1')
        x2 = mc.get_port('x2')
        self.assertIsInstance(x0, XBUS)
        self.assertIsInstance(x1, XBUS)
        self.assertIsInstance(x2, XBUS)
        self.assertEqual(x0, mc.get_port('x0'))
        self.assertEqual(x1, mc.get_port('x1'))
        self.assertEqual(x2, mc.get_port('x2'))
        self.assertNotEqual(x1, x2)
        self.assertNotEqual(x1, x0)
        with self.assertRaises(x.PortException):
            self.mc.get_port('x3')
        self.assertNotEqual(x1, x0)

    def test_get_invalid_port(self):
        """Malformed port names are rejected with PortException."""
        bad_ports = ['lawl', 'l0', 'python']
        for p in bad_ports:
            with self.assertRaises(x.PortException):
                self.mc.get_port(p)

    def test_link_nonport(self):
        """Linking a port to a non-port object raises TypeError."""
        with self.assertRaises(TypeError):
            self.mc.p0.link(self.mc)

    def test_get_port_shorthand(self):
        """Attribute access (mc.p1, mc.x0) is sugar for get_port()."""
        self.assertIsInstance(self.mc.p1, GPIO)
        self.assertIsInstance(self.mc.x0, XBUS)
        # Port-shaped but out-of-range names raise PortException...
        with self.assertRaises(x.PortException):
            self.mc.x10
        # ...while arbitrary attributes raise a plain AttributeError.
        with self.assertRaises(AttributeError):
            self.mc.foobarbizz

    def test_doc_examples(self):
        """Mirror the README examples for linked-port reads and writes."""
        mc1 = MC4000()
        mc2 = MC6000()
        mc1.p0.link(mc2.p1)  # Link the ports.
        self.assertEqual(0, mc1.p0.read())
        self.assertEqual(0, mc2.p1.read())
        mc1.p0.write(100)
        self.assertEqual(100, mc2.p1.read())
        mc1 = MC4000()
        mc2 = MC6000()
        mc1.p0.link(mc2.p1)  # Link the ports.
        self.assertEqual(0, mc1.p0.read())
        self.assertEqual(0, mc2.p1.read())
        mc1.p0.write(100)
        # NOTE(review): unlike the first half, these asserts expect 0 right
        # after write(100) on freshly linked ports — confirm this documents
        # intended semantics (e.g. values visible only after a step/tick)
        # rather than a copy-paste error.
        self.assertEqual(0, mc1.p0.read())
        self.assertEqual(0, mc2.p1.read())
|
def eggsfunc(obj):
    """Return four times the instance's ``value`` attribute.

    Intended to be attached to classes by the Extender decorator.
    """
    quadrupled = obj.value * 4
    return quadrupled
def hamfunc(obj, value):
    """Return *value* with 'ham' appended; *obj* (the instance) is unused.

    Intended to be attached to classes by the Extender decorator.
    """
    suffix = 'ham'
    return value + suffix
def Extender(aClass):
    """Class decorator: graft the eggs/ham methods onto *aClass*.

    Returns the same class object, now answering .eggs() and .ham().
    """
    setattr(aClass, 'eggs', eggsfunc)
    setattr(aClass, 'ham', hamfunc)
    return aClass
@Extender
class Client1:
    """Demo class augmented with eggs()/ham() by the Extender decorator."""

    def __init__(self, value):
        # Consumed by spam() below and by the injected eggsfunc (value * 4).
        self.value = value

    def spam(self):
        # Works for strings too: 'Ni!' * 2 == 'Ni!Ni!'.
        return self.value * 2
@Extender
class Client2:
    """Second demo client; the class attribute feeds the injected eggs()."""

    # Class-level value read by the injected eggsfunc.
    value = 'ni?'
# Demonstrate that the decorator added eggs/ham to both classes.
X = Client1('Ni!')
print(X.spam())        # 'Ni!Ni!' (value * 2)
print(X.eggs())        # 'Ni!Ni!Ni!Ni!' (value * 4 via injected eggsfunc)
print(X.ham('bacon'))  # 'baconham'
Y = Client2()
print(Y.eggs())        # 'ni?ni?ni?ni?'
print(Y.ham('bacon'))  # 'baconham'
|
22,839 | 3f874129737d78eed8d1afa10a8e8e8f496fd74b | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""EdgeQL set compilation functions."""
import contextlib
import copy
import typing
from edb import errors
from edb.lang.common import parsing
from edb.lang.ir import ast as irast
from edb.lang.ir import utils as irutils
from edb.lang.schema import abc as s_abc
from edb.lang.schema import expr as s_expr
from edb.lang.schema import links as s_links
from edb.lang.schema import name as s_name
from edb.lang.schema import nodes as s_nodes
from edb.lang.schema import objtypes as s_objtypes
from edb.lang.schema import pointers as s_pointers
from edb.lang.schema import sources as s_sources
from edb.lang.schema import types as s_types
from edb.lang.schema import utils as s_utils
from edb.lang.edgeql import ast as qlast
from edb.lang.edgeql import parser as qlparser
from . import astutils
from . import context
from . import dispatch
from . import inference
from . import pathctx
from . import schemactx
from . import stmtctx
from . import typegen
PtrDir = s_pointers.PointerDirection
def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:
    """Create a new ir.Set instance with given attributes.

    Absolutely all ir.Set instances must be created using this
    constructor.
    """
    ir_set = irast.Set(**kwargs)
    # Record every Set so later compiler passes can visit/patch all of them.
    ctx.all_sets.append(ir_set)
    return ir_set
def new_set_from_set(
        ir_set: irast.Set, *,
        preserve_scope_ns: bool=False,
        path_id: typing.Optional[irast.PathId]=None,
        stype: typing.Optional[s_types.Type]=None,
        ctx: context.ContextLevel) -> irast.Set:
    """Create a new ir.Set from another ir.Set.

    The new Set inherits source Set's scope, schema item, expression,
    and, if *preserve_scope_ns* is set, path_id. If *preserve_scope_ns*
    is False, the new Set's path_id will be namespaced with the currently
    active scope namespace.
    """
    if path_id is None:
        path_id = ir_set.path_id
    if not preserve_scope_ns:
        # Re-namespace the path id into the current scope.
        path_id = path_id.merge_namespace(ctx.path_id_namespace)
    if stype is None:
        stype = ir_set.stype
    result = new_set(
        path_id=path_id,
        path_scope_id=ir_set.path_scope_id,
        stype=stype,
        expr=ir_set.expr,
        ctx=ctx
    )
    # The incoming pointer is shared with the source Set, not copied.
    result.rptr = ir_set.rptr
    return result
def compile_path(expr: qlast.Path, *, ctx: context.ContextLevel) -> irast.Set:
    """Create an ir.Set representing the given EdgeQL path expression.

    Walks expr.steps left to right, maintaining *path_tip* (the Set for the
    path compiled so far), then post-processes computable pointers and the
    scope subtrees collected along the way.
    """
    anchors = ctx.anchors

    path_tip = None

    if expr.partial:
        # A leading-dot path: resolve against the active partial-path prefix.
        if ctx.partial_path_prefix is not None:
            path_tip = ctx.partial_path_prefix
        else:
            raise errors.QueryError(
                'could not resolve partial path ',
                context=expr.context)

    extra_scopes = {}      # Set -> scope subtree to fuse in after the walk.
    computables = []       # Sets whose pointers must be recompiled as computables.
    path_sets = []         # path_tip after each step, in order.

    for i, step in enumerate(expr.steps):
        if isinstance(step, qlast.Source):
            # 'self' can only appear as the starting path label
            # syntactically and is a known anchor
            path_tip = anchors[step.__class__]

        elif isinstance(step, qlast.Subject):
            # '__subject__' can only appear as the starting path label
            # syntactically and is a known anchor
            path_tip = anchors[step.__class__]

        elif isinstance(step, qlast.ObjectRef):
            if i > 0:
                raise RuntimeError(
                    'unexpected ObjectRef as a non-first path item')

            refnode = None

            if not step.module and step.name not in ctx.aliased_views:
                # Check if the starting path label is a known anchor
                refnode = anchors.get(step.name)

            if refnode is not None:
                path_tip = new_set_from_set(
                    refnode, preserve_scope_ns=True, ctx=ctx)
            else:
                stype = schemactx.get_schema_type(
                    step, item_types=(s_objtypes.ObjectType,), ctx=ctx)

                if (stype.get_view_type(ctx.env.schema) is not None and
                        stype.get_name(ctx.env.schema) not in ctx.view_nodes):
                    # This is a schema-level view, as opposed to
                    # a WITH-block or inline alias view.
                    stype = stmtctx.declare_view_from_schema(stype, ctx=ctx)

                path_tip = class_set(stype, ctx=ctx)
                view_set = ctx.view_sets.get(stype)
                if view_set is not None:
                    path_tip = new_set_from_set(view_set, ctx=ctx)
                    path_scope = ctx.path_scope_map.get(view_set)
                    extra_scopes[path_tip] = path_scope.copy()

                view_scls = ctx.class_view_overrides.get(
                    stype.get_name(ctx.env.schema))
                if view_scls is not None:
                    path_tip.stype = view_scls

        elif isinstance(step, qlast.Ptr):
            # Pointer traversal step
            ptr_expr = step
            ptr_target = None
            direction = (ptr_expr.direction or
                         s_pointers.PointerDirection.Outbound)
            if ptr_expr.target:
                # ... link [IS Target]
                ptr_target = schemactx.get_schema_type(
                    ptr_expr.target.maintype, ctx=ctx)
                if not isinstance(ptr_target, s_objtypes.ObjectType):
                    raise errors.QueryError(
                        f'invalid type filter operand: '
                        f'{ptr_target.get_name(ctx.env.schema)} '
                        f'is not an object type',
                        context=ptr_expr.target.context)

            ptr_name = ptr_expr.ptr.name

            if ptr_expr.type == 'property':
                # Link property reference; the source is the
                # link immediately preceding this step in the path.
                source = path_tip.rptr.ptrcls
            else:
                source = path_tip.stype

            with ctx.newscope(fenced=True, temporary=True) as subctx:
                if isinstance(source, s_abc.Tuple):
                    path_tip = tuple_indirection_set(
                        path_tip, source=source, ptr_name=ptr_name,
                        source_context=step.context, ctx=subctx)
                else:
                    path_tip = ptr_step_set(
                        path_tip, source=source, ptr_name=ptr_name,
                        direction=direction, ptr_target=ptr_target,
                        ignore_computable=True,
                        source_context=step.context, ctx=subctx)

                    ptrcls = path_tip.rptr.ptrcls
                    if _is_computable_ptr(ptrcls, ctx=ctx):
                        # Defer computable compilation to the loop below,
                        # when the full scope tree is known.
                        computables.append(path_tip)

        else:
            # Arbitrary expression
            if i > 0:
                raise RuntimeError(
                    'unexpected expression as a non-first path item')

            with ctx.newscope(fenced=True, temporary=True) as subctx:
                path_tip = ensure_set(
                    dispatch.compile(step, ctx=subctx), ctx=subctx)

                if path_tip.path_id.is_type_indirection_path(ctx.env.schema):
                    scope_set = path_tip.rptr.source
                else:
                    scope_set = path_tip

                extra_scopes[scope_set] = subctx.path_scope

        # If the tip is a view-mapped path, substitute the mapped Set.
        for key_path_id in path_tip.path_id.iter_weak_namespace_prefixes():
            mapped = ctx.view_map.get(key_path_id)
            if mapped is not None:
                path_tip = new_set(
                    path_id=mapped.path_id,
                    stype=path_tip.stype,
                    expr=mapped.expr,
                    rptr=mapped.rptr,
                    ctx=ctx)
                break

        path_sets.append(path_tip)

    path_tip.context = expr.context
    pathctx.register_set_in_scope(path_tip, ctx=ctx)

    for ir_set in computables:
        scope = ctx.path_scope.find_descendant(ir_set.path_id)
        if scope is None:
            # The path is already in the scope, no point
            # in recompiling the computable expression.
            continue

        with ctx.new() as subctx:
            subctx.path_scope = scope
            comp_ir_set = computable_ptr_set(ir_set.rptr, ctx=subctx)
            i = path_sets.index(ir_set)
            # Splice the recompiled Set back into the step chain.
            if i != len(path_sets) - 1:
                path_sets[i + 1].rptr.source = comp_ir_set
            else:
                path_tip = comp_ir_set
            path_sets[i] = comp_ir_set

    for ir_set, scope in extra_scopes.items():
        node = ctx.path_scope.find_descendant(ir_set.path_id)
        if node is None:
            # The path portion not being a descendant means
            # that it is already present in the scope above us,
            # along with the view scope.
            continue

        fuse_scope_branch(ir_set, node, scope, ctx=ctx)
        if ir_set.path_scope_id is None:
            pathctx.assign_set_scope(ir_set, node, ctx=ctx)

    return path_tip
def fuse_scope_branch(
        ir_set: irast.Set, parent: irast.ScopeTreeNode,
        branch: irast.ScopeTreeNode, *,
        ctx: context.ContextLevel) -> None:
    """Attach scope subtree *branch* under *parent*, collapsing duplicates.

    If *parent* and the effective branch root share the same path_id, the
    branch's children are re-rooted to avoid a redundant nested node.
    """
    if parent.path_id is None:
        parent.attach_subtree(branch)
    else:
        # Unwrap a single anonymous wrapper node, if present.
        if branch.path_id is None and len(branch.children) == 1:
            target_branch = next(iter(branch.children))
        else:
            target_branch = branch

        if parent.path_id == target_branch.path_id:
            new_root = irast.new_scope_tree()
            for child in tuple(target_branch.children):
                new_root.attach_child(child)

            parent.attach_subtree(new_root)
        else:
            parent.attach_subtree(branch)
def ptr_step_set(
        path_tip: irast.Set, *,
        source: s_sources.Source,
        ptr_name: str,
        direction: PtrDir,
        ptr_target: typing.Optional[s_nodes.Node]=None,
        source_context: parsing.ParserContext,
        ignore_computable: bool=False,
        ctx: context.ContextLevel) -> irast.Set:
    """Extend *path_tip* with a pointer step, resolving *ptr_name* on *source*.

    If *ptr_target* narrows the pointer's natural target ([IS Target]),
    a type-indirection Set is appended as well.
    """
    ptrcls = resolve_ptr(
        source, ptr_name, direction,
        target=ptr_target, source_context=source_context,
        ctx=ctx)

    target = ptrcls.get_far_endpoint(ctx.env.schema, direction)

    path_tip = extend_path(
        path_tip, ptrcls, direction, target,
        ignore_computable=ignore_computable, ctx=ctx)

    if ptr_target is not None and target != ptr_target:
        # Explicit [IS Target] filter narrower than the pointer's target.
        path_tip = class_indirection_set(
            path_tip, ptr_target, optional=False, ctx=ctx)

    return path_tip
def resolve_ptr(
        near_endpoint: s_sources.Source,
        pointer_name: str,
        direction: s_pointers.PointerDirection,
        target: typing.Optional[s_nodes.Node]=None, *,
        source_context: typing.Optional[parsing.ParserContext]=None,
        ctx: context.ContextLevel) -> s_pointers.Pointer:
    """Resolve *pointer_name* on *near_endpoint* to a schema Pointer.

    Raises InvalidReferenceError (enriched with lookup suggestions for
    outbound references) when the pointer cannot be found.
    """
    ptr = None

    if isinstance(near_endpoint, s_sources.Source):
        ctx.env.schema, ptr = near_endpoint.resolve_pointer(
            ctx.env.schema,
            pointer_name,
            direction=direction,
            look_in_children=False,
            include_inherited=True,
            far_endpoint=target)

        if ptr is None:
            # Build a precise error message for the failed lookup.
            if isinstance(near_endpoint, s_links.Link):
                msg = (f'{near_endpoint.get_displayname(ctx.env.schema)} '
                       f'has no property {pointer_name!r}')
                if target:
                    # Bug fix: leading space was missing, producing
                    # "no property 'foo'of type 'Bar'".
                    msg += f' of type {target.get_name(ctx.env.schema)!r}'

            elif direction == s_pointers.PointerDirection.Outbound:
                msg = (f'{near_endpoint.get_displayname(ctx.env.schema)} '
                       f'has no link or property {pointer_name!r}')
                if target:
                    # Bug fix: same missing-space issue as above.
                    msg += f' of type {target.get_name(ctx.env.schema)!r}'

            else:
                nep_name = near_endpoint.get_displayname(ctx.env.schema)
                path = f'{nep_name}.{direction}{pointer_name}'
                if target:
                    path += f'[IS {target.get_displayname(ctx.env.schema)}]'
                msg = f'{path!r} does not resolve to any known path'

            err = errors.InvalidReferenceError(msg, context=source_context)

            if direction == s_pointers.PointerDirection.Outbound:
                near_endpoint_pointers = near_endpoint.get_pointers(
                    ctx.env.schema)
                # Augment the error with "did you mean ..." suggestions.
                s_utils.enrich_schema_lookup_error(
                    err, pointer_name, modaliases=ctx.modaliases,
                    item_types=(s_pointers.Pointer,),
                    collection=near_endpoint_pointers.objects(ctx.env.schema),
                    schema=ctx.env.schema
                )

            raise err

    else:
        # Non-Source endpoint: only the std::__type__ introspection
        # pointer is resolvable.
        if direction == s_pointers.PointerDirection.Outbound:
            bptr = schemactx.get_schema_ptr(pointer_name, ctx=ctx)
            schema_cls = ctx.env.schema.get('schema::ScalarType')
            if bptr.get_shortname(ctx.env.schema) == 'std::__type__':
                ctx.env.schema, ptr = bptr.derive(
                    ctx.env.schema, near_endpoint, schema_cls)

    if ptr is None:
        # Reference to a property on non-object
        msg = 'invalid property reference on a primitive type expression'
        raise errors.InvalidReferenceError(msg, context=source_context)

    return ptr
def extend_path(
        source_set: irast.Set,
        ptrcls: s_pointers.Pointer,
        direction: PtrDir=PtrDir.Outbound,
        target: typing.Optional[s_nodes.Node]=None, *,
        ignore_computable: bool=False,
        force_computable: bool=False,
        unnest_fence: bool=False,
        same_computable_scope: bool=False,
        ctx: context.ContextLevel) -> irast.Set:
    """Return a Set node representing the new path tip."""
    if ptrcls.is_link_property(ctx.env.schema):
        # Link properties hang off the pointer itself.
        src_path_id = source_set.path_id.ptr_path()
    else:
        if direction != s_pointers.PointerDirection.Inbound:
            source = ptrcls.get_near_endpoint(ctx.env.schema, direction)
            if not source_set.stype.issubclass(ctx.env.schema, source):
                # Polymorphic link reference
                source_set = class_indirection_set(
                    source_set, source, optional=True, ctx=ctx)

        src_path_id = source_set.path_id

    if target is None:
        target = ptrcls.get_far_endpoint(ctx.env.schema, direction)

    path_id = src_path_id.extend(ptrcls, direction, target,
                                 ns=ctx.path_id_namespace,
                                 schema=ctx.env.schema)

    target_set = new_set(stype=target, path_id=path_id, ctx=ctx)

    ptr = irast.Pointer(
        source=source_set,
        target=target_set,
        ptrcls=ptrcls,
        direction=direction
    )

    target_set.rptr = ptr

    if (not ignore_computable and _is_computable_ptr(
            ptrcls, force_computable=force_computable, ctx=ctx)):
        # Replace the plain step with the compiled computable expression.
        target_set = computable_ptr_set(
            ptr, unnest_fence=unnest_fence,
            same_computable_scope=same_computable_scope, ctx=ctx)

    return target_set
def _is_computable_ptr(
        ptrcls, *,
        force_computable: bool=False,
        ctx: context.ContextLevel) -> bool:
    """Report whether *ptrcls* must be compiled as a computable pointer.

    A recorded source-map expression is authoritative; otherwise pure
    computables, and (when *force_computable*) pointers with defaults,
    are treated as computable.
    """
    try:
        qlexpr = ctx.source_map[ptrcls][0]
    except KeyError:
        pass
    else:
        return qlexpr is not None

    if ptrcls.is_pure_computable(ctx.env.schema):
        return True

    if force_computable and ptrcls.get_default(ctx.env.schema) is not None:
        return True

    # Bug fix: the function previously fell off the end and returned None;
    # the declared return type is bool.
    return False
def tuple_indirection_set(
        path_tip: irast.Set, *,
        source: s_sources.Source,
        ptr_name: str,
        source_context: parsing.ParserContext,
        ctx: context.ContextLevel) -> irast.Set:
    """Return a Set for a tuple-element access step (``tup.name``/``tup.0``)."""
    el_name = ptr_name
    # Normalize positional/named element references to the canonical name.
    el_norm_name = source.normalize_index(ctx.env.schema, el_name)
    el_type = source.get_subtype(ctx.env.schema, el_name)

    path_id = irutils.tuple_indirection_path_id(
        path_tip.path_id, el_norm_name, el_type,
        schema=ctx.env.schema)
    expr = irast.TupleIndirection(
        expr=path_tip, name=el_norm_name, path_id=path_id,
        context=source_context)

    return generated_set(expr, ctx=ctx)
def class_indirection_set(
        source_set: irast.Set,
        target_scls: s_nodes.Node, *,
        optional: bool,
        ctx: context.ContextLevel) -> irast.Set:
    """Return a Set representing a type indirection ([IS target_scls])."""
    poly_set = new_set(stype=target_scls, ctx=ctx)
    rptr = source_set.rptr

    # Cardinality of the indirection mirrors the incoming pointer's.
    if (rptr is not None and
            not rptr.ptrcls.singular(ctx.env.schema, rptr.direction)):
        cardinality = irast.Cardinality.MANY
    else:
        cardinality = irast.Cardinality.ONE

    poly_set.path_id = irutils.type_indirection_path_id(
        source_set.path_id, target_scls, optional=optional,
        cardinality=cardinality,
        schema=ctx.env.schema)

    ptr = irast.Pointer(
        source=source_set,
        target=poly_set,
        ptrcls=poly_set.path_id.rptr(),
        direction=poly_set.path_id.rptr_dir()
    )

    poly_set.rptr = ptr

    return poly_set
def class_set(
        stype: s_nodes.Node, *,
        path_id: typing.Optional[irast.PathId]=None,
        ctx: context.ContextLevel) -> irast.Set:
    """Return a Set referencing schema type *stype* as a path root."""
    if path_id is None:
        path_id = pathctx.get_path_id(stype, ctx=ctx)
    return new_set(path_id=path_id, stype=stype, ctx=ctx)
def generated_set(
        expr: irast.Base, path_id: typing.Optional[irast.PathId]=None, *,
        typehint: typing.Optional[s_types.Type]=None,
        ctx: context.ContextLevel) -> irast.Set:
    """Wrap a compiled expression in a Set under a fresh '__expr__' alias."""
    if typehint is not None:
        # Convert the schema-level type hint into an IR typeref.
        ql_typeref = s_utils.typeref_to_ast(ctx.env.schema, typehint)
        ir_typeref = typegen.ql_typeref_to_ir_typeref(ql_typeref, ctx=ctx)
    else:
        ir_typeref = None

    alias = ctx.aliases.get('expr')
    return new_expression_set(
        expr, path_id, alias=alias, typehint=ir_typeref, ctx=ctx)
def get_expression_path_id(
        t: s_types.Type, alias: str, *,
        ctx: context.ContextLevel) -> irast.PathId:
    """Return a PathId for an expression result, named in '__expr__'."""
    typename = s_name.Name(module='__expr__', name=alias)
    return pathctx.get_path_id(t, typename=typename, ctx=ctx)
def new_expression_set(
        ir_expr, path_id=None, alias=None,
        typehint: typing.Optional[irast.TypeRef]=None, *,
        ctx: context.ContextLevel) -> irast.Set:
    """Wrap *ir_expr* in a Set, inferring its type and deriving a path id."""
    if typehint is not None and irutils.is_empty(ir_expr):
        # An empty set has no inherent type; take it from the hint.
        ir_expr.stype = typehint

    result_type = inference.infer_type(ir_expr, ctx.env)

    if path_id is None:
        path_id = getattr(ir_expr, 'path_id', None)

        if not path_id:
            if alias is None:
                raise ValueError('either path_id or alias are required')
            path_id = get_expression_path_id(result_type, alias, ctx=ctx)

    return new_set(
        path_id=path_id,
        stype=result_type,
        expr=ir_expr,
        context=ir_expr.context,
        ctx=ctx
    )
def scoped_set(
        expr: irast.Base, *,
        typehint: typing.Optional[s_types.Type]=None,
        path_id: typing.Optional[irast.PathId]=None,
        force_reassign: bool=False,
        ctx: context.ContextLevel) -> irast.Set:
    """Return *expr* as a Set bound to the current path scope."""
    if not isinstance(expr, irast.Set):
        ir_set = generated_set(expr, typehint=typehint,
                               path_id=path_id, ctx=ctx)
        pathctx.assign_set_scope(ir_set, ctx.path_scope, ctx=ctx)
    else:
        if typehint is not None:
            ir_set = ensure_set(expr, typehint=typehint,
                                path_id=path_id, ctx=ctx)
        else:
            ir_set = expr

        if ir_set.path_scope_id is None or force_reassign:
            if ctx.path_scope.find_child(ir_set.path_id) and path_id is None:
                # Protect from scope recursion in the common case by
                # wrapping the set into a subquery.
                ir_set = generated_set(
                    ensure_stmt(ir_set, ctx=ctx), typehint=typehint, ctx=ctx)

            pathctx.assign_set_scope(ir_set, ctx.path_scope, ctx=ctx)

    return ir_set
def ensure_set(
        expr: irast.Base, *,
        typehint: typing.Optional[s_types.Type]=None,
        path_id: typing.Optional[irast.PathId]=None,
        ctx: context.ContextLevel) -> irast.Set:
    """Coerce *expr* into a Set, optionally enforcing a type hint."""
    if not isinstance(expr, irast.Set):
        expr = generated_set(expr, typehint=typehint, path_id=path_id, ctx=ctx)

    if (isinstance(expr, irast.EmptySet) and expr.stype is None and
            typehint is not None):
        # Untyped empty set: adopt the hinted type.
        inference.amend_empty_set_type(expr, typehint, schema=ctx.env.schema)

    if (typehint is not None and
            not expr.stype.implicitly_castable_to(typehint, ctx.env.schema)):
        raise errors.QueryError(
            f'expecting expression of type '
            f'{typehint.get_name(ctx.env.schema)}, '
            f'got {expr.stype.get_name(ctx.env.schema)}',
            context=expr.context
        )

    return expr
def ensure_stmt(expr: irast.Base, *, ctx: context.ContextLevel) -> irast.Stmt:
    """Wrap *expr* in an implicit SELECT statement unless it is one already."""
    if not isinstance(expr, irast.Stmt):
        expr = irast.SelectStmt(
            result=ensure_set(expr, ctx=ctx),
            implicit_wrapper=True,
        )
    return expr
def computable_ptr_set(
        rptr: irast.Pointer, *,
        unnest_fence: bool=False,
        same_computable_scope: bool=False,
        ctx: context.ContextLevel) -> irast.Set:
    """Return ir.Set for a pointer defined as a computable."""
    ptrcls = rptr.ptrcls
    source_set = rptr.source
    source_scls = source_set.stype
    # process_view() may generate computable pointer expressions
    # in the form "self.linkname". To prevent infinite recursion,
    # self must resolve to the parent type of the view NOT the view
    # type itself. Similarly, when resolving computable link properties
    # make sure that we use rptr.ptrcls.derived_from.

    if source_scls.is_view(ctx.env.schema):
        source_set = new_set_from_set(
            source_set, preserve_scope_ns=True, ctx=ctx)
        source_set.stype = source_scls.peel_view(ctx.env.schema)
        source_set.shape = []

        if source_set.rptr is not None:
            schema = ctx.env.schema
            derived_from = source_set.rptr.ptrcls.get_derived_from(schema)
            if (derived_from is not None and
                    not derived_from.generic(schema) and
                    derived_from.get_derived_from(schema) is not None and
                    ptrcls.is_link_property(schema)):
                source_set.rptr.ptrcls = derived_from

    try:
        qlexpr, qlctx, inner_source_path_id, path_id_ns = \
            ctx.source_map[ptrcls]
    except KeyError:
        # Not in the source map: fall back to the pointer's default.
        ptrcls_default = ptrcls.get_default(ctx.env.schema)
        if not ptrcls_default:
            ptrcls_sn = ptrcls.get_shortname(ctx.env.schema)
            raise ValueError(
                f'{ptrcls_sn!r} is not a computable pointer')

        if isinstance(ptrcls_default, s_expr.ExpressionText):
            qlexpr = astutils.ensure_qlstmt(qlparser.parse(ptrcls_default))
        else:
            qlexpr = qlast.BaseConstant.from_python(ptrcls_default)

        qlctx = None
        inner_source_path_id = None
        path_id_ns = None

    if qlctx is None:
        # Schema-level computable, completely detached context
        newctx = ctx.detached
    else:
        newctx = _get_computable_ctx(
            rptr=rptr,
            source=source_set,
            source_scls=source_scls,
            inner_source_path_id=inner_source_path_id,
            path_id_ns=path_id_ns,
            same_scope=same_computable_scope,
            qlctx=qlctx,
            ctx=ctx)

    if ptrcls.is_link_property(ctx.env.schema):
        source_path_id = rptr.source.path_id.ptr_path()
    else:
        source_path_id = rptr.target.path_id.src_path()

    path_id = source_path_id.extend(
        ptrcls,
        s_pointers.PointerDirection.Outbound,
        ptrcls.get_target(ctx.env.schema),
        ns=ctx.path_id_namespace,
        schema=ctx.env.schema)

    with newctx() as subctx:
        subctx.view_scls = ptrcls.get_target(ctx.env.schema)
        subctx.view_rptr = context.ViewRPtr(
            source_scls, ptrcls=ptrcls, rptr=rptr)
        # Make "Source" (i.e. `self`) resolve to the computable's source set.
        subctx.anchors[qlast.Source] = source_set
        subctx.empty_result_type_hint = ptrcls.get_target(ctx.env.schema)

        if isinstance(qlexpr, qlast.Statement) and unnest_fence:
            subctx.stmt_metadata[qlexpr] = context.StatementMetadata(
                is_unnest_fence=True)

        comp_ir_set = dispatch.compile(qlexpr, ctx=subctx)

    if ptrcls in ctx.pending_cardinality:
        comp_ir_set_copy = copy.copy(comp_ir_set)
        specified_card, source_ctx = ctx.pending_cardinality[ptrcls]

        stmtctx.get_pointer_cardinality_later(
            ptrcls=ptrcls, irexpr=comp_ir_set_copy,
            specified_card=specified_card, source_ctx=source_ctx,
            ctx=ctx)

        def _check_cardinality(ctx):
            # Deferred: enforce singleton-ness once cardinality is known.
            if ptrcls.singular(ctx.env.schema):
                stmtctx.enforce_singleton_now(comp_ir_set_copy, ctx=ctx)

        stmtctx.at_stmt_fini(_check_cardinality, ctx=ctx)

    # Re-point the compiled Set at the original pointer/path.
    comp_ir_set.stype = ptrcls.get_target(ctx.env.schema)
    comp_ir_set.path_id = path_id
    comp_ir_set.rptr = rptr
    rptr.target = comp_ir_set

    return comp_ir_set
def _get_computable_ctx(
        *,
        rptr: irast.Pointer,
        source: irast.Set,
        source_scls: s_nodes.Node,
        inner_source_path_id: irast.PathId,
        path_id_ns: typing.Optional[irast.WeakNamespace],
        same_scope: bool,
        qlctx: context.ContextLevel,
        ctx: context.ContextLevel) -> typing.ContextManager:
    """Return a context-manager factory for compiling a computable pointer.

    The produced context inherits aliases/views from the defining context
    (*qlctx*) and sets up path-id namespaces so that the recompiled
    expression's source path maps back onto *rptr.source*.
    """
    @contextlib.contextmanager
    def newctx():
        with ctx.new() as subctx:
            subctx.class_view_overrides = {}
            subctx.partial_path_prefix = None

            # Inherit name resolution state from the defining context.
            subctx.modaliases = qlctx.modaliases.copy()
            subctx.aliased_views = qlctx.aliased_views.new_child()
            if source_scls.is_view(ctx.env.schema):
                # Prevent the view from resolving to itself recursively.
                scls_name = source.stype.get_name(ctx.env.schema)
                subctx.aliased_views[scls_name] = None
            subctx.source_map = qlctx.source_map.copy()
            subctx.view_nodes = qlctx.view_nodes.copy()
            subctx.view_sets = qlctx.view_sets.copy()
            subctx.view_map = qlctx.view_map.new_child()

            source_scope = pathctx.get_set_scope(rptr.source, ctx=ctx)
            if source_scope and source_scope.namespaces:
                subctx.path_id_namespace |= source_scope.namespaces

            if path_id_ns is not None:
                subctx.path_id_namespace |= {path_id_ns}

            subctx.pending_stmt_own_path_id_namespace = {
                irast.WeakNamespace(ctx.aliases.get('ns')),
            }

            if path_id_ns is not None and same_scope:
                subctx.pending_stmt_own_path_id_namespace.add(path_id_ns)

            subns = subctx.pending_stmt_full_path_id_namespace = \
                set(subctx.pending_stmt_own_path_id_namespace)

            self_view = ctx.view_sets.get(source.stype)
            if self_view:
                if self_view.path_id.namespace:
                    subns.update(self_view.path_id.namespace)
                inner_path_id = self_view.path_id.merge_namespace(
                    subctx.path_id_namespace | subns)
            else:
                if source.path_id.namespace:
                    subns.update(source.path_id.namespace)

                if inner_source_path_id is not None:
                    # The path id recorded in the source map may
                    # contain namespaces referring to a temporary
                    # scope subtree used by `process_view()`.
                    # Since we recompile the computable expression
                    # using the current path id namespace, the
                    # original source path id needs to be fixed.
                    inner_path_id = inner_source_path_id \
                        .strip_namespace(qlctx.path_id_namespace) \
                        .merge_namespace(subctx.path_id_namespace)
                else:
                    inner_path_id = pathctx.get_path_id(
                        source.stype, ctx=subctx)

                inner_path_id = inner_path_id.merge_namespace(subns)

            # Map the computable's inner "self" path onto the actual source.
            remapped_source = new_set_from_set(rptr.source, ctx=ctx)
            remapped_source.rptr = rptr.source.rptr
            subctx.view_map[inner_path_id] = remapped_source
            yield subctx

    return newctx
|
22,840 | 9515f3ca2a3b4b497d2ef7bce58cdff501765621 | from django.core.management.base import BaseCommand, CommandError
from django_instagram_photo_api.models import InstagramApp, Tag, Post
from django_instagram_photo_api.utils import sync_by_tag, save_post, get_medias_by_tag
class Command(BaseCommand):
    """Management command: sync Instagram posts for each app's tags."""

    help = 'Sync your app with hashtags by added list of id.'

    def add_arguments(self, parser):
        # One or more InstagramApp primary keys.
        parser.add_argument('application_id', nargs='+', type=int)

    def handle(self, *args, **options):
        """Fetch media for every tag registered to each given application."""
        for app_id in options['application_id']:
            try:
                app = InstagramApp.objects.get(pk=app_id)
            except InstagramApp.DoesNotExist:
                raise CommandError('Application "%s" does not exist' % app_id)

            # Per-app sync settings and API credentials.
            is_show = app.tag_is_show
            count = app.tag_count
            token = app.access_token

            tags = Tag.objects.filter(application_id=app_id)
            for tag in tags:
                sync_by_tag(app_id, tag.name, token, count, is_show)
class Character:
    """Base game character: a name, clamped hit points, basic actions."""

    kind = 'Human'

    def __init__(self, name):
        self.name = name
        self._hp = 10

    def get_hp(self):
        """Return current hit points, never below zero."""
        if self._hp > 0:
            return self._hp
        return 0

    def set_hp(self, hp):
        """Set hit points, clamping negative values to zero."""
        self._hp = hp if hp > 0 else 0

    @property
    def hp(self):
        # Property facade over the explicit getter/setter pair.
        return self.get_hp()

    @hp.setter
    def hp(self, val):
        self.set_hp(val)

    def run(self):
        print(f'{self.name} ({Character.kind}) Running')

    def shoot(self):
        print(f'{self.name} Shooting')

    def take(self):
        print(f'{self.name} Taking')

    @staticmethod
    def say_kind():
        print(f'Character kind: {Character.kind}')
class FlyingCharacter(Character):
    """Character variant that can fly; overrides run() with its own style."""

    def run(self):
        print(f'{self.name} Running with jumping')

    def fly(self):
        print(f'{self.name} Flying')
class SwimmingCharacter(Character):
    """Character variant that can swim; overrides run() with its own style."""

    def run(self):
        print(f'{self.name} Running on water')

    def swim(self):
        print(f'{self.name} Swimming')
# One instance of each variant to demonstrate polymorphic run() below.
bob = FlyingCharacter('Bob')
alice = SwimmingCharacter('Alice')
frank = Character('Frank')
def marathon(participants):
    """Invoke run() on every participant (polymorphic dispatch demo)."""
    for racer in participants:
        racer.run()
# Each character prints its own run() variant.
marathon([bob, alice, frank])
bob.fly()
# Explicit getter/setter pair...
bob.set_hp(100)
print(bob.get_hp())
# ...and the equivalent property interface.
bob.hp = 110
print(bob.hp)
bob.take()
alice.swim()
alice.run()
22,842 | 2d22c823a63c33dcd071ff1bb7b2367d0dd51f42 | import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
## @params: [JOB_NAME]
# Standard Glue job bootstrap: resolve job args and initialize contexts.
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "twitter-data", table_name = "twitter_state_selected", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
# NOTE: <your_database>/<your_table> are template placeholders — fill in
# before running.
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = <your_database>, table_name = <your_table>, transformation_ctx = "datasource0")
## @type: ApplyMapping
## @args: [mapping = [("id", "long", "id", "long"), ("text", "string", "text", "string")], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource0]
applymapping1 = ApplyMapping.apply(frame = datasource0, mappings = [("id", "long", "id", "long"), ("text", "string", "text", "string")], transformation_ctx = "applymapping1")
## This is to convert a Dynamic Frame into a DataFrame first and finally a RDD
applymapping1_rdd = applymapping1.toDF().rdd
## This is the sorted Word Count code
# Split tweet text on spaces, count word occurrences, sort descending.
counts_rdd = applymapping1_rdd.flatMap(lambda line: line["text"].split(" ")) \
    .map(lambda word: (word, 1)) \
    .reduceByKey(lambda a, b: a + b) \
    .sortBy(lambda x: -x[1])
## This is to merge all the files into one
counts_df = counts_rdd.coalesce(1)
## This is to convert an RDD into DataFrame first and finally Dynamic Frame
counts_dynamicframe = DynamicFrame.fromDF(counts_df.toDF(), glueContext, "counts")
## @type: DataSink
## @args: [connection_type = "s3", connection_options = {"path": "s3://aiops-2020/data-lake/pierre"}, format = "csv", transformation_ctx = "datasink2"]
## @return: datasink2
## @inputs: [frame = applymapping1]
datasink2 = glueContext.write_dynamic_frame.from_options(frame = counts_dynamicframe, connection_type = "s3", connection_options = {"path": <your_path_to_s3>}, format = "csv", transformation_ctx = "datasink2")
job.commit() |
22,843 | 25b9cf15b1c0e66478591855805ca42af37395f8 | import os
import logging
import redis
import gevent
import ast
from flask import Flask, render_template, jsonify
from flask_sockets import Sockets
from subprocess import check_output
# Required: Redis connection URL (raises KeyError at import time if unset).
REDIS_URL = os.environ['REDIS_URL']
# Pub/sub channel shared by all server processes.
REDIS_CHAN = 'server'
app = Flask(__name__, template_folder='build', static_folder='build/static')
app.debug = 'DEBUG' in os.environ
sockets = Sockets(app)
# NOTE: this rebinds the module name `redis` to a client instance, shadowing
# the imported module; all later `redis.*` calls go through this client.
redis = redis.from_url(REDIS_URL)
class Manager(object):
    """Fan-out hub: relays Redis pub/sub messages to connected websockets."""

    def __init__(self):
        self.clients = list()
        self.pubsub = redis.pubsub()
        self.pubsub.subscribe(REDIS_CHAN)
        # Most recent relayed payload; replayed to newly registered clients.
        self.last_message = ''

    def __iter_data(self):
        """Yield parsed payload dicts published on the Redis channel."""
        for message in self.pubsub.listen():
            raw_data = message.get('data')
            if message['type'] == 'message':
                # Payloads arrive as the repr of a dict; literal_eval is the
                # safe parser for that (no arbitrary code execution).
                data = ast.literal_eval(raw_data)
                app.logger.info('Sending message: {}'.format(data))
                self.last_message = data['message']
                yield data

    def register(self, client):
        """Add a websocket client, replaying the latest message to it."""
        self.send(client, self.last_message)
        self.clients.append(client)

    def send(self, client, data):
        """Send *data* to *client*; drop the client on any send failure."""
        try:
            client.send(data)
        except Exception:
            # Bug fix: remove() raised ValueError when two greenlets dropped
            # the same dead client concurrently; guard the removal.
            if client in self.clients:
                self.clients.remove(client)

    def run(self):
        """Relay loop: forward each payload to all clients except the sender."""
        for data in self.__iter_data():
            # Bug fix: iterate over a snapshot — send() (spawned below) may
            # remove clients from self.clients while we are iterating.
            for client in list(self.clients):
                if hash(client) != data['from']:
                    gevent.spawn(self.send, client, data['message'])

    def start(self):
        """Launch the relay loop in a background greenlet."""
        gevent.spawn(self.run)
# Single module-level relay: starts listening as soon as the module loads.
manager = Manager()
manager.start()
@app.route('/')
def root():
    # Serve the single-page app entry point from the build directory.
    return render_template('index.html')
@app.route('/execute')
def execute():
    # SECURITY WARNING: this runs the most recent chat message as arbitrary
    # Python code via `python -c` -- remote code execution by design.  Never
    # expose this endpoint to untrusted users.
    output = check_output(['python', '-c', manager.last_message])
    return jsonify({'output': output})
@sockets.route('/connect')
def connect(ws):
    """WebSocket endpoint: register the client and relay what it sends."""
    manager.register(ws)
    while not ws.closed:
        # Yield briefly so other greenlets (the relay loop) can run.
        gevent.sleep(0.1)
        message = ws.receive()
        if message:
            data = {'from': hash(ws), 'message': message}
            app.logger.info('Inserting message: {}'.format(data))
            redis.publish(REDIS_CHAN, data)
|
22,844 | 4acd29505d9f2e528617176c247792ca61c9b884 | '''
Follow these steps to configure the webhook in Slack:
1. Navigate to https://<your-team-domain>.slack.com/services/new
2. Search for and select "Incoming WebHooks".
3. Choose the default channel where messages will be sent and click "Add Incoming WebHooks Integration".
4. Copy the webhook URL from the setup instructions and use it in the next section.
Follow these steps to encrypt your Slack hook URL for use in this function:
1. Create a KMS key - http://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html.
2. Encrypt the event collector token using the AWS CLI.
$ aws kms encrypt --key-id alias/<KMS key name> --plaintext "<SLACK_HOOK_URL>"
Note: You must exclude the protocol from the URL (e.g. "hooks.slack.com/services/abc123").
3. Copy the base-64 encoded, encrypted key (CiphertextBlob) to the ENCRYPTED_HOOK_URL variable.
4. Give your function's role permission for the kms:Decrypt action.
Example:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1443036478000",
"Effect": "Allow",
"Action": [
"kms:Decrypt"
],
"Resource": [
"<your KMS key ARN>"
]
}
]
}
'''
from __future__ import print_function
import boto3
import json
import logging
import datetime
from slackclient import SlackClient
from base64 import b64decode
from urllib2 import Request, urlopen, URLError, HTTPError
ENCRYPTED_SLACK_TOKEN = "CiBVhQSX5nYIrdq5ceR6CokUkwMNrVwB/19yzaVQE3VObRK6AQEBAgB4VYUEl+Z2CK3auXHkegqJFJMDDa1cAf9fcs2lUBN1Tm0AAACRMIGOBgkqhkiG9w0BBwaggYAwfgIBADB5BgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDGgO/qyI+JCNrHKfoAIBEIBM69UzS2OeFrD0xTSMfZJumJX8hvhBejQ7vVPYQMZrRjY+Rax7HrHbA3SUXcznQcLipzUnqxKEf/6f9iD4CoWCmDHzT6dzpXhtBp8CwA=="
ENCRYPTED_HOOK_URL = "CiBVhQSX5nYIrdq5ceR6CokUkwMNrVwB/19yzaVQE3VObRLQAQEBAgB4VYUEl+Z2CK3auXHkegqJFJMDDa1cAf9fcs2lUBN1Tm0AAACnMIGkBgkqhkiG9w0BBwaggZYwgZMCAQAwgY0GCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMd78Kw7APHJ8y5oCAAgEQgGD8tmHEj/KszBdqau89uWktLwp6Az3++0XOcILugA+SGUG5Oq+VC/m7opXHdQGJrr12HYrh4Cez8J6c6jC/hh+TeV8FX0szERtbweONYwRkYEjgQOK9a+is6BtVAlxDv2E="
SLACK_CHANNEL = 'standup' # Enter the Slack channel to send a message to
SLACK_API_ROOT = "https://slack.com/api/"
HOOK_URL = "https://" + boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_HOOK_URL))['Plaintext']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def check_in_date_range(date):
    """Return True when the message dict's 'ts' timestamp lies within the
    last 10 hours relative to US Central time (approximated as UTC-5).

    :param date: Slack message dict containing a 'ts' epoch timestamp.
    NOTE(review): mixes utcnow()-5h with the naive local-time result of
    datetime.fromtimestamp() -- confirm the host clock runs in UTC.
    """
    ts = date['ts']
    central_current_time = datetime.datetime.utcnow() - datetime.timedelta(hours=5)
    # 'delta' replaces the original local name 'range', which shadowed the
    # builtin of the same name.
    delta = central_current_time - datetime.datetime.fromtimestamp(float(ts))
    return delta < datetime.timedelta(hours=10)
def find_users_missing_standup():
    """Return the set of member ids of SLACK_CHANNEL who have not posted
    a message within the last 10 hours.
    """
    # Decrypt the Slack API token with KMS on every call.
    token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']
    sc = SlackClient(token)
    channels = sc.api_call('channels.list')['channels']
    # NOTE: generator .next() is Python 2 only.
    standup = (i for i in channels if i['name'] == SLACK_CHANNEL).next()
    members = standup['members']
    messages = sc.api_call('channels.history', channel=standup['id'])['messages']
    messages_within_last_10_hours = filter(check_in_date_range, messages)
    # Some messages (e.g. bot events) carry no 'user' key.
    users_posted = (i['user'] for i in messages_within_last_10_hours if
                    'user' in i.keys())
    difference = set(members).difference(users_posted)
    return difference
def get_users_name(user_id):
    """Resolve a Slack user id to '@username' via the users.info API."""
    token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']
    sc = SlackClient(token)
    logger.info("SlackClient Started")
    return '@' + sc.api_call('users.info',user=user_id)['user']['name']
def main(event, context):
    """AWS Lambda entry point: post a Slack message naming everyone who has
    not posted a standup in the last 10 hours.

    :param event: Lambda event payload (unused).
    :param context: Lambda context object (unused).
    """
    missing_users = find_users_missing_standup()
    reply_candidates = [get_users_name(user) for user in missing_users]
    logger.info("Reply Candidates %s", reply_candidates)
    msg = 'Missing standups from ' + ', '.join(reply_candidates)
    slack_message = {
        'channel': SLACK_CHANNEL,
        'text': msg
    }
    # BUG FIX: the original called the undefined name `loggerinf`, raising
    # NameError before the webhook request was ever sent.
    logger.info("Preping Request")
    req = Request(HOOK_URL, json.dumps(slack_message))
    try:
        response = urlopen(req)
        response.read()
        logger.info("Message posted to %s", slack_message['channel'])
    except HTTPError as e:
        logger.error("Request failed: %d %s", e.code, e.reason)
    except URLError as e:
        logger.error("Server connection failed: %s", e.reason)
if __name__ == '__main__':
    # BUG FIX: main() requires (event, context); the original passed nothing,
    # which raised TypeError when run as a script.
    main(None, None)
|
22,845 | ec5f15c7db7abcc6d344d277b3936dd8dc0084e9 | from django.views.generic import TemplateView, CreateView, DetailView
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.shortcuts import render
from .models import Product, Category
from django.contrib.auth import authenticate,login
from django.views import View
from django.http import HttpResponseRedirect
# View: user registration (sign up)
class SignUp(CreateView):
    """User signup view backed by Django's built-in UserCreationForm."""
    form_class = UserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'registration/signup.html'
# View: registration-successful page
class SignUpDoneView(TemplateView):
    """Static page shown after a successful registration."""
    template_name = 'registration/signup_done.html'
    title = 'Signup successful'
# View: home page (index.html)
class IndexView(TemplateView):
    """Home page: all categories plus the first six products."""
    template_name = "index.html"
    def get_context_data(self, **kwargs):
        # NOTE(review): super().get_context_data() is not called, so the
        # default TemplateView context is discarded -- confirm intentional.
        categories = Category.objects.all()
        product = Product.objects.all()[:6]
        context = {
            'categorys': categories,
            'products' : product,
        }
        return context
class Product1(TemplateView):
    """Shop page: lists every category and every product."""
    template_name = "shop.html"
    def get_context_data(self, **kwargs):
        # NOTE(review): super().get_context_data() is not called, so the
        # default TemplateView context is discarded -- confirm intentional.
        categories = Category.objects.all()
        product = Product.objects.all()
        context = {
            'categorys1': categories,
            'products1': product,
        }
        return context
def detail(request, pk):
    """Render the product-detail page for the product with primary key *pk*."""
    product = Product.objects.get(pk=pk)
    context = {'detail': product, 'id': pk}
    return render(request, 'product_detail.html', context)
22,846 | 7958780e5fd6137e113b64c2629f95b02e6729a1 | import pyttsx3
import os
# Simple text-to-speech script: read a line from the user and speak it.
txt_speech = pyttsx3.init()
speech = input("What text you want to convert to speech:")
# NOTE(review): 'language' is never passed to the engine, so the default
# voice is used regardless of this value.
language = "en"
txt_speech.say(speech)
txt_speech.runAndWait()
|
22,847 | 60aa8031ce415049ffda60e1a4c98ee5510446e5 | import sys
def splitPerMonkey(lines):
    """Group the puzzle input into per-monkey attribute lists.

    Blank lines separate monkeys.  From every non-blank line only the text
    after the last ':' is kept, and the leading "Monkey N:" header entry is
    dropped from each group.
    """
    groups = []
    current = []
    for raw in lines:
        if raw:
            current.append(raw.split(":")[-1].strip())
        else:
            groups.append(current[1:])
            current = []
    # Flush the final group if the input did not end with a blank line.
    if current:
        groups.append(current[1:])
    return groups
class Monkey(object):
    """One monkey from Advent of Code 2022 day 11.

    Constructed from the attribute strings produced by splitPerMonkey():
    items, operation, divisor test, and the true/false throw targets.
    """

    def __init__(self, data):
        self.items = [int(v) for v in data[0].split(",")]
        self.op = self._parseOp(data[1])
        self.mod = int(data[2].split(" ")[-1])
        self.if_true = int(data[3].split(" ")[-1])
        self.if_false = int(data[4].split(" ")[-1])
        self.inspections = 0

    def _parseOp(self, s):
        # "new = old * 19" -> ["old", "*", "19"]
        rhs = s.split("=")[-1].strip()
        return rhs.split(" ")

    def _applyOp(self, n):
        # Operand "old" means the current worry level itself.
        left = n
        right = n if self.op[2] == "old" else int(self.op[2])
        if self.op[1] == "+":
            return left + right
        elif self.op[1] == "*":
            return left * right

    def playRound(self):
        """Inspect and throw every held item (worry divided by 3, part 1)."""
        global MONKEYS
        self.inspections += len(self.items)
        for item in self.items:
            worry = self._applyOp(item) // 3
            target = self.if_true if worry % self.mod == 0 else self.if_false
            MONKEYS[target].items.append(worry)
        self.items = []
if __name__ == "__main__":
    # Read the full puzzle input from stdin, one stripped line per entry.
    lines = [l.strip() for l in sys.stdin]
    monkey_data = splitPerMonkey(lines)
    MONKEYS = [Monkey(d) for d in monkey_data]
    # Part 1: simulate 20 rounds of keep-away.
    for _ in range(20):
        for m in MONKEYS:
            m.playRound()
    # "Monkey business" = product of the two highest inspection counts.
    inspections = [m.inspections for m in MONKEYS]
    inspections.sort()
    print(inspections[-2] * inspections[-1])
|
22,848 | 42ea51c9cf2484c717374fa8cbbe205eaae81862 | class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Point(x, y)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return Point(x, y)
def __neg__(self):
x = self.x * - 1
y = self.y * -1
return Point(x, y)
def __str__(self):
return "x:%s y:%s" % (self.x, self.y)
def __mul__(self, other):
x = self.x * other
y = self.y * other
return Point(x, y)
|
22,849 | ee2da1ed1cb1f8a2e1ac88a1045afb3afbac9449 | import requests
from bs4 import BeautifulSoup
INPUT_FILE = open('wp_subdirectories.txt').readlines()
OUTPUT_FILE = open('WPLinks.txt', 'wb')
WP_URL = 'https://www.washingtonpost.com/'
links = []
written_links = []
def search_url(url):
try:
wp_home = requests.get(WP_URL, timeout=5)
except Exception:
return
wp_home_s = BeautifulSoup(wp_home.text)
all_links = wp_home_s.find_all('a')
for link in all_links:
try:
href = link['href']
if href.startswith('https://www.washingtonpost.com/news/'):
links.append(href)
except Exception:
pass
for link in links:
newslink = link.split('https://www.washingtonpost.com/')[1]
amp_link = 'https://www.washingtonpost.com/amphtml/' + newslink
try:
if requests.get(amp_link, timeout=5).status_code == 200:
if link not in written_links:
print link + ',' + amp_link
written_links.append(link)
except Exception:
pass
if __name__ == '__main__':
    # Scan every subdirectory URL listed in the input file (one per line).
    for url in INPUT_FILE:
        search_url(url.replace('\n', ''))
|
22,850 | 99729a30fb7432eecca1097e5494ea7dda9774dd | # -*- coding: utf-8 -*-
from openerp.tests.common import TransactionCase
from datetime import datetime
class TestPublicHolidays(TransactionCase):
    """
    Test public-holiday day counting and free-day checks against a fixture
    holiday spanning 1999-05-11 .. 1999-05-17.
    """
    def setUp(self):
        super(TestPublicHolidays, self).setUp()
        self.holiday_obj = self.env['hr.holidays.public']
        # Fixture: one public holiday covering a full week.
        self.holiday_obj.create({
            'name': 'Aid',
            'date_start': '1999-05-11',
            'date_end': '1999-05-17',
        })
    def test_count_days(self):
        # 1999-05-14..20 overlaps the holiday on the 14th-17th -> 4 days.
        days = self.holiday_obj.count_days("1999-05-14", "1999-05-20")
        # BUG FIX: the failure messages claimed the expected value was 6,
        # contradicting the asserted value of 4.
        self.assertEqual(days, 4, 'Error, I get %s instead of 4' % days)
        # The start bound may also be passed as a datetime object.
        days = self.holiday_obj.count_days(
            datetime(year=1999, month=5, day=14), "1999-05-20")
        self.assertEqual(days, 4, 'Error, I get %s instead of 4' % days)
    def test_is_free(self):
        free = self.holiday_obj.is_free("1999-05-20")
        self.assertFalse(free, '1999-05-20 is not free')
        free = self.holiday_obj.is_free("1999-05-17")
        self.assertTrue(free, '1999-05-17 is free')
        free = self.holiday_obj.is_free("1999-05-11")
        self.assertTrue(free, '1999-05-11 is free')
        free = self.holiday_obj.is_free("1999-05-14")
        self.assertTrue(free, '1999-05-14 is free')
|
22,851 | 76f11a50f4db308b9f4e6875a31e3dbb32f250c2 | import itertools
import torch
import os
import copy
from datetime import datetime
import math
import numpy as np
import tqdm
import torch.nn.functional as F
def flatten(lst):
    """Concatenate every tensor in *lst* into a single 1-D tensor."""
    columns = [t.contiguous().view(-1, 1) for t in lst]
    return torch.cat(columns).view(-1)
def unflatten_like(vector, likeTensorList):
    """Split the 2-D row *vector* into tensors shaped like *likeTensorList*.

    Inverse of flatten() applied to a (1, N) vector: consecutive slices of
    the row are reshaped to match each template tensor in turn.
    """
    out = []
    offset = 0
    for like in likeTensorList:
        count = like.numel()
        out.append(vector[:, offset:offset + count].view(like.shape))
        offset += count
    return out
def LogSumExp(x, dim=0):
    """Numerically stable log-sum-exp along *dim* (keeps the reduced dim)."""
    maxes, _ = torch.max(x, dim=dim, keepdim=True)
    summed = (x - maxes).exp().sum(dim=dim, keepdim=True)
    return maxes + torch.log(summed)
def adjust_learning_rate(optimizer, lr):
    """Set every param group of *optimizer* to learning rate *lr*; return lr."""
    for group in optimizer.param_groups:
        group["lr"] = lr
    return lr
def save_checkpoint(dir, epoch, name="checkpoint", **kwargs):
    """Serialize {'epoch': epoch, **kwargs} to <dir>/<name>-<epoch>.pt."""
    payload = {"epoch": epoch}
    payload.update(kwargs)
    filename = "%s-%d.pt" % (name, epoch)
    torch.save(payload, os.path.join(dir, filename))
def train_epoch(
    loader,
    model,
    criterion,
    optimizer,
    cuda=True,
    regression=False,
    verbose=False,
    subset=None,
):
    """Run one training epoch and return {'loss': ..., 'accuracy': ...}.

    criterion is called as criterion(model, input, target) and must return
    (loss, output).  When *subset* is set, only that fraction of the batches
    is used; when *regression* is True, accuracy is reported as None.
    """
    loss_sum = 0.0
    correct = 0.0
    verb_stage = 0
    num_objects_current = 0
    num_batches = len(loader)
    model.train()
    if subset is not None:
        num_batches = int(num_batches * subset)
        loader = itertools.islice(loader, num_batches)
    if verbose:
        loader = tqdm.tqdm(loader, total=num_batches)
    # NOTE: 'input' shadows the builtin; kept for interface stability.
    for i, (input, target) in enumerate(loader):
        if cuda:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        loss, output = criterion(model, input, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Weight the running loss by batch size for a correct epoch average.
        loss_sum += loss.data.item() * input.size(0)
        if not regression:
            pred = output.data.argmax(1, keepdim=True)
            correct += pred.eq(target.data.view_as(pred)).sum().item()
        num_objects_current += input.size(0)
        # Print running stats roughly every 10% of the epoch.
        if verbose and 10 * (i + 1) / num_batches >= verb_stage + 1:
            print(
                "Stage %d/10. Loss: %12.4f. Acc: %6.2f"
                % (
                    verb_stage + 1,
                    loss_sum / num_objects_current,
                    correct / num_objects_current * 100.0,
                )
            )
            verb_stage += 1
    return {
        "loss": loss_sum / num_objects_current,
        "accuracy": None if regression else correct / num_objects_current * 100.0,
    }
def eval(loader, model, criterion, cuda=True, regression=False, verbose=False):
    """Evaluate *model* over *loader* and return {'loss': ..., 'accuracy': ...}.

    NOTE: the function name shadows the builtin eval(); kept for interface
    stability with existing callers.
    """
    loss_sum = 0.0
    correct = 0.0
    num_objects_total = len(loader.dataset)
    model.eval()
    with torch.no_grad():
        if verbose:
            loader = tqdm.tqdm(loader)
        for i, (input, target) in enumerate(loader):
            if cuda:
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            loss, output = criterion(model, input, target)
            # Weight by batch size for a correct dataset-level average.
            loss_sum += loss.item() * input.size(0)
            if not regression:
                pred = output.data.argmax(1, keepdim=True)
                correct += pred.eq(target.data.view_as(pred)).sum().item()
    return {
        "loss": loss_sum / num_objects_total,
        "accuracy": None if regression else correct / num_objects_total * 100.0,
    }
def predict(loader, model, verbose=False):
    """Run *model* over *loader* (on CUDA) and collect softmax predictions.

    Returns {'predictions': (N, C) ndarray, 'targets': (N,) ndarray}.
    """
    predictions = list()
    targets = list()
    model.eval()
    if verbose:
        loader = tqdm.tqdm(loader)
    # NOTE(review): 'offset' is accumulated but never used.
    offset = 0
    with torch.no_grad():
        for input, target in loader:
            input = input.cuda(non_blocking=True)
            output = model(input)
            batch_size = input.size(0)
            predictions.append(F.softmax(output, dim=1).cpu().numpy())
            targets.append(target.numpy())
            offset += batch_size
    return {"predictions": np.vstack(predictions), "targets": np.concatenate(targets)}
def moving_average(net1, net2, alpha=1):
    """In-place update: net1 <- (1 - alpha) * net1 + alpha * net2 (per param)."""
    for p1, p2 in zip(net1.parameters(), net2.parameters()):
        p1.data.mul_(1.0 - alpha)
        p1.data.add_(p2.data * alpha)
def _check_bn(module, flag):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
flag[0] = True
def check_bn(model):
    """Return True if *model* contains any BatchNorm submodule."""
    flag = [False]
    model.apply(lambda module: _check_bn(module, flag))
    return flag[0]
def reset_bn(module):
    """Reset a BatchNorm module's running statistics (mean=0, var=1)."""
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.running_mean = torch.zeros_like(module.running_mean)
        module.running_var = torch.ones_like(module.running_var)
def _get_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
momenta[module] = module.momentum
def _set_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.momentum = momenta[module]
def bn_update(loader, model, verbose=False, subset=None, **kwargs):
    """
    BatchNorm buffers update (if any).
    Performs 1 epochs to estimate buffers average using train dataset.
    :param loader: train dataset loader for buffers average estimation.
    :param model: model being update
    :return: None
    """
    # No BatchNorm layers -> nothing to recompute.
    if not check_bn(model):
        return
    model.train()
    momenta = {}
    model.apply(reset_bn)
    # Remember each BN module's configured momentum so it can be restored.
    model.apply(lambda module: _get_momenta(module, momenta))
    n = 0
    num_batches = len(loader)
    with torch.no_grad():
        if subset is not None:
            num_batches = int(num_batches * subset)
            loader = itertools.islice(loader, num_batches)
        if verbose:
            loader = tqdm.tqdm(loader, total=num_batches)
        for input, _ in loader:
            input = input.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input)
            b = input_var.data.size(0)
            # momentum = b / (n + b) makes the running stats equal to the
            # plain average over all samples seen so far.
            momentum = b / (n + b)
            for module in momenta.keys():
                module.momentum = momentum
            model(input_var, **kwargs)
            n += b
    # Restore the original momenta.
    model.apply(lambda module: _set_momenta(module, momenta))
def inv_softmax(x, eps=1e-10):
    """Elementwise logit: log(x / (1 - x + eps)); eps guards x == 1."""
    ratio = x / (1.0 - x + eps)
    return torch.log(ratio)
def predictions(test_loader, model, seed=None, cuda=True, regression=False, **kwargs):
    """Collect model outputs over *test_loader*.

    Returns (preds, targets) as stacked numpy arrays: raw outputs when
    *regression*, softmax probabilities otherwise.  *seed* reseeds torch
    before every batch (useful for stochastic forward passes).
    """
    # will assume that model is already in eval mode
    # model.eval()
    preds = []
    targets = []
    for input, target in test_loader:
        if seed is not None:
            torch.manual_seed(seed)
        if cuda:
            input = input.cuda(non_blocking=True)
        output = model(input, **kwargs)
        if regression:
            preds.append(output.cpu().data.numpy())
        else:
            probs = F.softmax(output, dim=1)
            preds.append(probs.cpu().data.numpy())
        targets.append(target.numpy())
    return np.vstack(preds), np.concatenate(targets)
def schedule(epoch, lr_init, epochs, swa, swa_start=None, swa_lr=None):
    """Piecewise learning-rate schedule used for SWA-style training.

    Holds lr_init for the first half of the run, linearly anneals down to
    the floor (swa_lr when *swa*, else 1% of lr_init) between 50% and 90%
    of progress, then stays flat.
    """
    progress = epoch / (swa_start if swa else epochs)
    floor_ratio = swa_lr / lr_init if swa else 0.01
    if progress <= 0.5:
        scale = 1.0
    elif progress <= 0.9:
        scale = 1.0 - (1.0 - floor_ratio) * (progress - 0.5) / 0.4
    else:
        scale = floor_ratio
    return lr_init * scale
|
22,852 | 3aeb87de9a62cde97041fb417a4f32fdc3c8ce9c | ff = open('input','r')
ok = 0
bad = 0
for line in ff:
splits = line.split(' ')
rangesplits = splits[0].split('-')
rangelow = int(rangesplits[0])
rangehig = int(rangesplits[1])
pchar = splits[1][0]
count = 0
for c in splits[2]:
if (c == pchar):
count = count + 1
if (count >= rangelow and count <= rangehig):
ok = ok + 1
print("OK " + line)
else:
bad = bad + 1
print("BAD " + line)
print(ok)
print(bad)
|
22,853 | e2df42f3938b0a965d65ffed9825fdae163b1cb3 | #!/usr/bin/env python
#
# Curriculum Module Preprocess Script
# - Run once per run of the module by a user
# - Run before job submission. So -not- in an allocation.
# - onramp_run_params.cfg file is available in current working directory
#
import os
import sys
from configobj import ConfigObj, flatten_errors
from validate import Validator, ValidateError, is_integer
from subprocess import call, check_call, CalledProcessError
#
# Validate the configobj file we received from the server
# Note: The OnRamp server already does this for you, so you can trust
# the validity of the file.
#
# This will always be the name of the file, so fine to hardcode here
# This will always be the name of the file, so fine to hardcode here
conf_file = "onramp_runparams.cfg"
config = ConfigObj(conf_file, configspec="config/onramp_uioptions.cfgspec")
validator = Validator()
results = config.validate(validator, preserve_errors=True)
# On validation failure, print every offending section/key and bail out
# with the module-specific exit code -11.
if results != True:
    print "Configuration file validation failed!"
    for entry in flatten_errors(config, results):
        section_list, key, error = entry
        if key is not None:
            section_list.append(key)
        else:
            section_list.append("[missing section]")
        section_str = ', '.join(section_list)
        # flatten_errors() reports False for missing values.
        if error == False:
            error = "Missing value or section."
        print section_str, ' = ', error
    sys.exit(-11)
#
# Compile the ring program for each run
# - Note we could do this in the deploy script, just once, but this provides
#   another example of how to do it.
#
#
# Change to the 'src' directory
#
os.chdir('src')
#
# Make the program
#
try:
    rtn = check_call("make")
except CalledProcessError as e:
    print "Error: %s" % e
    sys.exit(-1)
# Exit 0 if all is ok
sys.exit(0)
# Exit with a negative value if there was a problem
#sys.exit(-1)
|
22,854 | 1b93a34a2dbc0c9303181c0a59768ba861d683d6 | import socket
import sys
import cv2
import pickle
import struct
from data import DataSerializer
# Open the second video device; properties 3/4 are the OpenCV frame
# width/height ids, so the capture is requested at 320x240.
cam = cv2.VideoCapture(1)
cam.set(3, 320)
cam.set(4, 240)
# JPEG quality used when encoding frames for transmission.
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
def main():
    """Stream webcam frames to the server and display the echoed frames.

    Each round trip: JPEG-encode a frame, pickle it inside a DataSerializer,
    send it length-prefixed (">L"), then read back a length-prefixed pickled
    reply and show its frame.
    """
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = "192.168.3.245"
    port = 8888
    print("connecting to: ", host, " | ", port)
    try:
        soc.connect((host, port))
    except:
        print("Connection error")
        sys.exit()
    print("Enter 'quit' to exit")
    message = "Message"
    while message != 'quit':
        ret, frame = cam.read()
        frame = cv2.imencode('.jpg', frame, encode_param)[1]
        ''' Se crea un data serializer con el frame y un mensaje '''
        # Build a DataSerializer carrying the frame plus a greeting message.
        send_data = DataSerializer(frame, "Saludos del cliente")
        data = pickle.dumps(send_data, 0)
        size = len(data)
        # Length-prefixed send: 4-byte big-endian size, then the payload.
        soc.sendall(struct.pack(">L", size) + data)
        data = b""
        payload_size = struct.calcsize(">L")
        # Read until the 4-byte size header is complete.
        while len(data) < payload_size:
            print('a')
            data += soc.recv(4096)
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack(">L", packed_msg_size)[0]
        # Read until the full reply body has arrived.
        while len(data) < msg_size:
            print("Segundo while")
            data += soc.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]
        # SECURITY NOTE: unpickling data from the network is unsafe unless
        # the peer is fully trusted.
        recv_data=pickle.loads(frame_data, fix_imports=True, encoding="bytes")
        print(recv_data.msg)
        frame = recv_data.frame
        frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
        cv2.imshow('ImageWindow',frame)
        cv2.waitKey(1)
        message = "Message"
    soc.send(b'--quit--')
if __name__ == "__main__":
    main()
22,855 | a0aaad40695e42ddf204e6882460796ae7cd4fce | # ใไธญ็ญใ646. ๆ้ฟๆฐๅฏน้พ
# ็ปๅบย nย ไธชๆฐๅฏนใย ๅจๆฏไธไธชๆฐๅฏนไธญ๏ผ็ฌฌไธไธชๆฐๅญๆปๆฏๆฏ็ฌฌไบไธชๆฐๅญๅฐใ
# ็ฐๅจ๏ผๆไปฌๅฎไนไธ็ง่ท้ๅ
ณ็ณป๏ผๅฝไธไป
ๅฝย b < cย ๆถ๏ผๆฐๅฏน(c, d)ย ๆๅฏไปฅ่ทๅจย (a, b)ย ๅ้ขใๆไปฌ็จ่ฟ็งๅฝขๅผๆฅๆ้ ไธไธชๆฐๅฏน้พใ
# ็ปๅฎไธไธชๅฏนๆฐ้ๅ๏ผๆพๅบ่ฝๅคๅฝขๆ็ๆ้ฟๆฐๅฏน้พ็้ฟๅบฆใไฝ ไธ้่ฆ็จๅฐๆๆ็ๆฐๅฏน๏ผไฝ ๅฏไปฅไปฅไปปไฝ้กบๅบ้ๆฉๅ
ถไธญ็ไธไบๆฐๅฏนๆฅๆ้ ใ
# dp
# class Solution:
# def findLongestChain(self, pairs) -> int:
# pairs.sort()
# dp = [1] * len(pairs)
# for i in range(1, len(dp)):
# for j in range(i-1, -1, -1):
# if dp[j] > 1:
# if pairs[i][0] > pairs[j][1]:
# dp[i] = dp[j] + 1
# break
# if dp[i] == 1:
# for j in range(0, i):
# if pairs[i][0] > pairs[j][1]:
# dp[i] = 2
# break
# return dp[-1]
class Solution:
    def findLongestChain(self, pairs) -> int:
        """Greedy solution: sort by right endpoint, then count every pair
        that starts strictly after the last accepted pair ends.

        Sorts *pairs* in place; assumes at least one pair.
        """
        pairs.sort(key=lambda p: p[1])
        count = 1
        right = pairs[0][1]
        for start, end in pairs[1:]:
            if start > right:
                count += 1
                right = end
        return count
if __name__ == "__main__":
    s = Solution()
    # Each 'result' records the expected answer for the case below;
    # the values are informational only and are not asserted.
    result = 2
    pairs = [[1,2], [2,3], [3,4]]
    print(s.findLongestChain(pairs))
    print()
    result = 4
    pairs = [[-10,-8],[8,9],[-5,0],[6,10],[-6,-4],[1,7],[9,10],[-4,7]]
    print(s.findLongestChain(pairs))
    print()
    result = 3
    pairs = [[-6,9],[1,6],[8,10],[-1,4],[-6,-2],[-9,8],[-5,3],[0,3]]
    print(s.findLongestChain(pairs))
22,856 | fa16e0e0c3e8623f7a7e30591db06df77a936d57 | # collections.Counter()
# problem statement:
#"""Ram is a shoe shop owner. His shop has X number of shoes.
#He has a list containing the size of each shoe he has in his shop.
#There are N number of customers who are willing to pay x1 amount of money
# only if they get the shoe of their desired size.
#Your task is to compute how much money Ram earned."""
from collections import Counter
# NOTE: X (the stock count) must be consumed from stdin but is otherwise
# unused -- the real inventory is the Counter below.
X = int(input())
shoes = Counter(map(int, input().split()))
N = int(input())
income = 0
for i in range(N):
    size, price = map(int, input().split())
    # Sell only if at least one shoe of this size remains.
    if shoes[size]:
        income += price
        shoes[size] -= 1
print(income)
22,857 | 162d67602d77eb27d103a95f89e49396d5581a9c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# test mpi4py
import mpi4py.MPI as mpi
import numpy as np
comm = mpi.COMM_WORLD
rank = comm.rank
siz = comm.size
# This demo is written for exactly two ranks; abort otherwise.
if siz!=2:
    if rank==0: print("must use 2 procs")
    import sys; sys.exit()
# Each rank contributes a different Python object; gather collects both
# on rank 0 (other ranks receive None).
if rank==0:
    a = (1,2)
    a = comm.gather(a,0)
else:
    a = {2:'toto', 3: 'titi'}
    a = comm.gather(a,0)
print("[%d]" % rank, a)
|
22,858 | 897c0b3f21a0cea435a35090060a4530600342fc | #!/usr/bin/env python
# coding: utf-8
from gevent import monkey
# Patch the stdlib before any other import so sockets are gevent-aware.
monkey.patch_all()
from celery_tasks import get_page
# Enqueue 1000 asynchronous page fetches.
for x in range(1000):
    get_page.delay("https://ianchenhq.com")
|
22,859 | 662e53d23e13de60c6ee339ab6fde5ba0a6336bb | ๏ปฟclass is_leap_year(object):
'''Test if a number is leap year or not.'''
def __init__(self, year):
self.year = year
if (year % 4 == 0):
if (year % 100 == 0):
if (year % 400 == 0):
True
else:
False
else:
True
else:
False
|
22,860 | 0cd6faebbe96d5c1547b83b10b683e7888d19e8a | # -*- coding: utf-8 -*-
import hashlib, random
from datetime import datetime
from random import sample, randrange
from simplecouchdb import schema
URL_CHARS = 'abcdefghijkmpqrstuvwxyzABCDEFGHIJKLMNPQRST23456789'
class Project(schema.Document):
    """CouchDB document describing a portfolio project.

    CRUD classmethods operate through the class-level ``db`` handle, which
    the application must bind to a couchdb database at startup.
    """
    name = schema.StringProperty(name='name')
    text = schema.StringProperty(name='text')
    markup = schema.StringProperty(name='markup', default='markdown')
    category = schema.StringProperty(name='category')
    tags = schema.ListProperty(name='tags')
    active = schema.BooleanProperty(name='active', default=False)
    featured = schema.BooleanProperty(name='featured', default=False)
    ctime = schema.DateTimeProperty(name='ctime', auto_now_add=True)
    mtime = schema.DateTimeProperty(name='mtime', auto_now=True)
    preview_small = schema.StringProperty(name='preview_small')
    preview_big = schema.StringProperty(name='preview_big')
    download_mac = schema.StringProperty(name='download_mac')
    download_pc = schema.StringProperty(name='download_pc')
    # Bound to a couchdb database object by the application at startup.
    db = None
    def __unicode__(self):
        """Return the project body text."""
        return self.text
    @classmethod
    def all(self):
        """Every project (couchdb view projects/all)."""
        return self.db.view('projects/all')
    @classmethod
    def allActive(self):
        """Only active projects (couchdb view projects/allActive)."""
        return self.db.view('projects/allActive')
    @classmethod
    def allActiveNotFeatured(self):
        """Active projects that are not featured."""
        return self.db.view('projects/allActiveNotFeatured')
    @classmethod
    def allFeatured(self):
        """Only featured projects (couchdb view projects/allFeatured)."""
        return self.db.view('projects/allFeatured')
    @classmethod
    def create(self, doc):
        """Persist a new Project document."""
        return doc.save(self.db)
    @classmethod
    def retrieve(self, docid):
        """Fetch a project document by id."""
        return self.db.get(docid)
    @classmethod
    def update(self, doc):
        """Save changes to an existing document."""
        return self.db.save(doc)
    @classmethod
    def delete(self, docid):
        """Fetch then delete the document with *docid*."""
        doc = self.db.get(docid)
        return self.db.delete(doc)
    def __repr__(self):
        return '<PROJECT %s>' % self.name
class User(schema.Document):
    """CouchDB user document with password hashing helpers."""
    password = schema.StringProperty(name='password')
    roles = schema.ListProperty(name='roles')
    session = schema.DictProperty(name='session')
    # Bound to a couchdb database object by the application at startup.
    db = None
    @classmethod
    def update(self, doc):
        """Save changes to an existing user document."""
        return self.db.save(doc)
    @classmethod
    def valid_user(self, username, password):
        """Return True when *password* matches the stored value.

        NOTE(review): compares the stored field directly rather than via
        chkpwd(); works only if callers pass an already-hashed password --
        confirm against the login flow.
        """
        user = User.db.get(username)
        if user['password'] == password:
            return True
        return False
    @classmethod
    def user_roles(self, username):
        """Return the role list stored on the user document."""
        user = User.db.get(username)
        return user['roles']
    @staticmethod
    def pwdhash(password, algo='sha512', salt=None):
        """Hash *password* as 'algo$salt$hexdigest', generating a salt if needed."""
        salt = salt or hashlib.new(algo, str(random.random())).hexdigest()[:5]
        hpwd = hashlib.new(algo, ''.join((salt, password))).hexdigest()
        return '$'.join((algo, salt, hpwd))
    @classmethod
    def chkpwd(self, password, reference):
        """Re-hash *password* with the reference's algo+salt and compare."""
        algo, salt, _ = reference.split('$')
        return (User.pwdhash(password, algo, salt) == reference)
class Up(schema.Document):
    """CouchDB document for an uploaded file with a short share id."""
    sid = schema.StringProperty(name='sid')
    user = schema.StringProperty(name='user')
    views = schema.IntegerProperty(name='views')
    ctime = schema.DateTimeProperty(name='ctime', auto_now_add=True)
    # Bound to a couchdb database object by the application at startup.
    db = None
    @classmethod
    def create(self, user, content, name=None, content_type=None):
        """Store *content* as an attachment under a random short id.

        Returns the generated id (3 characters drawn from URL_CHARS;
        randrange(3, 4) always yields length 3).
        """
        uid = ''.join(sample(URL_CHARS, randrange(3, 4)))
        tdoc = Up(id=uid, sid=uid, user=user, views=0)
        tdoc.save(self.db)
        doc = self.db.get(uid)
        self.db.put_attachment(doc, content, name, content_type)
        return uid
    @classmethod
    def retrieve(self, docid):
        """Increment the view counter and return the stored attachment.

        Returns a dict with 'content_type' and 'file' for the attachment
        (the loop means only the last attachment is returned if several
        exist).
        """
        doc = self.db.get(docid)
        doc['views'] += 1
        self.db.save(doc)
        attachments = doc['_attachments']
        for attachment in attachments:
            f = {
                "content_type": attachments[attachment]['content_type'],
                "file": self.db.fetch_attachment(docid, attachment)
            }
        return f;
    @classmethod
    def delete(self, docid):
        """Delete by id.

        NOTE(review): passes the raw id to db.delete, whereas Project.delete
        fetches the document first -- confirm the driver accepts ids here.
        """
        return self.db.delete(docid)
    @classmethod
    def list(self, user):
        """Return all uploads belonging to *user* via a temporary view."""
        design = {
            "map": 'function(doc) { if ((doc.doc_type === "Up") && (doc.user === "%s")) { emit(doc.ctime, doc); } }' % user
        }
        return self.db.temp_view(design)
22,861 | ff29bf9a45da0a3d375a83f13f736ba4d46c5dc9 | #############################
# Import Required Libraries #
#############################
from __future__ import print_function
import os, sys, fnmatch, argparse
import numpy as np
from make_patches_MT import *
from args import get_parser
from PIL import Image
##########################
# Import Keras Libraries #
##########################
from keras.models import Model
from keras.layers import Convolution2D, MaxPooling2D, BatchNormalization
from keras.layers import Input, UpSampling2D, Dropout, Concatenate
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import backend as K
from keras.optimizers import SGD, Adam
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between flattened masks (smooth=1 avoids 0/0)."""
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so that minimizing maximizes overlap."""
    return -dice_coef(y_true, y_pred)
def multi_dice_coef_loss(y_true, y1_pred, y2_pred, y3_pred):
    """Weighted (0.5/0.7/1.0) negated Dice loss over the three stage outputs."""
    return -0.5 * dice_coef(y_true, y1_pred) - 0.7 * dice_coef(y_true, y2_pred) - dice_coef(y_true, y3_pred)
#########################
# DSC-Unet Architecture #
#########################
# cross
# cross
def get_unet(patch_height,patch_width, n_ch):
    """Build the three-stage stacked ("DSC") U-Net.

    Three chained encoder/decoder stages each emit a sigmoid mask through a
    single shared 1x1 convolution; later stages concatenate skip tensors
    from earlier stages.  Compiled with per-output Dice loss weighted
    0.2/0.3/0.5.

    :param patch_height: input patch height in pixels
    :param patch_width: input patch width in pixels
    :param n_ch: number of input channels
    :return: compiled keras Model with three outputs
    """
    inputs = Input((patch_height, patch_width, n_ch))
    # ---- Stage 1 encoder ----
    conv1 = Convolution2D(32, (3, 3), activation='relu', padding='same')(inputs)
    #conv1 = BatchNormalization()(conv1)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv2)
    #conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool2)
    #conv3 = BatchNormalization()(conv3)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv3)
    # ---- Stage 1 decoder (skip connections to conv2/conv1) ----
    up1 = Concatenate(axis = -1)([UpSampling2D(size=(2, 2))(conv3), conv2])
    conv4 = Convolution2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv4)
    up2 = Concatenate(axis = -1)([UpSampling2D(size=(2, 2))(conv4), conv1])
    conv5 = Convolution2D(32, (3, 3), activation='relu', padding='same')(up2)
    #conv5 = BatchNormalization()(conv5)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv5)
    # ---- Stage 2: re-encode stage 1's features ----
    conv21 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv5)
    #conv21 = BatchNormalization()(conv21)
    conv21 = Dropout(0.2)(conv21)
    conv21 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv21)
    pool21 = MaxPooling2D(pool_size=(2, 2))(conv21)
    conv22 = Convolution2D(64, (3, 3), activation='relu', padding='same')(pool21)
    #conv22 = BatchNormalization()(conv22)
    conv22 = Dropout(0.2)(conv22)
    conv22 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv22)
    pool22 = MaxPooling2D(pool_size=(2, 2))(conv22)
    conv23 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool22)
    #conv23 = BatchNormalization()(conv23)
    conv23 = Dropout(0.2)(conv23)
    conv23 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv23)
    # Stage 2 decoder also pulls cross-stage skips from stage 1.
    up21 = Concatenate(axis = -1)([UpSampling2D(size=(2, 2))(conv23), conv22, conv2])
    conv24 = Convolution2D(64, (3, 3), activation='relu', padding='same')(up21)
    #conv24 = BatchNormalization()(conv24)
    conv24 = Dropout(0.2)(conv24)
    conv24 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv24)
    up22 = Concatenate(axis = -1)([UpSampling2D(size=(2, 2))(conv24), conv21, conv1])
    conv25 = Convolution2D(32, (3, 3), activation='relu', padding='same')(up22)
    conv25 = Dropout(0.2)(conv25)
    conv25 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv25)
    # ---- Stage 3: final refinement with skips from both earlier stages ----
    conv31 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv25)
    conv31 = Dropout(0.2)(conv31)
    conv31 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv31)
    pool31 = MaxPooling2D(pool_size=(2, 2))(conv31)
    conv32 = Convolution2D(64, (3, 3), activation='relu', padding='same')(pool31)
    conv32 = Dropout(0.2)(conv32)
    conv32 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv32)
    pool32 = MaxPooling2D(pool_size=(2, 2))(conv32)
    conv33 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool32)
    conv33 = Dropout(0.2)(conv33)
    conv33 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv33)
    up31 = Concatenate(axis = -1)([UpSampling2D(size=(2, 2))(conv33), conv32, conv22, conv2])
    conv34 = Convolution2D(64, (3, 3), activation='relu', padding='same')(up31)
    conv34 = Dropout(0.2)(conv34)
    conv34 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv34)
    up32 = Concatenate(axis = -1)([UpSampling2D(size=(2, 2))(conv34), conv31, conv21, conv1])
    conv35 = Convolution2D(32, (3, 3), activation='relu', padding='same')(up32)
    conv35 = Dropout(0.2)(conv35)
    conv35 = Convolution2D(32, (3, 3), activation='relu', padding='same')(conv35)
    # One shared 1x1 sigmoid conv produces all three masks (weight sharing).
    squeeze_conv = Convolution2D(1,(1,1), activation='sigmoid')
    conv7 = squeeze_conv(conv5)
    conv27 = squeeze_conv(conv25)
    conv37 = squeeze_conv(conv35)
    model = Model(input=inputs, output=[conv7, conv27, conv37])
    model.compile(optimizer=Adam(lr=1e-4), loss = dice_coef_loss, loss_weights = [0.2, 0.3, 0.5], metrics=['accuracy'])
    print ('DSC-Unet Loaded')
    return model
def training_set_generator(training_list, batch_size, patch_dims):
    """Endlessly yield (images, [mask, mask, mask]) batches for the 3-head U-Net.

    :param training_list: array of (image_path, mask_path) pairs
        (as produced by get_training_list); reshuffled every epoch.
    :param batch_size: number of image/mask pairs per yielded batch.
    :param patch_dims: (height, width) of each patch.
    """
    while True:
        # Reshuffle the pair list at the start of every epoch.
        np.random.shuffle(training_list)
        # BUG FIX: '/' yields a float under Python 3 and range() rejects it;
        # use integer division to get the number of whole batches.
        # (Also renamed: 'slice' shadowed the builtin.)
        num_batches = len(training_list) // batch_size
        for batch_idx in range(num_batches):
            imgs_train = np.zeros((batch_size, patch_dims[0], patch_dims[1], 1), dtype='float32')
            mask_train = np.zeros((batch_size, patch_dims[0], patch_dims[1], 1), dtype='float32')
            for element in range(batch_size):
                pair_idx = batch_idx * batch_size + element
                img, mask = read_images(training_list[pair_idx][0], training_list[pair_idx][1])
                # Standardize each image to zero mean / unit variance.
                meanVal = np.mean(img)
                stdVal = np.std(img)
                img -= meanVal
                img /= stdVal
                imgs_train[element, :, :, 0] = img
                mask_train[element, :, :, 0] = mask
            # Three identical mask targets: one per supervised output head.
            yield (imgs_train, [mask_train, mask_train, mask_train])
def get_training_list(path, extension):
    """Collect matching image/mask paths under *path* and pair them.

    Returns an (N, 2) numpy array of [image_path, mask_path] rows; images
    come from <path>/img_list/ and masks from <path>/mask_list/, both
    sorted so that row i pairs the i-th image with the i-th mask.
    """
    print(path)
    image_files = []
    mask_files = []
    for folder, bucket in ((path + '/img_list/', image_files),
                           (path + '/mask_list/', mask_files)):
        for root, _dirs, names in os.walk(folder):
            for name in fnmatch.filter(names, extension):
                bucket.append(os.path.join(root, name))
    image_files.sort()
    mask_files.sort()
    paired = np.array([np.array(image_files), np.array(mask_files)])
    return paired.transpose()
def read_images(img_path_1, img_path_2):
    """Load two files as grayscale float32 arrays scaled into [0, 1]."""
    loaded = []
    for file_path in (img_path_1, img_path_2):
        grayscale = Image.open(file_path).convert('L')  # force single channel
        pixels = np.array(grayscale).astype('float32')
        pixels /= 255.
        loaded.append(pixels)
    return np.array(loaded[0]), np.array(loaded[1])
def main(args):
    """Entry point: train the DSC-Unet or run prediction, depending on args.mode.

    args must provide: mode ('train' or 'predict'), input_path, output_path,
    weights_path, data_type (a filename glob such as '*.png') and patch_size.
    """
    mode = args.mode
    input_path = args.input_path
    output_path = args.output_path
    weights_path = args.weights_path
    data_type = args.data_type
    patch_size = (args.patch_size, args.patch_size)
    if mode == 'train':
        training_list = get_training_list(input_path, data_type)
        total_length = len(training_list)
        # Hold out the last 10% of the pair list for validation.
        validation_split = 0.1
        train_data_length = int (total_length * (1 - validation_split))
        validation_data_length = int (total_length * validation_split)
        total_length = int (total_length)
        print("Creating Generators...")
        # NOTE(review): generators hard-code batch size 64 and 128x128 patches
        # regardless of args.patch_size — confirm this mismatch is intentional.
        train_data_gen = training_set_generator(training_list[:train_data_length],64, (128,128))
        val_data_gen = training_set_generator(training_list[train_data_length:],64, (128,128))
        val_steps = (total_length - train_data_length)//64
        print("Beginning Training...")
        model = get_unet(patch_size[0], patch_size[1], 1)
        # Keep only the weights with the lowest validation loss seen so far.
        ckpt = ModelCheckpoint(filepath=weights_path, verbose=1, monitor='val_loss', mode='auto', save_best_only=True)
        model.fit_generator(train_data_gen, train_data_length//64, epochs= 2, verbose=1, callbacks=[ckpt], validation_data=val_data_gen, validation_steps = val_steps)
    elif mode == 'predict':
        img_paths = get_images_pre(input_path, extension=data_type, recursive=True)
        # Create the three output folders if they do not exist yet.
        if not os.path.exists(output_path + 'predict'):
            os.makedirs(output_path + 'predict')
        if not os.path.exists(output_path + 'original'):
            os.makedirs(output_path + 'original')
        if not os.path.exists(output_path + 'merged'):
            os.makedirs(output_path + 'merged')
        for i, img_path in enumerate(img_paths):
            img = Image.open(img_path).convert('L')
            img = np.array(img).astype('float32')
            img /= np.max(img)
            og_img = np.array(img)
            # Standardize the network input the same way as during training.
            meanVal = np.mean(img)
            stdVal = np.std(img)
            img -= meanVal
            img /= stdVal
            img = img.reshape(1, img.shape[0], img.shape[1], 1)
            # NOTE(review): the model is rebuilt and weights reloaded per image,
            # presumably because input sizes vary — hoist out of the loop if all
            # images share one size.
            model = get_unet(*img.shape[1:])
            model.load_weights(weights_path)
            # The network has three supervised heads; the last one is the final mask.
            o1, o2, out_img = model.predict(img)
            im = out_img.reshape(img.shape[1], img.shape[2])
            # Rescale both prediction and original into 0..255 for saving.
            im = (im-np.min(im))/(np.max(im)-np.min(im)) * 255.0
            og_img = (og_img-np.min(og_img))/(np.max(og_img)-np.min(og_img)+0.000000001) * 255.0
            path_list = img_path.split(os.sep)
            name_the_file = (path_list[-1].split('.'))[0]
            #og_img = np.power(og_img, 0.5)
            # Blend the prediction over the original and stack into an RGB overlay.
            blended_im = 0.2*og_img + 0.8*im
            stacked_im = np.dstack((og_img, og_img, blended_im))
            anno_img = np.array(stacked_im).astype('uint8')
            print (anno_img.shape)
            print (og_img.shape)
            im = Image.fromarray(im).convert('L')
            og_img = Image.fromarray(og_img).convert('L')
            im.save(output_path + 'predict/' + name_the_file + '_predict.png', 'PNG')
            og_img.save(output_path + 'original/' + name_the_file + '_original.png', 'PNG')
            anno_img = Image.fromarray(anno_img).convert('RGB')
            anno_img.save(output_path + 'merged/' + name_the_file + '_annotated.png', 'PNG')
# Script entry point: parse the CLI arguments and dispatch to train/predict.
if __name__ == "__main__":
    print ("running")
    args = get_parser()
    main(args)
22,862 | 07e97924dfded6c24721f1324ca3643336ca1938 | '''
Copyright (C) 2017 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from hashlib import md5
import bpy
from bmesh.types import BMesh, BMVert, BMEdge, BMFace
from mathutils import Vector, Matrix
from .maths import (
Point, Direction, Normal, Frame,
Point2D, Vec2D, Direction2D,
Ray, XForm, BBox, Plane
)
class Hasher:
    """Incrementally fold values into a single MD5 digest."""

    def __init__(self):
        self._hasher = md5()

    def add(self, s):
        """Mix the UTF-8 text form of *s* into the running digest."""
        encoded = str(s).encode('utf8')
        self._hasher.update(encoded)

    def get_hash(self):
        """Return the current digest as a hex string."""
        return self._hasher.hexdigest()
def hash_cycle(cycle):
    """Return a canonical string hash of a cycle, independent of its start
    point and winding direction."""
    hashes = [hash(v) for v in cycle]
    # Rotate so the smallest element hash leads: canonical start point.
    start = hashes.index(min(hashes))
    hashes = rotate_cycle(hashes, -start)
    # Canonical direction: walk toward the smaller of the two neighbors.
    if hashes[1] > hashes[-1]:
        hashes.reverse()
        hashes = rotate_cycle(hashes, 1)
    return ' '.join(str(c) for c in hashes)
def hash_object(obj:bpy.types.Object):
    """Build a cheap fingerprint tuple for a Blender mesh object (element counts,
    bbox, vertex sum, world transform, key modifier settings) so callers can
    detect when the object has changed."""
    if obj is None: return None
    assert type(obj) is bpy.types.Object, "Only call hash_object on mesh objects!"
    assert type(obj.data) is bpy.types.Mesh, "Only call hash_object on mesh objects!"
    # get object data to act as a hash
    me = obj.data
    counts = (len(me.vertices), len(me.edges), len(me.polygons), len(obj.modifiers))
    if me.vertices:
        # NOTE(review): min()/max() over mathutils Vectors picks extreme
        # vertices by Vector ordering — confirm this yields a true axis-aligned
        # bounding box rather than two arbitrary extreme vertices.
        bbox = (tuple(min(v.co for v in me.vertices)), tuple(max(v.co for v in me.vertices)))
    else:
        bbox = (None, None)
    # Sum of all vertex coordinates: changes whenever any vertex moves.
    vsum = tuple(sum((v.co for v in me.vertices), Vector((0,0,0))))
    # Flattened 4x4 world matrix so object transforms affect the fingerprint.
    xform = tuple(e for l in obj.matrix_world for e in l)
    mods = []
    for mod in obj.modifiers:
        if mod.type == 'SUBSURF':
            mods += [('SUBSURF', mod.levels)]
        elif mod.type == 'DECIMATE':
            mods += [('DECIMATE', mod.ratio)]
        else:
            mods += [(mod.type)]
    hashed = (counts, bbox, vsum, xform, hash(obj), str(mods)) # ob.name???
    return hashed
def hash_bmesh(bme:BMesh):
    """Build a cheap fingerprint tuple for a BMesh (element counts, bbox
    extremes, vertex sum) used to detect geometry changes."""
    if bme is None: return None
    assert type(bme) is BMesh, 'Only call hash_bmesh on BMesh objects!'
    counts = (len(bme.verts), len(bme.edges), len(bme.faces))
    bbox = BBox(from_bmverts=bme.verts)
    # Sum of all vertex coordinates: changes whenever any vertex moves.
    vsum = tuple(sum((v.co for v in bme.verts), Vector((0,0,0))))
    # bbox.min/max are None for an empty mesh; keep the tuple shape stable.
    hashed = (counts, tuple(bbox.min) if bbox.min else None, tuple(bbox.max) if bbox.max else None, vsum)
    return hashed
|
22,863 | 7d1ccc30867ed59a279607dfa127b3ae58851f1d | from engineeringorange.messages.models import *
from engineeringorange.resume.models import *
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseRedirect
from django.shortcuts import redirect, render_to_response, get_object_or_404
import datetime
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def viewall(request, userid):
    """Inbox view: all messages received by the account, newest first."""
    account = get_object_or_404(Accounts, userid=userid)
    inbox = Messages.objects.filter(toid=account).distinct().order_by('senddate').reverse()
    context = {'user': account, 'messages': inbox}
    return render_to_response('inbox.html', context, context_instance=RequestContext(request))
@login_required
def sent(request, userid):
    """Sent-box view: all messages the account has sent, newest first."""
    account = get_object_or_404(Accounts, userid=userid)
    outbox = Messages.objects.filter(fromid=account).distinct().order_by('senddate').reverse()
    context = {'user': account, 'messages': outbox, 'sent': account}
    return render_to_response('inbox.html', context, context_instance=RequestContext(request))
@login_required
def viewmsg(request, userid, msgid):
    """Show one message if the account is its sender or recipient; stamp the
    read time on the recipient's first view, otherwise bounce to the inbox."""
    account = get_object_or_404(Accounts, userid=userid)
    message = get_object_or_404(Messages, msgid=msgid)
    # NOTE(review): these filters compare fromid/toid against account.userid,
    # while viewall/sent filter with the Accounts instance itself — confirm the
    # FK fields accept both, otherwise one style silently matches nothing.
    fromUser = Messages.objects.filter(fromid = account.userid, msgid = msgid)
    toUser = Messages.objects.filter(toid = account.userid, msgid = msgid)
    replyButton = BlankMessageForm(request.POST or None)  # NOTE(review): never used below
    deleteButton = BlankMessageForm(request.POST or None)
    if fromUser or toUser:
        if toUser:
            # Recipient is viewing: record when the message was read.
            message.readdate = datetime.datetime.now()
            message.save()
        return render_to_response('message.html', {'user': account, 'message': message, 'delete': deleteButton}, context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect('/messages/' + str(account.userid))
@login_required
def reply(request, userid, msgid):
    """Reply to a message: the other participant of the original becomes the
    recipient and the subject gets an 'RE: ' prefix. Accounts that are not a
    participant are redirected back to their inbox."""
    account = get_object_or_404(Accounts, userid=userid)
    message = get_object_or_404(Messages, msgid=msgid)
    form = MessageForm(request.POST or None, initial={'subject': 'RE: '+ str(message.subject)})
    # Default: the reply goes back to the original sender.
    to = message.fromid
    sender = message.toid
    # NOTE(review): fromid/toid are resolved by email here, but compose() assigns
    # fromid = account (an Accounts instance) — verify what these fields store.
    if account == get_object_or_404(Accounts, email= message.fromid):
        # The original sender is replying on their own message: invert direction.
        to = message.toid
        sender = message.fromid
    if account == get_object_or_404(Accounts, email = sender):
        if request.POST and form.is_valid() :
            newmsg = form.save(commit=False)
            newmsg.fromid = account
            newmsg.toid = get_object_or_404(Accounts, email = to)
            newmsg.senddate = datetime.datetime.now()
            newmsg.save()
            return HttpResponseRedirect('/message/'+ str(account.userid) +'/' +str(newmsg.msgid))
        else:
            return render_to_response('createmessage.html/', {'form': form, 'user' : account}, context_instance=RequestContext(request))
    return HttpResponseRedirect('/messages/' + str(account.userid))
@login_required
def delete(request, userid, msgid):
    """Delete a message on POST, then redirect to the sent box (if the deleter
    was the sender) or the inbox.

    NOTE(review): the message is deleted before any ownership check, so any
    logged-in user who reaches this URL can delete any message — an
    authorization check (account is the message's sender or recipient) should
    run before delete(). Also message.fromid is read after delete(); that only
    works because Django keeps the in-memory instance populated.
    """
    account = get_object_or_404(Accounts, userid=userid)
    if request.POST:
        message = get_object_or_404(Messages, msgid=msgid)
        message.delete()
        if account == get_object_or_404(Accounts, email = message.fromid):
            return HttpResponseRedirect('/messages/sent/' + str(account.userid))
    return HttpResponseRedirect('/messages/' + str(account.userid))
@login_required
def compose(request, userid):
    """Compose a new message; the form variant depends on whether the account
    is a jobseeker or an employer."""
    account = get_object_or_404(Accounts, userid=userid)
    is_seeker = Jobseeker.objects.filter(userid=userid).distinct()
    form_cls = SeekerMessageForm if is_seeker else EmployerMessageForm
    form = form_cls(request.POST or None)
    if request.POST and form.is_valid():
        draft = form.save(commit=False)
        draft.fromid = account
        draft.senddate = datetime.datetime.now()
        draft.save()
        return HttpResponseRedirect('/message/'+ str(account.userid) +'/' +str(draft.msgid))
    return render_to_response('createmessage.html/', {'form': form, 'user' : account}, context_instance=RequestContext(request))
|
22,864 | 6fd19f010b3cadb358b247d8a4c15002f11b99d9 | import argparse
import csv
import pprint
from observation_timeline import ObservationTimeline
from observation import Observation
def load_timeline(filename):
    """Loads an observations CSV file.

    :param str filename: The name of the observations CSV file to read
    :returns: A tuple with the following items:
        - A dictionary that maps a suspect's name to the item they are
          currently carrying (based on data in the CSV file).
        - An ObservationTimeline that contains the observation data
          loaded from the CSV file.
    :rtype: tuple
    :raises ValueError: If a row does not split into exactly four
        columns (name, location, time, item).
    :raises OSError: If there is an issue finding or opening the file.
    """
    try:
        timeline = ObservationTimeline()
        # Maps suspect name -> the item they were last seen carrying.
        carrying = {}
        with open(filename, newline='') as csvfile:
            # NOTE(review): delimiter='\n' makes each physical line one field,
            # which is then split on ',' by hand below; a default csv.reader
            # would handle quoting more robustly.
            obs = csv.reader(csvfile, delimiter='\n')
            for row in obs:
                col = tuple(row[0].split(','))
                # Reject rows with too few or too many columns.
                if not len(col) == 4:
                    raise ValueError("Unpacking row error")
                # Record the sighting: (name, location, time).
                timeline.add(Observation(col[0], col[1], col[2]))
                # A non-empty fourth column means the suspect carries that item.
                if not col[3] == '':
                    carrying.update({col[0]: col[3]})
        return (carrying, timeline)
    except OSError as err:
        # BUG FIX: chain the original error so the filename/errno details from
        # open() are not discarded when re-raising.
        raise OSError("Cannot open file") from err
def main(args):
    """Program entry point.

    Loads the observations CSV, replays every rendezvous to track how items
    change hands, printing each exchange when --exchanges is set and the
    final owner of --item when one is given; otherwise pretty-prints the
    final suspect -> item mapping.

    :param argparse.Namespace args: Parsed command line arguments.
    :returns: Nothing
    """
    carrying, timeline = load_timeline(args.observations)
    # Replay the rendezvous in order, swapping the held items each time.
    for pair in timeline.rendezvous():
        first, second = pair[0], pair[1]
        if args.exchanges:
            print(first.name + " meets with " +
                  second.name +
                  " to exchange " + carrying[first.name] +
                  " for " + carrying[second.name] + ".")
        # Trade the two items.
        carrying[first.name], carrying[second.name] = (
            carrying[second.name], carrying[first.name])
    # With no specific item requested (or when exchanges were shown),
    # print the full final mapping.
    if (args.item == '') or (args.exchanges):
        pprint.pprint(carrying, indent=4)
    # When a specific item was requested, report its final holder.
    if not args.item == '':
        for owner, held in carrying.items():
            if held == args.item:
                print(owner + " had the " + held)
# Script entry point: build the CLI, parse arguments and run main().
if __name__ == '__main__':
    # Initialize CLI argument parser
    parser = argparse.ArgumentParser(
        description='List rendezvous exchanges based on a '
                    'spreadsheet of suspect observations.'
    )
    # Add a positional argument for the observations file.
    parser.add_argument('observations',
                        help='A CSV file to read observations from.')
    # Add an optional flag, so that the user can tell us which item
    # they want to see the owner of
    parser.add_argument('--item', type=str, default='',
                        help='An optional item to print the owner of.')
    # Add an optional flag, that will tell us to print exchanges as
    # they occur instead of printing the whole mapping at the end.
    parser.add_argument('--exchanges', action='store_true',
                        help='Print all exchanges')
    # Parse the arguments
    args = parser.parse_args()
    # GO!
    main(args)
|
22,865 | 834320c131d22bfd0bda498c6f683a1f136f5d30 | from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
# 2-D Burgers'-type equation on [0,2]x[0,2]: upwind convection + central diffusion.
#variable declarations
nx = 41
ny = 41
nt = 480
c = 1.2
dx = 2.0/(nx-1)
dy = 2.0/(ny-1)
sigma = 0.0009
nu = 0.01
dt = sigma*dx*dy/nu

x = np.linspace(0,2,nx)
y = np.linspace(0,2,ny)

#num y corresponds to rows
#num x corresponds to cols
u = np.ones((ny,nx))
v = np.ones((ny,nx))
un = np.ones((ny,nx))
vn = np.ones((ny,nx))
comb = np.ones((ny,nx))

# assign initial conditions: a square "hat" (u=10, v=1) on 0.5 <= x,y <= 1
# BUG FIX: NumPy requires integer slice indices; the original float slices
# (u[0.5/dy:1./dy+1., ...]) raise an error on any modern NumPy / Python 3.
u[int(0.5/dy):int(1./dy)+1, int(0.5/dx):int(1./dx)+1] = 10.
v[int(0.5/dy):int(1./dy)+1, int(0.5/dx):int(1./dx)+1] = 1.

#plot ICs
fig = plt.figure(figsize=(11,7), dpi=100)
ax = fig.gca(projection='3d')
X,Y = np.meshgrid(x,y)
wire1 = ax.plot_wireframe(X,Y,u[:], cmap=cm.coolwarm)
wire2 = ax.plot_wireframe(X,Y,v[:], cmap=cm.coolwarm)
plt.show()

#advance in time
for n in range(nt+1):
    #store copy of old values
    un = u.copy()
    vn = v.copy()
    #code the numerical scheme, array operations
    u[1:-1,1:-1] = un[1:-1,1:-1] - dt/dx*un[1:-1,1:-1]*(un[1:-1,1:-1]-un[0:-2,1:-1]) \
        - dt/dy*vn[1:-1,1:-1]*(un[1:-1,1:-1]-un[1:-1,0:-2]) \
        + nu*dt/dx**2*(un[2:,1:-1] - 2.*un[1:-1,1:-1] + un[0:-2,1:-1]) \
        + nu*dt/dy**2*(un[1:-1,2:] - 2.*un[1:-1,1:-1] + un[1:-1,0:-2])
    v[1:-1,1:-1] = vn[1:-1,1:-1] - dt/dx*un[1:-1,1:-1]*(vn[1:-1,1:-1]-vn[0:-2,1:-1]) \
        - (dt/dy)*vn[1:-1,1:-1]*(vn[1:-1,1:-1]-vn[1:-1,0:-2]) \
        + nu*dt/dx**2*(vn[2:,1:-1] - 2.*vn[1:-1,1:-1] + vn[0:-2,1:-1]) \
        + nu*dt/dy**2*(vn[1:-1,2:] - 2.*vn[1:-1,1:-1] + vn[1:-1,0:-2])
    #reestablishing bcs: u = v = 1 on all four edges
    u[0,:] = 1 #first line across x
    u[-1,:] = 1 #last line across x
    u[:,0] = 1 #first line down y
    u[:,-1] = 1 #last line down y
    v[0,:] = 1
    v[-1,:] = 1
    v[:,0] = 1
    v[:,-1] = 1

#plotting the final state
fig = plt.figure(figsize=(11,7), dpi=100)
ax = fig.gca(projection='3d')
X,Y = np.meshgrid(x,y)
wire1 = ax.plot_wireframe(X,Y,u)
wire2 = ax.plot_wireframe(X,Y,v)
plt.show()
|
22,866 | 772448f99a59ac23c8c496ccc7431507e35e3843 | # -*- mode: python; coding: utf-8 -*-
#
###########################################################################
# Fichero: utilidades.py
# -------------------------------------------------------------------------
# Proyecto: C.E.S.P.E.D.
# Autor: Josรฉ L. Domenech
# Descripcion:
#
# Varias utilidades
#
# Requiere: random, time
# -------------------------------------------------------------------------
# Historia:
# + 05/11/2019 - Primera version
###########################################################################
import random
import time
FLOAT_MS_IN_SEG = 1000.0
def identidad(arg):
    """Identity function: return the argument unchanged."""
    return arg
class __Constante:
    '''Module-private holder that closes over the value passed to
    `constantemente`.'''
    def __init__(self, val):
        '''Store the constant value on the instance.'''
        self.valor = val
    def obtener_valor(self):
        '''Return the value given to the constructor.'''
        return self.valor
    # Class-level default; shadowed by the instance attribute set in __init__.
    valor = None
def constantemente(arg):
    '''Return a zero-argument callable that always yields the given value.'''
    holder = __Constante(arg)
    return holder.obtener_valor
def timestamp():
    '''Return the time since the epoch (time.gmtime(0)) in milliseconds,
    as an int (millisecond precision only if the system provides it).'''
    seconds_now = time.time()
    return int(seconds_now * FLOAT_MS_IN_SEG)
# Module initialization: seed the RNG with the current time in milliseconds.
random.seed(timestamp())
|
22,867 | 09ec1fdc9c8ed7fa2f632a6598417b47246c55d9 | #script to install latest version from API repository from GitHub
import os

# Delete the old copies of the programs before fetching fresh ones.
os.system("rm TGpub.py")
os.system("rm VOCGpub.py")
os.system("rm updateAPI.py")

# Clone the repository. BUG FIX: os.system blocks until the command finishes,
# so the previous time.sleep(10) after the clone was dead waiting and is removed.
os.system("git clone https://github.com/Anaphite/Data-Logging.git")

# Move the needed files out of the cloned directory to where update.py is stored.
os.system("mv /home/pi/Temperature/Data-Logging/TGpub.py /home/pi/Temperature/TGpub.py")
os.system("mv /home/pi/Temperature/Data-Logging/VOCGpub.py /home/pi/Temperature/VOCGpub.py")
os.system("mv /home/pi/Temperature/Data-Logging/updateAPI.py /home/pi/Temperature/updateAPI.py")

# Remove the clone now that the relevant contents have been extracted.
os.system("rm -r -f /home/pi/Temperature/Data-Logging")
|
22,868 | 177f05d6bf24e46c6b4c931e295380bb8eab4ae8 | # algothrim selection sort
import random

# Fill a list with 8 random integers in [1, 1000).
box = [random.randrange(1, 1000, 1) for _ in range(8)]
print(box)

# Selection sort.
# BUG FIX: the original scanned the WHOLE list on every pass (so already-placed
# smaller elements were re-selected, corrupting the order) and reused a stale
# `pos` from the previous pass when no smaller element was found. The inner scan
# must start at i+1 and the minimum position must be reset each pass.
for i in range(len(box)):
    min_pos = i
    for j in range(i + 1, len(box)):
        if box[j] < box[min_pos]:
            min_pos = j
    # Swap the smallest remaining element into position i.
    box[i], box[min_pos] = box[min_pos], box[i]

print(box)
|
22,869 | 1b89096bb69031e14664cc567e90ade5986ac189 | N = int(input())
# Count the distinct pair partners for N: for i in 1..N/2-1 the partner is
# N - i, skipping the (impossible here) self-pair.
target_list = []
# Round an odd N up to the next even number so N/2 below is whole.
if N % 2 == 1:
    N += 1
for i in range(1, int(N/2)):
    target = N - i
    # Skip the case where the pair would be (i, i).
    if target == i:
        continue
    else:
        target_list.append(target)
print(len(target_list))
22,870 | 76756225e947e0af97d3e634108c98696cc80dd6 | # -*- coding: utf-8 -*-
"""
babel decorators module.
"""
import pyrin.globalization.locale.babel.services as babel_services
def babel_cli_handler(**options):
    """
    decorator to register a babel cli handler.

    :keyword bool replace: specifies that if there is another registered
                           cli handler with the same name, replace it
                           with the new one, otherwise raise an error.
                           defaults to False.

    :raises InvalidCLIHandlerTypeError: invalid cli handler type error.
    :raises DuplicatedCLIHandlerError: duplicated cli handler error.

    :returns: babel cli handler class.
    :rtype: BabelCLIHandlerBase
    """

    def register(handler_cls):
        """
        instantiates the decorated class, registers that instance into the
        available babel cli handlers and returns the class untouched.

        :param BabelCLIHandlerBase handler_cls: babel cli handler class.

        :returns: babel cli handler class.
        :rtype: BabelCLIHandlerBase
        """
        babel_services.register_cli_handler(handler_cls(), **options)
        return handler_cls

    return register
|
22,871 | 491b012e024622ffbd6914297c99b6e7927de1e8 | from math import log, ceil
# Precompute the first 27 powers of two: two[i] == 2**i.
two = []
for i in range(27):
    if i == 0:
        two.append(1)
    else:
        two.append(two[i-1]*2)
# For each test case: read c, find the smallest d with 2**d > c, then maximize
# i * (c ^ i) over every i whose XOR with c still fits in d bits.
for _ in range(int(input())):
    c = int(input())
    d = 0
    for i in range(27):
        if two[i] > c:
            d = i
            break
    m = 0
    # need to optimize this part  (brute force over all 2**d candidate masks)
    for i in range(2**d):
        if c ^ i < 2**d:
            m = max(m, i*(c ^ i))
    print(m)
|
22,872 | c44400bbb91c860400e95fd1c8ecd671d7461087 | # Generated by Django 2.2.5 on 2020-03-29 14:35
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Payment model (follows 0022_payment)."""

    dependencies = [
        ('myapp', '0022_payment'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Payment',
        ),
    ]
|
22,873 | fd62b3e1591773f620ffb1c18ec132f15561630b | from .encryption import *
def getED(inputStr):
    """Split a string into (first half, second half); for odd lengths the
    extra character goes to the second half."""
    midpoint = int(len(inputStr) / 2)
    return inputStr[:midpoint], inputStr[midpoint:]
def transformOut(inputStr):
    """Interleave the two halves of *inputStr* character by character."""
    first, second = getED(inputStr)
    pieces = []
    for idx in range(max(len(first), len(second))):
        if idx < len(first):
            pieces.append(first[idx])
        if idx < len(second):
            pieces.append(second[idx])
    return "".join(pieces)
def decrypt(input: str, key: str):
    """Reverse the companion encryption: strip the key-derived hash marker from
    *input*, de-interleave the halves, drop the key text and reverse the result.

    NOTE(review): relies on hasNumbers/transformnumbers/getNumbers/lenList/
    transformIn from .encryption, whose exact semantics are not visible here.
    """
    hash_str = ""
    # Derive the embedded hash marker from the key; the transform used depends
    # on whether the key contains digits.
    if hasNumbers(key):
        res_str = transformnumbers(key, getNumbers)
        hash_str += transformIn(res_str, "")
    else:
        res_str = transformnumbers(key, lenList)
        hash_str += transformIn(res_str, "")
    # Remove every occurrence of the marker from the ciphertext.
    input = input.replace(hash_str, '')
    # Undo the half-interleave, drop the key characters, then reverse.
    result = transformOut(input).replace(key, '')[::-1]
    return result
22,874 | 4ca40e624dd6c87d4d12efc90735313daa66a24b | /home/runner/.cache/pip/pool/be/f0/32/52fa1b880078e8ba5dec387258f829ddc59d15a56ef7fe4fdfd4c45cc3 |
22,875 | 56a0a30b90d112b99f91766b610b36e81f46c9e1 | import csv
import os
from os import path
import sys
import argparse
import matplotlib.pyplot as plt
import statistics
'''
Script that plots the diffusion of the different events during the simulation, calculating the persons reached.
'''
def parse_arg(argv):
    """Parse command line options: --file is the path to the log file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--file', type=str, help='Path to the log file')
    parsed = arg_parser.parse_args(argv)
    return parsed
# Read the log file and build a dictionary describing event diffusion.
def read_events(file):
    """Return {event: [creation_tick, {person_id: first_arrival_tick}]}
    parsed from the simulation log CSV."""
    log_handle = open(str(file))
    rows = list(csv.DictReader(log_handle, delimiter=','))
    events = {}
    # First pass: record the tick at which each event was created.
    for entry in rows:
        if entry['createdEvents'] != "":
            for name in entry['createdEvents'].split(","):
                events[name] = [float(entry['tick']), {}]
    # Second pass: record the first tick each person received each event.
    for entry in rows:
        if entry['arrivedEvents'] != "":
            for name in entry['arrivedEvents'].split(","):
                if entry['id'] not in events[name][1]:
                    events[name][1][entry['id']] = float(entry['tick'])
    # events: <creation_tick, {person: arrival_tick}> per event name
    return events
def plot(events):
    """Draw one diffusion curve per event: cumulative persons reached vs tick."""
    for name in events:
        record = events[name]
        # x: creation tick followed by each arrival tick;
        # y: cumulative count of persons reached (creation counts as 0th point).
        ticks = [record[0]] + list(record[1].values())
        reached = list(range(len(record[1]) + 1))
        plt.plot(ticks, reached, label=name)
    plt.xlabel('Tick')
    # Set the y axis label of the current axis.
    plt.ylabel('Diffusion of events')
    # Set a title of the current axes.
    plt.title('Events diffusion during simulation')
    # Display a figure.
    plt.show()
def main():
    """Entry point: parse args, load the event log and plot diffusion curves."""
    args = parse_arg(sys.argv[1:])
    events = read_events(args.file)
    plot(events)

if __name__ == "__main__":
    main()
22,876 | 44b4ca9166d70c56a298f2fad88117c1189a9cab | from person import Person
class Instructor:
    """An instructor with a specialty who can assign exercises to students.

    NOTE(review): __init__ calls super().__init__(self, ...) — with no base
    class beyond object this raises TypeError, and passing self explicitly is
    wrong for a bound super() call. Given `from person import Person` at the
    top of the file, this was presumably meant to be `class Instructor(Person):`
    with `super().__init__(first_name, last_name, slack_handle, cohort)`;
    confirm against person.Person before changing.
    """
    def __init__(self, first_name, last_name, slack_handle, cohort, specialty):
        super().__init__(self, first_name, last_name, slack_handle, cohort)
        self.specialty = specialty

    def assign_exercise(self, exercise, student):
        """Append *exercise* to the student's exercises list."""
        student.exercises.append(exercise)
22,877 | f31b2b71b35ee8f0fb1a94a70f126e54e743681b | # -*- coding: utf-8 -*-
from flask_login import current_user
from .. import db
from ..models import OperationLog, AlertLog
# Record an operation log entry.
def record_operation_log(operation, module, user_id='', result=u'ๆๅ'):
    """Persist an OperationLog row for an action.

    :param operation: description of the action performed.
    :param module: module in which the action happened.
    :param user_id: fallback id used when no user is logged in.
    :param result: outcome text (defaults to "success").
    """
    # Prefer the authenticated user's id; keep the explicit argument otherwise
    # (e.g. for actions performed outside a login context).
    # Idiom fix: hasattr() replaces `'id' in dir(current_user)` and the no-op
    # `user_id = user_id` else-branch is dropped.
    if hasattr(current_user, 'id'):
        user_id = current_user.id
    payload = {
        'operation': operation,
        'operation_res': result,
        'user_id': user_id,
        'module': module
    }
    db.session.add(OperationLog(**payload))
    db.session.commit()
def record_alert_log(_threshold, _recording, _type, _desc, server_id):
    """Persist an AlertLog row for a server alert."""
    alert_fields = {
        'threshold': _threshold,
        'recording': _recording,
        'type': _type,
        'desc': _desc,
        'server_id': server_id
    }
    new_row = AlertLog(**alert_fields)
    db.session.add(new_row)
    db.session.commit()
|
22,878 | 7ec37c5ada1b52deaa43218cd9bed3afd36e7b2c | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Check target_rpath generator flag for ninja.
"""
import TestGyp
import re
import subprocess
import sys
# RPATH is an ELF concept, so this gyp test only runs on Linux.
if sys.platform.startswith('linux'):
    test = TestGyp.TestGyp(formats=['ninja'])

    CHDIR = 'target-rpath'
    # Generate the ninja files with an explicit target_rpath, then build all.
    test.run_gyp('test.gyp', '-G', 'target_rpath=/usr/lib/gyptest/', chdir=CHDIR)
    test.build('test.gyp', test.ALL, chdir=CHDIR)

    expect = '/usr/lib/gyptest/'
    # Dynamically linked executables must carry exactly the requested rpath.
    if test.run_readelf('shared_executable', chdir=CHDIR) != [expect]:
        test.fail_test()

    if test.run_readelf('shared_executable_no_so_suffix', chdir=CHDIR) != [expect]:
        test.fail_test()

    # A statically linked executable must carry no rpath entries at all.
    if test.run_readelf('static_executable', chdir=CHDIR):
        test.fail_test()

    test.pass_test()
|
22,879 | e5197f13ac0d66f3f7381887c99d4ca53de2728d |
class Solution:
    """LeetCode 500: keep the words that can be typed with one keyboard row."""

    def __init__(self):
        # Map each lowercase letter to its keyboard row (1=top, 2=home, 3=bottom).
        self.lookup = {}
        for row_number, row_letters in enumerate(
                ("qwertyuiop", "asdfghjkl", "zxcvbnm"), start=1):
            for letter in row_letters:
                self.lookup[letter] = row_number

    def findWords(self, words: List[str]) -> List[str]:
        """Return the input words (original casing, original order) whose
        letters all come from a single keyboard row."""
        res = []
        for word in words:
            rows_used = {self.lookup[ch] for ch in word.lower()}
            if len(rows_used) == 1:
                res.append(word)
        return res
22,880 | d7ecc0c2a7cc1ef7f3e8b167097aa3dc4471567b | from enum import Enum
from typing import List, Dict, Union, Tuple
class UserAgents:
    """Round-robin supplier of user-agent strings built from a fixed head
    and a rotating list of version suffixes."""

    def __init__(self, head: str, version: List[str]):
        self.head = head
        self.version = version
        # Start at -1 so the first call serves version[0].
        self.index = -1

    def get_next_user_agent(self):
        """Return the next '<head> <version>' string, cycling endlessly."""
        next_index = (self.index + 1) % len(self.version)
        self.index = next_index
        return '{} {}'.format(self.head, self.version[next_index])
class Language(Enum):
    """Locale codes (language_COUNTRY) selectable on the marketplace."""
    English = 'en_US'
    Spanish = 'es_ES'
    SimplifiedChinese = 'zh_CN'
    TraditionalChinese_ = 'zh_TW'
    German = 'de_DE'
    Portuguese = 'pt_BR'
    Korean = 'ko_KR'
    Hebrew = 'he_IL'
    Arabic = 'ar_AE'
    Hindi = 'hi_IN'
    Tamil = 'ta_IN'
    Telugu = 'te_IN'
    Kannada = 'kn_IN'
    Malayalam = 'ml_IN'
    Italian = 'it_IT'
    Swedish = 'sv_SE'
    French = 'fr_FR'
    Japanese = 'ja_JP'
    Dutch = 'nl_NL'
    Polish = 'pl_PL'
    Turkish = 'tr_TR'
    EnglishAustralia = 'en_AU'
    EnglishCanada = 'en_CA'
    EnglishSingapore = 'en_SG'
    EnglishSpain = 'en_ES'
    EnglishUnitedArabEmirates = 'en_AE'
    EnglishUnitedKingdom = 'en_GB'
    SpanishMexico = 'es_MX'
    SpanishUnitedStates = 'es_US'
class Currency(Enum):
    """ISO 4217 currency codes the marketplace can display prices in."""
    ArabEmiratesDirham = "AED"
    ArgentinePeso = "ARS"
    AustralianDollar = "AUD"
    AzerbaijanNewManat = "AZN"
    BahamasDollar = "BSD"
    BarbadianDollar = "BBD"
    BermudaDollar = "BMD"
    BrazilianReal = "BRL"
    BruneianDollar = "BND"
    BulgariaLev = "BGN"
    CanadianDollar = "CAD"
    CaymanianDollar = "KYD"
    ChileanPeso = "CLP"
    ChineseYuanRenminbi = "CNY"
    ColombianPeso = "COP"
    CostaRicanColon = "CRC"
    CzechKoruna = "CZK"
    DanishKrone = "DKK"
    DominicanRepublicPeso = "DOP"
    EgyptianPound = "EGP"
    Euro = "EUR"
    GhanaianCedi = "GHS"
    GuatemalanQuetzal = "GTQ"
    HongKongDollar = "HKD"
    HungarianForint = "HUF"
    IndianRupee = "INR"
    IndonesianRupiah = "IDR"
    IsraeliShekel = "ILS"
    JamaicanDollar = "JMD"
    JapaneseYen = "JPY"
    KazakhstanTenge = "KZT"
    KenyanShilling = "KES"
    LebanesePound = "LBP"
    MalaysianRinggit = "MYR"
    MauritianRupee = "MUR"
    MexicoPeso = "MXN"
    MoroccanDirham = "MAD"
    NamibiaDollar = "NAD"
    NewZealandDollar = "NZD"
    NigerianNaira = "NGN"
    NorwegianKrone = "NOK"
    PakistaniRupee = "PKR"
    PanamanianBalboa = "PAB"
    PeruvianSol = "PEN"
    PhilippinePeso = "PHP"
    PolishZloty = "PLN"
    Pounds = "GBP"
    QatariRiyal = "QAR"
    RomanianLei = "RON"
    RussianRuble = "RUB"
    SaudiArabianRiyal = "SAR"
    SingaporeDollar = "SGD"
    SouthKoreanWon = "KRW"
    SriLankanRupee = "LKR"
    SwedishKrona = "SEK"
    SwissFranc = "CHF"
    TaiwanNewDollar = "TWD"
    TanzaniaShilling = "TZS"
    ThaiBaht = "THB"
    TrinidadianDollar = "TTD"
    TurkishLira = "TRY"
    USDollar = "USD"
class Country(Enum):
    """Marketplace top-level domain suffixes, one per supported country."""
    Australia = "com.au"
    Brazil = "com.br"
    Canada = "ca"
    ChinaMainland = "cn"
    France = "fr"
    Germany = "de"
    India = "in"
    Italy = "it"
    Japan = "co.jp"
    Mexico = "com.mx"
    Netherlands = "nl"
    Poland = "pl"
    SaudiArabia = "sa"
    Singapore = "sg"
    Spain = "es"
    Sweden = "se"
    Turkey = "com.tr"
    UnitedArabEmirates = "ae"
    UnitedKingdom = "co.uk"
    UnitedStates = "com"

    def lang_and_currency(self) -> Tuple[Language, Currency]:
        """Return the default (Language, Currency) pair for this marketplace."""
        return {
            Country.Australia: (Language.EnglishAustralia, Currency.AustralianDollar),
            Country.Brazil: (Language.Portuguese, Currency.BrazilianReal),
            Country.Canada: (Language.EnglishCanada, Currency.CanadianDollar),
            Country.ChinaMainland: (Language.SimplifiedChinese, Currency.ChineseYuanRenminbi),
            Country.France: (Language.French, Currency.Euro),
            Country.Germany: (Language.German, Currency.Euro),
            Country.India: (Language.Hindi, Currency.IndianRupee),
            Country.Italy: (Language.Italian, Currency.Euro),
            Country.Japan: (Language.Japanese, Currency.JapaneseYen),
            Country.Mexico: (Language.SpanishMexico, Currency.MexicoPeso),
            Country.Netherlands: (Language.Dutch, Currency.Euro),
            Country.Poland: (Language.Polish, Currency.PolishZloty),
            Country.SaudiArabia: (Language.Arabic, Currency.SaudiArabianRiyal),
            Country.Singapore: (Language.EnglishSingapore, Currency.SingaporeDollar),
            Country.Spain: (Language.Spanish, Currency.Euro),
            Country.Sweden: (Language.Swedish, Currency.SwedishKrona),
            Country.Turkey: (Language.Turkish, Currency.TurkishLira),
            Country.UnitedArabEmirates: (Language.EnglishUnitedArabEmirates, Currency.ArabEmiratesDirham),
            Country.UnitedKingdom: (Language.EnglishUnitedKingdom, Currency.Pounds),
            Country.UnitedStates: (Language.English, Currency.USDollar)
        }[self]
class Offer:
    """A single marketplace offer for a product (one row of an offer listing)."""

    def __init__(self, price: Union[float, None], currency: str, rating: float, condition: str, ships_from: str,
                 sold_by: str, sold_by_url: str):
        # price may be None (no parseable price on the listing).
        self.price = price
        self.currency = currency
        # Stored under approx_review although the parameter is named rating.
        self.approx_review = rating
        self.condition = condition
        self.ships_from = ships_from
        self.sold_by = sold_by
        self.sold_by_url = sold_by_url

    def __repr__(self):
        return ('Offer(price={}, currency={}, approx_review={}, condition={}, '
                'ships_from={}, sold_by={}, sold_by_url={})').format(self.price, repr(self.currency),
                                                                     self.approx_review, repr(self.condition),
                                                                     repr(self.ships_from), repr(self.sold_by),
                                                                     repr(self.sold_by_url))
class OfferList:
    """One page of offers for a product plus the query settings that produced it."""

    def __init__(self, product_name: str, offer_count: int, offers: List[Offer], settings: Dict[str, bool]):
        self.product_name = product_name
        # NOTE(review): presumably the total offer count reported by the site,
        # not just len(offers) — confirm against the parser that fills this in.
        self.offer_count = offer_count
        self.offers = offers
        # settings must contain a 'page' entry; it is mirrored for convenience.
        self.page = settings['page']
        self.settings = settings

    def __repr__(self):
        # Truncate the offers repr so large pages stay readable in logs.
        offers_repr_length = 100
        offers_repr = repr(self.offers)
        print_offers = offers_repr[:offers_repr_length]
        if offers_repr[offers_repr_length:]:
            print_offers += '...'
        return 'OfferList(product_name={}, offer_count={}, ' \
               'offers={}, page={}, settings={})'.format(repr(self.product_name), self.offer_count,
                                                         print_offers, self.page, repr(self.settings)[:30] + '...')
class Review:
    """A single product review with its author, rating and helpful-vote count."""

    def __init__(self, reviewer: str, reviewer_url: str, review_url: str, title: str, rating: int, helpful: int,
                 body: str):
        self.reviewer = reviewer
        self.reviewer_url = reviewer_url
        self.review_url = review_url
        self.title = title
        self.rating = rating
        # Number of "helpful" votes the review received.
        self.helpful = helpful
        self.body = body

    def __repr__(self):
        # Truncate the body so long reviews stay readable in logs.
        body_repr_length = 100
        body_repr = repr(self.body)
        print_body = body_repr[:body_repr_length]
        if body_repr[body_repr_length:]:
            print_body += '...'
        return 'Review(reviewer={}, reviewer_url={}, review_url={}, title={}, rating={}, helpful={}, body={})'.format(
            repr(self.reviewer), repr(self.reviewer_url), repr(self.review_url),
            repr(self.title), self.rating, self.helpful, print_body)
class ReviewList:
    """One page of reviews for a given ASIN in a given marketplace."""

    def __init__(self, reviews: List[Review], asin: str, country: Country, settings: Dict, last_page=False):
        self.reviews = reviews
        self.asin = asin
        self.country = country
        self.settings = settings
        self.page = settings['pageNumber']  # settings must contain 'pageNumber'
        self.last_page = last_page

    def __repr__(self):
        limit = 100
        reviews_repr = repr(self.reviews)
        # Truncate a long reviews repr for readability.
        shown = reviews_repr[:limit] + ('...' if len(reviews_repr) > limit else '')
        return 'ReviewList(reviews={}, asin={}, country={}, page={}, last_page={})'.format(shown,
                                                                                           repr(self.asin),
                                                                                           self.country,
                                                                                           self.page, self.last_page)
class ReviewParameter:
    """Namespace of enums mapping to the review-listing query parameters."""

    class SortBy(Enum):
        Helpful = 'helpful'  # default
        Recent = 'recent'

    class ReviewerType(Enum):
        AllReviews = 'all_reviews'           # default
        AVPOnlyReviews = 'avp_only_reviews'  # verified-purchase reviews only

    class FormatType(Enum):
        AllFormats = 'all_formats'       # default
        CurrentFormat = 'current_format'  # only the currently viewed format

    class MediaType(Enum):
        AllContents = 'all_contents'             # text, image or video (default)
        MediaReviewsOnly = 'media_reviews_only'  # only reviews with image/video

    class FilterByStar(Enum):
        AllStars = 'all_stars'  # default
        FiveStar = 'five_star'
        FourStar = 'four_star'
        ThreeStar = 'three_star'
        TwoStar = 'two_star'
        OneStar = 'one_star'
        Positive = 'positive'  # per the original notes: maybe 5 and 4 stars
        Critical = 'critical'  # per the original notes: maybe 3, 2 and 1 stars
class ReviewSettings:
    """Query settings for fetching a page of reviews.

    Fixes two defects in the original: ``reviewer_type`` was annotated as
    ``ReviewParameter.SortBy`` instead of ``ReviewParameter.ReviewerType``,
    and the constructor body was ``pass``, silently discarding every
    argument.  The values are now stored on the instance.
    """

    def __init__(self,
                 sort_by: ReviewParameter.SortBy = ReviewParameter.SortBy.Helpful,
                 reviewer_type: ReviewParameter.ReviewerType = ReviewParameter.ReviewerType.AllReviews,
                 format_type: ReviewParameter.FormatType = ReviewParameter.FormatType.AllFormats,
                 media_type: ReviewParameter.MediaType = ReviewParameter.MediaType.AllContents,
                 filter_by_star: ReviewParameter.FilterByStar = ReviewParameter.FilterByStar.AllStars,
                 page_number: int = 1, filter_by_language: str = ''):
        self.sort_by = sort_by
        self.reviewer_type = reviewer_type
        self.format_type = format_type
        self.media_type = media_type
        self.filter_by_star = filter_by_star
        self.page_number = page_number
        self.filter_by_language = filter_by_language
|
22,881 | 07c6fd5172a66a01508a242eb90e0d2fd80d4d7b | # Konversi usia
# Konversi usia: read an age in months and print it as "years months".
n = int(input())
# divmod avoids the float round-trip of int(n / 12), which can lose
# precision for very large inputs.
tahun, bulan = divmod(n, 12)
print(tahun, bulan)
22,882 | 4e02be244c8f6307825d8180a09ad287e9dacfee | #!/bin/env python
import threading
import time
import sys
import ConfigParser
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
class Uart(QtGui.QWidget):
    """Qt widget emulating a memory-mapped UART: bytes written by the
    emulated core are appended to a read-only text view."""
    size = 1  # bytes this device occupies in the memory map
    # Emitted from the emulator thread; delivered on the GUI thread
    # (see the BlockingQueuedConnection below).
    byteWritten = QtCore.pyqtSignal(int)
    def __init__(self, name, base, memory, conf, core, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.conf = conf
        self.base = base
        self.memory = memory
        self.setWindowTitle("UART: " + name)
        self.setToolTip("This is a UART device connected to armyc")
        self.text = QtGui.QTextEdit(self)
        self.text.resize(650, 350)
        self.text.setReadOnly(True)
        # BlockingQueuedConnection: the emitting thread waits until the GUI
        # thread has finished processing the byte.
        self.byteWritten.connect(self.updateUart, Qt.BlockingQueuedConnection)
        self.show()
    @QtCore.pyqtSlot(int)
    def updateUart(self, value):
        """Append *value* as a character and keep the view scrolled down."""
        scrollbar = self.text.verticalScrollBar()
        self.text.insertPlainText(chr(value))
        scrollbar.setValue(scrollbar.maximum())
        self.show()
    def writeByte(self, address, value):
        # Forward the byte to the GUI thread, then clear the device's
        # backing byte in memory.
        self.byteWritten.emit(value)
        self.memory.writeByte(0, 0)
    def readByte(self, address):
        # Reading from the UART is not implemented; returns None.
        pass
class Control(object):
    """Joystick input device: a background thread polls a pyjoy Controller
    and writes an 8-way direction code (0-7, or -1 for neutral) into the
    device's backing byte."""
    size = 1  # bytes this device occupies in the memory map
    def __init__(self, name, base, memory, conf, core):
        from pyjoy import Controller
        self.memory = memory
        self.port = conf[name]["port"]
        self.c = Controller(self.port)
        self.core = core
        self.active = True  # cleared in __del__ to stop the polling thread
        self.t = threading.Thread(target=self.mov)
        self.t.start()
    def writeByte(self, address, value):
        # The controller is read-only from the emulated core's side.
        pass
    def mov(self):
        """Polling loop run on the background thread (~20 Hz)."""
        while self.active:
            t = self.c.buttons()
            ta = self.c.axes()
            #arriba, derecha, abajo, izquierda
            # dir = [up, right, down, left] pressed flags (buttons or axes)
            dir = [0, 0, 0, 0]
            if t[4] == 1 or (ta[1] >= -32767 and ta[1] < 0):
                dir[0] = 1
            if t[6] == 1 or (ta[1] > 0 and ta[1] <= 32767):
                dir[2] = 1
            if t[5] == 1 or (ta[0] > 0 and ta[0] <= 32767):
                dir[1] = 1
            if t[7] == 1 or (ta[0] >= -32767 and ta[0] < 0):
                dir[3] = 1
            # Diagonals first (odd codes 1,3,5,7), then the four cardinal
            # directions (even codes 0,2,4,6); -1 means no input.
            if dir[0] and dir[1]:
                result = 1
            elif dir[1] and dir[2]:
                result = 3
            elif dir[2] and dir[3]:
                result = 5
            elif dir[3] and dir[0]:
                result = 7
            elif dir[0]:
                result = 0
            elif dir[1]:
                result = 2
            elif dir[2]:
                result = 4
            elif dir[3]:
                result = 6
            else:
                result = -1
            self.memory.writeByte(0,result)
            time.sleep(0.05)
    def __del__(self):
        # Stop the polling thread.
        self.active = False
class LcdMx( QtGui.QWidget ):
    """Memory-mapped LCD/sprite device.

    The emulated core drives a 24-byte register file (6 x 32-bit words):
    word 0 selects an action (1-5), words 1-3 are its arguments, and words
    4-5 receive results via writeRes().  Writing byte 3 commits the action.
    """
    size=24  # bytes this device occupies in the memory map
    # Emitted by the core thread; handled on the GUI thread.
    move = QtCore.pyqtSignal()
    def __init__(self, name, base, memory, conf, core, parent=None):
        QtGui.QWidget.__init__ ( self, parent )
        self.conf = conf
        self.base = base
        self.memory = memory
        self.setWindowTitle("LCD")
        self.setToolTip("This is a LCD device connected to armyc")
        self.resize(320, 240)
        self.scene = QtGui.QGraphicsScene ()
        self.scene.setSceneRect(0, 0, 320, 240)
        self.view = QtGui.QGraphicsView(self.scene, self)
        self.view.show()
        # Sprite image paths come from an INI file ([images] section).
        self.c = ConfigParser.ConfigParser()
        self.action = 0
        self.width = 0
        self.height = 0
        self.id = ""
        self.id_d = 0
        self.id_t = 0
        self.x = 0
        self.y = 0
        # sprite id -> [x, y, width, height, animation]
        self.l = {}
        self.count = 1  # next sprite id to hand out
        self.c.read([conf[name]["config"]])
        for i in range(6):
            self.memory.writeByte(i, 0)
        self.move.connect(self.actions, Qt.BlockingQueuedConnection)
        self.show()
    def readReg(self, address):
        return self.memory.readByte(address)
    def writeReg(self, address, value):
        self.memory.writeByte(address, value)
    def writeByte(self, address, value):
        # Writing byte 3 (high byte of the action word) commits the
        # pending command to the GUI thread.
        if address == 3:
            self.move.emit()
    def readData(self, reg):
        """Read 32-bit word *reg* from the register file ("I" = native
        unsigned int, assembled byte by byte)."""
        import struct
        index = reg * 4
        finish = index + 4
        data = ""
        for i in range(index, finish):
            data += chr(self.readReg(i) & 0xff)
        result = struct.unpack("I", data)
        return result[0]
    def writeRes(self, res1, res2):
        """Write the two 32-bit result words (bytes 16-23)."""
        import struct
        num1 = struct.pack("I", res1)
        num2 = struct.pack("I", res2)
        for i in range(16, 20):
            self.writeReg(i, ord(num1[i - 16]))
        for i in range(20, 24):
            self.writeReg(i, ord(num2[i - 20]))
    @QtCore.pyqtSlot()
    def actions(self):
        """Dispatch the committed command (runs on the GUI thread)."""
        action = self.readData(0)
        if action == 1:
            # Create sprite: args = (x, y, image id); returns sprite id.
            x = self.readData(1)
            y = self.readData(2)
            id = self.readData(3)
            img = QtGui.QPixmap(str(self.c.get("images", str(id))))
            object = QtGui.QGraphicsPixmapItem(img)
            object.setPos(x, y)
            self.scene.addItem(object);
            tl = QtCore.QTimeLine ( 4 )
            tl.setFrameRange ( 0 , 3 )
            a = QtGui.QGraphicsItemAnimation ( )
            a.setItem ( object )
            a.setTimeLine ( tl )
            self.l[self.count] = [x, y, img.width(), img.height(), a]
            self.writeRes(self.count, 0)
            print "id", self.count
            self.count += 1
        elif action == 2:
            # Move sprite one pixel: args = (sprite id, direction code, ?).
            # Direction codes match the Control device: 0 = up, then
            # clockwise in 45-degree steps.
            id = self.readData(1)
            id_d = self.readData(2)
            id_t = self.readData(3)
            x = self.l[id][0]
            y = self.l[id][1]
            a = self.l[id][4]
            tl = a.timeLine()
            if tl.state() == 0:  # only when the previous animation is idle
                a.clear()
                i = 0
                while i < 4:
                    a.setPosAt ( i/4, QtCore.QPointF ( x, y ) )
                    i += 1
                if id_d == 0:
                    y -= 1
                elif id_d == 1:
                    y -= 1
                    x += 1
                elif id_d == 2:
                    x += 1
                elif id_d == 3:
                    x += 1
                    y += 1
                elif id_d == 4:
                    y += 1
                elif id_d == 5:
                    x -= 1
                    y += 1
                elif id_d == 6:
                    x -= 1
                elif id_d == 7:
                    x -= 1
                    y -= 1
                self.l[id][0] = x
                self.l[id][1] = y
                tl.start( )
        elif action == 3:
            # Query sprite position -> (x, y).
            id = self.readData(1)
            self.writeRes(self.l[id][0], self.l[id][1])
        elif action == 4:
            # Teleport sprite to (x, y).
            id = self.readData(1)
            x = self.readData(2)
            y = self.readData(3)
            a = self.l[id][4]
            tl = a.timeLine()
            a.clear()
            a.setPosAt(1, QtCore.QPointF( x, y))
            self.l[id][0] = x
            self.l[id][1] = y
            tl.start()
        elif action == 5:
            # Query sprite size -> (width, height).
            id = self.readData(1)
            self.writeRes(self.l[id][2], self.l[id][3])
|
22,883 | a0b4bdc0ade12e8da620b5c0e8b9cc535a4a6820 | """
DSP-Tool
This file is part of the DSP-Tool application.
This application is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at
your option) any later version.
This application is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this application; If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pickle
import numpy as np
import scipy as sp
from util.Project import Project
from SideBar import SideBar
from PropertyBar import PropertyBar
import scipy.signal as sig
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from util.addSignal import addSignal
from util.openProject import openProject
from util.createProject import createProject
from PySide.QtCore import Signal, Slot
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from PySide.QtGui import QDialog, QErrorMessage, QTableWidget, QMessageBox
from PySide.QtGui import QMainWindow, QVBoxLayout, QMenu, QMenuBar, QWidget, QHBoxLayout, QVBoxLayout, QLabel, QScrollArea
class DSPToolFileMenu(QMenu):
    """'Files' menu: project creation, opening, saving and app exit."""
    def __init__(self, parent=None):
        QMenu.__init__(self, "&Files")
        # NOTE(review): *parent* is stored but not passed to QMenu.__init__;
        # confirm the menu is parented when added to the menu bar.
        self.parent = parent
        self.setWindowTitle("DSP-Tool")
        newProjectAction = self.addAction(self.trUtf8("&New Project"))
        newProjectAction.triggered.connect(self.createNewProject)
        openProjectAction = self.addAction(self.trUtf8("&Open Project"))
        openProjectAction.triggered.connect(self.openProject)
        self.saveProjectAction = self.addAction(self.trUtf8("&Save Project"))
        self.saveProjectAction.triggered.connect(self.saveProject)
        # Disabled until there is a project to save.
        self.saveProjectAction.setEnabled(False)
        quitAction = self.addAction(self.trUtf8("&Quit Program"))
        quitAction.triggered.connect(self.parent.close)
    @Slot()
    def createNewProject(self):
        """Open the 'create project' dialog."""
        create = createProject(self)
    @Slot()
    def openProject(self):
        """Open the 'open project' dialog."""
        openP = openProject(self)
    @Slot()
    def saveProject(self):
        """Persist the current project and show a confirmation box."""
        self.parent.project.save()
        msg = QMessageBox(self.parent)
        msg.setText("Project Saved")
        msg.show()
class DSPToolSignalsMenu(QMenu):
    """'Signals' menu: adding signals and applying operations to them."""
    def __init__(self, parent=None):
        QMenu.__init__(self, "&Signals")
        self.parent = parent
        self.addSignalAction = self.addAction(self.trUtf8("&Add Signal"))
        self.addSignalAction.triggered.connect(self.addSignal)
        # Both actions start disabled; presumably enabled once a project
        # is loaded - confirm against the rest of the application.
        self.addSignalAction.setEnabled(False)
        self.applyOperationAction = self.addAction(self.trUtf8("&Apply Operation"))
        self.applyOperationAction.triggered.connect(self.applyOperation)
        self.applyOperationAction.setEnabled(False)
    def applyOperation(self):
        # NOTE(review): ApplyOperation does not appear in this module's
        # visible imports - triggering this action would raise NameError.
        ApplyOperation.applyOperation(self.parent)
    def addSignal(self):
        """Open the 'add signal' dialog."""
        add = addSignal(self)
class DSPToolMainWindow(QMainWindow):
    """Main application window: signal table, side bar and property bar."""
    def __init__(self):
        QMainWindow.__init__(self)
        self.project = None  # set when a project is created or opened
        self.initializeUI()
        self.platform = sys.platform
    def refreshTable(self):
        '''Rebuild the signal table from the current project's signal grid.'''
        for i in range(0, len(self.project.signalList)):
            self.table.setRowCount(self.table.rowCount()+1)
            for j in range (0, len(self.project.signalList[i])):
                # Grow the column count lazily to fit the widest row.
                if self.table.columnCount() < j+1: self.table.setColumnCount(self.table.columnCount()+1)
                label = self.project.signalList[i][j].getImage()
                self.table.setCellWidget(i,j,label)
        self.table.resizeColumnsToContents()
        self.table.resizeRowsToContents()
        self.setLabels()
    def setLabels(self):
        '''Apply the project's row/column header labels to the table.'''
        self.table.setHorizontalHeaderLabels(self.project.horizontalLabels)
        self.table.setVerticalHeaderLabels(self.project.verticalLabels)
    def oneClickedEvent(self):
        # Show the clicked signal's properties in the side bar.
        index = self.table.selectedIndexes()[0]
        i = index.row()
        j = index.column()
        self.sideBar.setProperties(self.project.signalList[i][j])
    def refreshProperties(self):
        self.propertyBar.setProperties()
    def initializeUI(self):
        """Build the window: menu bar, signal table, side/property bars."""
        self.mainWidget = QWidget()
        #Size
        self.resize(1024,768)
        #MenuBar
        menuBar = QMenuBar()
        self.fileMenu = DSPToolFileMenu(self)
        menuBar.addMenu(self.fileMenu)
        self.signalMenu = DSPToolSignalsMenu(self)
        menuBar.addMenu(self.signalMenu)
        self.setMenuBar(menuBar)
        #Table Widget
        self.table = QTableWidget()
        self.table.setFixedWidth(824)
        scrollTable = QScrollArea()
        scrollTable.setWidget(self.table)
        scrollTable.setWidgetResizable(True)
        #Side and Property Bar
        self.sideBar = SideBar()
        self.propertyBar = PropertyBar(self)
        #Layouts
        hLayout = QHBoxLayout()
        hLayout.addWidget(self.table)
        hLayout.addWidget(self.sideBar)
        hWidget = QWidget()
        hWidget.setLayout(hLayout)
        vLayout = QVBoxLayout()
        vLayout.addWidget(hWidget)
        vLayout.addWidget(self.propertyBar)
        self.mainWidget.setLayout(vLayout)
        self.setCentralWidget(self.mainWidget)
        #Signals
        self.table.cellClicked.connect(self.oneClickedEvent)
|
22,884 | 9d722997618643bd00e3adf7231d5079b4c1a9d9 | import json
import scrapy
import urllib.parse
from scrapy import Spider, Request
from bs4 import BeautifulSoup
from urllib.request import urlopen, urlretrieve
import re
import time
'''
from Crawler.items import DoubanItem
class doubanSpider(scrapy.Spider):
print("---------------")
name = "douban"
allowed_domain = ['douban.com']
offset = 0
last_number = 67095
time_count = 0
base_url = "https://www.douban.com/group/douban_wow/members?start="
start_urls = ["https://www.douban.com/group/douban_wow/members?start=0"]
#html = Request(start_url)
#print( html.body)
def parse(self, response):
html = response.text
soup = BeautifulSoup(html, "html.parser")
names = soup.select(".article .mod .member-list .name a")
locations = soup.select(".article .mod .member-list .name .pl")
for name, location in zip(names, locations):
item = DoubanItem()
item['name'] = name.get_text()
item['url'] = name.get('href')
location_text = location.get_text()
if len(location_text) > 0:
item['location'] = ''.join(re.findall('\((.*?)\)',location_text))
else:
item['location'] = location_text
yield item
if self.offset < self.last_number:
self.offset += 35
self.time_count += 1
if self.time_count == 5:
self.time_count = 0
time.sleep(3)
next_url = self.base_url + str(self.offset)
next_url = urllib.parse.unquote(next_url)
yield scrapy.Request(next_url, callback=self.parse)
def start_login(self, response):
login_url = "https://www.douban.com/accounts/login"
html = response.text
soup = BeautifulSoup(html, "html.parser")
if len(response.xpath("//img[@id='captcha_image']/@src")) >0:
urlretrieve(response.xpath("//img[@id='captcha_image']/@src").extract()[0],
"./captcha.jpg") #download pactcha
authcode = input("่พๅ
ฅ้ช่ฏ็ : ")
capid = response.xpath("//input[@name='captcha-id']/@value").extract()[0]
form_data = {
"form_email": "",
"form_password": "",
"captcha-solution": authcode,
"captcha-id": capid
}
return scrapy.FormRequest.from_response(response, formdata=form_data, callable = self.parse)
else:
form_data = {
"form_email": "",
"form_password": ""
}
return scrapy.FormRequest.from_response(response, formdata=form_data, callable=self.parse)
'''
|
22,885 | 20f903cd9e1a6ff8c667cb113a4e9c8aafbe5115 | #!/usr/bin/env python3
import xarray as xr
# A collection on helper functions that assume a dependency to xarray or are
# expected to process xarray objects.
# Jukka-Pekka Keskinen
# Finnish Meteorological Insitute
# 18.4.2023
#==========================================================================================#
def isCollocated(XRD, varstr):
    """Return True when variable *varstr* of dataset *XRD* carries all
    three collocated-grid coordinates ('zu_3d', 'x' and 'y')."""
    coords = XRD[varstr].coords
    return all(axis in coords for axis in ('zu_3d', 'x', 'y'))
# |_|/ #
#=======================================|_|_|\=============================================#
# | #
|
22,886 | f1d7cdce0f0260552e2209247aa8f22bf1bbc05a | import Twitter_Func as hello
import pyodbc
import pandas as pd
# --- SQL Server credentials -------------------------------------------------
# BUG FIX: the original had the values stripped ("Authserver =" with no
# right-hand side), which is a SyntaxError.  Fill these in before running.
Authserver = ""    # e.g. "myhost.database.windows.net"
Authdatabase = ""
Authusername = ""
Authpassword = ""
acnxn = pyodbc.connect('DRIVER={SQL Server};SERVER='+Authserver+';DATABASE='+Authdatabase+';UID='+Authusername+';PWD='+ Authpassword)
cursor = acnxn.cursor()
# NOTE(review): 'INSERT AUTH TABLE' is a placeholder - replace with the real
# credentials table name.
Twitter_Credentials = pd.read_sql_query('SELECT * FROM INSERT AUTH TABLE',acnxn)
print(Twitter_Credentials)
cursor.close()
print("here")
App_rate_limit = 0
n = len(Twitter_Credentials.index)
# Run the daily job once per credential row, threading the app-level rate
# limit counter through successive calls.
for i in range(0, n):
    user_id = Twitter_Credentials['user_id'][i]
    username = Twitter_Credentials['username'][i]
    ##Pass in the following arguments
    BEARER_TOKEN = Twitter_Credentials['BEARER_TOKEN'][i]
    #Set up consumer key
    consumer_key = Twitter_Credentials['consumer_key'][i]
    consumer_secret = Twitter_Credentials['consumer_secret'][i]
    #Set up API access token
    access_token = Twitter_Credentials['access_token'][i]
    access_token_secret = Twitter_Credentials['access_token_secret'][i]
    #set servername and login information
    stable_name = Twitter_Credentials['table_name'][i]
    App_rate_limit = hello.daily(user_id,username,BEARER_TOKEN,consumer_key,consumer_secret,access_token,access_token_secret,Authserver,Authdatabase,Authusername,Authpassword,stable_name,App_rate_limit)
print("App rate limit:", App_rate_limit)
22,887 | cfb1403e88b8ddb3ccc387205e5979f9a805e202 | import json
import math
class TE1:
def __init__(self, arg):
self.arg = arg
def REDWOOD(self):
argument = self.arg[0:]
A = float(argument[1])
B = float(argument[2])
pr=float(argument[3])
V1 = (A*float(argument[5])) +(B/ float(argument[5]))
V2 = (A*float(argument[7])) +(B/ float(argument[7]))
V3 = (A*float(argument[9])) +(B/ float(argument[9]))
V4 = (A*float(argument[11])) +(B/ float(argument[11]))
V5 = (A*float(argument[13])) +(B/ float(argument[13]))
V = (V1+V2+V3+V4+V5)/5
U1 = (float(argument[15])*pr)
U2 = (float(argument[17])*pr)
U3 = (float(argument[19])*pr)
U4 = (float(argument[21])*pr)
U5 = (float(argument[23])*pr)
U = (U1+U2+U3+U4+U5)/5
print(json.dumps({"ans":[{"Result":"Thus dynamic and kinematic viscosity at given oil at different temperature were determined."}],"Normality":[{"Kinematic viscosity" : str(V)+ " Ns/m2"}], "Nrmality":[{"Absolute Viscosity" : str(U) + " Ns/m2"}]}))
def CLEVELAND(self):
argument = self.arg[0:]
O = str(argument[1])
F = float(argument[2])
P =float(argument[3])
#print(argument)
print(json.dumps({"answer":[{"Result":"Thus the sample flask & fire point is determined"}], "Normality":[{"Oil sample" : str(O)}], "mality":[{"Flash Point" : str(F)}],"lity":[{"Fire Point" : str(P)}]}))
def Fire_Point(self):
argument = self.arg[0:]
A = float(argument[1])
B = float(argument[2])
c =float(argument[3])
print(json.dumps({"Normality":[{"Oil Sample" : str(A)}], "bnormality":[{"Flash Point" : str(B)}], "Amount":[{"Fire Point":str(C)}]}))
def Junkers(self):
argument = self.arg[0:]
Mw =float(argument[1])
Cpw=float(argument[2])
T1 =float(argument[3])
T2 =float(argument[4])
Vg =float(argument[5])
Hcv = (Mw*Cpw*(T2-T1))/Vg
print(json.dumps({"Normality":[{"Thus the calorific value of fuel was determined by using the junker calorimeter. Thus the value is" : str(Hcv)}]}))
def BOMB(self):
argument = self.arg[0:]
T1 =float(argument[1])
T2 =float(argument[2])
Ot = T2-T1
Maxt=float(argument[3])
Mint=float(argument[4])
T=float(argument[5])
R=(Maxt-Mint)/T
Tm=float(argument[6])
C=(R/2)*Tm
#Rt=float(argument[7])
dt=Ot+C
mf=float(argument[7])
Mw=2
Cpw=4.18
Mc=0.69
Hcv=(Mw+Mc)*Cpw*(dt/mf)
print(json.dumps({"Normality":[{"Thus the calorific value of fuel was determined by using the bomb calorimeter. Thus the value is" : str(Hcv)}]}))
def COMPRESSOR(self):
argument = self.arg[0:]
h1=(1000/1.165)*(float(argument[2])-float(argument[3]))
h2=(1000/1.165)*(float(argument[10])-float(argument[11]))
h3=(1000/1.165)*(float(argument[18])-float(argument[19]))
h4=(1000/1.165)*(float(argument[26])-float(argument[27]))
#Actual discharge
h=(h1+h2+h3+h4)/4
cd=0.68
d0=12*10**-3
a0=math.pi*(d0**2)
Vc= cd*a0*math.sqrt(2*9.81*h)
D=79*10**-3
A=math.pi*(D**2)
L=80*10**-3
N=(float(argument[6])+float(argument[14])+float(argument[22])+float(argument[30]))/4
#theoratical disacharge
Vt= (A*L*N)/60
Vc=0.035
E=Vc/Vt
#input power
I=(3600/180)*(5/300)
pc=1.165
p0=8621
O=(pc*Vc*p0*9.81)/1000
F=O/I
print(json.dumps({"Amount":[{"Result":"Thus the performace test on reciprocating air compressor is conducted and the graph is drawn."}],"Normality":[{"Vact" : str(Vc)}], "Nrmality":[{"Vtheo" : str(Vt)}], "mality":[{"Volumetric efficiency" : str(E)}], "lity":[{"Efficiency" : str(F)}]}))
def NATURAL(self):
argument = self.arg[0:]
Qs1 = (float(argument[1])) *(float(argument[2]))
Qs2 = (float(argument[9])) *(float(argument[10]))
Qs3 = (float(argument[17])) *(float(argument[18]))
Qs4 = (float(argument[25])) *(float(argument[26]))
Qs5 = (float(argument[33])) *(float(argument[34]))
Ts1 =(float(argument[3])+float(argument[4])+float(argument[5])+float(argument[6])+float(argument[7])+float(argument[8]))/6
Ts2 =(float(argument[11])+float(argument[12])+float(argument[13])+float(argument[14])+float(argument[15])+float(argument[16]))/6
Ts3 =(float(argument[19])+float(argument[20])+float(argument[21])+float(argument[22])+float(argument[23])+float(argument[24]))/6
Ts4 =(float(argument[27])+float(argument[28])+float(argument[29])+float(argument[30])+float(argument[31])+float(argument[32]))/6
Ts5 =(float(argument[35])+float(argument[36])+float(argument[37])+float(argument[38])+float(argument[39])+float(argument[40]))/6
D = 0.038
L = 0.8
As = math.pi*D**2*L
Ta = 30
T1 = Ts1 - Ta
T2 = Ts2 - Ta
T3 = Ts3 - Ta
T4 = Ts4 - Ta
T5 = Ts5 - Ta
T =(T1+T2+T3+T4+T5)/5
H1 = Qs1/(As*T1)
H2 = Qs2/(As*T2)
H3 = Qs3/(As*T3)
H4 = Qs4/(As*T4)
H5 = Qs5/(As*T5)
H = (H1+H2+H3+H4+H5)/5
Tf1 = (Ts1+Ta)/2
Tf2 = (Ts2+Ta)/2
Tf3 = (Ts3+Ta)/2
Tf4 = (Ts4+Ta)/2
Tf5 = (Ts5+Ta)/2
B1 = 1/(Tf1+273)
B2 = 1/(Tf2+273)
B3 = 1/(Tf3+273)
B4 = 1/(Tf4+273)
B5 = 1/(Tf5+273)
B = (B1+B2+B3+B4+B5)/5
g = 9.81
Gama = 16.96*10**-6
G =(g*L**3*B*T)/Gama**2
Cp = 1005
U=19.26*10**-6
K = 0.02756
Pr = (Cp*U)/K
Nu = (0.59*(Pr)**0.25)
Htheo = (Nu*K)/L
print(json.dumps({"Amount":[{"Result":"The surface heat transfer wett of a vertical tube losing water by natural convection is"}],"Normality":[{"Experimental effeiciency" : str(Nu)}], "Nrmality":[{"Theoratical efficiency" : str(Htheo)}]}))
def Forced(self):
argument = self.arg[0:]
Qs1 = (float(argument[1])) *(float(argument[2]))
Qs2 = (float(argument[10])) *(float(argument[11]))
Qs3 = (float(argument[19])) *(float(argument[20]))
Qs4 = (float(argument[28])) *(float(argument[29]))
Qs5 = (float(argument[37])) *(float(argument[38]))
Ts1 =(float(argument[5])+float(argument[6])+float(argument[7])+float(argument[8])+float(argument[9]))/5
Ts2 =(float(argument[14])+float(argument[15])+float(argument[16])+float(argument[17])+float(argument[18]))/5
Ts3 =(float(argument[23])+float(argument[24])+float(argument[25])+float(argument[26])+float(argument[27]))/5
Ts4 =(float(argument[32])+float(argument[33])+float(argument[34])+float(argument[35])+float(argument[36]))/5
Ts5 =(float(argument[41])+float(argument[42])+float(argument[43])+float(argument[44])+float(argument[45]))/5
D = 50*10**-3
L= 400*10**-3
As = math.pi*D**2*L
Ta = 32
T1 = Ts1 - Ta
T2 = Ts2 - Ta
T3 = Ts3 - Ta
T4 = Ts4 - Ta
T5 = Ts5 - Ta
T =(T1+T2+T3+T4+T5)/5
H1 = Qs1/(As*T1)
H2 = Qs2/(As*T2)
H3 = Qs3/(As*T3)
H4 = Qs4/(As*T4)
H5 = Qs5/(As*T5)
H = (H1+H2+H3+H4+H5)/5
d0=10*10**-3
A0=(math.pi/4)*d0**2
h1 = (float(argument[3])) -(float(argument[4]))
h2 = (float(argument[12])) -(float(argument[13]))
h3 = (float(argument[21])) -(float(argument[22]))
h4 = (float(argument[30])) -(float(argument[31]))
h5 = (float(argument[39])) -(float(argument[40]))
h = (h1+h2+h3+h4+h5)/5
Q=0.68*A0*(math.sqrt(2*9.81*h*(1000/1.16)))
A=(math.pi/4)*D**2
V=Q/A
Re =2208.13
pr = 0.69
Nu=0.023*(Re**0.8)*(pr**0.3)
K = 0.02756
Etheo =(Nu*K)/D
print(json.dumps({"Amount":[{"Result":"Thus the heat transfer wett by forced convertioon was determined by using fourced convection apparatus"}],"Normality":[{"Experimental effeiciency" : str(Nu)}], "Nrmality":[{"Theoratical efficiency" : str(Etheo)}]}))
def Pin_Fin(self):
argument = self.arg[0:]
Qs1 = (float(argument[1])) *(float(argument[2]))
Qs2 = (float(argument[10])) *(float(argument[11]))
Qs3 = (float(argument[19])) *(float(argument[20]))
Qs4 = (float(argument[28])) *(float(argument[29]))
Qs5 = (float(argument[37])) *(float(argument[38]))
Qact =(Qs1+Qs2+Qs3+Qs4+Qs5)/5
Ts1 =(float(argument[5])+float(argument[6])+float(argument[7])+float(argument[8]))/4
Ts2 =(float(argument[14])+float(argument[15])+float(argument[16])+float(argument[17]))/4
Ts3 =(float(argument[23])+float(argument[24])+float(argument[25])+float(argument[26]))/4
Ts4 =(float(argument[32])+float(argument[33])+float(argument[34])+float(argument[35]))/4
Ts5 =(float(argument[41])+float(argument[42])+float(argument[43])+float(argument[44]))/4
Ts=(Ts1+Ts2+Ts3+Ts4+Ts5)/5
As = math.pi*D**2*L
Ta = 32
T1 = Ts1 - Ta
T2 = Ts2 - Ta
T3 = Ts3 - Ta
T4 = Ts4 - Ta
T5 = Ts5 - Ta
T =(T1+T2+T3+T4+T5)/5
H1 = Qs1/(As*T1)
H2 = Qs2/(As*T2)
H3 = Qs3/(As*T3)
H4 = Qs4/(As*T4)
H5 = Qs5/(As*T5)
H = (H1+H2+H3+H4+H5)/5
Tf =(Ts+Ta)/2
A0=(math.pi/4)*d0**2
h1 = (float(argument[3])) -(float(argument[4]))
h2 = (float(argument[12])) -(float(argument[13]))
h3 = (float(argument[21])) -(float(argument[22]))
h4 = (float(argument[30])) -(float(argument[31]))
h5 = (float(argument[39])) -(float(argument[40]))
h = (h1+h2+h3+h4+h5)/5
Q=Cd*A0*(math.sqrt*2*g*h*(pw/pe))
A=(math.pi/4)*D**2
V=Q/A
Re = (V*d0)/Gama
Nu=0.023*(Re**0.8)*(Pr**0.3)
Etheo =(Nu*K)/d0
print(json.dumps({"Amount":[{"Result:The experiment was conducted & the result were found as pin fin efficiency"}],"Normality":[{"Experimental effeiciency" : str(Nu)}], "Nrmality":[{"Theoratical efficiency" : str(Etheo)}]}))
|
22,888 | 030eff474b588cf06c66e783df033207abe75018 | #! /usr/bin/env python
import ROOT
import root_numpy as rtnp
if __name__ == "__main__":
    # Plot reconstruction bias (A/A_true) versus the pedestal shift
    # applied in the input sample, and save it as a PDF.
    ROOT.gStyle.SetOptStat(0)
    tf = ROOT.TFile.Open("data/reco_signal_50GeV_eta_0.0_pu_40_shiftped.root")
    tree = tf.Get("amplitudes")
    hsteps = tf.Get("hsteps")
    # Pedestal-shift steps are stored as a histogram; convert to an array.
    steps = rtnp.hist2array(hsteps)
    print steps
    gr = ROOT.TGraphErrors(len(steps))
    gr.SetTitle("")
    for i,s in enumerate(steps):
        # Bias for step i = mean of amplitude[i]/amplitudeTruth.
        hist = ROOT.TH1D("hist","",1000,0.95,1.05)
        tree.Draw("amplitude[{i}]/amplitudeTruth>>hist".format(i=i))
        bias = hist.GetMean()
        err = hist.GetMeanError()
        print "pedshift = {dp} => bias = {bias}".format(dp=s,bias=bias)
        gr.SetPoint(i,s,bias)
        gr.SetPointError(i,0,err)
    lat = ROOT.TLatex()
    lat.SetNDC(); lat.SetTextFont(42)
    canv = ROOT.TCanvas("canv","",1200,1200)
    canv.SetGridx()
    canv.SetGridy()
    canv.SetLeftMargin(0.17)
    canv.SetRightMargin(0.1)
    canv.SetBottomMargin(0.15)
    gr.SetMarkerStyle(ROOT.kFullCircle)
    gr.SetMarkerSize(1.5)
    gr.Draw("ACPE")
    xax = gr.GetXaxis(); yax = gr.GetYaxis()
    xax.SetRangeUser(-1.0,1.0)
    yax.SetRangeUser(0.994,1.006)
    yax.SetDecimals()
    xax.SetTitleOffset(1.1); xax.SetTitleSize(0.05)
    yax.SetTitleOffset(1.5); yax.SetTitleSize(0.05)
    xax.SetTitle('#Delta P (ADC counts)')
    yax.SetTitle('A/A_{true}')
    lat.DrawLatex(0.19, 0.92, '#bf{CMS}')
    lat.DrawLatex(0.73, 0.92, '(13 TeV)')
    canv.SaveAs("bias_vs_dped.pdf")
22,889 | e899d0931a39cc6046afb41e908e6a0f5fb5c3dc | from django.shortcuts import render
# Create your views here.
def book_now(request):
    """Render the booking page.

    NOTE(review): ``request.GET`` is truthy only when the query string
    carries parameters, so plain GET requests do not print - confirm
    whether the debug line was meant to fire on every GET.
    """
    if request.GET:
        print("request was GET!")
    return render(request, 'book-now.html')
def show_available_times(request):
    """Render the available-times page, logging the raw request object."""
    print(request)
    return render(request, 'available-times.html')
22,890 | 726424156a92942a62379e513d03d7eb62b22cc5 |
import re
from openpyxl import load_workbook
import math
import sys
#Calculates RPKM for each cell in a xslx file containing read counts
#python rpkm.py path_to_xslx_file path_to_output path_to_gene_length_file sheets(separated by spaces)
normExp_file=open(sys.argv[2],"w")
expression_file=load_workbook(sys.argv[1])
culture_data=expression_file["Culture"]
planta_data=expression_file["Planta"]
expression=""
totalReads1=0
totalReads2=0
totalReads3=0
totalReads4=0
totalReads5=0
totalReads6=0
totalReads7=0
totalReads8=0
for i in culture_data.iter_rows('A2:E8331'):
totalReads1+=float(i[1].value)
totalReads2+=float(i[2].value)
totalReads3+=float(i[3].value)
totalReads4+=float(i[4].value)
for i in planta_data.iter_rows('A2:E8331'):
totalReads5+=float(i[1].value)
totalReads6+=float(i[2].value)
totalReads7+=float(i[3].value)
totalReads8+=float(i[4].value)
totalReads=[totalReads1,totalReads2,totalReads3,totalReads4,totalReads5,totalReads6,totalReads7,totalReads8]
print totalReads
normExp1=0.0
normExp2=0.0
# RPKM = numberOfReads / ( geneLength/1000 * totalNumberOfReads/1,000,000 )
count=0
row=1
for i in culture_data.iter_rows('A2:E8331'):
row+=1
normExp=[]
with open(sys.argv[3]) as geneLengthFile:
for line in geneLengthFile:
if re.search(i[0].value,line):
count+=1
content=line.split("\t")
if int(content[1])!=0:
for j in range(1,5):
normExp.append(float(i[j].value)*1000000000/totalReads[j-1]/float(content[1]))
for k in planta_data.iter_rows('A'+str(row)+':E'+str(row)):
if k[0].value==i[0].value:
for j in range(5,9):
normExp.append(float(k[j-4].value)*1000000000/totalReads[j-1]/float(content[1]))
geneName=content[0]
normExp_file.write(geneName)
for n in range(0,len(normExp)):
normExp_file.write("\t")
normExp_file.write(str(normExp[i]))
normExp_file.write("\n")
else:
print line
print i[0].value
break
print count
normExp_file.close()
|
22,891 | dc36e4b7ae9e26f4b03f97c4580fb6a26728028c | # Name: Erin Rylie Clark
# Course: CSC 480
# Instructor: Daniel Kauffman
# Assignment: Board Stupid
# Term: Summer 2021
import math
import random
from boardstupid import *
from typing import Callable, Generator, Optional, Tuple, List
def expand(child: State) -> State:
    """Populate *child* with one State node per legal move of its board.

    Every new node starts with the exploration constant sqrt(2) as its UCB
    value, so unvisited moves are preferred by the selection step.
    """
    exploration: float = 2 ** 0.5
    game = child.board
    # TODO: Check if the board is solved
    for move_index in game.moves:
        successor = game.traverse(move_index)
        child.add_move(State(move_index, exploration, successor), exploration)
    return child
def simulate(state: GameState) -> int:
    """Play uniformly random moves until the initial move budget is spent
    and return the terminal utility of the resulting state."""
    remaining = list(state.moves)
    # One iteration per move available in the starting position.
    for _ in range(len(state.moves)):
        choice = random.choice(remaining)
        remaining.remove(choice)
        state = state.traverse(choice)
    return state.util
def update_ucbs(parent: State, C: float = 2 ** 0.5):
    """Recompute the UCB1 score of every visited child of *parent* and
    mirror it into parent.move_ucbs (unvisited children are left alone)."""
    for slot, node in enumerate(parent.moves):
        if node.attempts > 0:
            exploration = C * ((math.log(parent.attempts) / node.attempts) ** 0.5)
            node.ucb = node.wins / node.attempts + exploration
            parent.move_ucbs[slot] = node.ucb
def update_selected(state: GameState, move: State, win_ratio: float) -> float:
    """Record *move* as the selected move when its win ratio beats the best
    seen so far; return the (possibly updated) best ratio."""
    candidate = move.wins / move.attempts
    if candidate > win_ratio:
        state.selected = move.idx
        win_ratio = candidate
        print(state.selected)
    return win_ratio
def run_each_move(root: State, state: GameState) -> State:
    """Warm-up pass: run 50 playouts through every first-level move so the
    UCB values start from a reasonable estimate."""
    for slot, opening in enumerate(root.moves):
        for _ in range(50):
            opening = expand(opening)
            outcome, _ = MCTS(opening, state, False, 0)
            if outcome == state.player:
                opening.winner()
            else:
                opening.loser()
            root.attempts += 1
        root.moves[slot] = opening
        print("Move: ", opening.idx)
        print("  Wins: ", opening.wins)
    update_ucbs(root)
    return root
def MCTS(parent: State, state: GameState, root: int, win_ratio: float) -> Tuple[int, float]:
    """One iteration of Monte Carlo Tree Search below *parent*.

    Selects the child with the highest UCB score; at the frontier (child
    never visited or not yet expanded) it expands and runs one random
    simulation, otherwise it recurses.  The outcome is then backed up
    through *parent* (win/loss tally, attempts, UCB refresh).

    :param root: truthy when *parent* is the tree root; only then is
        state.selected refreshed via update_selected on a win.
    :param win_ratio: best win ratio seen so far at the root level.
    :return: (simulation outcome utility, possibly-updated win ratio).
    """
    # Find the maximum UCB value to explore; select that child.
    idx = parent.move_ucbs.index(max(parent.move_ucbs))
    # TODO: May want to make this a random selection instead of first
    child = parent.moves[idx]
    # Check if the child has moves expanded already.
    if child.attempts == 0 or len(child.moves) == 0:  # Expand if no moves
        child = expand(child)
        # Pick a child at random from the expanded set.
        sim_idx = random.randint(0, len(child.moves)-1)
        simulate_child = child.moves[sim_idx]
        # Perform one random playout from that position.
        outcome = simulate(simulate_child.board)
    else:  # Recurse if there are moves
        outcome, _ = MCTS(child, state, False, 0)
    # Back-propagate: update the selected child's win/loss record.
    if outcome == state.player:
        child.winner()
        if root:
            win_ratio = update_selected(state, child, win_ratio)
    else:
        child.loser()
    parent.attempts += 1
    update_ucbs(parent)
    return outcome, win_ratio
def find_best_move(state) -> None:
    """
    Search the game tree for the optimal move for the current player, storing
    the index of the move in the given GameState object's selected attribute.

    The move must be an integer indicating an index in the 3D board - ranging
    from 0 to 63 - with 0 as the index of the top-left space of the top board
    and 63 as the index of the bottom-right space of the bottom board.

    This function performs a Monte Carlo Tree Search, using additional
    functions as necessary. During the search, whenever a better move is
    found, the selected attribute is immediately updated for retrieval by
    the instructor's game driver. Each call to this function is given a set
    number of seconds to run; when the time limit is reached, the index
    stored in selected is used for the player's turn.
    """
    # (removed unused locals C and idx: C is already the default of
    # update_ucbs, and idx was never read)
    win_ratio: float = 0.0
    # Build the root node for the current position, expand its first level
    # of moves, then warm each of them up for better initial UCB estimates.
    root = State(None, None, state)
    root = expand(root)
    root = run_each_move(root, state)
    print("New Move UCBs: ")
    for m in range(len(root.move_ucbs)):
        print(" ", root.move_ucbs[m])
    # Main search loop: each iteration descends by maximum UCB, simulates a
    # playout, and updates state.selected when a better move is found.
    for _ in range(10000):
        _, win_ratio = MCTS(root, state, True, win_ratio)
|
22,892 | a8b49e6a22fdfc00cd42e9f49f5b98134d41cebd | # dp, space O(1), time O(n)
class Solution(object):
    """LeetCode 801 - Minimum Swaps To Make Sequences Increasing."""

    def minSwap(self, A, B):
        """
        :type A: List[int]
        :type B: List[int]
        :rtype: int

        DP over positions, O(n) time / O(1) space: track the minimum
        number of swaps in the prefix when position i is swapped vs kept.
        """
        INF = float('inf')
        cost_swapped, cost_kept = 1, 0
        for i in range(1, len(A)):
            next_swapped = next_kept = INF
            # Both sequences already increase at i: we may repeat the
            # previous decision (swap both i-1 and i, or neither).
            if A[i] > A[i - 1] and B[i] > B[i - 1]:
                next_swapped = min(next_swapped, cost_swapped + 1)
                next_kept = min(next_kept, cost_kept)
            # Cross comparison holds: swapping exactly one of i-1, i also
            # works, so we may flip the previous decision.
            if A[i] > B[i - 1] and B[i] > A[i - 1]:
                next_swapped = min(next_swapped, cost_kept + 1)
                next_kept = min(next_kept, cost_swapped)
            cost_swapped, cost_kept = next_swapped, next_kept
        return min(cost_swapped, cost_kept)
"""
We have two integer sequences A and B of the same non-zero length.
We are allowed to swap elements A[i] and B[i]. Note that both elements are in the same index position in their respective sequences.
At the end of some number of swaps, A and B are both strictly increasing. (A sequence is strictly increasing if and only if A[0] < A[1] < A[2] < ... < A[A.length - 1].)
Given A and B, return the minimum number of swaps to make both sequences strictly increasing. It is guaranteed that the given input always makes it possible.
Example:
Input: A = [1,3,5,4], B = [1,2,3,7]
Output: 1
Explanation:
Swap A[3] and B[3]. Then the sequences are:
A = [1, 3, 5, 7] and B = [1, 2, 3, 4]
which are both strictly increasing.
Note:
A, B are arrays with the same length, and that length will be in the range [1, 1000].
A[i], B[i] are integer values in the range [0, 2000].
"""
|
22,893 | 0cf0128834d32c78ad7ba718e2bacba57e040636 | #!/usr/bin/env ipython
# ==========================================================================================#
# $Id:$
# ========================================================================
# @file DiCharm/Analysis/Single.py
#
# Various selectors for 1xCharm analysis
#
# This file is a part of
# <a href="http://cern.ch/lhcb-comp/Analysis/Bender/index.html">Bender project</a>
# <b>``Python-based Interactive Environment for Smart and Friendly
# Physics Analysis''</b>
#
# The package has been designed with the kind help from
# Pere MATO and Andrey TSAREGORODTSEV.
# And it is based on the
# <a href="http://cern.ch/lhcb-comp/Analysis/LoKi/index.html">LoKi project:</a>
# ``C++ ToolKit for Smart and Friendly Physics Analysis''
#
# By usage of this code one clearly states the disagreement
# with the smear campaign of Dr.O.Callot et al.:
# ``No Vanya's lines are allowed in LHCb/Gaudi software.''
#
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
#
# $Revision$
# Last modification $Date$
# by $Author$
# =============================================================================
"""
Various selectors for 1xCharm analysis
This file is a part of BENDER project:
``Python-based Interactive Environment for Smart and Friendly Physics Analysis''
The project has been designed with the kind help from Pere MATO and Andrey TSAREGORODTSEV.
And it is based on the LoKi project:
``C++ ToolKit for Smart and Friendly Physics Analysis''
By usage of this code one clearly states the disagreement
with the smear campaign of Dr.O.Callot et al.:
``No Vanya's lines are allowed in LHCb/Gaudi software.''
"""
# =============================================================================
__author__ = 'Vanya BELYAEV Ivan.Belyaev@cern.ch'
__date__ = "2011-07-22"
__version__ = '$Revision$'
# =============================================================================
import ROOT
from AnalysisPython.PyRoUts import hID, SE, VE
from AnalysisPython.PySelector import Selector
from AnalysisPython.progress_bar import ProgressBar
# =============================================================================
from math import atan2, pi
import DiCharm.Ana as Ana
# =============================================================================
class Cuts1(object):
    """Selection predicate for a single charm candidate.

    The callable checks, for one ntuple entry, the 'good_<part>' flag,
    an optional trigger-on-signal (TOS) requirement and the pT/rapidity
    acceptance window; J/psi candidates get no lower pT cut.
    """

    def __init__(self,
                 part,
                 tos_1,
                 pt_min):
        # part   : particle suffix used in the ntuple branch names
        # tos_1  : whether to require the TOS trigger decision
        # pt_min : lower pT cut in GeV (forced to 0 for J/psi)
        first = part
        first_name = '_' + first
        first_trg = first + '_'
        self.tos1 = lambda s: Ana.isTos(s, first_trg)
        self.good_1 = lambda s: getattr(s, 'good' + first_name)
        if tos_1:
            # NOTE(review): this returns the bound tos1 callable itself
            # (always truthy) instead of evaluating it; likely intended
            # self.tos1(s) -- confirm before relying on the TOS cut.
            self.tos = lambda s: self.tos1
        else:
            self.tos = lambda s: True
        self.pt_1 = lambda s: getattr(s, 'pt' + first_name)
        self.y_1 = lambda s: getattr(s, 'y' + first_name)
        # J/psi candidates: no lower pT cut.
        if 0 <= first_name.find('psi'):
            pt_min = 0
        self.pt_min = pt_min
        print ' CUTS: ptmin = %f GeV ' % self.pt_min
        if 0 <= first_name.find('psi'):
            self.ok1 = lambda s: 0 <= self.pt_1(
                s) <= 12 and 2 <= self.y_1(s) <= 4.0
        else:
            self.ok1 = lambda s: pt_min <= self.pt_1(
                s) <= 12 and 2 <= self.y_1(s) <= 4.0

    def __call__(self, item):
        # Candidate passes if it is in acceptance, flagged good, and
        # satisfies the (optional) trigger requirement.
        ok = True
        ok = ok and self.ok1(item)
        ok = ok and self.good_1(item)
        ok = ok and self.tos(item)
        return ok
class Weights1(object):
    """Per-candidate efficiency weight for a single charm particle.

    Picks the reconstruction and trigger efficiency parameterisation
    from the particle name, combines it with PID and tracking
    efficiencies, and returns 1/efficiency as the event weight
    (0 when the efficiency is not positive).
    """

    def __init__(self,
                 first,
                 tos_1):
        first_name = '_' + first
        first_trg = first + '_'
        # Select efficiency functions by particle type (name substring).
        if 0 <= first_name.find('psi'):
            self._eff_fun_1 = lambda s: Ana.eff_Jpsi(s, first_name)
            self._eff_trg_1 = lambda s: Ana.trgEff_Jpsi(s, first_name)
        elif 0 <= first_name.find('D0'):
            self._eff_fun_1 = lambda s: Ana.eff_D0(s, first_name)
            self._eff_trg_1 = lambda s: Ana.trgEff_D0(s, first_name)
        elif 0 <= first_name.find('Dp'):
            self._eff_fun_1 = lambda s: Ana.eff_Dp(s, first_name)
            self._eff_trg_1 = lambda s: Ana.trgEff_Dp(s, first_name)
        elif 0 <= first_name.find('Ds'):
            self._eff_fun_1 = lambda s: Ana.eff_Ds(s, first_name)
            self._eff_trg_1 = lambda s: Ana.trgEff_Ds(s, first_name)
        elif 0 <= first_name.find('Lc'):
            self._eff_fun_1 = lambda s: Ana.eff_Lc(s, first_name)
            self._eff_trg_1 = lambda s: Ana.trgEff_Lc(s, first_name)
        else:
            raise AttributeError("Invalid first name '%s'" % first_name)
        # Trigger efficiency only applies when a TOS requirement is used;
        # otherwise a unit value-with-error placeholder is used.
        if tos_1:
            self._eff_trg = lambda s: self._eff_trg_1(s)
        else:
            self._eff_trg = lambda s: VE(1, 0)
        self.pt_1 = lambda s: getattr(s, 'pt' + first_name)
        self.y_1 = lambda s: getattr(s, 'y' + first_name)

    def __call__(self, item):
        ## acceptance & reconstruction & selection
        e1 = self._eff_fun_1(item)
        # PID: pions, kaons & protons
        ePi = Ana.pidEff_pions(item)
        eK = Ana.pidEff_kaons(item)
        eP = Ana.pidEff_protons(item)
        # correct for track recontruction efficiency
        eTr = Ana.recEff_tracks(item)
        # trigger efficiency
        eTrg = self._eff_trg(item)
        eff = VE(1, 0)
        eREC = e1
        ePID = ePi * eK * eP
        eTRG = eTrg
        eTRK = eTr
        #
        # Total efficiency is the product of all partial efficiencies.
        eff = VE(1, 0)
        eff *= eREC
        eff *= ePID
        eff *= eTRG
        eff *= eTRK
        # final result: weight = 1/efficiency; 0 guards against a
        # non-positive efficiency value
        weight = 1.0
        if 0 < eff.value():
            weight = 1.0 / eff.value()
        else:
            weight = 0.0
        # Warn on suspicious weights (zero or implausibly large).
        if 0 == weight or weight > 5.e+4:
            print ' ZERO weight : ', weight, \
                ( self.pt_1( item ), self.y_1 ( item ) ), \
                (e1, ePi, eK, eP, eTr, eTrg)
        return weight
# ========================================================================
# Create&Fill the basic dataset for RooFit
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
class C1(Selector):
    """
    Create and fill the basic RooFit dataset for a single charm candidate.

    For every accepted tree entry the candidate mass, kinematics, global
    event counters and the efficiency weight are stored in a RooDataSet,
    retrievable via dataset().
    """

    def __init__(self,
                 cuts,
                 dvar,
                 weight,
                 accept):
        # cuts   : callable applied to each entry (entry -> bool)
        # dvar   : (name, mass_low, mass_high) of the discriminating mass
        # weight : callable giving the per-entry efficiency weight
        # accept : prescale - only every accept-th event is processed
        Selector.__init__(self, None, self)  # initialize the base

        self._cuts = cuts
        self._weight = weight
        self._dvar = dvar[0]

        # Observables stored in the dataset.
        self.m = ROOT.RooRealVar(
            "m_" + dvar[0], "mass(C)", dvar[1], dvar[2])
        self.pt = ROOT.RooRealVar("pt", "pt(C)", 0, 12.0)
        self.y = ROOT.RooRealVar("y", "y(C)", 2.0, 4.5)
        self.lv01 = ROOT.RooRealVar("lv01", "lv01", -1.01, 1.01)
        self.chi2dtf = ROOT.RooRealVar(
            "chi2dtf", "chi2(dtf)/ndf", 0, 1.e+100)
        self.weight = ROOT.RooRealVar("weight", "weight", 0.0, 1.e+20)

        self.varset = ROOT.RooArgSet(
            #
            self.m,
            self.pt,
            self.y,
            self.lv01,
            self.chi2dtf,
            #
            # efficiency weight
            self.weight
        )

        # Global event characteristics (GEC counters).
        self.nPV = ROOT.RooRealVar("nPV", 'n(PV)', 0, 20)
        self.nSpd = ROOT.RooRealVar("nSpd", 'n(Spd)', 0, 20000)
        self.nBest = ROOT.RooRealVar("nBest", 'n(Best)', 0, 5000)
        self.nLong = ROOT.RooRealVar("nLong", 'n(Long)', 0, 5000)
        self.nOT = ROOT.RooRealVar("nOT", 'n(OT)', 0, 50000)

        self.varset.add(self.nPV)
        self.varset.add(self.nSpd)
        self.varset.add(self.nBest)
        self.varset.add(self.nLong)
        self.varset.add(self.nOT)

        self.data = ROOT.RooDataSet(
            #
            "Charm",
            "Charm",
            #
            self.varset
        )

        self._events = 0
        self._counter = SE()
        self._accept = accept
        self._progress = None
        #

    def dataset(self):
        """Return the accumulated RooDataSet."""
        return self.data

    # the only one important method
    def Process(self, entry):
        """
        Process one tree entry: apply the prescale, the kinematic window
        and the user cuts, then fill the dataset.
        Returns 1 when the entry is stored, 0 otherwise.
        """
        #
        # == getting the next entry from the tree
        #
        if self.GetEntry(entry) <= 0:
            return 0  # RETURN

        self._events += 1
        if 0 == self._events % 100000:
            print self._events
        # Prescale: keep only every accept-th event.
        if 0 != self._events % self._accept:
            return 0
        #
        # == for more convenience
        #
        bamboo = self.fChain

        # Kinematic acceptance window (rapidity and transverse momentum).
        y = getattr(bamboo, 'y_' + self._dvar)
        if not 2.0 <= y <= 4.0:
            return 0
        pt = getattr(bamboo, 'pt_' + self._dvar)
        if not 0.0 <= pt <= 12.0:
            return 0
        # apply cuts
        if not self . _cuts(bamboo):
            return 0
        # calculate & store the efficiency weight
        w = self._weight(bamboo)
        if 0 == w:
            return 0  # skip invalid weights

        self.weight . setVal(w)
        self.pt . setVal(pt)
        self.y . setVal(y)

        mass = getattr(bamboo, 'm_' + self._dvar)
        lv01 = getattr(bamboo, 'lv01_' + self._dvar)
        dtf = getattr(bamboo, 'dtf_' + self._dvar)

        self.m . setVal(mass)
        self.lv01 . setVal(lv01)
        self.chi2dtf . setVal(dtf)
        # GEC
        self.nPV . setVal(bamboo.nPV_rec)
        self.nSpd . setVal(bamboo.nSpd_gec)
        self.nBest . setVal(bamboo.nBest_rec)
        self.nLong . setVal(bamboo.nLong_rec)
        self.nOT . setVal(bamboo.nOT_gec)

        self.data .add(self.varset)

        return 1
# ========================================================================
# Create&Fill the basic dataset for RooFit
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
class Psi(C1):
    """Fill the basic RooFit dataset for J/psi candidates."""

    def __init__(self,
                 cuts,
                 weight=lambda s: 1,
                 accept=50000):
        # J/psi mass window in GeV, applied on top of the user cuts.
        low, high = 3.0, 3.2
        window = lambda s: low <= s.m_psi <= high and cuts(s)
        C1.__init__(self, window, ('psi', low, high), weight, accept)
# ========================================================================
# Create&Fill the basic dataset for RooFit
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
class D0(C1):
    """Fill the basic RooFit dataset for D0 candidates."""

    def __init__(self,
                 cuts,
                 weight=lambda s: 1,
                 accept=50000):
        # D0 mass window in GeV, applied on top of the user cuts.
        low, high = 1.8, 1.92
        window = lambda s: low <= s.m_D0 <= high and cuts(s)
        C1.__init__(self, window, ('D0', low, high), weight, accept)
# ========================================================================
# Create&Fill the basic dataset for RooFit
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
class Dp(C1):
    """Fill the basic RooFit dataset for D+ candidates."""

    def __init__(self,
                 cuts,
                 weight=lambda s: 1,
                 accept=10000):
        # D+ mass window in GeV, applied on top of the user cuts.
        low, high = 1.82, 1.91
        window = lambda s: low <= s.m_Dp <= high and cuts(s)
        C1.__init__(self, window, ('Dp', low, high), weight, accept)
# ========================================================================
# Create&Fill the basic dataset for RooFit
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
class Ds(C1):
    """Fill the basic RooFit dataset for Ds+ candidates."""

    def __init__(self,
                 cuts,
                 weight=lambda s: 1,
                 accept=5000):
        # Ds+ mass window in GeV, applied on top of the user cuts.
        low, high = 1.9, 2.04
        window = lambda s: low <= s.m_Ds <= high and cuts(s)
        C1.__init__(self, window, ('Ds', low, high), weight, accept)
# ========================================================================
# Create&Fill the basic dataset for RooFit
# @date 2011-07-22
# @author Vanya BELYAEV Ivan.Belyaev@cern.ch
class Lc(C1):
    """Fill the basic RooFit dataset for Lambda_c+ candidates."""

    def __init__(self,
                 cuts,
                 weight=lambda s: 1,
                 accept=50):
        # Lambda_c+ mass window in GeV, applied on top of the user cuts.
        low, high = 2.24, 2.33
        window = lambda s: low <= s.m_Lc <= high and cuts(s)
        C1.__init__(self, window, ('Lc', low, high), weight, accept)
# =============================================================================
if '__main__' == __name__:
print 80 * '*'
print __doc__
print ' Author : ', __author__
print ' Version : ', __version__
print ' Date : ', __date__
print ' Symbols : ', __all__
print 80 * '*'
# =============================================================================
# The END
# =============================================================================
|
22,894 | c8391a5e8cf486acc8bf564146a6678167b4fb22 | class Person():
def __init__(self, name, sername, money, hp, skorost):
self.name = name
self.sername = sername
self.money = money
self.hp = hp
self.skorost = skorost
self.atack = atack
self.proviziya = proviziya
def wazzep(self):
print("ะะตะฝั ะทะพะฒัั " + self.name + self.sername + "ะธะผะตั" + self.money + self.hp + self.skorost + self.atack + self.provizia)
if self.money == 1000:
print("ะั ะพัะตะฝั ะฑะพะณะฐัั!")
if self.hp == 1000:
print("ะั ะฟะพััะธ ะฑะตะทัะผะตััะฝั")
if self.skorost == 1000:
print("ะั ะพัะตะฝั ะฑัััั")
person1 = Person("Tom", "Redle", "1000", "1000", "1000")
person1.wazzep()
|
22,895 | c704c86a68091dbc82a044bdb35a6a822bf95440 | #!/usr/bin/env python
import ipcalc
import json
import re
import sys
import socket
import getpass
import requests
from termcolor import colored, cprint
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Disable the HTTPS warning ... for now
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main(argv):
    """Look up each host/IP given on the command line in Infoblox and print
    its network, range, DNS names and extensible attributes.

    Retries the whole lookup (re-prompting for the password) whenever the
    Infoblox response cannot be decoded as JSON, which is what happens when
    the credentials are rejected.
    """
    while True:
        try:
            # User Info
            user = getpass.getuser()  # Assumes your compuer login is the same as your InfoBlox login
            password = getpass.getpass("Enter Infoblox Password: ")
            IBlox = 'Your InfoBlox Info Here'  ## Put your Infoblox DNS name or IP here.
            for x in argv[1:]:
                whatIP = x
                # Sets a test to make sure it looks like an IP. Yes, non IP values could match
                # but this works well for what I need it for.
                # Raw string fixes the invalid "\d"/"\." escape sequences that
                # are deprecated (SyntaxWarning) in modern Python.
                pat = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
                testIP = pat.match(whatIP)
                if testIP:
                    ip_address = whatIP  # If it matches IP format, just carry on and set the variable.
                else:
                    try:
                        ip_address = socket.gethostbyname(whatIP)  # If it's anything else, try and do a DNS lookup on it.
                        print("Looking for: ", x, "IP - ", ip_address)
                    except socket.gaierror:
                        print("")
                        print("No DNS Entry exists for that host - exiting script")
                        print("")
                        sys.exit()
                print('')
                api_url_ip = requests.get('https://' + IBlox + '/wapi/v1.4.2/ipv4address?ip_address=' + ip_address, auth=(user,password), verify=False)
                api_url_network = requests.get('https://' + IBlox + '/wapi/v1.4.2/network?contains_address=' + ip_address, auth=(user,password), verify=False)
                ip_data = json.loads(api_url_ip.text)
                network_data = json.loads(api_url_network.text)
                # Uncomment below if you want to see the output of what JSON is pulling.
                #print (json.dumps(ip_data))

                def getNames(api_get_data):
                    """Return the DNS names list of the first record."""
                    for string in api_get_data:
                        return string['names']

                def getNetwork(api_get_data):
                    """Return the containing network; exit if Infoblox
                    reports an error for this IP."""
                    if 'Error' in api_get_data:
                        print("")
                        print("No network for this IP exists. Exiting")
                        print("")
                        sys.exit()
                    else:
                        for network in api_get_data:
                            return network.get('network')

                def getNetName(api_get_data):
                    """Return the comment (description) of the first network."""
                    for net_name in (api_get_data):
                        return net_name.get('comment')

                def getREF(api_input_ref):
                    """Return the _ref handle of the last record in a response."""
                    decode = json.loads(api_input_ref.text)
                    for ref in decode:
                        new_net_ref = ref['_ref']
                    return new_net_ref

                def getType(api_get_data):
                    """Return the record types of the first record."""
                    for type in api_get_data:
                        return type.get('types')

                def getEXT(api_input_ref):
                    """Pretty-print the extensible attributes of a record,
                    skipping internal _ref values."""
                    if "N/A" in api_input_ref:
                        pass
                    else:
                        decode = json.loads(api_input_ref.text)
                        if 'extattrs' in decode:
                            for attribute, value in decode['extattrs'].items():
                                if attribute:
                                    for second, val in value.items():
                                        if '_ref' in str(val):
                                            pass
                                        else:
                                            print(attribute, "=", val)
                                else:
                                    pass
                        else:
                            pass

                network_address = getNetwork(ip_data)
                net_ref = getREF(api_url_network)
                # Now let's get the attributes for the IP and Network
                net_extattr_get = requests.get('https://' + IBlox + '/wapi/v2.7/' + net_ref + '?_return_fields%2B=extattrs&*', verify=False, auth=(user,password))
                # Below is the info that build the IP list from the above NetMRI gets.
                subnet = ipcalc.Network(getNetwork(ip_data))
                # Generate empty list
                AddrList = []
                # Use the calculator to build a list of all the IP addresses
                for x in ipcalc.Network(getNetwork(ip_data)):
                    AddrList.append(str(x))
                print('')
                print('#' * 70)
                print('')
                print('IP ' + ip_address + ' is part of ' + getNetwork(ip_data))
                print('Network: ', str(subnet.network()) + " | " + "Range: ", AddrList[0], " - ", AddrList[-2] + " | " + "Usable: ", len(AddrList))
                print('Network Mask: ', str(subnet.netmask()), "|", " Default GW:", AddrList[-1])
                print('Network Comment: ' + '"{}"'.format(getNetName(network_data)))
                print('')
                cprint('Network Attributes:', 'red', attrs=['bold'])
                getEXT(net_extattr_get)
                print('-' * 70)
                if 'A' in getType(ip_data):
                    cprint('The DNS names associated with ' + ip_address + ' are:', 'red', attrs=['bold'])
                    print('\n'.join(str(p) for p in getNames(ip_data)))
                else:
                    cprint('The DNS names associated with ' + ip_address + ' are:', 'red', attrs=['bold'])
                    print('No DNS records associated with this IP')
                print('')
                print('#' * 70)
            # Exit after ALL arguments have been processed (exiting inside
            # the loop silently ignored every argument after the first).
            sys.exit()
        except ValueError:
            print("Incorrect Infoblox Password - Try Again")
if __name__ == "__main__":
main(sys.argv) |
22,896 | 7989617c3523cfe5016fd85e0b6d699211972e37 |
from datetime import datetime as dt
from cached_property import cached_property
from sqlalchemy import Column, String, ForeignKey
from lxml import html
from fanfic.services import session
from fanfic.parsers import ProfileDetailsParser
from fanfic.utils import extract_int
from .base import Base
from .mixins import ScrapyItem
from .profile import Profile
class ProfileHTML(Base, ScrapyItem):
    """Raw scraped profile HTML for one book, keyed by book_id.

    Acts as a staging table: parse() extracts the structured fields from
    the stored fragment and maps them into a Profile row; ingest() does
    this for every stored fragment.
    """

    __tablename__ = 'profile_html'

    # One stored HTML fragment per book.
    book_id = Column(
        ForeignKey('book_id.book_id'),
        primary_key=True,
        autoincrement=False,
    )

    html = Column(String, nullable=False)

    @classmethod
    def ingest(cls):
        """Parse every stored HTML row and load the results into Profile.
        """
        for html in cls.query.all():
            session.add(html.parse())

        session.commit()

    @cached_property
    def tree(self):
        """Parse the stored HTML fragment into an lxml element (cached).

        Returns: lxml element
        """
        return html.fragment_fromstring(self.html)

    def title(self):
        """Query the title.
        """
        return self.tree.xpath('b/text()')[0]

    def user_id(self):
        """Query the author's numeric user id (parsed out of the href).
        """
        href = self.tree.xpath('a/@href')[0]

        return extract_int(href)

    def username(self):
        """Query the username.
        """
        return self.tree.xpath('a/text()')[0]

    def summary(self):
        """Query the summary.
        """
        return self.tree.xpath('div/text()')[0]

    def xutimes(self):
        """Query data-xutime timestamps.
        """
        return self.tree.xpath('//*[@data-xutime]/@data-xutime')

    def published(self):
        """Query the published timestamp.
        """
        xutimes = self.xutimes()

        # If there are 2 xutimes, published is the second. Otherwise, it is the
        # first and only xutime.
        offset = 0 if len(xutimes) == 1 else 1

        return dt.fromtimestamp(int(xutimes[offset]))

    def updated(self):
        """Query the updated timestamp (None when never updated).
        """
        xutimes = self.xutimes()

        # If there are 2 xutimes, updated is the first. Otherwise, there is no
        # updated date, just published.
        return (
            dt.fromtimestamp(int(xutimes[0]))
            if len(xutimes) == 2 else None
        )

    def details_string(self):
        """Query the raw metadata string.
        """
        parts = self.tree.xpath('span[position()=last()]//text()')

        return ''.join(parts)

    def details(self):
        """Parse fields out of the details string.
        """
        details = ProfileDetailsParser(self.details_string())

        return dict(
            follows=details.follows(),
            favorites=details.favorites(),
            rating=details.rating(),
            language=details.language(),
            genres=details.genres(),
            characters=details.characters(),
        )

    def parse(self):
        """Map this row into the Profile model.

        Returns: Profile
        """
        details = self.details()

        return Profile(
            book_id=self.book_id,
            title=self.title(),
            user_id=self.user_id(),
            username=self.username(),
            summary=self.summary(),
            published=self.published(),
            updated=self.updated(),
            **details
        )
|
22,897 | b398a3378c764a9deadd03ff3fe8e051f78f08f4 | #!/usr/local/bin/python3
from PIL import Image
import numpy as np
import re
import cv2
# Apply a 3x3 smoothing convolution to an image and write the result.
img = cv2.imread('D:/gray.png')
if img is None:
    # cv2.imread returns None (no exception) when the file is missing or
    # unreadable; fail loudly instead of crashing inside filter2D.
    raise FileNotFoundError("Could not read input image: D:/gray.png")
# 3x3 Gaussian-like kernel (weights sum to 1).
kernel = np.array([[0.0625, 0.125, 0.0625],
                   [0.125, 0.250, 0.125],
                   [0.0625, 0.125, 0.0625]], dtype=np.float32)
dst = cv2.filter2D(img, -1, kernel)
cv2.imwrite('D:/blurredconv.png', dst)
22,898 | 6724111d378ca74b9a68100ca148c601eca7db4f | #Implementacion de marco principal de registro de producto
from tkinter import *
import tkinter as tk
from tkinter import messagebox #Ventanas emergentes
import sqlite3
ventana=tk.Tk()
ventana.title("Registro de producto")
ventana.resizable (0,0)
ventana.geometry("350x350")
ventana.iconbitmap("tokyo.ico")
barraMenu=Menu(ventana)
ventana.config(menu=barraMenu, width=300, height=300)
#-------------------FUNCIONES DE LA VENTANA--------------------------
def conexionBBDD():
    """Create the Productos database file and its DATOSPRODUCTOS table.

    Shows an info popup on success and a warning when the table already
    exists.
    """
    miConexion=sqlite3.connect("Productos")
    miCursor=miConexion.cursor()
    try:
        miCursor.execute('''
        CREATE TABLE DATOSPRODUCTOS (
        ID INTEGER PRIMARY KEY AUTOINCREMENT,
        NOMBRE_PRODUCTO VARCHAR(50),
        PRECIO_PRODUCTO INTEGER,
        CANTIDAD_PRODUCTO INTEGER,
        TIPO_PRODUCTO VARCHAR(60),
        EXISTENCIA_PRODUCTO INTEGER)
        ''')
        messagebox.showinfo("BBDD","BBDD creada con exito")
    except sqlite3.OperationalError:
        # Narrowed from a bare 'except:': only SQL errors such as
        # "table already exists" should be treated as "already created".
        messagebox.showwarning("!Atencion!","La BBDD ya existe")
    finally:
        miConexion.close()  # release the sqlite handle (was leaked before)
def salirAplicacion():
    """Ask for confirmation and close the main window if the user agrees."""
    respuesta = messagebox.askquestion("Salir","ยฟdeseas salir de la aplicacion?")
    if respuesta == "yes":
        ventana.destroy()
def limpiarCampos():
    """Reset every form field to an empty string."""
    campos = (IdProducto, NombreProducto, PrecioProducto,
              CantidadProducto, TipoProducto, ExistenciaProducto)
    for campo in campos:
        campo.set("")
def crear():
    """Insert a new product row built from the current form values."""
    miConexion=sqlite3.connect("Productos")
    miCursor=miConexion.cursor()
    #CONSULTAS PARAMETRIZADAS EVITA INYECCIONES SQL
    datos=NombreProducto.get(),PrecioProducto.get(),CantidadProducto.get(),TipoProducto.get(),ExistenciaProducto.get()
    miCursor.execute("INSERT INTO DATOSPRODUCTOS VALUES(NULL,?,?,?,?,?)",(datos))
    miConexion.commit()
    miConexion.close()  # release the sqlite handle (was leaked before)
    messagebox.showinfo("BBDD", "Registro insertado con exito")
def leer():
    """Load the product whose ID is in the form into the form fields."""
    miConexion=sqlite3.connect("Productos")
    miCursor=miConexion.cursor()
    # Parameterized query: the previous string concatenation of the ID
    # field was vulnerable to SQL injection.
    miCursor.execute("SELECT * FROM DATOSPRODUCTOS WHERE ID=?", (IdProducto.get(),))
    elProducto=miCursor.fetchall()
    for producto in elProducto:
        IdProducto.set(producto[0])
        NombreProducto.set(producto[1])
        PrecioProducto.set(producto[2])
        CantidadProducto.set(producto[3])
        TipoProducto.set(producto[4])
        ExistenciaProducto.set(producto[5])
    miConexion.commit()
    miConexion.close()  # release the sqlite handle (was leaked before)
def actualizar():
    """Update the product identified by the ID field with the form values."""
    miConexion=sqlite3.connect("Productos")
    miCursor=miConexion.cursor()
    # Parameterized UPDATE: fixes SQL injection via string concatenation
    # and the typo ("TIPO_PRODUCTO= ' " + ...) that stored the product
    # type with a spurious leading space.
    miCursor.execute(
        "UPDATE DATOSPRODUCTOS SET NOMBRE_PRODUCTO=?, PRECIO_PRODUCTO=?, "
        "CANTIDAD_PRODUCTO=?, TIPO_PRODUCTO=?, EXISTENCIA_PRODUCTO=? "
        "WHERE ID=?",
        (NombreProducto.get(), PrecioProducto.get(), CantidadProducto.get(),
         TipoProducto.get(), ExistenciaProducto.get(), IdProducto.get()))
    miConexion.commit()
    miConexion.close()  # release the sqlite handle (was leaked before)
    messagebox.showinfo("BBDD", "Registro actualizado con exito")
def eliminar():
    """Delete the product whose ID is in the form."""
    miConexion=sqlite3.connect("Productos")
    miCursor=miConexion.cursor()
    # Parameterized DELETE instead of string concatenation (SQL injection).
    miCursor.execute("DELETE FROM DATOSPRODUCTOS WHERE ID=?", (IdProducto.get(),))
    miConexion.commit()
    miConexion.close()  # release the sqlite handle (was leaked before)
    messagebox.showinfo("BBDD","Registro borrado con exito")
def acercaDe():
    """Show the 'about' dialog."""
    mensaje = "Proyecto Sistema Registro de Inventario de Productos"
    messagebox.showinfo("Acerca de...", mensaje)
def Licencia():
    """Show the license dialog."""
    mensaje = "Tokyo soft 2019 Todos los derechos reservados"
    messagebox.showinfo("Licencia", mensaje)
#Implementacion de panel que contendra los campos
cuadroRegistro=Frame(ventana)
cuadroRegistro.pack(fill="both",expand="True")
#Implamentacion de etiquetas
Id_etiqueta=Label(cuadroRegistro,text="Id producto")
Id_etiqueta.grid(row=0,column=0,sticky="e",padx=10,pady=10)
NombreProducto_etiqueta=Label(cuadroRegistro,text="Nombre producto")
NombreProducto_etiqueta.grid(row=1,column=0,sticky="e",padx=10,pady=10)
PrecioProducto_etiqueta=Label(cuadroRegistro,text="Precio producto")
PrecioProducto_etiqueta.grid(row=2,column=0,sticky="e",padx=10,pady=10)
CantidadProducto_etiqueta=Label(cuadroRegistro,text="Cantidad producto")
CantidadProducto_etiqueta.grid(row=3,column=0,sticky="e",padx=10,pady=10)
TipoProducto_etiqueta=Label(cuadroRegistro,text="Tipo producto")
TipoProducto_etiqueta.grid(row=4,column=0,sticky="e",padx=10,pady=10)
ExistenciaProducto_etiqueta=Label(cuadroRegistro,text="Existencia producto")
ExistenciaProducto_etiqueta.grid(row=5,column=0,sticky="e",padx=10,pady=10)
#Implementacion de los campos de captura
IdProducto=StringVar()
NombreProducto=StringVar()
PrecioProducto=StringVar()
CantidadProducto=StringVar()
TipoProducto=StringVar()
ExistenciaProducto=StringVar()
IdProducto_campo=Entry(cuadroRegistro,textvariable=IdProducto)
IdProducto_campo.grid(row=0,column=1,padx=10,pady=15)
NombreProducto_campo=Entry(cuadroRegistro,textvariable=NombreProducto)
NombreProducto_campo.grid(row=1,column=1,padx=10,pady=15)
PrecioProducto_campo=Entry(cuadroRegistro,textvariable=PrecioProducto)
PrecioProducto_campo.grid(row=2,column=1,padx=10,pady=15)
CantidadProducto_campo=Entry(cuadroRegistro,textvariable=CantidadProducto)
CantidadProducto_campo.grid(row=3,column=1,padx=10,pady=15)
TipoProducto_campo=Entry(cuadroRegistro,textvariable=TipoProducto)
TipoProducto_campo.grid(row=4,column=1,padx=10,pady=15)
ExistenciaProducto_campo=Entry(cuadroRegistro,textvariable=ExistenciaProducto)
ExistenciaProducto_campo.grid(row=5,column=1,padx=10,pady=15)
#-----------------------------BARRA DE MENU----------------------------------
bbddMenu=Menu(barraMenu, tearoff=0)
bbddMenu.add_command(label="Conectar",command=conexionBBDD)
bbddMenu.add_command(label="Salir",command=salirAplicacion)
borrarMenu=Menu(barraMenu, tearoff=0)
borrarMenu.add_command(label="Borrar campos",command=limpiarCampos)
crudMenu=Menu(barraMenu, tearoff=0)
crudMenu.add_command(label="Crear",command=crear)
crudMenu.add_command(label="Leer",command=leer)
crudMenu.add_command(label="Actualizar",command=actualizar)
crudMenu.add_command(label="Borrar",command=eliminar)
ayudaMenu=Menu(barraMenu, tearoff=0)
ayudaMenu.add_command(label="Licencia", command=Licencia)
ayudaMenu.add_command(label="Acerca de...", command=acercaDe)
barraMenu.add_cascade(label="BBDD", menu=bbddMenu)
barraMenu.add_cascade(label="Borrar", menu=borrarMenu)
barraMenu.add_cascade(label="CRUD", menu=crudMenu)
barraMenu.add_cascade(label="Ayuda", menu=ayudaMenu)
ventana.mainloop() |
22,899 | c948d09385f4b2b380c9f62e312b9230039b1ffb | # makeSet.py
mylist = [1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5]
print(len(mylist))

# A set is a data structure that does not allow duplicate elements.
# (The original Korean comments were word-wrapped onto lines without a
# leading '#', which made the file a SyntaxError; rejoined and translated.)
# Related function : set()
# Features         : provides union, intersection, difference, etc.
# Printed set results use curly braces.
# Methods          : add(), update(), remove()
myset = set(mylist)
print(myset)

newlist = list(myset)
print(newlist)

set1 = set([1, 2, 3])
print(set1)
print('-' * 30)

set1.add(4)
print(set1)
print('-' * 30)

set1.update([5, 6, 7])
print(set1)
print('-' * 30)

set1.remove(4)
print(set1)
print('-' * 30)

set3 = set([1, 2, 3, 4])
set4 = set([3, 4, 5, 6])

set5 = set3.intersection(set4)
print(set5)
print('-' * 30)

set6 = set3.union(set4)
print(set6)
print('-' * 30)

set7 = set3.difference(set4)
print(set7)
print('-' * 30)

# Set difference is not commutative:
# set3 - set4 and set4 - set3 are different.
set8 = set4.difference(set3)
print(set8)
print('-' * 30)

# Which data structure fits each case?
# 1) Member sign-up info (id 'hong', name 'Hong Gil-dong', address 'Mapo'):
#    dict
# 2) Lotto number generator : set
# 3) Bulletin-board post titles : list
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.