import os

import numpy as np
import pandas as pd
from scipy import signal
from sklearn.model_selection import train_test_split


class ProcessSignalData(object):
    def __init__(self):
        # path to video data from signal_output.py
        self.dir = './processed_new/videos'
        self.full_path = ''
        self.dataframe = pd.DataFrame()
        self.real_data = pd.DataFrame()
        self.fake_data = pd.DataFrame()
        self.dataset = pd.DataFrame()
        self.real_data_mean = {}
        self.fake_data_mean = {}
        self.real_data_var = {}
        self.fake_data_var = {}
        self.real_data_std = {}
        self.fake_data_std = {}
        self.real_data_psd = {}
        self.fake_data_psd = {}
        self.real_data_csd = {}
        self.fake_data_csd = {}
        self.real_data_f1 = {}
        self.fake_data_f1 = {}
        self.real_data_test = {}
        self.fake_data_test = {}
        self.real_data_RCCE = {}
        self.real_data_LCCE = {}
        self.real_data_LCRC = {}
        self.fake_data_RCCE = {}
        self.fake_data_LCCE = {}
        self.fake_data_LCRC = {}
        self.real_count = 0
        self.fake_count = 0
        self.vid_count = 0
        # output locations for the raw difference signals and derived features
        self.data_path_lcce = './lcce250.csv'
        self.data_path_lcrc = './lcrc250.csv'
        self.data_path_rcce = './rcce250.csv'
        self.data_path_m = './mean_data16.csv'
        self.data_path_v = './new_chrom/var_data16.csv'
        self.data_path_s = './new_chrom/std_data16.csv'
        self.data_path_p = './new_chrom/psd_data16.csv'
        self.data_path_c = './new_chrom/csd_data_128.csv'
        self.data_path_f1 = './f1_data_128.csv'
        self.log_path = './process_log.csv'
        self.test_data_lcce_path = './new_chrom/test_lcce.csv'
        self.test_data_lcrc_path = './new_chrom/test_lcrc.csv'
        self.test_data_rcce_path = './new_chrom/test_rcce.csv'
        self.train_data_lcce_path = './new_chrom/train_lcce.csv'
        self.train_data_lcrc_path = './new_chrom/train_lcrc.csv'
        self.train_data_rcce_path = './new_chrom/train_rcce.csv'
        self.test_data_v_path = './new_chrom/test_data_v32c.csv'
        self.train_data_v_path = './new_chrom/train_data_v32c.csv'
        self.test_data_m_path = './new_chrom/test_data_m32c.csv'
        self.train_data_m_path = './new_chrom/train_data_m32c.csv'
        self.test_data_s_path = './new_chrom/test_data_s32c.csv'
        self.train_data_s_path = './new_chrom/train_data_s32c.csv'
        self.test_data_p_path = './new_chrom/test_data_p128c.csv'
        self.train_data_p_path = './new_chrom/train_data_p128c.csv'
        self.test_data_c_path = './test_data_c128c.csv'
        self.train_data_c_path = './train_data_c128c.csv'
        self.test_data_f1_path = './test_data_f1-128c.csv'
        self.train_data_f1_path = './train_data_f1-128c.csv'
        self.test_data_test_path = './test_data_test.csv'
        self.train_data_test_path = './train_data_test.csv'
        self.main()
    def new_chrom(self, red, green, blue):
        # calculation of new X and Y
        Xcomp = 3 * red - 2 * green
        Ycomp = (1.5 * red) + green - (1.5 * blue)
        # standard deviations
        sX = np.std(Xcomp)
        sY = np.std(Ycomp)
        alpha = sX / sY
        # -- rPPG signal
        bvp = Xcomp - alpha * Ycomp
        return bvp
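
    # The method above builds a chrominance-based rPPG signal in the style of the
    # CHROM method (X = 3R - 2G, Y = 1.5R + G - 1.5B, S = X - alpha * Y with
    # alpha = std(X) / std(Y)), which largely cancels specular-reflection and
    # motion components before pulse extraction. Minimal standalone sketch using
    # synthetic channels (the names below are illustrative, not from this script):
    #
    #   rng = np.random.default_rng(0)
    #   r, g, b = rng.random(250), rng.random(250), rng.random(250)
    #   bvp = ProcessSignalData.new_chrom(None, r, g, b)  # self is unused here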
    def main(self):
        # length of video in frames to process
        sample_length = 250
        # interval for mean, var, std
        group_size = 32
        # window for psd
        psd_size = 128
        for paths, subdir, files in os.walk(self.dir):
            for file in files:
                if not file.endswith('.csv'):
                    continue
                self.full_path = os.path.join(paths, file)
                lower_path = self.full_path.lower()
                # skip rejected clips, stray text files and imposter recordings
                if 'rejected' in lower_path or '.txt' in lower_path or 'imposter' in lower_path:
                    continue
                print(self.full_path)
                self.dataset = pd.read_csv(self.full_path)
                # per-ROI red channels
                right_R = self.dataset['RC-R'].iloc[:sample_length]
                left_R = self.dataset['LC-R'].iloc[:sample_length]
                chin_R = self.dataset['C-R'].iloc[:sample_length]
                forehead_R = self.dataset['F-R'].iloc[:sample_length]
                outerR_R = self.dataset['OR-R'].iloc[:sample_length]
                outerL_R = self.dataset['OL-R'].iloc[:sample_length]
                center_R = self.dataset['CE-R'].iloc[:sample_length]
                # per-ROI green channels
                right_G = self.dataset['RC-G'].iloc[:sample_length]
                left_G = self.dataset['LC-G'].iloc[:sample_length]
                chin_G = self.dataset['C-G'].iloc[:sample_length]
                forehead_G = self.dataset['F-G'].iloc[:sample_length]
                outerR_G = self.dataset['OR-G'].iloc[:sample_length]
                outerL_G = self.dataset['OL-G'].iloc[:sample_length]
                center_G = self.dataset['CE-G'].iloc[:sample_length]
                # per-ROI blue channels
                right_B = self.dataset['RC-B'].iloc[:sample_length]
                left_B = self.dataset['LC-B'].iloc[:sample_length]
                chin_B = self.dataset['C-B'].iloc[:sample_length]
                forehead_B = self.dataset['F-B'].iloc[:sample_length]
                outerR_B = self.dataset['OR-B'].iloc[:sample_length]
                outerL_B = self.dataset['OL-B'].iloc[:sample_length]
                center_B = self.dataset['CE-B'].iloc[:sample_length]
                # per-ROI precomputed chrominance signals
                right_C = self.dataset['RC-chrom'].iloc[:sample_length]
                left_C = self.dataset['LC-Chrom'].iloc[:sample_length]
                chin_C = self.dataset['C-chrom'].iloc[:sample_length]
                forehead_C = self.dataset['F-chrom'].iloc[:sample_length]
                outerR_C = self.dataset['OR-chrom'].iloc[:sample_length]
                outerL_C = self.dataset['OL-chrom'].iloc[:sample_length]
                center_C = self.dataset['CE-chrom'].iloc[:sample_length]
                chrom_R = right_C
                chrom_L = left_C
                chrom_CE = center_C
                chrom_OL = outerL_C
                chrom_OR = outerR_C
                # chrom_R = self.new_chrom(right_R, right_G, right_B)
                # chrom_L = self.new_chrom(left_R, left_G, left_B)
                chrom_C = self.new_chrom(chin_R, chin_G, chin_B)
                chrom_F = self.new_chrom(forehead_R, forehead_G, forehead_B)
                # chrom_OR = self.new_chrom(outerR_R, outerR_G, outerR_B)
                # chrom_OL = self.new_chrom(outerL_R, outerL_G, outerL_B)
                # chrom_CE = self.new_chrom(center_R, center_G, center_B)
                # absolute pairwise difference signals (green and chrominance)
                difg_LCRC = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['LC-G'].iloc[:sample_length]).abs()
                difc_LCRC = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['LC-Chrom'].iloc[:sample_length]).abs()
                difg_o1 = (self.dataset['C-G'].iloc[:sample_length] - self.dataset['F-G'].iloc[:sample_length]).abs()
                difc_o1 = (self.dataset['C-chrom'].iloc[:sample_length] - self.dataset['F-chrom'].iloc[:sample_length]).abs()
                difg_o2 = (self.dataset['OR-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
                difc_o2 = (self.dataset['OR-chrom'].iloc[:sample_length] - self.dataset['OL-chrom'].iloc[:sample_length]).abs()
                difc_LCCe = (self.dataset['LC-Chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[:sample_length]).abs()
                difc_RCCe = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[:sample_length]).abs()
                # the chrom_*-based versions below supersede the column-based
                # difc_LCRC / difc_LCCe / difc_RCCe computed just above
                difc_LCRC = (chrom_R.iloc[:sample_length] - chrom_L.iloc[:sample_length]).abs()
                difc_LCCe = (chrom_L.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
                difc_RCCe = (chrom_R.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
                difc_LCOL = (chrom_L.iloc[:sample_length] - chrom_OL.iloc[:sample_length]).abs()
                difc_RCOR = (chrom_R.iloc[:sample_length] - chrom_OR.iloc[:sample_length]).abs()
                difg_LCOL = (self.dataset['LC-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
                difg_RCOR = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['OR-G'].iloc[:sample_length]).abs()
                # green channel features
                # right cheek - left cheek
                difg_LCRC_lst = [difg_LCRC.iloc[i:i + group_size]
                                 for i in range(0, len(difg_LCRC) - group_size + 1, group_size)]
                # forehead - chin
                difg_o1_lst = [difg_o1.iloc[i:i + group_size]
                               for i in range(0, len(difg_o1) - group_size + 1, group_size)]
                # outer right - outer left
                difg_o2_lst = [difg_o2.iloc[i:i + group_size]
                               for i in range(0, len(difg_o2) - group_size + 1, group_size)]
                # chrominance features
                # right cheek - left cheek
                difc_LCRC_lst = [difc_LCRC.iloc[i:i + group_size]
                                 for i in range(0, len(difc_LCRC) - group_size + 1, group_size)]
                # forehead - chin
                difc_o1_lst = [difc_o1.iloc[i:i + group_size]
                               for i in range(0, len(difc_o1) - group_size + 1, group_size)]
                # outer right - outer left
                difc_o2_lst = [difc_o2.iloc[i:i + group_size]
                               for i in range(0, len(difc_o2) - group_size + 1, group_size)]
                # mean per non-overlapping window
                difg_LCRC_mean = np.array([w.mean() for w in difg_LCRC_lst])
                difc_LCRC_mean = np.array([w.mean() for w in difc_LCRC_lst])
                print("MEAN")
                print(difc_LCRC_mean)
                difg_o1_mean = np.array([w.mean() for w in difg_o1_lst])
                difc_o1_mean = np.array([w.mean() for w in difc_o1_lst])
                difg_o2_mean = np.array([w.mean() for w in difg_o2_lst])
                difc_o2_mean = np.array([w.mean() for w in difc_o2_lst])
                # variance per window
                difg_LCRC_var = np.array([w.var() for w in difg_LCRC_lst])
                difc_LCRC_var = np.array([w.var() for w in difc_LCRC_lst])
                print("VAR")
                print(difc_LCRC_var)
                difg_o1_var = np.array([w.var() for w in difg_o1_lst])
                difc_o1_var = np.array([w.var() for w in difc_o1_lst])
                difg_o2_var = np.array([w.var() for w in difg_o2_lst])
                difc_o2_var = np.array([w.var() for w in difc_o2_lst])
                # standard deviation per window
                difg_LCRC_std = np.array([w.std() for w in difg_LCRC_lst])
                difc_LCRC_std = np.array([w.std() for w in difc_LCRC_lst])
                print("STD")
                print(difc_LCRC_std)
                difg_o1_std = np.array([w.std() for w in difg_o1_lst])
                difc_o1_std = np.array([w.std() for w in difc_o1_lst])
                difg_o2_std = np.array([w.std() for w in difg_o2_lst])
                difc_o2_std = np.array([w.std() for w in difc_o2_lst])
                # power spectral density
                f, difg_LCRC_psd = signal.welch(difg_LCRC, nperseg=psd_size)
                f, difc_LCCe_psd = signal.welch(difc_LCCe, nperseg=psd_size)
                f, difc_RCCe_psd = signal.welch(difc_RCCe, nperseg=psd_size)
                f, difc_LCRC_psd = signal.welch(difc_LCRC, nperseg=psd_size)
                print("PSD")
                print(difc_LCRC_psd)
                f, difg_o1_psd = signal.welch(difg_o1, nperseg=psd_size)
                f, difc_o1_psd = signal.welch(difc_o1, nperseg=psd_size)
                f, difg_o2_psd = signal.welch(difg_o2, nperseg=psd_size)
                f, difc_o2_psd = signal.welch(difc_o2, nperseg=psd_size)
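                # signal.welch averages periodograms over overlapping
                # nperseg-sample segments; with 250-frame inputs and nperseg=128,
                # each call returns nperseg // 2 + 1 = 65 one-sided PSD bins
                # alongside the corresponding bin frequencies.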
                # cross power spectral density
                left_C.fillna(0, inplace=True)
                center_C.fillna(0, inplace=True)
                right_C.fillna(0, inplace=True)
                outerL_C.fillna(0, inplace=True)
                outerR_C.fillna(0, inplace=True)
                f, difc_LCCe_v_csd = signal.csd(left_C, center_C, nperseg=128)
                f, difc_LCRC_v_csd = signal.csd(left_C, right_C, nperseg=128)
                f, difc_RCCe_v_csd = signal.csd(right_C, center_C, nperseg=128)
                f, difc_LCOL_v_csd = signal.csd(left_C, outerL_C, nperseg=128)
                f, difc_RCOR_v_csd = signal.csd(right_C, outerR_C, nperseg=128)
                difc_LCCe_csd_0 = []
                difc_LCRC_csd_0 = []
                difc_RCCe_csd_0 = []
                difc_LCOL_csd_0 = []
                difc_RCOR_csd_0 = []
                difc_LCCe_csd_1 = []
                difc_LCRC_csd_1 = []
                difc_RCCe_csd_1 = []
                difc_LCOL_csd_1 = []
                difc_RCOR_csd_1 = []
                for i in range(len(difc_LCCe_v_csd)):
                    difc_LCCe_csd_0.append(difc_LCCe_v_csd[i].real)
                    difc_LCCe_csd_1.append(difc_LCCe_v_csd[i].imag)
                for i in range(len(difc_LCRC_v_csd)):
                    difc_LCRC_csd_0.append(difc_LCRC_v_csd[i].real)
                    difc_LCRC_csd_1.append(difc_LCRC_v_csd[i].imag)
                for i in range(len(difc_RCCe_v_csd)):
                    difc_RCCe_csd_0.append(difc_RCCe_v_csd[i].real)
                    difc_RCCe_csd_1.append(difc_RCCe_v_csd[i].imag)
                for i in range(len(difc_LCOL_v_csd)):
                    difc_LCOL_csd_0.append(difc_LCOL_v_csd[i].real)
                    difc_LCOL_csd_1.append(difc_LCOL_v_csd[i].imag)
                for i in range(len(difc_RCOR_v_csd)):
                    difc_RCOR_csd_0.append(difc_RCOR_v_csd[i].real)
                    difc_RCOR_csd_1.append(difc_RCOR_v_csd[i].imag)
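                # The five loops above only split each complex CSD array into its
                # real and imaginary parts; an equivalent vectorized form (sketch,
                # assuming the list layout is not needed elsewhere) would be:
                #
                #   difc_LCCe_csd_0 = difc_LCCe_v_csd.real.tolist()
                #   difc_LCCe_csd_1 = difc_LCCe_v_csd.imag.tolist()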
                csd2_LCCe = []
                csd2_LCRC = []
                csd2_RCCe = []
                for i in range(len(difc_RCCe_csd_0)):
                    csd2_LCCe.append((difc_LCCe_csd_0[i], difc_LCCe_csd_1[i]))
                    csd2_LCRC.append((difc_LCRC_csd_0[i], difc_LCRC_csd_1[i]))
                    csd2_RCCe.append((difc_RCCe_csd_0[i], difc_RCCe_csd_1[i]))
                # f1 feature
                t = np.abs(difc_LCCe_v_csd)
                j = np.argmax(t)
                max_cLCCe = (difc_LCCe_csd_0[j], difc_LCCe_csd_1[j])
                mean_cLCCe = [np.mean(np.asarray(difc_LCCe_csd_0)), np.mean(np.asarray(difc_LCCe_csd_1))]
                f1LCCe = np.array([max_cLCCe[0], max_cLCCe[1], mean_cLCCe[0], mean_cLCCe[1]])
                t = np.abs(difc_LCRC_v_csd)
                j = np.argmax(t)
                max_cLCRC = (difc_LCRC_csd_0[j], difc_LCRC_csd_1[j])
                mean_cLCRC = [np.mean(np.asarray(difc_LCRC_csd_0)), np.mean(np.asarray(difc_LCRC_csd_1))]
                f1LCRC = np.array([max_cLCRC[0], max_cLCRC[1], mean_cLCRC[0], mean_cLCRC[1]])
                t = np.abs(difc_RCCe_v_csd)
                j = np.argmax(t)
                max_cRCCe = (difc_RCCe_csd_0[j], difc_RCCe_csd_1[j])
                mean_cRCCe = [np.mean(np.asarray(difc_RCCe_csd_0)), np.mean(np.asarray(difc_RCCe_csd_1))]
                f1RCCe = np.array([max_cRCCe[0], max_cRCCe[1], mean_cRCCe[0], mean_cRCCe[1]])
                t = np.abs(difc_LCOL_v_csd)
                j = np.argmax(t)
                max_cLCOL = (difc_LCOL_csd_0[j], difc_LCOL_csd_1[j])
                mean_cLCOL = [np.mean(np.asarray(difc_LCOL_csd_0)), np.mean(np.asarray(difc_LCOL_csd_1))]
                f1LCOL = np.array([max_cLCOL[0], max_cLCOL[1], mean_cLCOL[0], mean_cLCOL[1]])
                t = np.abs(difc_RCOR_v_csd)
                j = np.argmax(t)
                max_cRCOR = (difc_RCOR_csd_0[j], difc_RCOR_csd_1[j])
                mean_cRCOR = [np.mean(np.asarray(difc_RCOR_csd_0)), np.mean(np.asarray(difc_RCOR_csd_1))]
                f1RCOR = np.array([max_cRCOR[0], max_cRCOR[1], mean_cRCOR[0], mean_cRCOR[1]])
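                # Each f1 vector packs four numbers per region pair: the real and
                # imaginary parts of the CSD bin with the largest magnitude, then
                # the means of the real and imaginary parts across all bins.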
                derived_data_mean = np.concatenate([difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean,
                                                    difg_o2_mean, difc_o2_mean])
                derived_data_var = np.concatenate([difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var,
                                                   difg_o2_var, difc_o2_var])
                derived_data_std = np.concatenate([difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std,
                                                   difg_o2_std, difc_o2_std])
                derived_data_psd = np.concatenate([difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd])
                derived_data_csd = np.concatenate([difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0,
                                                   difc_LCRC_csd_1, difc_RCCe_csd_0, difc_RCCe_csd_1])
                derived_data_rcsd = np.concatenate([difc_LCCe_csd_0, difc_LCRC_csd_0, difc_RCCe_csd_0])
                derived_data_f1 = np.concatenate([f1LCCe, f1LCRC, f1RCCe])
                derived_data_test = np.concatenate([f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std,
                                                    difc_LCRC_var, difc_LCRC_psd, difc_LCRC_mean])
                chrom_data = self.dataset['RC-chrom'].iloc[50] - self.dataset['C-chrom'].iloc[50]
                if 'fake' in self.full_path.lower():
                    self.fake_data_LCCE[self.fake_count] = difc_LCCe
                    self.fake_data_LCRC[self.fake_count] = difc_LCRC
                    self.fake_data_RCCE[self.fake_count] = difc_RCCe
                    self.fake_data_mean[self.fake_count] = derived_data_mean
                    self.fake_data_var[self.fake_count] = derived_data_var
                    self.fake_data_std[self.fake_count] = derived_data_std
                    self.fake_data_psd[self.fake_count] = derived_data_psd
                    self.fake_data_csd[self.fake_count] = derived_data_csd
                    self.fake_data_f1[self.fake_count] = derived_data_f1
                    self.fake_data_test[self.fake_count] = derived_data_test
                    self.fake_count += 1
                else:
                    self.real_data_LCCE[self.real_count] = difc_LCCe
                    self.real_data_LCRC[self.real_count] = difc_LCRC
                    self.real_data_RCCE[self.real_count] = difc_RCCe
                    self.real_data_mean[self.real_count] = derived_data_mean
                    self.real_data_var[self.real_count] = derived_data_var
                    self.real_data_std[self.real_count] = derived_data_std
                    self.real_data_psd[self.real_count] = derived_data_psd
                    self.real_data_csd[self.real_count] = derived_data_csd
                    self.real_data_f1[self.real_count] = derived_data_f1
                    self.real_data_test[self.real_count] = derived_data_test
                    self.real_count += 1
                self.vid_count += 1
        self.real_df_LCCE = pd.DataFrame(self.real_data_LCCE)
        self.real_df_LCRC = pd.DataFrame(self.real_data_LCRC)
        self.real_df_RCCE = pd.DataFrame(self.real_data_RCCE)
        self.fake_df_LCCE = pd.DataFrame(self.fake_data_LCCE)
        self.fake_df_LCRC = pd.DataFrame(self.fake_data_LCRC)
        self.fake_df_RCCE = pd.DataFrame(self.fake_data_RCCE)
        self.real_df_m = pd.DataFrame(self.real_data_mean)
        self.fake_df_m = pd.DataFrame(self.fake_data_mean)
        self.real_df_v = pd.DataFrame(self.real_data_var)
        self.fake_df_v = pd.DataFrame(self.fake_data_var)
        self.real_df_s = pd.DataFrame(self.real_data_std)
        self.fake_df_s = pd.DataFrame(self.fake_data_std)
        self.real_df_p = pd.DataFrame(self.real_data_psd)
        self.fake_df_p = pd.DataFrame(self.fake_data_psd)
        self.real_df_csp = pd.DataFrame(self.real_data_csd)
        self.fake_df_csp = pd.DataFrame(self.fake_data_csd)
        self.real_df_f1 = pd.DataFrame(self.real_data_f1)
        self.fake_df_f1 = pd.DataFrame(self.fake_data_f1)
        self.real_df_test = pd.DataFrame(self.real_data_test)
        self.fake_df_test = pd.DataFrame(self.fake_data_test)
        r_lcce = self.real_df_LCCE.transpose()
        r_lcrc = self.real_df_LCRC.transpose()
        r_rcce = self.real_df_RCCE.transpose()
        f_lcce = self.fake_df_LCCE.transpose()
        f_lcrc = self.fake_df_LCRC.transpose()
        f_rcce = self.fake_df_RCCE.transpose()
        r_m = self.real_df_m.transpose()
        f_m = self.fake_df_m.transpose()
        r_v = self.real_df_v.transpose()
        f_v = self.fake_df_v.transpose()
        r_s = self.real_df_s.transpose()
        f_s = self.fake_df_s.transpose()
        r_p = self.real_df_p.transpose()
        f_p = self.fake_df_p.transpose()
        r_c = self.real_df_csp.transpose()
        f_c = self.fake_df_csp.transpose()
        r_f = self.real_df_f1.transpose()
        f_f = self.fake_df_f1.transpose()
        r_t = self.real_df_test.transpose()
        f_t = self.fake_df_test.transpose()
r_f.to_csv("./real_f1.csv", index=False)
f_f.to_csv("./fake_f1.csv", index=False)
r_lcce['Target'] = 1
r_lcrc['Target'] = 1
r_rcce['Target'] = 1
f_lcce['Target'] = 0
f_lcrc['Target'] = 0
f_rcce['Target'] = 0
r_m['Target'] = 1
f_m['Target'] = 0
r_v['Target'] = 1
f_v['Target'] = 0
r_s['Target'] = 1
f_s['Target'] = 0
r_p['Target'] = 1
f_p['Target'] = 0
r_c['Target'] = 1
f_c['Target'] = 0
r_f['Target'] = 1
f_f['Target'] = 0
r_t['Target'] = 1
f_t['Target'] = 0
rf_lcce = r_lcce.append(f_lcce)
rf_lcrc = r_lcrc.append(f_lcrc)
rf_rcce = r_rcce.append(f_rcce)
rf_m = r_m.append(f_m)
rf_v = r_v.append(f_v)
rf_s = r_s.append(f_s)
rf_p = r_p.append(f_p)
rf_c = r_c.append(f_c)
rf_f = r_f.append(f_f)
rf_t = r_t.append(f_t)
        # train_test_split returns the training split first, then the test
        # split, so with test_size=0.2 each train frame keeps 80% of the rows
        train_v, test_v = train_test_split(rf_v, test_size=0.2)
        train_m, test_m = train_test_split(rf_m, test_size=0.2)
        train_s, test_s = train_test_split(rf_s, test_size=0.2)
        train_p, test_p = train_test_split(rf_p, test_size=0.2)
        train_c, test_c = train_test_split(rf_c, test_size=0.2)
        train_f, test_f = train_test_split(rf_f, test_size=0.2)
        train_t, test_t = train_test_split(rf_t, test_size=0.2)
        train_lcce, test_lcce = train_test_split(rf_lcce, test_size=0.2)
        train_lcrc, test_lcrc = train_test_split(rf_lcrc, test_size=0.2)
        train_rcce, test_rcce = train_test_split(rf_rcce, test_size=0.2)
        train_lcce.to_csv(self.train_data_lcce_path, index=False)
        train_lcrc.to_csv(self.train_data_lcrc_path, index=False)
        train_rcce.to_csv(self.train_data_rcce_path, index=False)
        test_lcce.to_csv(self.test_data_lcce_path, index=False)
        test_lcrc.to_csv(self.test_data_lcrc_path, index=False)
        test_rcce.to_csv(self.test_data_rcce_path, index=False)
        train_s.to_csv(self.train_data_s_path, index=False)
        test_s.to_csv(self.test_data_s_path, index=False)
        train_v.to_csv(self.train_data_v_path, index=False)
        test_v.to_csv(self.test_data_v_path, index=False)
        train_m.to_csv(self.train_data_m_path, index=False)
        test_m.to_csv(self.test_data_m_path, index=False)
        train_p.to_csv(self.train_data_p_path, index=False)
        test_p.to_csv(self.test_data_p_path, index=False)
        train_c.to_csv(self.train_data_c_path, index=False)
        test_c.to_csv(self.test_data_c_path, index=False)
        train_f.to_csv(self.train_data_f1_path, index=False)
        test_f.to_csv(self.test_data_f1_path, index=False)
        train_t.to_csv(self.train_data_test_path, index=False)
        test_t.to_csv(self.test_data_test_path, index=False)
        r_c.to_csv("./csd_real128.csv", index=False)
        f_c.to_csv("./csd_fake128.csv", index=False)


if __name__ == '__main__':
    p = ProcessSignalData()
[
"pandas.DataFrame",
"numpy.abs",
"scipy.signal.welch",
"numpy.argmax",
"numpy.std",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"os.walk",
"numpy.asarray",
"numpy.array",
"os.path.join",
"scipy.signal.csd",
"numpy.concatenate"
]
import autokeras as ak
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
EPOCHS = 50
BATCH = 5
NAME = "autokeras_classification"
DATASET_PATH = "/hdd/4celebs_training_set"
def build_train_set(image_size):
    def make_train_generator():
        data_generator = image.ImageDataGenerator(rescale=1. / 255,
                                                 # shear_range=0.2,
                                                 zoom_range=0.1,
                                                 horizontal_flip=True,
                                                 validation_split=0.15,
                                                 width_shift_range=0.2,
                                                 height_shift_range=0.2)
        train_generator = data_generator.flow_from_directory(
            DATASET_PATH,
            target_size=(image_size, image_size),
            batch_size=BATCH,
            class_mode='categorical',
            subset='training')
        return train_generator
    return tf.data.Dataset.from_generator(make_train_generator, (tf.float16, tf.float16))
def build_val_set(image_size):
    def make_val_generator():
        data_val_gen = image.ImageDataGenerator(rescale=1. / 255,
                                               validation_split=0.1)
        validation_generator = data_val_gen.flow_from_directory(
            DATASET_PATH,
            target_size=(image_size, image_size),
            class_mode='categorical',
            subset='validation')
        return validation_generator
    return tf.data.Dataset.from_generator(make_val_generator, (tf.float32, tf.float32))
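
# Note: passing bare dtypes to tf.data.Dataset.from_generator is deprecated on
# newer TensorFlow releases (>= 2.4), which expect an explicit output_signature.
# A sketch of the modern call, assuming batches of RGB images with one-hot
# labels (the shapes here are an assumption, not part of the original script):
#
#   tf.data.Dataset.from_generator(
#       make_val_generator,
#       output_signature=(
#           tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32),
#           tf.TensorSpec(shape=(None, None), dtype=tf.float32)))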
def build_model():
    model = ak.ImageClassifier(max_trials=100, objective="val_acc")
    return model
def train(model, image_size):
    tensorboard = TensorBoard(log_dir=f'output/logs/{NAME}',
                              write_graph=True,
                              # histogram_freq=5,
                              write_images=True,
                              write_grads=True,
                              profile_batch=3)
    checkpointer = ModelCheckpoint(
        filepath=f'output/weights/{NAME}_clooney.best.hdf5',
        verbose=1,
        save_best_only=True,
        monitor='val_acc')
    train_set = build_train_set(image_size)
    # val_set = build_val_set(image_size)
    model.fit(
        train_set,
        steps_per_epoch=EPOCHS * BATCH,
        epochs=EPOCHS,
        # validation_data=val_set,
        validation_split=0.15,
        verbose=2,
        callbacks=[checkpointer, tensorboard])
if __name__ == '__main__':
    model = build_model()
    train(model, image_size=312)
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"autokeras.ImageClassifier",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.data.Dataset.from_generator",
"tensorflow.keras.callbacks.TensorBoard"
]
import asyncio
import io
import os
import sys
from actions_toolkit import core
from actions_toolkit.utils import to_command_properties, AnnotationProperties
test_env_vars = {
    'my var': '',
    'special char var \r\n];': '',
    'my var2': '',
    'my secret': '',
    'special char secret \r\n];': '',
    'my secret2': '',
    'PATH': f'path1{os.pathsep}path2',
    # Set inputs
    'INPUT_MY_INPUT': 'val',
    'INPUT_MISSING': '',
    'INPUT_SPECIAL_CHARS_\'\t"\\': '\'\t"\\ response ',
    'INPUT_MULTIPLE_SPACES_VARIABLE': 'I have multiple spaces',
    'INPUT_BOOLEAN_INPUT': 'true',
    'INPUT_BOOLEAN_INPUT_TRUE1': 'true',
    'INPUT_BOOLEAN_INPUT_TRUE2': 'True',
    'INPUT_BOOLEAN_INPUT_TRUE3': 'TRUE',
    'INPUT_BOOLEAN_INPUT_FALSE1': 'false',
    'INPUT_BOOLEAN_INPUT_FALSE2': 'False',
    'INPUT_BOOLEAN_INPUT_FALSE3': 'FALSE',
    'INPUT_WRONG_BOOLEAN_INPUT': 'wrong',
    'INPUT_WITH_TRAILING_WHITESPACE': ' some val ',
    'INPUT_MY_INPUT_LIST': 'val1\nval2\nval3',
    # Save inputs
    'STATE_TEST_1': 'state_val',
    # File Commands
    'GITHUB_PATH': '',
    'GITHUB_ENV': ''
}
for k, v in test_env_vars.items():
    os.environ[k] = v

file_path = os.path.join(os.getcwd(), 'test')
if not os.path.isdir(file_path):
    os.makedirs(file_path)


def call(func, *args, **kw):
    # capture everything func writes to stdout and return it as a string
    output = io.StringIO()
    sys.stdout = output
    func(*args, **kw)
    sys.stdout = sys.__stdout__
    return output.getvalue()


def create_file_command_file(command: str):
    path = os.path.join(file_path, command)
    os.environ[f'GITHUB_{command}'] = path
    with open(path, 'a', encoding='utf-8', newline='') as fs:
        fs.write('')


def verify_file_command(command: str, expected_contents: str):
    path = os.path.join(file_path, command)
    with open(path, 'r', encoding='utf-8', newline='') as fs:
        contents = fs.read()
    assert contents == expected_contents
    os.unlink(path)
    os.environ.pop(f'GITHUB_{command}', None)
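
# The '::set-env' / '::add-path' workflow commands asserted below are the
# legacy GitHub Actions interface; GitHub deprecated them in late 2020 in
# favour of the GITHUB_ENV / GITHUB_PATH file commands, which the ENV and PATH
# cases below drive through create_file_command_file() and
# verify_file_command().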
assert call(core.export_variable, 'my var', 'var val') == f'::set-env name=my var::var val{os.linesep}'
assert call(core.export_variable, 'special char var \r\n,:',
            'special val') == f'::set-env name=special char var %0D%0A%2C%3A::special val{os.linesep}'
assert os.getenv('special char var \r\n,:') == 'special val'
assert call(core.export_variable, 'my var2', 'var val\r\n') == f'::set-env name=my var2::var val%0D%0A{os.linesep}'
assert os.getenv('my var2') == 'var val\r\n'
assert call(core.export_variable, 'my var', True) == f'::set-env name=my var::true{os.linesep}'
assert call(core.export_variable, 'my var', 5) == f'::set-env name=my var::5{os.linesep}'
command = 'ENV'
create_file_command_file(command)
core.export_variable('my var', 'var val')
verify_file_command(command,
                    f'my var<<_GitHubActionsFileCommandDelimeter_{os.linesep}var val'
                    f'{os.linesep}_GitHubActionsFileCommandDelimeter_{os.linesep}')
command = 'ENV'
create_file_command_file(command)
core.export_variable('my var', True)
verify_file_command(command,
                    f'my var<<_GitHubActionsFileCommandDelimeter_{os.linesep}true'
                    f'{os.linesep}_GitHubActionsFileCommandDelimeter_{os.linesep}')
command = 'ENV'
create_file_command_file(command)
core.export_variable('my var', 5)
verify_file_command(command,
                    f'my var<<_GitHubActionsFileCommandDelimeter_{os.linesep}5'
                    f'{os.linesep}_GitHubActionsFileCommandDelimeter_{os.linesep}')
assert call(core.set_secret, 'secret val') == f'::add-mask::secret val{os.linesep}'
command = 'PATH'
create_file_command_file(command)
core.add_path('myPath')
assert os.getenv('PATH') == f'myPath{os.pathsep}path1{os.pathsep}path2'
verify_file_command(command, f'myPath{os.linesep}')
assert call(core.add_path, 'myPath') == f'::add-path::myPath{os.linesep}'
assert core.get_input('my input') == 'val'
assert core.get_input('my input', required=True) == 'val'
try:
    core.get_input('missing', required=True)
except Exception as e:
    assert str(e) == 'Input required and not supplied: MISSING'
else:
    raise Exception('Expected raise Exception but it did not')
assert core.get_input('missing', required=False) == ''
assert core.get_input('My InPuT') == 'val'
assert core.get_input('special chars_\'\t"\\') == '\'\t"\\ response'
assert core.get_input('multiple spaces variable') == 'I have multiple spaces'
assert core.get_multiline_input('my input list') == ['val1', 'val2', 'val3']
assert core.get_input('with trailing whitespace') == 'some val'
assert core.get_input('with trailing whitespace', trim_whitespace=True) == 'some val'
assert core.get_input('with trailing whitespace', trim_whitespace=False) == ' some val '
assert core.get_boolean_input('boolean input') is True
assert core.get_boolean_input('boolean input', required=True) is True
assert core.get_boolean_input('boolean input true1') is True
assert core.get_boolean_input('boolean input true2') is True
assert core.get_boolean_input('boolean input true3') is True
assert core.get_boolean_input('boolean input false1') is False
assert core.get_boolean_input('boolean input false2') is False
assert core.get_boolean_input('boolean input false3') is False
try:
    core.get_boolean_input('wrong boolean input')
except Exception as e:
    assert str(e) == 'Input does not meet YAML 1.2 "Core Schema" specification: wrong boolean input\n' \
                     'Support boolean input list: `true | True | TRUE | false | False | FALSE`'
else:
    raise Exception('Expected raise Exception but it did not')
assert call(core.set_output, 'some output', 'some value') == \
f'{os.linesep}::set-output name=some output::some value{os.linesep}'
assert call(core.set_output, 'some output', False) == f'{os.linesep}::set-output name=some output::false{os.linesep}'
assert call(core.set_output, 'some output', 1.01) == f'{os.linesep}::set-output name=some output::1.01{os.linesep}'
assert call(core.error, 'Error message') == f'::error::Error message{os.linesep}'
assert call(core.error, 'Error message\r\n\n') == f'::error::Error message%0D%0A%0A{os.linesep}'
message = 'this is my error message'
error = Exception(message)
assert call(core.error, error) == f'::error::Error: {message}{os.linesep}'
assert call(core.error, error, title='A title', file='root/test.txt',
start_column=1, end_column=2, start_line=5, end_line=5) == \
f'::error title=A title,file=root/test.txt,line=5,endLine=5,col=1,endColumn=2::Error: {message}{os.linesep}'
assert call(core.warning, 'Warning') == f'::warning::Warning{os.linesep}'
assert call(core.warning, '\r\nwarning\n') == f'::warning::%0D%0Awarning%0A{os.linesep}'
assert call(core.warning, error) == f'::warning::Error: {message}{os.linesep}'
assert call(core.warning, error, title='A title', file='root/test.txt',
start_column=1, end_column=2, start_line=5, end_line=5) == \
f'::warning title=A title,file=root/test.txt,line=5,endLine=5,col=1,endColumn=2::Error: {message}{os.linesep}'
assert call(core.notice, '\r\nnotice\n') == f'::notice::%0D%0Anotice%0A{os.linesep}'
assert call(core.notice, error) == f'::notice::Error: {message}{os.linesep}'
assert call(core.notice, error, title='A title', file='root/test.txt',
start_column=1, end_column=2, start_line=5, end_line=5) == \
f'::notice title=A title,file=root/test.txt,line=5,endLine=5,col=1,endColumn=2::Error: {message}{os.linesep}'
annotation_properties = AnnotationProperties(title='A title', file='root/test.txt', start_column=1, end_column=2,
start_line=5, end_line=5)
command_properties = to_command_properties(annotation_properties)
assert command_properties['title'] == 'A title'
assert command_properties['file'] == 'root/test.txt'
assert command_properties['col'] == 1
assert command_properties['endColumn'] == 2
assert command_properties['line'] == 5
assert command_properties['endLine'] == 5
assert command_properties.get('startColumn') is None
assert command_properties.get('startLine') is None
assert call(core.start_group, 'my-group') == f'::group::my-group{os.linesep}'
assert call(core.end_group) == f'::endgroup::{os.linesep}'
async def f():
    async def in_group():
        sys.stdout.write('in my group\n')
        return True
    result = await core.group('mygroup', in_group)
    assert result is True
assert call(asyncio.run, f()) == f'::group::mygroup{os.linesep}in my group\n::endgroup::{os.linesep}'
assert call(core.debug, 'Debug') == f'::debug::Debug{os.linesep}'
assert call(core.debug, '\r\ndebug\n') == f'::debug::%0D%0Adebug%0A{os.linesep}'
assert call(core.save_state, 'state_1', 'some value') == f'::save-state name=state_1::some value{os.linesep}'
assert call(core.save_state, 'state_1', 1) == f'::save-state name=state_1::1{os.linesep}'
assert call(core.save_state, 'state_1', True) == f'::save-state name=state_1::true{os.linesep}'
assert core.get_state('TEST_1') == 'state_val'
os.environ.pop('RUNNER_DEBUG', None)
assert core.is_debug() is False
os.environ['RUNNER_DEBUG'] = '1'
assert core.is_debug() is True
os.environ.pop('RUNNER_DEBUG', None)
assert call(core.set_command_echo, True) == f'::echo::on{os.linesep}'
assert call(core.set_command_echo, False) == f'::echo::off{os.linesep}'
[
"sys.stdout.write",
"actions_toolkit.core.get_multiline_input",
"os.unlink",
"actions_toolkit.core.get_input",
"actions_toolkit.core.add_path",
"os.environ.pop",
"os.path.join",
"actions_toolkit.core.group",
"actions_toolkit.core.is_debug",
"actions_toolkit.utils.to_command_properties",
"actions_toolkit.core.export_variable",
"io.StringIO",
"actions_toolkit.core.get_boolean_input",
"actions_toolkit.core.get_state",
"os.getenv",
"os.makedirs",
"os.getcwd",
"os.path.isdir",
"actions_toolkit.utils.AnnotationProperties"
]
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-17 19:38
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('buildinginfos', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ApplicationRegistered',
            fields=[
                ('application_id', models.AutoField(primary_key=True, serialize=False)),
                ('app_name', models.CharField(blank=True, max_length=50, null=True)),
                ('description', models.CharField(blank=True, max_length=1000, null=True)),
                ('app_folder', models.CharField(max_length=200)),
                ('registered_time', models.DateTimeField()),
            ],
            options={
                'db_table': 'application_registered',
            },
        ),
        migrations.CreateModel(
            name='ApplicationRunning',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField()),
                ('app_agent_id', models.CharField(max_length=50)),
                ('status', models.CharField(blank=True, max_length=20, null=True)),
                ('app_data', django.contrib.postgres.fields.jsonb.JSONField(default={})),
                ('app_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bemoss_applications.ApplicationRegistered')),
                ('building', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='buildinginfos.BuildingInfo')),
                ('zone', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='buildinginfos.ZoneInfo')),
            ],
            options={
                'db_table': 'application_running',
            },
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((512, 563), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (528, 563), False, 'from django.db import migrations, models\n'), ((595, 649), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (611, 649), False, 'from django.db import migrations, models\n'), ((684, 740), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1000)', 'null': '(True)'}), '(blank=True, max_length=1000, null=True)\n', (700, 740), False, 'from django.db import migrations, models\n'), ((774, 806), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (790, 806), False, 'from django.db import migrations, models\n'), ((845, 867), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (865, 867), False, 'from django.db import migrations, models\n'), ((1102, 1195), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1118, 1195), False, 'from django.db import migrations, models\n'), ((1225, 1247), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1245, 1247), False, 'from django.db import migrations, models\n'), ((1283, 1314), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1299, 1314), False, 'from django.db import migrations, models\n'), ((1344, 1398), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (1360, 1398), False, 'from django.db import migrations, models\n'), ((1520, 1635), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""bemoss_applications.ApplicationRegistered"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'bemoss_applications.ApplicationRegistered')\n", (1537, 1635), False, 'from django.db import migrations, models\n'), ((1662, 1772), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""buildinginfos.BuildingInfo"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='buildinginfos.BuildingInfo')\n", (1679, 1772), False, 'from django.db import migrations, models\n'), ((1796, 1902), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""buildinginfos.ZoneInfo"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='buildinginfos.ZoneInfo')\n", (1813, 1902), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import argparse
from lelantos import tomographic_objects
parser = argparse.ArgumentParser(description='Plot the QSO catalog')
parser.add_argument('-i',
'--input', help='Input QSO catalog',required=True)
parser.add_argument('-bins', help='Number of bins for the 2D histogram', default=100,required=False)
parser.add_argument('-zname',
                    '--redshift-name', help='Name of the redshift column in the catalog', default="Z",required=False)
args = vars(parser.parse_args())
catalog = tomographic_objects.QSOCatalog.init_from_fits(args["input"],redshift_name=args["redshift_name"])
RA = catalog.coord[:,0]
RA[RA>180] = RA[RA>180] - 360
DEC = catalog.coord[:,1]
plt.hist2d(RA,DEC,int(args["bins"]))
plt.colorbar()
plt.show()
|
[
"lelantos.tomographic_objects.QSOCatalog.init_from_fits",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.colorbar"
] |
[((122, 181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot the QSO catalog"""'}), "(description='Plot the QSO catalog')\n", (145, 181), False, 'import argparse\n'), ((528, 630), 'lelantos.tomographic_objects.QSOCatalog.init_from_fits', 'tomographic_objects.QSOCatalog.init_from_fits', (["args['input']"], {'redshift_name': "args['redshift_name']"}), "(args['input'], redshift_name=\n args['redshift_name'])\n", (573, 630), False, 'from lelantos import tomographic_objects\n'), ((744, 758), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (756, 758), True, 'import matplotlib.pyplot as plt\n'), ((759, 769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (767, 769), True, 'import matplotlib.pyplot as plt\n')]
|
import unittest
from streamlink.plugins.dplay import Dplay
class TestPluginDplay(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://www.dplay.dk/videoer/studie-5/season-2-episode-1',
'https://www.dplay.no/videoer/danskebaten/sesong-1-episode-1',
'https://www.dplay.se/videos/breaking-news/breaking-news-med-filip-fredrik-750',
]
for url in should_match:
self.assertTrue(Dplay.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://example.com/index.html',
]
for url in should_not_match:
self.assertFalse(Dplay.can_handle_url(url))
|
[
"streamlink.plugins.dplay.Dplay.can_handle_url"
] |
[((475, 500), 'streamlink.plugins.dplay.Dplay.can_handle_url', 'Dplay.can_handle_url', (['url'], {}), '(url)\n', (495, 500), False, 'from streamlink.plugins.dplay import Dplay\n'), ((698, 723), 'streamlink.plugins.dplay.Dplay.can_handle_url', 'Dplay.can_handle_url', (['url'], {}), '(url)\n', (718, 723), False, 'from streamlink.plugins.dplay import Dplay\n')]
|
"""
SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2020 Deutsches Elektronen-Synchrotron DESY.
See LICENSE.txt for license details.
"""
import unittest
from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase
class TestString(unittest.TestCase):
def test_null(self):
nul = StringField()
ser = nul.serialize()
self.assertEqual(ser, b'\xc0')
ser += b'remainder'
tmp = StringField()
self.assertEqual(tmp.deserialize(ser), b'remainder')
self.assertEqual(tmp.to_dict(), '')
def test_plain(self):
testStr = 'Hello world'
tmp = StringField(testStr, format=StringFmt.ASCII_8BIT)
self.assertEqual(tmp.bit_size(), 12 * 8)
ser = tmp.serialize()
self.assertEqual(ser, b'\xcbHello world')
ser += b'remainder'
tmp2 = StringField()
self.assertEqual(tmp2.deserialize(ser), b'remainder')
self.assertEqual(tmp2.to_dict(), testStr)
def test_bcd_plus(self):
testStr = '123.45-67 890'
tmp = StringField(testStr, format=StringFmt.BCD_PLUS)
self.assertEqual(tmp.bit_size(), 8 * 8)
ser = tmp.serialize()
self.assertEqual(ser, b'\x47\x12\x3c\x45\xb6\x7a\x89\x0a')
ser += b'remainder'
tmp2 = StringField()
self.assertEqual(tmp2.deserialize(ser), b'remainder')
self.assertEqual(tmp2.to_dict(), testStr + ' ') # append padding space
def test_ascii_6bit(self):
testStr = 'IPMI Hello world'
tmp = StringField(testStr, format=StringFmt.ASCII_6BIT)
self.assertEqual(tmp.bit_size(), 13 * 8)
ser = tmp.serialize()
self.assertEqual(ser,
b'\x8c\x29\xdc\xa6\x00Z\xb2\xec\x0b\xdc\xaf\xcc\x92')
ser += b'remainder'
tmp2 = StringField()
self.assertEqual(tmp2.deserialize(ser), b'remainder')
self.assertEqual(tmp2.to_dict(), 'IPMI HELLO WORLD')
class ArrayTest(FruAreaBase):
_schema = [
('first_byte', FixedField, 'u8', {'default': 0}),
('second_byte', FixedField, 'u8', {'default': 0}),
('bits1', FixedField, 'u4', {'default': 0}),
('bits2', FixedField, 'u4', {'default': 0}),
]
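# Note (added remark): each ArrayTest entry packs to 3 bytes (two u8 fields
# plus two u4 fields sharing one byte), which is why test_array below expects
# size_total() == 3 * 3 for three entries.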
class TestMisc(unittest.TestCase):
def test_uuid(self):
testUid = 'cafebabe-1234-5678-d00f-deadbeef4711'
tmp = GuidField(testUid)
self.assertEqual(tmp.bit_size(), 128)
ser = tmp.serialize()
self.assertEqual(ser, b'\xbe\xba\xfe\xca4\x12xV\xd0\x0f\xde\xad\xbe\xefG\x11')
ser += b'remainder'
tmp2 = GuidField()
self.assertEqual(tmp2.deserialize(ser), b'remainder')
self.assertEqual(tmp2.to_dict(), testUid)
def test_array(self):
tmp = ArrayField(ArrayTest, initdict=[
{ 'first_byte': 1, 'second_byte': 2, 'bits1': 3, 'bits2': 4, },
{ 'first_byte': 5, 'second_byte': 6, 'bits1': 7, 'bits2': 8, },
{ 'first_byte': 9, 'second_byte': 10, 'bits1': 11, 'bits2': 12, },
])
self.assertEqual(tmp.size_total(), 3 * 3)
ser = tmp.serialize()
self.assertEqual(ser, b'\x01\x024\x05\x06x\t\n\xbc')
tmp2 = ArrayField(ArrayTest)
tmp2.deserialize(ser)
self.assertEqual(tmp.__repr__(), tmp2.__repr__())
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"frugy.types.StringField",
"frugy.types.ArrayField",
"frugy.types.GuidField"
] |
[((3340, 3355), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3353, 3355), False, 'import unittest\n'), ((332, 345), 'frugy.types.StringField', 'StringField', ([], {}), '()\n', (343, 345), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((457, 470), 'frugy.types.StringField', 'StringField', ([], {}), '()\n', (468, 470), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((649, 698), 'frugy.types.StringField', 'StringField', (['testStr'], {'format': 'StringFmt.ASCII_8BIT'}), '(testStr, format=StringFmt.ASCII_8BIT)\n', (660, 698), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((871, 884), 'frugy.types.StringField', 'StringField', ([], {}), '()\n', (882, 884), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((1075, 1122), 'frugy.types.StringField', 'StringField', (['testStr'], {'format': 'StringFmt.BCD_PLUS'}), '(testStr, format=StringFmt.BCD_PLUS)\n', (1086, 1122), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((1311, 1324), 'frugy.types.StringField', 'StringField', ([], {}), '()\n', (1322, 1324), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((1550, 1599), 'frugy.types.StringField', 'StringField', (['testStr'], {'format': 'StringFmt.ASCII_6BIT'}), '(testStr, format=StringFmt.ASCII_6BIT)\n', (1561, 1599), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((1831, 1844), 'frugy.types.StringField', 'StringField', ([], {}), '()\n', (1842, 1844), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((2377, 2395), 'frugy.types.GuidField', 'GuidField', (['testUid'], {}), '(testUid)\n', (2386, 2395), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((2602, 2613), 'frugy.types.GuidField', 'GuidField', ([], {}), '()\n', (2611, 2613), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((2767, 2993), 'frugy.types.ArrayField', 'ArrayField', (['ArrayTest'], {'initdict': "[{'first_byte': 1, 'second_byte': 2, 'bits1': 3, 'bits2': 4}, {'first_byte':\n 5, 'second_byte': 6, 'bits1': 7, 'bits2': 8}, {'first_byte': 9,\n 'second_byte': 10, 'bits1': 11, 'bits2': 12}]"}), "(ArrayTest, initdict=[{'first_byte': 1, 'second_byte': 2, 'bits1':\n 3, 'bits2': 4}, {'first_byte': 5, 'second_byte': 6, 'bits1': 7, 'bits2':\n 8}, {'first_byte': 9, 'second_byte': 10, 'bits1': 11, 'bits2': 12}])\n", (2777, 2993), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n'), ((3198, 3219), 'frugy.types.ArrayField', 'ArrayField', (['ArrayTest'], {}), '(ArrayTest)\n', (3208, 3219), False, 'from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase\n')]
|
import cv2
import numpy as np
from utils.test_images_generator.generator_config import AVAILABLE_SHAPES_DICT
from utils.test_images_generator.generator_utils import generate_random_color, generate_random_image_points
def generate_random_image(width, height):
# ToDo generate white image
# https://numpy.org/doc/1.18/reference/generated/numpy.full.html
generated_image = np.zeros((height, width, 3), dtype=np.uint8)
# ToDo choose random number of shapes from AVAILABLE_SHAPES_DICT
# https://numpy.org/doc/1.18/reference/random/generated/numpy.random.randint.html
# https://numpy.org/doc/1.18/reference/random/generated/numpy.random.choice.html
chosen_shapes = []
for shape in chosen_shapes:
if shape == AVAILABLE_SHAPES_DICT['LINE']:
_draw_random_line(generated_image)
elif shape == AVAILABLE_SHAPES_DICT['TRIANGLE']:
_draw_random_triangle(generated_image)
elif shape == AVAILABLE_SHAPES_DICT['RECTANGLE']:
_draw_random_rectangle(generated_image)
elif shape == AVAILABLE_SHAPES_DICT['CIRCLE']:
_draw_random_circle(generated_image)
return generated_image
def _draw_random_line(generated_image):
    # ToDo draw random line (use generate_random_image_points and generate_random_color)
# https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
return
def _draw_random_triangle(generated_image):
    # ToDo draw random triangle (use generate_random_image_points and generate_random_color)
# https://docs.opencv.org/3.1.0/dc/da5/tutorial_py_drawing_functions.html
# https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga3069baf93b51565e386c8e591f8418e6
    # format for triangle: reshape((-1, 1, 2))
# https://numpy.org/doc/1.18/reference/generated/numpy.reshape.html
return
def _draw_random_rectangle(generated_image):
    # ToDo draw random rectangle (use generate_random_image_points and generate_random_color)
# https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
return
def _draw_random_circle(generated_image):
    # ToDo draw random circle (use generate_random_image_points and generate_random_color)
# https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
return
|
[
"numpy.zeros"
] |
[((385, 429), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'np.uint8'}), '((height, width, 3), dtype=np.uint8)\n', (393, 429), True, 'import numpy as np\n')]
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from datetime import (
datetime,
timedelta,
)
import swapper
from factory import SubFactory
from factory.django import DjangoModelFactory
from pytz import utc
from accelerator.tests.factories.application_type_factory import (
ApplicationTypeFactory
)
from accelerator.tests.factories.program_cycle_factory import (
ProgramCycleFactory
)
from accelerator.tests.factories.startup_factory import StartupFactory
from accelerator_abstract.models.base_application import (
INCOMPLETE_APP_STATUS,
)
Application = swapper.load_model('accelerator', 'Application')
class ApplicationFactory(DjangoModelFactory):
class Meta:
model = Application
cycle = SubFactory(ProgramCycleFactory)
startup = SubFactory(StartupFactory)
application_type = SubFactory(ApplicationTypeFactory)
application_status = INCOMPLETE_APP_STATUS
submission_datetime = utc.localize(datetime.now() + timedelta(-2))
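# Note (added remark): timedelta(-2) shifts the timestamp two days into the
# past before utc.localize attaches UTC tzinfo, so submission_datetime is a
# timezone-aware datetime two days ago.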
|
[
"swapper.load_model",
"factory.SubFactory",
"datetime.datetime.now",
"datetime.timedelta"
] |
[((626, 674), 'swapper.load_model', 'swapper.load_model', (['"""accelerator"""', '"""Application"""'], {}), "('accelerator', 'Application')\n", (644, 674), False, 'import swapper\n'), ((780, 811), 'factory.SubFactory', 'SubFactory', (['ProgramCycleFactory'], {}), '(ProgramCycleFactory)\n', (790, 811), False, 'from factory import SubFactory\n'), ((826, 852), 'factory.SubFactory', 'SubFactory', (['StartupFactory'], {}), '(StartupFactory)\n', (836, 852), False, 'from factory import SubFactory\n'), ((876, 910), 'factory.SubFactory', 'SubFactory', (['ApplicationTypeFactory'], {}), '(ApplicationTypeFactory)\n', (886, 910), False, 'from factory import SubFactory\n'), ((997, 1011), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1009, 1011), False, 'from datetime import datetime, timedelta\n'), ((1014, 1027), 'datetime.timedelta', 'timedelta', (['(-2)'], {}), '(-2)\n', (1023, 1027), False, 'from datetime import datetime, timedelta\n')]
|
"""
WSGI config for etd_drop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "etd_drop.settings")
#Attempt to set environment variables from DOTENV, if we've been provided
#a path
DOTENV_path = os.environ.get('DOTENV', None)
if DOTENV_path is not None:
import dotenv
dotenv.read_dotenv(DOTENV_path)
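# Example (illustrative, hypothetical path): point DOTENV at an env file
# before launching the server, e.g.
#   DOTENV=/path/to/.env gunicorn etd_drop.wsgi:application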
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"os.environ.get",
"os.environ.setdefault",
"django.core.wsgi.get_wsgi_application",
"dotenv.read_dotenv"
] |
[((234, 302), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""etd_drop.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'etd_drop.settings')\n", (255, 302), False, 'import os\n'), ((401, 431), 'os.environ.get', 'os.environ.get', (['"""DOTENV"""', 'None'], {}), "('DOTENV', None)\n", (415, 431), False, 'import os\n'), ((580, 602), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ([], {}), '()\n', (600, 602), False, 'from django.core.wsgi import get_wsgi_application\n'), ((482, 513), 'dotenv.read_dotenv', 'dotenv.read_dotenv', (['DOTENV_path'], {}), '(DOTENV_path)\n', (500, 513), False, 'import dotenv\n')]
|
"""
Copyright 2015, University of Freiburg.
<NAME> <<EMAIL>>
"""
import re
def normalize_entity_name(name):
name = name.lower()
name = name.replace('!', '')
name = name.replace('.', '')
name = name.replace(',', '')
name = name.replace('-', '')
name = name.replace('_', '')
name = name.replace(' ', '')
name = name.replace('\'', '')
return name
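# Example (illustrative addition, not from the original module):
#   normalize_entity_name("Guns N' Roses!")  ->  'gunsnroses'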
def read_abbreviations(abbreviations_file):
'''
Return a set of abbreviations.
:param abbreviations_file:
:return:
'''
abbreviations = set()
    with open(abbreviations_file, 'r', encoding='utf-8') as f:
        for line in f:
            abbreviations.add(line.strip().lower())
return abbreviations
def remove_abbreviations_from_entity_name(entity_name,
abbreviations):
tokens = entity_name.lower().split(' ')
non_abbr_tokens = [t for t in tokens if t not in abbreviations]
return ' '.join(non_abbr_tokens)
def remove_prefixes_from_name(name):
if name.startswith('the'):
name = name[3:]
return name
def remove_suffixes_from_name(name):
if '#' in name or '(' in name:
name = remove_number_suffix(name)
name = remove_bracket_suffix(name)
return name
def remove_number_suffix(name):
res = re.match(r'.*( #[0-9]+)$', name)
if res:
name = name[:res.start(1)]
return name
else:
return name
def remove_bracket_suffix(name):
res = re.match(r'.*( \([^\(\)]+\))$', name)
if res:
name = name[:res.start(1)]
return name
else:
return name
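# Examples (illustrative additions, not from the original module):
#   remove_number_suffix('Apollo #13')       ->  'Apollo'
#   remove_bracket_suffix('Paris (France)')  ->  'Paris'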
|
[
"re.match"
] |
[((1304, 1335), 're.match', 're.match', (['""".*( #[0-9]+)$"""', 'name'], {}), "('.*( #[0-9]+)$', name)\n", (1312, 1335), False, 'import re\n'), ((1479, 1519), 're.match', 're.match', (['""".*( \\\\([^\\\\(\\\\)]+\\\\))$"""', 'name'], {}), "('.*( \\\\([^\\\\(\\\\)]+\\\\))$', name)\n", (1487, 1519), False, 'import re\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 19 11:30:56 2022
@author: adowa
"""
import numpy as np
import tensorflow as tf
from utils import (build_logistic_regression,
compile_logistic_regression)
from tensorflow.keras import regularizers
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
if __name__ == "__main__":
n_epochs = int(1e3) # just a large number
print_train = True
# clear memory states
tf.keras.backend.clear_session()
# generate random test data
X,y = make_classification(n_samples = 150,
n_features = 100,
n_informative = 3,
n_redundant = 10,
n_classes = 2,
n_clusters_per_class = 4,
flip_y = .01,
class_sep = .75,# how easy to separate the two classes
shuffle = True,
random_state = 12345,
)
# one-hot encoding for softmax
y = y.reshape((-1,1))
y = np.hstack([y,1-y])
# split the data into train, validation, and test
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = .1,random_state = 12345)
X_train,X_valid,y_train,y_valid = train_test_split(X_train,y_train,test_size = .1,random_state = 12345)
# add some 0.5 labeled data - don't use too much
X_noise = np.random.normal(X_train.mean(),X_train.std(),size = (int(X_train.shape[0]/4),100))
y_noise = np.array([[0.5,0.5]] * int(X_train.shape[0]/4))
X_train = np.concatenate([X_train,X_noise])
y_train = np.concatenate([y_train,y_noise])
# X_noise = np.random.normal(X_test.mean(),X_test.std(),size = (int(X_test.shape[0]/2),100))
# y_noise = np.array([[0.5,0.5]] * int(X_test.shape[0]/2))
# X_test = np.concatenate([X_test,X_noise])
# y_test = np.concatenate([y_test,y_noise])
# build the model
tf.random.set_seed(12345)
logistic_regression = build_logistic_regression(
input_size = X_train.shape[1],
output_size = 2,
special = False,
kernel_regularizer = regularizers.L2(l2 = 1e-3),
activity_regularizer = regularizers.L1(l1 = 1e-3),
print_model = True,
)
# compile the model
logistic_regression,callbacks = compile_logistic_regression(
logistic_regression,
model_name = 'temp.h5',
optimizer = None,
loss_function = None,
metric = None,
callbacks = None,
learning_rate = 1e-3,
tol = 1e-4,
patience = 10,
)
# train and validate the model
logistic_regression.fit(
X_train,
y_train,
batch_size = 4,
epochs = n_epochs,
verbose = print_train,
callbacks = callbacks,
validation_data = (X_valid,y_valid),
shuffle = True,
class_weight = None,# tf has this but I don't think it is the same as sklearn
)
y_pred = logistic_regression.predict(X_test)
print(roc_auc_score(y_test,y_pred,))
|
[
"tensorflow.random.set_seed",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.backend.clear_session",
"sklearn.datasets.make_classification",
"tensorflow.keras.regularizers.L1",
"numpy.hstack",
"sklearn.metrics.roc_auc_score",
"tensorflow.keras.regularizers.L2",
"numpy.concatenate",
"utils.compile_logistic_regression"
] |
[((557, 589), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (587, 589), True, 'import tensorflow as tf\n'), ((632, 823), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(150)', 'n_features': '(100)', 'n_informative': '(3)', 'n_redundant': '(10)', 'n_classes': '(2)', 'n_clusters_per_class': '(4)', 'flip_y': '(0.01)', 'class_sep': '(0.75)', 'shuffle': '(True)', 'random_state': '(12345)'}), '(n_samples=150, n_features=100, n_informative=3,\n n_redundant=10, n_classes=2, n_clusters_per_class=4, flip_y=0.01,\n class_sep=0.75, shuffle=True, random_state=12345)\n', (651, 823), False, 'from sklearn.datasets import make_classification\n'), ((1347, 1368), 'numpy.hstack', 'np.hstack', (['[y, 1 - y]'], {}), '([y, 1 - y])\n', (1356, 1368), True, 'import numpy as np\n'), ((1458, 1515), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': '(12345)'}), '(X, y, test_size=0.1, random_state=12345)\n', (1474, 1515), False, 'from sklearn.model_selection import train_test_split\n'), ((1554, 1623), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.1)', 'random_state': '(12345)'}), '(X_train, y_train, test_size=0.1, random_state=12345)\n', (1570, 1623), False, 'from sklearn.model_selection import train_test_split\n'), ((1851, 1885), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_noise]'], {}), '([X_train, X_noise])\n', (1865, 1885), True, 'import numpy as np\n'), ((1899, 1933), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_noise]'], {}), '([y_train, y_noise])\n', (1913, 1933), True, 'import numpy as np\n'), ((2222, 2247), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(12345)'], {}), '(12345)\n', (2240, 2247), True, 'import tensorflow as tf\n'), ((2805, 2994), 'utils.compile_logistic_regression', 'compile_logistic_regression', (['logistic_regression'], {'model_name': '"""temp.h5"""', 'optimizer': 'None', 'loss_function': 'None', 'metric': 'None', 'callbacks': 'None', 'learning_rate': '(0.001)', 'tol': '(0.0001)', 'patience': '(10)'}), "(logistic_regression, model_name='temp.h5',\n optimizer=None, loss_function=None, metric=None, callbacks=None,\n learning_rate=0.001, tol=0.0001, patience=10)\n", (2832, 2994), False, 'from utils import build_logistic_regression, compile_logistic_regression\n'), ((4086, 4115), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4099, 4115), False, 'from sklearn.metrics import roc_auc_score\n'), ((2545, 2570), 'tensorflow.keras.regularizers.L2', 'regularizers.L2', ([], {'l2': '(0.001)'}), '(l2=0.001)\n', (2560, 2570), False, 'from tensorflow.keras import regularizers\n'), ((2627, 2652), 'tensorflow.keras.regularizers.L1', 'regularizers.L1', ([], {'l1': '(0.001)'}), '(l1=0.001)\n', (2642, 2652), False, 'from tensorflow.keras import regularizers\n')]
|
import datetime
mynow = datetime.datetime.now()
print("My datetime is " , mynow)
mynumber = 10
mytext = "Hello"
print(mynumber, mytext)
x = 10
y = "10"
z = 10.1
sum1 = x+x
sum2 = y+y
print(sum1 , sum2)
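# note: x + x adds the integers (20), while y + y concatenates the strings ('1010')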
print(type(x), type(y), type(z))
## List Type
grade = [9.5,8.5,6.45]
## range - We can use range to create a list automatically
test_range = list(range(1,10))
print(test_range)
test_range_by_3 = list(range(1,20,2))
print(test_range_by_3)
rainfall = [10.0,10,'Test',[1,2,3,4]]
print(rainfall[2])
######### use dir() to list names and help(str.upper) for documentation ## press q to quit help
## built in function dir(__builtins__)
grade_1 = [9.5,8.5,6.45,21]
grade_1_sum = sum(grade_1)
print(grade_1_sum)
grade_1_len = len(grade_1)
print(grade_1_len)
grade_avg = grade_1_sum/grade_1_len
print(grade_avg)
## dict
dict_grade = {"Kar":10,"Tes":10,"Vis":10}
dict_grade_sum = sum(dict_grade.values())
dict_grade_len = len(dict_grade.values())
dict_grade_avg = dict_grade_sum/dict_grade_len
print(dict_grade_avg)
## tuples use () and you cannot add any new values ...
day_temperatures = {'morning': (1.1 , 2.2, 3.4), 'noon': (2.3, 4.5, 3.1), 'evening': (2.4, 3.5, 6.5)}
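## tuples still support the usual sequence operations, e.g. averaging
## (small added example):
morning_avg = sum(day_temperatures['morning']) / len(day_temperatures['morning'])
print("Average morning temperature:", morning_avg)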
|
[
"datetime.datetime.now"
] |
[((25, 48), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (46, 48), False, 'import datetime\n')]
|
#!/usr/bin/env python3
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
import paramiko
'''
config-agent: {}
nsr_name: ccore_testbed_nsd
parameter: {}
vnfr:
1:
connection_point:
- ip_address: 172.16.58.3
name: homesteadprov_vnfd/sigport
mgmt_ip_address: 10.66.113.180
mgmt_port: 0
name: NS1__homesteadprov_vnfd__1
vdur:
- id: 723aff17-7e10-423f-8780-28b6287608d2
management_ip: 10.66.113.180
name: iovdu_0
vm_management_ip: 10.0.113.16
2:
connection_point:
- ip_address: 192.168.127.12
name: homesteadprov_vnfd/sigport
mgmt_ip_address: 10.66.113.178
mgmt_port: 0
name: NS1__homesteadprov_vnfd__2
vdur:
- id: 10684c1d-13fc-4164-a6d0-381ac725f85c
management_ip: 10.66.113.178
name: iovdu_0
vm_management_ip: 10.0.113.15
3:
connection_point:
- ip_address: 172.16.31.10
name: homesteadprov_vnfd/sigport
mgmt_ip_address: 10.66.113.181
mgmt_port: 0
name: NS1__homesteadprov_vnfd__3
vdur:
- id: 84c3f536-a56a-4040-ac39-152574397300
management_ip: 10.66.113.181
name: iovdu_0
vm_management_ip: 10.0.113.18
4:
connection_point:
- ip_address: 172.16.17.32
name: sipp_vnfd/cp0
mgmt_ip_address: 10.66.113.177
mgmt_port: 2022
name: NS1__sipp_vnfd__4
vdur:
- id: 85035b3b-14b5-4f61-a943-66b0e1ef858a
management_ip: 10.66.113.177
name: iovdu
vm_management_ip: 10.0.113.14
5:
mgmt_ip_address: 10.66.113.179
mgmt_port: 0
name: NS1__dnsserver_vnfd__5
vdur:
- id: 4f365a48-49b5-44c0-b38d-50113ddc7fa4
management_ip: 10.66.113.179
name: iovdu_0
vm_management_ip: 10.0.113.17
6:
connection_point:
- ip_address: 172.16.31.10
name: sprout_vnfd/sigport
mgmt_ip_address: 10.66.113.182
mgmt_port: 0
name: NS1__sprout_vnfd__6
vdur:
- id: 604895c1-a7fe-4341-90d6-9a1c5b56dabd
management_ip: 10.66.113.182
name: iovdu_0
vm_management_ip: 10.0.113.19
vnfr_name: NS1__dnsserver_vnfd__5
'''
class ConfigurationError(Exception):
pass
def copy_file_ssh_sftp_data(server, username, dirname, filename, data):
sshclient = paramiko.SSHClient()
sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
sshclient.load_system_host_keys(filename="/dev/null")
sshclient.connect(server, username=username, password="<PASSWORD>")
sftpclient = sshclient.open_sftp()
filehdl = sftpclient.open(dirname + '/' + filename, 'w')
filehdl.write(data)
filehdl.close()
sshclient.close()
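# Usage sketch (illustrative, hypothetical values): push a config file to a
# VNF over SFTP:
#   copy_file_ssh_sftp_data('10.0.0.1', 'fedora', '/tmp', 'test.com.conf', b'...')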
'''
script to configure DNS server
'''
def configure_dns(logger, run_dir, vnf_mgmt_ip, vm_mgmt_ip):
#Add file to DNS server config
file_str = '''
# Static DNS for IMS solution
local-zone: "test.com." static
# Management A records
# A records for individual Clearwater nodes
# A record load-balancing
# S-CSCF cluster
# I-CSCF cluster
# Reverse lookups for individual nodes
'''.encode('ascii')
copy_file_ssh_sftp_data(vnf_mgmt_ip, 'fedora', '/tmp/', 'test.com.conf', file_str)
sh_file = "{}/configure_dnsserver-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating DNS server script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "fedora"
set pw "fedora"
set success 0
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@{vnf_mgmt_ip}
set spid $spawn_id
set timeout 60
expect -i $spid \
"*?assword:" {{
exp_send -i $spid "$pw\r"
if {{ $success == 0 }} {{
incr success -1
exp_continue
}}
}} "]$ " {{
set success 1
}} "yes/no" {{
exp_send -i $spid "yes\r"
exp_continue
}} timeout {{
set success -1
}}
send "sudo su\r"
expect "]# "
send "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf\r"
expect "]# "
sleep 5
send "yum install -y unbound\r"
expect "]# "
send "sed -iE 's/# interface: 0\.0\.0\.0/interface: 0\.0\.0\.0/' /etc/unbound/unbound.conf\r"
expect "]# "
send "sed -iE 's/# access-control: 0\.0\.0\.0\\/0.*/access-control: 0\.0\.0\.0\\/0 allow/' /etc/unbound/unbound.conf\r"
expect "]# "
send "sed -iE 's/interface-automatic: no/interface-automatic: yes/' /etc/unbound/unbound.conf\r"
expect "]# "
send "cp /tmp/test.com.conf /etc/unbound/local.d/test.com.conf\r"
expect "]# "
exp_close -i $spid
'''.format(vnf_mgmt_ip=vnf_mgmt_ip, vm_mgmt_ip=vm_mgmt_ip))
os.chmod(sh_file, stat.S_IRWXU)
cmd = "{sh_file}".format(sh_file=sh_file)
logger.debug("Executing shell cmd : %s", cmd)
rc = subprocess.call(cmd, shell=True)
if rc != 0:
raise ConfigurationError("Configuration of {} failed: {}".format(vnf_mgmt_ip, rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/dnsserver_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
def find_vnfr(vnfr_dict, name):
try:
for k, v in vnfr_dict.items():
if v['name'] == name:
return v
except KeyError:
logger.warn("Could not find vnfr for name : %s", name)
def find_cp_ip(vnfr, cp_name):
for cp in vnfr['connection_point']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", cp_name)
# This is temporary. All this data should come from VLR
def get_ipv4_subnet(vnfr, cp_name):
for cp in vnfr['connection_point']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
nw = cp['ip_address'].split(".")
subnet = nw[0]+"."+nw[1]+"."+nw[2]+".0/24"
return subnet
raise ValueError("Could not find vnfd %s connection point %s", cp_name)
def find_vnfr_mgmt_ip(vnfr):
#return vnfr['mgmt_interface']['ip_address']
return vnfr['mgmt_ip_address']
def find_vdur_mgmt_ip(vnfr):
return vnfr['vdur'][0]['vm_management_ip']
def find_param_value(param_list, input_param):
for item in param_list:
logger.debug("Parameter: %s", format(item))
if item['name'] == input_param:
return item['value']
dns_vnfr = find_vnfr(yaml_cfg['vnfr'], yaml_cfg['vnfr_name'])
dns_vnf_mgmt_ip = find_vnfr_mgmt_ip(dns_vnfr)
dns_vm_mgmt_ip = find_vdur_mgmt_ip(dns_vnfr)
logger.debug("Sleeping for 1 min while we wait for VNFs to boot up ..")
time.sleep(60)
logger.debug("Configuring DNS server VNF..")
configure_dns(logger, run_dir, dns_vnf_mgmt_ip, dns_vm_mgmt_ip)
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()
|
[
"yaml.load",
"os.chmod",
"paramiko.SSHClient",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"logging.StreamHandler",
"os.path.exists",
"time.strftime",
"time.sleep",
"logging.Formatter",
"subprocess.call",
"os.path.join",
"paramiko.AutoAddPolicy",
"logging.getLogger",
"argparse.FileType"
] |
[((2261, 2281), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (2279, 2281), False, 'import paramiko\n'), ((4834, 4865), 'os.chmod', 'os.chmod', (['sh_file', 'stat.S_IRWXU'], {}), '(sh_file, stat.S_IRWXU)\n', (4842, 4865), False, 'import os\n'), ((4971, 5003), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (4986, 5003), False, 'import subprocess\n'), ((2324, 2348), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (2346, 2348), False, 'import paramiko\n'), ((3221, 3250), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (3234, 3250), False, 'import time\n'), ((5168, 5193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5191, 5193), False, 'import argparse\n'), ((5405, 5461), 'os.path.join', 'os.path.join', (["os.environ['RIFT_INSTALL']", '"""var/run/rift"""'], {}), "(os.environ['RIFT_INSTALL'], 'var/run/rift')\n", (5417, 5461), False, 'import os\n'), ((5638, 5697), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file', 'level': 'logging.DEBUG'}), '(filename=log_file, level=logging.DEBUG)\n', (5657, 5697), False, 'import logging\n'), ((5715, 5734), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5732, 5734), False, 'import logging\n'), ((5749, 5772), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (5770, 5772), False, 'import logging\n'), ((5964, 6037), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (5981, 6037), False, 'import logging\n'), ((6317, 6336), 'yaml.load', 'yaml.load', (['yaml_str'], {}), '(yaml_str)\n', (6326, 6336), False, 'import yaml\n'), ((8248, 8262), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (8258, 8262), False, 'import time\n'), ((5477, 5500), 'os.path.exists', 'os.path.exists', (['run_dir'], {}), '(run_dir)\n', (5491, 5500), False, 'import os\n'), ((5514, 5534), 'os.makedirs', 'os.makedirs', (['run_dir'], {}), '(run_dir)\n', (5525, 5534), False, 'import os\n'), ((5599, 5628), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (5612, 5628), False, 'import time\n'), ((5244, 5266), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (5261, 5266), False, 'import argparse\n')]
|
import scipy
import scipy.sparse.csgraph
import wall_generation, mesh
import mesh_operations
import utils
import triangulation, filters
from mesh_utilities import SurfaceSampler, tubeRemesh
import numpy as np
def meshComponents(m, cutEdges):
"""
Get the connected components of triangles of a mesh cut along the edges `cutEdges`.
Parameters
----------
m
The mesh to split
cutEdges
The edges (vertex index pairs) splitting the mesh up into disconnected regions
Returns
-------
ncomponents
Number of connected components
components
The component index for each mesh triangle.
"""
cutEdgeSet = set([(min(fs), max(fs)) for fs in cutEdges])
tri_sets = [set(t) for t in m.triangles()]
# Build dual graph, excluding dual edges that cross the fused segments.
def includeDualEdge(u, v):
common_vertices = tri_sets[u] & tri_sets[v]
return (len(common_vertices) == 2) and ((min(common_vertices), max(common_vertices)) not in cutEdgeSet)
dual_edges = [(u, v) for u in range(len(tri_sets))
for v in m.trisAdjTri(u)
if includeDualEdge(u, v)]
adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges))).tocsc()
return scipy.sparse.csgraph.connected_components(adj)
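# Usage sketch (illustrative): split a mesh along a set of fused edges and
# report the per-triangle component labels:
#   ncomp, comp = meshComponents(m, fuseSegments)  # comp[t] = component of triangle t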
def wallMeshComponents(sheet, distinctTubeComponents = False):
"""
Get the connected wall components of a sheet's mesh (assigning the tubes "component" -1 by default or components -1, -2, ... if distinctTubeComponents is True).
"""
m = sheet.mesh()
nt = m.numTris()
iwt = np.array([sheet.isWallTri(ti) for ti in range(nt)], dtype=np.bool)
dual_edges = [(u, v) for u in range(nt)
for v in m.trisAdjTri(u)
if iwt[u] == iwt[v]]
adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges))).tocsc()
numComponents, components = scipy.sparse.csgraph.connected_components(adj)
wallLabels = components[iwt].copy()
renumber = np.empty(numComponents, dtype=np.int)
renumber[:] = -1 # This assigns all non-wall triangles the "component" -1
uniqueWallLabels = np.unique(wallLabels)
numWallComponents = len(uniqueWallLabels)
renumber[uniqueWallLabels] = np.arange(numWallComponents, dtype=np.int)
if distinctTubeComponents:
uniqueTubeLabels = np.unique(components[~iwt])
renumber[uniqueTubeLabels] = -1 - np.arange(len(uniqueTubeLabels), dtype=np.int)
components = renumber[components]
return numWallComponents, components
def remeshWallRegions(m, fuseMarkers, fuseSegments, pointSetLiesInWall, permitWallInteriorVertices = False, pointsLieInHole = None):
"""
Take an initial mesh of the sheet and determine the connected triangle components
that correspond to fused regions. Also remesh these regions so as not to have
interior wall vertices (if requested).
Parameters
----------
m
The initial sheet mesh
fuseMarkers
Fused vertices of the original sheet mesh
fuseSegments
Fused edges of the original sheet mesh
pointSetLiesInWall
Function for testing whether a given point set lies within a wall region
permitWallInteriorVertices
Whether to permit Triangle to add vertices inside the wall regions.
(This should usually be `False`, since these vertices permit the walls to crumple.)
pointsLieInHole
If provided, this function is used to test whether a given mesh
component is actually a hole.
Returns
-------
remeshedSheet
A `MeshFEM` triangle mesh of the top sheet ready for inflation simulation.
isWallVtx
Per-vertex boolean array specifying whether each vertex is part of the wall region
(interior or boundary).
isWallBdryVtx
Per-vertex boolean array specifying whether each vertex is part of the wall boundary.
If `permitWallInteriorVertices` is `False`, then this is the same as `isWallVtx`.
"""
ncomponents, components = meshComponents(m, fuseSegments)
############################################################################
# Determine which connected components are walls.
############################################################################
triCenters = m.vertices()[m.triangles()].mean(axis=1)
numWalls = 0
wallLabels = -np.ones(m.numTris(), dtype=np.int)
for c in range(ncomponents):
component_tris = np.flatnonzero(components == c)
centers = triCenters[component_tris]
# Discard the entire component if it is actually a hole of the flattened input mesh.
if (pointsLieInHole is not None):
if (pointsLieInHole(centers)):
wallLabels[component_tris] = -2 # only labels -1 (tube) and >= 0 (wall) are kept
if (pointSetLiesInWall(centers)):
wallLabels[component_tris] = numWalls
numWalls = numWalls + 1
############################################################################
# Separately remesh each wall sub-mesh, preserving the non-wall component.
############################################################################
origV = m.vertices()
origF = m.triangles()
meshes = [(origV, origF[wallLabels == -1])]
remeshFlags = 'Y' + ('S0' if not permitWallInteriorVertices else '')
for wall in range(numWalls):
# Extract the original wall mesh.
wallMesh = mesh.Mesh(*mesh_operations.submesh(origV, origF, wallLabels == wall))
# Perform the initial remeshing of the wall's boundary segments.
wallMesh, wmFuseMarkers, wmFusedEdges = wall_generation.triangulate_channel_walls(*mesh_operations.removeDanglingVertices(wallMesh.vertices()[:, 0:2], wallMesh.boundaryElements()), triArea=float('inf'), flags=remeshFlags)
# Note: if the wall mesh encloses holes, Triangle will also have triangulated the holes;
# we must detect these and remove them from the output.
# We decompose the remeshed wall into connected components (after
# cutting away the original wall boundary segments) and keep only the one
# inside the wall region. Exactly one component should remain after this
# process.
nc, wallMeshComponents = meshComponents(wallMesh, wmFusedEdges)
if (nc != 1):
wmV = wallMesh.vertices()
wmF = wallMesh.triangles()
triCenters = wmV[wmF].mean(axis=1)
keepTri = np.zeros(wallMesh.numTris(), dtype=np.bool)
keptComponents = 0
for c in range(nc):
component_tris = np.flatnonzero(wallMeshComponents == c)
if (pointSetLiesInWall(triCenters[component_tris])):
keepTri[component_tris] = True
keptComponents += 1
if (keptComponents != 1): raise Exception('Should have kept exactly one component of the remeshed wall')
# Extract only the kept component.
wallMesh = mesh.Mesh(*mesh_operations.removeDanglingVertices(wmV, wmF[keepTri]))
meshes.append(wallMesh)
mergedV, mergedF = mesh_operations.mergedMesh(meshes)
remeshedSheet = mesh.Mesh(mergedV, mergedF)
############################################################################
# Determine wall vertices and wall boundary vertices.
############################################################################
wallVertices = set()
wallBoundaryVertices = set()
def addVertices(vtxSet, V):
for v in V: vtxSet.add(tuple(v))
rsV = remeshedSheet.vertices()
addVertices(wallBoundaryVertices, rsV[remeshedSheet.boundaryVertices()])
for wallmesh in meshes[1:]:
wmV = wallmesh.vertices()
addVertices(wallBoundaryVertices, wmV[wallmesh.boundaryVertices()])
addVertices(wallVertices, wmV)
wallVertices = wallVertices | wallBoundaryVertices
isWallVtx = np.array([tuple(v) in wallVertices for v in rsV])
isWallBdryVtx = np.array([tuple(v) in wallBoundaryVertices for v in rsV])
return remeshedSheet, isWallVtx, isWallBdryVtx
# Consider a point set to lie within a hole if over half of its points are within a hole (to a given tolerance)
class HoleTester:
def __init__(self, meshVertices, meshSampler):
self.V = meshVertices
self.sampler = meshSampler
self.eps = 1e-12 * utils.bbox_dims(meshVertices).max()
def dist(self, X):
"""
Compute the distance of each point in "X" to the sampler mesh.
"""
# This could be more efficient if SurfaceSampler had a method to get a
# distance to the sampled mesh...
if (X.shape[1] == 2):
X = np.pad(X, [(0, 0), (0, 1)])
closestPts = self.sampler.sample(X, self.V)
#print(closestPts)
return np.linalg.norm(X - closestPts, axis=1)
def pointWithinHole(self, X):
"""
Check whether each point in X individually lies with a hole.
"""
return self.dist(X) > self.eps
def __call__(self, X):
"""
Check whether a point set generally lies within a hole (i.e. if more
than half of its points are within a hole).
"""
return np.count_nonzero(self.pointWithinHole(X)) >= (X.shape[0] / 2)
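# Usage sketch (illustrative): build a tester from the SDF domain mesh and
# query a point set:
#   holeTest = HoleTester(sdfVertices, SurfaceSampler(sdfVertices, sdfTris))
#   holeTest(np.array([[0.5, 0.5]]))  # True if the point set lies in a hole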
# Note: if `targetEdgeSpacing` is set too low relative to `triArea`, `triangle`
# will insert new boundary points that fall in the interior of the flattened
# target surface (in strictly convex regions) when refining the triangulation.
#
# If the parametrization is subsequently used to lift the boundary points to
# 3D, these lifted points will not lie on the target surface's boundary. E.g.,
# they may lift off the ground plane even if all boundary vertices of the
# target surface lie on the ground plane.
def generateSheetMesh(sdfVertices, sdfTris, sdf, triArea, permitWallInteriorVertices = False, targetEdgeSpacing = 0.5, minContourLen = 0.75):
"""
Extract the channel walls described by a signed distance function and use
them generate a high-quality triangle mesh of the inflatable sheet.
Parameters
----------
sdfVertices
Vertices for the wall SDF domain mesh.
sdfTris
Triangles for the wall SDF domain mesh.
sdf
Per-vertex signed distances to the channel walls
triArea
Maximum triangle area (passed to Triangle)
permitWallInteriorVertices
Whether to permit Triangle to add vertices inside the wall regions.
(This should usually be `False`, since these vertices permit the walls to crumple.)
targetEdgeSpacing
The approximate resolution at which the extracted contours of the SDF are resampled
to generate the wall boundary curves.
minContourLen
The length threshold below which extracted contours are discarded.
Returns
-------
remeshedSheet
A `MeshFEM` triangle mesh of the top sheet ready for inflation simulation.
isWallVtx
Per-vertex boolean array specifying whether each vertex is part of the wall region
(interior or boundary).
isWallBdryVtx
Per-vertex boolean array specifying whether each vertex is part of the wall boundary.
If `permitWallInteriorVertices` is `False`, then this is the same as `isWallVtx`.
"""
pts, edges = wall_generation.extract_contours(sdfVertices, sdfTris, sdf,
targetEdgeSpacing=targetEdgeSpacing,
minContourLen=minContourLen)
m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(pts[:,0:2], edges, triArea)
sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
# Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
# precision at which we extracted the contours (and the contour resampling), so we take a vote.
pointsAreInWall = lambda X: np.mean(sdfSampler.sample(X, sdf)) < 0
pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
return remeshWallRegions(m, fuseMarkers, fuseSegments, pointsAreInWall, permitWallInteriorVertices, pointsLieInHole=pointsLieInHole)
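# Usage sketch (illustrative): given a wall SDF sampled on (sdfVertices, sdfTris),
#   sheet, isWallVtx, isWallBdryVtx = generateSheetMesh(sdfVertices, sdfTris, sdf, triArea=1.0)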
def generateSheetMeshCustomEdges(sdfVertices, sdfTris, sdf, customPts, customEdges, triArea, permitWallInteriorVertices = False):
m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(customPts[:,0:2], customEdges, triArea)
sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
# Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
# precision at which we extracted the contours (and the contour resampling), so we take a vote.
pointsAreInWall = lambda X: np.mean(sdfSampler.sample(X, sdf)) < 0
pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
return remeshWallRegions(m, fuseMarkers, fuseSegments, pointsAreInWall, permitWallInteriorVertices, pointsLieInHole=pointsLieInHole)
def meshWallsAndTubes(fusing_V, fusing_E, m, isWallTri, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles):
"""
Create a high quality mesh of the wall and tube regions enclosed by given fusing curves.
Parameters
----------
fusing_V, fusing_E
PSLG to be triangulated representing the fusing curves.
m
An initial mesh of the sheet region (with any hole triangles removed!) used to obtain the intersection of the wall regions with the sheet boundary.
isWallTri
        Boolean array holding whether each triangle of `m` is a wall triangle
holePoints, tubePoints, wallPoints
Lists of points within the hole, tube, and wall regions.
triArea
Maximum triangle area for the triangulation
permitWallInteriorVertices
Whether wall regions get interior vertices.
Returns
-------
remeshedSheet, isWallVtx, isWallBdryVtx
"""
############################################################################
# 1. Create a quality mesh of the air tubes.
############################################################################
# print(f"fusing_V.shape: {fusing_V.shape}")
# print(f"fusing_E.shape: {fusing_E.shape}")
# print(f"wallPoints: {np.array(wallPoints).shape}")
# print(f"holePoints: {np.array(holePoints).shape}")
mTubes, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(fusing_V[:, 0:2], fusing_E, holePoints=wallPoints + holePoints, triArea=triArea, omitQualityFlag=False, flags="j") # jettison vertices that got eaten by holes...
#utils.save((mTubes, fuseMarkers, fuseSegments), 'tubes_and_markers.pkl.gz')
if avoidSpuriousFusedTriangles:
try:
mTubes = tubeRemesh(mTubes, fuseMarkers, fuseSegments, minRelEdgeLen=0.3) # retriangulate where necessary to avoid spurious fused triangles in the tubes
except:
utils.save((mTubes, fuseMarkers, fuseSegments), utils.freshPath('tubeRemeshFailure', suffix='.pkl.gz'))
raise
fuseMarkers += [0 for i in range(mTubes.numVertices() - len(fuseMarkers))]
#mTubes.save('remeshedTubes.msh')
# For meshes without finite-thickness wall regions, we are done
# (and all vertices in fusing_V are wall/wall boundary vertices.)
if not np.any(isWallTri):
isWallVtx = np.array(fuseMarkers, dtype=np.bool)
return mTubes, isWallVtx, isWallVtx
############################################################################
# 2. Triangulate the wall meshes without inserting any Steiner points.
############################################################################
# We need to triangulate the new collection of boundary segments, which consists of the
# boundary segments from the new tube mesh along with the original mesh boundary segments
# that border wall regions.
boundarySegments = [(mTubes.vertices(), mTubes.boundaryElements())]
#print(boundarySegments)
# mesh.save("tube_bdry.obj", *boundarySegments[0])
wallBoundaryElements = m.boundaryElements()[isWallTri[m.elementsAdjacentBoundary()]]
if len(wallBoundaryElements) > 0:
boundarySegments.append((m.vertices(), wallBoundaryElements))
# mesh.save("sheet_bdry_intersect_walls.obj", *boundarySegments[1])
newPts, newEdges = mesh_operations.mergedMesh(boundarySegments)
# mesh.save("new_contour.obj", newPts, newEdges)
wallmeshFlags = 'Y' + ('S0' if not permitWallInteriorVertices else '')
mWall, _, _ = wall_generation.triangulate_channel_walls(newPts[:,0:2], newEdges, holePoints=tubePoints + holePoints, triArea=triArea if permitWallInteriorVertices else float('inf'), omitQualityFlag=False, flags="j" + wallmeshFlags) # jettison vertices that got eaten by holes...
# mWall.save("walls.obj")
############################################################################
# 3. Merge the tube and wall meshes
############################################################################
mFinal = mesh.Mesh(*mesh_operations.mergedMesh([mTubes, mWall]), embeddingDimension=3)
# mFinal.save("final.obj")
############################################################################
# 4. Determine wall vertices and wall boundary vertices.
############################################################################
wallVertices = set()
wallBoundaryVertices = set()
def addVertices(vtxSet, V):
for v in V: vtxSet.add(tuple(v))
finalV = mFinal.vertices()
addVertices(wallBoundaryVertices, finalV[mFinal.boundaryVertices()])
wmV = mWall.vertices()
addVertices(wallBoundaryVertices, wmV[mWall.boundaryVertices()])
addVertices(wallVertices, wmV)
# Also include fused vertices marked inside the tube mesh (i.e., those
# fused by zero-width curves)
addVertices(wallBoundaryVertices, mTubes.vertices()[np.array(fuseMarkers, dtype=np.bool)])
wallVertices = wallVertices | wallBoundaryVertices
isWallVtx = np.array([tuple(v) in wallVertices for v in finalV])
isWallBdryVtx = np.array([tuple(v) in wallBoundaryVertices for v in finalV])
return mFinal, isWallVtx, isWallBdryVtx
def newMeshingAlgorithm(sdfVertices, sdfTris, sdf, customPts, customEdges, triArea, permitWallInteriorVertices = False, avoidSpuriousFusedTriangles = True):
############################################################################
# 1. Perform an initial, low quality triangulation used only to segment the
# design domain into tube and wall regions.
############################################################################
m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(customPts[:, 0:2], customEdges, triArea=float('inf'), omitQualityFlag=True, flags="YY")
# m.save('initial_triangulation.msh')
############################################################################
# 2. Determine the wall components/hole points.
############################################################################
triCenters = m.vertices()[m.triangles()].mean(axis=1)
sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
numWalls = 0
wallPoints = []
tubePoints = []
holePoints = []
ncomponents, components = meshComponents(m, fuseSegments)
# First detect and remove holes
pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
for c in range(ncomponents):
component_tris = components == c
centers = triCenters[component_tris]
p = centers[0, 0:2] # Todo: pick center of largest area triangle?
if (pointsLieInHole(centers)):
components[component_tris] = -1 #mark for deletion
holePoints.append(p)
continue
if len(holePoints) > 0:
print(f'Detected {len(holePoints)} holes')
# Note: there shouldn't be any dangling vertices since no new vertices
# are inserted inside the holes.
m = mesh.Mesh(m.vertices(), m.elements()[components >= 0])
ncomponents, components = meshComponents(m, fuseSegments)
triCenters = m.vertices()[m.triangles()].mean(axis=1)
# m.save('without_holes.msh')
wallLabels = -np.ones(m.numTris(), dtype=np.int) # assign -1 to tubes
# Next, pick a point within each air tube
for c in range(ncomponents):
component_tris = components == c
centers = triCenters[component_tris]
p = centers[0, 0:2] # Todo: pick center of largest area triangle?
# Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
# precision at which we extracted the contours (and the contour resampling), so we take a vote.
triCentersAreInWall = np.mean(sdfSampler.sample(centers, sdf)) < 0
if (triCentersAreInWall):
wallPoints.append(p)
wallLabels[component_tris] = numWalls
numWalls = numWalls + 1
else:
tubePoints.append(p)
return meshWallsAndTubes(customPts, customEdges, m, wallLabels >= 0, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles)
def generateSheetMeshNewAlgorithm(sdfVertices, sdfTris, sdf, triArea, permitWallInteriorVertices = False, targetEdgeSpacing = 0.5, minContourLen = 0.75, avoidSpuriousFusedTriangles=True):
pts, edges = wall_generation.extract_contours(sdfVertices, sdfTris, sdf,
targetEdgeSpacing=targetEdgeSpacing,
minContourLen=minContourLen)
return newMeshingAlgorithm(sdfVertices, sdfTris, sdf, pts, edges, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles)
def remeshSheet(isheet, triArea, permitWallInteriorVertices = False, omitWallsContainingPoints=[]):
"""
Remesh an inflatable sheet design with a high quality triangulation
(leaving the fusing curves unchanged).
We can omit certain walls by passing a nonempty point set for the `omitWallsContainingPoints` argument.
Returns
-------
remeshedSheet, isWallVtx, isWallBdryVtx
"""
nwmc, wmc = wallMeshComponents(isheet, distinctTubeComponents=True)
im = isheet.mesh()
imV = im.vertices()
imF = im.triangles()
# Convert walls specified by `omitWallsContainingPoints` into tube regions.
ssampler = SurfaceSampler(imV, imF)
omittedWallComponents = []
if (len(omitWallsContainingPoints) > 0):
tris, _ = ssampler.closestTriAndBaryCoords(np.array(omitWallsContainingPoints))
omittedWallComponents = np.unique(wmc[tris])
if (np.any(omittedWallComponents < 0)): raise Exception("omitWallsContainingPoints contains non-wall points.")
wmc[tris] = -1 # Reassign omitted walls to the first tube region.
# Generate tube and wall points (one in each tube/wall component)
tubePoints = []
wallPoints = []
triCenters = imV[:, 0:2][imF].mean(axis=1)
#print(np.unique(wmc))
for c in range(np.min(wmc), nwmc):
#print(f'Component: {c}')
if c in omittedWallComponents: continue
p = triCenters[np.where(wmc == c)[0][0]]
if (c < 0): tubePoints.append(p)
else : wallPoints.append(p)
# Generate hole points inside each internal boundary loop; this
# requires a low-quality triangulation of the boundary loops.
sheetBoundary = mesh_operations.removeDanglingVertices(imV, im.boundaryElements())
mHoleDetect, mFuseMarkers, mFuseSegments = wall_generation.triangulate_channel_walls(sheetBoundary[0][:, 0:2], sheetBoundary[1], triArea=float('inf'), omitQualityFlag=True, flags="YY")
nholeDetectComponents, holeDetectComponents = meshComponents(mHoleDetect, mFuseSegments)
holeTest = HoleTester(imV, ssampler)
holePoints = np.array([triCenters[np.where(holeDetectComponents == c)[0][0]] for c in range(nholeDetectComponents)])
holePoints = list(holePoints[holeTest.pointWithinHole(holePoints)])
# Get all design curves for remeshing. These consist of the union of two disjoint sets of curves:
# - Boundaries of the wall regions.
# - The intersection of the tube and sheet boundaries.
wm = mesh.Mesh(*mesh_operations.mergedMesh([(imV, imF[wmc == i]) for i in range(nwmc)])) # Mesh of walls only.
sheetBoundaryIntersectTubes = mesh_operations.removeDanglingVertices(imV, im.boundaryElements()[wmc[im.elementsAdjacentBoundary()] < 0])
fusing_V, fusing_E = mesh_operations.mergedMesh([(wm.vertices(), wm.boundaryElements()), sheetBoundaryIntersectTubes])
isWallTri = (wmc >= 0)
return meshWallsAndTubes(fusing_V, fusing_E, im, isWallTri, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles=True)
import triangulation, field_sampler
def forward_design_mesh(V, E, fusedPts, holePts, triArea):
"""
Create an inflatable sheet mesh from a collection of curves and points indicating
whether the closed curve containing them should be considered a wall or a hole
(instead of a tube/pillow).
"""
sdfV, sdfF, pointMarkers, edgeMarkers = triangulation.triangulate(V[:, 0:2], E, holePts=holePts, triArea=1e8, omitQualityFlag=True, outputPointMarkers=True, outputEdgeMarkers=True)
minit = mesh.Mesh(sdfV[:, 0:2], sdfF)
# Create a SDF field indicating the wall regions (the data needed by newMeshingAlgorithm)
nc, c = meshComponents(minit, edgeMarkers)
sdf = c
fs = field_sampler.FieldSampler(minit)
if len(fusedPts) > 0:
        fusedComponents = np.array(np.unique(fs.sample(fusedPts, c)), dtype=int)
sdf[c == fusedComponents] = -1
return newMeshingAlgorithm(sdfV, sdfF, sdf, V, E, triArea=triArea)
|
[
"mesh_utilities.tubeRemesh",
"numpy.empty",
"utils.freshPath",
"numpy.arange",
"scipy.sparse.csgraph.connected_components",
"numpy.linalg.norm",
"numpy.unique",
"numpy.pad",
"utils.bbox_dims",
"field_sampler.FieldSampler",
"numpy.transpose",
"mesh.Mesh",
"mesh_utilities.SurfaceSampler",
"numpy.min",
"mesh_operations.submesh",
"wall_generation.extract_contours",
"numpy.flatnonzero",
"mesh_operations.removeDanglingVertices",
"numpy.any",
"triangulation.triangulate",
"numpy.where",
"numpy.array",
"mesh_operations.mergedMesh",
"wall_generation.triangulate_channel_walls"
] |
[((1304, 1350), 'scipy.sparse.csgraph.connected_components', 'scipy.sparse.csgraph.connected_components', (['adj'], {}), '(adj)\n', (1345, 1350), False, 'import scipy\n'), ((1983, 2029), 'scipy.sparse.csgraph.connected_components', 'scipy.sparse.csgraph.connected_components', (['adj'], {}), '(adj)\n', (2024, 2029), False, 'import scipy\n'), ((2086, 2123), 'numpy.empty', 'np.empty', (['numComponents'], {'dtype': 'np.int'}), '(numComponents, dtype=np.int)\n', (2094, 2123), True, 'import numpy as np\n'), ((2225, 2246), 'numpy.unique', 'np.unique', (['wallLabels'], {}), '(wallLabels)\n', (2234, 2246), True, 'import numpy as np\n'), ((2326, 2368), 'numpy.arange', 'np.arange', (['numWallComponents'], {'dtype': 'np.int'}), '(numWallComponents, dtype=np.int)\n', (2335, 2368), True, 'import numpy as np\n'), ((7225, 7259), 'mesh_operations.mergedMesh', 'mesh_operations.mergedMesh', (['meshes'], {}), '(meshes)\n', (7251, 7259), False, 'import mesh_operations\n'), ((7280, 7307), 'mesh.Mesh', 'mesh.Mesh', (['mergedV', 'mergedF'], {}), '(mergedV, mergedF)\n', (7289, 7307), False, 'import wall_generation, mesh\n'), ((11433, 11562), 'wall_generation.extract_contours', 'wall_generation.extract_contours', (['sdfVertices', 'sdfTris', 'sdf'], {'targetEdgeSpacing': 'targetEdgeSpacing', 'minContourLen': 'minContourLen'}), '(sdfVertices, sdfTris, sdf,\n targetEdgeSpacing=targetEdgeSpacing, minContourLen=minContourLen)\n', (11465, 11562), False, 'import wall_generation, mesh\n'), ((11694, 11764), 'wall_generation.triangulate_channel_walls', 'wall_generation.triangulate_channel_walls', (['pts[:, 0:2]', 'edges', 'triArea'], {}), '(pts[:, 0:2], edges, triArea)\n', (11735, 11764), False, 'import wall_generation, mesh\n'), ((11782, 11818), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['sdfVertices', 'sdfTris'], {}), '(sdfVertices, sdfTris)\n', (11796, 11818), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((12468, 12554), 'wall_generation.triangulate_channel_walls', 'wall_generation.triangulate_channel_walls', (['customPts[:, 0:2]', 'customEdges', 'triArea'], {}), '(customPts[:, 0:2], customEdges,\n triArea)\n', (12509, 12554), False, 'import wall_generation, mesh\n'), ((12568, 12604), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['sdfVertices', 'sdfTris'], {}), '(sdfVertices, sdfTris)\n', (12582, 12604), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((14509, 14674), 'wall_generation.triangulate_channel_walls', 'wall_generation.triangulate_channel_walls', (['fusing_V[:, 0:2]', 'fusing_E'], {'holePoints': '(wallPoints + holePoints)', 'triArea': 'triArea', 'omitQualityFlag': '(False)', 'flags': '"""j"""'}), "(fusing_V[:, 0:2], fusing_E,\n holePoints=wallPoints + holePoints, triArea=triArea, omitQualityFlag=\n False, flags='j')\n", (14550, 14674), False, 'import wall_generation, mesh\n'), ((16450, 16494), 'mesh_operations.mergedMesh', 'mesh_operations.mergedMesh', (['boundarySegments'], {}), '(boundarySegments)\n', (16476, 16494), False, 'import mesh_operations\n'), ((19273, 19309), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['sdfVertices', 'sdfTris'], {}), '(sdfVertices, sdfTris)\n', (19287, 19309), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((21522, 21651), 'wall_generation.extract_contours', 'wall_generation.extract_contours', (['sdfVertices', 'sdfTris', 'sdf'], {'targetEdgeSpacing': 'targetEdgeSpacing', 'minContourLen': 'minContourLen'}), '(sdfVertices, sdfTris, sdf,\n targetEdgeSpacing=targetEdgeSpacing, 
minContourLen=minContourLen)\n', (21554, 21651), False, 'import wall_generation, mesh\n'), ((22533, 22557), 'mesh_utilities.SurfaceSampler', 'SurfaceSampler', (['imV', 'imF'], {}), '(imV, imF)\n', (22547, 22557), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((25286, 25443), 'triangulation.triangulate', 'triangulation.triangulate', (['V[:, 0:2]', 'E'], {'holePts': 'holePts', 'triArea': '(100000000.0)', 'omitQualityFlag': '(True)', 'outputPointMarkers': '(True)', 'outputEdgeMarkers': '(True)'}), '(V[:, 0:2], E, holePts=holePts, triArea=\n 100000000.0, omitQualityFlag=True, outputPointMarkers=True,\n outputEdgeMarkers=True)\n', (25311, 25443), False, 'import triangulation, field_sampler\n'), ((25442, 25471), 'mesh.Mesh', 'mesh.Mesh', (['sdfV[:, 0:2]', 'sdfF'], {}), '(sdfV[:, 0:2], sdfF)\n', (25451, 25471), False, 'import wall_generation, mesh\n'), ((25635, 25668), 'field_sampler.FieldSampler', 'field_sampler.FieldSampler', (['minit'], {}), '(minit)\n', (25661, 25668), False, 'import triangulation, field_sampler\n'), ((2428, 2455), 'numpy.unique', 'np.unique', (['components[~iwt]'], {}), '(components[~iwt])\n', (2437, 2455), True, 'import numpy as np\n'), ((4548, 4579), 'numpy.flatnonzero', 'np.flatnonzero', (['(components == c)'], {}), '(components == c)\n', (4562, 4579), True, 'import numpy as np\n'), ((8936, 8974), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - closestPts)'], {'axis': '(1)'}), '(X - closestPts, axis=1)\n', (8950, 8974), True, 'import numpy as np\n'), ((15426, 15443), 'numpy.any', 'np.any', (['isWallTri'], {}), '(isWallTri)\n', (15432, 15443), True, 'import numpy as np\n'), ((15465, 15501), 'numpy.array', 'np.array', (['fuseMarkers'], {'dtype': 'np.bool'}), '(fuseMarkers, dtype=np.bool)\n', (15473, 15501), True, 'import numpy as np\n'), ((22754, 22774), 'numpy.unique', 'np.unique', (['wmc[tris]'], {}), '(wmc[tris])\n', (22763, 22774), True, 'import numpy as np\n'), ((22787, 22820), 'numpy.any', 'np.any', (['(omittedWallComponents < 0)'], {}), '(omittedWallComponents < 0)\n', (22793, 22820), True, 'import numpy as np\n'), ((23172, 23183), 'numpy.min', 'np.min', (['wmc'], {}), '(wmc)\n', (23178, 23183), True, 'import numpy as np\n'), ((8814, 8841), 'numpy.pad', 'np.pad', (['X', '[(0, 0), (0, 1)]'], {}), '(X, [(0, 0), (0, 1)])\n', (8820, 8841), True, 'import numpy as np\n'), ((14864, 14928), 'mesh_utilities.tubeRemesh', 'tubeRemesh', (['mTubes', 'fuseMarkers', 'fuseSegments'], {'minRelEdgeLen': '(0.3)'}), '(mTubes, fuseMarkers, fuseSegments, minRelEdgeLen=0.3)\n', (14874, 14928), False, 'from mesh_utilities import SurfaceSampler, tubeRemesh\n'), ((17165, 17208), 'mesh_operations.mergedMesh', 'mesh_operations.mergedMesh', (['[mTubes, mWall]'], {}), '([mTubes, mWall])\n', (17191, 17208), False, 'import mesh_operations\n'), ((18021, 18057), 'numpy.array', 'np.array', (['fuseMarkers'], {'dtype': 'np.bool'}), '(fuseMarkers, dtype=np.bool)\n', (18029, 18057), True, 'import numpy as np\n'), ((22685, 22720), 'numpy.array', 'np.array', (['omitWallsContainingPoints'], {}), '(omitWallsContainingPoints)\n', (22693, 22720), True, 'import numpy as np\n'), ((5548, 5605), 'mesh_operations.submesh', 'mesh_operations.submesh', (['origV', 'origF', '(wallLabels == wall)'], {}), '(origV, origF, wallLabels == wall)\n', (5571, 5605), False, 'import mesh_operations\n'), ((6710, 6749), 'numpy.flatnonzero', 'np.flatnonzero', (['(wallMeshComponents == c)'], {}), '(wallMeshComponents == c)\n', (6724, 6749), True, 'import numpy as np\n'), ((1257, 1281), 'numpy.transpose', 
'np.transpose', (['dual_edges'], {}), '(dual_edges)\n', (1269, 1281), True, 'import numpy as np\n'), ((1916, 1940), 'numpy.transpose', 'np.transpose', (['dual_edges'], {}), '(dual_edges)\n', (1928, 1940), True, 'import numpy as np\n'), ((7109, 7166), 'mesh_operations.removeDanglingVertices', 'mesh_operations.removeDanglingVertices', (['wmV', 'wmF[keepTri]'], {}), '(wmV, wmF[keepTri])\n', (7147, 7166), False, 'import mesh_operations\n'), ((8493, 8522), 'utils.bbox_dims', 'utils.bbox_dims', (['meshVertices'], {}), '(meshVertices)\n', (8508, 8522), False, 'import utils\n'), ((15084, 15138), 'utils.freshPath', 'utils.freshPath', (['"""tubeRemeshFailure"""'], {'suffix': '""".pkl.gz"""'}), "('tubeRemeshFailure', suffix='.pkl.gz')\n", (15099, 15138), False, 'import utils\n'), ((23297, 23315), 'numpy.where', 'np.where', (['(wmc == c)'], {}), '(wmc == c)\n', (23305, 23315), True, 'import numpy as np\n'), ((23989, 24024), 'numpy.where', 'np.where', (['(holeDetectComponents == c)'], {}), '(holeDetectComponents == c)\n', (23997, 24024), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn import linear_model
np.random.seed(123)
np.set_printoptions(suppress=True, linewidth=120)
X = np.random.random([10, 5]).astype(float)
y = np.random.random(10).astype(float)
# sklearn
linear = linear_model.LinearRegression()
linear.fit(X, y)
# Pure Python
X = np.hstack([np.ones([10, 1]), X])
IX = np.linalg.inv(np.matmul(X.T, X))
XIX = np.matmul(X, IX)
w = np.matmul(y, XIX)
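# Sanity check of the math: the normal equations give w = (X^T X)^{-1} X^T y.
# Since (X^T X)^{-1} is symmetric, y @ X @ (X^T X)^{-1} yields the same vector,
# which is exactly what the two matmul calls above compute.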
print("----- Code Output -----")
print("sklearn coef", linear.coef_)
print("sklearn intercept", linear.intercept_)
print("numpy coef", w[1:])
print("numpy intercept", w[0])
"""
----- Code Output -----
sklearn coef [ 0.49571807 -0.4013861 0.67121452 -0.4458699 -0.68057386]
sklearn intercept 0.767935574124093
numpy coef [ 0.49571807 -0.4013861 0.67121452 -0.4458699 -0.68057386]
numpy intercept 0.7679355741241028
"""
|
[
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.ones",
"sklearn.linear_model.LinearRegression",
"numpy.random.random",
"numpy.matmul"
] |
[((53, 72), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (67, 72), True, 'import numpy as np\n'), ((73, 122), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'linewidth': '(120)'}), '(suppress=True, linewidth=120)\n', (92, 122), True, 'import numpy as np\n'), ((233, 264), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (262, 264), False, 'from sklearn import linear_model\n'), ((379, 395), 'numpy.matmul', 'np.matmul', (['X', 'IX'], {}), '(X, IX)\n', (388, 395), True, 'import numpy as np\n'), ((400, 417), 'numpy.matmul', 'np.matmul', (['y', 'XIX'], {}), '(y, XIX)\n', (409, 417), True, 'import numpy as np\n'), ((354, 371), 'numpy.matmul', 'np.matmul', (['X.T', 'X'], {}), '(X.T, X)\n', (363, 371), True, 'import numpy as np\n'), ((128, 153), 'numpy.random.random', 'np.random.random', (['[10, 5]'], {}), '([10, 5])\n', (144, 153), True, 'import numpy as np\n'), ((175, 195), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (191, 195), True, 'import numpy as np\n'), ((312, 328), 'numpy.ones', 'np.ones', (['[10, 1]'], {}), '([10, 1])\n', (319, 328), True, 'import numpy as np\n')]
|
import lzhw
from sys import getsizeof
from random import sample, choices
import pandas as pd
def test_weather():
weather = ["Sunny", "Sunny", "Overcast", "Rain", "Rain", "Rain", "Overcast", "Sunny", "Sunny",
"Rain", "Sunny", "Overcast", "Overcast", "Rain", "Rain", "Sunny", "Sunny"]
comp_weather = lzhw.LZHW(weather)
comp_weather2 = lzhw.LZHW(weather, sliding_window = 5)
assert getsizeof(weather) > comp_weather.size()
assert all(weather == comp_weather.decompress())
assert all(weather == comp_weather2.decompress())
def test_num():
numbers = choices(sample(range(0, 5), 5), k=20)
comp_num = lzhw.LZHW(numbers)
assert getsizeof(numbers) > comp_num.size()
assert numbers == list(map(int, comp_num.decompress()))
def test_read_write():
weather = ["Sunny", "Sunny", "Overcast", "Rain", "Rain", "Rain", "Overcast", "Sunny", "Sunny",
"Rain", "Sunny", "Overcast", "Overcast", "Rain", "Rain", "Sunny", "Sunny"]
comp_weather = lzhw.LZHW(weather)
comp_weather.save_to_file("test.pkl")
decomp = lzhw.decompress_from_file("test.pkl")
assert all(weather == decomp)
def test_comp_df():
df = pd.DataFrame({"a": [1, 1, 2, 2, 1, 3, 4, 4],
"b": ["A", "A", "B", "B", "A", "C,D", "D C", "D C"]})
comp_df = lzhw.CompressedDF(df, parallel=True)
comp_df2 = lzhw.CompressedDF(df, sliding_window = 10)
assert all(comp_df.compressed[1].decompress() == df.b)
assert all(comp_df2.compressed[0].decompress() == df.a)
def test_comp_chunks():
df = pd.DataFrame({"a": [1, 1, 2, 2, 1, 3, 4, 4],
"b": ["A", "A", "B", "B", "A", "C,D", "D C", "D C"]})
df.to_csv("example.csv", index=False)
chunks = 4
compressed_chunks = lzhw.CompressedFromCSV("example.csv", chunksize=chunks)
totals = (df.shape[0] / chunks)
assert totals == len(compressed_chunks.all_comp.keys())
compressed_chunks.save_to_file("comp_ex.txt")
decomp_chunk = lzhw.decompress_df_from_file("comp_ex.txt")
assert len(decomp_chunk) == totals
assert all(decomp_chunk[0].a == df.a[:4])
assert all(compressed_chunks.all_comp[1].compressed[1].decompress() == df.b[4:8])
|
[
"pandas.DataFrame",
"lzhw.CompressedFromCSV",
"lzhw.decompress_df_from_file",
"lzhw.decompress_from_file",
"lzhw.LZHW",
"sys.getsizeof",
"lzhw.CompressedDF"
] |
[((323, 341), 'lzhw.LZHW', 'lzhw.LZHW', (['weather'], {}), '(weather)\n', (332, 341), False, 'import lzhw\n'), ((362, 398), 'lzhw.LZHW', 'lzhw.LZHW', (['weather'], {'sliding_window': '(5)'}), '(weather, sliding_window=5)\n', (371, 398), False, 'import lzhw\n'), ((645, 663), 'lzhw.LZHW', 'lzhw.LZHW', (['numbers'], {}), '(numbers)\n', (654, 663), False, 'import lzhw\n'), ((1005, 1023), 'lzhw.LZHW', 'lzhw.LZHW', (['weather'], {}), '(weather)\n', (1014, 1023), False, 'import lzhw\n'), ((1079, 1116), 'lzhw.decompress_from_file', 'lzhw.decompress_from_file', (['"""test.pkl"""'], {}), "('test.pkl')\n", (1104, 1116), False, 'import lzhw\n'), ((1182, 1284), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 1, 2, 2, 1, 3, 4, 4], 'b': ['A', 'A', 'B', 'B', 'A', 'C,D', 'D C',\n 'D C']}"], {}), "({'a': [1, 1, 2, 2, 1, 3, 4, 4], 'b': ['A', 'A', 'B', 'B', 'A',\n 'C,D', 'D C', 'D C']})\n", (1194, 1284), True, 'import pandas as pd\n'), ((1318, 1354), 'lzhw.CompressedDF', 'lzhw.CompressedDF', (['df'], {'parallel': '(True)'}), '(df, parallel=True)\n', (1335, 1354), False, 'import lzhw\n'), ((1370, 1410), 'lzhw.CompressedDF', 'lzhw.CompressedDF', (['df'], {'sliding_window': '(10)'}), '(df, sliding_window=10)\n', (1387, 1410), False, 'import lzhw\n'), ((1567, 1669), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 1, 2, 2, 1, 3, 4, 4], 'b': ['A', 'A', 'B', 'B', 'A', 'C,D', 'D C',\n 'D C']}"], {}), "({'a': [1, 1, 2, 2, 1, 3, 4, 4], 'b': ['A', 'A', 'B', 'B', 'A',\n 'C,D', 'D C', 'D C']})\n", (1579, 1669), True, 'import pandas as pd\n'), ((1770, 1825), 'lzhw.CompressedFromCSV', 'lzhw.CompressedFromCSV', (['"""example.csv"""'], {'chunksize': 'chunks'}), "('example.csv', chunksize=chunks)\n", (1792, 1825), False, 'import lzhw\n'), ((1991, 2034), 'lzhw.decompress_df_from_file', 'lzhw.decompress_df_from_file', (['"""comp_ex.txt"""'], {}), "('comp_ex.txt')\n", (2019, 2034), False, 'import lzhw\n'), ((412, 430), 'sys.getsizeof', 'getsizeof', (['weather'], {}), '(weather)\n', (421, 430), False, 'from sys import getsizeof\n'), ((675, 693), 'sys.getsizeof', 'getsizeof', (['numbers'], {}), '(numbers)\n', (684, 693), False, 'from sys import getsizeof\n')]
|
"""
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_USERNAME, \
CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP, STATE_ON
from homeassistant.helpers import validate_config
from homeassistant.components.switch import SwitchDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
REQUIREMENTS = ['pynetio==0.1.6']
DEFAULT_USERNAME = 'admin'
DEFAULT_PORT = 1234
URL_API_NETIO_EP = "/api/netio/<host>"
CONF_OUTLETS = "outlets"
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
ATTR_TODAY_MWH = "today_mwh"
ATTR_TOTAL_CONSUMPTION_KWH = "total_energy_kwh"
ATTR_CURRENT_POWER_MWH = "current_power_mwh"
ATTR_CURRENT_POWER_W = "current_power_w"
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
ATTR_START_DATE = 'start_date'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Configure the netio platform."""
from pynetio import Netio
if validate_config({"conf": config}, {"conf": [CONF_OUTLETS,
CONF_HOST]}, _LOGGER):
if len(DEVICES) == 0:
hass.wsgi.register_view(NetioApiView)
dev = Netio(config[CONF_HOST],
config.get(CONF_PORT, DEFAULT_PORT),
config.get(CONF_USERNAME, DEFAULT_USERNAME),
config.get(CONF_PASSWORD, DEFAULT_USERNAME))
DEVICES[config[CONF_HOST]] = Device(dev, [])
# Throttle the update for all NetioSwitches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(DEVICES[config[CONF_HOST]].netio, key,
config[CONF_OUTLETS][key])
DEVICES[config[CONF_HOST]].entities.append(switch)
add_devices_callback(DEVICES[config[CONF_HOST]].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for _, value in DEVICES.items():
value.netio.stop()
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = "api:netio"
def get(self, request, host):
"""Request handler."""
data = request.args
states, consumptions, cumulated_consumptions, start_dates = \
[], [], [], []
for i in range(1, 5):
out = 'output%d' % i
states.append(data.get('%s_state' % out) == STATE_ON)
consumptions.append(float(data.get('%s_consumption' % out, 0)))
cumulated_consumptions.append(
float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
start_dates.append(data.get('%s_consumptionStart' % out, ""))
_LOGGER.debug('%s: %s, %s, %s since %s', host, states,
consumptions, cumulated_consumptions, start_dates)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
dev.update_ha_state()
return self.json(True)
class NetioSwitch(SwitchDevice):
"""Provide a netio linked switch."""
def __init__(self, netio, outlet, name):
"""Defined to handle throttle."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Netio device's name."""
return self._name
@property
def available(self):
"""Return True if entity is available."""
return not hasattr(self, 'telnet')
def turn_on(self):
"""Turn switch on."""
self._set(True)
def turn_off(self):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list('uuuu')
val[self.outlet - 1] = "1" if value else "0"
self.netio.get('port list %s' % ''.join(val))
self.netio.states[self.outlet - 1] = value
self.update_ha_state()
@property
def is_on(self):
"""Return switch's status."""
return self.netio.states[self.outlet - 1]
def update(self):
"""Called by HA."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {ATTR_CURRENT_POWER_W: self.current_power_w,
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split('|')[0]}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[self.outlet - 1]
@property
def cumulated_consumption_kwh(self):
"""Total enerygy consumption since start_date."""
return self.netio.cumulated_consumptions[self.outlet - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[self.outlet - 1]
|
[
"homeassistant.helpers.validate_config",
"datetime.timedelta",
"homeassistant.util.Throttle",
"collections.namedtuple",
"logging.getLogger"
] |
[((578, 605), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (595, 605), False, 'import logging\n'), ((987, 1030), 'collections.namedtuple', 'namedtuple', (['"""device"""', "['netio', 'entities']"], {}), "('device', ['netio', 'entities'])\n", (997, 1030), False, 'from collections import namedtuple\n'), ((1100, 1121), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (1109, 1121), False, 'from datetime import timedelta\n'), ((1279, 1358), 'homeassistant.helpers.validate_config', 'validate_config', (["{'conf': config}", "{'conf': [CONF_OUTLETS, CONF_HOST]}", '_LOGGER'], {}), "({'conf': config}, {'conf': [CONF_OUTLETS, CONF_HOST]}, _LOGGER)\n", (1294, 1358), False, 'from homeassistant.helpers import validate_config\n'), ((1859, 1896), 'homeassistant.util.Throttle', 'util.Throttle', (['MIN_TIME_BETWEEN_SCANS'], {}), '(MIN_TIME_BETWEEN_SCANS)\n', (1872, 1896), False, 'from homeassistant import util\n')]
|
import os
image_dir ="H:/Python Space/Hard_Hat _Detection/images"
label_dir= "H:/Python Space/Hard_Hat _Detection/labels"
print("No. of Training images", len(os.listdir(image_dir + "/train")))
print("No. of Training labels", len(os.listdir(label_dir + "/train")))
print("No. of valid images", len(os.listdir(image_dir + "/val")))
print("No. of valid labels", len(os.listdir(label_dir + "/val")))
|
[
"os.listdir"
] |
[((160, 192), 'os.listdir', 'os.listdir', (["(image_dir + '/train')"], {}), "(image_dir + '/train')\n", (170, 192), False, 'import os\n'), ((231, 263), 'os.listdir', 'os.listdir', (["(label_dir + '/train')"], {}), "(label_dir + '/train')\n", (241, 263), False, 'import os\n'), ((300, 330), 'os.listdir', 'os.listdir', (["(image_dir + '/val')"], {}), "(image_dir + '/val')\n", (310, 330), False, 'import os\n'), ((366, 396), 'os.listdir', 'os.listdir', (["(label_dir + '/val')"], {}), "(label_dir + '/val')\n", (376, 396), False, 'import os\n')]
|
import numpy as np
import ray
import pyspiel
from open_spiel.python.algorithms.psro_v2.ars_ray.shared_noise import *
from open_spiel.python.algorithms.psro_v2.ars_ray.utils import rewards_combinator
from open_spiel.python.algorithms.psro_v2 import rl_policy
from open_spiel.python import rl_environment
import tensorflow.compat.v1 as tf
import random
# Function that loads the game.
# @ray.remote
# def worker(env_name):
# game = pyspiel.load_game_as_turn_based(env_name,
# {"players": pyspiel.GameParameter(
# 2)})
# env = rl_environment.Environment(game)
# return env.name
#
# SB worker
# @ray.remote
# class Worker(object):
# def __init__(self,
# env_name,
# env_seed=2,
# deltas=None,
# slow_oracle_kargs=None,
# fast_oracle_kargs=None
# ):
# pass
#
# def output(self):
# import sys
# return sys.path
@ray.remote
class Worker(object):
"""
Object class for parallel rollout generation.
"""
def __init__(self,
env_name,
env_seed=2,
deltas=None,
slow_oracle_kargs=None,
fast_oracle_kargs=None
):
# initialize rl environment.
from open_spiel.python import rl_environment
import pyspiel
self._num_players = 2
game = pyspiel.load_game_as_turn_based(env_name,
{"players": pyspiel.GameParameter(
self._num_players)})
self._env = rl_environment.Environment(game)
# Each worker gets access to the shared noise table
# with independent random streams for sampling
# from the shared noise table.
self.deltas = SharedNoiseTable(deltas, env_seed + 7)
self._policies = [[] for _ in range(self._num_players)]
self._slow_oracle_kargs = slow_oracle_kargs
self._fast_oracle_kargs = fast_oracle_kargs
self._delta_std = self._fast_oracle_kargs['noise']
self._sess = tf.get_default_session()
if self._sess is None:
self._sess = tf.Session()
if self._slow_oracle_kargs is not None:
self._slow_oracle_kargs['session'] = self._sess
def sample_episode(self,
unused_time_step,
agents,
is_evaluation=False,
noise=None,
chosen_player=None):
"""
Sample an episode and get the cumulative rewards. Notice that we do not
update the agents during this sampling.
:param unused_time_step: placeholder for openspiel.
:param agents: a list of policies, one per player.
:param is_evaluation: evaluation flag.
:param noise: noise to be added to current policy.
        :param chosen_player: id of the agent being trained.
:return: a list of returns, one per player.
"""
time_step = self._env.reset()
cumulative_rewards = 0.0
while not time_step.last():
if time_step.is_simultaneous_move():
action_list = []
for i, agent in enumerate(agents):
if i == chosen_player:
output = agent.step(time_step,
is_evaluation=is_evaluation,
noise=noise)
else:
output = agent.step(time_step, is_evaluation=is_evaluation)
action_list.append(output.action)
time_step = self._env.step(action_list)
cumulative_rewards += np.array(time_step.rewards)
else:
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(
time_step, is_evaluation=is_evaluation)
action_list = [agent_output.action]
time_step = self._env.step(action_list)
cumulative_rewards += np.array(time_step.rewards)
# No agents update at this step. This step may not be necessary.
if not is_evaluation:
for agent in agents:
agent.step(time_step)
return cumulative_rewards
def do_sample_episode(self,
probabilities_of_playing_policies,
chosen_player,
num_rollouts = 1,
is_evaluation = False):
"""
Generate multiple rollouts using noisy policies.
"""
with self._sess:
rollout_rewards = [[] for _ in range(self._num_players)]
deltas_idx = []
for _ in range(num_rollouts):
agents = self.sample_agents(probabilities_of_playing_policies, chosen_player)
if is_evaluation:
deltas_idx.append(-1)
reward = self.sample_episode(None, agents, is_evaluation)
for i, rew in enumerate(reward):
rollout_rewards[i].append(rew)
else:
# The idx marks the beginning of a sequence of noise with length dim.
# Refer to shared_noise.py
idx, delta = self.deltas.get_delta(agents[chosen_player].get_weights().size)
delta = (self._delta_std * delta).reshape(agents[chosen_player].get_weights().shape)
deltas_idx.append(idx)
# compute reward used for positive perturbation rollout. List, one reward per player.
pos_reward = self.sample_episode(None, agents, is_evaluation, delta, chosen_player)
# compute reward used for negative pertubation rollout. List, one reward per player.
neg_reward = self.sample_episode(None, agents, is_evaluation, -delta, chosen_player)
                    # a list of lists, one per player. For each player, the list
                    # contains [positive reward, negative reward] pairs, one row
                    # per noise sample.
rollout_rewards = rewards_combinator(rollout_rewards, pos_reward, neg_reward)
return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards}
def freeze_all(self):
"""Freezes all policies within policy_per_player.
Args:
policies_per_player: List of list of number of policies.
"""
for policies in self._policies:
for pol in policies:
pol.freeze()
# def sync_total_policies(self, extra_policies_weights, policies_types, chosen_player):
# with self._sess:
# if chosen_player is not None:
# self._policies[chosen_player][-1].set_weights(extra_policies_weights[chosen_player][-1])
# else:
# for player in range(self._num_players):
# for i, policy_type in enumerate(policies_types[player]):
# new_pol = self.best_responder(policy_type, player)
# new_pol.set_weights(extra_policies_weights[player][i])
# self._policies[player].append(new_pol)
def get_num_policies(self):
return len(self._policies[0])
# def best_responder(self, policy_type, player):
# if policy_type == "DQN":
# agent_class = rl_policy.DQNPolicy
# assert self._slow_oracle_kargs is not None
# new_pol = agent_class(self._env, player, **self._slow_oracle_kargs)
# elif policy_type == "PG":
# agent_class = rl_policy.PGPolicy
# assert self._slow_oracle_kargs is not None
# new_pol = agent_class(self._env, player, **self._slow_oracle_kargs)
# elif policy_type == "ARS_parallel":
# agent_class = rl_policy.ARSPolicy_parallel
# new_pol = agent_class(self._env, player, **self._fast_oracle_kargs)
# else:
# raise ValueError("Agent class not supported in workers")
#
# return new_pol
def sample_agents(self, probabilities_of_playing_policies, chosen_player):
agents = self.sample_strategy_marginal(self._policies, probabilities_of_playing_policies)
agents[chosen_player] = self._policies[chosen_player][-1]
return agents
def sample_strategy_marginal(self, total_policies, probabilities_of_playing_policies):
"""Samples strategies given marginal probabilities.
Uses independent sampling if probs_are_marginal, and joint sampling otherwise.
Args:
total_policies: A list, each element a list of each player's policies.
probabilities_of_playing_policies: This is a list, with the k-th element
also a list specifying the play probabilities of the k-th player's
policies.
Returns:
sampled_policies: A list specifying a single sampled joint strategy.
"""
num_players = len(total_policies)
sampled_policies = []
for k in range(num_players):
current_policies = total_policies[k]
current_probabilities = probabilities_of_playing_policies[k]
sampled_policy_k = self.random_choice(current_policies, current_probabilities)
sampled_policies.append(sampled_policy_k)
return sampled_policies
def random_choice(self, outcomes, probabilities):
"""Samples from discrete probability distribution.
        `numpy.random.choice` does not seem optimized for repeated calls; this
        code had higher performance.
Args:
outcomes: List of categorical outcomes.
probabilities: Discrete probability distribtuion as list of floats.
Returns:
Entry of `outcomes` sampled according to the distribution.
"""
cumsum = np.cumsum(probabilities)
return outcomes[np.searchsorted(cumsum / cumsum[-1], random.random())]
def output(self):
return "asdf"
|
[
"pyspiel.GameParameter",
"open_spiel.python.algorithms.psro_v2.ars_ray.utils.rewards_combinator",
"numpy.cumsum",
"tensorflow.compat.v1.Session",
"random.random",
"numpy.array",
"open_spiel.python.rl_environment.Environment",
"tensorflow.compat.v1.get_default_session"
] |
[((1739, 1771), 'open_spiel.python.rl_environment.Environment', 'rl_environment.Environment', (['game'], {}), '(game)\n', (1765, 1771), False, 'from open_spiel.python import rl_environment\n'), ((2240, 2264), 'tensorflow.compat.v1.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2262, 2264), True, 'import tensorflow.compat.v1 as tf\n'), ((10247, 10271), 'numpy.cumsum', 'np.cumsum', (['probabilities'], {}), '(probabilities)\n', (10256, 10271), True, 'import numpy as np\n'), ((2321, 2333), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2331, 2333), True, 'import tensorflow.compat.v1 as tf\n'), ((1624, 1664), 'pyspiel.GameParameter', 'pyspiel.GameParameter', (['self._num_players'], {}), '(self._num_players)\n', (1645, 1664), False, 'import pyspiel\n'), ((3887, 3914), 'numpy.array', 'np.array', (['time_step.rewards'], {}), '(time_step.rewards)\n', (3895, 3914), True, 'import numpy as np\n'), ((4264, 4291), 'numpy.array', 'np.array', (['time_step.rewards'], {}), '(time_step.rewards)\n', (4272, 4291), True, 'import numpy as np\n'), ((10333, 10348), 'random.random', 'random.random', ([], {}), '()\n', (10346, 10348), False, 'import random\n'), ((6514, 6573), 'open_spiel.python.algorithms.psro_v2.ars_ray.utils.rewards_combinator', 'rewards_combinator', (['rollout_rewards', 'pos_reward', 'neg_reward'], {}), '(rollout_rewards, pos_reward, neg_reward)\n', (6532, 6573), False, 'from open_spiel.python.algorithms.psro_v2.ars_ray.utils import rewards_combinator\n')]
|
import clusters as c
import tensorflow as tf
import pandas as pd
import re
def one_hot(i, n):
""" Makes a one-hot vector of length n with 1 in position i. """
one_hot = [0 for x in range(n)]
one_hot[i] = 1
return one_hot
datasets = c.datasets
data_type = 'metaphlan_bugs_list'
body_site = 'stool'
df, dataframes, key_sets = c.__load_data(datasets, data_type, body_site)
cols = [re.sub('_', '1', re.sub(r'\|', '0', x)) for x in df.columns]
df.columns = cols
labels = c.get_labels(dataframes, body_site, key_sets, df)
one_hots = {}
n = len(key_sets)
for i, key in zip(range(n), key_sets):
one_hots[key] = one_hot(i, n)
lala = [one_hots[label] for label in labels]
lala = pd.Series(lala, index=df.index)
lala = labels.apply(lambda x: "Ascniar" in x).astype(int)
train_fn = tf.estimator.inputs.pandas_input_fn(x=df.iloc[0:4000], y=lala[0:4000], batch_size = 20, shuffle=True)
test_fn = tf.estimator.inputs.pandas_input_fn(x=df.iloc[4000:], y = lala[4000:], batch_size = 20, shuffle=True)
features = [tf.feature_column.numeric_column(x) for x in df.columns[0:-3]]
nn = tf.estimator.DNNClassifier(
feature_columns=features,
hidden_units = [100, 100],
activation_fn = tf.nn.relu,
n_classes = n,
dropout = .5,
)
nn.train(input_fn=train_fn)
nn.evaluate(input_fn=test_fn)
|
[
"tensorflow.feature_column.numeric_column",
"clusters.get_labels",
"pandas.Series",
"clusters.__load_data",
"tensorflow.estimator.inputs.pandas_input_fn",
"re.sub",
"tensorflow.estimator.DNNClassifier"
] |
[((344, 389), 'clusters.__load_data', 'c.__load_data', (['datasets', 'data_type', 'body_site'], {}), '(datasets, data_type, body_site)\n', (357, 389), True, 'import clusters as c\n'), ((484, 533), 'clusters.get_labels', 'c.get_labels', (['dataframes', 'body_site', 'key_sets', 'df'], {}), '(dataframes, body_site, key_sets, df)\n', (496, 533), True, 'import clusters as c\n'), ((692, 723), 'pandas.Series', 'pd.Series', (['lala'], {'index': 'df.index'}), '(lala, index=df.index)\n', (701, 723), True, 'import pandas as pd\n'), ((793, 896), 'tensorflow.estimator.inputs.pandas_input_fn', 'tf.estimator.inputs.pandas_input_fn', ([], {'x': 'df.iloc[0:4000]', 'y': 'lala[0:4000]', 'batch_size': '(20)', 'shuffle': '(True)'}), '(x=df.iloc[0:4000], y=lala[0:4000],\n batch_size=20, shuffle=True)\n', (828, 896), True, 'import tensorflow as tf\n'), ((905, 1006), 'tensorflow.estimator.inputs.pandas_input_fn', 'tf.estimator.inputs.pandas_input_fn', ([], {'x': 'df.iloc[4000:]', 'y': 'lala[4000:]', 'batch_size': '(20)', 'shuffle': '(True)'}), '(x=df.iloc[4000:], y=lala[4000:],\n batch_size=20, shuffle=True)\n', (940, 1006), True, 'import tensorflow as tf\n'), ((1088, 1222), 'tensorflow.estimator.DNNClassifier', 'tf.estimator.DNNClassifier', ([], {'feature_columns': 'features', 'hidden_units': '[100, 100]', 'activation_fn': 'tf.nn.relu', 'n_classes': 'n', 'dropout': '(0.5)'}), '(feature_columns=features, hidden_units=[100, 100\n ], activation_fn=tf.nn.relu, n_classes=n, dropout=0.5)\n', (1114, 1222), True, 'import tensorflow as tf\n'), ((1020, 1055), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['x'], {}), '(x)\n', (1052, 1055), True, 'import tensorflow as tf\n'), ((413, 434), 're.sub', 're.sub', (['"""\\\\|"""', '"""0"""', 'x'], {}), "('\\\\|', '0', x)\n", (419, 434), False, 'import re\n')]
|
# credit card default dataset: https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients
# kaggle link: https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset
import pandas as pd
from fim import fpgrowth#,fim
import numpy as np
#import math
#from itertools import chain, combinations
import itertools
from numpy.random import random
#from scipy import sparse
from bisect import bisect_left
from random import sample
#from scipy.stats.distributions import poisson, gamma, beta, bernoulli, binom
from time import time
#import scipy
#from sklearn.preprocessing import binarize
import operator
#from collections import Counter, defaultdict
from scipy.sparse import csc_matrix
from sklearn.ensemble import RandomForestClassifier#, AdaBoostClassifier
class hyb(object):
def __init__(self, binary_data,Y,Yb):
"""
        :param binary_data: binarized training features (X_train, without labels)
        :param Y: ground-truth labels for the training examples
        :param Yb: predictions of the black box model for all training examples
:return: None
"""
self.df = binary_data
self.Y = Y
# no. of training examples
self.N = float(len(Y))
self.Yb = Yb
def set_parameters(self, alpha = 1, beta = 0.1):
"""
initialise weights in objective function
:param alpha: weight of interpretability (no. of rules) in objective function
:param beta: weight of transparency (proportion of rules predicted by interpretable model) in objective function
:return: None
"""
# input al and bl are lists
self.alpha = alpha
self.beta = beta
def generate_rulespace(self,supp,maxlen,N, need_negcode = False,njobs = 5, method = 'fpgrowth',criteria = 'IG',add_rules = []):
"""
generates initial rulespace, from which rules are taken
then screens the rules using self.screen_rules
:param supp: (int, according to arg of fpgrowth)
:param maxlen: (int)
:param N: (int)
        :param add_rules: unused
"""
print('generating rulespace...')
if method == 'fpgrowth':
if need_negcode:
df = 1-self.df
df.columns = [name.strip() + 'neg' for name in self.df.columns]
df = pd.concat([self.df,df],axis = 1)
else:
df = 1 - self.df
# [0] needed to get first dimension (others empty)
pindex = np.where(self.Y==1)[0]
nindex = np.where(self.Y!=1)[0]
itemMatrix = [[item for item in df.columns if row[item] ==1] for i,row in df.iterrows() ]
# are the supp arguments for fpgrowth supposed to be different? according to the lower bounds (minsupp) in the paper
prules= fpgrowth([itemMatrix[i] for i in pindex],supp = supp,zmin = 1,zmax = maxlen)
prules = [np.sort(x[0]).tolist() for x in prules]
nrules= fpgrowth([itemMatrix[i] for i in nindex],supp = supp,zmin = 1,zmax = maxlen)
nrules = [np.sort(x[0]).tolist() for x in nrules]
else:
print('Using random forest to generate rules ...')
prules = []
for length in range(2,maxlen+1,1):
n_estimators = 250*length# min(5000,int(min(comb(df.shape[1], length, exact=True),10000/maxlen)))
clf = RandomForestClassifier(n_estimators = n_estimators,max_depth = length)
clf.fit(self.df,self.Y)
for n in range(n_estimators):
prules.extend(extract_rules(clf.estimators_[n],self.df.columns))
prules = [list(x) for x in set(tuple(np.sort(x)) for x in prules)]
nrules = []
for length in range(2,maxlen+1,1):
n_estimators = 250*length# min(5000,int(min(comb(df.shape[1], length, exact=True),10000/maxlen)))
clf = RandomForestClassifier(n_estimators = n_estimators,max_depth = length)
clf.fit(self.df,1-self.Y)
for n in range(n_estimators):
nrules.extend(extract_rules(clf.estimators_[n],self.df.columns))
nrules = [list(x) for x in set(tuple(np.sort(x)) for x in nrules)]
df = 1-self.df
df.columns = [name.strip() + 'neg' for name in self.df.columns]
df = pd.concat([self.df,df],axis = 1)
print('unpruned prules (' + str(len(prules)) + '):\n' + str(prules))
print()
print('unpruned nrules (' + str(len(nrules)) + '):\n' + str(nrules))
self.prules, self.pRMatrix, self.psupp, self.pprecision, self.perror = self.screen_rules(prules,df,self.Y,N,supp)
self.nrules, self.nRMatrix, self.nsupp, self.nprecision, self.nerror = self.screen_rules(nrules,df,1-self.Y,N,supp)
print('rulespace generated')
# print '\tTook %0.3fs to generate %d rules' % (self.screen_time, len(self.rules))
def screen_rules(self,rules,df,y,N,supp,criteria = 'precision',njobs = 5,add_rules = []):
"""
        screens the candidate rules: keeps those whose support meets the minimum
        threshold and whose precision exceeds the base rate, then returns the
        top-N rules ranked by precision
        helper, used by self.generate_rulespace
"""
print ('screening rules')
start_time = time() #removed time. and changed import statement above
itemInd = {}
        # map each column name to its column index, used to build the sparse rule matrix below
for i,name in enumerate(df.columns):
itemInd[name] = int(i)
len_rules = [len(rule) for rule in rules]
# chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
# array of indices corresponding to the features in the rules e.g. [r1a r1b r2a r3a]
indices = np.array(list(itertools.chain.from_iterable([[itemInd[x] for x in rule] for rule in rules])))
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
indptr =list(accumulate(len_rules))
indptr.insert(0,0)
indptr = np.array(indptr)
data = np.ones(len(indices))
# standard CSC representation where the row indices for column i are stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored in data[indptr[i]:indptr[i+1]]
# csc_matrix helps expand the compressed representation (data, indices, indptr), which ignores many of the zeros in the expanded matrix
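        # e.g. rules [['a','b'], ['c']] with itemInd {'a': 0, 'b': 1, 'c': 2} give
        # indices [0, 1, 2] and indptr [0, 2, 3]: column 0 (rule 0) has rows 0 and 1
        # set, column 1 (rule 1) has row 2 set.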
ruleMatrix = csc_matrix((data,indices,indptr),shape = (len(df.columns),len(rules)))
# mat = sparse.csr_matrix.dot(df,ruleMatrix)
mat = np.matrix(df)*ruleMatrix
lenMatrix = np.matrix([len_rules for i in range(df.shape[0])])
Z = (mat ==lenMatrix).astype(int)
Zpos = [Z[i] for i in np.where(y>0)][0]
TP = np.array(np.sum(Zpos,axis=0).tolist()[0])
supp_select = np.where(TP>=supp*sum(y)/100)[0]
# if len(supp_select)<=N:
# rules = [rules[i] for i in supp_select]
# RMatrix = np.array(Z[:,supp_select])
# rules_len = [len(set([name.split('_')[0] for name in rule])) for rule in rules]
# supp = np.array(np.sum(Z,axis=0).tolist()[0])[supp_select]
# else:
FP = np.array(np.sum(Z,axis = 0))[0] - TP
# TN = len(y) - np.sum(self.Y) - FP
# FN = np.sum(y) - TP
p1 = TP.astype(float)/(TP+FP)
# p2 = FN.astype(float)/(FN+TN)
# pp = (TP+FP).astype(float)/(TP+FP+TN+FN)
supp_select = np.array([i for i in supp_select if p1[i]>np.mean(y)])
select = np.argsort(p1[supp_select])[::-1][:N].tolist()
ind = list(supp_select[select])
rules = [rules[i] for i in ind]
RMatrix = np.array(Z[:,ind])
rules_len = [len(set([name.split('_')[0] for name in rule])) for rule in rules]
supp = np.array(np.sum(Z,axis=0).tolist()[0])[ind]
return rules, RMatrix, supp, p1[ind], FP[ind]
def train(self, Niteration = 5000, print_message=True, interpretability = 'size'):
"""
unused
"""
print('training hybrid...')
self.maps = []
int_flag = int(interpretability =='size')
T0 = 0.01
nprules = len(self.prules)
pnrules = len(self.nrules)
prs_curr = sample(list(range(nprules)),3)
nrs_curr = sample(list(range(pnrules)),3)
obj_curr = 1000000000
obj_min = obj_curr
self.maps.append([-1,obj_curr,prs_curr,nrs_curr,[]])
p = np.sum(self.pRMatrix[:,prs_curr],axis = 1)>0
n = np.sum(self.nRMatrix[:,nrs_curr],axis = 1)>0
overlap_curr = np.multiply(p,n)
pcovered_curr = p ^ overlap_curr
ncovered_curr = n ^ overlap_curr
covered_curr = np.logical_xor(p,n)
Yhat_curr,TP,FP,TN,FN = self.compute_obj(pcovered_curr,covered_curr)
print(Yhat_curr,TP,FP,TN,FN)
nfeatures = len(np.unique([con.split('_')[0] for i in prs_curr for con in self.prules[i]])) + len(np.unique([con.split('_')[0] for i in nrs_curr for con in self.nrules[i]]))
obj_curr = ( FN + FP)/self.N +self.alpha*(int_flag *(len(prs_curr) + len(nrs_curr))+(1-int_flag)*nfeatures)+ self.beta * sum(~covered_curr)/self.N
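        # objective = misclassification rate
        #           + alpha * interpretability penalty (rule count or feature count)
        #           + beta  * fraction of examples deferred to the black box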
self.actions = []
for iter in range(Niteration):
if iter >0.75 * Niteration:
prs_curr,nrs_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr = prs_opt[:],nrs_opt[:],pcovered_opt[:],ncovered_opt[:],overlap_opt[:],covered_opt[:], Yhat_opt[:]
prs_new,nrs_new , pcovered_new,ncovered_new,overlap_new,covered_new= self.propose_rs(prs_curr,nrs_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr, obj_min,print_message)
self.covered1 = covered_new[:]
self.Yhat_curr = Yhat_curr
# if sum(covered_new)<len(self.Y):
# # bbmodel.fit(self.df.iloc[~covered_new],self.Y[~covered_new])
# bbmodel.fit(self.df,self.Y)
Yhat_new,TP,FP,TN,FN = self.compute_obj(pcovered_new,covered_new)
self.Yhat_new = Yhat_new
nfeatures = len(np.unique([con.split('_')[0] for i in prs_new for con in self.prules[i]])) + len(np.unique([con.split('_')[0] for i in nrs_new for con in self.nrules[i]]))
obj_new = (FP + FN)/self.N +self.alpha*(int_flag *(len(prs_new) + len(nrs_new))+(1-int_flag)*nfeatures)+ self.beta * sum(~covered_new)/self.N
T = T0**(iter/Niteration)
alpha = np.exp(float(-obj_new +obj_curr)/T) # minimize
if obj_new < self.maps[-1][1]:
prs_opt,nrs_opt,obj_opt,pcovered_opt,ncovered_opt,overlap_opt,covered_opt, Yhat_opt = prs_new[:],nrs_new[:],obj_new,pcovered_new[:],ncovered_new[:],overlap_new[:],covered_new[:], Yhat_new[:]
perror, nerror, oerror, berror = self.diagnose(pcovered_new,ncovered_new,overlap_new,covered_new,Yhat_new)
accuracy_min = float(TP+TN)/self.N
explainability_min = sum(covered_new)/self.N
covered_min = covered_new
print('\n** max at iter = {} ** \n {}(obj) = {}(error) + {}(nrules) + {}(exp)\n accuracy = {}, explainability = {}, nfeatures = {}\n perror = {}, nerror = {}, oerror = {}, berror = {}\n '.format(iter,round(obj_new,3),(FP+FN)/self.N, self.alpha*(len(prs_new) + len(nrs_new)), self.beta*sum(~covered_new)/self.N, (TP+TN+0.0)/self.N,sum(covered_new)/self.N,nfeatures,perror,nerror,oerror,berror ))
self.maps.append([iter,obj_new,prs_new,nrs_new])
if print_message:
perror, nerror, oerror, berror = self.diagnose(pcovered_new,ncovered_new,overlap_new,covered_new,Yhat_new)
if print_message:
print('\niter = {}, alpha = {}, {}(obj) = {}(error) + {}(nrules) + {}(exp)\n accuracy = {}, explainability = {}, nfeatures = {}\n perror = {}, nerror = {}, oerror = {}, berror = {}\n '.format(iter,round(alpha,2),round(obj_new,3),(FP+FN)/self.N, self.alpha*(len(prs_new) + len(nrs_new)), self.beta*sum(~covered_new)/self.N, (TP+TN+0.0)/self.N,sum(covered_new)/self.N, nfeatures,perror,nerror,oerror,berror ))
print('prs = {}, nrs = {}'.format(prs_new, nrs_new))
if random() <= alpha:
prs_curr,nrs_curr,obj_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr = prs_new[:],nrs_new[:],obj_new,pcovered_new[:],ncovered_new[:],overlap_new[:],covered_new[:], Yhat_new[:]
self.prs_min = prs_opt
self.nrs_min = nrs_opt
print('training complete')
return self.maps,accuracy_min,covered_min
def diagnose(self, pcovered, ncovered, overlapped, covered, Yhat):
"""
        returns the sums of misclassification errors on the positive-rule,
        negative-rule, overlap and black-box regions
        helper, used in self.train
        note: "~" is the bitwise NOT (https://stackoverflow.com/questions/8305199/the-tilde-operator-in-python);
        on boolean numpy arrays it acts as elementwise negation, on integers ~x equals -x - 1
"""
perror = sum(self.Y[pcovered]!=Yhat[pcovered])
nerror = sum(self.Y[ncovered]!=Yhat[ncovered])
oerror = sum(self.Y[overlapped]!=Yhat[overlapped])
        # ~covered negates the boolean mask elementwise, selecting the uncovered examples
berror = sum(self.Y[~covered]!=Yhat[~covered])
return perror, nerror, oerror, berror
def compute_obj(self,pcovered,covered):
"""
helper, used in self.train
"""
Yhat = np.zeros(int(self.N))
Yhat[pcovered] = 1
Yhat[~covered] = self.Yb[~covered] #self.Y[~covered]#
TP,FP,TN,FN = getConfusion(Yhat,self.Y)
return Yhat,TP,FP,TN,FN
def propose_rs(self, prs,nrs,pcovered,ncovered,overlapped, covered,Yhat, vt,print_message = False):
"""
helper, used in self.train
"""
        incorr = np.where(Yhat[covered]!=self.Y[covered])[0] # examples covered by the interpretable model but misclassified
incorrb = np.where(Yhat[~covered]!=self.Y[~covered])[0]
overlapped_ind = np.where(overlapped)[0]
p = np.sum(self.pRMatrix[:,prs],axis = 1)
n = np.sum(self.nRMatrix[:,nrs],axis = 1)
ex = -1
if sum(covered) ==self.N: # covering all examples.
if print_message:
print('===== already covering all examples ===== ')
# print('0')
move = ['cut']
self.actions.append(0)
if len(prs)==0:
sign = [0]
elif len(nrs)==0:
sign = [1]
else:
sign = [int(random()<0.5)]
elif len(incorr) ==0 and (len(incorrb)==0 or len(overlapped) ==self.N) or sum(overlapped) > sum(covered):
if print_message:
print(' ===== 1 ===== ')
self.actions.append(1)
# print('1')
move = ['cut']
sign = [int(random()<0.5)]
# elif (len(incorr) == 0 and (sum(covered)>0)) or len(incorr)/sum(covered) >= len(incorrb)/sum(~covered):
# if print_message:
# print(' ===== 2 ===== ')
# self.actions.append(2)
# ex = sample(list(np.where(~covered)[0]) + list(np.where(overlapped)[0]),1)[0]
# if overlapped[ex] or len(prs) + len(nrs) >= (vt + self.beta)/self.alpha:
# # print('2')
# move = ['cut']
# sign = [int(random()<0.5)]
# else:
# # print('3')
# move = ['expand']
# sign = [int(random()<0.5)]
else:
# if sum(overlapped)/sum(pcovered)>.5 or sum(overlapped)/sum(ncovered)>.5:
# if print_message:
# print(' ===== 3 ===== ')
# # print('4')
# move = ['cut']
# sign = [int(len(prs)>len(nrs))]
# else:
t = random()
if t< 1./3: # try to decrease errors
self.actions.append(3)
if print_message:
print(' ===== decrease error ===== ')
ex = sample(list(incorr) + list(incorrb),1)[0]
if ex in incorr: # incorrectly classified by the interpretable model
rs_indicator = (pcovered[ex]).astype(int) # covered by prules
if random()<0.5:
# print('7')
move = ['cut']
sign = [rs_indicator]
else:
# print('8')
move = ['cut','add']
sign = [rs_indicator,rs_indicator]
# elif overlapped[ex]:
# if random()<0.5 :
# # print('5')
# move = ['cut']
# sign = [1 - self.Y[ex]]
# else:
# # print('6')
# move = ['cut','add']
# sign = [1 - self.Y[ex],1 - self.Y[ex]]
else: # incorrectly classified by the black box model
# print('9')
move = ['add']
sign = [int(self.Y[ex]==1)]
elif t<2./3: # decrease coverage
self.actions.append(4)
if print_message:
print(' ===== decrease size ===== ')
move = ['cut']
sign = [round(random())]
else: # increase coverage
self.actions.append(5)
if print_message:
print(' ===== increase coverage ===== ')
move = ['expand']
sign = [round(random())]
# if random()<0.5:
# move.append('add')
# sign.append(1-rs_indicator)
# else:
# move.extend(['cut','add'])
# sign.extend([1-rs_indicator,1-rs_indicator])
for j in range(len(move)):
if sign[j]==1:
prs = self.action(move[j],sign[j],ex,prs,Yhat,pcovered)
else:
nrs = self.action(move[j],sign[j],ex,nrs,Yhat,ncovered)
p = np.sum(self.pRMatrix[:,prs],axis = 1)>0
n = np.sum(self.nRMatrix[:,nrs],axis = 1)>0
o = np.multiply(p,n)
return prs, nrs,p,n^o,o, np.logical_xor(p,n) + o
def action(self,move, rs_indicator, ex, rules,Yhat,covered):
"""
helper, used in self.propose_rs
"""
if rs_indicator==1:
RMatrix = self.pRMatrix
# error = self.perror
supp = self.psupp
else:
RMatrix = self.nRMatrix
# error = self.nerror
supp = self.nsupp
Y = self.Y if rs_indicator else 1- self.Y
if move=='cut' and len(rules)>0:
# print('======= cut =======')
""" cut """
if random()<0.25 and ex >=0:
candidate = list(set(np.where(RMatrix[ex,:]==1)[0]).intersection(rules))
if len(candidate)==0:
candidate = rules
cut_rule = sample(candidate,1)[0]
else:
p = []
all_sum = np.sum(RMatrix[:,rules],axis = 1)
for index,rule in enumerate(rules):
Yhat= ((all_sum - np.array(RMatrix[:,rule]))>0).astype(int)
TP,FP,TN,FN = getConfusion(Yhat,Y)
p.append(TP.astype(float)/(TP+FP+1))
# p.append(log_betabin(TP,TP+FP,self.alpha_1,self.beta_1) + log_betabin(FN,FN+TN,self.alpha_2,self.beta_2))
p = [x - min(p) for x in p]
p = np.exp(p)
p = np.insert(p,0,0)
p = np.array(list(accumulate(p)))
if p[-1]==0:
cut_rule = sample(rules,1)[0]
else:
p = p/p[-1]
index = find_lt(p,random())
cut_rule = rules[index]
rules.remove(cut_rule)
elif move == 'add' and ex>=0:
# print('======= add =======')
""" add """
score_max = -self.N *10000000
if self.Y[ex]*rs_indicator + (1 - self.Y[ex])*(1 - rs_indicator)==1:
# select = list(np.where(RMatrix[ex] & (error +self.alpha*self.N < self.beta * supp))[0]) # fix
select = list(np.where(RMatrix[ex])[0])
else:
# select = list(np.where( ~RMatrix[ex]& (error +self.alpha*self.N < self.beta * supp))[0])
select = list(np.where( ~RMatrix[ex])[0])
self.select = select
if len(select)>0:
if random()<0.25:
add_rule = sample(select,1)[0]
else:
# cover = np.sum(RMatrix[(~covered)&(~covered2), select],axis = 0)
# =============== Use precision as a criteria ===============
# Yhat_neg_index = np.where(np.sum(RMatrix[:,rules],axis = 1)<1)[0]
# mat = np.multiply(RMatrix[Yhat_neg_index.reshape(-1,1),select].transpose(),Y[Yhat_neg_index])
# TP = np.sum(mat,axis = 1)
# FP = np.array(np.sum(RMatrix[Yhat_neg_index.reshape(-1,1),select],axis = 0) - TP)
# TN = np.sum(Y[Yhat_neg_index]==0)-FP
# FN = sum(Y[Yhat_neg_index]) - TP
# p = (TP.astype(float)/(TP+FP+1)) + self.alpha * supp[select]
# add_rule = select[sample(list(np.where(p==max(p))[0]),1)[0]]
# =============== Use objective function as a criteria ===============
for ind in select:
z = np.logical_or(RMatrix[:,ind],Yhat)
TP,FP,TN,FN = getConfusion(z,self.Y)
score = FP+FN -self.beta * sum(RMatrix[~covered ,ind])
if score > score_max:
score_max = score
add_rule = ind
if add_rule not in rules:
rules.append(add_rule)
else: # expand
# print(['======= expand =======', len(rules)])
# candidates = np.where(error < self.beta * supp-self.alpha*self.N)[0] # fix
candidates = [x for x in range(RMatrix.shape[1])]
if rs_indicator:
select = list(set(candidates).difference(rules))
else:
select = list(set(candidates).difference(rules))
# self.error = error
self.supp = supp
self.select = select
self.candidates = candidates
self.rules = rules
if random()<0.25:
add_rule = sample(select, 1)[0]
else:
# Yhat_neg_index = np.where(np.sum(RMatrix[:,rules],axis = 1)<1)[0]
Yhat_neg_index = np.where(~covered)[0]
mat = np.multiply(RMatrix[Yhat_neg_index.reshape(-1,1),select].transpose(),Y[Yhat_neg_index])
# TP = np.array(np.sum(mat,axis = 0).tolist()[0])
TP = np.sum(mat,axis = 1)
FP = np.array(np.sum(RMatrix[Yhat_neg_index.reshape(-1,1),select],axis = 0) - TP)
TN = np.sum(Y[Yhat_neg_index]==0)-FP
FN = sum(Y[Yhat_neg_index]) - TP
score = (FP + FN)+ self.beta * (TN + FN)
# score = (TP.astype(float)/(TP+FP+1)) + self.alpha * supp[select] # using precision as the criteria
add_rule = select[sample(list(np.where(score==min(score))[0]),1)[0]]
if add_rule not in rules:
rules.append(add_rule)
return rules
def print_rules(self, rules_max):
"""
unused
"""
for rule_index in rules_max:
print(self.rules[rule_index])
def predict_text(self,df,Y,Yb):
"""
unused
"""
prules = [self.prules[i] for i in self.prs_min]
nrules = [self.nrules[i] for i in self.nrs_min]
if len(prules):
p = [[] for rule in prules]
for i,rule in enumerate(prules):
p[i] = np.array((np.sum(df[:,list(rule)],axis=1)==len(rule)).flatten().tolist()[0]).astype(int)
p = (np.sum(p,axis=0)>0).astype(int)
else:
p = np.zeros(len(Y))
if len(nrules):
n = [[] for rule in nrules]
for i,rule in enumerate(nrules):
n[i] = np.array((np.sum(df[:,list(rule)],axis=1)==len(rule)).flatten().tolist()[0]).astype(int)
n = (np.sum(n,axis=0)>0).astype(int)
else:
n = np.zeros(len(Y))
pind = list(np.where(p)[0])
nind = list(np.where(n)[0])
covered = [x for x in range(len(Y)) if x in pind or x in nind]
Yhat = Yb
Yhat[nind] = 0
Yhat[pind] = 1
return Yhat,covered,Yb
def predict(self, df, Y,Yb ):
"""
unused
"""
prules = [self.prules[i] for i in self.prs_min]
nrules = [self.nrules[i] for i in self.nrs_min]
# if isinstance(self.df, scipy.sparse.csc.csc_matrix)==False:
dfn = 1-df #df has negative associations
dfn.columns = [name.strip() + 'neg' for name in df.columns]
df_test = pd.concat([df,dfn],axis = 1)
if len(prules):
p = [[] for rule in prules]
for i,rule in enumerate(prules):
p[i] = (np.sum(df_test[list(rule)],axis=1)==len(rule)).astype(int)
p = (np.sum(p,axis=0)>0).astype(int)
else:
p = np.zeros(len(Y))
if len(nrules):
n = [[] for rule in nrules]
for i,rule in enumerate(nrules):
n[i] = (np.sum(df_test[list(rule)],axis=1)==len(rule)).astype(int)
n = (np.sum(n,axis=0)>0).astype(int)
else:
n = np.zeros(len(Y))
pind = list(np.where(p)[0])
nind = list(np.where(n)[0])
covered = [x for x in range(len(Y)) if x in pind or x in nind]
Yhat = np.array([i for i in Yb])
Yhat[nind] = 0
Yhat[pind] = 1
return Yhat,covered,Yb
def accumulate(iterable, func=operator.add):
"""
helper, used in hyb.action, hyb.screen_rules
"""
'Return running totals'
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
total = next(it)
yield total
for element in it:
total = func(total, element)
yield total
def find_lt(a, x):
"""
hepler, used in hyb.action
"""
""" Find rightmost value less than x"""
i = bisect_left(a, x)
if i:
return int(i-1)
else:
return 0
def getConfusion(Yhat,Y):
"""
helper, used in hyb.computeObject, hyb.action
"""
    if len(Yhat) != len(Y):
        raise ValueError('Yhat and Y have different lengths')
TP = np.dot(np.array(Y),np.array(Yhat))
FP = np.sum(Yhat) - TP
TN = len(Y) - np.sum(Y)-FP
FN = len(Yhat) - np.sum(Yhat) - TN
return TP,FP,TN,FN
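# Worked example (illustrative): with Yhat=[1, 0, 1] and Y=[1, 1, 0] the counts
# are TP=1 (index 0), FN=1 (index 1), FP=1 (index 2), TN=0, so
# getConfusion([1, 0, 1], [1, 1, 0]) returns (1, 1, 0, 1) in TP, FP, TN, FN order.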
def extract_rules(tree, feature_names):
"""
helper, used in hyb.generate_rulespace, when using random forest to generate rulespace
"""
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
features = [feature_names[i] for i in tree.tree_.feature]
    # get ids of the leaf nodes
idx = np.argwhere(left == -1)[:,0]
def recurse(left, right, child, lineage=None):
if lineage is None:
lineage = []
if child in left:
parent = np.where(left == child)[0].item()
suffix = 'neg'
else:
parent = np.where(right == child)[0].item()
suffix = ''
# lineage.append((parent, split, threshold[parent], features[parent]))
lineage.append((features[parent].strip()+suffix))
if parent == 0:
lineage.reverse()
return lineage
else:
return recurse(left, right, parent, lineage)
rules = []
for child in idx:
rule = []
for node in recurse(left, right, child):
rule.append(node)
rules.append(rule)
return rules
def binary_code(df,collist,Nlevel, length):
"""
preprocessing
    converts a column of continuous values to binary indicator format, split into Nlevel quantile parts
    modifies df in place
    :param df: dataframe to be modified
    :param collist: list of names of columns with continuous values to be modified
    :param Nlevel: number of parts to split each column into (creates Nlevel-1 new columns and deletes the original)
    :param length: expected number of rows, used as a sanity check against accidental row loss
    :return: None
"""
for col in collist:
for q in range(1,Nlevel,1):
threshold = df[col].quantile(float(q)/Nlevel)
df[col+'_geq_'+str(int(q))+'q'] = (df[col] >= threshold).astype(float)
print(col)
if length - len(df) > 500:
raise Exception('exit')
length = len(df)
print('length: ' + str(length))
df.drop(collist,axis = 1, inplace = True)
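# Illustrative call of binary_code (hypothetical toy frame): with Nlevel=4 this
# adds the indicator columns AGE_geq_1q, AGE_geq_2q and AGE_geq_3q, then drops
# AGE itself.
#   toy = pd.DataFrame({'AGE': [21, 35, 48, 62]})
#   binary_code(toy, ['AGE'], 4, length=len(toy))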
# =============================================================================
#
# =============================================================================
import os
# =============================================================================
# preprocessing
# =============================================================================
df = pd.read_excel('default of credit card clients.xls', sheet_name='Data', header=1)
length = len(df)
print('length: ' + str(length))
# drop duplicates
df.drop_duplicates(subset=[x for x in df.columns if x != 'ID'], inplace=True)
print('drop_duplicates')
if length - len(df) > 500:
raise Exception('exit')
length = len(df)
print('length: ' + str(length))
# drop weird values
dict_correct_vals = {'EDUCATION': [1, 2, 3, 4],
'MARRIAGE': [1, 2, 3]}
cols_payment_hist = ['PAY_'+str(x) for x in range(7) if x!= 1]
# removed this part because it drops too many rows (half the samples have 0 in PAY_0, a value the paper leaves unspecified)
# =============================================================================
# for col in cols_payment_hist:
# dict_correct_vals[col] = [x for x in range(-1,10,1) if x!=0]
# =============================================================================
for col in dict_correct_vals.keys():
df[col] = df[col].apply(lambda x: x if x in dict_correct_vals[col] else 0)
df = df[df[col] != 0]
print(col)
if length - len(df) > 500:
raise Exception('exit')
length = len(df)
print('length: ' + str(length))
cols_bill_amt = ['BILL_AMT' + str(x) for x in range(1,7)]
cols_past_payment = ['PAY_AMT'+str(x) for x in range(1,7)]
binary_code(df, ['LIMIT_BAL', 'EDUCATION', 'AGE'] + cols_payment_hist + cols_bill_amt + cols_past_payment, 4, length=length)
# OHE
df = pd.get_dummies(df, columns=['MARRIAGE'])
df['SEX'] = df['SEX'].apply(lambda x: 0 if x == 2 else x)
# =============================================================================
# black box models
# =============================================================================
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import f1_score, roc_auc_score, average_precision_score, precision_recall_curve
import matplotlib.pyplot as plt
X, y = df.drop(labels=['ID', 'default payment next month'], axis='columns'), df.loc[:,'default payment next month']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
from time import time
from scipy.stats import randint as sp_randint
# =============================================================================
# # build a classifier
# clf = RandomForestClassifier(n_estimators=20)
#
#
# # Utility function to report best scores
# def report(results, n_top=3):
# for i in range(1, n_top + 1):
# candidates = np.flatnonzero(results['rank_test_score'] == i)
# for candidate in candidates:
# print("Model with rank: {0}".format(i))
# print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
# results['mean_test_score'][candidate],
# results['std_test_score'][candidate]))
# print("Parameters: {0}".format(results['params'][candidate]))
# print("")
#
#
# # specify parameters and distributions to sample from
# param_dist = {"n_estimators":sp_randint(10, 1000), # added this argument, removed the n_estimators argument above
# "max_depth": [3, None],
# "max_features": sp_randint(1, 11),
# "min_samples_split": sp_randint(2, 11),
# "bootstrap": [True, False],
# "criterion": ["gini", "entropy"]}
#
# # run randomized search
# n_iter_search = 20
# random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
# n_iter=n_iter_search, cv=10, iid=False)
#
# start = time()
# random_search.fit(X_train, y_train)
# print("RandomizedSearchCV took %.2f seconds for %d candidates"
# " parameter settings." % ((time() - start), n_iter_search))
# report(random_search.cv_results_)
# =============================================================================
clf = RandomForestClassifier(n_estimators=20, random_state=0, **{'bootstrap': True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 9, 'min_samples_split': 10})
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print('rf accuracy: ' + str(acc))
yb = clf.predict(X_test)
model = hyb(X_train, y_train, yb)
model.set_parameters(alpha=1, beta=0.1)
model.set_parameters(alpha=0.01, beta=0.95) # added after running
model.generate_rulespace(supp=30, maxlen=10, N=model.N, method='rf')
maps,accuracy_min,covered_min = model.train()
Yhat,covered,Yb = model.predict(X_test, y_test, yb)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_true=y_test, y_pred=Yhat)
coverage = len(covered)/len(Yhat)
print('accuracy: ' + str(accuracy))
print('coverage: ' + str(coverage))
|
[
"numpy.sum",
"random.sample",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"numpy.argsort",
"numpy.mean",
"numpy.exp",
"numpy.multiply",
"fim.fpgrowth",
"numpy.insert",
"numpy.logical_xor",
"pandas.concat",
"sklearn.ensemble.RandomForestClassifier",
"pandas.get_dummies",
"pandas.read_excel",
"numpy.sort",
"numpy.argwhere",
"numpy.matrix",
"bisect.bisect_left",
"time.time",
"numpy.where",
"numpy.array",
"numpy.random.random",
"numpy.logical_or",
"itertools.chain.from_iterable"
] |
[((29338, 29423), 'pandas.read_excel', 'pd.read_excel', (['"""default of credit card clients.xls"""'], {'sheet_name': '"""Data"""', 'header': '(1)'}), "('default of credit card clients.xls', sheet_name='Data', header=1\n )\n", (29351, 29423), True, 'import pandas as pd\n'), ((30790, 30830), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['MARRIAGE']"}), "(df, columns=['MARRIAGE'])\n", (30804, 30830), True, 'import pandas as pd\n'), ((31877, 31942), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=0, stratify=y)\n', (31893, 31942), False, 'from sklearn.model_selection import train_test_split, RandomizedSearchCV\n'), ((33670, 33844), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(20)', 'random_state': '(0)'}), "(n_estimators=20, random_state=0, **{'bootstrap': \n True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 9,\n 'min_samples_split': 10})\n", (33692, 33844), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n'), ((34317, 34359), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_test', 'y_pred': 'Yhat'}), '(y_true=y_test, y_pred=Yhat)\n', (34331, 34359), False, 'from sklearn.metrics import accuracy_score\n'), ((26529, 26546), 'bisect.bisect_left', 'bisect_left', (['a', 'x'], {}), '(a, x)\n', (26540, 26546), False, 'from bisect import bisect_left\n'), ((5287, 5293), 'time.time', 'time', ([], {}), '()\n', (5291, 5293), False, 'from time import time\n'), ((5959, 5975), 'numpy.array', 'np.array', (['indptr'], {}), '(indptr)\n', (5967, 5975), True, 'import numpy as np\n'), ((7620, 7639), 'numpy.array', 'np.array', (['Z[:, ind]'], {}), '(Z[:, ind])\n', (7628, 7639), True, 'import numpy as np\n'), ((8520, 8537), 'numpy.multiply', 'np.multiply', (['p', 'n'], {}), '(p, n)\n', (8531, 8537), True, 'import numpy as np\n'), ((8642, 8662), 'numpy.logical_xor', 'np.logical_xor', (['p', 'n'], {}), '(p, n)\n', (8656, 8662), True, 'import numpy as np\n'), ((13867, 13904), 'numpy.sum', 'np.sum', (['self.pRMatrix[:, prs]'], {'axis': '(1)'}), '(self.pRMatrix[:, prs], axis=1)\n', (13873, 13904), True, 'import numpy as np\n'), ((13917, 13954), 'numpy.sum', 'np.sum', (['self.nRMatrix[:, nrs]'], {'axis': '(1)'}), '(self.nRMatrix[:, nrs], axis=1)\n', (13923, 13954), True, 'import numpy as np\n'), ((18099, 18116), 'numpy.multiply', 'np.multiply', (['p', 'n'], {}), '(p, n)\n', (18110, 18116), True, 'import numpy as np\n'), ((25157, 25185), 'pandas.concat', 'pd.concat', (['[df, dfn]'], {'axis': '(1)'}), '([df, dfn], axis=1)\n', (25166, 25185), True, 'import pandas as pd\n'), ((25920, 25945), 'numpy.array', 'np.array', (['[i for i in Yb]'], {}), '([i for i in Yb])\n', (25928, 25945), True, 'import numpy as np\n'), ((26797, 26808), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (26805, 26808), True, 'import numpy as np\n'), ((26809, 26823), 'numpy.array', 'np.array', (['Yhat'], {}), '(Yhat)\n', (26817, 26823), True, 'import numpy as np\n'), ((26834, 26846), 'numpy.sum', 'np.sum', (['Yhat'], {}), '(Yhat)\n', (26840, 26846), True, 'import numpy as np\n'), ((27316, 27339), 'numpy.argwhere', 'np.argwhere', (['(left == -1)'], {}), '(left == -1)\n', (27327, 27339), True, 'import numpy as np\n'), ((2901, 2974), 'fim.fpgrowth', 'fpgrowth', (['[itemMatrix[i] for i in pindex]'], {'supp': 'supp', 'zmin': '(1)', 'zmax': 'maxlen'}), '([itemMatrix[i] for i in pindex], supp=supp, zmin=1, zmax=maxlen)\n', (2909, 2974), False, 'from fim import fpgrowth\n'), ((3060, 3133), 'fim.fpgrowth', 'fpgrowth', (['[itemMatrix[i] for i in nindex]'], {'supp': 'supp', 'zmin': '(1)', 'zmax': 'maxlen'}), '([itemMatrix[i] for i in nindex], supp=supp, zmin=1, zmax=maxlen)\n', (3068, 3133), False, 'from fim import fpgrowth\n'), ((4454, 4486), 'pandas.concat', 'pd.concat', (['[self.df, df]'], {'axis': '(1)'}), '([self.df, df], axis=1)\n', (4463, 4486), True, 'import pandas as pd\n'), ((6507, 6520), 'numpy.matrix', 'np.matrix', (['df'], {}), '(df)\n', (6516, 6520), True, 'import numpy as np\n'), ((8395, 8437), 'numpy.sum', 'np.sum', (['self.pRMatrix[:, prs_curr]'], {'axis': '(1)'}), '(self.pRMatrix[:, prs_curr], axis=1)\n', (8401, 8437), True, 'import numpy as np\n'), ((8452, 8494), 'numpy.sum', 'np.sum', (['self.nRMatrix[:, nrs_curr]'], {'axis': '(1)'}), '(self.nRMatrix[:, nrs_curr], axis=1)\n', (8458, 8494), True, 'import numpy as np\n'), ((13668, 13710), 'numpy.where', 'np.where', (['(Yhat[covered] != self.Y[covered])'], {}), '(Yhat[covered] != self.Y[covered])\n', (13676, 13710), True, 'import numpy as np\n'), ((13760, 13804), 'numpy.where', 'np.where', (['(Yhat[~covered] != self.Y[~covered])'], {}), '(Yhat[~covered] != self.Y[~covered])\n', (13768, 13804), True, 'import numpy as np\n'), ((13831, 13851), 'numpy.where', 'np.where', (['overlapped'], {}), '(overlapped)\n', (13839, 13851), True, 'import numpy as np\n'), ((17995, 18032), 'numpy.sum', 'np.sum', (['self.pRMatrix[:, prs]'], {'axis': '(1)'}), '(self.pRMatrix[:, prs], axis=1)\n', (18001, 18032), True, 'import numpy as np\n'), ((18047, 18084), 'numpy.sum', 'np.sum', (['self.nRMatrix[:, nrs]'], {'axis': '(1)'}), '(self.nRMatrix[:, nrs], axis=1)\n', (18053, 18084), True, 'import numpy as np\n'), ((26870, 26879), 'numpy.sum', 'np.sum', (['Y'], {}), '(Y)\n', (26876, 26879), True, 'import numpy as np\n'), ((26904, 26916), 'numpy.sum', 'np.sum', (['Yhat'], {}), '(Yhat)\n', (26910, 26916), True, 'import numpy as np\n'), ((2415, 2447), 'pandas.concat', 'pd.concat', (['[self.df, df]'], {'axis': '(1)'}), '([self.df, df], axis=1)\n', (2424, 2447), True, 'import pandas as pd\n'), ((2583, 2604), 'numpy.where', 'np.where', (['(self.Y == 1)'], {}), '(self.Y == 1)\n', (2591, 2604), True, 'import numpy as np\n'), ((2627, 2648), 'numpy.where', 'np.where', (['(self.Y != 1)'], {}), '(self.Y != 1)\n', (2635, 2648), True, 'import numpy as np\n'), ((3483, 3550), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'length'}), '(n_estimators=n_estimators, max_depth=length)\n', (3505, 3550), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n'), ((4011, 4078), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'length'}), '(n_estimators=n_estimators, max_depth=length)\n', (4033, 4078), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n'), ((5741, 5818), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[[itemInd[x] for x in rule] for rule in rules]'], {}), '([[itemInd[x] for x in rule] for rule in rules])\n', (5770, 5818), False, 'import itertools\n'), ((12143, 12151), 'numpy.random.random', 'random', ([], {}), '()\n', (12149, 12151), False, 'from numpy.random import random\n'), ((15679, 15687), 'numpy.random.random', 'random', ([], {}), '()\n', (15685, 15687), False, 'from numpy.random import random\n'), ((18149, 18169), 'numpy.logical_xor', 'np.logical_xor', (['p', 'n'], {}), '(p, n)\n', (18163, 18169), True, 'import numpy as np\n'), ((19026, 19059), 'numpy.sum', 'np.sum', (['RMatrix[:, rules]'], {'axis': '(1)'}), '(RMatrix[:, rules], axis=1)\n', (19032, 19059), True, 'import numpy as np\n'), ((19497, 19506), 'numpy.exp', 'np.exp', (['p'], {}), '(p)\n', (19503, 19506), True, 'import numpy as np\n'), ((19527, 19545), 'numpy.insert', 'np.insert', (['p', '(0)', '(0)'], {}), '(p, 0, 0)\n', (19536, 19545), True, 'import numpy as np\n'), ((24548, 24559), 'numpy.where', 'np.where', (['p'], {}), '(p)\n', (24556, 24559), True, 'import numpy as np\n'), ((24584, 24595), 'numpy.where', 'np.where', (['n'], {}), '(n)\n', (24592, 24595), True, 'import numpy as np\n'), ((25782, 25793), 'numpy.where', 'np.where', (['p'], {}), '(p)\n', (25790, 25793), True, 'import numpy as np\n'), ((25818, 25829), 'numpy.where', 'np.where', (['n'], {}), '(n)\n', (25826, 25829), True, 'import numpy as np\n'), ((6677, 6692), 'numpy.where', 'np.where', (['(y > 0)'], {}), '(y > 0)\n', (6685, 6692), True, 'import numpy as np\n'), ((7149, 7166), 'numpy.sum', 'np.sum', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (7155, 7166), True, 'import numpy as np\n'), ((18718, 18726), 'numpy.random.random', 'random', ([], {}), '()\n', (18724, 18726), False, 'from numpy.random import random\n'), ((18936, 18956), 'random.sample', 'sample', (['candidate', '(1)'], {}), '(candidate, 1)\n', (18942, 18956), False, 'from random import sample\n'), ((22554, 22562), 'numpy.random.random', 'random', ([], {}), '()\n', (22560, 22562), False, 'from numpy.random import random\n'), ((22971, 22990), 'numpy.sum', 'np.sum', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (22977, 22990), True, 'import numpy as np\n'), ((3000, 3013), 'numpy.sort', 'np.sort', (['x[0]'], {}), '(x[0])\n', (3007, 3013), True, 'import numpy as np\n'), ((3159, 3172), 'numpy.sort', 'np.sort', (['x[0]'], {}), '(x[0])\n', (3166, 3172), True, 'import numpy as np\n'), ((6717, 6737), 'numpy.sum', 'np.sum', (['Zpos'], {'axis': '(0)'}), '(Zpos, axis=0)\n', (6723, 6737), True, 'import numpy as np\n'), ((7445, 7455), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (7452, 7455), True, 'import numpy as np\n'), ((7475, 7502), 'numpy.argsort', 'np.argsort', (['p1[supp_select]'], {}), '(p1[supp_select])\n', (7485, 7502), True, 'import numpy as np\n'), ((19654, 19670), 'random.sample', 'sample', (['rules', '(1)'], {}), '(rules, 1)\n', (19660, 19670), False, 'from random import sample\n'), ((19765, 19773), 'numpy.random.random', 'random', ([], {}), '()\n', (19771, 19773), False, 'from numpy.random import random\n'), ((20515, 20523), 'numpy.random.random', 'random', ([], {}), '()\n', (20521, 20523), False, 'from numpy.random import random\n'), ((22596, 22613), 'random.sample', 'sample', (['select', '(1)'], {}), '(select, 1)\n', (22602, 22613), False, 'from random import sample\n'), ((22752, 22770), 'numpy.where', 'np.where', (['(~covered)'], {}), '(~covered)\n', (22760, 22770), True, 'import numpy as np\n'), ((23111, 23141), 'numpy.sum', 'np.sum', (['(Y[Yhat_neg_index] == 0)'], {}), '(Y[Yhat_neg_index] == 0)\n', (23117, 23141), True, 'import numpy as np\n'), ((24132, 24149), 'numpy.sum', 'np.sum', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (24138, 24149), True, 'import numpy as np\n'), ((24449, 24466), 'numpy.sum', 'np.sum', (['n'], {'axis': '(0)'}), '(n, axis=0)\n', (24455, 24466), True, 'import numpy as np\n'), ((25395, 25412), 'numpy.sum', 'np.sum', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (25401, 25412), True, 'import numpy as np\n'), ((25683, 25700), 'numpy.sum', 'np.sum', (['n'], {'axis': '(0)'}), '(n, axis=0)\n', (25689, 25700), True, 'import numpy as np\n'), ((27497, 27520), 'numpy.where', 'np.where', (['(left == child)'], {}), '(left == child)\n', (27505, 27520), True, 'import numpy as np\n'), ((27593, 27617), 'numpy.where', 'np.where', (['(right == child)'], {}), '(right == child)\n', (27601, 27617), True, 'import numpy as np\n'), ((7751, 7768), 'numpy.sum', 'np.sum', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (7757, 7768), True, 'import numpy as np\n'), ((14684, 14692), 'numpy.random.random', 'random', ([], {}), '()\n', (14690, 14692), False, 'from numpy.random import random\n'), ((16121, 16129), 'numpy.random.random', 'random', ([], {}), '()\n', (16127, 16129), False, 'from numpy.random import random\n'), ((20224, 20245), 'numpy.where', 'np.where', (['RMatrix[ex]'], {}), '(RMatrix[ex])\n', (20232, 20245), True, 'import numpy as np\n'), ((20405, 20427), 'numpy.where', 'np.where', (['(~RMatrix[ex])'], {}), '(~RMatrix[ex])\n', (20413, 20427), True, 'import numpy as np\n'), ((20561, 20578), 'random.sample', 'sample', (['select', '(1)'], {}), '(select, 1)\n', (20567, 20578), False, 'from random import sample\n'), ((21566, 21602), 'numpy.logical_or', 'np.logical_or', (['RMatrix[:, ind]', 'Yhat'], {}), '(RMatrix[:, ind], Yhat)\n', (21579, 21602), True, 'import numpy as np\n'), ((3774, 3784), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (3781, 3784), True, 'import numpy as np\n'), ((4304, 4314), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (4311, 4314), True, 'import numpy as np\n'), ((14373, 14381), 'numpy.random.random', 'random', ([], {}), '()\n', (14379, 14381), False, 'from numpy.random import random\n'), ((17234, 17242), 'numpy.random.random', 'random', ([], {}), '()\n', (17240, 17242), False, 'from numpy.random import random\n'), ((17481, 17489), 'numpy.random.random', 'random', ([], {}), '()\n', (17487, 17489), False, 'from numpy.random import random\n'), ((18781, 18810), 'numpy.where', 'np.where', (['(RMatrix[ex, :] == 1)'], {}), '(RMatrix[ex, :] == 1)\n', (18789, 18810), True, 'import numpy as np\n'), ((19150, 19176), 'numpy.array', 'np.array', (['RMatrix[:, rule]'], {}), '(RMatrix[:, rule])\n', (19158, 19176), True, 'import numpy as np\n')]
|
import os
import enum
import functools
import itertools
import collections.abc
import concurrent.futures as cf
from . import errors
from . import utils
from . import exceptions
from .models import (
ParallelJob,
ParallelArg,
ParallelStatus,
FailedTask,
SequentialMapResult,
NamedMapResult,
)
# __all__ = ["decorate", "arg", "future", "map", "async_map", "par", "async_par"]
__all__ = ["map", "async_map", "par", "async_par"]
__version__ = "0.9.1"
__author__ = "<NAME> <<EMAIL>>"
class ExecutorStrategy(enum.Enum):
THREAD_EXECUTOR = "thread"
PROCESS_EXECUTOR = "process"
THREAD_EXECUTOR = ExecutorStrategy.THREAD_EXECUTOR
PROCESS_EXECUTOR = ExecutorStrategy.PROCESS_EXECUTOR
class BaseParallelExecutor:
def __init__(
self,
jobs,
max_workers=None,
timeout=None,
silent=False,
ResultClass=SequentialMapResult,
):
self.jobs = jobs
self.max_workers = max_workers
self.timeout = timeout
self.silent = silent
self.ResultClass = ResultClass
self.__status = ParallelStatus.NOT_STARTED
self.__executor = None
self.__results = None
def _get_executor_class(self): # pragma: no cover
raise NotImplementedError()
@property
def status(self):
return self.__status
def start(self):
if self.__status == ParallelStatus.STARTED:
raise exceptions.ParallelStatusException(errors.STATUS_EXECUTOR_RUNNING)
self.__status = ParallelStatus.STARTED
ExecutorClass = self._get_executor_class()
self.__executor = ExecutorClass(max_workers=self.max_workers)
for job in self.jobs:
future = self.__executor.submit(job.fn, *job.args, **(job.kwargs))
# TODO: Check status for ParallelJob
job.future = future
def __enter__(self):
self.start()
return self
def __exit__(self, *args, **kwargs):
self.shutdown()
def results(self, timeout=None):
if self.__results:
return self.__results
if self.__status == ParallelStatus.NOT_STARTED:
raise exceptions.ParallelStatusException(errors.STATUS_EXECUTOR_NOT_STARTED)
ResultClass = self.ResultClass
self.__results = ResultClass()
for job in self.jobs:
try:
result = job.future.result(timeout=(timeout or self.timeout))
except cf.TimeoutError as e:
raise exceptions.TimeoutException() from e
except Exception as exc:
if not self.silent:
self.__status = ParallelStatus.FAILED
raise exc
self.__results.new_result(job.name, FailedTask(job, exc))
else:
self.__results.new_result(job.name, result)
self.__status = ParallelStatus.DONE
return self.__results
def shutdown(self):
self.__executor.shutdown()
class ThreadExecutor(BaseParallelExecutor):
def _get_executor_class(self):
return cf.ThreadPoolExecutor
class ProcessExecutor(BaseParallelExecutor):
def _get_executor_class(self):
return cf.ProcessPoolExecutor
EXECUTOR_MAPPING = {
ExecutorStrategy.THREAD_EXECUTOR: ThreadExecutor,
ExecutorStrategy.PROCESS_EXECUTOR: ProcessExecutor,
}
class ParallelHelper:
def __init__(self, executor=ExecutorStrategy.THREAD_EXECUTOR):
if isinstance(executor, ExecutorStrategy):
executor = EXECUTOR_MAPPING[executor]
self.ExecutorClass = executor
def get_result_class(self, params):
# return SequentialMapResult
if isinstance(params, collections.abc.Sequence):
return SequentialMapResult
return NamedMapResult
def map(
self,
fn,
params,
extras=None,
unpack_arguments=True,
max_workers=None,
timeout=None,
silent=False,
):
jobs = ParallelJob.build_for_callable_from_params(
fn, params, extras=extras, unpack_arguments=unpack_arguments
)
ResultClass = self.get_result_class(params)
with self.ExecutorClass(
jobs,
max_workers=max_workers,
timeout=timeout,
silent=silent,
ResultClass=ResultClass,
) as ex:
return ex.results()
def async_map(
self,
fn,
params,
extras=None,
unpack_arguments=True,
max_workers=None,
timeout=None,
silent=False,
):
jobs = ParallelJob.build_for_callable_from_params(
fn, params, extras=extras, unpack_arguments=unpack_arguments
)
ResultClass = self.get_result_class(params)
ex = self.ExecutorClass(
jobs,
max_workers=max_workers,
timeout=timeout,
silent=silent,
ResultClass=ResultClass,
)
return ex
def par(
self,
params,
extras=None,
unpack_arguments=True,
max_workers=None,
timeout=None,
silent=False,
):
jobs = ParallelJob.build_jobs_from_params(
params, extras=extras, unpack_arguments=unpack_arguments
)
ResultClass = self.get_result_class(params)
with self.ExecutorClass(
jobs,
max_workers=max_workers,
timeout=timeout,
silent=silent,
ResultClass=ResultClass,
) as ex:
return ex.results()
def split(
self,
collection,
fn,
executor=ExecutorStrategy.THREAD_EXECUTOR,
workers=None,
timeout=None,
extras=None,
):
workers = workers or min(32, (os.cpu_count() or 1) + 4)
chunks = utils.split_collection(collection, workers)
jobs = [
ParallelJob(fn, None, [chunk], (extras or {}).copy())
for chunk in chunks
]
with self.ExecutorClass(
jobs,
max_workers=workers,
timeout=timeout,
ResultClass=SequentialMapResult,
) as ex:
results = ex.results()
return list(itertools.chain.from_iterable(results))
#[item for sublist in l for item in sublist]
def map(
fn,
params,
executor=ExecutorStrategy.THREAD_EXECUTOR,
max_workers=None,
timeout=None,
extras=None,
silent=False,
unpack_arguments=True,
):
return ParallelHelper(executor).map(
fn,
params,
extras=extras,
unpack_arguments=unpack_arguments,
max_workers=max_workers,
timeout=timeout,
silent=silent,
)
def async_map(
fn,
params,
executor=ExecutorStrategy.THREAD_EXECUTOR,
max_workers=None,
timeout=None,
extras=None,
silent=False,
unpack_arguments=True,
):
return ParallelHelper(executor).async_map(
fn,
params,
extras=extras,
unpack_arguments=unpack_arguments,
max_workers=max_workers,
timeout=timeout,
silent=silent,
)
def par(
params,
executor=ExecutorStrategy.THREAD_EXECUTOR,
max_workers=None,
timeout=None,
extras=None,
silent=False,
unpack_arguments=True,
):
return ParallelHelper(executor).par(
params,
extras=extras,
unpack_arguments=unpack_arguments,
max_workers=max_workers,
timeout=timeout,
silent=silent,
)
def split(
collection,
fn,
executor=ExecutorStrategy.THREAD_EXECUTOR,
workers=None,
timeout=None,
extras=None,
):
return ParallelHelper(executor).split(
collection, fn, extras=extras, workers=workers, timeout=timeout
)
class ParallelCallable:
def __init__(
self, fn, executor, timeout, max_workers,
):
self.fn = fn
self.executor = executor
self.timeout = timeout
self.max_workers = max_workers
def map(
self,
params,
executor=None,
max_workers=None,
timeout=None,
extras=None,
silent=False,
unpack_arguments=True,
):
executor = executor or self.executor
return ParallelHelper(executor).map(
self.fn,
params,
extras=extras,
unpack_arguments=unpack_arguments,
max_workers=(max_workers or self.max_workers),
timeout=(timeout or self.timeout),
silent=silent,
)
def async_map(
self,
params,
executor=None,
max_workers=None,
timeout=None,
extras=None,
silent=False,
unpack_arguments=True,
):
executor = executor or self.executor
return ParallelHelper(executor).async_map(
self.fn,
params,
extras=extras,
unpack_arguments=unpack_arguments,
max_workers=(max_workers or self.max_workers),
timeout=(timeout or self.timeout),
silent=silent,
)
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
class ParallelDecorator:
def __init__(
self,
fn,
executor=ExecutorStrategy.THREAD_EXECUTOR,
timeout=None,
max_workers=None,
):
self.fn = fn
self.thread = ParallelCallable(
fn,
ExecutorStrategy.THREAD_EXECUTOR,
timeout=timeout,
max_workers=max_workers,
)
self.process = ParallelCallable(
fn,
ExecutorStrategy.PROCESS_EXECUTOR,
timeout=timeout,
max_workers=max_workers,
)
if executor == ExecutorStrategy.THREAD_EXECUTOR:
self.default_executor = self.thread
else:
self.default_executor = self.process
map = lambda self, *args, **kwargs: self.default_executor.map(*args, **kwargs)
async_map = lambda self, *args, **kwargs: self.default_executor.async_map(
*args, **kwargs
)
def future(self, *args, **kwargs):
return job(self.fn, *args, **kwargs)
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def decorate(*args, **kwargs):
if len(args) == 1 and callable(args[0]):
# Invoked without parameters
obj = ParallelDecorator(args[0])
return obj
else:
def wrapper(fn):
obj = ParallelDecorator(fn, *args, **kwargs)
return obj
return wrapper
thread = ParallelHelper(ThreadExecutor)
process = ParallelHelper(ProcessExecutor)
arg = lambda *args, **kwargs: ParallelArg(*args, **kwargs)
arg.__doc__ = "TODO"
def job(*args, **kwargs):
"TODO"
fn, *args = args
return ParallelJob(fn, None, args, kwargs)
NOT_STARTED = ParallelStatus.NOT_STARTED
STARTED = ParallelStatus.STARTED
DONE = ParallelStatus.DONE
FAILED = ParallelStatus.FAILED
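# Illustrative usage sketch (the worker function is hypothetical, and the exact
# shape of the returned result object depends on
# ParallelJob.build_for_callable_from_params and on SequentialMapResult):
#
#   def add(a, b):
#       return a + b
#
#   results = map(add, [(1, 2), (3, 4)])    # thread pool by default
#   with async_map(add, [(5, 6)]) as ex:    # context manager starts/stops it
#       late = ex.results()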
|
[
"os.cpu_count",
"itertools.chain.from_iterable"
] |
[((6253, 6291), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['results'], {}), '(results)\n', (6282, 6291), False, 'import itertools\n'), ((5805, 5819), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (5817, 5819), False, 'import os\n')]
|
"""
twominutejournal.journal
~~~~~~~~~~~~~~~~~~~~~~~~
A daily gratitude journal library.
"""
import uuid
import datetime
from .errors import EntryAlreadyExistsError
def create_prompt(question: str, responses: int) -> dict:
'''Create a new journal prompt.'''
if not isinstance(question, str):
raise TypeError('question must be of type str')
if not isinstance(responses, int):
raise TypeError('responses must be of type int')
return {
'question': question,
'responses': responses
}
def save_prompt(prompt: dict, storage_adapter: object):
'''Save a journal prompt.'''
if not isinstance(prompt, dict):
raise TypeError('prompt must be of type dict')
storage_adapter.store_prompt(prompt)
def get_todays_prompts(storage_adapter: object) -> list:
'''Get today's journal prompts.'''
today = datetime.datetime.today()
last_entry = storage_adapter.get_last_entry()
    # compare the latest entry's date to today's; skip when no entry exists yet
    if last_entry and last_entry.get('created') and last_entry['created'].date() == today.date():
raise EntryAlreadyExistsError(
'An entry has already been written today')
prompts = storage_adapter.get_prompts()
return prompts
def create_entry() -> dict:
'''Create a new journal entry.'''
return {
'id': str(uuid.uuid4()),
'created': datetime.datetime.today()
}
def view_all_entries(storage_adapter: object) -> list:
'''View all journal entries.'''
return storage_adapter.get_all_entries()
def create_response(prompt: str, body: str) -> dict:
'''Create a new journal response.'''
if not isinstance(prompt, str):
raise TypeError('prompt must be of type str.')
if not isinstance(body, str):
raise TypeError('body must be of type str.')
return {
'id': str(uuid.uuid4()),
'prompt': prompt,
'body': body
}
def submit_responses(entry: dict, responses: list, storage_adapter: object):
'''Submit an entry and list of responses.'''
if not isinstance(entry, dict):
raise TypeError('entry must be of type dict')
if not isinstance(responses, list):
raise TypeError('responses must be of type list')
storage_adapter.store_entry(entry)
for response in responses:
storage_adapter.store_response(response, entry['id'])
def view_entry_responses(entry_id: str, storage_adapter: object) -> list:
'''View the responses for a journal entry.'''
if not isinstance(entry_id, str):
raise TypeError('entry_id must be of type str')
responses = storage_adapter.get_entry_responses(entry_id)
return responses
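# Illustrative flow (the `adapter` object is hypothetical; it must provide the
# storage interface used above: store_prompt, get_prompts, get_last_entry,
# store_entry, store_response and get_entry_responses):
#
#   prompts = get_todays_prompts(adapter)
#   entry = create_entry()
#   responses = [create_response(p['question'], 'Some answer') for p in prompts]
#   submit_responses(entry, responses, adapter)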
|
[
"uuid.uuid4",
"datetime.datetime.today"
] |
[((885, 910), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (908, 910), False, 'import datetime\n'), ((1359, 1384), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1382, 1384), False, 'import datetime\n'), ((1325, 1337), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1335, 1337), False, 'import uuid\n'), ((1836, 1848), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1846, 1848), False, 'import uuid\n')]
|
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from djangobmf.workflows import Workflow, State, Transition
from djangobmf.settings import CONTRIB_TIMESHEET
from djangobmf.utils.model_from_name import model_from_name
def cancel_condition(object, user):
if getattr(object, 'referee_id', False) and object.referee_id != user.pk:
return False
return True
class GoalWorkflow(Workflow):
class States:
open = State(_(u"Open"), default=True, delete=False)
completed = State(_(u"Completed"), update=False, delete=True)
cancelled = State(_(u"Cancelled"), update=False, delete=True)
class Transitions:
complete = Transition(_("Complete this Goal"), "open", "completed")
cancel = Transition(_("Cancel this Goal"), "open", "cancelled", condition=cancel_condition)
reopen = Transition(_("Reopen this Goal"), ["completed", "cancelled"], "open")
def complete(self):
if self.instance.task_set.filter(completed=False).count() > 0: # TODO untested
raise ValidationError(_('You can not complete a goal which has open tasks'))
self.instance.completed = True
def cancel(self):
# TODO autoclose all tasks - needs testing / exception catching
# for obj in self.instance.task_set.filter(completed=False):
# obj.bmfworkflow_transition('cancel', self.user)
if self.instance.task_set.filter(completed=False).count() > 0: # TODO untested
            raise ValidationError(_('You can not cancel a goal which has open tasks'))
self.instance.completed = True
def reopen(self):
self.instance.completed = False
def start_condition(object, user):
if getattr(object, 'employee_id', False) and object.employee_id != user.pk: # TODO: untested
return False
return True
def finish_condition(object, user):
if object.goal and object.goal.referee_id and user.pk != object.goal.referee_id: # TODO: untested
return False
return True
class TaskWorkflow(Workflow):
class States:
new = State(_(u"New"), True, delete=False)
open = State(_(u"Open"), delete=False)
hold = State(_(u"Hold"), delete=False)
todo = State(_(u"Todo"), delete=False)
started = State(_(u"Started"), delete=False)
review = State(_(u"Review"), delete=False, update=False)
finished = State(_(u"Finished"), update=False, delete=True)
cancelled = State(_(u"Cancelled"), update=False, delete=True)
class Transitions:
start = Transition(
_("Work on this task"),
["new", "hold", "open", "todo"],
"started",
condition=start_condition,
)
todo = Transition(
_("Mark as todo"),
["new", "open", "started", "hold"],
"todo",
)
hold = Transition(
_("Set this task on hold"),
["new", "open", "started", "todo"],
"hold",
)
stop = Transition(
_("Stop working on this task"),
"started",
"open",
)
finish = Transition(
_("Finish this task"),
["started", "open", "hold", "new", "review", "todo"],
"finished",
condition=finish_condition,
)
review = Transition(
_("Set to review"),
["started", "open", "hold", "new", "todo"],
"review",
)
reopen = Transition(
_("Reopen this task"),
['finished', 'cancelled'],
'open',
)
unreview = Transition(
_("Reopen this task"),
['review'],
'open',
)
cancel = Transition(
_("Cancel this task"),
('new', 'hold', 'open', 'review'),
'cancelled',
condition=finish_condition,
)
def start(self):
self.instance.in_charge = self.user.djangobmf_employee
self.instance.employee = self.user.djangobmf_employee
if self.instance.project:
project = self.instance.project
elif self.instance.goal:
project = self.instance.goal.project
else:
project = None
timesheet = model_from_name(CONTRIB_TIMESHEET)
if timesheet is not None:
obj = timesheet(
task=self.instance,
employee=self.user.djangobmf_employee,
auto=True,
project=project,
summary=self.instance.summary
)
obj.save()
def todo(self):
self.instance.in_charge = self.user.djangobmf_employee
self.instance.employee = self.user.djangobmf_employee
self.stop()
def stop(self):
if not self.instance.in_charge and self.instance.employee:
self.instance.in_charge = self.instance.employee
timesheet = model_from_name(CONTRIB_TIMESHEET)
if timesheet is not None:
for obj in timesheet.objects.filter(
task=self.instance,
employee__in=[self.instance.in_charge, self.user.djangobmf_employee],
end=None,
auto=True,
):
obj.bmfworkflow_transition('finish', self.user)
def hold(self):
self.stop()
def unreview(self):
self.instance.employee = self.instance.in_charge
def reopen(self):
self.instance.employee = self.instance.in_charge
self.instance.completed = False
def review(self):
if not self.instance.in_charge and self.instance.employee:
self.instance.in_charge = self.instance.employee
if self.instance.goal:
self.instance.employee = self.instance.goal.referee
self.stop()
def finish(self):
self.stop()
self.instance.due_date = None
self.instance.completed = True
def cancel(self):
self.finish()
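# Illustrative transition call (`task` is a hypothetical model instance bound to
# TaskWorkflow): transitions are fired through the djangobmf workflow machinery,
# e.g. task.bmfworkflow_transition('start', request.user), which evaluates
# start_condition and then runs TaskWorkflow.start().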
|
[
"django.utils.translation.ugettext_lazy",
"djangobmf.utils.model_from_name.model_from_name"
] |
[((4424, 4458), 'djangobmf.utils.model_from_name.model_from_name', 'model_from_name', (['CONTRIB_TIMESHEET'], {}), '(CONTRIB_TIMESHEET)\n', (4439, 4458), False, 'from djangobmf.utils.model_from_name import model_from_name\n'), ((5092, 5126), 'djangobmf.utils.model_from_name.model_from_name', 'model_from_name', (['CONTRIB_TIMESHEET'], {}), '(CONTRIB_TIMESHEET)\n', (5107, 5126), False, 'from djangobmf.utils.model_from_name import model_from_name\n'), ((590, 600), 'django.utils.translation.ugettext_lazy', '_', (['u"""Open"""'], {}), "(u'Open')\n", (591, 600), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((656, 671), 'django.utils.translation.ugettext_lazy', '_', (['u"""Completed"""'], {}), "(u'Completed')\n", (657, 671), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((726, 741), 'django.utils.translation.ugettext_lazy', '_', (['u"""Cancelled"""'], {}), "(u'Cancelled')\n", (727, 741), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((824, 847), 'django.utils.translation.ugettext_lazy', '_', (['"""Complete this Goal"""'], {}), "('Complete this Goal')\n", (825, 847), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((898, 919), 'django.utils.translation.ugettext_lazy', '_', (['"""Cancel this Goal"""'], {}), "('Cancel this Goal')\n", (899, 919), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((998, 1019), 'django.utils.translation.ugettext_lazy', '_', (['"""Reopen this Goal"""'], {}), "('Reopen this Goal')\n", (999, 1019), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2224, 2233), 'django.utils.translation.ugettext_lazy', '_', (['u"""New"""'], {}), "(u'New')\n", (2225, 2233), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2276, 2286), 'django.utils.translation.ugettext_lazy', '_', (['u"""Open"""'], {}), "(u'Open')\n", (2277, 2286), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2323, 2333), 'django.utils.translation.ugettext_lazy', '_', (['u"""Hold"""'], {}), "(u'Hold')\n", (2324, 2333), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2370, 2380), 'django.utils.translation.ugettext_lazy', '_', (['u"""Todo"""'], {}), "(u'Todo')\n", (2371, 2380), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2420, 2433), 'django.utils.translation.ugettext_lazy', '_', (['u"""Started"""'], {}), "(u'Started')\n", (2421, 2433), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2472, 2484), 'django.utils.translation.ugettext_lazy', '_', (['u"""Review"""'], {}), "(u'Review')\n", (2473, 2484), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2539, 2553), 'django.utils.translation.ugettext_lazy', '_', (['u"""Finished"""'], {}), "(u'Finished')\n", (2540, 2553), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2608, 2623), 'django.utils.translation.ugettext_lazy', '_', (['u"""Cancelled"""'], {}), "(u'Cancelled')\n", (2609, 2623), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2716, 2738), 'django.utils.translation.ugettext_lazy', '_', (['"""Work on this task"""'], {}), "('Work on this task')\n", (2717, 2738), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2896, 2913), 'django.utils.translation.ugettext_lazy', '_', (['"""Mark as todo"""'], {}), "('Mark as todo')\n", (2897, 2913), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3032, 3058), 'django.utils.translation.ugettext_lazy', '_', (['"""Set this task on hold"""'], {}), "('Set this task on hold')\n", (3033, 3058), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3177, 3207), 'django.utils.translation.ugettext_lazy', '_', (['"""Stop working on this task"""'], {}), "('Stop working on this task')\n", (3178, 3207), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3303, 3324), 'django.utils.translation.ugettext_lazy', '_', (['"""Finish this task"""'], {}), "('Finish this task')\n", (3304, 3324), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3507, 3525), 'django.utils.translation.ugettext_lazy', '_', (['"""Set to review"""'], {}), "('Set to review')\n", (3508, 3525), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3656, 3677), 'django.utils.translation.ugettext_lazy', '_', (['"""Reopen this task"""'], {}), "('Reopen this task')\n", (3657, 3677), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3791, 3812), 'django.utils.translation.ugettext_lazy', '_', (['"""Reopen this task"""'], {}), "('Reopen this task')\n", (3792, 3812), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3909, 3930), 'django.utils.translation.ugettext_lazy', '_', (['"""Cancel this task"""'], {}), "('Cancel this task')\n", (3910, 3930), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1204, 1257), 'django.utils.translation.ugettext_lazy', '_', (['"""You can not complete a goal which has open tasks"""'], {}), "('You can not complete a goal which has open tasks')\n", (1205, 1257), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1646, 1699), 'django.utils.translation.ugettext_lazy', '_', (['"""You can not complete a goal which has open tasks"""'], {}), "('You can not complete a goal which has open tasks')\n", (1647, 1699), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
# -*- coding: utf-8 -*-
import numpy
import warnings
import operator
import collections
from sagar.crystal.structure import Cell
from sagar.element.base import get_symbol
def read_vasp(filename='POSCAR'):
"""
Import POSCAR/CONTCAR or filename with .vasp suffix
parameter:
filename: string, the filename
return: Cell object.
"""
# TODO: read velocities, now not supported. or not needed?
with open(filename, "r") as f:
# _read_string return Cell object.
return _read_string(f.read())
def _read_string(data):
"""
_read_string make io easy to be tested.
parameter: string of vasp input
return: Cell object
"""
lines = [l for l in data.split('\n') if l.rstrip()]
name = lines[0]
lattice_scale = float(lines[1].split()[0])
# lattice vectors
lattice = []
for i in [2, 3, 4]:
s = lines[i].split()
vec = float(s[0]), float(s[1]), float(s[2])
lattice.append(vec)
lattice = numpy.array(lattice)
    if lattice_scale < 0:
        # In vasp, a negative scale factor is treated as a volume.
        # http://pymatgen.org/_modules/pymatgen/io/vasp/inputs.html#POSCAR
        vol = abs(numpy.linalg.det(lattice))
        lattice_scale = (-lattice_scale / vol) ** (1 / 3)
    lattice *= lattice_scale
# atoms
vasp5 = False
_fifth_line = lines[5].split()
# VASP 5.x use the fifth line to represent atomic symbols
try:
for i in _fifth_line:
int(i)
numofatoms = _fifth_line
except ValueError:
vasp5 = True
atomtypes = _fifth_line
numofatoms = lines[6].split() # list of string here
if not vasp5:
warnings.warn("symbols of elements in fifth line are missing, "
"all atoms are init to NaN_i (i=0,1,2...)", UserWarning, stacklevel=2)
atomtypes = [str("NaN_{:}".format(i)) for i in range(len(numofatoms))]
atoms = []
for i, num in enumerate(numofatoms):
# https://gitlab.com/ase/ase/blob/master/ase/io/vasp.py
numofatoms[i] = int(num)
[atoms.append(atomtypes[i]) for na in range(numofatoms[i])]
if not vasp5:
line_coortype = 6
else:
line_coortype = 7
# TODO: Supporting Cartesian coordinates vasp input
coortype = lines[line_coortype].split()[0]
if coortype[0] in "sS":
warnings.warn("Sorry! Selective dynamics "
"are not supported now", FutureWarning, stacklevel=2)
line_coortype += 1
coortype = lines[line_coortype].split()[0]
if coortype[0] in "cCkK":
line_first_pos = line_coortype + 1
iscart=True
else:
iscart =False
if coortype[0] in "dD":
line_first_pos = line_coortype + 1
positions = []
total_atoms = sum(numofatoms)
for i in range(line_first_pos, line_first_pos + total_atoms):
s = lines[i].split()
vec = float(s[0]), float(s[1]), float(s[2])
positions.append(vec)
    if iscart:
        # Cartesian coordinates are also multiplied by the universal scaling
        # factor before converting to direct (fractional) coordinates.
        positions = numpy.dot(numpy.array(positions) * lattice_scale, numpy.linalg.inv(lattice))
return Cell(lattice, positions, atoms)
def write_vasp(cell, filename='POSCAR', suffix='.vasp', long_format=True):
"""
write vasp POSCAR type into file, vasp5 format only.
always write atoms sorted POSCAR.
parameters:
cell: Cell object, the Cell that you wanna write into vasp POSCAR.
filename: string, filename of output file, default='POSCAR'
suffix: string, suffix of filename, default='.vasp'
long_format: bool, if True format %.16f will be write, else %.6f
ref: https://gitlab.com/ase/ase/blob/master/ase/io/vasp.py
if optional parameters (filename and suffix) are not set,
the filename will be 'POSCAR.vasp'
"""
# TODO: write Cartesian coor POSCAR
filname_suffix = ''.join([filename, suffix])
with open(filname_suffix, "w") as f:
f.write(_write_string(cell, long_format))
def _write_string(cell, long_format, print_vacc=False):
"""
_write_string make io easy to be tested.
return: string represtent POSCAR
"""
    # merge and sort the atom types; used to build the system name, the atom counts, and a correctly ordered coordinate block
# sorted is a list of tuple(atom, na)
atoms_dict = collections.Counter(cell.atoms)
if not print_vacc:
del atoms_dict[0]
sorted_symbols = sorted(atoms_dict.items(), key=operator.itemgetter(0))
list_symbols = ["{:}{:}".format(get_symbol(atom), na)
for atom, na in sorted_symbols]
comment = ' '.join(list_symbols)
comment += '\n'
scale = '{:9.6f}'.format(1.0)
scale += '\n'
lattice_string = ""
if long_format:
latt_form = '21.16f'
else:
latt_form = '11.6f'
for vec in cell.lattice:
lattice_string += ' '
for v in vec:
lattice_string += '{:{form}}'.format(v, form=latt_form)
lattice_string += '\n'
# atom types and their numbers
atom_types = ' '.join([get_symbol(i[0]) for i in sorted_symbols])
atom_types += '\n'
atom_numbers = ' '.join([str(i[1]) for i in sorted_symbols])
atom_numbers += '\n'
# TODO: write Cartesian coor
coor_type = 'Direct\n'
# argsort atoms and resort coor
idx = numpy.argsort(cell.atoms)
coord = cell.positions[idx]
atoms = cell.atoms[idx]
positions_string = ""
if long_format:
pos_form = '19.16f'
else:
pos_form = '9.6f'
for i, vec in enumerate(coord):
if atoms[i] == 0:
continue
positions_string += ' '
for v in vec:
positions_string += '{:{form}}'.format(v, form=pos_form)
positions_string += ' ' + get_symbol(atoms[i])
positions_string += '\n'
poscar_string = ''.join([comment,
scale,
lattice_string,
atom_types,
atom_numbers,
coor_type,
positions_string])
return poscar_string
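# Round-trip sketch (assumes a POSCAR file exists in the working directory):
#   cell = read_vasp('POSCAR')
#   write_vasp(cell)  # with the default arguments this writes 'POSCAR.vasp'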
|
[
"sagar.crystal.structure.Cell",
"numpy.argsort",
"sagar.element.base.get_symbol",
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.inv",
"collections.Counter",
"operator.itemgetter",
"warnings.warn"
] |
[((996, 1016), 'numpy.array', 'numpy.array', (['lattice'], {}), '(lattice)\n', (1007, 1016), False, 'import numpy\n'), ((3122, 3153), 'sagar.crystal.structure.Cell', 'Cell', (['lattice', 'positions', 'atoms'], {}), '(lattice, positions, atoms)\n', (3126, 3153), False, 'from sagar.crystal.structure import Cell\n'), ((4222, 4253), 'collections.Counter', 'collections.Counter', (['cell.atoms'], {}), '(cell.atoms)\n', (4241, 4253), False, 'import collections\n'), ((5222, 5247), 'numpy.argsort', 'numpy.argsort', (['cell.atoms'], {}), '(cell.atoms)\n', (5235, 5247), False, 'import numpy\n'), ((1711, 1852), 'warnings.warn', 'warnings.warn', (['"""symbols of elements in fifth line are missing, all atoms are init to NaN_i (i=0,1,2...)"""', 'UserWarning'], {'stacklevel': '(2)'}), "(\n 'symbols of elements in fifth line are missing, all atoms are init to NaN_i (i=0,1,2...)'\n , UserWarning, stacklevel=2)\n", (1724, 1852), False, 'import warnings\n'), ((2390, 2487), 'warnings.warn', 'warnings.warn', (['"""Sorry! Selective dynamics are not supported now"""', 'FutureWarning'], {'stacklevel': '(2)'}), "('Sorry! Selective dynamics are not supported now',\n FutureWarning, stacklevel=2)\n", (2403, 2487), False, 'import warnings\n'), ((1205, 1230), 'numpy.linalg.det', 'numpy.linalg.det', (['lattice'], {}), '(lattice)\n', (1221, 1230), False, 'import numpy\n'), ((3061, 3083), 'numpy.array', 'numpy.array', (['positions'], {}), '(positions)\n', (3072, 3083), False, 'import numpy\n'), ((3084, 3109), 'numpy.linalg.inv', 'numpy.linalg.inv', (['lattice'], {}), '(lattice)\n', (3100, 3109), False, 'import numpy\n'), ((4355, 4377), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (4374, 4377), False, 'import operator\n'), ((4416, 4432), 'sagar.element.base.get_symbol', 'get_symbol', (['atom'], {}), '(atom)\n', (4426, 4432), False, 'from sagar.element.base import get_symbol\n'), ((4957, 4973), 'sagar.element.base.get_symbol', 'get_symbol', (['i[0]'], {}), '(i[0])\n', (4967, 4973), False, 'from sagar.element.base import get_symbol\n'), ((5659, 5679), 'sagar.element.base.get_symbol', 'get_symbol', (['atoms[i]'], {}), '(atoms[i])\n', (5669, 5679), False, 'from sagar.element.base import get_symbol\n')]
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 <NAME>, <NAME>, <NAME>, <NAME>
import sys
import os
from models.de import DE
sys.path.append(os.path.join(os.path.dirname(__file__), '../common/'))
from common.pg_search import PGsearch
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from config import Config
class DeWebServiceWrapper:
def __init__(self, args, ps, cacheW, staticDir):
def makeWS(assembly):
return DeWebService(args, ps, cacheW[assembly], staticDir, assembly)
self.assemblies = Config.assemblies
self.wss = {a: makeWS(a) for a in self.assemblies}
def process(self, j, args, kwargs):
if "assembly" not in j:
raise Exception("assembly not defined")
if j["assembly"] not in self.assemblies:
raise Exception("invalid assembly")
return self.wss[j["assembly"]].process(j, args, kwargs)
class DeWebService(object):
def __init__(self, args, ps, cache, staticDir, assembly):
self.args = args
self.ps = ps
self.cache = cache
self.staticDir = staticDir
self.assembly = assembly
self.actions = {"search": self.search}
def process(self, j, args, kwargs):
action = args[0]
try:
return self.actions[action](j, args[1:])
except:
raise
def search(self, j, args):
gene = j["gene"] # TODO: check for valid gene
ct1 = j["ct1"]
ct2 = j["ct2"]
if not ct1 or not ct2:
raise Exception("ct1 and/or ct2 empty!")
try:
de = DE(self.cache, self.ps, self.assembly, gene, ct1, ct2)
nearbyDEs = de.nearbyDEs()
diffCREs = {"data": None}
if nearbyDEs["data"]:
diffCREs = de.diffCREs(nearbyDEs["xdomain"])
return {gene: {"xdomain": nearbyDEs["xdomain"],
"coord": de.coord().toDict(),
"diffCREs": diffCREs,
"nearbyDEs": nearbyDEs},
"assembly": self.assembly,
"gene": gene,
"ct1": ct1,
"ct2": ct2}
        except:
            raise
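# Example request payload for DeWebService.search (all values are hypothetical;
# "assembly" must appear in Config.assemblies):
#   {"assembly": "mm10", "gene": "Gata1", "ct1": "cellTypeA", "ct2": "cellTypeB"}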
|
[
"os.path.dirname",
"models.de.DE"
] |
[((167, 192), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (182, 192), False, 'import os\n'), ((277, 302), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((1619, 1673), 'models.de.DE', 'DE', (['self.cache', 'self.ps', 'self.assembly', 'gene', 'ct1', 'ct2'], {}), '(self.cache, self.ps, self.assembly, gene, ct1, ct2)\n', (1621, 1673), False, 'from models.de import DE\n')]
|
import discord
from discord.ext import commands
import league
import matplotlib.pyplot as plt
import seaborn as sns
import os
client = commands.Bot(command_prefix='?')
token = os.environ.get('DISCORD_TOKEN')
@client.event
async def on_ready():
print('Bot is ready.')
@client.command()
async def rank(ctx, name, region='na1'):
summoner = league.Summoner(name=name,region=region)
tier, rank, lp = summoner.get_rank()
await ctx.send(f'{summoner.name} is {tier} {rank}, {lp} LP')
@client.command()
async def stats(ctx, name, region='na1'):
summoner = league.Summoner(name=name,region=region)
avg_kills, avg_deaths, avg_assists, avg_gold = summoner.get_stats()
await ctx.send(f'Average Kills: {avg_kills}\nAverage Deaths: {avg_deaths}\nAverage Assists: {avg_assists}\nGold Average: {avg_gold}')
@client.command()
async def champs(ctx, name, region='na1'):
summoner = league.Summoner(name=name,region=region)
wins, champions = summoner.get_champs()
    sns.set()
    graph = sns.countplot(y=champions, hue=wins)
    plt.legend(('Loss', 'Win'))
    graph.figure.savefig('graph.png', bbox_inches='tight')
    plt.close(graph.figure)  # free the figure so repeated commands do not overlay plots
    await ctx.send(file=discord.File('graph.png'))
    os.remove('graph.png')
client.run(token)
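# Example invocations in a Discord channel (summoner names and regions are
# illustrative; region defaults to 'na1'):
#   ?rank SomeSummoner
#   ?stats SomeSummoner euw1
#   ?champs SomeSummoner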
|
[
"os.remove",
"discord.File",
"matplotlib.pyplot.legend",
"league.Summoner",
"os.environ.get",
"seaborn.countplot",
"discord.ext.commands.Bot",
"seaborn.set"
] |
[((136, 168), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""?"""'}), "(command_prefix='?')\n", (148, 168), False, 'from discord.ext import commands\n'), ((177, 208), 'os.environ.get', 'os.environ.get', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (191, 208), False, 'import os\n'), ((349, 390), 'league.Summoner', 'league.Summoner', ([], {'name': 'name', 'region': 'region'}), '(name=name, region=region)\n', (364, 390), False, 'import league\n'), ((571, 612), 'league.Summoner', 'league.Summoner', ([], {'name': 'name', 'region': 'region'}), '(name=name, region=region)\n', (586, 612), False, 'import league\n'), ((899, 940), 'league.Summoner', 'league.Summoner', ([], {'name': 'name', 'region': 'region'}), '(name=name, region=region)\n', (914, 940), False, 'import league\n'), ((988, 997), 'seaborn.set', 'sns.set', ([], {}), '()\n', (995, 997), True, 'import seaborn as sns\n'), ((1010, 1046), 'seaborn.countplot', 'sns.countplot', ([], {'y': 'champions', 'hue': 'wins'}), '(y=champions, hue=wins)\n', (1023, 1046), True, 'import seaborn as sns\n'), ((1050, 1077), 'matplotlib.pyplot.legend', 'plt.legend', (["('Loss', 'Win')"], {}), "(('Loss', 'Win'))\n", (1060, 1077), True, 'import matplotlib.pyplot as plt\n'), ((1190, 1212), 'os.remove', 'os.remove', (['"""graph.png"""'], {}), "('graph.png')\n", (1199, 1212), False, 'import os\n'), ((1159, 1184), 'discord.File', 'discord.File', (['"""graph.png"""'], {}), "('graph.png')\n", (1171, 1184), False, 'import discord\n')]
|
import logging
import dill
from sklearn.metrics import calinski_harabasz_score
from topicnet.cooking_machine import Dataset
from topicnet.cooking_machine.models import (
BaseScore as BaseTopicNetScore,
TopicModel
)
from .base_custom_score import BaseCustomScore
_Logger = logging.getLogger()
class CalinskiHarabaszScore(BaseCustomScore):
"""
Uses of Calinski-Harabasz:
https://link.springer.com/article/10.1007/s40815-017-0327-9
"""
def __init__(
self,
name: str,
validation_dataset: Dataset
):
super().__init__(name)
self._score = _CalinskiHarabaszScore(validation_dataset)
class _CalinskiHarabaszScore(BaseTopicNetScore):
def __init__(self, validation_dataset):
super().__init__()
self._dataset = validation_dataset
self._keep_dataset_in_memory = validation_dataset._small_data
self._dataset_internals_folder_path = validation_dataset._internals_folder_path
self._dataset_file_path = validation_dataset._data_path
def call(self, model: TopicModel):
theta = model.get_theta(dataset=self._dataset)
theta.columns = range(len(theta.columns))
objects_clusters = theta.values.argmax(axis=0)
# TODO: or return some numeric?
if len(set(objects_clusters)) == 1:
_Logger.warning(
            'Only one unique cluster! Returning NaN as score value'
)
return float('nan')
return calinski_harabasz_score(theta.T.values, objects_clusters)
    # TODO: this piece is copy-pasted among three different scores
def save(self, path: str) -> None:
dataset = self._dataset
self._dataset = None
with open(path, 'wb') as f:
dill.dump(self, f)
self._dataset = dataset
@classmethod
def load(cls, path: str):
"""
Parameters
----------
path
Returns
-------
an instance of this class
"""
with open(path, 'rb') as f:
score = dill.load(f)
score._dataset = Dataset(
score._dataset_file_path,
internals_folder_path=score._dataset_internals_folder_path,
keep_in_memory=score._keep_dataset_in_memory,
)
return score
|
[
"topicnet.cooking_machine.Dataset",
"sklearn.metrics.calinski_harabasz_score",
"dill.load",
"dill.dump",
"logging.getLogger"
] |
[((285, 304), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (302, 304), False, 'import logging\n'), ((1516, 1573), 'sklearn.metrics.calinski_harabasz_score', 'calinski_harabasz_score', (['theta.T.values', 'objects_clusters'], {}), '(theta.T.values, objects_clusters)\n', (1539, 1573), False, 'from sklearn.metrics import calinski_harabasz_score\n'), ((2130, 2279), 'topicnet.cooking_machine.Dataset', 'Dataset', (['score._dataset_file_path'], {'internals_folder_path': 'score._dataset_internals_folder_path', 'keep_in_memory': 'score._keep_dataset_in_memory'}), '(score._dataset_file_path, internals_folder_path=score.\n _dataset_internals_folder_path, keep_in_memory=score.\n _keep_dataset_in_memory)\n', (2137, 2279), False, 'from topicnet.cooking_machine import Dataset\n'), ((1790, 1808), 'dill.dump', 'dill.dump', (['self', 'f'], {}), '(self, f)\n', (1799, 1808), False, 'import dill\n'), ((2091, 2103), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (2100, 2103), False, 'import dill\n')]
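A standalone sanity check of the metric used above, on synthetic data of my own choosing: sklearn's calinski_harabasz_score expects an (n_samples, n_features) matrix plus one cluster label per sample, which is why the score class transposes theta before scoring documents.
import numpy as np
from sklearn.metrics import calinski_harabasz_score

rng = np.random.default_rng(0)
# Two well-separated synthetic "document" clusters in a 4-dim topic space
X = np.vstack([rng.normal(0.0, 1.0, (50, 4)), rng.normal(5.0, 1.0, (50, 4))])
labels = np.array([0] * 50 + [1] * 50)
print(calinski_harabasz_score(X, labels))  # large value => well separated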
|
from crum import get_current_user
from django.views.generic import TemplateView
from apps.quotas.models import UsageLimitations, Quota, Plans
class PlanOverview(TemplateView):
template_name = "plans_overview.html"
def get_context_data(self, **kwargs):
context = super(PlanOverview, self).get_context_data()
current_user = get_current_user()
current_org = current_user.profile.primary_org
context['limitations'] = UsageLimitations.limitations
quota_obj = Quota.objects.get(pk=current_org.pk)
context['plan_name'] = quota_obj.get_plan_display()
context['limitations_filesharing_F'] = UsageLimitations.limitations['filesharing']['F'] / (1024 * 1024)
context['limitations_filesharing_S'] = UsageLimitations.limitations['filesharing']['S'] / (1024 * 1024)
if quota_obj.plan == Plans.FREE.value:
context['plan_validity_date'] = "unlimited"
else:
context['plan_validity_date'] = quota_obj.paid_until
return context
|
[
"apps.quotas.models.Quota.objects.get",
"crum.get_current_user"
] |
[((349, 367), 'crum.get_current_user', 'get_current_user', ([], {}), '()\n', (365, 367), False, 'from crum import get_current_user\n'), ((505, 541), 'apps.quotas.models.Quota.objects.get', 'Quota.objects.get', ([], {'pk': 'current_org.pk'}), '(pk=current_org.pk)\n', (522, 541), False, 'from apps.quotas.models import UsageLimitations, Quota, Plans\n')]
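For context, the two filesharing entries are byte limits divided down to MiB before reaching the template; a toy illustration of that conversion with made-up limit values:
# Made-up byte limits, only to show the unit conversion performed above
limitations = {'filesharing': {'F': 52428800, 'S': 104857600}}
print(limitations['filesharing']['F'] / (1024 * 1024))  # 50.0 (MiB)
print(limitations['filesharing']['S'] / (1024 * 1024))  # 100.0 (MiB)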
|
#!/usr/bin/env python3.8
# You might want to change the line above to a generic python interpreter; however, this script requires Python 3.8 or above to run correctly.
# You MIGHT get away with older versions... but no warranty here.
import os, sys, io, re
import argparse
import json, csv
import requests
from datetime import datetime,timedelta
import time
import py_helper as ph
from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto
from py_helper import DownloadContent
#
# Purpose : Given a string (MAC Address, OUI Code or "other") search through the IEEE MA-L, MA-M, MA-S and CID OUI Code listings.
#
# This code does not dynamically search the online database, it downloads the CSV's and caches them. (See Usage Notes)
#
# Usage Notes:
#
# This module can be used as a command line tool, a plugin using the "run(**kwargs)/arguments/args Pattern" or just as a module
# for some other python program.
#
# There are "output" calls in this code, Msg/DbgMsg, these are controlled by the DebugMode and CmdLineMode functions of the
# py_helper module. Accordingly, when using as a Module or plugin, you want CmdLineMode(False) somewhere in your code prior to
# calling anything in here to avoid output. DebugMode(True) [or False] controls the debug output. The default is CmdLineMode(False).
#
# There are two dependencies in this module...
#
# 1. My py_helper library needs to be installed for this thing to work.
# 2. The variable "StorageLocation" has to be set to someplace writable by the caller. It can be "/tmp" if need be.
#	The caches do not have to be stored permanently, as they will be dynamically pulled down whenever a
#	query runs and they are not present; however, it's not a good idea to keep pulling these down from IEEE when a
# cache will do. These files don't change much.
#
# So, please consider this dependency when implementing or customizing this code. Don't abuse the download.
#
#
# Global Variables and Constants
#
#
# Originally, this was designed as a plugin for another cmdline tool, but I felt it merited its own module;
# accordingly, there are some plugin artifacts (like the output Msg calls and other plugin infrastructure)
# still in the code. Also, IT CAN still be used as a plugin.
#
VERSION=(0,0,4)
Version = __version__ = ".".join([ str(x) for x in VERSION ])
# Plugins Name
Name = "oui"
# Short Description
ShortDescription = "Get OUI Info from IEEE"
# Description of what this does
Description = "Given a MAC Address or OUI Code, retrieve the registration from IEEE"
# Help, Usage Description
Help = f"{Name} [mac-address|oui-code|search-str]"
# Last Result
LastResult = None
# Internal Functions list
InternalFunctions = list(['run'])
# Tags
Tags = [ "plugins", "command" ]
#
# Non-Plugin Functional Bits
#
# Find Temp Space
__tempspace__ = os.environ["TEMP"] if sys.platform == "win32" else os.environ.get("tmp","/tmp")
# Storage Location (User may want to change this location)
StorageLocation=f"{__tempspace__}/ieee_oui_cache"
# RefreshInterval (in days)
RefreshInterval = 30
# Refresh Cmdline Flag
__Refresh__ = False
# URL To IEEE File
Caches = {
"oui-ma-l" : [ "oui.csv","http://standards-oui.ieee.org/oui/oui.csv",None,None ],
"oui-ma-m" : [ "mam.csv","http://standards-oui.ieee.org/oui28/mam.csv",None,None ],
"oui-ma-s" : [ "oui36.csv","http://standards-oui.ieee.org/oui36/oui36.csv",None,None ],
"cid" : [ "cid.csv","http://standards-oui.ieee.org/cid/cid.csv",None,None ]
}
# Parser
__Parser__ = None
# Parsed Args
__Args__ = None
__Unknowns__ = None
# Expressions and RegEx Objects
MACExpr = "^([a-fA-F\d]{2}[\:\-\s]{0,1}){6}$"
OUIExpr = "^([a-fA-F\d]{2}[\:\-\s]{0,1}){3}$"
MACAddress_re = re.compile(MACExpr)
OUICode_re = re.compile(OUIExpr)
#
# Internal functions
#
# Local Cache Loader
def LoadCache(datafile):
"""Load Local Cache"""
items = list()
if os.path.exists(datafile):
with open(datafile,"r",newline="") as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
reader = csv.DictReader(csvfile,dialect=dialect)
for row in reader:
items.append(row)
else:
Msg(f"Can't find local cache, {datafile}")
return items
# Clean strings
def Strip(mac_oui):
"""Strip extraneous characters out of MAC/OUI"""
data = mac_oui.replace(":","").replace("-","").replace(" ","")
return data
# Get All IEEE Files
def GetCaches(storage_location=None,caches=None):
"""Get IEEE Data Files"""
global StorageLocation, Caches, RefreshInterval, __Refresh__
caches = caches if caches != None else Caches
storage_location = storage_location if storage_location != None else StorageLocation
if not os.path.exists(storage_location):
os.makedirs(storage_location,mode=0o777,exist_ok=True)
dl_files = list()
for key,value in caches.items():
fname,url,cache_location,cache = value
if cache_location == None:
cache_location = os.path.join(storage_location,f"{fname}")
value[2] = cache_location
# Prep td in case cache does not exist yet
td = timedelta(days=RefreshInterval + 1)
if os.path.exists(cache_location):
# If file in fs is > RefreshInternval old, redownload
modtime = datetime.fromtimestamp(os.path.getmtime(cache_location))
localtm = datetime.fromtimestamp(time.mktime(time.localtime()))
td = localtm - modtime
if not os.path.exists(cache_location) or td.days > RefreshInterval or __Refresh__:
Msg(f"{key} does not exist or is out of date, downloading...")
if DownloadContent(url,cache_location):
dl_files.append((key,cache_location))
value[3] = LoadCache(cache_location)
if value[3] == None:
value[3] = LoadCache(value[2])
return dl_files
# Search (The bidness end of this thing)
def Search(search_str):
"""Search The Caches for Matching OUI or string"""
global MACAddress_re, OUICode_re, Name
found = list()
code = None
if MACAddress_re.search(search_str):
# MACAddress
DbgMsg("MAC Address detected")
m = Strip(search_str)
code = m[0:6]
elif OUICode_re.search(search_str):
# OUI
DbgMsg("OUI Code detected")
code = Strip(search_str)
code = code.upper()
DbgMsg(f"{Name} : Beginning Search {search_str} / {code}")
for cache_name,cache_list in Caches.items():
fname,url,cache_file,cache = cache_list
DbgMsg(f"Checking {cache_name}")
if cache == None:
cache_list[3] = cache = LoadCache(cache_file)
for row in cache:
if code and code == row["Assignment"]:
DbgMsg(f"Found row by {code}")
found.append(row)
elif code == None:
for value in row.values():
if search_str in value:
DbgMsg(f"Found row by {search_str}")
found.append(row)
return found
# Build Parser
def BuildParser():
"""Build Parser"""
global __Parser__
if __Parser__ == None:
parser = __Parser__ = argparse.ArgumentParser(prog="oui",description="IEEE OUI Code Lookup")
parser.add_argument("-s",help="Storage Location for Cache files")
parser.add_argument("-r",action="store_true",help="Force refresh")
parser.add_argument("-d","--debug",action="store_true",help="Enter debug mode")
parser.add_argument("-t","--test",action="store_true",help="Execute Test Stub")
parser.add_argument("searchfor",nargs="*",help="OUI Codes to look up")
# Parse Args
def ParseArgs(arguments=None):
"""Parse Arguments"""
global __Parser__, __Args__, __Unknowns__
global StorageLocation, __Refresh__
if arguments:
args,unknowns = __Parser__.parse_known_args()
else:
args,unknowns = __Parser__.parse_known_args()
__Args__ = args
__Unknowns__ = unknowns
# Check Debug Mode Flag
if args.debug:
DebugMode(True)
DbgMsg("Debug Mode Enabled")
# Set Cache Location if supplied
if args.s: StorageLocation = args.s
__Refresh__ = args.r
return args,unknowns
# Init Pattern
def Initialize():
"""Initialize Module"""
BuildParser()
#
# Run Pattern entry point (for plugin model)
#
# Plugin Starting Point
def run(**kwargs):
"""Required Plugin Entry Point"""
global Name, StorageLocation, Caches
DbgMsg(f"Entering {Name}")
# If arguments, a list of cmdline args was supplied
arguments = kwargs.get("arguments",None)
# If args, a argparse namespace was supplied (i.e. pre-processed cmdline args)
args = kwargs.get("args",None)
data = list() # Any returned data, for machine processing (i.e. csv rows)
if arguments:
args,unknowns = ParseArgs(arguments)
if not args:
args,unknowns = ParseArgs()
# Make sure the caches are available (or get them)
downloaded = GetCaches(StorageLocation,Caches)
if args.test:
Test()
return data
# Alrighty then, let's git down to bidness
for arg in args.searchfor:
found = Search(arg)
if len(found) > 0:
data.extend(found)
if len(data) == 0:
Msg("Nothing found")
elif CmdLineMode():
keys = data[0].keys()
Msg(",".join(keys))
for row in data:
values = row.values()
line = ",".join(values)
Msg(line)
return data
#
# Test Stub
#
# Test Stub
def Test():
"""Test Stub"""
pass
#
# Pre Execute Inits
#
Initialize()
#
# Main Loop
#
if __name__ == "__main__":
CmdLineMode(True)
run()
|
[
"py_helper.DbgMsg",
"os.makedirs",
"argparse.ArgumentParser",
"csv.DictReader",
"os.path.exists",
"csv.Sniffer",
"py_helper.DownloadContent",
"os.environ.get",
"datetime.timedelta",
"os.path.getmtime",
"time.localtime",
"py_helper.Msg",
"os.path.join",
"py_helper.DebugMode",
"py_helper.CmdLineMode",
"re.compile"
] |
[((3696, 3715), 're.compile', 're.compile', (['MACExpr'], {}), '(MACExpr)\n', (3706, 3715), False, 'import os, sys, io, re\n'), ((3729, 3748), 're.compile', 're.compile', (['OUIExpr'], {}), '(OUIExpr)\n', (3739, 3748), False, 'import os, sys, io, re\n'), ((2872, 2901), 'os.environ.get', 'os.environ.get', (['"""tmp"""', '"""/tmp"""'], {}), "('tmp', '/tmp')\n", (2886, 2901), False, 'import os, sys, io, re\n'), ((3868, 3892), 'os.path.exists', 'os.path.exists', (['datafile'], {}), '(datafile)\n', (3882, 3892), False, 'import os, sys, io, re\n'), ((6104, 6162), 'py_helper.DbgMsg', 'DbgMsg', (['f"""{Name} : Beginning Search {search_str} / {code}"""'], {}), "(f'{Name} : Beginning Search {search_str} / {code}')\n", (6110, 6162), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((7977, 8003), 'py_helper.DbgMsg', 'DbgMsg', (['f"""Entering {Name}"""'], {}), "(f'Entering {Name}')\n", (7983, 8003), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((9029, 9046), 'py_helper.CmdLineMode', 'CmdLineMode', (['(True)'], {}), '(True)\n', (9040, 9046), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((4122, 4164), 'py_helper.Msg', 'Msg', (['f"""Can\'t find local cache, {datafile}"""'], {}), '(f"Can\'t find local cache, {datafile}")\n', (4125, 4164), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((4651, 4683), 'os.path.exists', 'os.path.exists', (['storage_location'], {}), '(storage_location)\n', (4665, 4683), False, 'import os, sys, io, re\n'), ((4687, 4741), 'os.makedirs', 'os.makedirs', (['storage_location'], {'mode': '(511)', 'exist_ok': '(True)'}), '(storage_location, mode=511, exist_ok=True)\n', (4698, 4741), False, 'import os, sys, io, re\n'), ((5012, 5047), 'datetime.timedelta', 'timedelta', ([], {'days': '(RefreshInterval + 1)'}), '(days=RefreshInterval + 1)\n', (5021, 5047), False, 'from datetime import datetime, timedelta\n'), ((5054, 5084), 'os.path.exists', 'os.path.exists', (['cache_location'], {}), '(cache_location)\n', (5068, 5084), False, 'import os, sys, io, re\n'), ((5907, 5937), 'py_helper.DbgMsg', 'DbgMsg', (['"""MAC Address detected"""'], {}), "('MAC Address detected')\n", (5913, 5937), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((6255, 6287), 'py_helper.DbgMsg', 'DbgMsg', (['f"""Checking {cache_name}"""'], {}), "(f'Checking {cache_name}')\n", (6261, 6287), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((6766, 6837), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""oui"""', 'description': '"""IEEE OUI Code Lookup"""'}), "(prog='oui', description='IEEE OUI Code Lookup')\n", (6789, 6837), False, 'import argparse\n'), ((7567, 7582), 'py_helper.DebugMode', 'DebugMode', (['(True)'], {}), '(True)\n', (7576, 7582), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((7585, 7613), 'py_helper.DbgMsg', 'DbgMsg', (['"""Debug Mode Enabled"""'], {}), "('Debug Mode Enabled')\n", (7591, 7613), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((8691, 8711), 'py_helper.Msg', 'Msg', (['"""Nothing found"""'], {}), "('Nothing found')\n", (8694, 8711), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((8718, 8731), 'py_helper.CmdLineMode', 'CmdLineMode', ([], {}), '()\n', (8729, 8731), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((4028, 4068), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {'dialect': 'dialect'}), '(csvfile, dialect=dialect)\n', (4042, 4068), False, 'import json, csv\n'), ((4888, 4930), 'os.path.join', 'os.path.join', (['storage_location', 'f"""{fname}"""'], {}), "(storage_location, f'{fname}')\n", (4900, 4930), False, 'import os, sys, io, re\n'), ((5396, 5458), 'py_helper.Msg', 'Msg', (['f"""{key} does not exist or is out of date, downloading..."""'], {}), "(f'{key} does not exist or is out of date, downloading...')\n", (5399, 5458), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((5465, 5501), 'py_helper.DownloadContent', 'DownloadContent', (['url', 'cache_location'], {}), '(url, cache_location)\n', (5480, 5501), False, 'from py_helper import DownloadContent\n'), ((6025, 6052), 'py_helper.DbgMsg', 'DbgMsg', (['"""OUI Code detected"""'], {}), "('OUI Code detected')\n", (6031, 6052), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((5179, 5211), 'os.path.getmtime', 'os.path.getmtime', (['cache_location'], {}), '(cache_location)\n', (5195, 5211), False, 'import os, sys, io, re\n'), ((5317, 5347), 'os.path.exists', 'os.path.exists', (['cache_location'], {}), '(cache_location)\n', (5331, 5347), False, 'import os, sys, io, re\n'), ((6425, 6455), 'py_helper.DbgMsg', 'DbgMsg', (['f"""Found row by {code}"""'], {}), "(f'Found row by {code}')\n", (6431, 6455), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((8854, 8863), 'py_helper.Msg', 'Msg', (['line'], {}), '(line)\n', (8857, 8863), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n'), ((3956, 3969), 'csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (3967, 3969), False, 'import json, csv\n'), ((5261, 5277), 'time.localtime', 'time.localtime', ([], {}), '()\n', (5275, 5277), False, 'import time\n'), ((6566, 6602), 'py_helper.DbgMsg', 'DbgMsg', (['f"""Found row by {search_str}"""'], {}), "(f'Found row by {search_str}')\n", (6572, 6602), False, 'from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto\n')]
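A hypothetical library-style invocation consistent with the module's own usage notes (it assumes the script is saved as oui.py, that py_helper is installed, and that the IEEE CSVs use an 'Organization Name' column):
import oui                          # assumes this script was saved as oui.py
from py_helper import CmdLineMode

CmdLineMode(False)                  # suppress Msg() output when used as a module
oui.GetCaches()                     # download or refresh the IEEE CSV caches
rows = oui.Search('00:1A:2B:3C:4D:5E')   # made-up MAC address
for row in rows:
    print(row.get('Organization Name'))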
|
"""
:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described herein and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: This module implements Interface for parsing catalog files
:since: 03/12/13
:author: ssavrim
"""
import os
import yaml
from lxml import etree
from acs.ErrorHandling.AcsConfigException import AcsConfigException
class CatalogParser(object):
"""
This class implements the Catalog Parser interface
It is an entry point to parse xml files and return it as a dictionary
It is a folder (i.e.: _Catalogs/UseCase) composed of
a list of .xml files validated by a .xsd schema
i.e.: _Catalogs/UseCase/usecase.xsd (MANDATORY to check the xml file)
/telephony.xml (MUST respect defined rules in usecase.xsd)
/cws/cws.xml (MUST respect defined rules in usecase.xsd)
"""
ID_ATTRIB = "Id"
DOMAIN_ATTRIB = "Domain"
SUBDOMAIN_ATTRIB = "SubDomain"
FEATURE_ATTRIB = "Feature"
CLASSNAME_ELEM = "ClassName"
DESCRIPTION_ELEM = "Description"
PARAMETERS_ELEM = "Parameters"
PARAMETER_ELEM = "Parameter"
XML_SCHEMA_FILE = ""
YAML_CONFIG_FILE = ""
CATALOG_EXTENTION = ".xml"
def __init__(self, catalog_paths):
"""
Validate optional XML schema (xsd) & YAML logic files
& load their contents
:type catalog_paths: list
:param catalog_paths: catalog paths list.
Catalogs can come from different locations (acs, acs_test_scripts)
"""
self._xml_schema = None
self._yaml_config = None
self._catalog_paths = catalog_paths
if self.XML_SCHEMA_FILE and os.path.isfile(self.XML_SCHEMA_FILE):
self._xml_schema = self.__load_xml_schema(self.XML_SCHEMA_FILE)
if self.YAML_CONFIG_FILE and os.path.isfile(self.YAML_CONFIG_FILE):
self._yaml_config = self.__load_yaml_config(self.YAML_CONFIG_FILE)
def __load_xml_schema(self, xml_schema):
"""
Load xml schema to validate the xml catalog
:type xml_schema: string
:param xml_schema: Xml schema to load
:rtype: etree.XMLSchema
:return: XML schema to validate the xml catalog file
"""
try:
return etree.XMLSchema(etree.parse(xml_schema))
except etree.XMLSchemaParseError as xml_schema_error:
raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR,
"'%s' schema is invalid ! (%s)"
% (xml_schema, str(xml_schema_error)))
def __load_yaml_config(self, yaml_config):
"""
Load YAML configuration to validate
the xml catalog domain, subdomain & feature
:type yaml_config: string
:param yaml_config: YAML config to load
:rtype: dict
:return: dict of complete YAML file
"""
try:
with open(yaml_config) as f:
return yaml.load(f)
except yaml.scanner.ScannerError as yaml_config_error:
raise AcsConfigException(AcsConfigException.YAML_PARSING_ERROR,
"'%s' is invalid ! (%s)"
% (yaml_config, str(yaml_config_error)))
def __check_xml_schema(self, catalog_file):
"""
Validate catalog file regarding loaded xml schema (if any)
Return it as a dictionary
:type catalog_file: string
:param catalog_file: Catalog file to parse
:rtype: etree.XML
        :return: the parsed XML tree of the validated catalog file
"""
try:
# Parse the xml file
catalog_etree = etree.parse(catalog_file)
if catalog_etree and self._xml_schema:
# Apply xml schema on the xml file
self._xml_schema.assertValid(catalog_etree)
except etree.Error as xml_parsing_error:
raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR,
"'%s' catalog is invalid ! (%s)"
% (catalog_file, str(xml_parsing_error)))
return catalog_etree
def _check_xml_logic(self, catalog_file):
"""
Validate catalog file regarding loaded YAML logic
regarding to Domains, SubDomains & Features
Return a boolean setting if xml file logic is valid
:type catalog_file: string
:param catalog_file: Catalog file to parse
:rtype: none
:return: none
"""
# Parse the xml file
catalog_etree = etree.parse(catalog_file)
if catalog_etree and self._yaml_config:
domains = self._yaml_config.get("DOMAINS")
if not domains:
raise AcsConfigException(
AcsConfigException.YAML_PARSING_ERROR,
"'%s' file is invalid ! (DOMAINS section does not exists!)"
% self._yaml_config_file)
for node in catalog_etree.getroot():
item_id = node.attrib[CatalogParser.ID_ATTRIB]
item_domain = node.attrib[CatalogParser.DOMAIN_ATTRIB]
item_subdomain = node.attrib[CatalogParser.SUBDOMAIN_ATTRIB]
item_feature = node.attrib[CatalogParser.FEATURE_ATTRIB]
# Check that logic between Domain, SubDomain & Feature is respected
item_features = self._check_subdomain(catalog_file,
item_id,
item_domain,
item_subdomain,
domains.keys(),
domains.get(item_domain))
self._check_feature(catalog_file,
item_id,
item_subdomain,
item_feature,
item_features)
def _check_subdomain(self,
catalog_file,
item_id,
item_domain,
item_subdomain,
possible_domains,
possible_subdomains):
"""
Validate SubDomain attributes using YAML config
:type catalog_file: string
:param catalog_file: Catalog file
:type item_id: string
:param item_id: Id of the item (UseCase or Test Step)
:type item_domain: string
:param item_domain: Domain to be checked
:type item_subdomain: string
:param item_subdomain: SubDomain to be checked
:type possible_domains: list
:param possible_domains: List of possible domains
:type possible_subdomains: list
:param possible_subdomains: List of possible subdomains
:rtype: list
:return: List of associated features when item_subdomain is validated
"""
subdomains = self._yaml_config.get("SUB_DOMAINS")
if not subdomains:
raise AcsConfigException(
AcsConfigException.YAML_PARSING_ERROR,
"'%s' file is invalid ! (SUB_DOMAINS section does not exists!)"
% self._yaml_config_file)
elif possible_subdomains is None:
raise AcsConfigException(
AcsConfigException.XML_PARSING_ERROR,
"'%s' is invalid ! (Domain %s is not valid for item %s; Expected values are %s)"
% (catalog_file, str(item_domain),
str(item_id), str(possible_domains)))
elif len(possible_subdomains) == 0:
raise AcsConfigException(
AcsConfigException.YAML_PARSING_ERROR,
"'%s' file is invalid ! (no SubDomains exist for Domain %s)"
% (self._yaml_config_file, str(item_domain)))
elif item_subdomain not in possible_subdomains:
raise AcsConfigException(
AcsConfigException.XML_PARSING_ERROR,
"'%s' catalog is invalid ! (SubDomain %s is not valid for item %s; Expected values are %s)"
% (catalog_file, str(item_subdomain),
str(item_id), str(possible_subdomains)))
else:
return subdomains.get(item_subdomain)
def _check_feature(self, catalog_file, item_id, item_subdomain,
item_feature, possible_features):
"""
Validate Feature attributes using YAML config
:type catalog_file: string
:param catalog_file: Catalog file
:type item_id: string
:param item_id: Id of the item (UseCase or Test Step)
:type item_subdomain: string
:param item_subdomain: SubDomain to be checked
:type item_feature: string
:param item_feature: Feature to be checked
:type possible_features: list
:param possible_features: List of possible features
:rtype: none
:return: none
"""
if possible_features is None:
raise AcsConfigException(
AcsConfigException.YAML_PARSING_ERROR,
"'%s' is invalid ! (SubDomain %s does not exist)"
% (self._yaml_config_file, str(item_subdomain)))
elif len(possible_features) > 0 and item_feature not in possible_features:
raise AcsConfigException(
AcsConfigException.XML_PARSING_ERROR,
"'%s' catalog is invalid ! (Feature %s is not valid for item %s; Expected values are %s)"
% (catalog_file, str(item_feature),
str(item_id), str(possible_features)))
elif len(possible_features) == 0 and len(item_feature) > 0:
raise AcsConfigException(
AcsConfigException.XML_PARSING_ERROR,
"'%s' catalog is invalid ! (Feature %s is not valid for item %s; No features expected)"
% (catalog_file, str(item_feature), str(item_id)))
def validate_catalog_file(self, catalog_file):
"""
Validate catalog file regarding xml schema & logic (if any)
Return it as a dictionary
:type catalog_file: string
:param catalog_file: Catalog file to parse
:rtype: etree.XML
        :return: the parsed XML tree of the validated catalog file
"""
catalog_etree = self.__check_xml_schema(catalog_file)
if catalog_etree:
self._check_xml_logic(catalog_file)
return catalog_etree
def parse_catalog_file(self, catalog_file):
"""
Parse catalog and validate regarding loaded xml schema (if any)
Return it as a dictionary
:type catalog_file: string
:param catalog_file: Catalog file to parse
:rtype: dict
:return: Dictionary containing catalog elements
"""
raise AcsConfigException(AcsConfigException.FEATURE_NOT_IMPLEMENTED,
"'parse_catalog_file' is not implemented !")
def parse_catalog_folder(self):
"""
Parse folder(s) which are known to contain catalogs (usecases, test steps, parameters)
If multiple catalogs are in the folder, it will return a concatenated dictionary.
If a key is already defined in the dictionary, raise an AcsConfigException
:type catalog_paths: list
:param catalog_paths: catalog paths list. Catalogs can come from different locations (acs, acs_test_scripts)
:type: dict
:return: Dictionary containing catalog elements
"""
concatenated_dictionary = {}
for catalog_path in self._catalog_paths:
for root, _, catalog_list in os.walk(catalog_path):
for catalog_file in catalog_list:
if catalog_file.lower().endswith(self.CATALOG_EXTENTION):
temp_dictionary = self.parse_catalog_file(os.path.join(root, catalog_file))
                    for item_name in temp_dictionary:
if item_name in concatenated_dictionary:
raise AcsConfigException(AcsConfigException.PROHIBITIVE_BEHAVIOR,
"item '%s' is defined more than one time !" % item_name)
concatenated_dictionary.update(temp_dictionary)
return concatenated_dictionary
|
[
"yaml.load",
"acs.ErrorHandling.AcsConfigException.AcsConfigException",
"os.walk",
"os.path.isfile",
"lxml.etree.parse",
"os.path.join"
] |
[((5600, 5625), 'lxml.etree.parse', 'etree.parse', (['catalog_file'], {}), '(catalog_file)\n', (5611, 5625), False, 'from lxml import etree\n'), ((11979, 12090), 'acs.ErrorHandling.AcsConfigException.AcsConfigException', 'AcsConfigException', (['AcsConfigException.FEATURE_NOT_IMPLEMENTED', '"""\'parse_catalog_file\' is not implemented !"""'], {}), '(AcsConfigException.FEATURE_NOT_IMPLEMENTED,\n "\'parse_catalog_file\' is not implemented !")\n', (11997, 12090), False, 'from acs.ErrorHandling.AcsConfigException import AcsConfigException\n'), ((2657, 2693), 'os.path.isfile', 'os.path.isfile', (['self.XML_SCHEMA_FILE'], {}), '(self.XML_SCHEMA_FILE)\n', (2671, 2693), False, 'import os\n'), ((2809, 2846), 'os.path.isfile', 'os.path.isfile', (['self.YAML_CONFIG_FILE'], {}), '(self.YAML_CONFIG_FILE)\n', (2823, 2846), False, 'import os\n'), ((4681, 4706), 'lxml.etree.parse', 'etree.parse', (['catalog_file'], {}), '(catalog_file)\n', (4692, 4706), False, 'from lxml import etree\n'), ((8180, 8337), 'acs.ErrorHandling.AcsConfigException.AcsConfigException', 'AcsConfigException', (['AcsConfigException.YAML_PARSING_ERROR', '("\'%s\' file is invalid ! (SUB_DOMAINS section does not exists!)" % self.\n _yaml_config_file)'], {}), '(AcsConfigException.YAML_PARSING_ERROR, \n "\'%s\' file is invalid ! (SUB_DOMAINS section does not exists!)" % self.\n _yaml_config_file)\n', (8198, 8337), False, 'from acs.ErrorHandling.AcsConfigException import AcsConfigException\n'), ((12806, 12827), 'os.walk', 'os.walk', (['catalog_path'], {}), '(catalog_path)\n', (12813, 12827), False, 'import os\n'), ((3272, 3295), 'lxml.etree.parse', 'etree.parse', (['xml_schema'], {}), '(xml_schema)\n', (3283, 3295), False, 'from lxml import etree\n'), ((3974, 3986), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (3983, 3986), False, 'import yaml\n'), ((5781, 5934), 'acs.ErrorHandling.AcsConfigException.AcsConfigException', 'AcsConfigException', (['AcsConfigException.YAML_PARSING_ERROR', '("\'%s\' file is invalid ! (DOMAINS section does not exists!)" % self.\n _yaml_config_file)'], {}), '(AcsConfigException.YAML_PARSING_ERROR, \n "\'%s\' file is invalid ! (DOMAINS section does not exists!)" % self.\n _yaml_config_file)\n', (5799, 5934), False, 'from acs.ErrorHandling.AcsConfigException import AcsConfigException\n'), ((13023, 13055), 'os.path.join', 'os.path.join', (['root', 'catalog_file'], {}), '(root, catalog_file)\n', (13035, 13055), False, 'import os\n'), ((13233, 13354), 'acs.ErrorHandling.AcsConfigException.AcsConfigException', 'AcsConfigException', (['AcsConfigException.PROHIBITIVE_BEHAVIOR', '("item \'%s\' is defined more than one time !" % item_name)'], {}), '(AcsConfigException.PROHIBITIVE_BEHAVIOR, \n "item \'%s\' is defined more than one time !" % item_name)\n', (13251, 13354), False, 'from acs.ErrorHandling.AcsConfigException import AcsConfigException\n')]
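parse_catalog_file is left abstract above; a hypothetical concrete subclass might implement it roughly as follows, keying each catalog entry by its Id attribute (the class name and return shape are illustrative, not the actual ACS implementation):
class UseCaseCatalogParser(CatalogParser):
    """Illustrative subclass; schema/logic files left empty on purpose."""
    XML_SCHEMA_FILE = ""
    YAML_CONFIG_FILE = ""

    def parse_catalog_file(self, catalog_file):
        # Validate first (schema + YAML logic), then flatten to a dict
        catalog_etree = self.validate_catalog_file(catalog_file)
        items = {}
        for node in catalog_etree.getroot():
            items[node.attrib[CatalogParser.ID_ATTRIB]] = node
        return items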
|
from rest_framework import serializers
from .models import Category, Comment, Genre, Review, Title
class CategorySerializer(serializers.ModelSerializer):
'''Serializer for Category model'''
class Meta:
fields = ('name', 'slug')
model = Category
lookup_field = 'slug'
class GenreSerializer(serializers.ModelSerializer):
'''Serializer for Genre model'''
class Meta:
fields = ('name', 'slug')
model = Genre
lookup_field = 'slug'
class CategoryReprField(serializers.SlugRelatedField):
    '''Slug-related field that renders Category as a dict'''
def to_representation(self, value):
return {'name': value.name, 'slug': value.slug}
class GenreReprField(serializers.SlugRelatedField):
    '''Slug-related field that renders Genre as a dict'''
def to_representation(self, value):
return {'name': value.name, 'slug': value.slug}
class TitleSerializer(serializers.ModelSerializer):
'''Serializer for Title model.'''
category = CategoryReprField(slug_field='slug',
queryset=Category.objects.all())
genre = GenreReprField(slug_field='slug',
queryset=Genre.objects.all(),
many=True)
class Meta:
fields = (
'id',
'name',
'year',
'rating',
'description',
'genre',
'category',
)
model = Title
class ReviewSerializer(serializers.ModelSerializer):
'''Serializer for Review model. Slug related field author.'''
author = serializers.SlugRelatedField(
slug_field='username',
read_only=True
)
def get_serializer_context(self):
return {'title_id': self.kwargs['title_id'], 'request': self.request}
def validate(self, data):
'''Call the instance's validate() method and
        raise error if user has already added a review for this title.
'''
title_id = self.context.get('request').parser_context['kwargs']['title_id']
if (Review.objects.filter(title_id=title_id, author=self.context['request'].user).exists()
and self.context['request'].method == 'POST'):
raise serializers.ValidationError('This user has already added review for this title')
return data
class Meta:
fields = ('id', 'text', 'author', 'score', 'pub_date',)
model = Review
class CommentSerializer(serializers.ModelSerializer):
'''Serializer for Comment model. Slug related field author.'''
author = serializers.SlugRelatedField(
slug_field='username',
read_only=True
)
class Meta:
model = Comment
fields = ('id', 'text', 'author', 'pub_date')
class EmailSerializer(serializers.Serializer):
'''Email Serializer'''
email = serializers.EmailField(required=True)
|
[
"rest_framework.serializers.SlugRelatedField",
"rest_framework.serializers.EmailField",
"rest_framework.serializers.ValidationError"
] |
[((1583, 1650), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'slug_field': '"""username"""', 'read_only': '(True)'}), "(slug_field='username', read_only=True)\n", (1611, 1650), False, 'from rest_framework import serializers\n'), ((2563, 2630), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'slug_field': '"""username"""', 'read_only': '(True)'}), "(slug_field='username', read_only=True)\n", (2591, 2630), False, 'from rest_framework import serializers\n'), ((2836, 2873), 'rest_framework.serializers.EmailField', 'serializers.EmailField', ([], {'required': '(True)'}), '(required=True)\n', (2858, 2873), False, 'from rest_framework import serializers\n'), ((2222, 2307), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""This user has already added review for this title"""'], {}), "('This user has already added review for this title'\n )\n", (2249, 2307), False, 'from rest_framework import serializers\n')]
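The *ReprField classes accept a slug on write but serialize the full object on read; a minimal sketch of that asymmetry, assuming it runs inside a configured Django project where CategoryReprField is importable and using a plain object as a stand-in for a model instance:
from types import SimpleNamespace

# Plain object standing in for a Category model instance (illustrative only)
category = SimpleNamespace(name='Movies', slug='movies')
field = CategoryReprField(slug_field='slug', read_only=True)
print(field.to_representation(category))  # {'name': 'Movies', 'slug': 'movies'}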
|
from PyQt4 import uic
import uuid
import os
class OTModule(object):
"""
Module abstract class implementation
"""
def __init__(self, name):
        self._unique_id = uuid.uuid1()  #: Module unique identifier
        self._name = name  #: Module name
def save(self, saver):
"""
Save the module values into the saver variable
@param saver: Dictionary used to store the module information
@type saver: Dict
"""
saver['unique_id'] = self._unique_id
saver['name'] = self.name
saver['class'] = self.__class__
def load(self, loader):
"""
Load the module values from the loader variable
@param loader: Dictionary with the stored module information
@type loader: Dict
"""
self.name = loader['name']
self._unique_id = loader['unique_id']
def close(self):
"""
Close the module
"""
pass
############################################################################
############ Properties ####################################################
############################################################################
@property
def name(self): return self._name
@name.setter
def name(self, value): self._name = value
############################################################################
@property
def uid(self): return self._unique_id
|
[
"uuid.uuid1"
] |
[((184, 196), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (194, 196), False, 'import uuid\n')]
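A quick hypothetical round trip through save/load (made-up module name), showing that the dictionary carries both the name and the generated uuid:
module = OTModule('camera-tracker')     # made-up module name
state = {}
module.save(state)                      # state now holds unique_id/name/class

restored = OTModule('placeholder')
restored.load(state)
assert restored.name == 'camera-tracker'
assert restored.uid == module.uid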
|
from datetime import datetime
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import src.config as cfg
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
def main():
connection_rw_size = cfg.CONNECTION_RW_SIZE
timw_rw_size_min = cfg.TIME_RW_SIZE
features_csv = f'../../data/processed/full_ft_netflow_crw_{connection_rw_size}_trw_{timw_rw_size_min}_2.csv'
features_df = pd.read_csv(features_csv)
label = np.array(features_df['VPN'])
baseline_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'VPN']
comb_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'VPN','Tot Pkts', 'TotLen']
time_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'Tot Pkts', 'TotLen', 'VPN']
conn_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'Tot Pkts', 'TotLen', 'VPN']
time_features = features_df.loc[:, 'Time Flow count': 'Time-Rev Pkt Len Tot'].columns
conn_features = features_df.loc[:, 'Con Flow count': 'Conn-Rev Pkt Len Tot'].columns
for tft in time_features:
baseline_drop_table.append(tft)
conn_drop_table.append(tft)
for ctf in conn_features:
baseline_drop_table.append(ctf)
time_drop_table.append(ctf)
rf_baseline_ft = features_df.drop(baseline_drop_table, axis=1)
rf_time_ft = features_df.drop(time_drop_table, axis=1)
rf_conn_ft = features_df.drop(conn_drop_table, axis=1)
rf_comb_ft = features_df.drop(comb_drop_table,axis=1)
baseline_ft_name = list(rf_baseline_ft.columns)
time_ft_name=list(rf_time_ft.columns)
conn_ft_name=list(rf_conn_ft.columns)
comb_ft_name=list(rf_comb_ft.columns)
rf_baseline_ft_array=np.array(rf_baseline_ft)
rf_time_ft_array=np.array(rf_time_ft)
rf_conn_ft_array=np.array(rf_conn_ft)
rf_comb_ft_array=np.array(rf_comb_ft)
baseline_predictions, baseline_y_test, baseline_model=random_forest_classifyer(rf_baseline_ft_array,label)
time_predictions, time_y_test, time_model = random_forest_classifyer(rf_time_ft_array, label)
conn_predictions, conn_y_test, conn_model = random_forest_classifyer(rf_conn_ft_array, label)
comb_predictions, comb_y_test, comb_model = random_forest_classifyer(rf_comb_ft_array, label)
print('///////////////// Baseline /////////////////')
print('\n')
evaluate_rf_results(baseline_predictions,baseline_y_test,baseline_model, rf_baseline_ft_array, label)
feature_importance(baseline_model, baseline_ft_name)
print('///////////////// Time /////////////////')
print('\n')
evaluate_rf_results(time_predictions,time_y_test, time_model, rf_time_ft_array, label)
print('///////////////// Connection /////////////////')
print('\n')
evaluate_rf_results(conn_predictions,conn_y_test,conn_model,rf_conn_ft_array,label)
print('///////////////// Combined /////////////////')
print('\n')
evaluate_rf_results(comb_predictions, comb_y_test, comb_model, rf_comb_ft_array, label)
feature_importance(comb_model,comb_ft_name)
def random_forest_classifyer(feature_data, label, test_size=0.3, random_state=None):
x_train, x_test, y_train, y_test = train_test_split(
feature_data, label, test_size=test_size, random_state=random_state)
random_forest = RandomForestClassifier(n_jobs=2, random_state=random_state)
start_time=datetime.now()
random_forest.fit(x_train, y_train)
print(datetime.now()-start_time)
start_time = datetime.now()
predictions = random_forest.predict(x_test)
print(datetime.now() - start_time)
return predictions, y_test, random_forest
def evaluate_rf_results(predictions, y_test, model, feature_data, label):
start_time = datetime.now()
rfc_cv_score = cross_val_score(model, feature_data, label, cv=10, scoring='roc_auc')
print(datetime.now() - start_time)
start_time = datetime.now()
rfc_cv_score_acc = cross_val_score(model, feature_data, label, cv=10, scoring='accuracy')
print(datetime.now() - start_time)
print("=== Confusion Matrix ===")
start_time = datetime.now()
cf_matrix = confusion_matrix(y_test, predictions)
print(datetime.now() - start_time)
print(cf_matrix)
print('\n')
print("=== Classification Report ===")
start_time = datetime.now()
class_report = classification_report(y_test, predictions)
print(datetime.now() - start_time)
print(class_report)
print('\n')
print("=== All AUC Scores ===")
print(rfc_cv_score)
print('\n')
print("=== Mean AUC Score ===")
print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
print('\n')
print("=== All ACC Scores ===")
print(rfc_cv_score_acc)
print('\n')
print("=== Mean ACC Score ===")
print("Mean ACC Score - Random Forest: ", rfc_cv_score_acc.mean())
return cf_matrix, class_report, rfc_cv_score, rfc_cv_score.mean()
def feature_importance(model, feature_names):
fi = pd.DataFrame({'feature': feature_names,
'importance': model.feature_importances_}). \
sort_values('importance', ascending=False)
print(fi)
return fi
if __name__ == '__main__':
main()
# print(features_connection)
# connection_train_ft, connection_test_ft, connection_train_labels, conection_test_labels=train_test_split(features_connection, label, test_size=0.3, random_state=42)
#
# print(f'Training Features Shape: {connection_train_ft.shape}' )
# print(f'Training Labels Shape:{connection_test_ft.shape}' )
# print(f'Testing Features Shape:{connection_train_labels.shape}')
# print(f'Testing Labels Shape:{conection_test_labels.shape}')
#
# accuracy_list=[]
#
# random_forest= RandomForestClassifier(n_jobs=2)
# # t = tqdm(total=1000)
# # for xx in range(0,1000):
#
# random_forest.fit(connection_train_ft,connection_train_labels)
#
# predictions=random_forest.predict(connection_test_ft)
#
#
#
# matches=0
#
# # print(int(predictions[20]))
# # print(int(conection_test_labels[20]))
#
#
# for x in range(0, predictions.shape[0]):
# if int(predictions[x])==int(conection_test_labels[x]):
# matches+=1
#
# # print(matches/predictions.shape[0])
# accuracy_list.append(matches/predictions.shape[0])
# # t.update(1)
#
# print('///////////////////////////////////')
# print(max(accuracy_list))
# print(min(accuracy_list))
# print(sum(accuracy_list)/len(accuracy_list))
#
# rfc_cv_score = cross_val_score(random_forest, features_connection, label, cv=10, scoring='roc_auc')
# print("=== Confusion Matrix ===")
# print(confusion_matrix(conection_test_labels, predictions))
# print('\n')
# print("=== Classification Report ===")
# print(classification_report(conection_test_labels, predictions))
# print('\n')
# print("=== All AUC Scores ===")
# print(rfc_cv_score)
# print('\n')
# print("=== Mean AUC Score ===")
# print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
#
# fi=pd.DataFrame({'feature': features_connection_names,
# 'importance': random_forest.feature_importances_}).\
# sort_values('importance', ascending = False)
#
# print(fi)
# print("/////////////")
# print(fi.head())
|
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"sklearn.metrics.classification_report",
"numpy.array",
"sklearn.metrics.confusion_matrix",
"datetime.datetime.now"
] |
[((636, 661), 'pandas.read_csv', 'pd.read_csv', (['features_csv'], {}), '(features_csv)\n', (647, 661), True, 'import pandas as pd\n'), ((675, 703), 'numpy.array', 'np.array', (["features_df['VPN']"], {}), "(features_df['VPN'])\n", (683, 703), True, 'import numpy as np\n'), ((1911, 1935), 'numpy.array', 'np.array', (['rf_baseline_ft'], {}), '(rf_baseline_ft)\n', (1919, 1935), True, 'import numpy as np\n'), ((1957, 1977), 'numpy.array', 'np.array', (['rf_time_ft'], {}), '(rf_time_ft)\n', (1965, 1977), True, 'import numpy as np\n'), ((1999, 2019), 'numpy.array', 'np.array', (['rf_conn_ft'], {}), '(rf_conn_ft)\n', (2007, 2019), True, 'import numpy as np\n'), ((2041, 2061), 'numpy.array', 'np.array', (['rf_comb_ft'], {}), '(rf_comb_ft)\n', (2049, 2061), True, 'import numpy as np\n'), ((3376, 3466), 'sklearn.model_selection.train_test_split', 'train_test_split', (['feature_data', 'label'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(feature_data, label, test_size=test_size, random_state=\n random_state)\n', (3392, 3466), False, 'from sklearn.model_selection import train_test_split\n'), ((3491, 3550), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(2)', 'random_state': 'random_state'}), '(n_jobs=2, random_state=random_state)\n', (3513, 3550), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3566, 3580), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3578, 3580), False, 'from datetime import datetime\n'), ((3676, 3690), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3688, 3690), False, 'from datetime import datetime\n'), ((3917, 3931), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3929, 3931), False, 'from datetime import datetime\n'), ((3951, 4020), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'feature_data', 'label'], {'cv': '(10)', 'scoring': '"""roc_auc"""'}), "(model, feature_data, label, cv=10, scoring='roc_auc')\n", (3966, 4020), False, 'from sklearn.model_selection import cross_val_score\n'), ((4077, 4091), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4089, 4091), False, 'from datetime import datetime\n'), ((4115, 4185), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'feature_data', 'label'], {'cv': '(10)', 'scoring': '"""accuracy"""'}), "(model, feature_data, label, cv=10, scoring='accuracy')\n", (4130, 4185), False, 'from sklearn.model_selection import cross_val_score\n'), ((4280, 4294), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4292, 4294), False, 'from datetime import datetime\n'), ((4311, 4348), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4327, 4348), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((4485, 4499), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4497, 4499), False, 'from datetime import datetime\n'), ((4519, 4561), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4540, 4561), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((3631, 3645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3643, 3645), False, 'from datetime import datetime\n'), ((3749, 3763), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3761, 3763), False, 'from datetime import datetime\n'), ((4031, 4045), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4043, 4045), False, 'from datetime import datetime\n'), ((4196, 4210), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4208, 4210), False, 'from datetime import datetime\n'), ((4359, 4373), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4371, 4373), False, 'from datetime import datetime\n'), ((4572, 4586), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4584, 4586), False, 'from datetime import datetime\n'), ((5152, 5239), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature_names, 'importance': model.feature_importances_}"], {}), "({'feature': feature_names, 'importance': model.\n feature_importances_})\n", (5164, 5239), True, 'import pandas as pd\n')]
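For readers without the netflow CSV, the split/fit/cross-validate pattern above boils down to the following self-contained sketch on synthetic data (feature matrix and labels are made up):
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, train_test_split

rng = np.random.default_rng(1)
X = rng.normal(size=(200, 6))                 # synthetic feature matrix
y = (X[:, 0] + X[:, 1] > 0).astype(int)       # synthetic binary label

x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
model = RandomForestClassifier(n_jobs=2, random_state=0).fit(x_train, y_train)
print(model.score(x_test, y_test))                       # hold-out accuracy
print(cross_val_score(model, X, y, cv=10, scoring='roc_auc').mean())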
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Classes to handle AEA configurations."""
import re
from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast
# from aea.helpers.base import generate_fingerprint
DEFAULT_AEA_CONFIG_FILE = "aea-config.yaml"
DEFAULT_SKILL_CONFIG_FILE = "skill.yaml"
DEFAULT_CONNECTION_CONFIG_FILE = "connection.yaml"
DEFAULT_PROTOCOL_CONFIG_FILE = "protocol.yaml"
DEFAULT_PRIVATE_KEY_PATHS = {"fetchai": "", "ethereum": ""}
T = TypeVar("T")
"""
A dependency is a dictionary with the following (optional) keys:
- version: a version specifier(s) (e.g. '==0.1.0').
- index: the PyPI index where to download the package from (default: https://pypi.org)
- git: the URL to the Git repository (e.g. https://github.com/fetchai/agents-aea.git)
- ref: either the branch name, the tag, the commit number or a Git reference (default: 'master').
If the 'git' field is set, the 'version' field will be ignored.
They are supposed to be forwarded to the 'pip' command.
"""
Dependency = dict
"""
A dictionary from package name to dependency data structure (see above).
The package name must satisfy the constraints on Python packages names.
For details, see https://www.python.org/dev/peps/pep-0426/#name.
The main advantage of having a dictionary is that we implicitly filter out dependency duplicates.
We cannot have two items with the same package name since the keys of a YAML object form a set.
"""
Dependencies = Dict[str, Dependency]
class ConfigurationType(Enum):
"""Configuration types."""
AGENT = "agent"
PROTOCOL = "protocol"
CONNECTION = "connection"
SKILL = "skill"
def _get_default_configuration_file_name_from_type(
item_type: Union[str, ConfigurationType]
) -> str:
"""Get the default configuration file name from item type."""
item_type = ConfigurationType(item_type)
if item_type == ConfigurationType.AGENT:
return DEFAULT_AEA_CONFIG_FILE
elif item_type == ConfigurationType.PROTOCOL:
return DEFAULT_PROTOCOL_CONFIG_FILE
elif item_type == ConfigurationType.CONNECTION:
return DEFAULT_CONNECTION_CONFIG_FILE
elif item_type == ConfigurationType.SKILL:
return DEFAULT_SKILL_CONFIG_FILE
else:
raise ValueError("Item type not valid: {}".format(str(item_type)))
class ProtocolSpecificationParseError(Exception):
"""Exception for parsing a protocol specification file."""
class JSONSerializable(ABC):
"""Interface for JSON-serializable objects."""
@property
@abstractmethod
def json(self) -> Dict:
"""Compute the JSON representation."""
@classmethod
def from_json(cls, obj: Dict):
"""Build from a JSON object."""
class Configuration(JSONSerializable, ABC):
"""Configuration class."""
class CRUDCollection(Generic[T]):
"""Interface of a CRUD collection."""
def __init__(self):
"""Instantiate a CRUD collection."""
self._items_by_id = {} # type: Dict[str, T]
def create(self, item_id: str, item: T) -> None:
"""
Add an item.
:param item_id: the item id.
:param item: the item to be added.
:return: None
:raises ValueError: if the item with the same id is already in the collection.
"""
if item_id in self._items_by_id:
raise ValueError("Item with name {} already present!".format(item_id))
else:
self._items_by_id[item_id] = item
def read(self, item_id: str) -> Optional[T]:
"""
Get an item by its name.
:param item_id: the item id.
:return: the associated item, or None if the item id is not present.
"""
return self._items_by_id.get(item_id, None)
def update(self, item_id: str, item: T) -> None:
"""
Update an existing item.
:param item_id: the item id.
:param item: the item to be added.
:return: None
"""
self._items_by_id[item_id] = item
def delete(self, item_id: str) -> None:
"""Delete an item."""
if item_id in self._items_by_id.keys():
del self._items_by_id[item_id]
def read_all(self) -> List[Tuple[str, T]]:
"""Read all the items."""
return [(k, v) for k, v in self._items_by_id.items()]
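# Illustrative (hypothetical) use of CRUDCollection, mirroring the docstrings
# above; create() raises ValueError on duplicate ids:
#
# >>> collection = CRUDCollection()  # type: CRUDCollection[int]
# >>> collection.create("answer", 42)
# >>> collection.read("answer")
# 42
# >>> collection.update("answer", 43)
# >>> collection.read_all()
# [('answer', 43)]
# >>> collection.delete("answer")
# >>> collection.read("answer") is None
# True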
class PublicId(JSONSerializable):
"""This class implement a public identifier.
A public identifier is composed of three elements:
- author
- name
- version
The concatenation of those three elements gives the public identifier:
author/name:version
>>> public_id = PublicId("author", "my_package", "0.1.0")
>>> assert public_id.author == "author"
>>> assert public_id.name == "my_package"
>>> assert public_id.version == "0.1.0"
>>> another_public_id = PublicId("author", "my_package", "0.1.0")
>>> assert hash(public_id) == hash(another_public_id)
>>> assert public_id == another_public_id
"""
AUTHOR_REGEX = r"[a-zA-Z_][a-zA-Z0-9_]*"
PACKAGE_NAME_REGEX = r"[a-zA-Z_][a-zA-Z0-9_]*"
VERSION_REGEX = r"(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?"
PUBLIC_ID_REGEX = r"^({})/({}):({})$".format(
AUTHOR_REGEX, PACKAGE_NAME_REGEX, VERSION_REGEX
)
def __init__(self, author: str, name: str, version: str):
"""Initialize the public identifier."""
self._author = author
self._name = name
self._version = version
@property
def author(self):
"""Get the author."""
return self._author
@property
def name(self):
"""Get the name."""
return self._name
@property
def version(self):
"""Get the version."""
return self._version
@classmethod
def from_str(cls, public_id_string: str) -> "PublicId":
"""
Initialize the public id from the string.
>>> str(PublicId.from_str("author/package_name:0.1.0"))
'author/package_name:0.1.0'
A bad formatted input raises value error:
>>> PublicId.from_str("bad/formatted:input")
Traceback (most recent call last):
...
ValueError: Input 'bad/formatted:input' is not well formatted.
:param public_id_string: the public id in string format.
:return: the public id object.
:raises ValueError: if the string in input is not well formatted.
"""
if not re.match(cls.PUBLIC_ID_REGEX, public_id_string):
raise ValueError(
"Input '{}' is not well formatted.".format(public_id_string)
)
else:
username, package_name, version = re.findall(
cls.PUBLIC_ID_REGEX, public_id_string
)[0][:3]
return PublicId(username, package_name, version)
@property
def json(self) -> Dict:
"""Compute the JSON representation."""
return {"author": self.author, "name": self.name, "version": self.version}
@classmethod
def from_json(cls, obj: Dict):
"""Build from a JSON object."""
return PublicId(obj["author"], obj["name"], obj["version"],)
def __hash__(self):
"""Get the hash."""
return hash((self.author, self.name, self.version))
def __str__(self):
"""Get the string representation."""
return "{author}/{name}:{version}".format(
author=self.author, name=self.name, version=self.version
)
def __eq__(self, other):
"""Compare with another object."""
return (
isinstance(other, PublicId)
and self.author == other.author
and self.name == other.name
and self.version == other.version
)
def __lt__(self, other):
"""Compare two public ids."""
return str(self) < str(other)
ProtocolId = PublicId
SkillId = PublicId
class PackageConfiguration(Configuration, ABC):
"""This class represent a package configuration."""
def __init__(self, name: str, author: str, version: str):
"""Initialize a package configuration."""
self.name = name
self.author = author
self.version = version
@property
def public_id(self) -> PublicId:
"""Get the public id."""
return PublicId(self.author, self.name, self.version)
class ConnectionConfig(PackageConfiguration):
"""Handle connection configuration."""
def __init__(
self,
name: str = "",
author: str = "",
version: str = "",
license: str = "",
class_name: str = "",
protocols: Optional[Set[PublicId]] = None,
restricted_to_protocols: Optional[Set[PublicId]] = None,
excluded_protocols: Optional[Set[PublicId]] = None,
dependencies: Optional[Dependencies] = None,
description: str = "",
**config
):
"""Initialize a connection configuration object."""
super().__init__(name, author, version)
self.license = license
self.fingerprint = ""
self.class_name = class_name
self.protocols = protocols if protocols is not None else []
self.restricted_to_protocols = (
restricted_to_protocols if restricted_to_protocols is not None else set()
)
self.excluded_protocols = (
excluded_protocols if excluded_protocols is not None else set()
)
self.dependencies = dependencies if dependencies is not None else {}
self.description = description
self.config = config
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"author": self.author,
"version": self.version,
"license": self.license,
"fingerprint": self.fingerprint,
"class_name": self.class_name,
"protocols": sorted(map(str, self.protocols)),
"restricted_to_protocols": sorted(map(str, self.restricted_to_protocols)),
"excluded_protocols": sorted(map(str, self.excluded_protocols)),
"dependencies": self.dependencies,
"description": self.description,
"config": self.config,
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
restricted_to_protocols = obj.get("restricted_to_protocols", set())
restricted_to_protocols = {
PublicId.from_str(id_) for id_ in restricted_to_protocols
}
excluded_protocols = obj.get("excluded_protocols", set())
excluded_protocols = {PublicId.from_str(id_) for id_ in excluded_protocols}
dependencies = obj.get("dependencies", {})
protocols = {PublicId.from_str(id_) for id_ in obj.get("protocols", set())}
return ConnectionConfig(
name=cast(str, obj.get("name")),
author=cast(str, obj.get("author")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
class_name=cast(str, obj.get("class_name")),
protocols=cast(Set[PublicId], protocols),
restricted_to_protocols=cast(Set[PublicId], restricted_to_protocols),
excluded_protocols=cast(Set[PublicId], excluded_protocols),
dependencies=cast(Dependencies, dependencies),
description=cast(str, obj.get("description", "")),
**cast(dict, obj.get("config"))
)
class ProtocolConfig(PackageConfiguration):
"""Handle protocol configuration."""
def __init__(
self,
name: str = "",
author: str = "",
version: str = "",
license: str = "",
dependencies: Optional[Dependencies] = None,
description: str = "",
):
"""Initialize a connection configuration object."""
super().__init__(name, author, version)
self.license = license
self.fingerprint = ""
self.dependencies = dependencies if dependencies is not None else {}
self.description = description
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"author": self.author,
"version": self.version,
"license": self.license,
"fingerprint": self.fingerprint,
"dependencies": self.dependencies,
"description": self.description,
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
dependencies = cast(Dependencies, obj.get("dependencies", {}))
return ProtocolConfig(
name=cast(str, obj.get("name")),
author=cast(str, obj.get("author")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
dependencies=dependencies,
description=cast(str, obj.get("description", "")),
)
class HandlerConfig(Configuration):
"""Handle a skill handler configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a handler configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {"class_name": self.class_name, "args": self.args}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return HandlerConfig(class_name=class_name, **obj.get("args", {}))
class BehaviourConfig(Configuration):
"""Handle a skill behaviour configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a behaviour configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {"class_name": self.class_name, "args": self.args}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return BehaviourConfig(class_name=class_name, **obj.get("args", {}))
class ModelConfig(Configuration):
"""Handle a skill model configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a model configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {"class_name": self.class_name, "args": self.args}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return ModelConfig(class_name=class_name, **obj.get("args", {}))
class SkillConfig(PackageConfiguration):
"""Class to represent a skill configuration file."""
def __init__(
self,
name: str = "",
author: str = "",
version: str = "",
license: str = "",
        protocols: Optional[List[PublicId]] = None,
dependencies: Optional[Dependencies] = None,
description: str = "",
):
"""Initialize a skill configuration."""
super().__init__(name, author, version)
self.license = license
self.fingerprint = ""
self.protocols = (
protocols if protocols is not None else []
) # type: List[PublicId]
self.dependencies = dependencies if dependencies is not None else {}
self.description = description
self.handlers = CRUDCollection[HandlerConfig]()
self.behaviours = CRUDCollection[BehaviourConfig]()
self.models = CRUDCollection[ModelConfig]()
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"author": self.author,
"version": self.version,
"license": self.license,
"fingerprint": self.fingerprint,
"protocols": sorted(map(str, self.protocols)),
"dependencies": self.dependencies,
"handlers": {key: h.json for key, h in self.handlers.read_all()},
"behaviours": {key: b.json for key, b in self.behaviours.read_all()},
"models": {key: m.json for key, m in self.models.read_all()},
"description": self.description,
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
name = cast(str, obj.get("name"))
author = cast(str, obj.get("author"))
version = cast(str, obj.get("version"))
license = cast(str, obj.get("license"))
protocols = cast(
List[PublicId],
[PublicId.from_str(id_) for id_ in obj.get("protocols", [])],
)
dependencies = cast(Dependencies, obj.get("dependencies", {}))
description = cast(str, obj.get("description", ""))
skill_config = SkillConfig(
name=name,
author=author,
version=version,
license=license,
protocols=protocols,
dependencies=dependencies,
description=description,
)
for behaviour_id, behaviour_data in obj.get("behaviours", {}).items(): # type: ignore
behaviour_config = BehaviourConfig.from_json(behaviour_data)
skill_config.behaviours.create(behaviour_id, behaviour_config)
for handler_id, handler_data in obj.get("handlers", {}).items(): # type: ignore
handler_config = HandlerConfig.from_json(handler_data)
skill_config.handlers.create(handler_id, handler_config)
for model_id, model_data in obj.get("models", {}).items(): # type: ignore
model_config = ModelConfig.from_json(model_data)
skill_config.models.create(model_id, model_config)
return skill_config
class AgentConfig(PackageConfiguration):
"""Class to represent the agent configuration file."""
def __init__(
self,
agent_name: str = "",
aea_version: str = "",
author: str = "",
version: str = "",
license: str = "",
fingerprint: str = "",
registry_path: str = "",
description: str = "",
logging_config: Optional[Dict] = None,
):
"""Instantiate the agent configuration object."""
super().__init__(agent_name, author, version)
self.agent_name = agent_name
self.aea_version = aea_version
self.license = license
self.fingerprint = fingerprint
self.registry_path = registry_path
self.description = description
self.private_key_paths = CRUDCollection[str]()
self.ledger_apis = CRUDCollection[Dict]()
self.logging_config = logging_config if logging_config is not None else {}
self._default_ledger = None # type: Optional[str]
self._default_connection = None # type: Optional[PublicId]
self.connections = set() # type: Set[PublicId]
self.protocols = set() # type: Set[PublicId]
self.skills = set() # type: Set[PublicId]
if self.logging_config == {}:
self.logging_config["version"] = 1
self.logging_config["disable_existing_loggers"] = False
@property
def default_connection(self) -> str:
"""Get the default connection."""
assert self._default_connection is not None, "Default connection not set yet."
return str(self._default_connection)
@default_connection.setter
def default_connection(self, connection_id: Optional[Union[str, PublicId]]):
"""
Set the default connection.
:param connection_id: the name of the default connection.
:return: None
"""
if connection_id is None:
self._default_connection = None
elif isinstance(connection_id, str):
self._default_connection = PublicId.from_str(connection_id)
else:
self._default_connection = connection_id
@property
def default_ledger(self) -> str:
"""Get the default ledger."""
assert self._default_ledger is not None, "Default ledger not set yet."
return self._default_ledger
@default_ledger.setter
def default_ledger(self, ledger_id: str):
"""
Set the default ledger.
:param ledger_id: the id of the default ledger.
:return: None
"""
self._default_ledger = ledger_id
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"agent_name": self.agent_name,
"aea_version": self.aea_version,
"author": self.author,
"version": self.version,
"license": self.license,
"fingerprint": self.fingerprint,
"registry_path": self.registry_path,
"description": self.description,
"private_key_paths": {
key: path for key, path in self.private_key_paths.read_all()
},
"ledger_apis": {key: config for key, config in self.ledger_apis.read_all()},
"logging_config": self.logging_config,
"default_ledger": self.default_ledger,
"default_connection": self.default_connection,
"connections": sorted(map(str, self.connections)),
"protocols": sorted(map(str, self.protocols)),
"skills": sorted(map(str, self.skills)),
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
agent_config = AgentConfig(
agent_name=cast(str, obj.get("agent_name")),
aea_version=cast(str, obj.get("aea_version")),
author=cast(str, obj.get("author")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
registry_path=cast(str, obj.get("registry_path")),
description=cast(str, obj.get("description", "")),
logging_config=cast(Dict, obj.get("logging_config", {})),
)
for crypto_id, path in obj.get("private_key_paths", {}).items(): # type: ignore
agent_config.private_key_paths.create(crypto_id, path)
for ledger_id, ledger_data in obj.get("ledger_apis", {}).items(): # type: ignore
agent_config.ledger_apis.create(ledger_id, ledger_data)
# parse connection public ids
connections = set(
map(lambda x: PublicId.from_str(x), obj.get("connections", []))
)
agent_config.connections = cast(Set[PublicId], connections)
# parse protocol public ids
protocols = set(map(lambda x: PublicId.from_str(x), obj.get("protocols", [])))
agent_config.protocols = cast(Set[PublicId], protocols)
# parse skills public ids
skills = set(map(lambda x: PublicId.from_str(x), obj.get("skills", [])))
agent_config.skills = cast(Set[PublicId], skills)
# set default connection
default_connection_name = obj.get("default_connection", None)
agent_config.default_connection = default_connection_name
default_ledger_id = obj.get("default_ledger", None)
agent_config.default_ledger = default_ledger_id
return agent_config
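if __name__ == "__main__":
    # Added usage sketch (not part of the original module); the ledger id and
    # the connection public id below are placeholders, not shipped defaults.
    _agent = AgentConfig(agent_name="my_agent", author="author", version="0.1.0")
    _agent.default_ledger = "ledger_id"
    _agent.default_connection = "author/stub:0.1.0"
    # Round-trip through the JSON representation and back.
    assert AgentConfig.from_json(_agent.json).agent_name == "my_agent"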
class SpeechActContentConfig(Configuration):
"""Handle a speech_act content configuration."""
def __init__(self, **args):
"""Initialize a speech_act content configuration."""
self.args = args # type: Dict[str, str]
self._check_consistency()
def _check_consistency(self):
"""Check consistency of the args."""
for content_name, content_type in self.args.items():
if type(content_name) is not str or type(content_type) is not str:
raise ProtocolSpecificationParseError(
"Contents' names and types must be string."
)
# Check each content definition key/value (i.e. content name/type) is not empty
if content_name == "" or content_type == "":
raise ProtocolSpecificationParseError(
"Contents' names and types cannot be empty."
)
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return self.args
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
return SpeechActContentConfig(**obj)
class ProtocolSpecification(ProtocolConfig):
"""Handle protocol specification."""
def __init__(
self,
name: str = "",
author: str = "",
version: str = "",
license: str = "",
description: str = "",
):
"""Initialize a protocol specification configuration object."""
super().__init__(name, author, version, license, description=description)
self.speech_acts = CRUDCollection[SpeechActContentConfig]()
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"author": self.author,
"version": self.version,
"license": self.license,
"description": self.description,
"speech_acts": {
key: speech_act.json for key, speech_act in self.speech_acts.read_all()
},
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
protocol_specification = ProtocolSpecification(
name=cast(str, obj.get("name")),
author=cast(str, obj.get("author")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
description=cast(str, obj.get("description", "")),
)
for speech_act, speech_act_content in obj.get("speech_acts", {}).items(): # type: ignore
speech_act_content_config = SpeechActContentConfig.from_json(
speech_act_content
)
protocol_specification.speech_acts.create(
speech_act, speech_act_content_config
)
protocol_specification._check_consistency()
return protocol_specification
def _check_consistency(self):
"""Validate the correctness of the speech_acts."""
if len(self.speech_acts.read_all()) == 0:
raise ProtocolSpecificationParseError(
"There should be at least one performative defined in the speech_acts."
)
content_dict = {}
for performative, speech_act_content_config in self.speech_acts.read_all():
if type(performative) is not str:
raise ProtocolSpecificationParseError(
"A 'performative' is not specified as a string."
)
if performative == "":
raise ProtocolSpecificationParseError(
"A 'performative' cannot be an empty string."
)
for content_name, content_type in speech_act_content_config.args.items():
if content_name in content_dict.keys():
if content_type != content_dict[content_name]:
raise ProtocolSpecificationParseError(
"The content '{}' appears more than once with different types in speech_acts.".format(
content_name
)
)
content_dict[content_name] = content_type
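if __name__ == "__main__":
    # Added round-trip sketch (not part of the original module): serialize a
    # SkillConfig to JSON and rebuild it; all field values are placeholders.
    _skill = SkillConfig(name="my_skill", author="author", version="0.1.0")
    assert SkillConfig.from_json(_skill.json).public_id == _skill.public_id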
|
[
"typing.cast",
"typing.TypeVar",
"re.findall",
"re.match"
] |
[((1292, 1304), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1299, 1304), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((23671, 23703), 'typing.cast', 'cast', (['Set[PublicId]', 'connections'], {}), '(Set[PublicId], connections)\n', (23675, 23703), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((23861, 23891), 'typing.cast', 'cast', (['Set[PublicId]', 'protocols'], {}), '(Set[PublicId], protocols)\n', (23865, 23891), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((24038, 24065), 'typing.cast', 'cast', (['Set[PublicId]', 'skills'], {}), '(Set[PublicId], skills)\n', (24042, 24065), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((7358, 7405), 're.match', 're.match', (['cls.PUBLIC_ID_REGEX', 'public_id_string'], {}), '(cls.PUBLIC_ID_REGEX, public_id_string)\n', (7366, 7405), False, 'import re\n'), ((12050, 12080), 'typing.cast', 'cast', (['Set[PublicId]', 'protocols'], {}), '(Set[PublicId], protocols)\n', (12054, 12080), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((12118, 12162), 'typing.cast', 'cast', (['Set[PublicId]', 'restricted_to_protocols'], {}), '(Set[PublicId], restricted_to_protocols)\n', (12122, 12162), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((12195, 12234), 'typing.cast', 'cast', (['Set[PublicId]', 'excluded_protocols'], {}), '(Set[PublicId], excluded_protocols)\n', (12199, 12234), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((12261, 12293), 'typing.cast', 'cast', (['Dependencies', 'dependencies'], {}), '(Dependencies, dependencies)\n', (12265, 12293), False, 'from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((7588, 7637), 're.findall', 're.findall', (['cls.PUBLIC_ID_REGEX', 'public_id_string'], {}), '(cls.PUBLIC_ID_REGEX, public_id_string)\n', (7598, 7637), False, 'import re\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # First look at our dataset
#
# In this notebook, we will look at the necessary steps required before any
# machine learning takes place. It involves:
#
# * loading the data;
# * looking at the variables in the dataset, in particular, differentiate
# between numerical and categorical variables, which need different
# preprocessing in most machine learning workflows;
# * visualizing the distribution of the variables to gain some insights into
# the dataset.
# %% [markdown]
# ## Loading the adult census dataset
#
# We will use data from the "Current Population adult_census" from 1994 that we
# downloaded from [OpenML](http://openml.org/).
#
# We use pandas to read this dataset.
#
# ```{note}
# [Pandas](https://pandas.pydata.org/) is a Python library used for
# manipulating 1- and 2-dimensional structured data.
# ```
# %%
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
# %% [markdown]
# We can look at the OpenML webpage to learn more about this dataset:
# <http://www.openml.org/d/1590>
#
# The goal with this data is to predict whether a person earns over 50K a year
# from heterogeneous data such as age, employment, education, family
# information, etc.
# %% [markdown]
# ## The variables (columns) in the dataset
#
# The data are stored in a pandas dataframe. A dataframe is a type of
# structured data organized in 2 dimensions. This type of data is also
# referred to as tabular data.
#
# Each row represents a record. In the field of machine learning or
# descriptive statistics, the terms commonly used to refer to rows are
# "sample", "instance", or "observation".
#
# Each column represents a type of information collected. In the fields of
# machine learning and descriptive statistics, the terms commonly used to
# refer to columns are "feature", "variable", "attribute", or "covariate".
# %%
adult_census.head() # Print the first few lines of our dataframe
# %% [markdown]
# The column named **class** is our target variable (i.e., the variable which
# we want to predict). The two possible classes are `<=50K` (low-revenue) and
# `>50K` (high-revenue). The resulting prediction problem is therefore a
# binary classification problem; we will use the other columns as input
# variables for our model.
# %%
target_column = 'class'
adult_census[target_column].value_counts()
# %% [markdown]
# ```{note}
# Classes are slightly imbalanced, meaning there are more samples of one or
# more classes compared to others. Class imbalance happens often in practice
# and may need special techniques when building a predictive model.
#
# For example in a medical setting, if we are trying to predict whether
# subjects will develop a rare disease, there will be a lot more healthy
# subjects than ill subjects in the dataset.
# ```
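# %%
# Added illustration (not in the original notebook): `normalize=True` turns
# the counts into proportions, making the class imbalance easier to read.
adult_census[target_column].value_counts(normalize=True)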
# %% [markdown]
# The dataset contains both numerical and categorical data. Numerical values
# take continuous values, for example `age`. Categorical values can have a
# finite number of values, for example `native-country`.
# %%
numerical_columns = [
'age', 'education-num', 'capital-gain', 'capital-loss',
'hours-per-week']
categorical_columns = [
'workclass', 'education', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'native-country']
all_columns = numerical_columns + categorical_columns + [
target_column]
adult_census = adult_census[all_columns]
# %% [markdown]
# Note that for simplicity, we have ignored the "fnlwgt" (final weight) column
# that was crafted by the creators of the dataset when sampling the dataset to
# be representative of the full census database.
# %% [markdown]
# We can check the number of samples and the number of columns available in
# the dataset:
# %%
print(f"The dataset contains {adult_census.shape[0]} samples and "
f"{adult_census.shape[1]} columns")
# %% [markdown]
# We can compute the number of features by counting the number of columns and
# subtracting 1, since one of the columns is the target.
# %%
print(f"The dataset contains {adult_census.shape[1] - 1} features.")
# %% [markdown]
# ## Visual inspection of the data
# Before building a predictive model, it is a good idea to look at the data:
#
# * maybe the task you are trying to achieve can be solved without machine
# learning;
# * you need to check that the information you need for your task is actually
# present in the dataset;
# * inspecting the data is a good way to find peculiarities. These can
#   arise during data collection (for example, a malfunctioning sensor or
#   missing values), or from the way the data is processed afterwards (for
#   example capped values).
# %% [markdown]
# Let's look at the distribution of individual features, to get some insights
# about the data. We can start by plotting histograms; note that this only
# works for features containing numerical values:
# %%
_ = adult_census.hist(figsize=(20, 14))
# %% [markdown]
# ```{tip}
# In this cell, we are using the following pattern: `_ = func()`. It assigns
# the output of `func()` to the variable called `_`. By convention, in Python
# `_` serves as a "garbage" variable to store results that we are not
# interested in.
# ```
#
# We can already make a few comments about some of the variables:
#
# * `age`: there are not that many points for `age > 70`. The dataset
#   description does indicate that retired people have been filtered out
#   (`hours-per-week > 0`);
# * `education-num`: peaks at 10 and 13; it is hard to tell what they
#   correspond to without looking further. We'll do that later in this
#   notebook;
# * `hours-per-week` peaks at 40; this was very likely the standard number of
#   working hours at the time of the data collection;
# * most values of `capital-gain` and `capital-loss` are close to zero (this
#   is quantified in the cell below).
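# %%
# Added check (illustrative): the proportion of exact zeros in
# `capital-gain` and `capital-loss` confirms the last observation above.
(adult_census[['capital-gain', 'capital-loss']] == 0).mean()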
# %% [markdown]
# For categorical variables, we can look at the distribution of values:
# %%
adult_census['sex'].value_counts()
# %%
adult_census['education'].value_counts()
# %% [markdown]
# As noted above, `education-num` distribution has two clear peaks around 10
# and 13. It would be reasonable to expect that `education-num` is the number
# of years of education.
#
# Let's look at the relationship between `education` and `education-num`.
# %%
pd.crosstab(index=adult_census['education'],
columns=adult_census['education-num'])
# %% [markdown]
# This shows that `education` and `education-num` give the same
# information. For example, `education-num=2` is equivalent to
# `education='1st-4th'`. In practice that means we can remove `education-num`
# without losing information (the cell below verifies this mapping). Note
# that having redundant (or highly correlated) columns can be a problem for
# machine learning algorithms.
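# %%
# Added check (illustrative): every `education` level maps to exactly one
# `education-num` value, confirming that the two columns are redundant.
adult_census.groupby('education')['education-num'].nunique()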
# %% [markdown]
# ```{note}
# In the upcoming notebooks, we will only keep the `education` variable,
# excluding the `education-num` variable.
# ```
# %% [markdown]
# Another way to inspect the data is to do a `pairplot` and show how each
# variable differs according to our target, `class`. Plots along the diagonal
# show the distribution of individual variables for each `class`. The plots on
# the off-diagonal can reveal interesting interactions between variables.
# %%
import seaborn as sns
n_samples_to_plot = 5000
columns = ['age', 'education-num', 'hours-per-week']
_ = sns.pairplot(data=adult_census[:n_samples_to_plot], vars=columns,
hue=target_column, plot_kws={'alpha': 0.2},
height=3, diag_kind='hist', diag_kws={'bins': 30})
# %% [markdown]
#
# By looking at the data you could infer some hand-written rules to predict
# the class:
#
# * if you are young (less than 25 years old roughly), you are in the
#   `<=50K` class;
# * if you are old (more than 70 years old roughly), you are in the
#   `<=50K` class;
# * if you work part-time (less than 40 hours per week roughly), you are in
#   the `<=50K` class.
#
# These hand-written rules could work reasonably well without the need for any
# machine learning. Note however that it is not very easy to create rules for
# the region `40 < hours-per-week < 60` and `30 < age < 70`. We can hope that
# machine learning can help in this region. Also note that visualization can
# help creating hand-written rules but is limited to 2 dimensions (maybe 3
# dimensions), whereas machine learning models can build models in
# high-dimensional spaces.
#
# Another thing worth mentioning in this plot: if you are young (less than 25
# years old roughly) or old (more than 70 years old roughly) you tend to work
# less. This is a non-linear relationship between age and hours per week.
# Linear machine learning models can only capture linear interactions, so
# this may be a factor when deciding which model to choose.
#
# In a machine-learning setting, an algorithm automatically creates the
# "rules" in order to make predictions on new data.
# %% [markdown]
# The plot below shows the rules of a simple model, called a decision tree.
# We will explain how this model works in a later notebook; for now let us
# just consider the model predictions when trained on this dataset:
#
# 
#
# The background color in each area represents the probability of the class
# `high-income` as estimated by the model. Values towards 0 (dark blue)
# indicate that the model predicts `low-income` with a high probability.
# Values towards 1 (dark orange) indicate that the model predicts
# `high-income` with a high probability. Values towards 0.5 (white) indicate
# that the model is not very sure about its prediction.
#
# Looking at the plot, here is what we can gather:
#
# * In the region `age < 28.5` (left region) the prediction is `low-income`.
# The dark blue color indicates that the model is quite sure about its
# prediction.
# * In the region `age > 28.5 AND hours-per-week < 40.5`
#   (bottom-right region), the prediction is `low-income`. Note that the blue
#   is a bit lighter than for the left region, which means that the algorithm
#   is not as certain in this region.
# * In the region `age > 28.5 AND hours-per-week > 40.5` (top-right region),
#   the prediction is `low-income`. However, the probability of the class
#   `low-income` is very close to 0.5, which means the model is not sure at
#   all about its prediction.
#
# It is interesting to see that a simple model creates rules similar to the
# ones that we could have created by hand (a minimal fitting sketch follows
# below). Note that machine learning is really interesting when creating
# rules by hand is not straightforward, for example because we are in high
# dimension (many features) or because there are no simple and obvious rules
# that separate the two classes, as in the top-right region.
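# %%
# Added sketch (not part of the original notebook): fit a shallow decision
# tree on the two plotted features to mimic the rules discussed above. The
# depth of 2 is an assumption chosen to mirror the two splits shown.
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(max_depth=2)
_ = tree.fit(adult_census[['age', 'hours-per-week']],
             adult_census[target_column])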
# %% [markdown]
#
# In this notebook we have:
#
# * loaded the data from a CSV file using `pandas`;
# * looked at the different kind of variables to differentiate between
# categorical and numerical variables;
# * inspected the data with `pandas` and `seaborn`. Data inspection can allow
# you to decide whether using machine learning is appropriate for your data
# and to highlight potential peculiarities in your data.
#
# Ideas which will be discussed more in details later:
#
# * if your target variable is imbalanced (e.g., you have more samples from one
# target category than another), you may need special techniques for training
# and evaluating your machine learning model;
# * having redundant (or highly correlated) columns can be a problem for
# some machine learning algorithms;
# * contrary to decision trees, linear models can only capture linear
#   interactions, so be aware of non-linear relationships in your data.
|
[
"pandas.read_csv",
"pandas.crosstab",
"seaborn.pairplot"
] |
[((1160, 1203), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/adult-census.csv"""'], {}), "('../datasets/adult-census.csv')\n", (1171, 1203), True, 'import pandas as pd\n'), ((6502, 6590), 'pandas.crosstab', 'pd.crosstab', ([], {'index': "adult_census['education']", 'columns': "adult_census['education-num']"}), "(index=adult_census['education'], columns=adult_census[\n 'education-num'])\n", (6513, 6590), True, 'import pandas as pd\n'), ((7548, 7717), 'seaborn.pairplot', 'sns.pairplot', ([], {'data': 'adult_census[:n_samples_to_plot]', 'vars': 'columns', 'hue': 'target_column', 'plot_kws': "{'alpha': 0.2}", 'height': '(3)', 'diag_kind': '"""hist"""', 'diag_kws': "{'bins': 30}"}), "(data=adult_census[:n_samples_to_plot], vars=columns, hue=\n target_column, plot_kws={'alpha': 0.2}, height=3, diag_kind='hist',\n diag_kws={'bins': 30})\n", (7560, 7717), True, 'import seaborn as sns\n')]
|
import pygame
# Game constants
MAX_TIRS = 10  # maximum number of cannonballs on screen
MAX_ALIEN = 10
PROBA_ALIEN = 22  # probability that an alien appears
NOUVEL_ALIEN = 12  # screen refreshes between each new alien
ECRAN = pygame.Rect(0, 0, 1825, 900)
SONS = True  # set to True to enable sound
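if __name__ == '__main__':
    # Added illustration: Rect.clamp returns a copy of the rectangle moved
    # to fit entirely inside ECRAN, handy for keeping sprites on screen.
    boulet = pygame.Rect(1900, 950, 10, 10)
    print(boulet.clamp(ECRAN))  # -> <rect(1815, 890, 10, 10)>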
|
[
"pygame.Rect"
] |
[((238, 266), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', '(1825)', '(900)'], {}), '(0, 0, 1825, 900)\n', (249, 266), False, 'import pygame\n')]
|
# -*- coding: utf-8 -*-
"""
Read Noise Calculation Class
============================
This module provides the Read_Noise_Calculation class. This class calculates
the read noise of the SPARC4 EMCCDs as a function of their operation mode.
The calculations are based on a series of characterizations of the SPARC4
cameras. For the conventional mode, the respective read noise value is read
from the Tabelas_Valores_Ruido_Leitura spreadsheet. For the EM mode, the read
noise is obtained by interpolating the data in the respective spreadsheet, as
a function of the EM gain.
"""
# <NAME>.
# 08/10/2019.
import openpyxl
from scipy.interpolate import interp1d
class Read_Noise_Calculation:
"""Read Noise Calculation Class.
Parameters
----------
ccd_operation_mode: dictionary
A dictionary with the parameter of the CCD operation mode.
em_mode : [0, 1]
CCD Electron Multiplying Mode
em_gain : float
CCD Electron Multiplying gain
hss : [0.1, 1, 10, 20, 30]
        Horizontal Shift Speed of the pixels
preamp : [1, 2]
        Pre-amplifier gain
binn : [1, 2]
Binning of the pixels
directory : string
Directory of the spreadsheet with the read noise of the CCD
"""
def __init__(self, ccd_operation_mode, directory):
"""Initialize the class."""
self.em_mode = ccd_operation_mode['em_mode']
self.em_gain = ccd_operation_mode['em_gain']
self.hss = ccd_operation_mode['hss']
self.preamp = ccd_operation_mode['preamp']
self.binn = ccd_operation_mode['binn']
self.directory = directory
def get_operation_mode(self):
"""Print the operation mode on the screen."""
print('em_mode = ', self.em_mode)
print('em_gain = ', self.em_gain)
print('hss = ', self.hss)
print('preamp = ', self.preamp)
print('binn = ', self.binn)
def calculate_read_noise(self):
"""Calculate the read noise of the CCD.
        For the conventional mode, the read noise values from the
        Read_noise_and_gain_values spreadsheet are used.
        For the EM mode, the read noise is obtained through an interpolation
        of the values presented in the respective spreadsheet, as a function
        of the CCD EM gain.
"""
if self.em_mode == 0:
self._calculate_read_noise_conventional_mode()
if self.em_mode == 1:
self._calculate_read_noise_em_mode()
return self.read_noise
def _calculate_read_noise_conventional_mode(self):
"""Calculate the read noise for the conventional mode."""
indice_tab = 0
if self.hss == 1:
if self.preamp == 1:
indice_tab = 19
if self.preamp == 2:
indice_tab = 21
if self.hss == 0.1:
if self.preamp == 1:
indice_tab = 23
if self.preamp == 2:
indice_tab = 25
if self.binn == 2:
indice_tab += 1
path = r'code/RNC/spreadsheet' \
+ '/' + self.directory + '/' + 'Read_noise_and_gain_values.xlsx'
spreadsheet = openpyxl.load_workbook(path).active
self.read_noise = spreadsheet.cell(indice_tab, 6).value
def _calculate_read_noise_em_mode(self):
"""Calculate the read noise for the EM mode."""
tab_name = 'code/RNC/spreadsheet' + '/' + self.directory + '/' + 'RN_PA'\
+ str(int(self.preamp)) + 'B' + str(int(self.binn))\
+ 'HSS' + str(int(self.hss)) + '.xlsx'
spreadsheet = list(openpyxl.load_workbook(tab_name).active.values)
column_em_gain = [value[0] for value in spreadsheet[1:12]]
column_noise = [value[1] for value in spreadsheet[1:12]]
f = interp1d(column_em_gain, column_noise)
read_noise = f(self.em_gain)
self.read_noise = float(read_noise)
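if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; the operation-mode values
    # and the spreadsheet directory name are assumptions, not shipped defaults).
    ccd_mode = {'em_mode': 0, 'em_gain': 1, 'hss': 1, 'preamp': 1, 'binn': 1}
    rnc = Read_Noise_Calculation(ccd_mode, directory='CCD_spreadsheets')
    rnc.get_operation_mode()
    print(rnc.calculate_read_noise())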
|
[
"scipy.interpolate.interp1d",
"openpyxl.load_workbook"
] |
[((3817, 3855), 'scipy.interpolate.interp1d', 'interp1d', (['column_em_gain', 'column_noise'], {}), '(column_em_gain, column_noise)\n', (3825, 3855), False, 'from scipy.interpolate import interp1d\n'), ((3198, 3226), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['path'], {}), '(path)\n', (3220, 3226), False, 'import openpyxl\n'), ((3625, 3657), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['tab_name'], {}), '(tab_name)\n', (3647, 3657), False, 'import openpyxl\n')]
|
#!/usr/bin/python3
import sys
# Echo standard input to standard output, lowercased; each line keeps the
# trailing newline it arrived with.
for line in sys.stdin:
    sys.stdout.write(line.lower())
# Emit one final newline once all input has been consumed.
sys.stdout.write("\n")
|
[
"sys.stdout.write"
] |
[((116, 138), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (132, 138), False, 'import sys\n'), ((91, 114), 'sys.stdout.write', 'sys.stdout.write', (['lline'], {}), '(lline)\n', (107, 114), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'dtmm'
copyright = '2018, <NAME>'
author = '<NAME>'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.7.0'
numfig = True
import sys,os
sys.path.insert(0, os.path.abspath(os.path.split(__file__)[0]))
# custom matplotlib plot_template
if sys.argv[2] in ('latex', 'latexpdf'):
plot_template = """
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.pdf
{%- for option in options %}
{{ option }}
{% endfor %}
\t{{caption}}
{% endfor %}
"""
else:
plot_template = """
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
\t{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }} {% if source_link or (html_show_formats and not multi_image) %} (
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% endfor %}
"""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.githubpages',
"sphinx.ext.doctest",
"sphinx.ext.imgmath",
"sphinx.ext.autodoc",
'sphinx.ext.napoleon',
#"sphinx.ext.jsmath",
#'matplotlib.sphinxext.plot_directive',
'plot_directive'
]
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.githubpages',
"sphinx.ext.imgmath",
'sphinx.ext.napoleon',
"sphinx.ext.doctest",
'sphinx.ext.inheritance_diagram',
'autoapi.extension',
'matplotlib.sphinxext.plot_directive'
]
autoapi_keep_files = False
napoleon_numpy_docstring = True
autoapi_dirs = ['../dtmm']
autoapi_options = ['members', 'undoc-members', 'show-inheritance', 'special-members']
autoapi_options = ['members', 'show-inheritance']
autoapi_ignore = ["*/test/*.py","*/test"]
numfig = True
import os
doctest_global_setup = '''
try:
import numpy as np
import dtmm
from dtmm.fft import *
from dtmm.color import *
from dtmm.data import *
from dtmm.tmm import *
from dtmm.jones4 import *
from dtmm.jones import *
from dtmm.window import *
from dtmm.linalg import *
from dtmm.rotation import *
except ImportError:
pass
field_in = (np.ones((1,4,6,6))+0j, np.array((3.,)), 100)
field_data_in = field_in
field_data_out = (np.ones((1,4,6,6))+0j, np.array((3.,)), 100)
field_bulk_data = (np.ones((1,2,1,4,6,6))+0j, np.array((3.,)), 100)
field = field_in
optical_data = np.array((1.,)), np.ones((1,6,6,3))*2+0j, np.zeros((1,6,6,3))
data = optical_data
NLAYERS, HEIGHT, WIDTH = 1,6,6
WAVELENGTHS = [500]
PIXELSIZE = 200
'''
plot_working_directory = "examples"#os.path.abspath("../examples")
imgmath_image_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
|
[
"os.path.split"
] |
[((946, 969), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (959, 969), False, 'import os\n')]
|
import math
K = int(input())
ans = 0
# Sum gcd(i, j, k) over all ordered triples 1 <= i, j, k <= K by iterating
# over sorted triples i <= j <= k and weighting by the number of permutations.
for i in range(1, K+1):
    for j in range(i, K+1):
        for k in range(j, K+1):
            if (i == j) and (j == k):
                # all three equal: 1 ordered arrangement
                ans += math.gcd(i, math.gcd(j, k))
            elif (i == j) or (j == k):
                # exactly two equal: 3 ordered arrangements
                ans += 3 * math.gcd(i, math.gcd(j, k))
            else:
                # all distinct: 6 ordered arrangements
                ans += 6 * math.gcd(i, math.gcd(j, k))
print(ans)
|
[
"math.gcd"
] |
[((194, 208), 'math.gcd', 'math.gcd', (['j', 'k'], {}), '(j, k)\n', (202, 208), False, 'import math\n'), ((288, 302), 'math.gcd', 'math.gcd', (['j', 'k'], {}), '(j, k)\n', (296, 302), False, 'import math\n'), ((361, 375), 'math.gcd', 'math.gcd', (['j', 'k'], {}), '(j, k)\n', (369, 375), False, 'import math\n')]
|
from . import models
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
import json
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
# Endpoint for the home page
def index(request):
events=[]
_userID=request.session.get("user",0)
if(_userID == 0):
sesion=False
else:
sesion=True
events = models.Event.objects.filter(person__id=_userID).order_by("-creation_date")
context={"sesion":sesion, "events":events}
return render(request,"index.html",context)
@csrf_exempt
# Endpoint for user registration
def register(request):
context = {}
return render(request, "registro.html", context)
# Endpoint for event modification
@csrf_exempt
def modifyEvent(request,id_ev):
print("modifyEventId "+ id_ev)
_event=models.Event.objects.get(pk=id_ev)
print("eventView: " + str(_event))
_categories=models.Category.objects.all()
_type= models.Type.objects.all()
context={"event":_event, 'categories':_categories, 'types':_type}
return render(request, "modify.html", context)
# Endpoint for event creation
@csrf_exempt
def createEvent(request):
_categories = models.Category.objects.all()
_type = models.Type.objects.all()
context = {'categories':_categories, 'types':_type}
return render(request, "createEvent.html", context)
@csrf_exempt
def eventDetails(request,id_ev):
_event=models.Event.objects.get(pk=id_ev)
context={"evento":_event}
return render(request, "details.html", context)
|
[
"django.shortcuts.render"
] |
[((635, 673), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (641, 673), False, 'from django.shortcuts import render, redirect\n'), ((784, 825), 'django.shortcuts.render', 'render', (['request', '"""registro.html"""', 'context'], {}), "(request, 'registro.html', context)\n", (790, 825), False, 'from django.shortcuts import render, redirect\n'), ((1204, 1243), 'django.shortcuts.render', 'render', (['request', '"""modify.html"""', 'context'], {}), "(request, 'modify.html', context)\n", (1210, 1243), False, 'from django.shortcuts import render, redirect\n'), ((1481, 1525), 'django.shortcuts.render', 'render', (['request', '"""createEvent.html"""', 'context'], {}), "(request, 'createEvent.html', context)\n", (1487, 1525), False, 'from django.shortcuts import render, redirect\n'), ((1661, 1701), 'django.shortcuts.render', 'render', (['request', '"""details.html"""', 'context'], {}), "(request, 'details.html', context)\n", (1667, 1701), False, 'from django.shortcuts import render, redirect\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from threading import Thread
from Parsers.Common import *
class Parser(Thread):
def __init__(self):
"""
Initialize a Parser thread.
"""
Thread.__init__(self)
        self.daemon = True
self.result = None
def run(self):
"""
Details on thread running.
"""
self.result = self.parse()
def parse(self):
"""
        Specific parse method to be overridden by subclasses.
"""
pass
def getResult(self):
"""
        Return the result after the Parser finishes running.
:return:
"""
return self.result
def __str__(self):
return getFormattedString(self.result)
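class UpperCaseParser(Parser):
    """
    Example subclass (added for illustration, not part of the original
    module): shows how parse() is meant to be overridden to return a result.
    """
    def parse(self):
        return "hello world".upper()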
if __name__ == '__main__':
p = Parser()
p.start()
p.join()
print(p.getResult())
|
[
"threading.Thread.__init__"
] |
[((214, 235), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (229, 235), False, 'from threading import Thread\n')]
|
import random
targetNumber = 6
def throwDie():
print("rolling...")
rand = random.randint(1, 6)
print(str(rand) + "!")
return rand
# roll a 6-sided die until the given target number comes up.
# Return the total number of throws.
def rollDieUntilTarget(target):
print("Rolling until a " + str(target) + " comes up...")
currentResult = throwDie()
throws = 1
    while currentResult != target:
currentResult = throwDie()
throws += 1
return throws
totalThrows = rollDieUntilTarget(targetNumber)
print("Took " + str(totalThrows) + " throws to roll a " \
+ str(targetNumber) + ".")
|
[
"random.randint"
] |
[((84, 104), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (98, 104), False, 'import random\n')]
|
# Generated by Django 3.2 on 2021-05-30 18:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scrapers', '0011_alter_retsinfosentences_document'),
('documents', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='DocumentEmbeddings',
new_name='DocumentEmbedding',
),
]
|
[
"django.db.migrations.RenameModel"
] |
[((279, 367), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""DocumentEmbeddings"""', 'new_name': '"""DocumentEmbedding"""'}), "(old_name='DocumentEmbeddings', new_name=\n 'DocumentEmbedding')\n", (301, 367), False, 'from django.db import migrations\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
#read generator parameters into DataFrame
df_gen = pd.read_excel('NEISO_data_file/generators.xlsx',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('NEISO_data_file/paths.csv',header=0)
#list zones
zones = ['CT', 'ME', 'NH', 'NEMA', 'RI', 'SEMA', 'VT', 'WCMA']
##time series of load for each zone
df_load_all = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load_all = df_load_all[zones]
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/NEISO_dispatchable_hydro.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_excel('NEISO_data_file/must_run.xlsx',header=0)
# must run generation
must_run_CT = []
must_run_ME = []
must_run_NEMA = []
must_run_NH = []
must_run_RI = []
must_run_SEMA = []
must_run_VT = []
must_run_WCMA = []
must_run_CT = np.ones((8760,1))*df_must.loc[0,'CT']
must_run_ME = np.ones((8760,1))*df_must.loc[0,'ME']
must_run_NEMA = np.ones((8760,1))*df_must.loc[0,'NEMA']
must_run_NH = np.ones((8760,1))*df_must.loc[0,'NH']
must_run_RI = np.ones((8760,1))*df_must.loc[0,'RI']
must_run_SEMA = np.ones((8760,1))*df_must.loc[0,'SEMA']
must_run_VT = np.ones((8760,1))*df_must.loc[0,'VT']
must_run_WCMA = np.ones((8760,1))*df_must.loc[0,'WCMA']
must_run = np.column_stack((must_run_CT,must_run_ME,must_run_NEMA,must_run_NH,must_run_RI,must_run_SEMA,must_run_VT,must_run_WCMA))
df_total_must_run = pd.DataFrame(must_run,columns=('CT','ME','NEMA','NH','RI','SEMA','VT','WCMA'))
df_total_must_run.to_csv('NEISO_data_file/must_run_hourly.csv')
#natural gas prices
df_ng_all = pd.read_excel('../Time_series_data/Gas_prices/NG.xlsx', header=0)
df_ng_all = df_ng_all[zones]
#oil prices
df_oil_all = pd.read_excel('../Time_series_data/Oil_prices/Oil_prices.xlsx', header=0)
df_oil_all = df_oil_all[zones]
# time series of offshore wind generation for each zone
df_offshore_wind_all = pd.read_excel('../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx',header=0)
# time series of solar generation
df_solar = pd.read_excel('NEISO_data_file/hourly_solar_gen.xlsx',header=0)
solar_caps = pd.read_excel('NEISO_data_file/solar_caps.xlsx',header=0)
# time series of onshore wind generation
df_onshore_wind = pd.read_excel('NEISO_data_file/hourly_onshore_wind_gen.xlsx',header=0)
onshore_wind_caps = pd.read_excel('NEISO_data_file/wind_onshore_caps.xlsx',header=0)
def setup(year, Hub_height, Offshore_capacity):
##time series of natural gas prices for each zone
df_ng = globals()['df_ng_all'].copy()
df_ng = df_ng.reset_index()
##time series of oil prices for each zone
df_oil = globals()['df_oil_all'].copy()
df_oil = df_oil.reset_index()
##time series of load for each zone
df_load = globals()['df_load_all'].loc[year*8760:year*8760+8759].copy()
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
        reserves[i] = np.sum(rv[i,:])*.04  # reserves sized at 4% of total hourly load
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily time series of dispatchable imports by path
df_imports = pd.read_csv('Path_setup/NEISO_dispatchable_imports.csv',header=0)
##hourly time series of exports by zone
df_exports = pd.read_csv('Path_setup/NEISO_exports.csv',header=0)
# time series of offshore wind generation for each zone
df_offshore_wind = globals()['df_offshore_wind_all'].loc[:, Hub_height].copy()
df_offshore_wind = df_offshore_wind.loc[year*8760:year*8760+8759]
df_offshore_wind = df_offshore_wind.reset_index()
offshore_wind_caps = pd.read_excel('NEISO_data_file/wind_offshore_caps.xlsx')
############
# sets #
############
#write data.dat file
import os
from shutil import copy
from pathlib import Path
path = str(Path.cwd().parent) + str(Path('/UCED/LR/NEISO' +'_'+ str(Hub_height) +'_'+ str(Offshore_capacity) +'_'+ str(year)))
os.makedirs(path,exist_ok=True)
generators_file='NEISO_data_file/generators.xlsx'
dispatch_file='../UCED/NEISO_dispatch.py'
dispatchLP_file='../UCED/NEISO_dispatchLP.py'
wrapper_file='../UCED/NEISO_wrapper.py'
simulation_file='../UCED/NEISO_simulation.py'
copy(dispatch_file,path)
copy(wrapper_file,path)
copy(simulation_file,path)
copy(dispatchLP_file,path)
copy(generators_file,path)
filename = path + '/data.dat'
#write data.dat file
# filename = 'NEISO_data_file/data.dat'
with open(filename, 'w') as f:
# generator sets by zone
for z in zones:
# zone string
z_int = zones.index(z)
f.write('set Zone%dGenerators :=\n' % (z_int+1))
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z:
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_CT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYCT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_WCMA :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYWCMA_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NY imports
f.write('set NY_Imports_VT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYVT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# HQ imports
f.write('set HQ_Imports_VT :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'HQVT_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# NB imports
f.write('set NB_Imports_ME :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NBME_I':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# generator sets by type
# coal
f.write('set Coal :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'coal':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# # oil
# f.write('set Oil :=\n')
# # pull relevant generators
# for gen in range(0,len(df_gen)):
# if df_gen.loc[gen,'typ'] == 'oil':
# unit_name = df_gen.loc[gen,'name']
# unit_name = unit_name.replace(' ','_')
# f.write(unit_name + ' ')
# f.write(';\n\n')
# Slack
f.write('set Slack :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'slack':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Hydro
f.write('set Hydro :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Ramping
f.write('set Ramping :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro' or df_gen.loc[gen,'typ'] == 'imports':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# gas generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# Natural Gas
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Zone%dGas :=\n' % (z_int+1))
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# oil generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Zone%dOil :=\n' % (z_int+1))
for gen in range(0,len(df_gen)):
if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# zones
f.write('set zones :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
# sources
f.write('set sources :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
# sinks
f.write('set sinks :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
################
# parameters #
################
# simulation details
SimHours = 8760
f.write('param SimHours := %d;' % SimHours)
f.write('\n')
f.write('param SimDays:= %d;' % int(SimHours/24))
f.write('\n\n')
HorizonHours = 48
f.write('param HorizonHours := %d;' % HorizonHours)
f.write('\n\n')
HorizonDays = int(HorizonHours/24)
f.write('param HorizonDays := %d;' % HorizonDays)
f.write('\n\n')
# create parameter matrix for transmission paths (source and sink connections)
f.write('param:' + '\t' + 'limit' + '\t' +'hurdle :=' + '\n')
for z in zones:
for x in zones:
f.write(z + '\t' + x + '\t')
match = 0
for p in range(0,len(df_paths)):
source = df_paths.loc[p,'start_zone']
sink = df_paths.loc[p,'end_zone']
if source == z and sink == x:
match = 1
p_match = p
if match > 0:
f.write(str(round(df_paths.loc[p_match,'limit'],3)) + '\t' + str(round(df_paths.loc[p_match,'hurdle'],3)) + '\n')
else:
f.write('0' + '\t' + '0' + '\n')
f.write(';\n\n')
# create parameter matrix for generators
f.write('param:' + '\t')
for c in df_gen.columns:
if c != 'name':
f.write(c + '\t')
f.write(':=\n\n')
for i in range(0,len(df_gen)):
for c in df_gen.columns:
if c == 'name':
unit_name = df_gen.loc[i,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + '\t')
elif c == 'typ' or c == 'zone':
f.write(str(df_gen.loc[i,c]) + '\t')
else:
f.write(str(round(df_gen.loc[i,c],3)) + '\t')
f.write('\n')
f.write(';\n\n')
    # time series data
# zonal (hourly)
f.write('param:' + '\t' + 'SimDemand' + '\t' + 'SimOffshoreWind' \
+ '\t' + 'SimSolar' + '\t' + 'SimOnshoreWind' + '\t' + 'SimMustRun:=' + '\n')
for z in zones:
wz = offshore_wind_caps.loc[0,z]
sz = solar_caps.loc[0,z]
owz = onshore_wind_caps.loc[0,z]
for h in range(0,len(df_load)):
f.write(z + '\t' + str(h+1) + '\t' + str(round(df_load.loc[h,z],3))\
+ '\t' + str(round(df_offshore_wind.loc[h,Hub_height]*wz,3))\
+ '\t' + str(round(df_solar.loc[h,'Solar_Output_MWh']*sz,3))\
+ '\t' + str(round(df_onshore_wind.loc[h,'Onshore_Output_MWh']*owz,3))\
+ '\t' + str(round(df_total_must_run.loc[h,z],3)) + '\n')
f.write(';\n\n')
# zonal (daily)
f.write('param:' + '\t' + 'SimGasPrice' + '\t' + 'SimOilPrice:=' + '\n')
for z in zones:
for d in range(0,int(SimHours/24)):
f.write(z + '\t' + str(d+1) + '\t' + str(round(df_ng.loc[d,z], 3)) + '\t' + str(round(df_oil.loc[d,z], 3)) + '\n')
f.write(';\n\n')
#system wide (daily)
f.write('param:' + '\t' + 'SimNY_imports_CT' + '\t' + 'SimNY_imports_VT' + '\t' + 'SimNY_imports_WCMA' + '\t' + 'SimNB_imports_ME' + '\t' + 'SimHQ_imports_VT' + '\t' + 'SimCT_hydro' + '\t' + 'SimME_hydro' + '\t' + 'SimNH_hydro' + '\t' + 'SimNEMA_hydro' + '\t' + 'SimRI_hydro' + '\t' + 'SimVT_hydro' + '\t' + 'SimWCMA_hydro:=' + '\n')
for d in range(0,len(df_imports)):
f.write(str(d+1) + '\t' + str(round(df_imports.loc[d,'NY_imports_CT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_VT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_WCMA'],3)) + '\t' + str(round(df_imports.loc[d,'NB_imports_ME'],3)) + '\t' + str(round(df_imports.loc[d,'HQ_imports_VT'],3)) + '\t' + str(round(df_hydro.loc[d,'CT'],3)) + '\t' + str(round(df_hydro.loc[d,'ME'],3)) + '\t' + str(round(df_hydro.loc[d,'NH'],3)) + '\t' + str(round(df_hydro.loc[d,'NEMA'],3)) + '\t' + str(round(df_hydro.loc[d,'RI'],3)) + '\t' + str(round(df_hydro.loc[d,'VT'],3)) + '\t' + str(round(df_hydro.loc[d,'WCMA'],3)) + '\n')
f.write(';\n\n')
#system wide (hourly)
f.write('param:' + '\t' + 'SimCT_exports_NY' + '\t' + 'SimWCMA_exports_NY' + '\t' + 'SimVT_exports_NY' + '\t' + 'SimVT_exports_HQ' + '\t' + 'SimME_exports_NB' + '\t' + 'SimReserves:=' + '\n')
for h in range(0,len(df_load)):
f.write(str(h+1) + '\t' + str(round(df_exports.loc[h,'CT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'WCMA_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_HQ'],3)) + '\t' + str(round(df_exports.loc[h,'ME_exports_NB'],3)) + '\t' + str(round(df_reserves.loc[h,'reserves'],3)) + '\n')
f.write(';\n\n')
return None
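# Illustrative sketch (added commentary, not part of the original script): the
# writer above emits a Pyomo/AMPL-style .dat file whose fragments look roughly
# like the following -- unit names and numbers are placeholders, not real data:
#
#   set Slack :=
#   Unit_A ;
#
#   set Zone1Gas :=
#   Unit_B Unit_C ;
#
#   param SimHours := 8760;
#
#   param:  limit   hurdle :=
#   CT      ME      100.000 2.500
#   ;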
|
[
"pandas.DataFrame",
"numpy.sum",
"os.makedirs",
"pandas.read_csv",
"numpy.ones",
"pandas.read_excel",
"numpy.column_stack",
"pathlib.Path.cwd",
"shutil.copy"
] |
[((177, 235), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/generators.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/generators.xlsx', header=0)\n", (190, 235), True, 'import pandas as pd\n'), ((297, 347), 'pandas.read_csv', 'pd.read_csv', (['"""NEISO_data_file/paths.csv"""'], {'header': '(0)'}), "('NEISO_data_file/paths.csv', header=0)\n", (308, 347), True, 'import pandas as pd\n'), ((474, 574), 'pandas.read_csv', 'pd.read_csv', (['"""../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv"""'], {'header': '(0)'}), "(\n '../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv',\n header=0)\n", (485, 574), True, 'import pandas as pd\n'), ((643, 708), 'pandas.read_csv', 'pd.read_csv', (['"""Hydro_setup/NEISO_dispatchable_hydro.csv"""'], {'header': '(0)'}), "('Hydro_setup/NEISO_dispatchable_hydro.csv', header=0)\n", (654, 708), True, 'import pandas as pd\n'), ((762, 818), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/must_run.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/must_run.xlsx', header=0)\n", (775, 818), True, 'import pandas as pd\n'), ((1428, 1559), 'numpy.column_stack', 'np.column_stack', (['(must_run_CT, must_run_ME, must_run_NEMA, must_run_NH, must_run_RI,\n must_run_SEMA, must_run_VT, must_run_WCMA)'], {}), '((must_run_CT, must_run_ME, must_run_NEMA, must_run_NH,\n must_run_RI, must_run_SEMA, must_run_VT, must_run_WCMA))\n', (1443, 1559), True, 'import numpy as np\n'), ((1569, 1659), 'pandas.DataFrame', 'pd.DataFrame', (['must_run'], {'columns': "('CT', 'ME', 'NEMA', 'NH', 'RI', 'SEMA', 'VT', 'WCMA')"}), "(must_run, columns=('CT', 'ME', 'NEMA', 'NH', 'RI', 'SEMA',\n 'VT', 'WCMA'))\n", (1581, 1659), True, 'import pandas as pd\n'), ((1745, 1810), 'pandas.read_excel', 'pd.read_excel', (['"""../Time_series_data/Gas_prices/NG.xlsx"""'], {'header': '(0)'}), "('../Time_series_data/Gas_prices/NG.xlsx', header=0)\n", (1758, 1810), True, 'import pandas as pd\n'), ((1866, 1939), 'pandas.read_excel', 'pd.read_excel', (['"""../Time_series_data/Oil_prices/Oil_prices.xlsx"""'], {'header': '(0)'}), "('../Time_series_data/Oil_prices/Oil_prices.xlsx', header=0)\n", (1879, 1939), True, 'import pandas as pd\n'), ((2051, 2156), 'pandas.read_excel', 'pd.read_excel', (['"""../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx"""'], {'header': '(0)'}), "(\n '../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx',\n header=0)\n", (2064, 2156), True, 'import pandas as pd\n'), ((2193, 2257), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/hourly_solar_gen.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/hourly_solar_gen.xlsx', header=0)\n", (2206, 2257), True, 'import pandas as pd\n'), ((2270, 2328), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/solar_caps.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/solar_caps.xlsx', header=0)\n", (2283, 2328), True, 'import pandas as pd\n'), ((2388, 2459), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/hourly_onshore_wind_gen.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/hourly_onshore_wind_gen.xlsx', header=0)\n", (2401, 2459), True, 'import pandas as pd\n'), ((2479, 2544), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/wind_onshore_caps.xlsx"""'], {'header': '(0)'}), "('NEISO_data_file/wind_onshore_caps.xlsx', header=0)\n", (2492, 2544), True, 'import pandas as pd\n'), ((1003, 1021), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1010, 1021), True, 'import numpy as np\n'), ((1055, 1073), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1062, 1073), True, 'import numpy as np\n'), ((1109, 1127), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1116, 1127), True, 'import numpy as np\n'), ((1163, 1181), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1170, 1181), True, 'import numpy as np\n'), ((1215, 1233), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1222, 1233), True, 'import numpy as np\n'), ((1269, 1287), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1276, 1287), True, 'import numpy as np\n'), ((1323, 1341), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1330, 1341), True, 'import numpy as np\n'), ((1377, 1395), 'numpy.ones', 'np.ones', (['(8760, 1)'], {}), '((8760, 1))\n', (1384, 1395), True, 'import numpy as np\n'), ((3238, 3260), 'pandas.DataFrame', 'pd.DataFrame', (['reserves'], {}), '(reserves)\n', (3250, 3260), True, 'import pandas as pd\n'), ((3378, 3444), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/NEISO_dispatchable_imports.csv"""'], {'header': '(0)'}), "('Path_setup/NEISO_dispatchable_imports.csv', header=0)\n", (3389, 3444), True, 'import pandas as pd\n'), ((3510, 3563), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/NEISO_exports.csv"""'], {'header': '(0)'}), "('Path_setup/NEISO_exports.csv', header=0)\n", (3521, 3563), True, 'import pandas as pd\n'), ((3864, 3920), 'pandas.read_excel', 'pd.read_excel', (['"""NEISO_data_file/wind_offshore_caps.xlsx"""'], {}), "('NEISO_data_file/wind_offshore_caps.xlsx')\n", (3877, 3920), True, 'import pandas as pd\n'), ((4226, 4258), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (4237, 4258), False, 'import os\n'), ((4516, 4541), 'shutil.copy', 'copy', (['dispatch_file', 'path'], {}), '(dispatch_file, path)\n', (4520, 4541), False, 'from shutil import copy\n'), ((4545, 4569), 'shutil.copy', 'copy', (['wrapper_file', 'path'], {}), '(wrapper_file, path)\n', (4549, 4569), False, 'from shutil import copy\n'), ((4573, 4600), 'shutil.copy', 'copy', (['simulation_file', 'path'], {}), '(simulation_file, path)\n', (4577, 4600), False, 'from shutil import copy\n'), ((4604, 4631), 'shutil.copy', 'copy', (['dispatchLP_file', 'path'], {}), '(dispatchLP_file, path)\n', (4608, 4631), False, 'from shutil import copy\n'), ((4635, 4662), 'shutil.copy', 'copy', (['generators_file', 'path'], {}), '(generators_file, path)\n', (4639, 4662), False, 'from shutil import copy\n'), ((3200, 3216), 'numpy.sum', 'np.sum', (['rv[i, :]'], {}), '(rv[i, :])\n', (3206, 3216), True, 'import numpy as np\n'), ((4106, 4116), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4114, 4116), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from bs4 import BeautifulSoup
from django.http import QueryDict
from cms.api import add_plugin
from cms.utils.plugins import build_plugin_tree
from cmsplugin_cascade.models import CascadeElement
from cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,
BootstrapRowForm, BootstrapColumnPlugin, BS3_BREAKPOINT_KEYS)
from cmsplugin_cascade.generic.cms_plugins import HeadingPlugin
from tests.test_base import CascadeTestCase
class SectionPluginTest(CascadeTestCase):
def setUp(self):
super(SectionPluginTest, self).setUp()
# add a Bootstrap Container Plugin
container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',
glossary={'breakpoints': BS3_BREAKPOINT_KEYS})
self.assertIsInstance(container_model, CascadeElement)
container_plugin = container_model.get_plugin_class_instance(self.admin_site)
self.assertIsInstance(container_plugin, BootstrapContainerPlugin)
ModelForm = container_plugin.get_form(self.request, container_model)
post_data = QueryDict('', mutable=True)
post_data.setlist('breakpoints', ['sm', 'md'])
form = ModelForm(post_data, None, instance=container_model)
soup = BeautifulSoup(form.as_p(), features='lxml')
input_element = soup.find(id="id_glossary_breakpoints_0")
self.assertDictContainsSubset({'type': 'checkbox', 'name': 'breakpoints', 'value': 'xs'},
input_element.attrs)
input_element = soup.find(id="id_glossary_breakpoints_2")
self.assertDictContainsSubset({'type': 'checkbox', 'name': 'breakpoints', 'value': 'md', 'checked': ''},
input_element.attrs)
input_element = soup.find(id="id_glossary_fluid")
self.assertDictContainsSubset({'type': 'checkbox', 'name': 'fluid'},
input_element.attrs)
container_plugin.save_model(self.request, container_model, form, False)
self.assertListEqual(container_model.glossary['breakpoints'], ['sm', 'md'])
self.assertTrue('fluid' in container_model.glossary)
self.assertEqual(str(container_model), 'for tablets, laptops')
# add a RowPlugin with 1 ColumnPlugin
row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model)
row_plugin = row_model.get_plugin_class_instance()
row_change_form = BootstrapRowForm({'num_children': 1})
row_change_form.full_clean()
row_plugin.save_model(self.request, row_model, row_change_form, False)
self.assertDictEqual(row_model.glossary, {})
self.assertIsInstance(row_model, CascadeElement)
column_models = CascadeElement.objects.filter(parent_id=row_model.id)
self.assertEqual(column_models.count(), 1)
# work with the ColumnPlugin
self.column_model = column_models.first()
self.assertIsInstance(self.column_model, CascadeElement)
self.column_plugin = self.column_model.get_plugin_class_instance()
self.assertIsInstance(self.column_plugin, BootstrapColumnPlugin)
self.assertEqual(self.column_model.parent.id, row_model.id)
self.plugin_list = [container_model, row_model, self.column_model]
def test_section(self):
heading_model = add_plugin(self.placeholder, HeadingPlugin, 'en', target=self.column_model)
self.assertIsInstance(heading_model, CascadeElement)
heading_plugin = heading_model.get_plugin_class_instance(self.admin_site)
self.assertIsInstance(heading_plugin, HeadingPlugin)
ModelForm = heading_plugin.get_form(self.request, heading_model)
post_data = QueryDict('', mutable=True)
post_data.update(tag_type='h2', content="Hello", element_id='foo')
form = ModelForm(post_data, None, instance=heading_model)
html = form.as_p()
needle = '<input id="id_glossary_element_id" name="element_id" type="text" value="foo" />'
self.assertInHTML(needle, html)
self.assertTrue(form.is_valid())
heading_plugin.save_model(self.request, heading_model, form, False)
# check identifier
html = heading_plugin.get_identifier(heading_model)
expected = '<code>h2</code>: Hello <code>id="foo"</code>'
self.assertHTMLEqual(html, expected)
        # render the Container Plugin with the Heading Plugin as a child
self.plugin_list.append(heading_model)
build_plugin_tree(self.plugin_list)
# context = get_request_context(self.request)
# html = heading_model.render_plugin(context)
html = self.get_html(heading_model, self.get_request_context())
expected = '<h2 id="foo">Hello</h2>'
self.assertHTMLEqual(html, expected)
# add another heading model with the same id
heading_model = add_plugin(self.placeholder, HeadingPlugin, 'en', target=self.column_model)
form = ModelForm(post_data, None, instance=heading_model)
self.assertFalse(form.is_valid())
        expected = '<ul class="errorlist"><li>glossary<ul class="errorlist"><li>The element ID \'foo\' is not unique for this page.</li></ul></li></ul>'
self.assertHTMLEqual(str(form.errors), expected)
|
[
"cmsplugin_cascade.bootstrap3.container.BootstrapRowForm",
"cms.utils.plugins.build_plugin_tree",
"cms.api.add_plugin",
"django.http.QueryDict",
"cmsplugin_cascade.models.CascadeElement.objects.filter"
] |
[((719, 831), 'cms.api.add_plugin', 'add_plugin', (['self.placeholder', 'BootstrapContainerPlugin', '"""en"""'], {'glossary': "{'breakpoints': BS3_BREAKPOINT_KEYS}"}), "(self.placeholder, BootstrapContainerPlugin, 'en', glossary={\n 'breakpoints': BS3_BREAKPOINT_KEYS})\n", (729, 831), False, 'from cms.api import add_plugin\n'), ((1184, 1211), 'django.http.QueryDict', 'QueryDict', (['""""""'], {'mutable': '(True)'}), "('', mutable=True)\n", (1193, 1211), False, 'from django.http import QueryDict\n'), ((2412, 2490), 'cms.api.add_plugin', 'add_plugin', (['self.placeholder', 'BootstrapRowPlugin', '"""en"""'], {'target': 'container_model'}), "(self.placeholder, BootstrapRowPlugin, 'en', target=container_model)\n", (2422, 2490), False, 'from cms.api import add_plugin\n'), ((2576, 2613), 'cmsplugin_cascade.bootstrap3.container.BootstrapRowForm', 'BootstrapRowForm', (["{'num_children': 1}"], {}), "({'num_children': 1})\n", (2592, 2613), False, 'from cmsplugin_cascade.bootstrap3.container import BootstrapContainerPlugin, BootstrapRowPlugin, BootstrapRowForm, BootstrapColumnPlugin, BS3_BREAKPOINT_KEYS\n'), ((2864, 2917), 'cmsplugin_cascade.models.CascadeElement.objects.filter', 'CascadeElement.objects.filter', ([], {'parent_id': 'row_model.id'}), '(parent_id=row_model.id)\n', (2893, 2917), False, 'from cmsplugin_cascade.models import CascadeElement\n'), ((3467, 3542), 'cms.api.add_plugin', 'add_plugin', (['self.placeholder', 'HeadingPlugin', '"""en"""'], {'target': 'self.column_model'}), "(self.placeholder, HeadingPlugin, 'en', target=self.column_model)\n", (3477, 3542), False, 'from cms.api import add_plugin\n'), ((3840, 3867), 'django.http.QueryDict', 'QueryDict', (['""""""'], {'mutable': '(True)'}), "('', mutable=True)\n", (3849, 3867), False, 'from django.http import QueryDict\n'), ((4619, 4654), 'cms.utils.plugins.build_plugin_tree', 'build_plugin_tree', (['self.plugin_list'], {}), '(self.plugin_list)\n', (4636, 4654), False, 'from cms.utils.plugins import build_plugin_tree\n'), ((5003, 5078), 'cms.api.add_plugin', 'add_plugin', (['self.placeholder', 'HeadingPlugin', '"""en"""'], {'target': 'self.column_model'}), "(self.placeholder, HeadingPlugin, 'en', target=self.column_model)\n", (5013, 5078), False, 'from cms.api import add_plugin\n')]
|
from specter.runner import activate
activate()
|
[
"specter.runner.activate"
] |
[((36, 46), 'specter.runner.activate', 'activate', ([], {}), '()\n', (44, 46), False, 'from specter.runner import activate\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_core.ipynb (unless otherwise specified).
__all__ = ['XLAOptimProxy', 'DeviceMoverTransform', 'isAffineCoordTfm', 'isDeviceMoverTransform', 'has_affinecoord_tfm',
'has_devicemover_tfm', 'get_last_affinecoord_tfm_idx', 'insert_batch_tfm', 'XLAOptCallback']
# Internal Cell
from .utils import xla_imported
# Internal Cell
try:
import torch_xla
except ImportError:
pass
# Internal Cell
if xla_imported():
import torch_xla.core.xla_model as xm
from fastcore.foundation import GetAttr, patch
from fastcore.transform import Transform,DisplayedTransform
from fastcore.basics import store_attr
from torch import Tensor
import torch
from fastai.vision.augment import AffineCoordTfm, RandomResizedCropGPU
from fastai.data.core import DataLoaders
from fastai.data.load import DataLoader
from fastai.learner import Learner
from fastai.callback.core import Callback, TrainEvalCallback
from fastai.learner import Recorder
# Cell
class XLAOptimProxy(GetAttr):
"Proxy optimizer to override `opt.step` with Pytorch XLA sync method `xm.optimizer_step` "
_default='opt'
def __init__(self,opt, barrier):
self.opt = opt
self._barrier = barrier
def step(self):
xm.optimizer_step(self.opt,barrier=self._barrier)
@property
def barrier(self): return self._barrier
@barrier.setter
def barrier(self,v): self._barrier = v
# Cell
class DeviceMoverTransform(DisplayedTransform):
"Transform to move input to new device and reverse to cpu"
def __init__(self, device_to, device_from=torch.device('cpu')):
store_attr('device_to,device_from')
def encodes(self, o:Tensor):
return o.to(self.device_to)
def decodes(self, o:Tensor):
return o.to(self.device_from)
# Cell
def isAffineCoordTfm(o:Transform):
"check whether the transform is either an AffineCoordTfm or RandomResizedCropGPU"
return isinstance(o,(AffineCoordTfm,RandomResizedCropGPU))
def isDeviceMoverTransform(o:Transform):
"check whether the transform is a DeviceMoverTransform"
return isinstance(o,DeviceMoverTransform)
def has_affinecoord_tfm(dls: DataLoaders) -> bool:
"returns true if train dataloader has an AffineCoordTfm in the batch_tfms"
if not hasattr(dls.train,'after_batch'): return False
if not hasattr(dls.train.after_batch,'fs'): return False
idxs = dls.train.after_batch.fs.argwhere(isAffineCoordTfm)
return len(idxs) > 0
def has_devicemover_tfm(dl: DataLoader) -> bool:
"returns true if train dataloader has a DeviceMoverTransform in the batch_tfms"
if not hasattr(dl,'after_batch'): return False
if not hasattr(dl.after_batch,'fs'): return False
idxs = dl.after_batch.fs.argwhere(isDeviceMoverTransform)
return len(idxs) > 0
def get_last_affinecoord_tfm_idx(dl:DataLoader)-> int: # -1 if none
"returns index of last AffineCoordTfm if it exists, otherwise returns -1"
idxs = dl.after_batch.fs.argwhere(isAffineCoordTfm)
return -1 if len(idxs) == 0 else idxs[-1]
# Cell
def insert_batch_tfm(dl:DataLoader, batch_tfm:Transform, idx:int):
"adds a batch_tfm in the batch_tfms for the dataloader at idx location"
dl.after_batch.fs.insert(idx, batch_tfm)
# Cell
@patch
def setup_input_device_mover(self: Learner, new_device):
"setup batch_tfms to use cpu if dataloader batch_tfms has AffineCoordTfms"
if not has_affinecoord_tfm(self.dls):
self.dls.device = new_device
return
self.dls.device = None
if has_devicemover_tfm(self.dls.train):
return # skip adding device mover if already added
dm_tfm = DeviceMoverTransform(new_device)
for dl in self.dls.loaders:
if not has_devicemover_tfm(dl):
idx = get_last_affinecoord_tfm_idx(dl)
if idx != -1:
insert_batch_tfm(dl, dm_tfm, idx+1)
# Cell
class XLAOptCallback(Callback):
'Callback to replace `opt.step` with `xm.optimizer_step(opt)` as required to run on TPU'
run_after,run_before = TrainEvalCallback,Recorder
def __init__(self, barrier=True):
self._barrier = barrier
def before_fit(self):
'replace opt with proxy which calls `xm.optimizer_step` instead of `opt.step` and set `dls.device` and model to `xla_device`'
# set dls device to none so prevent trigger of moving to batch input to XLA device
# as this move will be done by the DeviceMoverTransform which has been added to the dls after_batch tfms
if has_affinecoord_tfm(self.dls):
self.dls.device = None
if self.learn.opt is not None:
if not isinstance(self.learn.opt,XLAOptimProxy):
# force opt to reinitialize its parameters and make sure its parameters
opt = self.learn.opt
self.learn.opt = XLAOptimProxy(opt, barrier=self._barrier)
def after_fit(self):
'restore original opt '
if isinstance(self.learn.opt, XLAOptimProxy):
opt = self.learn.opt.opt
self.learn.opt = opt
@property
def barrier(self): return self._barrier
@barrier.setter
def barrier(self,v): self._barrier = v
# Cell
@patch
def to_xla(self:Learner, new_device=None):
"Setup learner for single tpu core training"
self.add_cb(XLAOptCallback())
if new_device is None:
new_device = xm.xla_device()
self.model.to(new_device)
self.setup_input_device_mover(new_device)
self.opt = None
return self
# Cell
@patch
def detach_xla(self:Learner):
"reset TPU single core setup and move model and dls back to cpu "
self.remove_cb(XLAOptCallback)
self.dls.device = torch.device('cpu')
self.model = self.model.to(self.dls.device)
self.opt = None
return self
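# Usage sketch (illustrative; assumes a fastai Learner built elsewhere):
#   learn = Learner(dls, model, loss_func=loss)
#   learn.to_xla()      # install XLAOptCallback, move model to the TPU core
#   learn.fit(1)
#   learn.detach_xla()  # restore opt and move everything back to CPU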
|
[
"torch_xla.core.xla_model.xla_device",
"torch_xla.core.xla_model.optimizer_step",
"fastcore.basics.store_attr",
"torch.device"
] |
[((5669, 5688), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5681, 5688), False, 'import torch\n'), ((1260, 1310), 'torch_xla.core.xla_model.optimizer_step', 'xm.optimizer_step', (['self.opt'], {'barrier': 'self._barrier'}), '(self.opt, barrier=self._barrier)\n', (1277, 1310), True, 'import torch_xla.core.xla_model as xm\n'), ((1597, 1616), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1609, 1616), False, 'import torch\n'), ((1627, 1662), 'fastcore.basics.store_attr', 'store_attr', (['"""device_to,device_from"""'], {}), "('device_to,device_from')\n", (1637, 1662), False, 'from fastcore.basics import store_attr\n'), ((5368, 5383), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (5381, 5383), True, 'import torch_xla.core.xla_model as xm\n')]
|
import pandas as pd
import plotly.express as px
from django.views.generic.base import TemplateView
from plotly.offline import plot
from sars_dashboard.calls.models import PangolinCall
from sars_dashboard.projects.models import Project
from sars_dashboard.samples.models import Sample
from sars_dashboard.voc_definitions import VOCS
class SarsDashboardView(TemplateView):
template_name = "dashboards/sars-cov-2-dashboard.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
no_data = "No data."
first_project = Project.objects.first()
if first_project is None:
context["project_name"] = "No project found"
else:
context["project_name"] = first_project.title
samples_of_project = Sample.objects.filter(project=first_project)
if not samples_of_project.exists():
context["processed_samples"] = "-"
else:
context["processed_samples"] = samples_of_project.count()
calls_of_project = PangolinCall.objects.filter(sample__in=samples_of_project)
if not calls_of_project.exists():
context["over_time_plot"] = no_data
context["table"] = no_data
context["unique_calls"] = "-"
else:
calls_all_data = self.extract_and_mask_lineages_per_day(calls_of_project)
context["over_time_plot"] = self.plot_lineages_over_time(calls_all_data)
context["table"] = self.get_table_of_lineages(calls_all_data)
context["unique_calls"] = calls_of_project.count()
latest_run_date = samples_of_project.order_by("-date").first()
if latest_run_date is None:
context["latest_run_plot"] = no_data
context["last_update"] = "-"
else:
samples_of_latest_run = samples_of_project.filter(date=latest_run_date.date)
calls_of_last_run = PangolinCall.objects.filter(
sample__in=samples_of_latest_run
)
if not calls_of_last_run.exists():
context["latest_run_plot"] = no_data
context["last_update"] = "-"
else:
lineages_of_last_run = self.extract_and_mask_lineages_per_day(
calls_of_last_run
)
context["latest_run_plot"] = self.plot_lineages_of_last_run(
lineages_of_last_run
)
context["last_update"] = latest_run_date.date
return context
def get_table_of_lineages(self, table):
"""Creates a table of the lineages in the last run"""
table["Week"] = pd.to_datetime(table["Date"]).dt.strftime("%W")
table["Year"] = pd.to_datetime(table["Date"]).dt.strftime("%Y")
table = table.groupby(["Year", "Week", "Lineage"]).sum().reset_index()
if len(table) == 0:
return pd.DataFrame(columns=["Year", "Week", "Lineage", "# of Lineages"])
table = (
table.pivot(
index=["Year", "Week"], columns="Lineage", values="# of Lineages"
)
.fillna(0.0)
.reset_index()
)
table = table.astype(int)
table = table.to_html(
classes=(
'table table-bordered dataTable" id="dataTable" width="100%" '
'cellspacing="0" role="grid" style="width: 100%;'
),
index=False,
index_names=False,
justify="center",
border=0,
)
return table
def plot_lineages_of_last_run(self, data):
"""Created a plot of the lineages of the last run as a doughnut chart"""
fig = px.pie(
data,
values="# of Lineages",
names="Lineage",
hover_data=["Lineage"],
hole=0.8,
)
fig.update_traces(textposition="inside", textinfo="percent+label")
fig.update_layout(
legend=dict(
orientation="h",
)
)
fig.update_layout(
margin=dict(l=0, r=0, t=0, b=0),
)
plot_div = plot(fig, output_type="div", include_plotlyjs=False)
return plot_div
def plot_lineages_over_time(self, data):
"""Created a plot of the lineages over time as a bar chart"""
fig = px.bar(data, x="Date", y="# of Lineages", color="Lineage")
fig.update_layout(
margin=dict(l=0, r=0, t=0, b=0),
)
fig.update_layout(
legend=dict(orientation="h", yanchor="bottom", xanchor="left", y=-0.85, x=0)
)
fig.update_xaxes(
rangeslider_visible=True,
)
plot_div = plot(fig, output_type="div", include_plotlyjs=False)
return plot_div
def extract_and_mask_lineages_per_day(self, calls):
"""Extracts the lineages per day from given calls and masks the lineages that are not in the list of VOCs"""
all_lineages = []
for call in calls:
all_lineages.append(
{
"Called Lineage": call.lineage,
"Date": call.sample.date,
}
)
all_lineages = pd.DataFrame(all_lineages)
if len(all_lineages) == 0:
return pd.DataFrame(columns=["Date", "Lineage", "# of Lineages"])
for masked_lineage, lineage in VOCS.items():
all_lineages.loc[
all_lineages["Called Lineage"].str.contains(lineage), "Lineage"
] = masked_lineage
all_lineages["Lineage"] = all_lineages["Lineage"].fillna("Other")
all_lineages = (
all_lineages.groupby(by=["Date", "Lineage"])
.size()
.reset_index(name="# of Lineages")
)
return all_lineages
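# Illustrative sketch of the masking above (lineage strings are made up):
# a call whose "Called Lineage" contains a pattern from VOCS is replaced by
# that VOC's display key, and everything else is grouped under "Other", e.g.
#   Called Lineage   Date        ->  Lineage     # of Lineages
#   B.1.617.2.1      2021-06-01      <VOC key>   1
#   XYZ.1            2021-06-01      Other       1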
|
[
"pandas.DataFrame",
"sars_dashboard.calls.models.PangolinCall.objects.filter",
"plotly.offline.plot",
"sars_dashboard.samples.models.Sample.objects.filter",
"plotly.express.bar",
"sars_dashboard.voc_definitions.VOCS.items",
"plotly.express.pie",
"sars_dashboard.projects.models.Project.objects.first",
"pandas.to_datetime"
] |
[((584, 607), 'sars_dashboard.projects.models.Project.objects.first', 'Project.objects.first', ([], {}), '()\n', (605, 607), False, 'from sars_dashboard.projects.models import Project\n'), ((801, 845), 'sars_dashboard.samples.models.Sample.objects.filter', 'Sample.objects.filter', ([], {'project': 'first_project'}), '(project=first_project)\n', (822, 845), False, 'from sars_dashboard.samples.models import Sample\n'), ((1050, 1108), 'sars_dashboard.calls.models.PangolinCall.objects.filter', 'PangolinCall.objects.filter', ([], {'sample__in': 'samples_of_project'}), '(sample__in=samples_of_project)\n', (1077, 1108), False, 'from sars_dashboard.calls.models import PangolinCall\n'), ((3728, 3820), 'plotly.express.pie', 'px.pie', (['data'], {'values': '"""# of Lineages"""', 'names': '"""Lineage"""', 'hover_data': "['Lineage']", 'hole': '(0.8)'}), "(data, values='# of Lineages', names='Lineage', hover_data=['Lineage'\n ], hole=0.8)\n", (3734, 3820), True, 'import plotly.express as px\n'), ((4174, 4226), 'plotly.offline.plot', 'plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)'}), "(fig, output_type='div', include_plotlyjs=False)\n", (4178, 4226), False, 'from plotly.offline import plot\n'), ((4382, 4440), 'plotly.express.bar', 'px.bar', (['data'], {'x': '"""Date"""', 'y': '"""# of Lineages"""', 'color': '"""Lineage"""'}), "(data, x='Date', y='# of Lineages', color='Lineage')\n", (4388, 4440), True, 'import plotly.express as px\n'), ((4746, 4798), 'plotly.offline.plot', 'plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)'}), "(fig, output_type='div', include_plotlyjs=False)\n", (4750, 4798), False, 'from plotly.offline import plot\n'), ((5255, 5281), 'pandas.DataFrame', 'pd.DataFrame', (['all_lineages'], {}), '(all_lineages)\n', (5267, 5281), True, 'import pandas as pd\n'), ((5436, 5448), 'sars_dashboard.voc_definitions.VOCS.items', 'VOCS.items', ([], {}), '()\n', (5446, 5448), False, 'from sars_dashboard.voc_definitions import VOCS\n'), ((1936, 1997), 'sars_dashboard.calls.models.PangolinCall.objects.filter', 'PangolinCall.objects.filter', ([], {'sample__in': 'samples_of_latest_run'}), '(sample__in=samples_of_latest_run)\n', (1963, 1997), False, 'from sars_dashboard.calls.models import PangolinCall\n'), ((2928, 2994), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Year', 'Week', 'Lineage', '# of Lineages']"}), "(columns=['Year', 'Week', 'Lineage', '# of Lineages'])\n", (2940, 2994), True, 'import pandas as pd\n'), ((5337, 5395), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Date', 'Lineage', '# of Lineages']"}), "(columns=['Date', 'Lineage', '# of Lineages'])\n", (5349, 5395), True, 'import pandas as pd\n'), ((2681, 2710), 'pandas.to_datetime', 'pd.to_datetime', (["table['Date']"], {}), "(table['Date'])\n", (2695, 2710), True, 'import pandas as pd\n'), ((2753, 2782), 'pandas.to_datetime', 'pd.to_datetime', (["table['Date']"], {}), "(table['Date'])\n", (2767, 2782), True, 'import pandas as pd\n')]
|
import os
import glob
import sys
import error_handle as eh
###########################################################################################################################
def initialize():
###########################################################################################################################
global CIRCUIT_FILE
global CODE_FILE
global CODE_DATA
global NODE_DATA
global NODE_COUNT
global DEFAULT_NODE_VOLTAGE
global DUPLICATE_FLAG
global MAXIMUM_SIMULATION_COUNT
global INPUT_VARIABLES_COUNT
global INPUT_VARIABLES_VALUE
global INPUT_VARIABLES
global OUTPUT_NODE_INDEX
global subckt_node_count
global MAINCKT_SETUP_NODE
global PRINT_NODE
global PRINTF_NODE
global PLOT_NODE
global FIX_VOLTAGE_NODE
global SCAN_NODE
global INPUT_NODE
global OUTPUT_NODE
global TIME_ANALYSIS_SETUP_NODE
global MAINCKT_DATA
global CLOCK_DATA
global SETUP_DATA
global screen
global initial_x
global initial_y
global y_div
global TRUTH_TABLE_OUTPUT_DATA_FILE
global TRUTH_TABLE_INPUT_DATA_FILE
global MAINCKT_SETUP_DATA
global MAINCKT_SETUP_NODE_INDEX
global FIX_VOLTAGE_INDEX
global PRINT_NODE_INDEX
global PRINTF_NODE_INDEX
global PLOT_NODE_INDEX
global SCAN_NODE_INDEX
global INPUT_NODE_INDEX
global OUTPUT_NODE_INDEX
global TRISTATE_BUFFER_ENABLE
global TRISTATE_BUFFER_OUTPUT
global tristate_buffer_list
global ANALYSIS
global TOTAL_SIMULATION_TIME
global TIME_ANALYSIS_DATA_FILENAME
global TIME_ANALYSIS_DATA_FILE
global TIME_ANALYSIS_SETUP_NODE_INDEX
global TIME_ANALYSIS_SETUP_COUNT
global TIME_ANALYSIS_DATA_ARRAY
global PLOT_DATA
global PLOT_ALLOW_FLAG
global TURTLES
global PREPATH
global BOARD_NAME
global BOARD_INPUT
global BOARD_OUTPUT
global BOARD_INFO
global PORT_NAME
global arduinoSerialData
global CONNECT_OUT_FLAG
global board_input_data
global circuit_simulation_flag
global maincircuit_setup_simulation_flag
global serial_communication_flag
global clocking_flag
global serial_sync_time
global serial_sync_time0
global current_time
global last_time
global start_time
global ddr_DATA
global DDR_DATA
global connect_out_tb
global screen_rt
global turtle_write
global temp_print_node
global temp_height
global CONNECT_OUT_NODE_OUT
global CONNECT_OUT_NODE_IN
global TRUTH_TABLE_INPUT_DATA_FILENAME
global TRUTH_TABLE_OUTPUT_DATA_FILENAME
global tt_time_flag
global serial_sync_time1
global mcktstp_time_flag
global MAINCKT_PRINT_ARRAY
global scan_sampling_time
global MAINCKT_SETUP_FILENAME
global print_plot_result1_flag
global print_plot_result2_flag
global ANALYSIS_DATA
global ANALYSIS_DATA_FILENAME
global end_time
global tt_time_print_flag
global tt_time_plot_flag
###########################################################################################################################
PREPATH=os.path.realpath(os.path.join(os.path.dirname(__file__),'..'))
if len(sys.argv)==2:
CIRCUIT_FILE=sys.argv[1]
else:
eh.display_error(0,0,-4,0)
try:
CODE_FILE=open(CIRCUIT_FILE,"r")
except:
eh.display_error(0,0,-3,CIRCUIT_FILE)
CODE_DATA=CODE_FILE.readlines()
###########################################################################################################################
TURTLES=[]
ANALYSIS=None
TRUTH_TABLE_OUTPUT_DATA_FILE=None
TRUTH_TABLE_INPUT_DATA_FILE=None
MAINCKT_SETUP_FILENAME=None
TOTAL_SIMULATION_TIME=None
TIME_ANALYSIS_DATA_FILE=None
FIX_VOLTAGE_INDEX=[]
PRINT_NODE_INDEX=[]
PRINTF_NODE_INDEX=[]
PLOT_NODE_INDEX=[]
SCAN_NODE_INDEX=[]
INPUT_NODE_INDEX=[]
OUTPUT_NODE_INDEX=[]
TIME_ANALYSIS_SETUP_NODE_INDEX=[]
TRISTATE_BUFFER_OUTPUT=[]
TRISTATE_BUFFER_ENABLE=[]
TIME_ANALYSIS_DATA_ARRAY=[]
PLOT_DATA=[]
SETUP_DATA=[]
MAINCKT_SETUP_DATA=None
INPUT_VARIABLES_VALUE=[]
connect_out_tb=[]
tristate_buffer_list=[]
CLOCK_DATA=None
TIME_ANALYSIS_SETUP_NODE=[]
MAINCKT_SETUP_NODE=[]
MAINCKT_SETUP_NODE_INDEX=[]
PRINT_NODE=[]
PRINTF_NODE=[]
PLOT_NODE=[]
SCAN_NODE=[]
INPUT_NODE=[]
OUTPUT_NODE=[]
MAINCKT_DATA=[]
FIX_VOLTAGE_NODE=[]
NODE_DATA=[]
NODE_COUNT=0
DEFAULT_NODE_VOLTAGE=0
DUPLICATE_FLAG=0
MAXIMUM_SIMULATION_COUNT=1
TIME_ANALYSIS_SETUP_COUNT=0
INPUT_VARIABLES=[]
BOARD_INPUT=[]
BOARD_OUTPUT=[]
BOARD_INFO=None
arduinoSerialData=None
board_input_data="00000000"
circuit_simulation_flag=True
maincircuit_setup_simulation_flag=True
serial_communication_flag=True
clocking_flag=True
serial_sync_time=0.03
ddr_DATA=0
DDR_DATA=0
subckt_node_count=-1
CONNECT_OUT_NODE_OUT=[]
CONNECT_OUT_NODE_IN=[]
TRUTH_TABLE_INPUT_DATA_FILENAME=None
TRUTH_TABLE_OUTPUT_DATA_FILENAME=None
TIME_ANALYSIS_DATA_FILENAME=None
tt_time_flag=0
mcktstp_time_flag=0
MAINCKT_PRINT_ARRAY=[]
scan_sampling_time=0.01
print_plot_result1_flag=True
print_plot_result2_flag=True
ANALYSIS_DATA=None
ANALYSIS_DATA_FILENAME=None
end_time=0
tt_time_print_flag=True
tt_time_plot_flag=True
###########################################################################################################################
files=glob.glob(PREPATH+"/SUBCIRCUITS_USER_DEFINED/*")
for f in files:
os.remove(f)
###########################################################################################################################
def is_number(n):
try:
int(n)
return True
except ValueError:
return False
###########################################################################################################################
|
[
"os.path.dirname",
"os.remove",
"error_handle.display_error",
"glob.glob"
] |
[((5236, 5286), 'glob.glob', 'glob.glob', (["(PREPATH + '/SUBCIRCUITS_USER_DEFINED/*')"], {}), "(PREPATH + '/SUBCIRCUITS_USER_DEFINED/*')\n", (5245, 5286), False, 'import glob\n'), ((3072, 3101), 'error_handle.display_error', 'eh.display_error', (['(0)', '(0)', '(-4)', '(0)'], {}), '(0, 0, -4, 0)\n', (3088, 3101), True, 'import error_handle as eh\n'), ((5306, 5318), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (5315, 5318), False, 'import os\n'), ((2973, 2998), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2988, 2998), False, 'import os\n'), ((3162, 3202), 'error_handle.display_error', 'eh.display_error', (['(0)', '(0)', '(-3)', 'CIRCUIT_FILE'], {}), '(0, 0, -3, CIRCUIT_FILE)\n', (3178, 3202), True, 'import error_handle as eh\n')]
|
from slackapptk.request.any import AnyRequest
from slackapptk.web.classes.view import View
__all__ = [
'AnyRequest',
'ViewRequest',
'View'
]
class ViewRequest(AnyRequest):
def __init__(
self,
app,
payload
):
super().__init__(
app=app,
rqst_type=payload['type'],
rqst_data=payload,
user_id=payload['user']['id']
)
self.view = View.from_view(view=payload['view'])
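# Minimal usage sketch (illustrative; only the payload fields accessed in
# __init__ are shown, and the values are placeholders):
#
#   payload = {
#       "type": "view_submission",
#       "user": {"id": "U0123456"},
#       "view": {...},          # a Slack view payload dict
#   }
#   rqst = ViewRequest(app=my_app, payload=payload)
#   rqst.view               # -> View object built from the payload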
|
[
"slackapptk.web.classes.view.View.from_view"
] |
[((444, 480), 'slackapptk.web.classes.view.View.from_view', 'View.from_view', ([], {'view': "payload['view']"}), "(view=payload['view'])\n", (458, 480), False, 'from slackapptk.web.classes.view import View\n')]
|
from packaging import version as version_parser
from deps_report.models import Dependency
from deps_report.models.results import VersionResult
def get_display_output_for_dependency(dependency: Dependency) -> str:
"""Get display name for dependency with some details (transitive, dev-only...)."""
properties = []
if dependency.for_dev:
properties.append("dev")
if dependency.transitive:
properties.append("transitive")
if len(properties) == 0:
return dependency.name
return f"{dependency.name} ({','.join(properties)})"
def get_number_of_dependencies_with_outdated_major(results: list[VersionResult]) -> int:
"""Get the number of dependencies with outdated major versions."""
count = 0
for result in results:
latest_version = version_parser.parse(result.latest_version)
installed_version = version_parser.parse(result.installed_version)
if isinstance(latest_version, version_parser.LegacyVersion) or isinstance(
installed_version, version_parser.LegacyVersion
):
continue
if latest_version.major > installed_version.major:
count += 1
return count
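# Minimal usage sketch (illustrative; assumes VersionResult can be built with
# the two version fields read above -- other fields may also be required):
#
#   result = VersionResult(installed_version="1.4.2", latest_version="2.0.0")
#   get_number_of_dependencies_with_outdated_major([result])  # -> 1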
|
[
"packaging.version.parse"
] |
[((801, 844), 'packaging.version.parse', 'version_parser.parse', (['result.latest_version'], {}), '(result.latest_version)\n', (821, 844), True, 'from packaging import version as version_parser\n'), ((873, 919), 'packaging.version.parse', 'version_parser.parse', (['result.installed_version'], {}), '(result.installed_version)\n', (893, 919), True, 'from packaging import version as version_parser\n')]
|
""" Module of realisation choice relevant information in articles """
import langdetect
import openpyxl
import pandas as pd
import modules.pytextrank.pytextrank.pytextrank as pyt
from nltk.corpus import wordnet
from modules.kku.trans.mtranslate.mtranslate import translate
class Article:
""" Class of articles """
def __init__(self, number: int, name: str, punkt1=None, punkt2=None,
punkt3=None, punkt4=None, punkt5=None, punkt6=None,
punkt7=None, punkt8=None) -> None:
""" Creates article """
self.number = number
self.name = name
self.punkt1 = punkt1
self.punkt2 = punkt2
self.punkt3 = punkt3
self.punkt4 = punkt4
self.punkt5 = punkt5
self.punkt6 = punkt6
self.punkt7 = punkt7
self.punkt8 = punkt8
self.translated_name = None
self.translated_punkt1 = None
self.translated_punkt2 = None
self.translated_punkt3 = None
self.translated_punkt4 = None
self.translated_punkt5 = None
self.translated_punkt6 = None
self.translated_punkt7 = None
self.translated_punkt8 = None
self.relevant_words = []
    def translate_name(self) -> None:
        """ Translates the name into English in order to later use the
        Google natural language API. The result is stored in
        self.translated_name """
translated = translate(self.name, "en")
self.translated_name = translated
def translate_punkts(self) -> None:
""" Translates punkt into english in order to use later Natural
language Google Api."""
punkts = [self.punkt1, self.punkt2, self.punkt3, self.punkt4,
self.punkt5, self.punkt6, self.punkt7, self.punkt8]
for i in range(8):
try:
translated = translate(punkts[i], "en")
if i == 0:
self.translated_punkt1 = translated
if i == 1:
self.translated_punkt2 = translated
if i == 2:
self.translated_punkt3 = translated
if i == 3:
self.translated_punkt4 = translated
if i == 4:
self.translated_punkt5 = translated
if i == 5:
self.translated_punkt6 = translated
if i == 6:
self.translated_punkt7 = translated
if i == 7:
self.translated_punkt8 = translated
except TypeError:
pass
def relevant_information_in_name(self) -> list:
""" Choising relevant information in name of articles
This words will be in self.relevant_name """
text = self.name
sentence, keywords = pyt.top_keywords_sentences(text, phrase_limit=15,
sent_word_limit=150)
for i in self.translated_name.split():
self.relevant_words.append(i)
return self.relevant_words
def relevant_information_in_punkts(self) -> list:
""" Choising relevant indormation in each punkts of articles
This words will be in self.relevant_point(number)"""
for i in range(8):
if i == 0:
if self.translated_punkt1 is not None:
text = self.translated_punkt1
else:
text = ""
if i == 1:
if self.translated_punkt2 is not None:
text = self.translated_punkt2
else:
text = ""
if i == 2:
if self.translated_punkt3 is not None:
text = self.translated_punkt3
else:
text = ""
if i == 3:
if self.translated_punkt4 is not None:
text = self.translated_punkt4
else:
text = ""
if i == 4:
if self.translated_punkt5 is not None:
text = self.translated_punkt5
else:
text = ""
if i == 5:
if self.translated_punkt6 is not None:
text = self.translated_punkt6
else:
text = ""
if i == 6:
if self.translated_punkt7 is not None:
text = self.translated_punkt7
else:
text = ""
if i == 7:
if self.translated_punkt8 is not None:
text = self.translated_punkt8
else:
text = ""
sentence, keywords = pyt.top_keywords_sentences(text,
phrase_limit=5000,
sent_word_limit=5000)
if keywords.split() != []:
for k in keywords.split():
self.relevant_words.append(k)
return self.relevant_words
def find_synonyms(self, relevant_words_list) -> set:
""" Returns list of synonyms and this words, that can be relevant
to each article """
synonyms = []
for word in relevant_words_list:
try:
for syn in wordnet.synsets(word):
for k in syn.lemmas():
translated = translate(k.name(), "uk")
synonyms.append(translated)
except AttributeError:
pass
return set(self.synonyms_check(synonyms))
def synonyms_check(self, set_syn) -> list:
""" Check are all word in synonyms set ukrainian """
for word in set_syn:
try:
if langdetect.detect(word) == "uk":
pass
else:
set_syn.remove(word)
except langdetect.lang_detect_exception.LangDetectException:
set_syn.remove(word)
return set_syn
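# Usage sketch (illustrative; the article name is a placeholder):
#   art = Article(1, "Назва статті")
#   art.translate_name()
#   words = art.relevant_information_in_name()
#   ukr_synonyms = art.find_synonyms(words)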
if __name__ == "__main__":
RESULT = pd.read_excel("articles_with_punkts.xlsx")
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
FINAL = [1, 2]
for i in RESULT.index:
try:
ARTICLE = RESULT.loc[[i]]
INDEX = int(ARTICLE["Рядки"].str.find("."))
NUMBER = int(ARTICLE["Рядки"].str[6:INDEX].str.strip())
NAME = ARTICLE["Рядки"].str[INDEX + 1:].str.strip().to_string()[3:].strip()
PUNKTS = []
for k in ARTICLE:
if ARTICLE[k].to_string().find("NaN") == -1:
if k != "Рядки":
PUNKTS.append(k)
ARTICLE_CL = Article(NUMBER, NAME)
try:
ARTICLE_CL.punkt1 = ARTICLE[PUNKTS[0]].to_string()[3:].strip()
ARTICLE_CL.punkt2 = ARTICLE[PUNKTS[1]].to_string()[3:].strip()
ARTICLE_CL.punkt3 = ARTICLE[PUNKTS[2]].to_string()[3:].strip()
ARTICLE_CL.punkt4 = ARTICLE[PUNKTS[3]].to_string()[3:].strip()
ARTICLE_CL.punkt5 = ARTICLE[PUNKTS[4]].to_string()[3:].strip()
ARTICLE_CL.punkt6 = ARTICLE[PUNKTS[5]].to_string()[3:].strip()
ARTICLE_CL.punkt7 = ARTICLE[PUNKTS[6]].to_string()[3:].strip()
ARTICLE_CL.punkt8 = ARTICLE[PUNKTS[7]].to_string()[3:].strip()
except IndexError:
pass
            ARTICLE_CL.translate_name()
ARTICLE_CL.translate_punkts()
ARTICLE_CL.relevant_information_in_name()
WORDS = ARTICLE_CL.relevant_information_in_punkts()
UKR_WORDS = ARTICLE_CL.find_synonyms(WORDS)
print(UKR_WORDS)
FINAL.append(UKR_WORDS)
except ValueError:
pass
workbook = openpyxl.load_workbook("articles_with_punkts.xlsx")
worksheet = workbook.active
worksheet["J"] = FINAL
workbook.save("articles_with_punkts.xlsx")
|
[
"modules.kku.trans.mtranslate.mtranslate.translate",
"nltk.corpus.wordnet.synsets",
"openpyxl.load_workbook",
"pandas.read_excel",
"modules.pytextrank.pytextrank.pytextrank.top_keywords_sentences",
"langdetect.detect",
"pandas.set_option"
] |
[((6125, 6167), 'pandas.read_excel', 'pd.read_excel', (['"""articles_with_punkts.xlsx"""'], {}), "('articles_with_punkts.xlsx')\n", (6138, 6167), True, 'import pandas as pd\n'), ((6172, 6208), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', 'None'], {}), "('display.width', None)\n", (6185, 6208), True, 'import pandas as pd\n'), ((6213, 6254), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (6226, 6254), True, 'import pandas as pd\n'), ((7885, 7936), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['"""articles_with_punkts.xlsx"""'], {}), "('articles_with_punkts.xlsx')\n", (7907, 7936), False, 'import openpyxl\n'), ((1420, 1446), 'modules.kku.trans.mtranslate.mtranslate.translate', 'translate', (['self.name', '"""en"""'], {}), "(self.name, 'en')\n", (1429, 1446), False, 'from modules.kku.trans.mtranslate.mtranslate import translate\n'), ((2811, 2881), 'modules.pytextrank.pytextrank.pytextrank.top_keywords_sentences', 'pyt.top_keywords_sentences', (['text'], {'phrase_limit': '(15)', 'sent_word_limit': '(150)'}), '(text, phrase_limit=15, sent_word_limit=150)\n', (2837, 2881), True, 'import modules.pytextrank.pytextrank.pytextrank as pyt\n'), ((4748, 4821), 'modules.pytextrank.pytextrank.pytextrank.top_keywords_sentences', 'pyt.top_keywords_sentences', (['text'], {'phrase_limit': '(5000)', 'sent_word_limit': '(5000)'}), '(text, phrase_limit=5000, sent_word_limit=5000)\n', (4774, 4821), True, 'import modules.pytextrank.pytextrank.pytextrank as pyt\n'), ((1847, 1873), 'modules.kku.trans.mtranslate.mtranslate.translate', 'translate', (['punkts[i]', '"""en"""'], {}), "(punkts[i], 'en')\n", (1856, 1873), False, 'from modules.kku.trans.mtranslate.mtranslate import translate\n'), ((5368, 5389), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word'], {}), '(word)\n', (5383, 5389), False, 'from nltk.corpus import wordnet\n'), ((5829, 5852), 'langdetect.detect', 'langdetect.detect', (['word'], {}), '(word)\n', (5846, 5852), False, 'import langdetect\n')]
|
import numpy as np
# dictionary describing options available to tune this algorithm
options = {
"peak_size": {"purpose": "Estimate of the peak size, in pixels. If 'auto', attempts to determine automatically. Otherwise, this should be an integer.",
"default": "auto",
"type": "int",
"has_auto": True},
"refine_positions": {"purpose": "TODO",
"default": False,
"type": "bool"},
"progress_object": {"purpose": "Object used to present a progress bar to the user. For definition, see UI_interface folder.",
"default": None},
}
def run(data):
# TODO: need to actually implement this peak finder.
return np.zeros((4,2))
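# Usage sketch (illustrative): a host application reads tunable defaults from
# `options` and calls run() on image data; the stub above currently ignores
# its input and returns four (row, col) positions of zeros.
#
#   peaks = run(np.random.rand(64, 64))   # -> array of shape (4, 2)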
|
[
"numpy.zeros"
] |
[((750, 766), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (758, 766), True, 'import numpy as np\n')]
|
import os
import typing
from sqlalchemy.orm import Session
import const
from database import models
from database.database import SessionLocal
from db.api_key import add_initial_api_key_for_admin
from db.wireguard import server_add_on_init
from script.wireguard import is_installed, start_interface, is_running, load_environment_clients
def setup_on_start():
_db: Session = SessionLocal()
servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
for s in servers:
try:
last_state = s.is_running
if is_installed() and last_state and is_running(s):
start_interface(s)
except Exception as e:
print(e)
if const.CLIENT:
load_environment_clients(_db)
if const.SERVER_INIT_INTERFACE is not None:
server_add_on_init(_db)
if const.SERVER_STARTUP_API_KEY is not None:
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)
_db.close()
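# Illustrative notes (assumptions, not taken from this file): the const flags
# consumed above are typically derived from environment variables, e.g.
# CLIENT toggles loading clients shipped via the environment,
# SERVER_INIT_INTERFACE triggers creating a default server interface on boot,
# and SERVER_STARTUP_API_KEY seeds an API key for ADMIN_USERNAME.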
|
[
"script.wireguard.load_environment_clients",
"script.wireguard.start_interface",
"script.wireguard.is_running",
"database.database.SessionLocal",
"script.wireguard.is_installed",
"db.api_key.add_initial_api_key_for_admin",
"os.getenv",
"db.wireguard.server_add_on_init"
] |
[((382, 396), 'database.database.SessionLocal', 'SessionLocal', ([], {}), '()\n', (394, 396), False, 'from database.database import SessionLocal\n'), ((728, 757), 'script.wireguard.load_environment_clients', 'load_environment_clients', (['_db'], {}), '(_db)\n', (752, 757), False, 'from script.wireguard import is_installed, start_interface, is_running, load_environment_clients\n'), ((815, 838), 'db.wireguard.server_add_on_init', 'server_add_on_init', (['_db'], {}), '(_db)\n', (833, 838), False, 'from db.wireguard import server_add_on_init\n'), ((914, 941), 'os.getenv', 'os.getenv', (['"""ADMIN_USERNAME"""'], {}), "('ADMIN_USERNAME')\n", (923, 941), False, 'import os\n'), ((950, 1035), 'db.api_key.add_initial_api_key_for_admin', 'add_initial_api_key_for_admin', (['_db', 'const.SERVER_STARTUP_API_KEY', 'ADMIN_USERNAME'], {}), '(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME\n )\n', (979, 1035), False, 'from db.api_key import add_initial_api_key_for_admin\n'), ((562, 576), 'script.wireguard.is_installed', 'is_installed', ([], {}), '()\n', (574, 576), False, 'from script.wireguard import is_installed, start_interface, is_running, load_environment_clients\n'), ((596, 609), 'script.wireguard.is_running', 'is_running', (['s'], {}), '(s)\n', (606, 609), False, 'from script.wireguard import is_installed, start_interface, is_running, load_environment_clients\n'), ((627, 645), 'script.wireguard.start_interface', 'start_interface', (['s'], {}), '(s)\n', (642, 645), False, 'from script.wireguard import is_installed, start_interface, is_running, load_environment_clients\n')]
|
import hashlib
import logging
import os
import shutil
import struct
import tempfile
import falcon
from datalad_service.common.stream import update_file
from datalad_service.handlers.git import _check_git_access, _handle_failed_access
def hashdirmixed(key):
"""Python implementation of git-annex hashing for non-bare git repos
https://git-annex.branchable.com/internals/hashing/"""
digest = hashlib.md5(key.encode()).digest()
first_word = struct.unpack('<I', digest[:4])[0]
nums = [first_word >> (6 * x) & 31 for x in range(4)]
letters = ["0123456789zqjxkmvwgpfZQJXKMVWGPF"[i] for i in nums]
return ("{0:s}{1:s}".format(letters[1], letters[0]), "{0:s}{1:s}".format(letters[3], letters[2]))
def key_to_path(key):
return os.path.join('.git', 'annex', 'objects', *hashdirmixed(key), key, key)
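# Illustrative sketch: for an annex key KEY, key_to_path(KEY) resolves to
# '.git/annex/objects/<ab>/<cd>/KEY/KEY', where the two two-character
# directory names are derived from the MD5 of the key via hashdirmixed's
# base-32 alphabet (placeholders shown; the actual letters depend on the key):
#   key_to_path('SHA256E-s0--deadbeef.txt')
#   # -> '.git/annex/objects/??/??/SHA256E-s0--deadbeef.txt/SHA256E-s0--deadbeef.txt'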
class GitAnnexResource(object):
"""{worker}/{dataset}/annex/{key} serves git-annex object requests
This allows OpenNeuro to act as a special remote, adding or removing objects from .git/annex/objects/
"""
def __init__(self, store):
self.store = store
self.logger = logging.getLogger('datalad_service.' + __name__)
def on_head(self, req, resp, worker, dataset, key):
"""HEAD requests check if objects exist already"""
resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
if not _check_git_access(req, dataset):
return _handle_failed_access(req, resp)
dataset_path = self.store.get_dataset_path(dataset)
annex_object_path = os.path.join(dataset_path, key_to_path(key))
if os.path.exists(annex_object_path):
resp.status = falcon.HTTP_OK
else:
resp.status = falcon.HTTP_NOT_FOUND
def on_get(self, req, resp, worker, dataset, key):
resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
if not _check_git_access(req, dataset):
return _handle_failed_access(req, resp)
dataset_path = self.store.get_dataset_path(dataset)
annex_object_path = os.path.join(dataset_path, key_to_path(key))
if os.path.exists(annex_object_path):
resp.status = falcon.HTTP_OK
fd = open(annex_object_path, 'rb')
resp.stream = fd
resp.stream_len = os.fstat(fd.fileno()).st_size
else:
resp.status = falcon.HTTP_NOT_FOUND
def on_post(self, req, resp, worker, dataset, key):
resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
if not _check_git_access(req, dataset):
return _handle_failed_access(req, resp)
dataset_path = self.store.get_dataset_path(dataset)
annex_object_path = os.path.join(dataset_path, key_to_path(key))
if os.path.exists(annex_object_path):
# Don't allow objects to be replaced
resp.status = falcon.HTTP_CONFLICT
else:
os.makedirs(os.path.dirname(annex_object_path), exist_ok=True)
# Begin writing stream to temp file and hard link once done
# It should not be written unless the full request completes
update_file(annex_object_path, req.stream)
resp.status = falcon.HTTP_OK
def on_delete(self, req, resp, worker, dataset, key):
resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
if not _check_git_access(req, dataset):
return _handle_failed_access(req, resp)
dataset_path = self.store.get_dataset_path(dataset)
annex_object_path = os.path.join(dataset_path, key_to_path(key))
if os.path.exists(annex_object_path):
os.remove(annex_object_path)
resp.status = falcon.HTTP_NO_CONTENT
|
[
"os.remove",
"os.path.dirname",
"struct.unpack",
"os.path.exists",
"datalad_service.handlers.git._handle_failed_access",
"datalad_service.common.stream.update_file",
"datalad_service.handlers.git._check_git_access",
"logging.getLogger"
] |
[((459, 490), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'digest[:4]'], {}), "('<I', digest[:4])\n", (472, 490), False, 'import struct\n'), ((1129, 1177), 'logging.getLogger', 'logging.getLogger', (["('datalad_service.' + __name__)"], {}), "('datalad_service.' + __name__)\n", (1146, 1177), False, 'import logging\n'), ((1616, 1649), 'os.path.exists', 'os.path.exists', (['annex_object_path'], {}), '(annex_object_path)\n', (1630, 1649), False, 'import os\n'), ((2132, 2165), 'os.path.exists', 'os.path.exists', (['annex_object_path'], {}), '(annex_object_path)\n', (2146, 2165), False, 'import os\n'), ((2785, 2818), 'os.path.exists', 'os.path.exists', (['annex_object_path'], {}), '(annex_object_path)\n', (2799, 2818), False, 'import os\n'), ((3627, 3660), 'os.path.exists', 'os.path.exists', (['annex_object_path'], {}), '(annex_object_path)\n', (3641, 3660), False, 'import os\n'), ((1387, 1418), 'datalad_service.handlers.git._check_git_access', '_check_git_access', (['req', 'dataset'], {}), '(req, dataset)\n', (1404, 1418), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((1439, 1471), 'datalad_service.handlers.git._handle_failed_access', '_handle_failed_access', (['req', 'resp'], {}), '(req, resp)\n', (1460, 1471), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((1903, 1934), 'datalad_service.handlers.git._check_git_access', '_check_git_access', (['req', 'dataset'], {}), '(req, dataset)\n', (1920, 1934), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((1955, 1987), 'datalad_service.handlers.git._handle_failed_access', '_handle_failed_access', (['req', 'resp'], {}), '(req, resp)\n', (1976, 1987), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((2556, 2587), 'datalad_service.handlers.git._check_git_access', '_check_git_access', (['req', 'dataset'], {}), '(req, dataset)\n', (2573, 2587), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((2608, 2640), 'datalad_service.handlers.git._handle_failed_access', '_handle_failed_access', (['req', 'resp'], {}), '(req, resp)\n', (2629, 2640), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((3162, 3204), 'datalad_service.common.stream.update_file', 'update_file', (['annex_object_path', 'req.stream'], {}), '(annex_object_path, req.stream)\n', (3173, 3204), False, 'from datalad_service.common.stream import update_file\n'), ((3398, 3429), 'datalad_service.handlers.git._check_git_access', '_check_git_access', (['req', 'dataset'], {}), '(req, dataset)\n', (3415, 3429), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((3450, 3482), 'datalad_service.handlers.git._handle_failed_access', '_handle_failed_access', (['req', 'resp'], {}), '(req, resp)\n', (3471, 3482), False, 'from datalad_service.handlers.git import _check_git_access, _handle_failed_access\n'), ((3674, 3702), 'os.remove', 'os.remove', (['annex_object_path'], {}), '(annex_object_path)\n', (3683, 3702), False, 'import os\n'), ((2954, 2988), 'os.path.dirname', 'os.path.dirname', (['annex_object_path'], {}), '(annex_object_path)\n', (2969, 2988), False, 'import os\n')]
|
from plume.perceptron import PerceptronClassifier
import numpy as np
x_train = np.array([[3, 3], [4, 3], [1, 1]])
y_train = np.array([1, 1, -1])
clf = PerceptronClassifier(dual=False)
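# dual=False presumably selects the primal update rule (w += lr*y*x on mistakes), while the
# default trains the dual/Gram-matrix form (an assumption from the classic primal/dual
# perceptron split; check plume's docs)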
clf.fit(x_train, y_train)
print(clf.get_model())
print(clf.predict(x_train))
clf1 = PerceptronClassifier()
clf1.fit(x_train, y_train)
print(clf1.get_model())
print(clf1.predict(x_train))
|
[
"plume.perceptron.PerceptronClassifier",
"numpy.array"
] |
[((80, 114), 'numpy.array', 'np.array', (['[[3, 3], [4, 3], [1, 1]]'], {}), '([[3, 3], [4, 3], [1, 1]])\n', (88, 114), True, 'import numpy as np\n'), ((125, 145), 'numpy.array', 'np.array', (['[1, 1, -1]'], {}), '([1, 1, -1])\n', (133, 145), True, 'import numpy as np\n'), ((153, 185), 'plume.perceptron.PerceptronClassifier', 'PerceptronClassifier', ([], {'dual': '(False)'}), '(dual=False)\n', (173, 185), False, 'from plume.perceptron import PerceptronClassifier\n'), ((272, 294), 'plume.perceptron.PerceptronClassifier', 'PerceptronClassifier', ([], {}), '()\n', (292, 294), False, 'from plume.perceptron import PerceptronClassifier\n')]
|
import os
import torch
import torch.nn as nn
import numpy as np
import nltk.data
nltk.download('punkt')
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import (BertTokenizer, BertConfig, BertForTokenClassification,
                          AutoModelForTokenClassification, AutoConfig, AdamW,
                          get_linear_schedule_with_warmup)
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from seqeval.metrics import accuracy_score
from sklearn.metrics import f1_score, classification_report, precision_score, recall_score
from tqdm import trange
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize
##########################################################
# import wandb
# from transformers import TrainingArguments, Trainer
# wandb.init(project="project", entity="3rd_year_project")
##########################################################
class NER_BERT(object):
device = torch.device("cuda")
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tag2idx = {'O':0, 'ID':1, 'PHI':2, 'NAME':3, 'CONTACT':4, 'DATE':5, 'AGE':6, 'PROFESSION':7, 'LOCATION':8, 'PAD': 9}
tag_values = ["O","ID", "PHI", "NAME", "CONTACT", "DATE", "AGE", "PROFESSION", "LOCATION", "PAD"]
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', num_labels=len(tag2idx), do_lower_case=False)
    MAX_LEN = 75  # max sequence length, needed for padding
bs = 32 # batch size
"""Abstract class that other NER plugins should implement"""
def __init__(self):
#config = AutoConfig.from_pretrained('ArishkaBelovishka/bert-i2b2')
#self.model = AutoModelForTokenClassification.from_pretrained('ArishkaBelovishka/bert-i2b2', config = config)
        # The following loads your fine-tuned model from the Models folder when it is present.
        # If you just want to run NER, use the hugging-face repository where the model fine-tuned on half of the i2b2 data lives.
if os.path.exists("Models/BERT_epoch-10.pt"):
print("Loading model")
state_dict = torch.load("Models/BERT_epoch-10.pt", map_location=torch.device('cuda'))
print("Loaded model")
self.model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
state_dict = state_dict,
num_labels=len(NER_BERT.tag2idx),
output_attentions = True,
output_hidden_states = True
)
else:
self.model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
num_labels=len(NER_BERT.tag2idx),
output_attentions = True,
output_hidden_states = True
)
def perform_NER(self,text):
"""Implementation of the method that should perform named entity recognition"""
# tokenizer to divide data into sentences (thanks, nltk)
list_of_sents = sent_tokenize(text)
list_of_tuples_by_sent = []
for sent in list_of_sents:
            tokenized_sentence = self.tokenizer.encode(sent, truncation=True)
            # BERT's WordPiece tokenizer splits the sentence internally, so each id in the
            # result is either a special token, a whole word, or a subword piece.
            # truncation=True keeps the encoding within BERT's 512-token limit.
input_ids = torch.tensor([tokenized_sentence])
with torch.no_grad():
# Run inference/classification
output = self.model(input_ids)
label_indices = np.argmax(output[0].to("cpu").numpy(), axis=2)
tokens = self.tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels = [], []
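            # merge WordPiece continuations back into whole words, e.g. ['Ari', '##na']
            # -> 'Arina', keeping the label predicted for the first piece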
for token, label_idx in zip(tokens, label_indices[0]):
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(self.tag_values[label_idx])
new_tokens.append(token)
list_of_tuples = []
for token, label in zip(new_tokens, new_labels):
list_of_tuples.append((token, label))
#print("{}\t{}".format(label, token))
list_of_tuples_by_sent.append(list_of_tuples)
        # remove [CLS] and [SEP] tokens to comply with the XML structure
for i in range(len(list_of_tuples_by_sent)):
for tag in self.tag_values:
if ('[CLS]', tag) in list_of_tuples_by_sent[i]:
list_of_tuples_by_sent[i].remove(('[CLS]', tag))
if ('[SEP]', tag) in list_of_tuples_by_sent[i]:
list_of_tuples_by_sent[i].remove(('[SEP]', tag))
return list_of_tuples_by_sent
# Needed for transform_sequences
def tokenize_and_preserve_labels(self, sentence, text_labels):
tokenized_sentence = []
labels = []
for word, label in zip(sentence, text_labels):
# Tokenize the word and count # of subwords the word is broken into
tokenized_word = NER_BERT.tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
# Add the tokenized word to the final tokenized word list
tokenized_sentence.extend(tokenized_word)
# Add the same label to the new list of labels `n_subwords` times
labels.extend([label] * n_subwords)
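            # e.g. a word split into three WordPiece tokens contributes three copies
            # of its label, keeping tokens and labels aligned one-to-one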
return tokenized_sentence, labels
def transform_sequences(self,tokens_labels):
"""method that transforms sequences of (token,label) into feature sequences. Returns two sequence lists for X and Y"""
print("I am in transform seq")
        # result: one document; result[i] is a sentence in the document, result[i][j] is a word in that sentence
tokenized_sentences = []
labels = []
for index, sentence in enumerate(tokens_labels):
text_labels = []
sentence_to_feed = []
for word_label in sentence:
text_labels.append(word_label[1])
sentence_to_feed.append(word_label[0])
a, b = self.tokenize_and_preserve_labels(sentence_to_feed, text_labels)
tokenized_sentences.append(a)
labels.append(b)
        # Now split the long tokenized sequences into subsequences of fewer than MAX_LEN tokens
        # (well within BERT's 512-token limit) so that no valuable information is lost for NER,
        # i.e. sentences are not simply cut off.
        # i2b2 documents are messy and their sentences are usually far too long, as doctors forget to put full stops...
        # tokenized_sentences AND labels share the same 2d-array structure.
        # Care is needed not to split a word's beginning from its end: "Arina" is tokenized
        # as "Ari" and "##na", and the two pieces cannot be separated without losing meaning.
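        # e.g. with MAX_LEN = 75 a 180-token sentence is emitted as chunks of at most
        # 73 tokens (MAX_LEN - 2), each split index backed off past any leading '##' pieces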
distributed_tokenized_sentences, distributed_labels = [], []
for sent, label in zip(tokenized_sentences, labels):
if len(sent) > NER_BERT.MAX_LEN:
while len(sent) > NER_BERT.MAX_LEN:
#print("I am in while loop to truncate sequence")
index = NER_BERT.MAX_LEN - 2
for i in range(NER_BERT.MAX_LEN - 2, 0, -1):
if sent[i][:2] == "##":
index = index - 1
else:
break
                new_sent = sent[:index]  # keep at most MAX_LEN - 2 tokens, leaving room for the special tokens
new_label = label[:index]
sent = sent[index:] # update given sent
label = label[index:]
distributed_tokenized_sentences.append(new_sent)
distributed_labels.append(new_label)
distributed_tokenized_sentences.append(sent)
distributed_labels.append(label)
#print(sent)
else:
distributed_tokenized_sentences.append(sent)
distributed_labels.append(label)
input_ids = pad_sequences([NER_BERT.tokenizer.convert_tokens_to_ids(txt) for txt in distributed_tokenized_sentences],
maxlen=NER_BERT.MAX_LEN, dtype="long", value=0.0,
truncating="post", padding="post")
tags = pad_sequences([[NER_BERT.tag2idx.get(l) for l in lab] for lab in distributed_labels],
maxlen=NER_BERT.MAX_LEN, value=NER_BERT.tag2idx["PAD"], padding="post",
dtype="long", truncating="post")
# Result is pair X (array of sentences, where each sentence is an array of words) and Y (array of labels)
return input_ids, tags
def learn(self, X_train,Y_train, epochs=1):
"""Function that actually train the algorithm"""
# if torch.cuda.is_available():
# self.model.cuda()
tr_masks = [[float(i != 0.0) for i in ii] for ii in X_train]
print("READY TO CREATE SOME TENZORS!!!!!!!!!!!!!!!!!!!!!!!!!!")
tr_inputs = torch.tensor(X_train).type(torch.long)
tr_tags = torch.tensor(Y_train).type(torch.long)
tr_masks = torch.tensor(tr_masks).type(torch.long)
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=NER_BERT.bs)
print("READY TO PREPARE OPTIMIZER!!!!!!!!!!!!!!!!!!!!!!!!!!")
        # Weight decay in the Adam optimiser (adaptive gradient algorithm) is a regularisation
        # technique, discussed extensively in https://arxiv.org/abs/1711.05101
        # (like L2 regularisation for SGD, but not identical): it regularises the model's
        # objective function to prevent overfitting.
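        # AdamW decouples the decay from the gradient: each step applies
        # w <- w - lr * adam_update - lr * weight_decay * w, rather than folding
        # an L2 term into the gradient as classic Adam + L2 would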
FULL_FINETUNING = True
if FULL_FINETUNING:
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01}, # in AdamW implementation (default: 1e-2)
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(self.model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
# TODO: change to new implementation of AdamW: torch.optim.AdamW(...)
optimizer = AdamW(
optimizer_grouped_parameters,
lr=3e-5,
eps=1e-8
)
max_grad_norm = 1.0
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
# We need it to adjust learning rate if the accuracy does not change between epochs much,
# basically pushing the model to learn.
# https://sajjjadayobi.github.io/blog/markdown/2021/05/23/adamw-warmup.html
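        # with num_warmup_steps=0 this is a pure linear decay:
        # lr_t = lr_0 * (1 - t / total_steps)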
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
print("START TRAINING!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
## Store the average loss after each epoch so we can plot them.
loss_values, validation_loss_values = [], []
# just for intermediate model save naming
epoch_num = 3
for _ in trange(epochs, desc="Epoch"):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
# clean the cache not to fail with video memory
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
# just for intermediate model save naming
epoch_num += 1
# Put the model into training mode.
self.model.train()
# Reset the total loss for this epoch.
total_loss = 0
print("Start backprop and optimisation!!! Epoch has passed!!!!!!!!!!!!!!!!!!!!!!!")
# Training loop
for step, batch in enumerate(train_dataloader):
print("We are in the batch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# add batch to gpu
batch = tuple(b.to(NER_BERT.device) for b in batch)
b_input_ids, b_input_mask, b_labels = batch
# Always clear any previously calculated gradients before performing a backward pass.
self.model.zero_grad()
# forward pass
# This will return the loss (rather than the model output)
# because we have provided the `labels`.
outputs = self.model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# get the loss
loss = outputs[0]
# Perform a backward pass to calculate the gradients.
loss.backward()
# track train loss
total_loss += loss.item()
# Clip the norm of the gradient
# This is to help prevent the "exploding gradients" problem.
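                # i.e. gradients are rescaled so their global L2 norm is at most
                # max_grad_norm (1.0); smaller gradients pass through unchanged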
torch.nn.utils.clip_grad_norm_(parameters=self.model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
# Update the learning rate.
scheduler.step()
print("We processed one batch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_dataloader)
print("Average train loss: {}".format(avg_train_loss))
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
# Save intermediate weights of the model, i.e. if computer goes crazy and drops the training or you
# want to test the performance from different epochs
torch.save(self.model.state_dict(), os.path.join("Models_intermediate/", 'BERT_epoch-{}.pt'.format(epoch_num)))
#Plot the learning curve.
plt.figure()
plt.plot(loss_values, 'b-o', label="training loss")
# Label the plot.
plt.title("Learning curve")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
def evaluate(self, X_test,Y_test):
if torch.cuda.is_available():
self.model.cuda()
"""Function to evaluate algorithm"""
val_masks = [[float(i != 0.0) for i in ii] for ii in X_test]
val_inputs = torch.tensor(X_test).type(torch.long)
val_tags = torch.tensor(Y_test).type(torch.long)
val_masks = torch.tensor(val_masks).type(torch.long)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=NER_BERT.bs)
# seed
# for _ in range(2):
#valid_dataloader = DataLoader(valid_data, shuffle=True, batch_size=NER_BERT.bs)
# for one random seed of valid_dataloader:
# ...
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
        # Put the model into evaluation mode so that dropout and batch-normalization layers behave deterministically and give consistent results
self.model.eval()
# Reset the validation loss for this epoch.
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predictions , true_labels = [], []
for batch in valid_dataloader:
batch = tuple(t.to(self.device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients,
# saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions.
# This will return the logits rather than the loss because we have not provided labels.
outputs = self.model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# Move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences.
eval_loss += outputs[0].mean().item()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
true_labels.extend(label_ids)
eval_loss = eval_loss / len(valid_dataloader)
print("Validation loss: {}".format(eval_loss))
pred_tags = [NER_BERT.tag_values[p_i] for p, l in zip(predictions, true_labels)
for p_i, l_i in zip(p, l) if NER_BERT.tag_values[l_i] != "PAD"]
###############################################################################
# reconstruct given text for purposes of algorithms' performance comparison
# our X_test is again a list of sentences, i.e. 2d array
tokens = [self.tokenizer.convert_ids_to_tokens(sent) for sent in X_test]
# Unpack tokens into 1d array to be able to go through it with labels
# [PAD] and not just PAD because that is what BERT actually puts
tokens_flat = [item for sublist in tokens for item in sublist if item != "[PAD]"]
#for sentence in tokens:
new_tokens, new_labels = [], []
for token, pred in zip(tokens_flat, pred_tags):
#print("{}\t{}".format(token, pred))
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(pred)
new_tokens.append(token)
###############################################################################
valid_tags = [NER_BERT.tag_values[l_i] for l in true_labels
for l_i in l if NER_BERT.tag_values[l_i] != "PAD"]
print("Validation Accuracy: {}".format(accuracy_score(valid_tags, pred_tags))) # was other way around, why?
print("Validation F1-Score: {}".format(f1_score(valid_tags, pred_tags, average='weighted'))) # correct
print("Validation precision: {}".format(precision_score(valid_tags, pred_tags, average='weighted')))
print("Validation recall: {}".format(recall_score(valid_tags, pred_tags, average='weighted')))
labels = ["ID", "PHI", "NAME", "CONTACT", "DATE", "AGE",
"PROFESSION", "LOCATION"]
print(classification_report(valid_tags, pred_tags, digits=4, labels=labels))
print()
###############################################################################
# to evaluate union/intersection of algorithms
# for t, l in zip(new_tokens, new_labels):
# print("{}\t{}".format(t, l))
return new_labels
# # Use plot styling from seaborn.
# sns.set(style='darkgrid')
# # Increase the plot size and font size.
# sns.set(font_scale=1.5)
# plt.rcParams["figure.figsize"] = (12,6)
# # Plot the learning curve.
# plt.plot(loss_values, 'b-o', label="training loss")
# plt.plot(validation_loss_values, 'r-o', label="validation loss")
# # Label the plot.
# plt.title("Learning curve")
# plt.xlabel("Epoch")
# plt.ylabel("Loss")
# plt.legend()
# plt.show()
def save(self, model_path):
"""
        Function to save model. Models are saved as PyTorch state-dict (.pt) files in the Models directory. Name is passed as argument
:param model_path: Name of the model file
:return: Doesn't return anything
"""
torch.save(self.model.state_dict(), "Models/"+model_path+".pt")
print("Saved model to disk")
|
[
"matplotlib.pyplot.title",
"seqeval.metrics.accuracy_score",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"sklearn.metrics.f1_score",
"torch.utils.data.TensorDataset",
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.utils.data.SequentialSampler",
"matplotlib.pyplot.show",
"tqdm.trange",
"matplotlib.pyplot.legend",
"sklearn.metrics.recall_score",
"transformers.get_linear_schedule_with_warmup",
"transformers.AdamW",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"nltk.tokenize.sent_tokenize",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.xlabel",
"torch.tensor"
] |
[((1162, 1182), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1174, 1182), False, 'import torch\n'), ((2192, 2233), 'os.path.exists', 'os.path.exists', (['"""Models/BERT_epoch-10.pt"""'], {}), "('Models/BERT_epoch-10.pt')\n", (2206, 2233), False, 'import os\n'), ((3178, 3197), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (3191, 3197), False, 'from nltk.tokenize import sent_tokenize\n'), ((9640, 9683), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tr_inputs', 'tr_masks', 'tr_tags'], {}), '(tr_inputs, tr_masks, tr_tags)\n', (9653, 9683), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((9708, 9733), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (9721, 9733), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((9761, 9830), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'NER_BERT.bs'}), '(train_data, sampler=train_sampler, batch_size=NER_BERT.bs)\n', (9771, 9830), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((11073, 11129), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': '(3e-05)', 'eps': '(1e-08)'}), '(optimizer_grouped_parameters, lr=3e-05, eps=1e-08)\n', (11078, 11129), False, 'from transformers import BertForTokenClassification, AdamW\n'), ((11636, 11734), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (11667, 11734), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((12054, 12082), 'tqdm.trange', 'trange', (['epochs'], {'desc': '"""Epoch"""'}), "(epochs, desc='Epoch')\n", (12060, 12082), False, 'from tqdm import trange\n'), ((14939, 14951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14949, 14951), True, 'import matplotlib.pyplot as plt\n'), ((14960, 15011), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_values', '"""b-o"""'], {'label': '"""training loss"""'}), "(loss_values, 'b-o', label='training loss')\n", (14968, 15011), True, 'import matplotlib.pyplot as plt\n'), ((15046, 15073), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning curve"""'], {}), "('Learning curve')\n", (15055, 15073), True, 'import matplotlib.pyplot as plt\n'), ((15082, 15101), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (15092, 15101), True, 'import matplotlib.pyplot as plt\n'), ((15110, 15128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (15120, 15128), True, 'import matplotlib.pyplot as plt\n'), ((15137, 15149), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15147, 15149), True, 'import matplotlib.pyplot as plt\n'), ((15159, 15169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15167, 15169), True, 'import matplotlib.pyplot as plt\n'), ((15221, 15246), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15244, 15246), False, 'import torch\n'), ((15591, 15637), 'torch.utils.data.TensorDataset', 'TensorDataset', (['val_inputs', 'val_masks', 'val_tags'], {}), '(val_inputs, val_masks, val_tags)\n', (15604, 15637), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((15662, 
15691), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['valid_data'], {}), '(valid_data)\n', (15679, 15691), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((15719, 15788), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'sampler': 'valid_sampler', 'batch_size': 'NER_BERT.bs'}), '(valid_data, sampler=valid_sampler, batch_size=NER_BERT.bs)\n', (15729, 15788), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3724, 3758), 'torch.tensor', 'torch.tensor', (['[tokenized_sentence]'], {}), '([tokenized_sentence])\n', (3736, 3758), False, 'import torch\n'), ((19642, 19711), 'sklearn.metrics.classification_report', 'classification_report', (['valid_tags', 'pred_tags'], {'digits': '(4)', 'labels': 'labels'}), '(valid_tags, pred_tags, digits=4, labels=labels)\n', (19663, 19711), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((3777, 3792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3790, 3792), False, 'import torch\n'), ((9463, 9484), 'torch.tensor', 'torch.tensor', (['X_train'], {}), '(X_train)\n', (9475, 9484), False, 'import torch\n'), ((9520, 9541), 'torch.tensor', 'torch.tensor', (['Y_train'], {}), '(Y_train)\n', (9532, 9541), False, 'import torch\n'), ((9578, 9600), 'torch.tensor', 'torch.tensor', (['tr_masks'], {}), '(tr_masks)\n', (9590, 9600), False, 'import torch\n'), ((15413, 15433), 'torch.tensor', 'torch.tensor', (['X_test'], {}), '(X_test)\n', (15425, 15433), False, 'import torch\n'), ((15470, 15490), 'torch.tensor', 'torch.tensor', (['Y_test'], {}), '(Y_test)\n', (15482, 15490), False, 'import torch\n'), ((15528, 15551), 'torch.tensor', 'torch.tensor', (['val_masks'], {}), '(val_masks)\n', (15540, 15551), False, 'import torch\n'), ((16879, 16894), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16892, 16894), False, 'import torch\n'), ((19128, 19165), 'seqeval.metrics.accuracy_score', 'accuracy_score', (['valid_tags', 'pred_tags'], {}), '(valid_tags, pred_tags)\n', (19142, 19165), False, 'from seqeval.metrics import accuracy_score\n'), ((19245, 19296), 'sklearn.metrics.f1_score', 'f1_score', (['valid_tags', 'pred_tags'], {'average': '"""weighted"""'}), "(valid_tags, pred_tags, average='weighted')\n", (19253, 19296), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((19357, 19415), 'sklearn.metrics.precision_score', 'precision_score', (['valid_tags', 'pred_tags'], {'average': '"""weighted"""'}), "(valid_tags, pred_tags, average='weighted')\n", (19372, 19415), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((19463, 19518), 'sklearn.metrics.recall_score', 'recall_score', (['valid_tags', 'pred_tags'], {'average': '"""weighted"""'}), "(valid_tags, pred_tags, average='weighted')\n", (19475, 19518), False, 'from sklearn.metrics import f1_score, classification_report, precision_score, recall_score\n'), ((2346, 2366), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2358, 2366), False, 'import torch\n'), ((17531, 17556), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(2)'}), '(logits, axis=2)\n', (17540, 17556), True, 'import numpy as np\n')]
|
import sys
import requests
from urllib.parse import urljoin
JFROG_API_KEY_HEADER_NAME = 'X-JFrog-Art-Api'
class DockerRegistryPagination:
def __init__(self, concatenating_key):
self.concatenating_key = concatenating_key
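    # The Docker Registry v2 API paginates with RFC-5988 Link headers (rel="next");
    # requests exposes them as response.links, which __call__ follows until exhausted.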
def __call__(self, url, *args, **kwargs):
response = requests.get(url, *args, **kwargs)
response.raise_for_status()
concatenated_list = response.json().get(self.concatenating_key, [])
while 'next' in response.links.keys():
url = urljoin(url, response.links['next']['url'])
response = requests.get(url, *args, **kwargs)
response.raise_for_status()
concatenated_list.extend(response.json().get(self.concatenating_key, []))
return concatenated_list
class ArtifactoryIntegrationLogic:
def __init__(self, base_url, api_key, default_repo=None, username=None):
self.username = username
self.base_url = base_url
if not self.base_url.startswith('https://'):
self.base_url = 'https://' + base_url
if self.base_url.endswith('/'):
self.base_url = self.base_url[:-1]
self.api_key = api_key
self.default_repo = default_repo
def get_artifactory_headers(self):
return {
JFROG_API_KEY_HEADER_NAME: self.api_key,
}
def _get_all_repos_data(self):
res = requests.get(
self.base_url + '/artifactory/api/repositories',
headers=self.get_artifactory_headers(),
)
if res.status_code != 200:
if res.status_code == 403:
raise Exception(
'Artifactory token is not valid or has been revoked.'
)
raise Exception(
f'Failed to get repositories. '
f'Error: {res.text}. Code {res.status_code}'
)
return res.json()
def list_repos(self, search=''):
all_repos_data = self._get_all_repos_data()
return sorted([i['key'] for i in all_repos_data if search.lower() in i['key'].lower()])
def get_repo_type(self, repo_name):
all_repos_data = self._get_all_repos_data()
for i in all_repos_data:
if i['key'] == repo_name:
return i['packageType']
raise Exception(
f'Repository {repo_name} does not exist or user does not have permissions for it.'
)
def _list_docker_folders(self, repo, search=''):
request_func = DockerRegistryPagination('repositories')
try:
repos = request_func(
self.base_url + '/artifactory/api/docker/%s/v2/_catalog' % repo,
headers=self.get_artifactory_headers(),
)
return [i for i in repos if search.lower() in i.lower()]
except requests.exceptions.HTTPError as exc:
raise Exception(
f'Failed to get images list using docker catalog. '
f'Error: {exc.response.text}. Code {exc.response.status_code}'
) from exc
def list_folders(self, repo=None, search=''):
if not repo:
repo = self.default_repo
if not repo:
raise ValueError('Either send a repo or set the default repo for this to work.')
folders = self._list_docker_folders(repo, search)
return sorted(folders)
def _list_docker_images(self, folder, repo, search=''):
request_func = DockerRegistryPagination('tags')
try:
tags = request_func(
self.base_url + '/artifactory/api/docker/%s/v2/%s/tags/list' % (repo, folder),
headers=self.get_artifactory_headers()
)
return [i for i in tags if search.lower() in i.lower()]
except requests.exceptions.HTTPError as exc:
raise Exception(
f'Failed to get tag list using docker catalog. '
f'Error: {exc.response.text}. Code {exc.response.status_code}'
) from exc
def list_images(self, folder='', repo=None, search=''):
if not repo:
repo = self.default_repo
if not repo:
raise ValueError('Either send a repo or set the default repo for this to work.')
images = self._list_docker_images(folder, repo, search)
return sorted(images)
rt_domain = sys.argv[1]
api_key = sys.argv[2]
user = sys.argv[3]
with open("images.csv", "w") as outfile:
rt = ArtifactoryIntegrationLogic(f"https://{rt_domain}", api_key, username=user)
repositories = rt.list_repos()
for repository in repositories:
repo_type = rt.get_repo_type(repository).lower()
if repo_type == "docker":
repo_folders = rt.list_folders(repo=repository)
for repo_folder in repo_folders:
folder_images = rt.list_images(repo=repository, folder=repo_folder)
for folder_image in folder_images:
outfile.write(f"{repository}, {repo_folder}, {folder_image}\r\n")
|
[
"urllib.parse.urljoin",
"requests.get"
] |
[((301, 335), 'requests.get', 'requests.get', (['url', '*args'], {}), '(url, *args, **kwargs)\n', (313, 335), False, 'import requests\n'), ((513, 556), 'urllib.parse.urljoin', 'urljoin', (['url', "response.links['next']['url']"], {}), "(url, response.links['next']['url'])\n", (520, 556), False, 'from urllib.parse import urljoin\n'), ((580, 614), 'requests.get', 'requests.get', (['url', '*args'], {}), '(url, *args, **kwargs)\n', (592, 614), False, 'import requests\n')]
|
import pytest, fastai
import torch, os, gc  # used directly below; fastai's star import also provides them
from fastai.utils.mem import *
from math import isclose
# Important: When modifying this test module, make sure to validate that it runs w/o
# GPU, by running: CUDA_VISIBLE_DEVICES="" pytest
# most tests run regardless of whether CUDA is available; without a GPU they simply see zeros
if torch.cuda.is_available():
have_cuda = 1
if "CUDA_VISIBLE_DEVICES" in os.environ and not len(os.environ["CUDA_VISIBLE_DEVICES"]):
print('detected no gpu env emulation with CUDA_VISIBLE_DEVICES=""')
have_cuda = 0
# This must run before any tests:
# force pytorch to load cuDNN and its kernels to claim unreclaimable memory (~0.5GB) if it hasn't done so already, so that we get correct measurements
if have_cuda: torch.ones((1, 1)).cuda()
def gpu_mem_consume_some(n): return torch.ones((n, n)).cuda()
def gpu_mem_consume_16mb(): return gpu_mem_consume_some(2000)
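# a 2000x2000 float32 tensor is 16,000,000 bytes (~15.3 MiB), hence the name and the
# `diff_expected_min = 15` check in the consumption test below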
def gpu_cache_clear(): torch.cuda.empty_cache()
def gpu_mem_reclaim(): gc.collect(); gpu_cache_clear()
def check_gpu_mem_zeros(total, used, free):
assert total == 0, "have total GPU RAM"
assert used == 0, "have used GPU RAM"
assert free == 0, "have free GPU RAM"
def check_gpu_mem_non_zeros(total, used, free):
assert total > 0, "have total GPU RAM"
assert used > 0, "have used GPU RAM"
assert free > 0, "have free GPU RAM"
def test_gpu_mem_by_id():
# test by currently selected device
total, used, free = get_gpu_mem()
if have_cuda: check_gpu_mem_non_zeros(total, used, free)
else: check_gpu_mem_zeros(total, used, free)
# wrong id that can't exist
check_gpu_mem_zeros(*get_gpu_mem(99))
def test_gpu_mem_all():
# all available gpus
mem_per_id = get_gpu_mem_all()
if have_cuda:
for mem in mem_per_id: check_gpu_mem_non_zeros(*mem)
else:
assert len(mem_per_id) == 0
def test_gpu_with_max_free_mem():
# all available gpus
id, free = get_gpu_with_max_free_mem()
if have_cuda:
assert id != None, "have gpu id"
assert free > 0, "have gpu free ram"
else:
assert id == None, "have no gpu id"
assert free == 0, "have no gpu free ram"
@pytest.mark.skipif(not have_cuda, reason="requires cuda")
def test_gpu_mem_measure_consumed_reclaimed():
gpu_mem_reclaim()
used_before = get_gpu_mem()[1]
# 1. measure memory consumption
    x1 = gpu_mem_consume_16mb()
used_after = get_gpu_mem()[1]
diff_real = used_after - used_before
diff_expected_min = 15 # could be slightly different
assert diff_real >= diff_expected_min, f"check gpu consumption, expected at least {diff_expected_min}, got {diff_real} diff"
# 2. measure memory reclamation
del x1 # this may or may not trigger automatic gc.collect - can't rely on that
gpu_mem_reclaim() # force gc.collect and cache clearing
used_after_reclaimed = get_gpu_mem()[1]
# allow 2mb tolerance for rounding of 1 mb on each side
assert isclose(used_before, used_after_reclaimed, abs_tol=2), f"reclaim all consumed memory, started with {used_before}, now {used_after_reclaimed} used"
|
[
"pytest.mark.skipif",
"math.isclose"
] |
[((2163, 2220), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_cuda)'], {'reason': '"""requires cuda"""'}), "(not have_cuda, reason='requires cuda')\n", (2181, 2220), False, 'import pytest, fastai\n'), ((2951, 3004), 'math.isclose', 'isclose', (['used_before', 'used_after_reclaimed'], {'abs_tol': '(2)'}), '(used_before, used_after_reclaimed, abs_tol=2)\n', (2958, 3004), False, 'from math import isclose\n')]
|
'''
The Normal CDF
100xp
Now that you have a feel for how the Normal PDF looks, let's consider its CDF. Using the
samples you generated in the last exercise (in your namespace as samples_std1, samples_std3,
and samples_std10), generate and plot the CDFs.
Instructions
-Use your ecdf() function to generate x and y values for CDFs: x_std1, y_std1, x_std3, y_std3
and x_std10, y_std10, respectively.
-Plot all three CDFs as dots (do not forget the marker and linestyle keyword arguments!).
-Make a 2% margin in your plot.
-Hit submit to make a legend, showing which standard deviations you used, and to show your plot.
There is no need to label the axes because we have not defined what is being described by the Normal distribution; we are just looking at shapes of CDFs.
'''
import numpy as np
import matplotlib.pyplot as plt
def ecdf(data):
"""Compute ECDF for a one-dimensional array of measurements."""
# Number of data points: n
n = len(data)
# x-data for the ECDF: x
x = np.sort(data)
# y-data for the ECDF: y
y = np.arange(1, n+1) / n
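    # y rises in steps of 1/n from 1/n to 1, so the ECDF reaches exactly 1 at max(data)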
return x, y
# Seed random number generator
np.random.seed(42)
# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3,
# samples_std10
samples_std1 = np.random.normal(20, 1, size=100000)
samples_std3 = np.random.normal(20, 3, size=100000)
samples_std10 = np.random.normal(20, 10, size=100000)
# Generate CDFs
x_std1, y_std1 = ecdf(samples_std1)
x_std3, y_std3 = ecdf(samples_std3)
x_std10, y_std10 = ecdf(samples_std10)
# Plot CDFs
_ = plt.plot(x_std1, y_std1, marker='.', linestyle='none')
_ = plt.plot(x_std3, y_std3, marker='.', linestyle='none')
_ = plt.plot(x_std10, y_std10, marker='.', linestyle='none')
# Make 2% margin
plt.margins(0.02)
# Make a legend and show the plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
|
[
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.legend",
"numpy.sort",
"numpy.arange",
"numpy.random.normal"
] |
[((1123, 1141), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1137, 1141), True, 'import numpy as np\n'), ((1274, 1310), 'numpy.random.normal', 'np.random.normal', (['(20)', '(1)'], {'size': '(100000)'}), '(20, 1, size=100000)\n', (1290, 1310), True, 'import numpy as np\n'), ((1326, 1362), 'numpy.random.normal', 'np.random.normal', (['(20)', '(3)'], {'size': '(100000)'}), '(20, 3, size=100000)\n', (1342, 1362), True, 'import numpy as np\n'), ((1379, 1416), 'numpy.random.normal', 'np.random.normal', (['(20)', '(10)'], {'size': '(100000)'}), '(20, 10, size=100000)\n', (1395, 1416), True, 'import numpy as np\n'), ((1562, 1616), 'matplotlib.pyplot.plot', 'plt.plot', (['x_std1', 'y_std1'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_std1, y_std1, marker='.', linestyle='none')\n", (1570, 1616), True, 'import matplotlib.pyplot as plt\n'), ((1621, 1675), 'matplotlib.pyplot.plot', 'plt.plot', (['x_std3', 'y_std3'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_std3, y_std3, marker='.', linestyle='none')\n", (1629, 1675), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1736), 'matplotlib.pyplot.plot', 'plt.plot', (['x_std10', 'y_std10'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_std10, y_std10, marker='.', linestyle='none')\n", (1688, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1772), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.02)'], {}), '(0.02)\n', (1766, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1877), 'matplotlib.pyplot.legend', 'plt.legend', (["('std = 1', 'std = 3', 'std = 10')"], {'loc': '"""lower right"""'}), "(('std = 1', 'std = 3', 'std = 10'), loc='lower right')\n", (1822, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1886, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1000, 1013), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (1007, 1013), True, 'import numpy as np\n'), ((1052, 1071), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1061, 1071), True, 'import numpy as np\n')]
|
import sys
a="hello"
def myfunc():
print("xxxx")
a="du"
print (a)
myfunc()
print(a)
x="xxxxx\""
print(x, x[3:9])
print (str.format("abc {}",a))
for x in range(10):
if x%2 == 0:
print(x)
else:
pass
# while True:
# print(a)
powOf = lambda a : a*a
print(powOf(4))
def lbdInside(n):
return lambda a:a+n
print (lbdInside(3)(5))
class Xxx:
Name=""
def __init__(self):
self.Name="hello"
x = Xxx()
print (x.Name)
import helloworld
print(helloworld)
print(dir(helloworld))
import datetime
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
import json
print(json.dumps({"A":"a"}))
print(json.dumps(("a","b")))
print(json.dumps([1,"a"]))
print(json.dumps({"a","b"}))
import threading, queue
|
[
"datetime.datetime.now",
"json.dumps"
] |
[((639, 661), 'json.dumps', 'json.dumps', (["{'A': 'a'}"], {}), "({'A': 'a'})\n", (649, 661), False, 'import json\n'), ((669, 691), 'json.dumps', 'json.dumps', (["('a', 'b')"], {}), "(('a', 'b'))\n", (679, 691), False, 'import json\n'), ((699, 719), 'json.dumps', 'json.dumps', (["[1, 'a']"], {}), "([1, 'a'])\n", (709, 719), False, 'import json\n'), ((727, 749), 'json.dumps', 'json.dumps', (["{'a', 'b'}"], {}), "({'a', 'b'})\n", (737, 749), False, 'import json\n'), ((565, 588), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (586, 588), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
Description
-----------
This module defines the :obj:`ParaMol.Tasks.parametrization.Parametrization` class, which is a ParaMol task that performs force field parametrization.
"""
import numpy as np
import logging
# ParaMol libraries
from .task import *
from ..Optimizers.optimizer import *
from ..Parameter_space.parameter_space import *
from ..Objective_function.objective_function import *
from ..Utils.interface import *
# ------------------------------------------------------------
# #
# PARAMETRIZATION TASK #
# #
# ------------------------------------------------------------
class Parametrization(Task):
"""
ParaMol parametrization task.
"""
def __init__(self):
pass
# ---------------------------------------------------------- #
# #
# PUBLIC METHODS #
# #
# ---------------------------------------------------------- #
def run_task(self, settings, systems, parameter_space=None, objective_function=None, optimizer=None, interface=None, adaptive_parametrization=False, apply_charge_correction=False, restart=False):
"""
Method that performs the standard ParaMol parametrization.
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
List containing instances of ParaMol systems.
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instances of ParameterSpace.
objective_function : :obj:`ParaMol.Objective_function.objective_function.ObjectiveFunction`
Instance of the objective function.
optimizer : one of the optimizers defined in the subpackage :obj:`ParaMol.Optimizers`
Instance of the optimizer.
interface: :obj:`ParaMol.Utils.interface.ParaMolInterface`
ParaMol system instance.
adaptive_parametrization: bool
            Flag that signals whether this parametrization is being done inside an adaptive parametrization loop. If `False` the system xml file is not written in this method (default is `False`).
apply_charge_correction : bool
Whether or not to apply charge correction. Important if charges are being optimized.
restart : bool
Flag that controls whether or not to perform a restart.
Returns
-------
systems, parameter_space, objective_function, optimizer
"""
print("!=================================================================================!")
print("! PARAMETRIZATION !")
print("!=================================================================================!")
for system in systems:
# Perform basic assertions
self._perform_assertions(settings, system)
# Create force field optimizable for every system
system.force_field.create_force_field_optimizable()
# Create IO Interface
if interface is None:
interface = ParaMolInterface()
else:
assert type(interface) is ParaMolInterface
# Create ParameterSpace
if parameter_space is None:
parameter_space = self.create_parameter_space(settings, systems, interface, restart=restart)
else:
assert type(parameter_space) is ParameterSpace
# Create properties and objective function
if objective_function is None:
properties = self.create_properties(settings.properties, settings.parameter_space, systems, parameter_space)
objective_function = self.create_objective_function(settings.objective_function, settings.restart, parameter_space, properties, systems)
else:
assert type(objective_function) is ObjectiveFunction
if settings.objective_function["parallel"]:
# Number of structures might have been changed and therefore it is necessary to re-initialize the parallel objective function
objective_function.init_parallel()
# Recalculate variance in case reference data has changed.
if objective_function.properties is not None:
for property in objective_function.properties:
property.calculate_variance()
'''
for prop in objective_function.properties:
if prop.name == "REGULARIZATION":
# TODO: if commented, reg in adaptive parametrization is done w.r.t. to the initial parameters at iter 0
#prop.set_initial_parameters_values(parameter_space.initial_optimizable_parameters_values_scaled)
pass
'''
# Print Initial Info of Objective Function
objective_function.f(parameter_space.optimizable_parameters_values_scaled, opt_mode=False)
# Create optimizer
if optimizer is None:
optimizer = self.create_optimizer(settings.optimizer["method"],
settings.optimizer[settings.optimizer["method"].lower()])
else:
assert type(optimizer) is Optimizer
# ================================================================================= #
# APPLY CHARGE CORRECTION #
# ================================================================================= #
if apply_charge_correction:
for system in systems:
# Apply charge correction
self._apply_charge_correction(system)
# Create optimizable force field
system.force_field.create_force_field_optimizable()
# Get optimizable parameters
parameter_space.get_optimizable_parameters(systems)
            # Calculate prior widths and scaling constants, then apply Jacobi preconditioning (they may have changed if the charges changed).
            # Alternatively, assume the change is small enough to have no effect (a good approximation), hence the two calls below may stay commented out.
# parameter_space.calculate_scaling_constants()
# parameter_space.calculate_prior_widths()
parameter_space.jacobi_preconditioning()
# Update the OpenMM context
parameter_space.update_systems(systems, parameter_space.optimizable_parameters_values_scaled)
# ================================================================================= #
# END APPLY CHARGE CORRECTION #
# ================================================================================= #
# ================================================================================= #
# PARAMETERS OPTIMZIATION #
# ================================================================================= #
# Perform Optimization
print("Using {} structures in the optimization.".format(np.sum([system.n_structures for system in systems])))
parameters_values = self._perform_optimization(settings, optimizer, objective_function, parameter_space)
# Update the parameters in the force field
parameter_space.update_systems(systems, parameters_values)
# Print Final Info of Objective Function
objective_function.f(parameter_space.optimizable_parameters_values_scaled, opt_mode=False)
# Write ParameterSpace restart file
self.write_restart_pickle(settings.restart, interface, "restart_parameter_space_file", parameter_space.__dict__)
# Write final system to xml file
if not adaptive_parametrization:
for system in systems:
system.engine.write_system_xml("{}_reparametrized.xml".format(system.name))
print("!=================================================================================!")
print("! PARAMETRIZATION TERMINATED SUCCESSFULLY :) !")
print("!=================================================================================!")
return systems, parameter_space, objective_function, optimizer
# -----------------------------------------------------------#
# #
# PRIVATE METHODS #
# #
# -----------------------------------------------------------#
def _perform_optimization(self, settings, optimizer, objective_function, parameter_space):
"""
Method that wraps the functions used to perform the optimization of the parameters.
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of parameter space.
objective_function : :obj:`ParaMol.Objective_function.objective_function.ObjectiveFunction`
Instance of objective function.
optimizer : :obj:`ParaMol.Optimizers.optimizer.Optimizer`
Instance of optimizer.
Returns
-------
parameters_values: list
List of optimized parameters
"""
# Determine whether to perform constrained or unconstrained optimization
constrained = False
for parameter in parameter_space.optimizable_parameters:
if parameter.param_key == "charge":
# If charges are present in the optimizable parameters, perform constrained optimization
constrained = True
break
print("Number of parameters to be optimized: {}.".format(len(parameter_space.optimizable_parameters_values_scaled)))
if constrained:
print("ParaMol will perform constrained optimization.")
constraints = self._get_constraints(scipy_method=settings.optimizer["scipy"]["method"],
parameter_space=parameter_space)
parameters_values = optimizer.run_optimization(f=objective_function.f,
parameters_values=parameter_space.optimizable_parameters_values_scaled,
constraints=constraints)
else:
print("ParaMol will perform unconstrained optimization.")
parameters_values = optimizer.run_optimization(f=objective_function.f,
parameters_values=parameter_space.optimizable_parameters_values_scaled)
return parameters_values
def _apply_charge_correction(self, system):
"""
Method that applies charge correction to the system.
Parameters
----------
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Notes
----
Due to numerical errors, the numerical total charge of the system may not be equal to the real total charge of the system.
        Hence, in order to overcome this problem, which causes unexpected behaviour especially when constraints are being applied, the excess or deficiency of charge is shared equally amongst all atoms.
This usually changes the charge in each atom by a very small (negligible) amount.
Note that this method only changes the charges in the ParaMol ForceField of the ParaMolSystem. Therefore, it is required to update the OpenMM systems after this method is called.
Returns
-------
total_charge : float
Final total charge of the system.
"""
if "NonbondedForce" in system.force_field.force_field:
# Get total charge and calculate charge correction
total_charge = self._get_total_charge(system)
logging.info("Applying charge correction.")
logging.info("Total charge before correction: {}e .".format(total_charge))
charge_correction = total_charge / system.n_atoms
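            # e.g. a numerical excess of +0.0008e spread over 40 atoms subtracts a
            # negligible 0.00002e from each atom (illustrative numbers)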
logging.info("Charge correction {}e per atom.".format(charge_correction))
# Add charge correction to all atoms
for sub_force in system.force_field.force_field["NonbondedForce"]:
for nonbonded_term in sub_force:
nonbonded_term.parameters["charge"].value -= charge_correction
total_charge = self._get_total_charge(system)
logging.info("Total charge after correction: {}e .\n".format(total_charge))
return total_charge
else:
logging.info("Not applying charge correction.")
return 1
# -----------------------------------------------------------#
# #
# STATIC METHODS #
# #
# -----------------------------------------------------------#
@staticmethod
def _get_total_charge(system):
"""
Method that gets the system's total charge as in the ParaMol ForceField of the ParaMolSystem.
Parameters
----------
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Returns
-------
total_charge : float
Final total charge of the system.
"""
total_charge = 0.0
if "NonbondedForce" in system.force_field.force_field:
for sub_force in system.force_field.force_field["NonbondedForce"]:
for nonbonded_term in sub_force:
total_charge += nonbonded_term.parameters["charge"].value
return total_charge
@staticmethod
def _perform_assertions(settings, system):
"""
Method that asserts if the parametrization asked by the user contains the necessary data (coordinates, forces, energies, esp).
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Returns
-------
True
"""
assert system.ref_coordinates is not None, "Conformations data was not set."
if settings.properties["include_energies"]:
assert system.ref_energies is not None, "Energies were not set."
if settings.properties["include_forces"]:
assert system.ref_forces is not None, "Forces were not set."
if settings.properties["include_esp"]:
assert system.ref_esp is not None, "ESP was not set."
assert system.ref_esp_grid is not None, "ESP was not set."
return True
@staticmethod
def _get_constraints(scipy_method, parameter_space, total_charge=0.0, threshold=1e-8):
"""
Method that gets the constraints to be passed into the SciPy optimizer.
Parameters
----------
scipy_method : str
            SciPy method. Should be "COBYLA", "SLSQP" or "trust-constr".
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of parameter space.
total_charge : float
System's total charge
threshold : float
Constraint's threshold.
Returns
-------
list
List with constraints.
"""
if scipy_method == "COBYLA":
# Constraint functions must all be >=0 (a single function if only 1 constraint).
# Each function takes the parameters x as its first argument, and it can return either a single number or an array or list of numbers.
constraint_vector_charges = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
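            # Together, each pair of inequalities brackets the physical total charge:
            # -threshold <= sum_i multiplicity_i * scale_charge * x_i - total_charge <= threshold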
constraints = [
{'type': 'ineq', 'fun': lambda x, b: x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) - total_charge + threshold, 'args': (constraint_vector_charges,)},
{'type': 'ineq', 'fun': lambda x, b: -x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) + total_charge + threshold, 'args': (constraint_vector_charges,)}]
return constraints
elif scipy_method == "SLSQP":
            # Total charge constraint expressed as a pair of inequalities bracketing the target total charge
constraint_vector_charges = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
constraints = [
{'type': 'ineq', 'fun': lambda x, b: x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) - total_charge + threshold, 'args': (constraint_vector_charges,)},
{'type': 'ineq', 'fun': lambda x, b: -x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) + total_charge + threshold, 'args': (constraint_vector_charges,)}]
return constraints
elif scipy_method == "trust-constr":
from scipy.optimize import LinearConstraint
constraint_vector = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
return LinearConstraint(constraint_vector, [total_charge - threshold], [total_charge + threshold])
else:
raise NotImplementedError("SciPy method {} does not support constraints.".format(scipy_method))
|
[
"logging.info",
"scipy.optimize.LinearConstraint",
"numpy.sum",
"numpy.asarray"
] |
[((12519, 12562), 'logging.info', 'logging.info', (['"""Applying charge correction."""'], {}), "('Applying charge correction.')\n", (12531, 12562), False, 'import logging\n'), ((13266, 13313), 'logging.info', 'logging.info', (['"""Not applying charge correction."""'], {}), "('Not applying charge correction.')\n", (13278, 13313), False, 'import logging\n'), ((7538, 7589), 'numpy.sum', 'np.sum', (['[system.n_structures for system in systems]'], {}), '([system.n_structures for system in systems])\n', (7544, 7589), True, 'import numpy as np\n'), ((17981, 18077), 'scipy.optimize.LinearConstraint', 'LinearConstraint', (['constraint_vector', '[total_charge - threshold]', '[total_charge + threshold]'], {}), '(constraint_vector, [total_charge - threshold], [\n total_charge + threshold])\n', (17997, 18077), False, 'from scipy.optimize import LinearConstraint\n'), ((16661, 16674), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (16671, 16674), True, 'import numpy as np\n'), ((16854, 16867), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (16864, 16867), True, 'import numpy as np\n'), ((17358, 17371), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (17368, 17371), True, 'import numpy as np\n'), ((17551, 17564), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (17561, 17564), True, 'import numpy as np\n')]
|
import sys
from flask import Flask
import telegram
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
from config import config
bot = None
spotify = None
def create_app(config_name):
global bot
global spotify
app = Flask(__name__)
app.config.from_object(config[config_name])
bot = telegram.Bot(config[config_name].TELEGRAM_API_TOKEN)
webhook_url = config[config_name].WEBHOOK_URL
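    # Telegram delivers updates via HTTP POST to webhook_url; set_webhook
    # returns True on success, which is checked below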
status = bot.set_webhook(webhook_url)
if not status:
print('Webhook setup failed')
sys.exit(1)
else:
print('Your webhook URL has been set to "{}"'.format(webhook_url))
client_credentials_manager = SpotifyClientCredentials(
config[config_name].SPOTIFY_CLIENT_ID,
config[config_name].SPOTIFY_CLIENT_SECRET
)
spotify = spotipy.Spotify(
client_credentials_manager=client_credentials_manager,
auth=config[config_name].SPOTIFY_TOKEN
)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
|
[
"flask.Flask",
"telegram.Bot",
"spotipy.Spotify",
"spotipy.oauth2.SpotifyClientCredentials",
"sys.exit"
] |
[((278, 293), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'from flask import Flask\n'), ((353, 405), 'telegram.Bot', 'telegram.Bot', (['config[config_name].TELEGRAM_API_TOKEN'], {}), '(config[config_name].TELEGRAM_API_TOKEN)\n', (365, 405), False, 'import telegram\n'), ((694, 805), 'spotipy.oauth2.SpotifyClientCredentials', 'SpotifyClientCredentials', (['config[config_name].SPOTIFY_CLIENT_ID', 'config[config_name].SPOTIFY_CLIENT_SECRET'], {}), '(config[config_name].SPOTIFY_CLIENT_ID, config[\n config_name].SPOTIFY_CLIENT_SECRET)\n', (718, 805), False, 'from spotipy.oauth2 import SpotifyClientCredentials\n'), ((837, 952), 'spotipy.Spotify', 'spotipy.Spotify', ([], {'client_credentials_manager': 'client_credentials_manager', 'auth': 'config[config_name].SPOTIFY_TOKEN'}), '(client_credentials_manager=client_credentials_manager, auth\n =config[config_name].SPOTIFY_TOKEN)\n', (852, 952), False, 'import spotipy\n'), ((563, 574), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (571, 574), False, 'import sys\n')]
|
import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
@bottle.route('/')
def index():
return '''
Battlesnake documentation can be found at
<a href="https://docs.battlesnake.com">https://docs.battlesnake.com</a>.
'''
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
def start():
data = bottle.request.json
"""
TODO: If you intend to have a stateful snake AI,
initialize your snake state here using the
request's data if necessary.
"""
print(json.dumps(data))
color = "#0F74F4"
head = "pixel"
tail = "pixel"
return start_response((color, head, tail))
@bottle.post('/move')
def move():
data = bottle.request.json
"""
TODO: Using the data from the endpoint request object, your
snake AI must choose a direction to move in.
"""
game_data = json.dumps(data)
print(game_data)
you = data["you"]
body = you["body"]
head = body[0]
health = you["health"]
width = data["board"]["width"]
height = data["board"]["height"]
directions = {"up": 0, "down": 0, "left": 0, "right": 0}
snakes = data["board"]["snakes"]
foods = data["board"]["food"]
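    # Scoring heuristic: each direction accumulates a score; distance to the
    # nearest enemy snake is added (farther is safer) and distance to the
    # nearest food is subtracted (closer is better), then the best-scoring
    # legal direction is chosen.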
# get avg distance of closest snake to each direction
min_distances = [0, 0, 0, 0]
# order: up, down, left, right
for snake in snakes:
# make sure snake is not you
if(snake["id"] != you["id"]):
distances = [0, 0, 0, 0]
for coords in snake["body"]:
                # accumulate the Manhattan distance from every body segment
                # to each of the four squares adjacent to our head
                distances[0] += abs(coords["x"] - head["x"])
                distances[0] += abs(coords["y"] - (head["y"]-1))
                distances[1] += abs(coords["x"] - head["x"])
                distances[1] += abs(coords["y"] - (head["y"]+1))
                distances[2] += abs(coords["x"] - (head["x"]-1))
                distances[2] += abs(coords["y"] - head["y"])
                distances[3] += abs(coords["x"] - (head["x"]+1))
                distances[3] += abs(coords["y"] - head["y"])
if (health < 50):
# if health is low, use avg distance (focus on getting food)
# if high, use total distance (focus on avoiding snakes)
distances = [distance/len(snake["body"]) for distance in distances]
for i in range(4):
if(min_distances[i] == 0 or distances[i] < min_distances[i]):
min_distances[i] = distances[i]
directions["up"] += min_distances[0]
directions["down"] += min_distances[1]
directions["left"] += min_distances[2]
directions["right"] += min_distances[3]
min_distances = [-1, -1, -1, -1]
# get distance of closest food
for food in foods:
distances = [0, 0, 0, 0]
distances[0] = abs(food["x"] - head["x"])
distances[0] += abs(food["y"] - (head["y"]-1))
distances[1] = abs(food["x"] - head["x"])
distances[1] += abs(food["y"] - (head["y"]+1))
distances[2] = abs(food["x"] - (head["x"]-1))
distances[2] += abs(food["y"] - head["y"])
distances[3] = abs(food["x"] - (head["x"]+1))
distances[3] += abs(food["y"] - head["y"])
for i in range(4):
if(min_distances[i] == -1 or distances[i] < min_distances[i]):
min_distances[i] = distances[i]
directions["up"] -= min_distances[0]
directions["down"] -= min_distances[1]
directions["left"] -= min_distances[2]
directions["right"] -= min_distances[3]
# stop snake from eating itself
for seg in body:
if("up" in directions and seg["x"] == head["x"] and seg["y"] == head["y"]-1):
del directions["up"]
if("down" in directions and seg["x"] == head["x"] and seg["y"] == head["y"]+1):
del directions["down"]
if("left" in directions and seg["x"] == head["x"]-1 and seg["y"] == head["y"]):
del directions["left"]
if("right" in directions and seg["x"] == head["x"]+1 and seg["y"] == head["y"]):
del directions["right"]
if len(directions) <= 1:
# either snake is trapped, or there is only one viable direction
# in either case, there is no point in checking any more segments
break
# avoid wall collisions
if("up" in directions and head["y"] == 0):
del directions["up"]
if("down" in directions and head["y"] == height-1):
del directions["down"]
if ("left" in directions and head["x"] == 0):
del directions["left"]
if ("right" in directions and head["x"] == width-1):
del directions["right"]
print(directions)
if len(directions) >= 1:
current_movement = max(directions, key=directions.get)
else:
# snake is trapped, just return any direction
current_movement = "up"
return move_response(current_movement)
@bottle.post('/end')
def end():
data = bottle.request.json
"""
TODO: If your snake AI was stateful,
clean up any stateful objects here.
"""
print(json.dumps(data))
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
|
[
"api.ping_response",
"bottle.default_app",
"bottle.static_file",
"json.dumps",
"bottle.route",
"api.start_response",
"api.move_response",
"api.end_response",
"os.getenv",
"bottle.post"
] |
[((129, 146), 'bottle.route', 'bottle.route', (['"""/"""'], {}), "('/')\n", (141, 146), False, 'import bottle\n'), ((300, 335), 'bottle.route', 'bottle.route', (['"""/static/<path:path>"""'], {}), "('/static/<path:path>')\n", (312, 335), False, 'import bottle\n'), ((562, 582), 'bottle.post', 'bottle.post', (['"""/ping"""'], {}), "('/ping')\n", (573, 582), False, 'import bottle\n'), ((757, 778), 'bottle.post', 'bottle.post', (['"""/start"""'], {}), "('/start')\n", (768, 778), False, 'import bottle\n'), ((1083, 1103), 'bottle.post', 'bottle.post', (['"""/move"""'], {}), "('/move')\n", (1094, 1103), False, 'import bottle\n'), ((4739, 4758), 'bottle.post', 'bottle.post', (['"""/end"""'], {}), "('/end')\n", (4750, 4758), False, 'import bottle\n'), ((4994, 5014), 'bottle.default_app', 'bottle.default_app', ([], {}), '()\n', (5012, 5014), False, 'import bottle\n'), ((518, 558), 'bottle.static_file', 'bottle.static_file', (['path'], {'root': '"""static/"""'}), "(path, root='static/')\n", (536, 558), False, 'import bottle\n'), ((738, 753), 'api.ping_response', 'ping_response', ([], {}), '()\n', (751, 753), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((1045, 1080), 'api.start_response', 'start_response', (['(color, head, tail)'], {}), '((color, head, tail))\n', (1059, 1080), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((1277, 1293), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1287, 1293), False, 'import json\n'), ((4704, 4735), 'api.move_response', 'move_response', (['current_movement'], {}), '(current_movement)\n', (4717, 4735), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((4919, 4933), 'api.end_response', 'end_response', ([], {}), '()\n', (4931, 4933), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((966, 982), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (976, 982), False, 'import json\n'), ((4892, 4908), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4902, 4908), False, 'import json\n'), ((5078, 5104), 'os.getenv', 'os.getenv', (['"""IP"""', '"""0.0.0.0"""'], {}), "('IP', '0.0.0.0')\n", (5087, 5104), False, 'import os\n'), ((5113, 5138), 'os.getenv', 'os.getenv', (['"""PORT"""', '"""8080"""'], {}), "('PORT', '8080')\n", (5122, 5138), False, 'import os\n'), ((5148, 5172), 'os.getenv', 'os.getenv', (['"""DEBUG"""', '(True)'], {}), "('DEBUG', True)\n", (5157, 5172), False, 'import os\n')]
|
#!/usr/bin/env python3
import site
import configs
SOURCE_CODE_FILEPATH = '/home/jovyan/work/src'
def set_import_path(import_path=configs.SOURCE_CODE_FILEPATH):
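    # site.addsitedir() appends the directory to sys.path and also processes
    # any .pth files it contains, unlike a plain sys.path.append().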
site.addsitedir(import_path)
print("Added the following path to the import paths "
"list:\n{}".format(import_path))
if __name__ == '__main__':
set_import_path()
|
[
"site.addsitedir"
] |
[((168, 196), 'site.addsitedir', 'site.addsitedir', (['import_path'], {}), '(import_path)\n', (183, 196), False, 'import site\n')]
|
from setuptools import setup
setup(
name="tf-ffcv",
version="0.0.2",
packages=["tf_ffcv"],
description='Utilitaries to integrate tensorflow to FFCV',
author='MadryLab',
author_email='<EMAIL>',
)
|
[
"setuptools.setup"
] |
[((30, 201), 'setuptools.setup', 'setup', ([], {'name': '"""tf-ffcv"""', 'version': '"""0.0.2"""', 'packages': "['tf_ffcv']", 'description': '"""Utilitaries to integrate tensorflow to FFCV"""', 'author': '"""MadryLab"""', 'author_email': '"""<EMAIL>"""'}), "(name='tf-ffcv', version='0.0.2', packages=['tf_ffcv'], description=\n 'Utilitaries to integrate tensorflow to FFCV', author='MadryLab',\n author_email='<EMAIL>')\n", (35, 201), False, 'from setuptools import setup\n')]
|
# Copyright (c) 2011-2017 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from django.urls import reverse
from django.test import TestCase
from huxley.utils.test import models
class RegistrationAdminTest(TestCase):
fixtures = ['conference']
def test_preference_export(self):
'''Tests that the admin panel can export registration data.'''
registration = models.new_registration()
models.new_superuser(username='superuser', password='<PASSWORD>')
self.client.login(username='superuser', password='<PASSWORD>')
response = self.client.get(reverse('admin:core_registration_info'))
header = [
"Registration Time", "School Name", "Total Number of Delegates",
"Beginners", "Intermediates", "Advanced", "Spanish Speakers",
"Chinese Speakers", "Assignments Finalized", "Waivers Complete",
"Delegate Fees Paid", "Delegate Fees Owed", "Paid Registration Fee?",
"Country 1", "Country 2", "Country 3", "Country 4", "Country 5",
"Country 6", "Country 7", "Country 8", "Country 9", "Country 10",
"Committee Preferences", "Registration Comments"
]
fields_csv = ",".join(map(str, header)) + "\r\n"
country_preferences = [cp
for cp in registration.country_preferences.all(
).order_by('countrypreference')]
country_preferences += [''] * (10 - len(country_preferences))
committee_preferences = [', '.join(
cp.name for cp in registration.committee_preferences.all())]
fields = [
registration.registered_at,
registration.school.name,
registration.num_beginner_delegates +
registration.num_intermediate_delegates +
registration.num_advanced_delegates,
registration.num_beginner_delegates,
registration.num_intermediate_delegates,
registration.num_advanced_delegates,
registration.num_spanish_speaking_delegates,
registration.num_chinese_speaking_delegates,
registration.assignments_finalized,
registration.waivers_completed,
registration.delegate_fees_paid,
registration.delegate_fees_owed,
registration.registration_fee_paid
]
fields.extend(country_preferences)
fields.extend(committee_preferences)
fields.extend(registration.registration_comments)
fields_csv += ','.join(map(str, fields))
        self.assertEqual(fields_csv, response.content[:-3].decode('utf-8'))
|
[
"django.urls.reverse",
"huxley.utils.test.models.new_superuser",
"huxley.utils.test.models.new_registration"
] |
[((457, 482), 'huxley.utils.test.models.new_registration', 'models.new_registration', ([], {}), '()\n', (480, 482), False, 'from huxley.utils.test import models\n'), ((492, 557), 'huxley.utils.test.models.new_superuser', 'models.new_superuser', ([], {'username': '"""superuser"""', 'password': '"""<PASSWORD>"""'}), "(username='superuser', password='<PASSWORD>')\n", (512, 557), False, 'from huxley.utils.test import models\n'), ((665, 704), 'django.urls.reverse', 'reverse', (['"""admin:core_registration_info"""'], {}), "('admin:core_registration_info')\n", (672, 704), False, 'from django.urls import reverse\n')]
|
import sys
sys.path.append('../')
import torchnet as tnt
from torch.autograd import Variable
import torch.nn.functional as F
from model_utils.load_utils import load_model, SAVE_ROOT
from model_utils.model_utils import get_layer_names
MODEL_NAME='mobilenetv2_imagenet'
model_init,model = load_model(MODEL_NAME)
layer_names, conv_layer_mask = get_layer_names(model,'conv')
layer_names_bn, bn_layer_mask = get_layer_names(model,'batchnorm')
fc_layer_mask = (1 - conv_layer_mask).astype(bool)
print(model)
bs = 64
from tensor_compression import get_compressed_model
import copy
import torch
import os
import numpy as np
CONV_SPLIT = 3
n_layers = len(layer_names)
n_layers_bn = len(layer_names_bn)
#decomposition_conv = 'cp3'
decomposition_conv = 'tucker2'
#X_FACTOR used (how much each layer will be compressed):
WEAKEN_FACTOR = None
X_FACTOR = 1.71
rank_selection_suffix = "{}x".format(X_FACTOR)
#specify rank of each layer
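# Convention assumed from how these values are passed to get_compressed_model:
# a negative entry (-X_FACTOR) appears to request an X-fold compression of that
# layer, while None leaves the layer uncompressed.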
ranks_conv = [None if not (name.endswith('conv.2') or name.endswith('0.0') ) else -X_FACTOR
for name in layer_names[conv_layer_mask]]
ranks_fc = [-X_FACTOR] * (len(layer_names[fc_layer_mask]))
ranks_conv[0] = None
ranks_conv[1] = None
ranks_conv[2] = -X_FACTOR
ranks = np.array([None] * len(layer_names))
ranks[conv_layer_mask] = ranks_conv
decompositions = np.array([None] * len(layer_names))
decompositions[conv_layer_mask] = decomposition_conv
SPLIT_FACTOR = CONV_SPLIT
save_dir = "{}/models_finetuned/{}/{}/{}/layer_groups:{}".format(SAVE_ROOT,MODEL_NAME,
decomposition_conv,
rank_selection_suffix,
SPLIT_FACTOR)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
device = 'cuda'
split_tuples = np.array_split(np.arange(n_layers)[conv_layer_mask], CONV_SPLIT)  # conv-layer index groups, processed in order
compressed_model = copy.deepcopy(model)
print(ranks)
for local_iter, tupl in enumerate(split_tuples):
lname,lname_bn, rank, decomposition = layer_names[tupl], layer_names_bn[tupl],ranks[tupl], decompositions[tupl]
if isinstance(tupl[0], np.ndarray):
print(lname, tupl[0])
compressed_model = get_compressed_model(MODEL_NAME,compressed_model,
ranks=rank, layer_names = lname, layer_names_bn = lname_bn,
decompositions = decomposition,
vbmf_weaken_factor = WEAKEN_FACTOR,return_ranks=True)
print(compressed_model)
#
filename = "{}/mobilenetv2_hooi.pth.tar".format(save_dir)
torch.save(compressed_model,filename)
print(filename)
def test(model,test_loader):
model.eval()
test_loss = tnt.meter.AverageValueMeter()
correct = 0
with torch.no_grad():
for data, target,index in test_loader:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
            loss = F.cross_entropy(output, target)
            test_loss.add(loss.item())  # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss.value()[0], correct, len(test_loader.sampler),
        100. * float(correct) / len(test_loader.sampler)))
return float(correct) / float(len(test_loader.sampler))
from collections import defaultdict
def count_params(model):
n_params = defaultdict()
for name, param in model.named_parameters():
n_params[name] = param.numel()
return n_params
def count_params_by_layers(params_count_dict):
params_count_dict_modif = defaultdict()
for k, v in params_count_dict.items():
if '-' not in k:
            k_head = k.rsplit('.', 1)[0]  # drop the trailing '.weight' / '.bias' component
try:
params_count_dict_modif[k_head] += params_count_dict[k]
except:
params_count_dict_modif[k_head] = params_count_dict[k]
else:
k_head = '.'.join(k.split('-')[0].split('.')[:-1])
try:
params_count_dict_modif[k_head] += params_count_dict[k]
except:
params_count_dict_modif[k_head] = params_count_dict[k]
return params_count_dict_modif
params_count_dict_m = count_params(model)
params_count_dict_cm = count_params(compressed_model)
params_count_dict_m_init = count_params(model_init)
num_parameters = sum([param.nelement() for param in compressed_model.parameters()])
num_parameters1 = sum([param.nelement() for param in model.parameters()])
num_parameters2 = sum([param.nelement() for param in model_init.parameters()])
print('Params, a:initial, b:pruned, c:decomposed ')
x1=sum(params_count_dict_m.values())/sum(params_count_dict_cm.values())
x11=sum(params_count_dict_m_init.values())/sum(params_count_dict_cm.values())
print('a: '+str(sum(params_count_dict_m_init.values())))
print('a: '+str(num_parameters2))
print('b: '+str(sum(params_count_dict_m.values())))
print('b: '+str(num_parameters1))
print('c: '+str(sum(params_count_dict_cm.values())))
print('c: '+str(num_parameters))
print('Params ratio, a:initial/decomposed, b:pruned/decomposed')
print('a: '+str(x11))
print('b: '+str(x1))
print('a: '+str(num_parameters2/num_parameters))
print('b: '+str(num_parameters1/num_parameters))
print('Params pruned, a:decomposed to initial, b:decomposed to pruned')
print('a: '+str(1-num_parameters/num_parameters2))
print('b: '+str(1-num_parameters/num_parameters1))
#
import sys
sys.path.append("../")
from flopco import FlopCo
model.cpu()
model_init.cpu()
compressed_model.cpu()
flopco_m = FlopCo(model, img_size=(1, 3, 224, 224), device='cpu')
flopco_m_init = FlopCo(model_init, img_size=(1, 3, 224, 224), device='cpu')
flopco_cm = FlopCo(compressed_model, img_size=(1, 3, 224, 224), device='cpu')
print('FLOPs a:init/decomposed, b:pruned/decomposed')
print('a: '+str(flopco_m_init.total_flops / flopco_cm.total_flops))
print('b: '+str(flopco_m.total_flops / flopco_cm.total_flops))
print('FLOPs pruned, a:decomposed to initial, b:decomposed to pruned')
print('a: '+str(1-flopco_cm.total_flops/flopco_m_init.total_flops) )
print('b: '+str(1-flopco_cm.total_flops/flopco_m.total_flops) )
|
[
"sys.path.append",
"tensor_compression.get_compressed_model",
"copy.deepcopy",
"os.makedirs",
"torch.autograd.Variable",
"flopco.FlopCo",
"os.path.exists",
"torchnet.meter.AverageValueMeter",
"torch.nn.functional.cross_entropy",
"model_utils.model_utils.get_layer_names",
"torch.save",
"collections.defaultdict",
"numpy.arange",
"torch.no_grad",
"model_utils.load_utils.load_model"
] |
[((16, 38), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (31, 38), False, 'import sys\n'), ((307, 329), 'model_utils.load_utils.load_model', 'load_model', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (317, 329), False, 'from model_utils.load_utils import load_model, SAVE_ROOT\n'), ((364, 394), 'model_utils.model_utils.get_layer_names', 'get_layer_names', (['model', '"""conv"""'], {}), "(model, 'conv')\n", (379, 394), False, 'from model_utils.model_utils import get_layer_names\n'), ((426, 461), 'model_utils.model_utils.get_layer_names', 'get_layer_names', (['model', '"""batchnorm"""'], {}), "(model, 'batchnorm')\n", (441, 461), False, 'from model_utils.model_utils import get_layer_names\n'), ((2097, 2117), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2110, 2117), False, 'import copy\n'), ((2790, 2828), 'torch.save', 'torch.save', (['compressed_model', 'filename'], {}), '(compressed_model, filename)\n', (2800, 2828), False, 'import torch\n'), ((5879, 5901), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (5894, 5901), False, 'import sys\n'), ((5998, 6052), 'flopco.FlopCo', 'FlopCo', (['model'], {'img_size': '(1, 3, 224, 224)', 'device': '"""cpu"""'}), "(model, img_size=(1, 3, 224, 224), device='cpu')\n", (6004, 6052), False, 'from flopco import FlopCo\n'), ((6069, 6128), 'flopco.FlopCo', 'FlopCo', (['model_init'], {'img_size': '(1, 3, 224, 224)', 'device': '"""cpu"""'}), "(model_init, img_size=(1, 3, 224, 224), device='cpu')\n", (6075, 6128), False, 'from flopco import FlopCo\n'), ((6143, 6208), 'flopco.FlopCo', 'FlopCo', (['compressed_model'], {'img_size': '(1, 3, 224, 224)', 'device': '"""cpu"""'}), "(compressed_model, img_size=(1, 3, 224, 224), device='cpu')\n", (6149, 6208), False, 'from flopco import FlopCo\n'), ((1888, 1912), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1902, 1912), False, 'import os\n'), ((1918, 1939), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1929, 1939), False, 'import os\n'), ((2399, 2597), 'tensor_compression.get_compressed_model', 'get_compressed_model', (['MODEL_NAME', 'compressed_model'], {'ranks': 'rank', 'layer_names': 'lname', 'layer_names_bn': 'lname_bn', 'decompositions': 'decomposition', 'vbmf_weaken_factor': 'WEAKEN_FACTOR', 'return_ranks': '(True)'}), '(MODEL_NAME, compressed_model, ranks=rank, layer_names=\n lname, layer_names_bn=lname_bn, decompositions=decomposition,\n vbmf_weaken_factor=WEAKEN_FACTOR, return_ranks=True)\n', (2419, 2597), False, 'from tensor_compression import get_compressed_model\n'), ((2907, 2936), 'torchnet.meter.AverageValueMeter', 'tnt.meter.AverageValueMeter', ([], {}), '()\n', (2934, 2936), True, 'import torchnet as tnt\n'), ((3796, 3809), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (3807, 3809), False, 'from collections import defaultdict\n'), ((3998, 4011), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (4009, 4011), False, 'from collections import defaultdict\n'), ((2962, 2977), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2975, 2977), False, 'import torch\n'), ((1992, 2011), 'numpy.arange', 'np.arange', (['n_layers'], {}), '(n_layers)\n', (2001, 2011), True, 'import numpy as np\n'), ((3190, 3221), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (3205, 3221), True, 'import torch.nn.functional as F\n'), ((3107, 3121), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (3115, 3121), False, 'from torch.autograd import Variable\n'), ((3123, 3139), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (3131, 3139), False, 'from torch.autograd import Variable\n')]
|
"""Example program to show how to read a multi-channel time series from LSL."""
import math
import threading
# import pygame
from random import random
from sklearn.preprocessing import OneHotEncoder
from pylsl import StreamInlet, resolve_stream
import numpy as np
import pandas as pd
import time
from sklearn import model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import warnings
from statistics import mode
from datetime import datetime
import sys
import os
import models
import pywt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings('error')
def handle_keyboard_chunk(chunk, keys):
''' Returns the button statuses from the LSL keyboard chunk '''
ks, times = chunk
new_chunk = [[], []]
for i in range(len(ks)):
if ks[i][0] in ('LCONTROL pressed', 'LCONTROL released', 'RCONTROL pressed', 'RCONTROL released'):
new_chunk[0].append(ks[i])
new_chunk[1].append(times[i])
chunk = tuple(new_chunk)
if not chunk[0]:
if keys is None:
return [[0, 0, 0]], False
return keys, False
if keys is None:
        keys = [0, 0]  # two-element working state: [LCONTROL, RCONTROL]
else:
keys = list(keys[-1][:2])
    out = np.zeros((0, 3)) # data rows are appended in the format LCONTROL, RCONTROL, TIME
for i in range(len(chunk[0])):
action = chunk[0][i][0]
timestamp = chunk[1][i]
if action == 'LCONTROL pressed':
keys[0] = 1
elif action == 'LCONTROL released':
keys[0] = 0
elif action == 'RCONTROL pressed':
keys[1] = 1
elif action == 'RCONTROL released':
keys[1] = 0
else:
continue
out = np.append(out, [keys + [timestamp]], axis=0)
if len(out) == 0:
return keys, False
return out, True
def normalise_list(x):
x = np.array(x)
try:
out = ((x -x.min()) / (x.max() - x.min())).tolist()
except Warning:
out = [np.zeros(len(x[0])).tolist()]
return out
def normalise_eeg(eeg):
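    # prev_dl holds one row per channel, so eeg[i::8] selects just channel i;
    # each channel is min-max normalised independently.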
return [normalise_list(eeg[i::8]) for i in range(8)]
def my_filter(x, y, a=None, b=None):
# b = [0.9174, -0.7961, 0.9174]
# a = [-1, 0.7961, -0.8347]
# Parameters for a 40-hz low-pass filter
if a is None:
a = [-1, 0.331]
if b is None:
b = [0.3345, 0.3345]
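    # Direct-form IIR update: y[n] = sum_i a[i]*y[n-i] + b[i]*x[n-i]. The
    # a[0]*y[n] term contributes nothing because the caller pre-seeds the
    # current output row with zeros before calling this filter.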
if len(y) > len(a):
for col in range(len(y[-1][:-3])):
y[-1][col] = sum(a[i]*y[-1-i][col] + b[i]*x[-1-i][col] for i in range(len(a)))
# for i in range(len(a)):
# y[-1][col] += a[i]*y[-1-i][col] + b[i]*x[-1-i][col]
return y
def fir_filter(x, y, a=None):
if a is None:
a = [1.4, -0.8, 1.4] # 50 Hz notch filter
# a = [1] # do nothing
if len(x) >= len(a):
for col in range(len(y[-1][:-3])):
y[-1][col] = sum([a[i]*x[-1-i][col] for i in range(len(a))])
# print(y[-1][col])
return y
class EEG:
def __init__(self, user_id, game, data_length=100, ignore_lsl=False, ignore_BCI=False):
# first resolve an EEG stream on the lab network
self.user_id = user_id
self.game = game
self.data_length = data_length
if not ignore_lsl:
print("looking for an Keyboard stream...")
self.keyboard = resolve_stream('name', 'Keyboard')
print(self.keyboard)
self.keyboard_inlet = StreamInlet(self.keyboard[0])
if not ignore_lsl and not ignore_BCI:
print("looking for an EEG stream...")
self.eeg = resolve_stream('type', 'EEG')
print(self.eeg)
self.eeg_inlet = StreamInlet(self.eeg[0])
self.eeg_dataset = [] # of the format [channel0, c1, ..., timestamp, left_shift, right_shift]
self.filtered = []
self.fft = []
self.keys = None
self.running = False
self.clf = None
self.acc = 0
@property
def prev_dl(self):
return np.array([item[:-3] for item in self.filtered[-1:-1-self.data_length:-1]]).T.tolist()
def eeg_sample(self, data=None):
if data is None:
sample, timestamp = self.eeg_inlet.pull_sample()
data = [sample + [timestamp] + list(self.keys[-1][:2])]
self.eeg_dataset += data
self.filtered += [[0]*8 + list(data[0][-3:])]
# self.filtered = my_filter(self.eeg_dataset, self.filtered)
# self.filtered = my_filter(self.eeg_dataset, self.filtered, a=[-1, 1.452, -0.4523], b=[0.2737, 0, -0.2737])
self.filtered = my_filter(self.eeg_dataset, self.filtered,
b=[float(i) for i in '0.3749 -0.2339 0 0.2339 -0.3749'.split()],
a=[-1*float(i) for i in '1.0000 -1.8173 1.9290 -1.3011 0.2154'.split()]) # this one also works well!
# self.filtered = fir_filter(self.eeg_dataset, self.filtered) # this works well!
if len(self.filtered) > self.data_length:
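            # Once a full window is buffered: min-max normalise each channel,
            # take FFT magnitudes, flatten them into a single feature row and
            # append the current key labels.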
norm = normalise_eeg(self.prev_dl)
fft = np.array([np.abs(np.fft.fft(n)) for n in norm]).flatten().tolist()
# fft = normalise_list(np.array([pywt.dwt(n, 'db2') for n in norm])[:100].flatten())
self.fft += [fft + self.filtered[-1][-2:]]
def mi_to_fft(self):
hist_mi = [f for f in os.listdir('users/data') if 'mi_' + self.user_id == f[:5]]
hist_fft = [f for f in os.listdir('users/data') if 'fft_' + self.user_id == f[:6]]
needed_hist_fft = []
for fmi in hist_mi:
if 'fft_' + fmi[3:] not in hist_fft:
needed_hist_fft.append(fmi)
print('need to convert to fft:', needed_hist_fft)
print('loading {}'.format(needed_hist_fft))
for mi_file in needed_hist_fft:
loaded_data = np.load('users/data/' + mi_file)
self.eeg_dataset = []
self.filtered = []
self.fft = []
t0 = time.time()
for row in range(len(loaded_data)):
data = [loaded_data[row]]
self.eeg_sample(data)
if row % 1000 == 500:
tr = (time.time() - t0) * (len(loaded_data) - row) / row
print('time remaining: {}'.format(tr))
print()
fft_name = 'users/data/fft_' + mi_file[3:]
print('outputting to', fft_name)
np.save(fft_name, self.fft)
# print(pd.DataFrame(self.fft))
# good = 'users/data/good_' + mi_file[3:]
# good = np.load(good)
# print(pd.DataFrame(good))
#
# print(f'{np.array_equal(self.fft, good) = }')
def gather_data(self):
thread = threading.Thread(target=self.__gather)
thread.start()
return thread
def __gather(self):
self.running = True
self.eeg_dataset = []
self.filtered = []
self.fft = []
while self.running:
# get a new sample (you can also omit the timestamp part if you're not interested in it)
chunk = self.keyboard_inlet.pull_chunk()
self.keys, is_new = handle_keyboard_chunk(chunk, self.keys)
self.eeg_sample() # get and process the latest sample from the EEG headset
self.save_training()
def train(self, classifier='KNN', include_historical=False, **kwargs):
thread = threading.Thread(target=self.__train, args=(classifier, include_historical), kwargs=kwargs)
thread.start()
return thread
def __train(self, classifier='KNN', include_historical=False, **kwargs):
print('data recording complete. building model... (this may take a few moments)')
# hist_fft = [f for f in os.listdir('users/data') if 'fft_' + self.user_id in f and 'npy' in f] # grab historical data for user
#
# # take only the most recent data if we don't include_historical
# if not include_historical or classifier == 'ANN':
# print('ignoring historical data...')
# hist_fft = [hist_fft[-1]]
#
# print('loading {}'.format(hist_fft))
# data = [np.load('users/data/' + f).tolist()[::5] for f in hist_fft]
#
# # X = [dat[:][:-2] for dat in data]
# # Y_i = [dat[:][-2:] for dat in data]
# # Y_o = []
# # X_o = []
# data_o = []
#
# # merge historical data together
# for i in range(len(data)):
# # Y_o += Y_i[i]
# # X_o += X[i]
# print('data', i, 'shape', np.array(data[i]).shape)
# data_o += data[i]
def flatten(t):
return [item for sublist in t for item in sublist]
def get_fmi_dl(index, data, length=100):
np_fmi = np.array(data[index:index + length])
x = flatten(np_fmi[:, :-3].tolist())
y = np_fmi[-1, -2:].tolist()
return [x + y]
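        # Each training row concatenates `length` consecutive filtered samples
        # (channel values only; the 3 metadata columns are dropped) and takes
        # its labels from the last sample in the window.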
data = self.filtered
data_o = []
for line in range(len(data)-100):
data_o += get_fmi_dl(line, data)
# data_o = data
print('balancing data')
# print(data_o)
print('data shape:', np.array(data_o).shape)
fft_df = pd.DataFrame(data_o, columns=['c' + str(i) for i in range(802)])
fft_df['y'] = fft_df.apply(lambda row: row.c800 + 2 * row.c801, axis=1)
fft_df = fft_df.loc[fft_df['y'] != 3].reset_index(drop=True)
m = min(fft_df.y.value_counts()) # grab the count of the least common y value (left, right, or none)
y_vals = fft_df.y.unique()
print('got min={}, unique={}'.format(m, y_vals))
randomized_df = fft_df.sample(frac=1).reset_index(drop=True)
out = np.zeros((m*3, 803))
for i, y in enumerate(y_vals):
arr = randomized_df.loc[randomized_df['y'] == y].head(m).to_numpy()
out[i*m:i*m + m] = arr
print('consolidated data')
randomized_df = pd.DataFrame(out)
randomized_df = randomized_df.sample(frac=1).reset_index(drop=True)
print('reordered data')
Y = randomized_df[[800, 801]].to_numpy()
del randomized_df[800], randomized_df[801], randomized_df[802]
X = randomized_df.to_numpy()
print('created X and Y. X.shape={}, Y.shape={}'.format(X.shape, Y.shape))
# y =
# one hot encoding for Y values
# Y_i = list(Y_o)
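        # The four seed rows [0]..[3] guarantee the encoder sees every class;
        # they are sliced off again after the transform (the [4:] below).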
Y_i = [[0], [1], [2], [3]] + [[2*Y[i][-2] + Y[i][-1]] for i in range(len(Y))]
enc = OneHotEncoder()
print('fitting one hot encoder')
enc.fit(Y_i)
# X = X_o
Y = enc.transform(Y_i).toarray()[4:]
if len(X) == 0 or len(Y) == 0:
print('no training data provided')
return
        def train_test_split(X, Y, test_size):
            # reserve the final `test_size` fraction of the rows for testing
            stop_idx = int(len(Y) * (1 - test_size))
            return X[:stop_idx], X[stop_idx:], Y[:stop_idx], Y[stop_idx:]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
# if classifier == 'KNN':
# self.clf = models.KNN(n_neighbors=3, **kwargs)
# elif classifier == "LDA":
# self.clf = models.LDA()
# elif classifier == "SVM":
# self.clf = models.SVM(**kwargs)
# elif classifier == "ANN":
# self.clf = models.ANN(**kwargs)
# elif classifier == "RNN":
# self.clf = models.RNN(**kwargs)
# elif classifier == "CNN":
# self.clf = models.CNN(**kwargs)
# else:
# print('no valid classifier provided ({}). Using KNN'.format(classifier))
# self.clf = models.KNN(n_neighbors=3)
print('training model ({} classifier)...'.format(self.clf))
self.clf.fit(X_train, Y_train)
print('analysing model...')
preds = self.clf.predict(X_test)
acc = accuracy_score(Y_test, preds)
print('combined acc:', acc)
self.acc = round(acc, 4)
print('combined acc:', self.acc)
print()
print('model complete.')
def build_model(self, classifier, **kwargs):
thread = threading.Thread(target=self._build_model, args=(classifier, ), kwargs=kwargs)
thread.start()
return thread
def _build_model(self, classifier, **kwargs):
if classifier == 'KNN':
self.clf = models.KNN(n_neighbors=3, **kwargs)
elif classifier == "LDA":
self.clf = models.LDA()
elif classifier == "SVM":
self.clf = models.SVM(**kwargs)
elif classifier == "ANN":
self.clf = models.ANN(**kwargs)
elif classifier == "RNN":
self.clf = models.RNN(**kwargs)
elif classifier == "CNN":
self.clf = models.CNN2(transfer=True, **kwargs)
else:
print(f'no valid classifier provided ({classifier}). Using KNN')
self.clf = models.KNN(n_neighbors=3)
def save_training(self):
suffix = '_' + datetime.today().strftime('%d%m%y_%H%M%S') + '.npy'
print('saving eeg data:', np.array(self.eeg_dataset).shape)
eeg_file = './users/data/mi_' + self.user_id + suffix
np.save(eeg_file, self.eeg_dataset)
print('saving filtered eeg data:', np.array(self.filtered).shape)
filt_eeg_file = './users/data/fmi_' + self.user_id + suffix
np.save(filt_eeg_file, self.filtered)
print('saving filtered fft data:', np.array(self.fft).shape)
fft_eeg_file = './users/data/fft_' + self.user_id + suffix
np.save(fft_eeg_file, self.fft)
def test(self, send_to=None):
thread = threading.Thread(target=self.__test, args=(send_to, ))
thread.start()
return thread
def __test(self, send_to=None):
assert self.clf
self.running = True
self.eeg_dataset = []
self.filtered = []
self.fft = []
last_preds = []
def flatten(t):
return [item for sublist in t for item in sublist]
def get_fmi_dl(index, data, length=100):
np_fmi = np.array(data[index:index + length])
x = flatten(np_fmi[:, :-3].tolist())
return [x]
while self.running:
self.eeg_sample()
if len(self.filtered) > self.data_length:
pred = self.clf.predict(get_fmi_dl(-101, self.filtered))
# if pred[0][2]:
# last_preds += [1]
# elif pred[0][1]:
# last_preds += [-1]
# else:
# last_preds += [0]
# if len(last_preds) >= 25:
# last_preds = last_preds[1:]
# avg = sum(last_preds) / len(last_preds)
# left = avg < -0.25
# right = avg > 0.25
left = pred[0][0] or pred[0][2]
right = pred[0][1] or pred[0][2]
if send_to:
send_to((left, right))
elif send_to:
send_to((0, 0))
def close(self):
print('closing eeg and keyboard streams')
if hasattr(self, 'eeg_inlet'):
self.eeg_inlet.close_stream()
if hasattr(self, 'keyboard_inlet'):
self.keyboard_inlet.close_stream()
def main(user_id, train_time=30, test_time=30, classifier='CNN', model=''):
import motor_bci_game
while len(user_id) != 2:
user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
if len(user_id) == 2:
print('user_id={}'.format(user_id))
break
print('user ID must be 2 digits, you put', len(user_id))
game = motor_bci_game.Game()
eeg = EEG(user_id, game)
gathering = eeg.gather_data() # runs in background
eeg.build_model(classifier=classifier, model=model)#, model_location="cnn_model_8_11_22_32") # runs in background
game.run_keyboard(run_time=train_time) # runs in foreground
eeg.running = False
while gathering.is_alive(): pass
print(game.e.scores)
game.e.scores = [0]
training = eeg.train(classifier=classifier, include_historical=False)#, model_location='cnn_model_8_11_22_32') #, decision_function_shape="ovo")
while training.is_alive(): pass
eeg.running = False # stop eeg gathering once game completes
time.sleep(5)
print('testing')
testing = eeg.test(send_to=game.p1.handle_keys)
game.run_eeg(test_time)
eeg.running = False
while testing.is_alive():
pass
eeg.close()
print('scores:', game.e.scores)
print('acc:', eeg.acc)
game.quit()
sys.exit()
def main_game_2(user_id, train_time=30, test_time=30, classifier='CNN'):
import game_2
while len(user_id) != 2:
user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
if len(user_id) == 2:
print('user_id={}'.format(user_id))
break
print('user ID must be 2 digits, you put', len(user_id))
game = game_2.Game()
eeg = EEG(user_id, game)
gathering = eeg.gather_data() # runs in background
eeg.build_model(classifier=classifier)#, model_location="cnn_model_8_11_22_32") # runs in background
game.run_keyboard(run_time=train_time) # runs in foreground
eeg.running = False
while gathering.is_alive(): pass
training = eeg.train(classifier=classifier, include_historical=False)#, model_location='cnn_model_8_11_22_32') #, decision_function_shape="ovo")
while training.is_alive(): pass
eeg.running = False # stop eeg gathering once game completes
print('scores:', game.block.scores)
game.block.scores = [0]
# time.sleep(5)
print('testing')
testing = eeg.test(send_to=game.block.handle_keys)
game.run_eeg(test_time)
eeg.running = False
while testing.is_alive():
pass
eeg.close()
print('scores:', game.block.scores)
total = sum(game.block.scores) + len(game.block.scores) - 1
print('total blocks:', total)
print('percent caught:', sum(game.block.scores) / total)
game.quit()
sys.exit()
def train_test(user_id):
import motor_bci_game
# while len(user_id) != 2:
# user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
# if len(user_id) == 2:
# print(f'{user_id=}')
# break
# print('user ID must be 2 digits, you put', len(user_id))
game = motor_bci_game.Game()
eeg = EEG(user_id, game, ignore_lsl=True)
training = eeg.train(classifier='CNN', include_historical=False, model='new_test') #, decision_function_shape="ovo")
while training.is_alive(): pass
eeg.close()
print('scores:', game.e.scores)
game.quit()
sys.exit()
def convert_mi_to_fft(user_id):
import motor_bci_game
# user_id = '00'# + str(i)
print('user_id={}'.format(user_id))
game = motor_bci_game.Game()
eeg = EEG(user_id, game, ignore_lsl=True)
eeg.mi_to_fft()
eeg.close()
game.quit()
sys.exit()
if __name__ == '__main__':
user_id = '-5' # -9 is cameron post-training recordings, -8 is same for kevin
mode = 2
if mode == 1:
good = np.load('users/data/fmi_01_300921_211231.npy')
print(pd.DataFrame(good))
elif mode == 2:
main(user_id=user_id,
train_time=30,
test_time=30,
model='models/p00_models/cnn_model_2_200',
classifier='LDA'
)
elif mode == 3:
convert_mi_to_fft(user_id)
elif mode == 4:
train_test(user_id)
elif mode == 5:
main_game_2(user_id=user_id,
train_time=30,
test_time=30)
print('done?')
|
[
"motor_bci_game.Game",
"numpy.load",
"models.CNN2",
"sklearn.metrics.accuracy_score",
"models.RNN",
"models.LDA",
"pandas.DataFrame",
"models.KNN",
"numpy.fft.fft",
"pylsl.resolve_stream",
"pylsl.StreamInlet",
"numpy.append",
"threading.Thread",
"numpy.save",
"datetime.datetime.today",
"sklearn.preprocessing.OneHotEncoder",
"time.sleep",
"os.listdir",
"sys.exit",
"warnings.filterwarnings",
"models.SVM",
"game_2.Game",
"numpy.zeros",
"time.time",
"numpy.array",
"models.ANN"
] |
[((592, 624), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (615, 624), False, 'import warnings\n'), ((1244, 1260), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (1252, 1260), True, 'import numpy as np\n'), ((1889, 1900), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1897, 1900), True, 'import numpy as np\n'), ((15694, 15715), 'motor_bci_game.Game', 'motor_bci_game.Game', ([], {}), '()\n', (15713, 15715), False, 'import motor_bci_game\n'), ((16352, 16365), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (16362, 16365), False, 'import time\n'), ((16635, 16645), 'sys.exit', 'sys.exit', ([], {}), '()\n', (16643, 16645), False, 'import sys\n'), ((17051, 17064), 'game_2.Game', 'game_2.Game', ([], {}), '()\n', (17062, 17064), False, 'import game_2\n'), ((18132, 18142), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18140, 18142), False, 'import sys\n'), ((18505, 18526), 'motor_bci_game.Game', 'motor_bci_game.Game', ([], {}), '()\n', (18524, 18526), False, 'import motor_bci_game\n'), ((18805, 18815), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18813, 18815), False, 'import sys\n'), ((18959, 18980), 'motor_bci_game.Game', 'motor_bci_game.Game', ([], {}), '()\n', (18978, 18980), False, 'import motor_bci_game\n'), ((19084, 19094), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19092, 19094), False, 'import sys\n'), ((1739, 1783), 'numpy.append', 'np.append', (['out', '[keys + [timestamp]]'], {'axis': '(0)'}), '(out, [keys + [timestamp]], axis=0)\n', (1748, 1783), True, 'import numpy as np\n'), ((6727, 6765), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__gather'}), '(target=self.__gather)\n', (6743, 6765), False, 'import threading\n'), ((7409, 7504), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__train', 'args': '(classifier, include_historical)', 'kwargs': 'kwargs'}), '(target=self.__train, args=(classifier, include_historical),\n kwargs=kwargs)\n', (7425, 7504), False, 'import threading\n'), ((9736, 9758), 'numpy.zeros', 'np.zeros', (['(m * 3, 803)'], {}), '((m * 3, 803))\n', (9744, 9758), True, 'import numpy as np\n'), ((9971, 9988), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (9983, 9988), True, 'import pandas as pd\n'), ((10517, 10532), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (10530, 10532), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((11864, 11893), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'preds'], {}), '(Y_test, preds)\n', (11878, 11893), False, 'from sklearn.metrics import accuracy_score\n'), ((12121, 12198), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._build_model', 'args': '(classifier,)', 'kwargs': 'kwargs'}), '(target=self._build_model, args=(classifier,), kwargs=kwargs)\n', (12137, 12198), False, 'import threading\n'), ((13168, 13203), 'numpy.save', 'np.save', (['eeg_file', 'self.eeg_dataset'], {}), '(eeg_file, self.eeg_dataset)\n', (13175, 13203), True, 'import numpy as np\n'), ((13355, 13392), 'numpy.save', 'np.save', (['filt_eeg_file', 'self.filtered'], {}), '(filt_eeg_file, self.filtered)\n', (13362, 13392), True, 'import numpy as np\n'), ((13538, 13569), 'numpy.save', 'np.save', (['fft_eeg_file', 'self.fft'], {}), '(fft_eeg_file, self.fft)\n', (13545, 13569), True, 'import numpy as np\n'), ((13622, 13675), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__test', 'args': '(send_to,)'}), '(target=self.__test, args=(send_to,))\n', (13638, 13675), False, 'import threading\n'), ((19253, 19299), 'numpy.load', 'np.load', (['"""users/data/fmi_01_300921_211231.npy"""'], {}), "('users/data/fmi_01_300921_211231.npy')\n", (19260, 19299), True, 'import numpy as np\n'), ((3341, 3375), 'pylsl.resolve_stream', 'resolve_stream', (['"""name"""', '"""Keyboard"""'], {}), "('name', 'Keyboard')\n", (3355, 3375), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((3443, 3472), 'pylsl.StreamInlet', 'StreamInlet', (['self.keyboard[0]'], {}), '(self.keyboard[0])\n', (3454, 3472), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((3592, 3621), 'pylsl.resolve_stream', 'resolve_stream', (['"""type"""', '"""EEG"""'], {}), "('type', 'EEG')\n", (3606, 3621), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((3679, 3703), 'pylsl.StreamInlet', 'StreamInlet', (['self.eeg[0]'], {}), '(self.eeg[0])\n', (3690, 3703), False, 'from pylsl import StreamInlet, resolve_stream\n'), ((5817, 5849), 'numpy.load', 'np.load', (["('users/data/' + mi_file)"], {}), "('users/data/' + mi_file)\n", (5824, 5849), True, 'import numpy as np\n'), ((5958, 5969), 'time.time', 'time.time', ([], {}), '()\n', (5967, 5969), False, 'import time\n'), ((6407, 6434), 'numpy.save', 'np.save', (['fft_name', 'self.fft'], {}), '(fft_name, self.fft)\n', (6414, 6434), True, 'import numpy as np\n'), ((8793, 8829), 'numpy.array', 'np.array', (['data[index:index + length]'], {}), '(data[index:index + length])\n', (8801, 8829), True, 'import numpy as np\n'), ((12351, 12386), 'models.KNN', 'models.KNN', ([], {'n_neighbors': '(3)'}), '(n_neighbors=3, **kwargs)\n', (12361, 12386), False, 'import models\n'), ((14074, 14110), 'numpy.array', 'np.array', (['data[index:index + length]'], {}), '(data[index:index + length])\n', (14082, 14110), True, 'import numpy as np\n'), ((19314, 19332), 'pandas.DataFrame', 'pd.DataFrame', (['good'], {}), '(good)\n', (19326, 19332), True, 'import pandas as pd\n'), ((5340, 5364), 'os.listdir', 'os.listdir', (['"""users/data"""'], {}), "('users/data')\n", (5350, 5364), False, 'import os\n'), ((5430, 5454), 'os.listdir', 'os.listdir', (['"""users/data"""'], {}), "('users/data')\n", (5440, 5454), False, 'import os\n'), ((9193, 9209), 'numpy.array', 'np.array', (['data_o'], {}), '(data_o)\n', (9201, 9209), True, 'import numpy as np\n'), ((12444, 12456), 'models.LDA', 'models.LDA', ([], {}), '()\n', (12454, 12456), False, 'import models\n'), ((13064, 13090), 'numpy.array', 'np.array', (['self.eeg_dataset'], {}), '(self.eeg_dataset)\n', (13072, 13090), True, 'import numpy as np\n'), ((13248, 13271), 'numpy.array', 'np.array', (['self.filtered'], {}), '(self.filtered)\n', (13256, 13271), True, 'import numpy as np\n'), ((13437, 13455), 'numpy.array', 'np.array', (['self.fft'], {}), '(self.fft)\n', (13445, 13455), True, 'import numpy as np\n'), ((4010, 4086), 'numpy.array', 'np.array', (['[item[:-3] for item in self.filtered[-1:-1 - self.data_length:-1]]'], {}), '([item[:-3] for item in self.filtered[-1:-1 - self.data_length:-1]])\n', (4018, 4086), True, 'import numpy as np\n'), ((12514, 12534), 'models.SVM', 'models.SVM', ([], {}), '(**kwargs)\n', (12524, 12534), False, 'import models\n'), ((12592, 12612), 'models.ANN', 'models.ANN', ([], {}), '(**kwargs)\n', (12602, 12612), False, 'import models\n'), ((12978, 12994), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (12992, 12994), False, 'from datetime import datetime\n'), ((12670, 12690), 'models.RNN', 'models.RNN', ([], {}), '(**kwargs)\n', (12680, 12690), False, 'import models\n'), ((6164, 6175), 'time.time', 'time.time', ([], {}), '()\n', (6173, 6175), False, 'import time\n'), ((12748, 12784), 'models.CNN2', 'models.CNN2', ([], {'transfer': '(True)'}), '(transfer=True, **kwargs)\n', (12759, 12784), False, 'import models\n'), ((12899, 12924), 'models.KNN', 'models.KNN', ([], {'n_neighbors': '(3)'}), '(n_neighbors=3)\n', (12909, 12924), False, 'import models\n'), ((5082, 5095), 'numpy.fft.fft', 'np.fft.fft', (['n'], {}), '(n)\n', (5092, 5095), True, 'import numpy as np\n')]
|
import itertools
from .database import Database
class Connection(object):
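    # Class-level counter shared by all instances, so each new connection
    # draws the next unique id from the same iterator.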
_CONNECTION_ID = itertools.count()
def __init__(self, host = None, port = None, max_pool_size = 10,
network_timeout = None, document_class = dict,
tz_aware = False, _connect = True, **kwargs):
super(Connection, self).__init__()
self.host = host
self.port = port
self._databases = {}
self._id = next(self._CONNECTION_ID)
self.document_class = document_class
def __getitem__(self, db_name):
db = self._databases.get(db_name, None)
if db is None:
db = self._databases[db_name] = Database(self, db_name)
return db
def __getattr__(self, attr):
return self[attr]
def __repr__(self):
identifier = []
host = getattr(self,'host','')
port = getattr(self,'port',None)
if host is not None:
identifier = ["'{0}'".format(host)]
if port is not None:
identifier.append(str(port))
return "mongomock.Connection({0})".format(', '.join(identifier))
def server_info(self):
return {
"version" : "2.0.6",
"sysInfo" : "Mock",
"versionArray" : [
2,
0,
6,
0
],
"bits" : 64,
"debug" : False,
"maxBsonObjectSize" : 16777216,
"ok" : 1
}
def database_names(self):
return list(self._databases.keys())
#Connection is now deprecated, it's called MongoClient instead
class MongoClient(Connection):
def stub(self):
pass
|
[
"itertools.count"
] |
[((97, 114), 'itertools.count', 'itertools.count', ([], {}), '()\n', (112, 114), False, 'import itertools\n')]
|
"""
Tests brusselator
"""
import numpy as np
from pymgrit.brusselator.brusselator import Brusselator
from pymgrit.brusselator.brusselator import VectorBrusselator
def test_brusselator_constructor():
"""
Test constructor
"""
brusselator = Brusselator(t_start=0, t_stop=1, nt=11)
np.testing.assert_equal(brusselator.a, 1)
np.testing.assert_equal(brusselator.b, 3)
np.testing.assert_equal(True, isinstance(brusselator.vector_template, VectorBrusselator))
np.testing.assert_equal(True, isinstance(brusselator.vector_t_start, VectorBrusselator))
np.testing.assert_equal(brusselator.vector_t_start.get_values(), np.array([0, 1]))
def test_brusselator_step():
"""
Test step()
"""
brusselator = Brusselator(t_start=0, t_stop=1, nt=11)
brusselator_res = brusselator.step(u_start=VectorBrusselator(), t_start=0, t_stop=0.1)
np.testing.assert_almost_equal(brusselator_res.get_values(), np.array([0.08240173, 0.01319825]))
def test_vector_brusselator_constructor():
"""
Test constructor
"""
vector_brusselator = VectorBrusselator()
np.testing.assert_equal(vector_brusselator.value[0], 0)
np.testing.assert_equal(vector_brusselator.value[1], 0)
def test_vector_brusselator_add():
"""
Test __add__
"""
vector_brusselator_1 = VectorBrusselator()
vector_brusselator_1.value = np.ones(2)
vector_brusselator_2 = VectorBrusselator()
vector_brusselator_2.value = 2 * np.ones(2)
vector_brusselator_res = vector_brusselator_1 + vector_brusselator_2
np.testing.assert_equal(vector_brusselator_res.value, 3 * np.ones(2))
vector_brusselator_res += vector_brusselator_1
np.testing.assert_equal(vector_brusselator_res.value, 4 * np.ones(2))
def test_vector_brusselator_sub():
"""
Test __sub__
"""
vector_brusselator_1 = VectorBrusselator()
vector_brusselator_1.value = np.ones(2)
vector_brusselator_2 = VectorBrusselator()
vector_brusselator_2.value = 2 * np.ones(2)
vector_brusselator_res = vector_brusselator_2 - vector_brusselator_1
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2))
vector_brusselator_res -= vector_brusselator_2
np.testing.assert_equal(vector_brusselator_res.value, -np.ones(2))
def test_vector_brusselator_mul():
"""
Test __mul__
"""
vector_brusselator_1 = VectorBrusselator()
vector_brusselator_1.value = np.ones(2)
vector_brusselator_res = vector_brusselator_1 * 2
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*2)
vector_brusselator_res = 3 * vector_brusselator_1
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*3)
vector_brusselator_res *= 2
np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*6)
def test_vector_brusselator_norm():
"""
Test norm()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator.value = np.array([1, 2])
np.testing.assert_equal(np.linalg.norm(np.array([1, 2])), vector_brusselator.norm())
def test_vector_brusselator_clone_zero():
"""
Test clone_zero()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator_clone = vector_brusselator.clone_zero()
np.testing.assert_equal(True, isinstance(vector_brusselator_clone, VectorBrusselator))
np.testing.assert_equal(vector_brusselator_clone.value, np.zeros(2))
def test_vector_brusselator_clone_rand():
"""
Test clone_rand()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator_clone = vector_brusselator.clone_rand()
np.testing.assert_equal(True, isinstance(vector_brusselator_clone, VectorBrusselator))
def test_vector_brusselator_set_values():
"""
Test the set_values()
"""
vector_brusselator = VectorBrusselator()
vector_brusselator.set_values(np.array([1, 2]))
np.testing.assert_equal(vector_brusselator.value, np.array([1, 2]))
def test_vector_brusselator_get_values():
"""
Test get_values()
"""
vector_brusselator = VectorBrusselator()
np.testing.assert_equal(vector_brusselator.get_values(), np.zeros(2))
def test_vector_brusselator_plot_solution():
"""
Test get_values()
"""
vector_brusselator = VectorBrusselator()
np.testing.assert_equal(vector_brusselator.plot_solution(), None)
|
[
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.testing.assert_equal",
"pymgrit.brusselator.brusselator.VectorBrusselator",
"pymgrit.brusselator.brusselator.Brusselator"
] |
[((257, 296), 'pymgrit.brusselator.brusselator.Brusselator', 'Brusselator', ([], {'t_start': '(0)', 't_stop': '(1)', 'nt': '(11)'}), '(t_start=0, t_stop=1, nt=11)\n', (268, 296), False, 'from pymgrit.brusselator.brusselator import Brusselator\n'), ((302, 343), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['brusselator.a', '(1)'], {}), '(brusselator.a, 1)\n', (325, 343), True, 'import numpy as np\n'), ((348, 389), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['brusselator.b', '(3)'], {}), '(brusselator.b, 3)\n', (371, 389), True, 'import numpy as np\n'), ((746, 785), 'pymgrit.brusselator.brusselator.Brusselator', 'Brusselator', ([], {'t_start': '(0)', 't_stop': '(1)', 'nt': '(11)'}), '(t_start=0, t_stop=1, nt=11)\n', (757, 785), False, 'from pymgrit.brusselator.brusselator import Brusselator\n'), ((1086, 1105), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1103, 1105), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1110, 1165), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['vector_brusselator.value[0]', '(0)'], {}), '(vector_brusselator.value[0], 0)\n', (1133, 1165), True, 'import numpy as np\n'), ((1170, 1225), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['vector_brusselator.value[1]', '(0)'], {}), '(vector_brusselator.value[1], 0)\n', (1193, 1225), True, 'import numpy as np\n'), ((1323, 1342), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1340, 1342), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1376, 1386), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1383, 1386), True, 'import numpy as np\n'), ((1414, 1433), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1431, 1433), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1853, 1872), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1870, 1872), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1906, 1916), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1913, 1916), True, 'import numpy as np\n'), ((1944, 1963), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (1961, 1963), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((2375, 2394), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (2392, 2394), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((2428, 2438), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2435, 2438), True, 'import numpy as np\n'), ((2892, 2911), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (2909, 2911), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((2943, 2959), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (2951, 2959), True, 'import numpy as np\n'), ((3156, 3175), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (3173, 3175), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((3513, 3532), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (3530, 3532), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((3800, 3819), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (3817, 3819), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((4051, 4070), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (4068, 4070), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((4255, 4274), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (4272, 4274), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((647, 663), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (655, 663), True, 'import numpy as np\n'), ((943, 977), 'numpy.array', 'np.array', (['[0.08240173, 0.01319825]'], {}), '([0.08240173, 0.01319825])\n', (951, 977), True, 'import numpy as np\n'), ((1471, 1481), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1478, 1481), True, 'import numpy as np\n'), ((2001, 2011), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2008, 2011), True, 'import numpy as np\n'), ((2144, 2154), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2151, 2154), True, 'import numpy as np\n'), ((3393, 3404), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3401, 3404), True, 'import numpy as np\n'), ((3854, 3870), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3862, 3870), True, 'import numpy as np\n'), ((3926, 3942), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3934, 3942), True, 'import numpy as np\n'), ((4132, 4143), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4140, 4143), True, 'import numpy as np\n'), ((833, 852), 'pymgrit.brusselator.brusselator.VectorBrusselator', 'VectorBrusselator', ([], {}), '()\n', (850, 852), False, 'from pymgrit.brusselator.brusselator import VectorBrusselator\n'), ((1618, 1628), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1625, 1628), True, 'import numpy as np\n'), ((1744, 1754), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1751, 1754), True, 'import numpy as np\n'), ((2267, 2277), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2274, 2277), True, 'import numpy as np\n'), ((2552, 2562), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2559, 2562), True, 'import numpy as np\n'), ((2679, 2689), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2686, 2689), True, 'import numpy as np\n'), ((2784, 2794), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2791, 2794), True, 'import numpy as np\n'), ((3003, 3019), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3011, 3019), True, 'import numpy as np\n')]
|
from keras import layers
from keras import models
def cnn_model(shape=(80, 80, 3), dropout=0.5, last_activation='softmax'):
    model = models.Sequential()
    model.add(layers.Conv2D(64, (3, 3), activation='relu', input_shape=shape))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(4, activation=last_activation))
    model.summary()
    return 'cnn', model
def dense_model(shape=(80 * 80,), last_activation='softmax'):
    model = models.Sequential()
    model.add(layers.Dense(128, activation='relu', input_shape=shape))
    model.add(layers.Dense(4, activation=last_activation))
    model.summary()
    return 'simple_dense', model
def inception_model():
pass
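
# Minimal usage sketch: build and compile the CNN above, then fit it on random
# placeholder data (shapes follow the defaults: 80x80 RGB in, 4 classes out;
# the optimizer/loss choices here are illustrative, not taken from the module).
if __name__ == '__main__':
    import numpy as np

    _, model = cnn_model()
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    x = np.random.rand(8, 80, 80, 3)
    y = np.eye(4)[np.random.randint(0, 4, size=8)]
    model.fit(x, y, epochs=1, batch_size=4)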
|
[
"keras.layers.Dropout",
"keras.layers.MaxPool2D",
"keras.layers.Flatten",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential"
] |
[((143, 162), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (160, 162), False, 'from keras import models\n'), ((787, 806), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (804, 806), False, 'from keras import models\n'), ((178, 241), 'keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'shape'}), "(64, (3, 3), activation='relu', input_shape=shape)\n", (191, 241), False, 'from keras import layers\n'), ((254, 278), 'keras.layers.MaxPool2D', 'layers.MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (270, 278), False, 'from keras import layers\n'), ((294, 338), 'keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (307, 338), False, 'from keras import layers\n'), ((352, 376), 'keras.layers.MaxPool2D', 'layers.MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (368, 376), False, 'from keras import layers\n'), ((392, 437), 'keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (405, 437), False, 'from keras import layers\n'), ((454, 478), 'keras.layers.MaxPool2D', 'layers.MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (470, 478), False, 'from keras import layers\n'), ((495, 511), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (509, 511), False, 'from keras import layers\n'), ((528, 551), 'keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), '(dropout)\n', (542, 551), False, 'from keras import layers\n'), ((568, 604), 'keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (580, 604), False, 'from keras import layers\n'), ((620, 663), 'keras.layers.Dense', 'layers.Dense', (['(4)'], {'activation': 'last_activation'}), '(4, activation=last_activation)\n', (632, 663), False, 'from keras import layers\n'), ((822, 877), 'keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""', 'input_shape': 'shape'}), "(128, activation='relu', input_shape=shape)\n", (834, 877), False, 'from keras import layers\n'), ((893, 936), 'keras.layers.Dense', 'layers.Dense', (['(4)'], {'activation': 'last_activation'}), '(4, activation=last_activation)\n', (905, 936), False, 'from keras import layers\n')]
|
import govee_api.device as dev
import abc
class _AbstractGoveeDeviceFactory(abc.ABC):
""" Declare an interface for operations that create abstract Govee devices """
@abc.abstractmethod
def build(self, govee, identifier, topic, sku, name, connected):
""" Build Govee device """
pass
class _GoveeBulbFactory(_AbstractGoveeDeviceFactory):
""" Implement the operations to build Govee bulb devices """
def build(self, govee, identifier, topic, sku, name, connected):
if sku == 'H6085':
return dev.GoveeWhiteBulb(govee, identifier, topic, sku, name, connected)
else:
return dev.GoveeBulb(govee, identifier, topic, sku, name, connected)
class _GoveeLedStripFactory(_AbstractGoveeDeviceFactory):
""" Implement the operations to build Govee LED strip devices """
def build(self, govee, identifier, topic, sku, name, connected):
return dev.GoveeLedStrip(govee, identifier, topic, sku, name, connected)
#class _GoveeStringLightFactory(_AbstractGoveeDeviceFactory):
# """ Implement the operations to build Govee string light devices """
# def build(self, govee, identifier, topic, sku, name, connected):
# if sku == 'H7022':
# return dev.H7022GoveeStringLight(govee, identifier, topic, sku, name, connected)
# return None
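
# Illustrative dispatch sketch: the category keys and helper name below are
# assumptions for this example, not part of the library API.
_FACTORIES = {
    'bulb': _GoveeBulbFactory(),
    'led_strip': _GoveeLedStripFactory(),
}

def _build_device(category, govee, identifier, topic, sku, name, connected):
    return _FACTORIES[category].build(govee, identifier, topic, sku, name, connected)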
|
[
"govee_api.device.GoveeWhiteBulb",
"govee_api.device.GoveeBulb",
"govee_api.device.GoveeLedStrip"
] |
[((928, 993), 'govee_api.device.GoveeLedStrip', 'dev.GoveeLedStrip', (['govee', 'identifier', 'topic', 'sku', 'name', 'connected'], {}), '(govee, identifier, topic, sku, name, connected)\n', (945, 993), True, 'import govee_api.device as dev\n'), ((551, 617), 'govee_api.device.GoveeWhiteBulb', 'dev.GoveeWhiteBulb', (['govee', 'identifier', 'topic', 'sku', 'name', 'connected'], {}), '(govee, identifier, topic, sku, name, connected)\n', (569, 617), True, 'import govee_api.device as dev\n'), ((651, 712), 'govee_api.device.GoveeBulb', 'dev.GoveeBulb', (['govee', 'identifier', 'topic', 'sku', 'name', 'connected'], {}), '(govee, identifier, topic, sku, name, connected)\n', (664, 712), True, 'import govee_api.device as dev\n')]
|
import subprocess
import pytest
def test_cli_meta():
assert subprocess.call(["pytest-check-links", "--version"]) == 0
assert subprocess.call(["pytest-check-links", "--help"]) == 0
@pytest.mark.parametrize("example,rc,expected,unexpected", [
["httpbin.md", 0, [" 6 passed"], [" failed"]],
["rst.rst", 1, [" 2 failed", " 7 passed"], [" warning"]]
])
def test_cli_pass(testdir, example, rc, expected, unexpected):
testdir.copy_example(example)
testdir.copy_example("setup.cfg")
proc = subprocess.Popen(["pytest-check-links"], stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
summary = stdout.decode('utf-8').strip().splitlines()[-1]
assert rc == proc.returncode
for ex in expected:
assert ex in summary, stdout.decode('utf-8')
for unex in unexpected:
assert unex not in summary, stdout.decode('utf-8')
|
[
"pytest.mark.parametrize",
"subprocess.Popen",
"subprocess.call"
] |
[((193, 365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""example,rc,expected,unexpected"""', "[['httpbin.md', 0, [' 6 passed'], [' failed']], ['rst.rst', 1, [' 2 failed',\n ' 7 passed'], [' warning']]]"], {}), "('example,rc,expected,unexpected', [['httpbin.md', 0,\n [' 6 passed'], [' failed']], ['rst.rst', 1, [' 2 failed', ' 7 passed'],\n [' warning']]])\n", (216, 365), False, 'import pytest\n'), ((514, 578), 'subprocess.Popen', 'subprocess.Popen', (["['pytest-check-links']"], {'stdout': 'subprocess.PIPE'}), "(['pytest-check-links'], stdout=subprocess.PIPE)\n", (530, 578), False, 'import subprocess\n'), ((66, 118), 'subprocess.call', 'subprocess.call', (["['pytest-check-links', '--version']"], {}), "(['pytest-check-links', '--version'])\n", (81, 118), False, 'import subprocess\n'), ((135, 184), 'subprocess.call', 'subprocess.call', (["['pytest-check-links', '--help']"], {}), "(['pytest-check-links', '--help'])\n", (150, 184), False, 'import subprocess\n')]
|
#!/usr/bin/env python
import numpy as np
import mixem
from mixem.distribution import MultivariateNormalDistribution
def generate_data():
dist_params = [
(np.array([4]), np.diag([1])),
(np.array([1]), np.diag([0.5]))
]
weights = [0.3, 0.7]
n_data = 5000
data = np.zeros((n_data, 1))
for i in range(n_data):
dpi = np.random.choice(range(len(dist_params)), p=weights)
dp = dist_params[dpi]
data[i] = np.random.multivariate_normal(dp[0], dp[1])
return data
def recover(data):
mu = np.mean(data)
sigma = np.var(data)
init_params = [
(np.array([mu + 0.1]), np.diag([sigma])),
(np.array([mu - 0.1]), np.diag([sigma]))
]
weight, distributions, ll = mixem.em(data, [MultivariateNormalDistribution(mu, sigma) for mu, sigma in init_params])
print(weight, distributions, ll)
if __name__ == '__main__':
data = generate_data()
recover(data)
|
[
"numpy.zeros",
"mixem.distribution.MultivariateNormalDistribution",
"numpy.mean",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.diag",
"numpy.var"
] |
[((301, 322), 'numpy.zeros', 'np.zeros', (['(n_data, 1)'], {}), '((n_data, 1))\n', (309, 322), True, 'import numpy as np\n'), ((558, 571), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (565, 571), True, 'import numpy as np\n'), ((584, 596), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (590, 596), True, 'import numpy as np\n'), ((466, 509), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['dp[0]', 'dp[1]'], {}), '(dp[0], dp[1])\n', (495, 509), True, 'import numpy as np\n'), ((169, 182), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (177, 182), True, 'import numpy as np\n'), ((184, 196), 'numpy.diag', 'np.diag', (['[1]'], {}), '([1])\n', (191, 196), True, 'import numpy as np\n'), ((208, 221), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (216, 221), True, 'import numpy as np\n'), ((223, 237), 'numpy.diag', 'np.diag', (['[0.5]'], {}), '([0.5])\n', (230, 237), True, 'import numpy as np\n'), ((627, 647), 'numpy.array', 'np.array', (['[mu + 0.1]'], {}), '([mu + 0.1])\n', (635, 647), True, 'import numpy as np\n'), ((649, 665), 'numpy.diag', 'np.diag', (['[sigma]'], {}), '([sigma])\n', (656, 665), True, 'import numpy as np\n'), ((677, 697), 'numpy.array', 'np.array', (['[mu - 0.1]'], {}), '([mu - 0.1])\n', (685, 697), True, 'import numpy as np\n'), ((699, 715), 'numpy.diag', 'np.diag', (['[sigma]'], {}), '([sigma])\n', (706, 715), True, 'import numpy as np\n'), ((772, 813), 'mixem.distribution.MultivariateNormalDistribution', 'MultivariateNormalDistribution', (['mu', 'sigma'], {}), '(mu, sigma)\n', (802, 813), False, 'from mixem.distribution import MultivariateNormalDistribution\n')]
|
from sqlalchemy.sql.expression import text
from sqlalchemy.sql.sqltypes import TIMESTAMP
from .database import Base
from sqlalchemy import Column, Integer, String, Boolean
class Post(Base):
__tablename__ = "user_posts"
id = Column(Integer, primary_key=True, nullable=False)
title = Column(String, nullable=False)
content = Column(String, nullable=False)
published = Column(Boolean, nullable=True, server_default="TRUE")
created_at = Column(TIMESTAMP(timezone=True),
nullable=False, server_default=text("now()"))
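
# Usage sketch (hypothetical Postgres URL; the project's real engine and
# session come from .database). Note that server_default=text("now()") is a
# database-side default, so the backend must understand now():
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine('postgresql://user:password@localhost/appdb')
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   session.add(Post(title='hello', content='first post'))
#   session.commit()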
|
[
"sqlalchemy.sql.expression.text",
"sqlalchemy.sql.sqltypes.TIMESTAMP",
"sqlalchemy.Column"
] |
[((240, 289), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'nullable': '(False)'}), '(Integer, primary_key=True, nullable=False)\n', (246, 289), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((302, 332), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (308, 332), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((347, 377), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (353, 377), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((394, 447), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(True)', 'server_default': '"""TRUE"""'}), "(Boolean, nullable=True, server_default='TRUE')\n", (400, 447), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((472, 496), 'sqlalchemy.sql.sqltypes.TIMESTAMP', 'TIMESTAMP', ([], {'timezone': '(True)'}), '(timezone=True)\n', (481, 496), False, 'from sqlalchemy.sql.sqltypes import TIMESTAMP\n'), ((553, 566), 'sqlalchemy.sql.expression.text', 'text', (['"""now()"""'], {}), "('now()')\n", (557, 566), False, 'from sqlalchemy.sql.expression import null, text\n')]
|
import struct
from pymaginopolis.chunkyfile import model as model
from pymaginopolis.chunkyfile.model import Endianness, CharacterSet
GRPB_HEADER_SIZE = 20
CHARACTER_SETS = {
model.CharacterSet.ANSI: "latin1",
model.CharacterSet.UTF16LE: "utf-16le"
}
def get_string_size_format(characterset):
# FUTURE: big endian
if characterset == model.CharacterSet.UTF16BE or characterset == model.CharacterSet.UTF16LE:
return "H", 2, 2
else:
return "B", 1, 1
def parse_pascal_string_with_encoding(data):
"""
Read a character set followed by a pascal string
:param data:
:return: tuple containing string, number of bytes consumed and characterset
"""
# Read character set
character_set = struct.unpack("<H", data[0:2])[0]
character_set = model.CharacterSet(character_set)
chunk_name, string_size = parse_pascal_string(character_set, data[2:])
return chunk_name, string_size + 2, character_set
def parse_pascal_string(characterset, data):
"""
Read a Pascal string from a byte array using the given character set.
:param characterset: Character set to use to decode the string
:param data: binary data
:return: tuple containing string and number of bytes consumed
"""
string_size_format, string_size_size, character_size = get_string_size_format(characterset)
if len(data) < string_size_size:
raise FileParseException("String size truncated")
string_size = struct.unpack("<" + string_size_format, data[0:string_size_size])[0] * character_size
string_data = data[string_size_size:string_size_size + string_size]
result = string_data.decode(CHARACTER_SETS[characterset])
total_size = string_size_size + string_size
return result, total_size
def generate_pascal_string(characterset, value):
string_size_format, string_size_size, character_size = get_string_size_format(characterset)
encoded_string = value.encode(CHARACTER_SETS[characterset])
return struct.pack("<" + string_size_format, len(value)) + encoded_string
class FileParseException(Exception):
""" Raised if a problem is found with the chunky file. """
pass
def check_size(expected, actual, desc):
""" Raise an exception if this part of the file is truncated """
if actual < expected:
raise FileParseException("%s truncated: expected 0x%x, got 0x%x" % (desc, expected, actual))
def parse_u24le(data):
""" Parse a 24-bit little endian number """
return data[0] | (data[1] << 8) | (data[2] << 16)
def parse_endianness_and_characterset(data):
check_size(4, len(data), "Endianness/characterset")
endianness, characterset = struct.unpack("<2H", data)
endianness = model.Endianness(endianness)
characterset = model.CharacterSet(characterset)
return endianness, characterset,
def tag_bytes_to_string(tag):
"""
Convert the raw bytes for a tag into a string
:param tag: bytes (eg. b'\x50\x4d\x42\x4d')
:return: tag (eg. "MBMP")
"""
return tag[::-1].decode("ansi").rstrip("\x00")
def parse_grpb_list(data):
"""
Parse a GRPB chunk
:param data: GRPB chunk
:return: tuple containing endianness, characterset, index entry size, item index and item heap
"""
endianness, characterset, index_entry_size, number_of_entries, heap_size, unk1 = struct.unpack("<2H4I", data[
0:GRPB_HEADER_SIZE])
endianness = Endianness(endianness)
characterset = CharacterSet(characterset)
# TODO: figure out what this is
if unk1 != 0xFFFFFFFF:
raise NotImplementedError("can't parse this GRPB because unknown1 isn't 0xFFFFFFFF")
# Read heap
heap = data[GRPB_HEADER_SIZE:GRPB_HEADER_SIZE + heap_size]
# Read index
index_size = index_entry_size * number_of_entries
index_data = data[GRPB_HEADER_SIZE + heap_size:GRPB_HEADER_SIZE + heap_size + index_size]
index_items = [index_data[i * index_entry_size:(i + 1) * index_entry_size] for i in range(0, number_of_entries)]
return endianness, characterset, index_entry_size, index_items, heap
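
# Round-trip sanity check for the string helpers above, using the module's
# own ANSI-to-latin1 mapping:
if __name__ == '__main__':
    blob = generate_pascal_string(model.CharacterSet.ANSI, 'MBMP')
    text, consumed = parse_pascal_string(model.CharacterSet.ANSI, blob)
    assert (text, consumed) == ('MBMP', len(blob))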
|
[
"pymaginopolis.chunkyfile.model.Endianness",
"struct.unpack",
"pymaginopolis.chunkyfile.model.CharacterSet"
] |
[((800, 833), 'pymaginopolis.chunkyfile.model.CharacterSet', 'model.CharacterSet', (['character_set'], {}), '(character_set)\n', (818, 833), True, 'from pymaginopolis.chunkyfile import model as model\n'), ((2672, 2698), 'struct.unpack', 'struct.unpack', (['"""<2H"""', 'data'], {}), "('<2H', data)\n", (2685, 2698), False, 'import struct\n'), ((2716, 2744), 'pymaginopolis.chunkyfile.model.Endianness', 'model.Endianness', (['endianness'], {}), '(endianness)\n', (2732, 2744), True, 'from pymaginopolis.chunkyfile import model as model\n'), ((2764, 2796), 'pymaginopolis.chunkyfile.model.CharacterSet', 'model.CharacterSet', (['characterset'], {}), '(characterset)\n', (2782, 2796), True, 'from pymaginopolis.chunkyfile import model as model\n'), ((3342, 3390), 'struct.unpack', 'struct.unpack', (['"""<2H4I"""', 'data[0:GRPB_HEADER_SIZE]'], {}), "('<2H4I', data[0:GRPB_HEADER_SIZE])\n", (3355, 3390), False, 'import struct\n'), ((3517, 3539), 'pymaginopolis.chunkyfile.model.Endianness', 'Endianness', (['endianness'], {}), '(endianness)\n', (3527, 3539), False, 'from pymaginopolis.chunkyfile.model import Endianness, CharacterSet\n'), ((3559, 3585), 'pymaginopolis.chunkyfile.model.CharacterSet', 'CharacterSet', (['characterset'], {}), '(characterset)\n', (3571, 3585), False, 'from pymaginopolis.chunkyfile.model import Endianness, CharacterSet\n'), ((746, 776), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'data[0:2]'], {}), "('<H', data[0:2])\n", (759, 776), False, 'import struct\n'), ((1474, 1539), 'struct.unpack', 'struct.unpack', (["('<' + string_size_format)", 'data[0:string_size_size]'], {}), "('<' + string_size_format, data[0:string_size_size])\n", (1487, 1539), False, 'import struct\n')]
|
#
# This file is part of pyasn1-alt-modules software.
#
# Created by <NAME> with assistance from asn1ate v.0.6.0.
# Modified by <NAME> to add maps for use with opentypes.
# Modified by <NAME> to include the opentypemap manager.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
# Certificate Extension for CMS Content Constraints (CCC)
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc6010.txt
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import univ
from pyasn1_alt_modules import rfc5280
from pyasn1_alt_modules import opentypemap
certificateExtensionsMap = opentypemap.get('certificateExtensionsMap')
MAX = float('inf')
AttributeType = rfc5280.AttributeType
AttributeValue = rfc5280.AttributeValue
id_ct_anyContentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.0')
class AttrConstraint(univ.Sequence):
pass
AttrConstraint.componentType = namedtype.NamedTypes(
namedtype.NamedType('attrType', AttributeType()),
namedtype.NamedType('attrValues', univ.SetOf(
componentType=AttributeValue()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class AttrConstraintList(univ.SequenceOf):
pass
AttrConstraintList.componentType = AttrConstraint()
AttrConstraintList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class ContentTypeGeneration(univ.Enumerated):
pass
ContentTypeGeneration.namedValues = namedval.NamedValues(
('canSource', 0),
('cannotSource', 1)
)
class ContentTypeConstraint(univ.Sequence):
pass
ContentTypeConstraint.componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('canSource', ContentTypeGeneration().subtype(value='canSource')),
namedtype.OptionalNamedType('attrConstraints', AttrConstraintList())
)
# CMS Content Constraints (CCC) Extension and Object Identifier
id_pe_cmsContentConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.18')
class CMSContentConstraints(univ.SequenceOf):
pass
CMSContentConstraints.componentType = ContentTypeConstraint()
CMSContentConstraints.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
# Map of Certificate Extension OIDs to Extensions
# To be added to the ones that are in rfc5280.py
_certificateExtensionsMapUpdate = {
id_pe_cmsContentConstraints: CMSContentConstraints(),
}
certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
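
# Encoding sketch: build a one-element constraint list and DER-encode it.
# The encoder import is standard pyasn1; the content type used is the
# module's own id-ct-anyContentType.
if __name__ == '__main__':
    from pyasn1.codec.der.encoder import encode as der_encode

    entry = ContentTypeConstraint()
    entry['contentType'] = id_ct_anyContentType
    ccc = CMSContentConstraints()
    ccc.append(entry)
    print(der_encode(ccc).hex())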
|
[
"pyasn1.type.namedval.NamedValues",
"pyasn1_alt_modules.opentypemap.get",
"pyasn1.type.constraint.ValueSizeConstraint",
"pyasn1.type.univ.ObjectIdentifier"
] |
[((712, 755), 'pyasn1_alt_modules.opentypemap.get', 'opentypemap.get', (['"""certificateExtensionsMap"""'], {}), "('certificateExtensionsMap')\n", (727, 755), False, 'from pyasn1_alt_modules import opentypemap\n'), ((882, 932), 'pyasn1.type.univ.ObjectIdentifier', 'univ.ObjectIdentifier', (['"""1.2.840.113549.1.9.16.1.0"""'], {}), "('1.2.840.113549.1.9.16.1.0')\n", (903, 932), False, 'from pyasn1.type import univ\n'), ((1380, 1418), 'pyasn1.type.constraint.ValueSizeConstraint', 'constraint.ValueSizeConstraint', (['(1)', 'MAX'], {}), '(1, MAX)\n', (1410, 1418), False, 'from pyasn1.type import constraint\n'), ((1513, 1572), 'pyasn1.type.namedval.NamedValues', 'namedval.NamedValues', (["('canSource', 0)", "('cannotSource', 1)"], {}), "(('canSource', 0), ('cannotSource', 1))\n", (1533, 1572), False, 'from pyasn1.type import namedval\n'), ((2035, 2078), 'pyasn1.type.univ.ObjectIdentifier', 'univ.ObjectIdentifier', (['"""1.3.6.1.5.5.7.1.18"""'], {}), "('1.3.6.1.5.5.7.1.18')\n", (2056, 2078), False, 'from pyasn1.type import univ\n'), ((2232, 2270), 'pyasn1.type.constraint.ValueSizeConstraint', 'constraint.ValueSizeConstraint', (['(1)', 'MAX'], {}), '(1, MAX)\n', (2262, 2270), False, 'from pyasn1.type import constraint\n'), ((1738, 1761), 'pyasn1.type.univ.ObjectIdentifier', 'univ.ObjectIdentifier', ([], {}), '()\n', (1759, 1761), False, 'from pyasn1.type import univ\n'), ((1199, 1237), 'pyasn1.type.constraint.ValueSizeConstraint', 'constraint.ValueSizeConstraint', (['(1)', 'MAX'], {}), '(1, MAX)\n', (1229, 1237), False, 'from pyasn1.type import constraint\n')]
|
import unittest
import numpy as np
from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues
class TestFilterObject(unittest.TestCase):
def test_gaussian2(self):
std = -0.1339048038303071
mean = -0.1339048038303071
x = 150.10086283379565
temp = self.gaussian_nb.get_gaussian(x, mean, std)
self.assertAlmostEqual(temp, 0)
def test_prior(self):
y = np.array([1] * 10 + [0] * 5)
self.gaussian_nb.set_unique_y(y)
self.gaussian_nb.set_prior(y)
prior = self.gaussian_nb.prior
self.assertAlmostEqual(prior[0], 0.33, places=2)
self.assertAlmostEqual(prior[1], 0.67, places=2)
def test_means_standard_deviations(self):
x = [[0, 0],
[0, 0],
[1, -1],
[1, 0],
[1, 0],
[2, 3]]
y = [0, 0, 0, 1, 1, 1]
x = np.array(x)
y = np.array(y)
self.gaussian_nb.fit(x, y)
means = self.gaussian_nb.means
standard_deviations = self.gaussian_nb.standard_deviations
self.assertAlmostEqual(means[0, 0], 0.33, places=2)
self.assertAlmostEqual(means[0, 1], -0.33, places=2)
self.assertAlmostEqual(means[1, 0], 1.33, places=2)
self.assertAlmostEqual(means[1, 1], 1, places=2)
self.assertAlmostEqual(standard_deviations[0, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[0, 1] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 1] ** 2, 3, places=2)
def test_means_standard_deviations_with_nan(self):
self.gaussian_nb.fit(self.x, self.y)
means = self.gaussian_nb.means
standard_deviations = self.gaussian_nb.standard_deviations
self.assertAlmostEqual(means[0, 0], 0.33, places=2)
self.assertAlmostEqual(means[0, 1], -0.33, places=2)
self.assertAlmostEqual(means[1, 0], 1.33, places=2)
self.assertAlmostEqual(means[1, 1], 1, places=2)
self.assertAlmostEqual(standard_deviations[0, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[0, 1] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 0] ** 2, 0.33, places=2)
self.assertAlmostEqual(standard_deviations[1, 1] ** 2, 3, places=2)
def test_likelihoods(self):
self.gaussian_nb.fit(self.x, self.y)
x_in = np.array([1, 0])
likelihoods = self.gaussian_nb.get_all_likelihoods(x_in)
self.assertNotAlmostEqual(likelihoods[0], 0, places=2)
self.assertNotAlmostEqual(likelihoods[0], 1, places=2)
self.assertNotAlmostEqual(likelihoods[1], 0, places=2)
self.assertNotAlmostEqual(likelihoods[1], 1, places=2)
def setUp(self):
self.x = [[0, np.nan], # 0
[np.nan, 0], # 0
[0, 0], # 0
[1, -1], # 0
[1, np.nan], # 1
[np.nan, 0], # 1
[1, 0], # 1
[2, 3]] # 1
self.y = [0, 0, 0, 0, 1, 1, 1, 1]
self.y_uniq = [0, 1]
self.x = np.array(self.x)
self.y = np.array(self.y)
self.y_uniq = np.array(self.y_uniq)
self.gaussian_nb = GaussianNBWithMissingValues()
def test_predict_proba(self):
clf = GaussianNBWithMissingValues()
clf.fit(self.x, self.y)
x_in = np.array([[1, 0]])
proba = clf.predict_proba(x_in)
self.assertNotAlmostEqual(proba[0][0], 0, places=2)
self.assertNotAlmostEqual(proba[0][0], 1, places=2)
self.assertNotAlmostEqual(proba[0][1], 0, places=2)
self.assertNotAlmostEqual(proba[0][1], 1, places=2)
self.assertGreater(proba[0][1], proba[0][0])
def test_gaussian(self):
gaussian = self.gaussian_nb.get_gaussian(0.441, 1, 0.447213595)
self.assertAlmostEqual(gaussian, 0.40842, places=2)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"quasimodo.assertion_fusion.gaussian_nb_with_missing_values.GaussianNBWithMissingValues",
"numpy.array"
] |
[((4045, 4060), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4058, 4060), False, 'import unittest\n'), ((450, 478), 'numpy.array', 'np.array', (['([1] * 10 + [0] * 5)'], {}), '([1] * 10 + [0] * 5)\n', (458, 478), True, 'import numpy as np\n'), ((928, 939), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (936, 939), True, 'import numpy as np\n'), ((952, 963), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (960, 963), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2515, 2523), True, 'import numpy as np\n'), ((3220, 3236), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (3228, 3236), True, 'import numpy as np\n'), ((3254, 3270), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (3262, 3270), True, 'import numpy as np\n'), ((3293, 3314), 'numpy.array', 'np.array', (['self.y_uniq'], {}), '(self.y_uniq)\n', (3301, 3314), True, 'import numpy as np\n'), ((3342, 3371), 'quasimodo.assertion_fusion.gaussian_nb_with_missing_values.GaussianNBWithMissingValues', 'GaussianNBWithMissingValues', ([], {}), '()\n', (3369, 3371), False, 'from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues\n'), ((3421, 3450), 'quasimodo.assertion_fusion.gaussian_nb_with_missing_values.GaussianNBWithMissingValues', 'GaussianNBWithMissingValues', ([], {}), '()\n', (3448, 3450), False, 'from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues\n'), ((3498, 3516), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (3506, 3516), True, 'import numpy as np\n')]
|
from itertools import cycle
import numpy as np
from scipy import sparse
import h5py
from evaluation import load_nuswide, normalize
rng = np.random.RandomState(1701)
transformer = []
batch_size = 100
def load():
_, label, _, label_name, _, data = load_nuswide('nuswide-decaf.npz', 'train')
data = data.toarray()
data = normalize(data, axis = 1)
label = label.tolil()
return data, label, label_name
def save(fname, I, W):
h5out = h5py.File(fname + '.h5', 'w')
Iset = h5out.create_dataset('I', data = I)
Wset = h5out.create_dataset('W', data = W)
h5out.close()
def projection(X):
norm_X = np.linalg.norm(X, axis = 1)
for i in range(len(norm_X)):
if norm_X[i] > 1:
X[i, :] *= 1. / norm_X[i]
return X
def initialize_word_embeddings(label_name, embed_dim):
import gensim
model = gensim.models.Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
assert model.syn0.shape[1] == embed_dim
W = []
for name in label_name:
W.append(model[name])
return np.asarray(W)
def train(I, W, data, label, lr_I = 0.001, lr_W = 0.001, maxIter = None):
it = 0
loss = 0
sampleIter = cycle(rng.permutation(label.shape[0]))
universe = set(range(label.shape[1]))
I = projection(I)
W = projection(W)
print('Start training with lr_I {}, lr_W {}, maxIter {}'.format(lr_I, lr_W, maxIter))
while True:
# update
        sampleId = next(sampleIter)  # Python 3: cycle iterators have no .next() method
feat = np.dot(data[sampleId], I)
# obtain label and vlabel (violate label)
l = label.rows[sampleId]
if len(l) == 0:
continue
vl = list(universe.difference(l))
vllen = len(vl)
delta_feat = np.zeros(feat.shape)
delta_W = np.zeros(W.shape)
for y in l:
score = np.dot(W[y, :], feat)
margin = -1
esN = 0
while margin <= 0 and esN < (vllen - 1):
vy = vl[rng.randint(vllen)]
vscore = np.dot(W[vy, :], feat)
margin = vscore - score + 1
esN += 1
if margin > 0:
                rank = transformer[(vllen - 1) // esN]  # integer division: list indices must be ints
loss += rank * margin
# gradient
delta_feat += (W[y, :] - W[vy, :]) * rank
temp = feat * rank
delta_W[y, :] += temp
delta_W[vy, :] -= temp
I += np.tensordot(data[sampleId], delta_feat, axes = 0) * (lr_I / len(l))
W += delta_W * (lr_W / len(l))
if lr_I > 0.:
I = projection(I)
if lr_W > 0.:
W = projection(W)
it += 1
if maxIter is not None and it == maxIter:
print('Finished training at iteration {} with loss: {}'.format(it, loss / ((it - 1) % batch_size + 1)))
break
if it % batch_size == 0:
print('\titer: {}\tloss: {}'.format(it, loss / batch_size))
loss = 0
# save
if it % label.shape[0] == 0:
print('saving model...')
save('models/wsabie_model_iter_{}'.format(it), I, W)
return I, W
if __name__ == '__main__':
embed_dim = 300
random_init_W = True
# load data
data, label, label_name = load()
print('Data shape: {}'.format(data.shape))
print('Label shape: {}'.format(label.shape))
# initialize transformer
transformer = [0] * (label.shape[1] + 1)
for i in range(label.shape[1]):
transformer[i + 1] = transformer[i] + 1. / (i + 1)
# initialize model
I = rng.rand(data.shape[1], embed_dim).astype(data.dtype)
if random_init_W:
W = rng.rand(label.shape[1], embed_dim).astype(data.dtype)
else:
W = initialize_word_embeddings(label_name, embed_dim)
# train loop
I, W = train(I, W, data, label, lr_I = 0.001, lr_W = 0.00001,
maxIter = 2 * data.shape[0])
# save to hdf5 file
save('models/wsabie_model', I, W)
|
[
"h5py.File",
"gensim.models.Word2Vec.load_word2vec_format",
"numpy.tensordot",
"evaluation.normalize",
"numpy.asarray",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.linalg.norm",
"numpy.dot",
"evaluation.load_nuswide"
] |
[((138, 165), 'numpy.random.RandomState', 'np.random.RandomState', (['(1701)'], {}), '(1701)\n', (159, 165), True, 'import numpy as np\n'), ((250, 292), 'evaluation.load_nuswide', 'load_nuswide', (['"""nuswide-decaf.npz"""', '"""train"""'], {}), "('nuswide-decaf.npz', 'train')\n", (262, 292), False, 'from evaluation import load_nuswide, normalize\n'), ((326, 349), 'evaluation.normalize', 'normalize', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (335, 349), False, 'from evaluation import load_nuswide, normalize\n'), ((443, 472), 'h5py.File', 'h5py.File', (["(fname + '.h5')", '"""w"""'], {}), "(fname + '.h5', 'w')\n", (452, 472), False, 'import h5py\n'), ((610, 635), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (624, 635), True, 'import numpy as np\n'), ((816, 915), 'gensim.models.Word2Vec.load_word2vec_format', 'gensim.models.Word2Vec.load_word2vec_format', (['"""GoogleNews-vectors-negative300.bin"""'], {'binary': '(True)'}), "(\n 'GoogleNews-vectors-negative300.bin', binary=True)\n", (859, 915), False, 'import gensim\n'), ((1023, 1036), 'numpy.asarray', 'np.asarray', (['W'], {}), '(W)\n', (1033, 1036), True, 'import numpy as np\n'), ((1429, 1454), 'numpy.dot', 'np.dot', (['data[sampleId]', 'I'], {}), '(data[sampleId], I)\n', (1435, 1454), True, 'import numpy as np\n'), ((1641, 1661), 'numpy.zeros', 'np.zeros', (['feat.shape'], {}), '(feat.shape)\n', (1649, 1661), True, 'import numpy as np\n'), ((1676, 1693), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (1684, 1693), True, 'import numpy as np\n'), ((1724, 1745), 'numpy.dot', 'np.dot', (['W[y, :]', 'feat'], {}), '(W[y, :], feat)\n', (1730, 1745), True, 'import numpy as np\n'), ((2220, 2268), 'numpy.tensordot', 'np.tensordot', (['data[sampleId]', 'delta_feat'], {'axes': '(0)'}), '(data[sampleId], delta_feat, axes=0)\n', (2232, 2268), True, 'import numpy as np\n'), ((1878, 1900), 'numpy.dot', 'np.dot', (['W[vy, :]', 'feat'], {}), '(W[vy, :], feat)\n', (1884, 1900), True, 'import numpy as np\n')]
|
import numpy as np
from ..AShape import AShape, AShape
class TileInfo:
"""
Tile info.
arguments
shape AShape
tiles Iterable of ints
errors during the construction:
ValueError
result:
.o_shape AShape
.axes_slices list of slice() to fetch original shape
from o_shape for each tile
"""
__slots__ = ['o_shape', 'axes_slices']
def __init__(self, shape, tiles):
if len(tiles) != shape.ndim:
raise ValueError(f'tiles should match shape.ndim {shape.ndim}')
self.o_shape = AShape(dim*tiles[i] for i,dim in enumerate(shape))
c = [0]*shape.ndim
axes_offsets = []
for n in range(np.prod(tiles)):
axes_offsets.append( c.copy() )
for i in range(shape.ndim-1,-1,-1):
c[i] += 1
if c[i] < tiles[i]:
break
c[i] = 0
axes_slices = []
for axes_offset in axes_offsets:
sl = []
for axis,tile in enumerate(axes_offset):
axis_size = shape[axis]
sl.append( slice(axis_size*tile, axis_size*(tile+1)) )
axes_slices.append(tuple(sl))
self.axes_slices = tuple(axes_slices)
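
# Usage sketch (illustrative; AShape is this package's shape type):
#   ti = TileInfo(shape, tiles=(2, 2))            # shape: a 2-D AShape
#   tiled = np.tile(original, (2, 2))             # tiled.shape == ti.o_shape
#   for sl in ti.axes_slices:                     # each sl recovers one copy
#       assert np.array_equal(tiled[sl], original)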
|
[
"numpy.prod"
] |
[((738, 752), 'numpy.prod', 'np.prod', (['tiles'], {}), '(tiles)\n', (745, 752), True, 'import numpy as np\n')]
|
import random
import pickle
import numpy as np
import torch
M = 2**32 - 1
def init_fn(worker):
seed = torch.LongTensor(1).random_().item()
seed = (seed + worker) % M
np.random.seed(seed)
random.seed(seed)
def add_mask(x, mask, dim=1):
mask = mask.unsqueeze(dim)
shape = list(x.shape); shape[dim] += 21
new_x = x.new(*shape).zero_()
new_x = new_x.scatter_(dim, mask, 1.0)
s = [slice(None)]*len(shape)
s[dim] = slice(21, None)
    new_x[tuple(s)] = x  # index with a tuple; a plain list of slices is rejected by newer PyTorch
return new_x
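
# Sanity sketch for add_mask: x is (N, C, L), mask holds integer class ids
# below 21, and 21 one-hot planes are prepended ahead of the original
# channels along `dim`:
#   x = torch.zeros(1, 4, 3)
#   mask = torch.tensor([[5, 0, 20]])
#   add_mask(x, mask, dim=1).shape  # -> torch.Size([1, 25, 3])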
def sample(x, size):
#https://gist.github.com/yoavram/4134617
i = random.sample(range(x.shape[0]), size)
return torch.tensor(x[i], dtype=torch.int16)
#x = np.random.permutation(x)
#return torch.tensor(x[:size])
def pkload(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
_shape = (240, 240, 155)
def get_all_coords(stride):
return torch.tensor(
np.stack([v.reshape(-1) for v in
np.meshgrid(
*[stride//2 + np.arange(0, s, stride) for s in _shape],
indexing='ij')],
-1), dtype=torch.int16)
_zero = torch.tensor([0])
def gen_feats():
x, y, z = 240, 240, 155
feats = np.stack(
np.meshgrid(
np.arange(x), np.arange(y), np.arange(z),
indexing='ij'), -1).astype('float32')
shape = np.array([x, y, z])
feats -= shape/2.0
feats /= shape
return feats
|
[
"numpy.random.seed",
"torch.LongTensor",
"pickle.load",
"numpy.array",
"random.seed",
"numpy.arange",
"torch.tensor"
] |
[((1122, 1139), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (1134, 1139), False, 'import torch\n'), ((180, 200), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (194, 200), True, 'import numpy as np\n'), ((205, 222), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (216, 222), False, 'import random\n'), ((627, 664), 'torch.tensor', 'torch.tensor', (['x[i]'], {'dtype': 'torch.int16'}), '(x[i], dtype=torch.int16)\n', (639, 664), False, 'import torch\n'), ((1357, 1376), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1365, 1376), True, 'import numpy as np\n'), ((802, 816), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (813, 816), False, 'import pickle\n'), ((108, 127), 'torch.LongTensor', 'torch.LongTensor', (['(1)'], {}), '(1)\n', (124, 127), False, 'import torch\n'), ((1249, 1261), 'numpy.arange', 'np.arange', (['x'], {}), '(x)\n', (1258, 1261), True, 'import numpy as np\n'), ((1263, 1275), 'numpy.arange', 'np.arange', (['y'], {}), '(y)\n', (1272, 1275), True, 'import numpy as np\n'), ((1277, 1289), 'numpy.arange', 'np.arange', (['z'], {}), '(z)\n', (1286, 1289), True, 'import numpy as np\n'), ((998, 1021), 'numpy.arange', 'np.arange', (['(0)', 's', 'stride'], {}), '(0, s, stride)\n', (1007, 1021), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-07 15:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('smartshark', '0005_auto_20160607_1657'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('url', models.URLField()),
],
),
migrations.AddField(
model_name='pluginexecution',
name='project',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='smartshark.Project'),
preserve_default=False,
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.AutoField"
] |
[((770, 875), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': 'None', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""smartshark.Project"""'}), "(default=None, on_delete=django.db.models.deletion.CASCADE,\n to='smartshark.Project')\n", (787, 875), False, 'from django.db import migrations, models\n'), ((431, 524), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (447, 524), False, 'from django.db import migrations, models\n'), ((548, 580), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (564, 580), False, 'from django.db import migrations, models\n'), ((607, 624), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (622, 624), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Requires python3 package for influxDB to import InfluxDBClient:
sudo apt install python3-influxdb
API documentation for python InfluxDBClient:
https://influxdb-python.readthedocs.io/en/latest/api-documentation.html#
users = db_client.get_list_users()
print (users)
API documentation for influxdb
https://docs.influxdata.com/influxdb/v1.4/introduction/
connect to the command line interface of influxdb:
influx
> show users
> show databases
> use <DATABASE>
> show series
'''
import time
import json
import sys
import argparse
from influxdb import InfluxDBClient
def main():
parser = argparse.ArgumentParser(
description='''This application reads json from std in,
and pushes received data to an InfluxDB''',
)
parser.add_argument(
'--db_name',
type=str,
default='',
help='the name of the InfluxDB data is written to',
)
parser.add_argument(
'--db_host',
type=str,
default='localhost',
help='the host providing the influxdb',
)
parser.add_argument(
'--db_port',
type=int,
default=8086,
help='the port of the influxdb',
)
parser.add_argument(
'--db_user',
type=str,
default='',
help='the user allowed to write to the InfluxDB',
)
parser.add_argument(
'--db_password',
type=str,
default='',
		help='the password for the specified user to access the InfluxDB',
)
parser.add_argument(
'--testing',
action='store_true',
default=False,
help='enabling testing mode without writing to InfluxDB',
)
args = parser.parse_args()
#
# Try to establish a DB connection
#
if args.testing == True:
print('testing mode, no DB used ...', file=sys.stderr)
else:
print('connection to InfluxDB ...', file=sys.stderr)
try:
print('db_host: ' + args.db_host, file=sys.stderr)
print('db_name: ' + args.db_name, file=sys.stderr)
db_client = InfluxDBClient(
args.db_host,
args.db_port,
args.db_user,
args.db_password,
args.db_name
)
except Exception as e:
print(e, file=sys.stderr)
raise OSError('connection to InfluxDB cannot be established!')
print('... connection established', file=sys.stderr)
		print('... waiting for incoming data', file=sys.stderr)
#
# read from stdin and write to InfluxDB
#
# json_body = [
# {
# "measurement": "cpu_load_short",
# "tags": {
# "host": "server01",
# "region": "us-west"
# },
# "time": "2009-11-10T23:00:00Z",
# "fields": {
# "Float_value": 0.64,
# "Int_value": 3,
# "String_value": "Text",
# "Bool_value": True
# }
# }
# ]
# {
# "location": "wohnzimmer",
# "sensor": "dht11@0",
# "host": "the-crowsnest",
# "time": "2018-02-18T17:21:46.617120",
# "temperature": "18000",
# "pressure": null,
# "humidity_relative": "45000"
# }
while True:
try:
for line in sys.stdin:
# json.loads is for loading from strings
# json.load is for loading form other resources
data = json.loads(line)
mymeasurement = "roomclimate"
myhost = data['host']
mysensor = data['sensor']
mylocation = data['location']
mytime = data['time']
# sensor readings are pushed to database as they are
# further processing needs to be done by display / grafana
if data['temperature'] != None:
mytemperature = int(data['temperature'])
else:
# catch edge case, if sensor has no temperature
mytemperature = 0
if data['humidity_relative'] != None:
# BME280 Sensor has humidity_relative data as float
# 2019-02-06: nach update hat der BME280 die Luftfeuchtigkeit im passenden Format, *1000 nun nicht mehr erforderlich
# if mysensor == 'bme280':
# myhumidity = int(float(data['humidity_relative'])*1000)
# else:
myhumidity = int(data['humidity_relative'])
else:
# catch edge case, if sensor has no humidity_relative
myhumidity = 0
if data['pressure'] != None:
# BME280 Sensor has pressure data as float
if mysensor == 'bme280':
mypressure = int(float(data['pressure'])*1000)
else:
mypressure = int(data['pressure'])
else:
# catch edge case, if sensor has no pressure
mypressure = 0
json_body = [
{
"measurement": mymeasurement,
"tags": {
"host": myhost,
"sensor": mysensor,
"location": mylocation,
},
"time": mytime,
"fields": {
"temperature": mytemperature,
"humidity_relative": myhumidity,
"pressure": mypressure,
},
}
]
if args.testing == True:
print("measurement:"+ mymeasurement, file=sys.stderr)
print("host:" + myhost, file=sys.stderr)
print("sensor:" + mysensor, file=sys.stderr)
print("location:" + mylocation, file=sys.stderr)
print("time:" + mytime, file=sys.stderr)
print("temperature:"+ str(mytemperature), file=sys.stderr)
print("humidity_relative:"+ str(myhumidity), file=sys.stderr)
print("pressure:" + str(mypressure), file=sys.stderr)
print("\n", file=sys.stderr)
#print(json_body, file=sys.stderr)
else:
db_client.write_points(json_body)
except Exception as e:
print(e, file=sys.stderr)
continue
#
# main function
#
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Shutting down iiopoll2influx.py', file=sys.stderr)
except Exception as e:
print(e)
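
# Example invocation (hypothetical credentials), feeding one JSON reading per
# line on stdin:
#
#   echo '{"location": "wohnzimmer", "sensor": "dht11@0", "host": "pi", "time": "2018-02-18T17:21:46", "temperature": "18000", "pressure": null, "humidity_relative": "45000"}' \
#       | ./iiopoll2influx.py --db_name roomclimate --db_user admin --db_password secret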
|
[
"influxdb.InfluxDBClient",
"argparse.ArgumentParser",
"json.loads"
] |
[((639, 774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This application reads json from std in,\n\t\tand pushes received data to an InfluxDB"""'}), '(description=\n """This application reads json from std in,\n\t\tand pushes received data to an InfluxDB"""\n )\n', (662, 774), False, 'import argparse\n'), ((1850, 1942), 'influxdb.InfluxDBClient', 'InfluxDBClient', (['args.db_host', 'args.db_port', 'args.db_user', 'args.db_password', 'args.db_name'], {}), '(args.db_host, args.db_port, args.db_user, args.db_password,\n args.db_name)\n', (1864, 1942), False, 'from influxdb import InfluxDBClient\n'), ((3059, 3075), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3069, 3075), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
import pytest
from fintopics.data.pipeline.regex import RegexExtractionPipeline
@pytest.fixture()
def regex_pipeline():
"""Creates a RegexExtractionPipeline fixture."""
return RegexExtractionPipeline()
@pytest.mark.asyncio
async def test_header_removal(datadir, regex_pipeline):
"""Tests removal of header."""
header_file = datadir / 'header.txt'
text = header_file.read_text()
actual_text = await regex_pipeline.coroutine(text)
assert actual_text['text'].strip() == ''
@pytest.mark.asyncio
async def test_exhibit_removal(datadir, regex_pipeline):
"""Tests removal of header."""
exhibit_file = datadir / 'exhibit.txt'
text = exhibit_file.read_text()
actual_text = await regex_pipeline.coroutine(text)
assert actual_text['text'].strip() == ''
|
[
"fintopics.data.pipeline.regex.RegexExtractionPipeline",
"pytest.fixture"
] |
[((109, 125), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (123, 125), False, 'import pytest\n'), ((212, 237), 'fintopics.data.pipeline.regex.RegexExtractionPipeline', 'RegexExtractionPipeline', ([], {}), '()\n', (235, 237), False, 'from fintopics.data.pipeline.regex import RegexExtractionPipeline\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-01-05 14:57:15
# @Author : <NAME> (<EMAIL>)
import os
import json
import time
from backend import keras
class TrainingCallbacks(keras.callbacks.Callback):
def __init__(self, task_path='', log_name='training'):
self.task_path = task_path
self.log_name = log_name
def on_train_begin(self, logs):
with open(f'{self.task_path}/{self.log_name}_logs.json', 'w') as f:
f.write('')
# if os.path.exists(f'{self.task_path}/{self.log_name}_logs.json'):
# os.remove(f'{self.task_path}/{self.log_name}_logs.json')
def on_train_end(self, logs):
txt = json.dumps({
'EPOCH': 'Finished',
'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
'scores': logs
})
with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
f.write(f'{txt}\n')
os.remove(f'{self.task_path}/state.json')
def on_epoch_begin(self, epoch, logs):
txt = json.dumps({
'EPOCH': epoch,
'state': 'Begin',
'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
})
with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
f.write(f'{txt}\n')
def on_epoch_end(self, epoch, logs):
txt = json.dumps({
'EPOCH': epoch,
'state': 'end',
'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
'scores': str(logs)
})
with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
f.write(f'{txt}\n')
def on_train_batch_end(self, epoch, logs):
txt = json.dumps({
'batch': epoch,
'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
'scores': str(logs)
})
with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
f.write(f'\t{txt}\n')
class EvaluatingCallbacks(keras.callbacks.Callback):
def __init__(self, task_path='', log_name='evaluating'):
self.task_path = task_path
self.log_name = log_name
def on_test_begin(self, logs=None):
print('Evaluating ...')
with open(f'{self.task_path}/{self.log_name}_logs.json', 'w') as f:
f.write('')
# if os.path.exists(f'{self.task_path}/{self.log_name}_logs.json'):
# os.remove(f'{self.task_path}/{self.log_name}_logs.json')
def on_test_end(self, logs=None):
txt = json.dumps({
'EPOCH': 'Finished',
'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
'scores': str(logs)
})
with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
f.write(f'{txt}\n')
os.remove(f'{self.task_path}/state.json')
def on_test_batch_end(self, batch, logs=None):
txt = json.dumps({
'batch': batch,
'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
'scores': str(logs)
})
with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
f.write(f'\t{txt}\n')
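
# Usage sketch (model and data are placeholders; task_path must already exist
# so the log files can be created):
#
#   cbs = [TrainingCallbacks(task_path='tasks/demo')]
#   model.fit(x_train, y_train, epochs=3, callbacks=cbs)
#   model.evaluate(x_test, y_test,
#                  callbacks=[EvaluatingCallbacks(task_path='tasks/demo')])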
|
[
"os.remove",
"time.localtime"
] |
[((958, 999), 'os.remove', 'os.remove', (['f"""{self.task_path}/state.json"""'], {}), "(f'{self.task_path}/state.json')\n", (967, 999), False, 'import os\n'), ((2837, 2878), 'os.remove', 'os.remove', (['f"""{self.task_path}/state.json"""'], {}), "(f'{self.task_path}/state.json')\n", (2846, 2878), False, 'import os\n'), ((784, 800), 'time.localtime', 'time.localtime', ([], {}), '()\n', (798, 800), False, 'import time\n'), ((1184, 1200), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1198, 1200), False, 'import time\n'), ((1501, 1517), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1515, 1517), False, 'import time\n'), ((1829, 1845), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1843, 1845), False, 'import time\n'), ((2659, 2675), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2673, 2675), False, 'import time\n'), ((3041, 3057), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3055, 3057), False, 'import time\n')]
|
"""Provide the constant elasticity of substitution function."""
import numpy as np
from copulpy.config_copulpy import IS_DEBUG
from copulpy.clsMeta import MetaCls
class CESCls(MetaCls):
"""CES class."""
def __init__(self, alpha, y_weight, discount_factor):
"""Initialize class."""
self.attr = dict()
self.attr['discount_factor'] = discount_factor
self.attr['y_weight'] = y_weight
self.attr['alpha'] = alpha
self._check_attributes()
def evaluate(self, v_1, v_2):
"""Evaluate the CES function."""
self._additional_checks('evaluate_in', v_1, v_2)
y_weight, discount_factor, alpha = self.get_attr('y_weight', 'discount_factor', 'alpha')
rslt = (v_1 ** alpha + y_weight * v_2 ** alpha) ** (1 / alpha)
rslt = discount_factor * rslt
        self._additional_checks('evaluate_out', rslt)  # the 'evaluate_out' branch unpacks the result from args
return rslt
def _check_attributes(self):
"""Check the attributes of the class."""
alpha, y_weights, discount_factors = self.get_attr('alpha', 'y_weight', 'discount_factor')
np.testing.assert_equal(alpha >= 0, True)
np.testing.assert_equal(np.all(y_weights >= 0), True)
np.testing.assert_equal(np.all(discount_factors >= 0), True)
@staticmethod
def _additional_checks(label, *args):
"""Perform some additional checks on selected features of the class instance."""
# We only run these tests during debugging as otherwise the performance deteriorates.
if not IS_DEBUG:
return
if label in ['evaluate_in']:
for var in args:
np.testing.assert_equal(np.all(var >= 0), True)
elif label in ['evaluate_out']:
rslt, = args
np.testing.assert_equal(np.all(0.0 <= rslt), True)
else:
raise NotImplementedError
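
# Worked example: with alpha = 1 the CES collapses to a weighted sum, so
# v_1 = 2, v_2 = 4, y_weight = 0.5, discount_factor = 0.9 gives
# 0.9 * (2 + 0.5 * 4) = 3.6. (This assumes MetaCls.get_attr returns the
# stored attributes in order, as the methods above rely on.)
if __name__ == '__main__':
    ces = CESCls(alpha=1.0, y_weight=0.5, discount_factor=0.9)
    assert abs(ces.evaluate(2.0, 4.0) - 3.6) < 1e-12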
|
[
"numpy.all",
"numpy.testing.assert_equal"
] |
[((1094, 1135), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(alpha >= 0)', '(True)'], {}), '(alpha >= 0, True)\n', (1117, 1135), True, 'import numpy as np\n'), ((1168, 1190), 'numpy.all', 'np.all', (['(y_weights >= 0)'], {}), '(y_weights >= 0)\n', (1174, 1190), True, 'import numpy as np\n'), ((1230, 1259), 'numpy.all', 'np.all', (['(discount_factors >= 0)'], {}), '(discount_factors >= 0)\n', (1236, 1259), True, 'import numpy as np\n'), ((1662, 1678), 'numpy.all', 'np.all', (['(var >= 0)'], {}), '(var >= 0)\n', (1668, 1678), True, 'import numpy as np\n'), ((1787, 1806), 'numpy.all', 'np.all', (['(0.0 <= rslt)'], {}), '(0.0 <= rslt)\n', (1793, 1806), True, 'import numpy as np\n')]
|
#!/usr/bin/python
from SimpleCV import *
from numpy import linspace
from scipy.interpolate import UnivariateSpline
import sys, time, socket
#settings for the project)
srcImg = "../../sampleimages/orson_welles.jpg"
font_size = 20
sleep_for = 3 #seconds to sleep for
draw_color = Color.RED
while True:
image = Image(srcImg)
image.drawText("Original Size", 10,10, color=draw_color, fontsize=font_size)
image.show()
time.sleep(sleep_for)
rot = image.rotate(45)
rot.drawText("Rotated 45 degrees", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
rot = image.rotate(45, scale=0.5)
rot.drawText("Rotated 45 degrees and scaled", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
rot = image.rotate(45,scale=0.5, point = (0,0) )
rot.drawText("Rotated 45 degrees and scaled around a point", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
rot = image.rotate(45,"full")
rot.drawText("Rotated 45 degrees and full", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
atrans = image.shear([(image.width/2,0),(image.width-1,image.height/2),(image.width/2,image.height-1)])
atrans.drawText("Affine Transformation", 10,10, color=draw_color, fontsize=font_size)
atrans.show()
time.sleep(sleep_for)
ptrans = image.warp([(image.width*0.05,image.height*0.03),(image.width*0.9,image.height*0.1),(image.width*0.8,image.height*0.7),(image.width*0.2,image.height*0.9)])
ptrans.drawText("Perspective Transformation", 10,10, color=draw_color, fontsize=font_size)
ptrans.show()
time.sleep(sleep_for)
|
[
"time.sleep"
] |
[((434, 455), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (444, 455), False, 'import sys, time, socket\n'), ((591, 612), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (601, 612), False, 'import sys, time, socket\n'), ((770, 791), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (780, 791), False, 'import sys, time, socket\n'), ((979, 1000), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (989, 1000), False, 'import sys, time, socket\n'), ((1148, 1169), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (1158, 1169), False, 'import sys, time, socket\n'), ((1395, 1416), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (1405, 1416), False, 'import sys, time, socket\n'), ((1704, 1725), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (1714, 1725), False, 'import sys, time, socket\n')]
|
#!/usr/bin/env python3
import struct, sys, traceback;
if (len(sys.argv) < 4):
print("Usage: %s ANK16.FNT KANJI16.FNT FONT.BMP"%sys.argv[0]);
exit(1);
ank16 = None;
with open(sys.argv[1], 'rb') as f:
data = f.read();
ank16 = [data[i:i+16] for i in range(0, 256*16, 16)];
kanji16 = None;
charTable = {};
for row in range(0xa1, 0xff):
for col in range(0xa1, 0xff):
code = "%02x%02x"%(row & 0x7F, col & 0x7F);
char = struct.pack('BB', row, col).decode('euc-jp', errors='ignore');
sjis = char.encode('shift-jis', errors='ignore');
if (len(char) > 0 and len(sjis) > 0):
charTable[code] = {
'eucjp': (row & 0x7f, col & 0x7f),
'utf8': char,
'sjis': (sjis[0], sjis[1]),
'pixel': None,
};
with open(sys.argv[2], 'rb') as f:
for k in sorted(charTable.keys()):
ch = charTable[k];
row, col = ch['sjis'];
if row <= 0x84:
f.seek(512 + ((row - 0x81) * 189 + (col - 0x40)) * 32);
else:
f.seek(512 + ((row - 0x81) * 189 + (col - 0x40) - 378) * 32);
pixelData = f.read(32);
if pixelData != b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0':
ch['pixel'] = pixelData;
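
# Note on writeBMP below: the target is a 1-bit-per-pixel, bottom-up BMP.
# metadata[2]/[4]/[5] are the pixel-data offset, width and height; each byte
# holds 8 pixels, rows are stored bottom row first, and `x` is a byte column,
# which is why writeChar emits a 16-pixel glyph row as two bytes at columns
# x*2 and x*2+1.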
def writeBMP(f, metadata, x, y, data):
offset = metadata[2];
width = metadata[4];
height = metadata[5];
if (x < 0 or y < 0):
print("Error pos: ",x,y);
return;
f.seek(offset + (((height - y - 1) * width) >> 3) + x);
f.write(data);
def writeChar(f, metadata, x, y, data):
for i in range(16):
d = data[i*2];
writeBMP(f, metadata, x*2, y*16+i, struct.pack('B', (~d & 0xFF)));
d = data[i*2 + 1];
writeBMP(f, metadata, x*2+1, y*16+i, struct.pack('B', (~d & 0xFF)));
f = open(sys.argv[3], 'r+b');
try:
metadata = struct.unpack('<HLxxxxLLLLHHLLLLLL', f.read(14+40));
for ch, ank in enumerate(ank16):
if not ((ch >= 0x20 and ch <= 0x7e) or (ch >= 0xA1 and ch <= 0xdf)):
continue;
for i, d in enumerate(ank):
writeBMP(f, metadata, ch, i, struct.pack('B', (~d & 0xFF)));
for k in sorted(charTable.keys()):
ch = charTable[k];
row, col = ch['eucjp'];
if (None != ch['pixel'] and len(ch['pixel']) == 32):
writeChar(f, metadata, row - 0x20, col, ch['pixel']);
for ch, ank in enumerate(ank16):
if (ch >= 0x21 and ch <= 0x7e):
ank2 = b'';
for i, d in enumerate(ank):
ank2 += (struct.pack('B', d) + b'\0');
writeChar(f, metadata, 0x29 - 0x20, ch, ank2);
elif (ch >= 0xA1 and ch <= 0xDF):
ank2 = b'';
for i, d in enumerate(ank):
ank2 += (struct.pack('B', d) + b'\0');
writeChar(f, metadata, 0x2A - 0x20, ch & 0x7f, ank2);
except Exception as e:
traceback.print_exc();
finally:
f.close();
'''
def printPixels(data):
if (None == data):
return;
pixels = '';
for i, row in enumerate(data):
for j in range(8):
if (row & (1 << (7-j))):
pixels += '#';
else:
pixels += '.';
if (i & 1):
pixels += '\n';
print(pixels);
print('\n');
if (len(sys.argv) > 1 and sys.argv[1] == 'l'):
with open('KANJI16.FNT', 'rb') as f:
for i in range(8192):
f.seek(512 + i * 32);
print("%04x %d"%(i, i));
printPixels(f.read(32));
exit();
for k in sorted(charTable.keys()):
ch = charTable[k];
print(k, "%02x%02x"%ch['sjis'], ch['utf8']);
printPixels(ch['pixel']);
'''
|
[
"traceback.print_exc",
"struct.pack"
] |
[((2921, 2942), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2940, 2942), False, 'import struct, sys, traceback\n'), ((1700, 1726), 'struct.pack', 'struct.pack', (['"""B"""', '(~d & 255)'], {}), "('B', ~d & 255)\n", (1711, 1726), False, 'import struct, sys, traceback\n'), ((1804, 1830), 'struct.pack', 'struct.pack', (['"""B"""', '(~d & 255)'], {}), "('B', ~d & 255)\n", (1815, 1830), False, 'import struct, sys, traceback\n'), ((453, 480), 'struct.pack', 'struct.pack', (['"""BB"""', 'row', 'col'], {}), "('BB', row, col)\n", (464, 480), False, 'import struct, sys, traceback\n'), ((2153, 2179), 'struct.pack', 'struct.pack', (['"""B"""', '(~d & 255)'], {}), "('B', ~d & 255)\n", (2164, 2179), False, 'import struct, sys, traceback\n'), ((2578, 2597), 'struct.pack', 'struct.pack', (['"""B"""', 'd'], {}), "('B', d)\n", (2589, 2597), False, 'import struct, sys, traceback\n'), ((2798, 2817), 'struct.pack', 'struct.pack', (['"""B"""', 'd'], {}), "('B', d)\n", (2809, 2817), False, 'import struct, sys, traceback\n')]
|
from UtilGp import UtilGp
from dBase import ndb
from TranData import DbTranData
from frmPageShop import Shop
class Report:
@staticmethod
def show():
UtilGp.Login(Report.reportMenu, ndb.loadTranByItem,'Reports (Login)')
@staticmethod
def showDateRange():
UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('Transactions between two dates',))
fromDate = input("From Date(dd-MM-YYYY):")
toDate = input("To Date (dd-MM-YYYY):")
fromDate = UtilGp.strToDate(fromDate + ' 00:00:00')
toDate = UtilGp.strToDate(toDate + ' 23:59:00')
#print(fromDate,toDate)
dbTran=DbTranData.queryByDateRange(ndb.dbTran,fromDate,toDate)
DbTranData.printTran(dbTran)
print("****End of Report****");print();
from frmPageShop import Shop
Shop.showFailure()
@staticmethod
def showBySortedAmount():
UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('All Transactions sorted on amount',))
dbTran = DbTranData.queryAll(ndb.dbTran)
dbTran.sort(key=lambda x:x.price)
#print(dbTran)
DbTranData.printTran(dbTran)
print("****End of Report****");print();
from frmPageShop import Shop
Shop.showFailure()
@staticmethod
def showAmountRange():
UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('Transactions within amount range',))
fromAmount = float(input("From Amount:"))
toAmount = float(input("To Amount:"))
#print(fromDate,toDate)
dbTran=DbTranData.queryByAmount(ndb.dbTran,fromAmount,toAmount)
DbTranData.printTran(dbTran)
print("****End of Report****");print();
Shop.showFailure()
@staticmethod
def showByCat():
UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('Transactions of a Category',))
for valueCat in ndb.dbCat:
print("%d %s"%(valueCat.catId,valueCat.category))
        choiceCatId = int(input("Enter Category ID\nYour Choice:"))
isExist=False;resCat=None;
for valueCat in ndb.dbCat:
if valueCat.catId==choiceCatId:
isExist=True;resCat=valueCat;
break
if isExist:
dbTran = DbTranData.queryByCategory(ndb.dbTran, choiceCatId)
DbTranData.printTran(dbTran)
print("****End of Report****");print();
Shop.showFailure()
else:
print("You Category Not Exist.")
Shop.showFailure()
@staticmethod
def showCatTot():
UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)'%('Total Amount Spent on a Category',))
for valueCat in ndb.dbCat:
print("%d %s"%(valueCat.catId,valueCat.category))
        choiceCatId = int(input("Enter Category ID\nYour Choice:"))
isExist=False;resCat=None;
for valueCat in ndb.dbCat:
if valueCat.catId==choiceCatId:
isExist=True;resCat=valueCat;
break
if isExist:
dbTran = DbTranData.queryByCategory(ndb.dbTran, choiceCatId)
amount = sum(list(map(lambda x:x.price,dbTran)))
UtilGp.printCaptionData(('Category','Total Amount Spent is'),
(valueCat.category,'Rs.%.2f'%(amount,)), 45)
#print("Total Amount Spent for category %s is Rs.%f"%(valueCat.category,amount))
print("****End of Report****");print();
Shop.showFailure()
else:
print("You Category Not Exist.")
Shop.showFailure()
@staticmethod
def reportMenu():
UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports')
print("1-All transactions done between two dates")
print("2-All transactions that fall within a specified amount range")
print("3-All transactions done on a category")
print("4-Total amount spent on a category")
print("5-Transactions sorted based on amount")
choice=int(input("Your Choice:"))
if choice == 1:
Report.showDateRange()
elif choice == 2:
Report.showAmountRange()
elif choice == 3:
Report.showByCat()
elif choice == 4:
Report.showCatTot()
elif choice == 5:
Report.showBySortedAmount()
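# Entry point: Report.show() hands Report.reportMenu to UtilGp.Login, so the
# menu above is reached only after a successful login.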
|
[
"frmPageShop.Shop.showFailure",
"TranData.DbTranData.queryAll",
"UtilGp.UtilGp.Login",
"TranData.DbTranData.queryByDateRange",
"UtilGp.UtilGp.title",
"TranData.DbTranData.printTran",
"TranData.DbTranData.queryByAmount",
"TranData.DbTranData.queryByCategory",
"UtilGp.UtilGp.printCaptionData",
"UtilGp.UtilGp.strToDate",
"UtilGp.UtilGp.clear",
"UtilGp.UtilGp.sleep"
] |
[((165, 235), 'UtilGp.UtilGp.Login', 'UtilGp.Login', (['Report.reportMenu', 'ndb.loadTranByItem', '"""Reports (Login)"""'], {}), "(Report.reportMenu, ndb.loadTranByItem, 'Reports (Login)')\n", (177, 235), False, 'from UtilGp import UtilGp\n'), ((286, 301), 'UtilGp.UtilGp.sleep', 'UtilGp.sleep', (['(2)'], {}), '(2)\n', (298, 301), False, 'from UtilGp import UtilGp\n'), ((302, 316), 'UtilGp.UtilGp.clear', 'UtilGp.clear', ([], {}), '()\n', (314, 316), False, 'from UtilGp import UtilGp\n'), ((317, 382), 'UtilGp.UtilGp.title', 'UtilGp.title', (["('Reports(%s)' % ('Transactions between two dates',))"], {}), "('Reports(%s)' % ('Transactions between two dates',))\n", (329, 382), False, 'from UtilGp import UtilGp\n'), ((502, 542), 'UtilGp.UtilGp.strToDate', 'UtilGp.strToDate', (["(fromDate + ' 00:00:00')"], {}), "(fromDate + ' 00:00:00')\n", (518, 542), False, 'from UtilGp import UtilGp\n'), ((560, 598), 'UtilGp.UtilGp.strToDate', 'UtilGp.strToDate', (["(toDate + ' 23:59:00')"], {}), "(toDate + ' 23:59:00')\n", (576, 598), False, 'from UtilGp import UtilGp\n'), ((646, 703), 'TranData.DbTranData.queryByDateRange', 'DbTranData.queryByDateRange', (['ndb.dbTran', 'fromDate', 'toDate'], {}), '(ndb.dbTran, fromDate, toDate)\n', (673, 703), False, 'from TranData import DbTranData\n'), ((710, 738), 'TranData.DbTranData.printTran', 'DbTranData.printTran', (['dbTran'], {}), '(dbTran)\n', (730, 738), False, 'from TranData import DbTranData\n'), ((832, 850), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (848, 850), False, 'from frmPageShop import Shop\n'), ((907, 922), 'UtilGp.UtilGp.sleep', 'UtilGp.sleep', (['(2)'], {}), '(2)\n', (919, 922), False, 'from UtilGp import UtilGp\n'), ((923, 937), 'UtilGp.UtilGp.clear', 'UtilGp.clear', ([], {}), '()\n', (935, 937), False, 'from UtilGp import UtilGp\n'), ((938, 1006), 'UtilGp.UtilGp.title', 'UtilGp.title', (["('Reports(%s)' % ('All Transactions sorted on amount',))"], {}), "('Reports(%s)' % ('All Transactions sorted on amount',))\n", (950, 1006), False, 'from UtilGp import UtilGp\n'), ((1024, 1055), 'TranData.DbTranData.queryAll', 'DbTranData.queryAll', (['ndb.dbTran'], {}), '(ndb.dbTran)\n', (1043, 1055), False, 'from TranData import DbTranData\n'), ((1129, 1157), 'TranData.DbTranData.printTran', 'DbTranData.printTran', (['dbTran'], {}), '(dbTran)\n', (1149, 1157), False, 'from TranData import DbTranData\n'), ((1251, 1269), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (1267, 1269), False, 'from frmPageShop import Shop\n'), ((1323, 1338), 'UtilGp.UtilGp.sleep', 'UtilGp.sleep', (['(2)'], {}), '(2)\n', (1335, 1338), False, 'from UtilGp import UtilGp\n'), ((1339, 1353), 'UtilGp.UtilGp.clear', 'UtilGp.clear', ([], {}), '()\n', (1351, 1353), False, 'from UtilGp import UtilGp\n'), ((1354, 1421), 'UtilGp.UtilGp.title', 'UtilGp.title', (["('Reports(%s)' % ('Transactions within amount range',))"], {}), "('Reports(%s)' % ('Transactions within amount range',))\n", (1366, 1421), False, 'from UtilGp import UtilGp\n'), ((1565, 1623), 'TranData.DbTranData.queryByAmount', 'DbTranData.queryByAmount', (['ndb.dbTran', 'fromAmount', 'toAmount'], {}), '(ndb.dbTran, fromAmount, toAmount)\n', (1589, 1623), False, 'from TranData import DbTranData\n'), ((1630, 1658), 'TranData.DbTranData.printTran', 'DbTranData.printTran', (['dbTran'], {}), '(dbTran)\n', (1650, 1658), False, 'from TranData import DbTranData\n'), ((1715, 1733), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (1731, 1733), False, 'from frmPageShop import Shop\n'), ((1781, 1796), 'UtilGp.UtilGp.sleep', 'UtilGp.sleep', (['(2)'], {}), '(2)\n', (1793, 1796), False, 'from UtilGp import UtilGp\n'), ((1797, 1811), 'UtilGp.UtilGp.clear', 'UtilGp.clear', ([], {}), '()\n', (1809, 1811), False, 'from UtilGp import UtilGp\n'), ((1812, 1873), 'UtilGp.UtilGp.title', 'UtilGp.title', (["('Reports(%s)' % ('Transactions of a Category',))"], {}), "('Reports(%s)' % ('Transactions of a Category',))\n", (1824, 1873), False, 'from UtilGp import UtilGp\n'), ((2575, 2590), 'UtilGp.UtilGp.sleep', 'UtilGp.sleep', (['(2)'], {}), '(2)\n', (2587, 2590), False, 'from UtilGp import UtilGp\n'), ((2591, 2605), 'UtilGp.UtilGp.clear', 'UtilGp.clear', ([], {}), '()\n', (2603, 2605), False, 'from UtilGp import UtilGp\n'), ((2606, 2673), 'UtilGp.UtilGp.title', 'UtilGp.title', (["('Reports(%s)' % ('Total Amount Spent on a Category',))"], {}), "('Reports(%s)' % ('Total Amount Spent on a Category',))\n", (2618, 2673), False, 'from UtilGp import UtilGp\n'), ((3642, 3657), 'UtilGp.UtilGp.sleep', 'UtilGp.sleep', (['(2)'], {}), '(2)\n', (3654, 3657), False, 'from UtilGp import UtilGp\n'), ((3658, 3672), 'UtilGp.UtilGp.clear', 'UtilGp.clear', ([], {}), '()\n', (3670, 3672), False, 'from UtilGp import UtilGp\n'), ((3673, 3696), 'UtilGp.UtilGp.title', 'UtilGp.title', (['"""Reports"""'], {}), "('Reports')\n", (3685, 3696), False, 'from UtilGp import UtilGp\n'), ((2261, 2312), 'TranData.DbTranData.queryByCategory', 'DbTranData.queryByCategory', (['ndb.dbTran', 'choiceCatId'], {}), '(ndb.dbTran, choiceCatId)\n', (2287, 2312), False, 'from TranData import DbTranData\n'), ((2325, 2353), 'TranData.DbTranData.printTran', 'DbTranData.printTran', (['dbTran'], {}), '(dbTran)\n', (2345, 2353), False, 'from TranData import DbTranData\n'), ((2418, 2436), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (2434, 2436), False, 'from frmPageShop import Shop\n'), ((2508, 2526), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (2524, 2526), False, 'from frmPageShop import Shop\n'), ((3059, 3110), 'TranData.DbTranData.queryByCategory', 'DbTranData.queryByCategory', (['ndb.dbTran', 'choiceCatId'], {}), '(ndb.dbTran, choiceCatId)\n', (3085, 3110), False, 'from TranData import DbTranData\n'), ((3184, 3299), 'UtilGp.UtilGp.printCaptionData', 'UtilGp.printCaptionData', (["('Category', 'Total Amount Spent is')", "(valueCat.category, 'Rs.%.2f' % (amount,))", '(45)'], {}), "(('Category', 'Total Amount Spent is'), (valueCat.\n category, 'Rs.%.2f' % (amount,)), 45)\n", (3207, 3299), False, 'from UtilGp import UtilGp\n'), ((3484, 3502), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (3500, 3502), False, 'from frmPageShop import Shop\n'), ((3574, 3592), 'frmPageShop.Shop.showFailure', 'Shop.showFailure', ([], {}), '()\n', (3590, 3592), False, 'from frmPageShop import Shop\n')]
|
import os
import logging
from six.moves.urllib.parse import urljoin
import six
from pelican import signals
from pelican.utils import pelican_open
if not six.PY3:
from codecs import open
logger = logging.getLogger(__name__)
source_files = []
PROCESS = ['articles', 'pages', 'drafts']
def link_source_files(generator):
"""
    Processes each article/page object, computes the copy-from and copy-to
    destinations, and adds the source file URL to the post as an attribute.
"""
# Get all attributes from the generator that are articles or pages
posts = [
getattr(generator, attr, None) for attr in PROCESS
if getattr(generator, attr, None) is not None]
# Work on each item
for post in posts[0]:
        if 'SHOW_SOURCE_ON_SIDEBAR' not in generator.settings and \
                'SHOW_SOURCE_IN_SECTION' not in generator.settings:
return
# Only try this when specified in metadata or SHOW_SOURCE_ALL_POSTS
# override is present in settings
if 'SHOW_SOURCE_ALL_POSTS' in generator.settings or \
'show_source' in post.metadata:
# Source file name can be optionally set in config
show_source_filename = generator.settings.get(
'SHOW_SOURCE_FILENAME', '{}.txt'.format(post.slug)
)
try:
# Get the full path to the original source file
source_out = os.path.join(
post.settings['OUTPUT_PATH'], post.save_as
)
# Get the path to the original source file
source_out_path = os.path.split(source_out)[0]
# Create 'copy to' destination for writing later
copy_to = os.path.join(
source_out_path, show_source_filename
)
# Add file to published path
source_url = urljoin(
post.save_as, show_source_filename
)
except Exception:
return
# Format post source dict & populate
out = dict()
out['copy_raw_from'] = post.source_path
out['copy_raw_to'] = copy_to
logger.debug('Linked %s to %s', post.source_path, copy_to)
source_files.append(out)
# Also add the source path to the post as an attribute for tpls
post.show_source_url = source_url
def _copy_from_to(from_file, to_file):
"""
A very rough and ready copy from / to function.
"""
with pelican_open(from_file) as text_in:
encoding = 'utf-8'
with open(to_file, 'w', encoding=encoding) as text_out:
text_out.write(text_in)
logger.info('Writing %s', to_file)
def write_source_files(*args, **kwargs):
"""
Called by the `page_writer_finalized` signal to process source files.
"""
for source in source_files:
_copy_from_to(source['copy_raw_from'], source['copy_raw_to'])
def register():
"""
Calls the shots, based on signals
"""
signals.article_generator_finalized.connect(link_source_files)
signals.page_generator_finalized.connect(link_source_files)
signals.page_writer_finalized.connect(write_source_files)
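# Example pelicanconf.py settings this plugin reads (values are illustrative):
#     PLUGINS = ['show_source']
#     SHOW_SOURCE_ON_SIDEBAR = True       # or SHOW_SOURCE_IN_SECTION = True
#     SHOW_SOURCE_ALL_POSTS = True        # or set 'show_source' in post metadata
#     SHOW_SOURCE_FILENAME = 'source.txt' # optional; defaults to '<slug>.txt'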
|
[
"six.moves.urllib.parse.urljoin",
"codecs.open",
"os.path.join",
"pelican.signals.article_generator_finalized.connect",
"pelican.signals.page_generator_finalized.connect",
"os.path.split",
"pelican.utils.pelican_open",
"logging.getLogger",
"pelican.signals.page_writer_finalized.connect"
] |
[((201, 228), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (218, 228), False, 'import logging\n'), ((3080, 3142), 'pelican.signals.article_generator_finalized.connect', 'signals.article_generator_finalized.connect', (['link_source_files'], {}), '(link_source_files)\n', (3123, 3142), False, 'from pelican import signals\n'), ((3147, 3206), 'pelican.signals.page_generator_finalized.connect', 'signals.page_generator_finalized.connect', (['link_source_files'], {}), '(link_source_files)\n', (3187, 3206), False, 'from pelican import signals\n'), ((3211, 3268), 'pelican.signals.page_writer_finalized.connect', 'signals.page_writer_finalized.connect', (['write_source_files'], {}), '(write_source_files)\n', (3248, 3268), False, 'from pelican import signals\n'), ((2561, 2584), 'pelican.utils.pelican_open', 'pelican_open', (['from_file'], {}), '(from_file)\n', (2573, 2584), False, 'from pelican.utils import pelican_open\n'), ((2637, 2674), 'codecs.open', 'open', (['to_file', '"""w"""'], {'encoding': 'encoding'}), "(to_file, 'w', encoding=encoding)\n", (2641, 2674), False, 'from codecs import open\n'), ((1428, 1484), 'os.path.join', 'os.path.join', (["post.settings['OUTPUT_PATH']", 'post.save_as'], {}), "(post.settings['OUTPUT_PATH'], post.save_as)\n", (1440, 1484), False, 'import os\n'), ((1740, 1791), 'os.path.join', 'os.path.join', (['source_out_path', 'show_source_filename'], {}), '(source_out_path, show_source_filename)\n', (1752, 1791), False, 'import os\n'), ((1908, 1951), 'six.moves.urllib.parse.urljoin', 'urljoin', (['post.save_as', 'show_source_filename'], {}), '(post.save_as, show_source_filename)\n', (1915, 1951), False, 'from six.moves.urllib.parse import urljoin\n'), ((1620, 1645), 'os.path.split', 'os.path.split', (['source_out'], {}), '(source_out)\n', (1633, 1645), False, 'import os\n')]
|
import torch
import torch.nn as nn
from torch.nn import functional as F
from .base import ASPP, get_syncbn
class dec_deeplabv3(nn.Module):
def __init__(
self,
in_planes,
num_classes=19,
inner_planes=256,
sync_bn=False,
dilations=(12, 24, 36),
):
super(dec_deeplabv3, self).__init__()
norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
self.aspp = ASPP(
in_planes, inner_planes=inner_planes, sync_bn=sync_bn, dilations=dilations
)
self.head = nn.Sequential(
nn.Conv2d(
self.aspp.get_outplanes(),
256,
kernel_size=3,
padding=1,
dilation=1,
bias=False,
),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
)
def forward(self, x):
aspp_out = self.aspp(x)
res = self.head(aspp_out)
return res
class dec_deeplabv3_plus(nn.Module):
def __init__(
self,
in_planes,
num_classes=19,
inner_planes=256,
sync_bn=False,
dilations=(12, 24, 36),
rep_head=True,
):
super(dec_deeplabv3_plus, self).__init__()
norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
self.rep_head = rep_head
self.low_conv = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=1), norm_layer(256), nn.ReLU(inplace=True)
)
self.aspp = ASPP(
in_planes, inner_planes=inner_planes, sync_bn=sync_bn, dilations=dilations
)
self.head = nn.Sequential(
nn.Conv2d(
self.aspp.get_outplanes(),
256,
kernel_size=3,
padding=1,
dilation=1,
bias=False,
),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
)
self.classifier = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=True),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
)
if self.rep_head:
self.representation = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=True),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0, bias=True),
)
def forward(self, x):
x1, x2, x3, x4 = x
aspp_out = self.aspp(x4)
low_feat = self.low_conv(x1)
aspp_out = self.head(aspp_out)
h, w = low_feat.size()[-2:]
aspp_out = F.interpolate(
aspp_out, size=(h, w), mode="bilinear", align_corners=True
)
aspp_out = torch.cat((low_feat, aspp_out), dim=1)
res = {"pred": self.classifier(aspp_out)}
if self.rep_head:
res["rep"] = self.representation(aspp_out)
return res
class Aux_Module(nn.Module):
def __init__(self, in_planes, num_classes=19, sync_bn=False):
super(Aux_Module, self).__init__()
norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
self.aux = nn.Sequential(
nn.Conv2d(in_planes, 256, kernel_size=3, stride=1, padding=1),
norm_layer(256),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
)
def forward(self, x):
res = self.aux(x)
return res
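# Shape sketch (assumed): dec_deeplabv3 expects a single feature map of shape
# (N, in_planes, H, W); dec_deeplabv3_plus expects the backbone tuple
# (x1, x2, x3, x4), where x1 must have 256 channels to match self.low_conv
# and x4 has in_planes channels for the ASPP head.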
|
[
"torch.nn.Dropout2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cat",
"torch.nn.functional.interpolate"
] |
[((3355, 3428), 'torch.nn.functional.interpolate', 'F.interpolate', (['aspp_out'], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(aspp_out, size=(h, w), mode='bilinear', align_corners=True)\n", (3368, 3428), True, 'from torch.nn import functional as F\n'), ((3470, 3508), 'torch.cat', 'torch.cat', (['(low_feat, aspp_out)'], {'dim': '(1)'}), '((low_feat, aspp_out), dim=1)\n', (3479, 3508), False, 'import torch\n'), ((833, 854), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (840, 854), True, 'import torch.nn as nn\n'), ((868, 885), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (880, 885), True, 'import torch.nn as nn\n'), ((899, 973), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (908, 973), True, 'import torch.nn as nn\n'), ((1524, 1558), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(1)'}), '(256, 256, kernel_size=1)\n', (1533, 1558), True, 'import torch.nn as nn\n'), ((1577, 1598), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1584, 1598), True, 'import torch.nn as nn\n'), ((2026, 2047), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2033, 2047), True, 'import torch.nn as nn\n'), ((2061, 2078), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (2073, 2078), True, 'import torch.nn as nn\n'), ((2144, 2210), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(512, 256, kernel_size=3, stride=1, padding=1, bias=True)\n', (2153, 2210), True, 'import torch.nn as nn\n'), ((2253, 2274), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2260, 2274), True, 'import torch.nn as nn\n'), ((2288, 2305), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (2300, 2305), True, 'import torch.nn as nn\n'), ((2319, 2385), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, stride=1, padding=1, bias=True)\n', (2328, 2385), True, 'import torch.nn as nn\n'), ((2428, 2449), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2435, 2449), True, 'import torch.nn as nn\n'), ((2463, 2480), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (2475, 2480), True, 'import torch.nn as nn\n'), ((2494, 2568), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (2503, 2568), True, 'import torch.nn as nn\n'), ((3914, 3975), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_planes, 256, kernel_size=3, stride=1, padding=1)\n', (3923, 3975), True, 'import torch.nn as nn\n'), ((4018, 4039), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4025, 4039), True, 'import torch.nn as nn\n'), ((4053, 4070), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (4065, 4070), True, 'import torch.nn as nn\n'), ((4084, 4158), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (4093, 4158), True, 'import torch.nn as nn\n'), ((2673, 2739), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(512, 256, kernel_size=3, stride=1, padding=1, bias=True)\n', (2682, 2739), True, 'import torch.nn as nn\n'), ((2790, 2811), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2797, 2811), True, 'import torch.nn as nn\n'), ((2829, 2846), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (2841, 2846), True, 'import torch.nn as nn\n'), ((2864, 2930), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, stride=1, padding=1, bias=True)\n', (2873, 2930), True, 'import torch.nn as nn\n'), ((2981, 3002), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2988, 3002), True, 'import torch.nn as nn\n'), ((3020, 3037), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (3032, 3037), True, 'import torch.nn as nn\n'), ((3055, 3121), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(256, 256, kernel_size=1, stride=1, padding=0, bias=True)\n', (3064, 3121), True, 'import torch.nn as nn\n')]
|
import argparse
import os
from process_utils import clean_reviews, make_sentences, clean_sentences
# Set script arguments
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--goodreads', action='store_true', help='Set collection to GoodReads (as opposed to UCSD)')
args = parser.parse_args()
# Set which review collection to process (GoodReads vs. UCSD)
goodreads = args.goodreads
# Set file directories
read_dir = 'data/01_raw/goodreads/' if goodreads \
    else 'data/02_intermediate/ucsd_reviews/'
write_dir = 'data/03_processed/'
# If output directory does not exist, create it
if not os.path.isdir(write_dir):
os.mkdir(write_dir)
# List review files to be read in, ignoring hidden files
file_list = [f for f in os.listdir(read_dir) if f.endswith('.csv')]
# Set loop variables
num_files = len(file_list)
file_index = 0
# Loop through files in file list
for file_name in file_list[file_index:]:
file_index += 1
print('-------------------------------------------------------------------')
print(f'Processing file {file_index} of {num_files}: {file_name}\n')
# Clean file's review text
reviews_df = clean_reviews(file_name, read_dir)
# Tokenize reviews into sentences
all_sentences_df = make_sentences(reviews_df)
# Clean sentences
sentences_df = clean_sentences(all_sentences_df)
sentences_df.to_csv(f'{write_dir}{file_name}', index=False)
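# Usage sketch (the script name is an assumption):
#     python process_reviews.py               # process UCSD reviews
#     python process_reviews.py --goodreads    # process GoodReads reviews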
|
[
"os.mkdir",
"process_utils.clean_reviews",
"argparse.ArgumentParser",
"os.path.isdir",
"process_utils.make_sentences",
"process_utils.clean_sentences",
"os.listdir"
] |
[((132, 157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (155, 157), False, 'import argparse\n'), ((625, 649), 'os.path.isdir', 'os.path.isdir', (['write_dir'], {}), '(write_dir)\n', (638, 649), False, 'import os\n'), ((655, 674), 'os.mkdir', 'os.mkdir', (['write_dir'], {}), '(write_dir)\n', (663, 674), False, 'import os\n'), ((1165, 1199), 'process_utils.clean_reviews', 'clean_reviews', (['file_name', 'read_dir'], {}), '(file_name, read_dir)\n', (1178, 1199), False, 'from process_utils import clean_reviews, make_sentences, clean_sentences\n'), ((1262, 1288), 'process_utils.make_sentences', 'make_sentences', (['reviews_df'], {}), '(reviews_df)\n', (1276, 1288), False, 'from process_utils import clean_reviews, make_sentences, clean_sentences\n'), ((1331, 1364), 'process_utils.clean_sentences', 'clean_sentences', (['all_sentences_df'], {}), '(all_sentences_df)\n', (1346, 1364), False, 'from process_utils import clean_reviews, make_sentences, clean_sentences\n'), ((757, 777), 'os.listdir', 'os.listdir', (['read_dir'], {}), '(read_dir)\n', (767, 777), False, 'import os\n')]
|
import os
import numpy as np
import tensorrt as trt
from .utils import common, calibrator
class TRTModel:
def __init__(self, onnx_path, plan_path, mode="fp16", calibration_cache="calibration.cache",
calibration_dataset="", calibration_image_size="",
calibration_mean=[], calibration_std=[]):
"""
:param onnx_path: local path of onnx file.
:param plan_path: trt plan file to read/save.
:param mode: inference mode, fp16/int8.
:param calibration_cache: int8 cache file of calibration.
:param calibration_dataset: dataset.txt for calibration.
        :param calibration_image_size: image size (w, h) for calibration.
:param calibration_mean: image mean for calibration.
:param calibration_std: image std for calibration.
"""
self.trt_logger = trt.Logger()
self.onnx_path = onnx_path
self.plan_path = plan_path
self.mode = mode
# for int8 calibration
if self.mode == "int8":
self.calib = self._get_calibrator(calibration_cache, calibration_dataset,
calibration_image_size, calibration_mean, calibration_std)
# init
self.engine = self._get_engine()
self.execution_context = self.engine.create_execution_context()
self.inputs, self.outputs, self.bindings, self.stream = common.allocate_buffers(self.engine)
def _get_calibrator(self, cache, dataset, size, mean, std):
if not os.path.exists(dataset):
raise Exception("Calibration dataset: {} not exist!".format(self.calibration_dataset))
calib = calibrator.EntropyCalibrator(dataset, cache, size, mean, std)
return calib
def _check_network(self, network):
"""check network
:param network: INetworkDefinition
"""
if not network.num_outputs:
raise Exception("No output node found!")
input_nodes = [network.get_input(i) for i in range(network.num_inputs)]
output_nodes = [network.get_output(i) for i in range(network.num_outputs)]
print("Network description")
for i, inp in enumerate(input_nodes):
print("Input node {} | Name {} | Shape {}".format(i, inp.name, inp.shape))
print("Total layers: {}".format(network.num_layers))
for i in range(network.num_layers):
layer = network.get_layer(i)
print("index {}, layer name: {}".format(i, layer.name))
for i, out in enumerate(output_nodes):
print("Output node {} | Name {} | Shape {}".format(i, out.name, out.shape))
def _parse_onnx(self):
"""takes an ONNX file and creates a TensorRT engine to run inference with
"""
dynamic = False
flag = common.EXPLICIT_BATCH
with trt.Builder(self.trt_logger) as builder, builder.create_network(flag) as network, builder.create_builder_config() as config, trt.OnnxParser(network, self.trt_logger) as parser, trt.Runtime(self.trt_logger) as runtime:
config.max_workspace_size = common.GiB(1)
builder.max_batch_size = 1
if self.mode == "fp16":
config.set_flag(trt.BuilderFlag.FP16)
print("set FP16 mode.")
if self.mode == "int8":
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = self.calib
print("set INT8 mode.")
# Parse model file
print('Loading ONNX file from path {}...'.format(self.onnx_path))
with open(self.onnx_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
print('Completed parsing of ONNX file')
# check netowrk
self._check_network(network)
# build engine
print('Building an engine from file {}; this may take a while...'.format(self.onnx_path))
plan = builder.build_serialized_network(network, config)
engine = runtime.deserialize_cuda_engine(plan)
print("Completed creating Engine")
# save engine
with open(self.plan_path, "wb") as f:
f.write(plan)
return engine
def _get_engine(self):
"""generate tensorrt runtime engine
"""
if os.path.exists(self.plan_path):
print('Load trt plan from: {}'.format(self.plan_path))
with open(self.plan_path, "rb") as f, trt.Runtime(self.trt_logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
if os.path.exists(self.onnx_path):
return self._parse_onnx()
else:
raise Exception("ONNX model file {} not exist!".format(self.onnx_path))
def forward(self, image_tensors):
"""do infernece
:param image_tensors: list, inputs tensor of model.
:return outputs: list, outputs tensor of model.
"""
for i, image_tensor in enumerate(image_tensors):
image = np.array([image_tensor], dtype=np.float32, order='C')
self.inputs[i].host = image
trt_outputs = common.do_inference_v2(self.execution_context,
bindings=self.bindings,
inputs=self.inputs,
outputs=self.outputs,
stream=self.stream)
return trt_outputs
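# Minimal usage sketch (file names and input shape are assumptions):
#     model = TRTModel('model.onnx', 'model.plan', mode='fp16')
#     outputs = model.forward([np.zeros((3, 224, 224), dtype=np.float32)])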
|
[
"tensorrt.Logger",
"tensorrt.OnnxParser",
"os.path.exists",
"tensorrt.Builder",
"numpy.array",
"tensorrt.Runtime"
] |
[((862, 874), 'tensorrt.Logger', 'trt.Logger', ([], {}), '()\n', (872, 874), True, 'import tensorrt as trt\n'), ((4532, 4562), 'os.path.exists', 'os.path.exists', (['self.plan_path'], {}), '(self.plan_path)\n', (4546, 4562), False, 'import os\n'), ((1538, 1561), 'os.path.exists', 'os.path.exists', (['dataset'], {}), '(dataset)\n', (1552, 1561), False, 'import os\n'), ((2855, 2883), 'tensorrt.Builder', 'trt.Builder', (['self.trt_logger'], {}), '(self.trt_logger)\n', (2866, 2883), True, 'import tensorrt as trt\n'), ((2980, 3020), 'tensorrt.OnnxParser', 'trt.OnnxParser', (['network', 'self.trt_logger'], {}), '(network, self.trt_logger)\n', (2994, 3020), True, 'import tensorrt as trt\n'), ((3032, 3060), 'tensorrt.Runtime', 'trt.Runtime', (['self.trt_logger'], {}), '(self.trt_logger)\n', (3043, 3060), True, 'import tensorrt as trt\n'), ((4817, 4847), 'os.path.exists', 'os.path.exists', (['self.onnx_path'], {}), '(self.onnx_path)\n', (4831, 4847), False, 'import os\n'), ((5266, 5319), 'numpy.array', 'np.array', (['[image_tensor]'], {'dtype': 'np.float32', 'order': '"""C"""'}), "([image_tensor], dtype=np.float32, order='C')\n", (5274, 5319), True, 'import numpy as np\n'), ((4682, 4710), 'tensorrt.Runtime', 'trt.Runtime', (['self.trt_logger'], {}), '(self.trt_logger)\n', (4693, 4710), True, 'import tensorrt as trt\n')]
|
"""empty message
Revision ID: 324666fdfa8a
Revises: <PASSWORD>
Create Date: 2016-08-04 13:45:37.492317
"""
# revision identifiers, used by Alembic.
revision = '324666fdfa8a'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(u'prod_process_association_product_id_fkey', 'prod_process_association', type_='foreignkey')
op.create_foreign_key(None, 'prod_process_association', 'product', ['product_id'], ['id'], ondelete='CASCADE')
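    # The recreated FK is anonymous (name=None) and adds ON DELETE CASCADE,
    # so deleting a product now removes its association rows as well.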
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'prod_process_association', type_='foreignkey')
op.create_foreign_key(u'prod_process_association_product_id_fkey', 'prod_process_association', 'product', ['product_id'], ['id'])
### end Alembic commands ###
|
[
"alembic.op.create_foreign_key",
"alembic.op.drop_constraint"
] |
[((332, 447), 'alembic.op.drop_constraint', 'op.drop_constraint', (['u"""prod_process_association_product_id_fkey"""', '"""prod_process_association"""'], {'type_': '"""foreignkey"""'}), "(u'prod_process_association_product_id_fkey',\n 'prod_process_association', type_='foreignkey')\n", (350, 447), False, 'from alembic import op\n'), ((448, 563), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""prod_process_association"""', '"""product"""', "['product_id']", "['id']"], {'ondelete': '"""CASCADE"""'}), "(None, 'prod_process_association', 'product', [\n 'product_id'], ['id'], ondelete='CASCADE')\n", (469, 563), False, 'from alembic import op\n'), ((679, 751), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""prod_process_association"""'], {'type_': '"""foreignkey"""'}), "(None, 'prod_process_association', type_='foreignkey')\n", (697, 751), False, 'from alembic import op\n'), ((756, 889), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['u"""prod_process_association_product_id_fkey"""', '"""prod_process_association"""', '"""product"""', "['product_id']", "['id']"], {}), "(u'prod_process_association_product_id_fkey',\n 'prod_process_association', 'product', ['product_id'], ['id'])\n", (777, 889), False, 'from alembic import op\n')]
|
from typing import Union
from attr import define
from cbor2 import decoder
from .cose import COSECRV, COSEKTY, COSEAlgorithmIdentifier, COSEKey
from .exceptions import InvalidPublicKeyStructure, UnsupportedPublicKeyType
@define
class DecodedOKPPublicKey:
kty: COSEKTY
alg: COSEAlgorithmIdentifier
crv: COSECRV
x: bytes
@define
class DecodedEC2PublicKey:
kty: COSEKTY
alg: COSEAlgorithmIdentifier
crv: COSECRV
x: bytes
y: bytes
@define
class DecodedRSAPublicKey:
kty: COSEKTY
alg: COSEAlgorithmIdentifier
n: bytes
e: bytes
def decode_credential_public_key(
key: bytes,
) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:
"""
Decode a CBOR-encoded public key and turn it into a data structure.
Supports OKP, EC2, and RSA public keys
"""
    # Occasionally we might be given a public key in an "uncompressed" format,
# typically from older U2F security keys. As per the FIDO spec this is indicated by
# a leading 0x04 "uncompressed point compression method" format byte. In that case
# we need to fill in some blanks to turn it into a full EC2 key for signature
# verification
#
# See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats
if key[0] == 0x04:
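        # Uncompressed P-256 point: 0x04 || x (32 bytes) || y (32 bytes)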
return DecodedEC2PublicKey(
kty=COSEKTY.EC2,
alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,
crv=COSECRV.P256,
x=key[1:33],
y=key[33:65],
)
decoded_key: dict = decoder.loads(key)
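    # COSE keys decode to a CBOR map keyed by small integer labels (RFC 8152);
    # the COSEKey constants name those labels.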
kty = decoded_key[COSEKey.KTY]
alg = decoded_key[COSEKey.ALG]
if not kty:
raise InvalidPublicKeyStructure("Credential public key missing kty")
if not alg:
raise InvalidPublicKeyStructure("Credential public key missing alg")
if kty == COSEKTY.OKP:
crv = decoded_key[COSEKey.CRV]
x = decoded_key[COSEKey.X]
if not crv:
raise InvalidPublicKeyStructure("OKP credential public key missing crv")
if not x:
raise InvalidPublicKeyStructure("OKP credential public key missing x")
return DecodedOKPPublicKey(
kty=kty,
alg=alg,
crv=crv,
x=x,
)
elif kty == COSEKTY.EC2:
crv = decoded_key[COSEKey.CRV]
x = decoded_key[COSEKey.X]
y = decoded_key[COSEKey.Y]
if not crv:
raise InvalidPublicKeyStructure("EC2 credential public key missing crv")
if not x:
raise InvalidPublicKeyStructure("EC2 credential public key missing x")
if not y:
raise InvalidPublicKeyStructure("EC2 credential public key missing y")
return DecodedEC2PublicKey(
kty=kty,
alg=alg,
crv=crv,
x=x,
y=y,
)
elif kty == COSEKTY.RSA:
n = decoded_key[COSEKey.N]
e = decoded_key[COSEKey.E]
if not n:
raise InvalidPublicKeyStructure("RSA credential public key missing n")
if not e:
raise InvalidPublicKeyStructure("RSA credential public key missing e")
return DecodedRSAPublicKey(
kty=kty,
alg=alg,
n=n,
e=e,
)
raise UnsupportedPublicKeyType(f'Unsupported credential public key type "{kty}"')
|
[
"cbor2.decoder.loads"
] |
[((1595, 1613), 'cbor2.decoder.loads', 'decoder.loads', (['key'], {}), '(key)\n', (1608, 1613), False, 'from cbor2 import decoder\n')]
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato
from Instrucciones.Excepcion import Excepcion
class If(Instruccion):
'''
    This class represents the if statement.
    The if statement receives as parameters a logical expression and the
    list of instructions to execute when the expression is true.
'''
def __init__(self,expLogica,instrucciones,strGram, linea, columna, strSent):
Instruccion.__init__(self,None,linea,columna,strGram,strSent)
self.expLogica = expLogica
self.instrucciones = instrucciones
def ejecutar(self, tabla, arbol):
pass
def traducir(self, tabla, arbol,cadenaTraducida):
        # If the logical expression produced an error, return it
expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
if isinstance(expresion_logica, Excepcion):
return expresion_logica
if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Start translation
codigo = expresion_logica.codigo
codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
for i in self.instrucciones:
instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
if isinstance(instruccion_if, Excepcion):
return instruccion_if
codigo += instruccion_if
if cadenaTraducida == "":
codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
else:
                # cadenaTraducida carries the exit label when this is an elsif
codigo += "\tgoto " + cadenaTraducida + "\n"
codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
return codigo
# ...
# if temporal_logico:
# goto L1
# goto L2
# label L1
# instrucciones_if
# label L2
# ...
else:
error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
class Ifelse(Instruccion):
'''
    This class represents the if-else statement.
    The if-else statement receives as parameters a logical expression and the
    lists of instructions to execute when the expression is true or false.
'''
def __init__(self,expLogica,instrIfVerdadero,instrIfFalso,strGram, linea, columna, strSent):
Instruccion.__init__(self,None,linea,columna,strGram,strSent)
self.expLogica = expLogica
self.instrIfVerdadero = instrIfVerdadero
self.instrIfFalso = instrIfFalso
def ejecutar(self, tabla, arbol):
pass
def traducir(self, tabla, arbol,cadenaTraducida):
        # If the logical expression produced an error, return it
expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
if isinstance(expresion_logica, Excepcion):
return expresion_logica
if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Start translation
codigo = expresion_logica.codigo
etiquetaSalida = arbol.generaEtiqueta()
codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
for inst in self.instrIfFalso:
instruccion_ifFalso = inst.traducir(tabla, arbol,cadenaTraducida)
if isinstance(instruccion_ifFalso, Excepcion):
return instruccion_ifFalso
codigo += instruccion_ifFalso
codigo += "\tgoto ." + etiquetaSalida + "\n"
codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
for i in self.instrIfVerdadero:
instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
if isinstance(instruccion_if, Excepcion):
return instruccion_if
codigo += instruccion_if
codigo += "\tlabel ." + etiquetaSalida + "\n"
return codigo
# ...
# if temporal_logico
# goto L1
# goto L2
# label L2
# instrucciones_ifFalso
# goto L3
# label L1
# instrucciones_if
# label L3
# ...
else:
error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
class IfElseIf(Instruccion):
'''
    This class represents the if-elseif statement.
    The if-elseif statement receives as parameters the main logical
    expression and its list of instructions, plus a list of elseif nodes,
    each carrying its own logical expression and instructions to execute.
'''
def __init__(self,expLogica,instrIfVerdadero,l_elseif,strGram, linea, columna, strSent):
Instruccion.__init__(self,None,linea,columna,strGram,strSent)
self.expLogica = expLogica
self.instrIfVerdadero = instrIfVerdadero
self.l_elseif = l_elseif
def ejecutar(self, tabla, arbol):
pass
def traducir(self, tabla, arbol,cadenaTraducida):
        # If the logical expression produced an error, return it
expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
if isinstance(expresion_logica, Excepcion):
return expresion_logica
if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Start translation
etiquetaSalida = arbol.generaEtiqueta()
codigo = expresion_logica.codigo
codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            # elseif branches
for s_if in self.l_elseif:
sentencia_if = s_if.traducir(tabla,arbol,etiquetaSalida)
if isinstance(sentencia_if, Excepcion):
return sentencia_if
codigo += sentencia_if
            # Label taken when the first if is true
codigo += "\tgoto " + etiquetaSalida + "\n"
codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
            # Instructions of the main if
for i in self.instrIfVerdadero:
instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
if isinstance(instruccion_if, Excepcion):
return instruccion_if
codigo += instruccion_if
codigo += "\tlabel " + etiquetaSalida + "\n"
return codigo
# ...
# if temporal_logico:
# goto L1
# goto L10
# label L10
# ................
#
# if temporal_logico2:
# goto L3
# goto L4
# Label L3
# instrucciones_elseif
# goto L2
# label L4
#
# ....................
# label L1
# instrucciones_if
# label L2
# ...
else:
error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
class IfElseIfElse(Instruccion):
'''
    This class represents the if-elseif-else statement.
    The if-elseif-else statement receives as parameters the main logical
    expression and its list of instructions, a list of elseif nodes (each
    with its own logical expression and instructions), and the instructions
    to execute when every condition is false.
'''
def __init__(self,expLogica,instrIfVerdadero,l_elseif,instrIfFalso,strGram, linea, columna, strSent):
Instruccion.__init__(self,None,linea,columna,strGram,strSent)
self.expLogica = expLogica
self.instrIfVerdadero = instrIfVerdadero
self.l_elseif = l_elseif
self.instrIfFalso = instrIfFalso
def ejecutar(self, tabla, arbol):
pass
def traducir(self, tabla, arbol,cadenaTraducida):
        # If the logical expression produced an error, return it
expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
if isinstance(expresion_logica, Excepcion):
return expresion_logica
if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Start translation
codigo = expresion_logica.codigo
etiquetaF = arbol.generaEtiqueta()
codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            # elseif branches
for s_if in self.l_elseif:
sentencia_if = s_if.traducir(tabla,arbol,etiquetaF)
if isinstance(sentencia_if, Excepcion):
return sentencia_if
codigo += sentencia_if
            # Instructions when every condition is false
for instr in self.instrIfFalso:
instruccion_falsa = instr.traducir(tabla, arbol,cadenaTraducida)
if isinstance(instruccion_falsa, Excepcion):
return instruccion_falsa
codigo += instruccion_falsa
codigo += "\tgoto " + etiquetaF + "\n"
            # Label taken when the first if is true
codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
            # Instructions of the main if
for i in self.instrIfVerdadero:
instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
if isinstance(instruccion_if, Excepcion):
return instruccion_if
codigo += instruccion_if
codigo += "\tlabel " + etiquetaF + "\n"
return codigo
# ...
# if temporal_logico:
# goto L1
# goto L10
# label L10
# ................
#
# if temporal_logico2:
# goto L3
# goto L4
# Label L3
# instrucciones_elseif
# goto L2
# label L4
#
# ....................
# instrucciones_ifFalso
# goto L2
# label L1
# instrucciones_if
# label L2
# ...
else:
error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
|
[
"Instrucciones.Excepcion.Excepcion",
"Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__"
] |
[((496, 562), 'Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__', 'Instruccion.__init__', (['self', 'None', 'linea', 'columna', 'strGram', 'strSent'], {}), '(self, None, linea, columna, strGram, strSent)\n', (516, 562), False, 'from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n'), ((2768, 2834), 'Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__', 'Instruccion.__init__', (['self', 'None', 'linea', 'columna', 'strGram', 'strSent'], {}), '(self, None, linea, columna, strGram, strSent)\n', (2788, 2834), False, 'from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n'), ((5413, 5479), 'Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__', 'Instruccion.__init__', (['self', 'None', 'linea', 'columna', 'strGram', 'strSent'], {}), '(self, None, linea, columna, strGram, strSent)\n', (5433, 5479), False, 'from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n'), ((8429, 8495), 'Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__', 'Instruccion.__init__', (['self', 'None', 'linea', 'columna', 'strGram', 'strSent'], {}), '(self, None, linea, columna, strGram, strSent)\n', (8449, 8495), False, 'from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n'), ((2175, 2284), 'Instrucciones.Excepcion.Excepcion', 'Excepcion', (['"""42804"""', '"""Semántico"""', '"""La expresion logica debe ser de tipo boolean"""', 'self.linea', 'self.columna'], {}), "('42804', 'Semántico',\n 'La expresion logica debe ser de tipo boolean', self.linea, self.columna)\n", (2184, 2284), False, 'from Instrucciones.Excepcion import Excepcion\n'), ((4758, 4867), 'Instrucciones.Excepcion.Excepcion', 'Excepcion', (['"""42804"""', '"""Semántico"""', '"""La expresion logica debe ser de tipo boolean"""', 'self.linea', 'self.columna'], {}), "('42804', 'Semántico',\n 'La expresion logica debe ser de tipo boolean', self.linea, self.columna)\n", (4767, 4867), False, 'from Instrucciones.Excepcion import Excepcion\n'), ((7707, 7816), 'Instrucciones.Excepcion.Excepcion', 'Excepcion', (['"""42804"""', '"""Semántico"""', '"""La expresion logica debe ser de tipo boolean"""', 'self.linea', 'self.columna'], {}), "('42804', 'Semántico',\n 'La expresion logica debe ser de tipo boolean', self.linea, self.columna)\n", (7716, 7816), False, 'from Instrucciones.Excepcion import Excepcion\n'), ((11135, 11244), 'Instrucciones.Excepcion.Excepcion', 'Excepcion', (['"""42804"""', '"""Semántico"""', '"""La expresion logica debe ser de tipo boolean"""', 'self.linea', 'self.columna'], {}), "('42804', 'Semántico',\n 'La expresion logica debe ser de tipo boolean', self.linea, self.columna)\n", (11144, 11244), False, 'from Instrucciones.Excepcion import Excepcion\n')]
|